diff --git a/.gitignore b/.gitignore
index ad3cd3f..096e586 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,4 @@
*.swp
one_key.sh
sniffer-agent
-vendor/github.com
+# vendor/github.com
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
new file mode 100644
index 0000000..c6c482d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+coverage.txt
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
new file mode 100644
index 0000000..ba1c0ab
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/.travis.yml
@@ -0,0 +1,37 @@
+language: go
+go:
+- 1.7.x
+- 1.8.x
+- 1.9.x
+
+env:
+ global:
+ - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
+ - TOXIPROXY_ADDR=http://localhost:8474
+ - KAFKA_INSTALL_ROOT=/home/travis/kafka
+ - KAFKA_HOSTNAME=localhost
+ - DEBUG=true
+ matrix:
+ - KAFKA_VERSION=0.10.2.1
+ - KAFKA_VERSION=0.11.0.2
+ - KAFKA_VERSION=1.0.0
+
+before_install:
+- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
+- vagrant/install_cluster.sh
+- vagrant/boot_cluster.sh
+- vagrant/create_topics.sh
+
+install:
+- make install_dependencies
+
+script:
+- make test
+- make vet
+- make errcheck
+- make fmt
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
+
+sudo: false
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
new file mode 100644
index 0000000..028a180
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/CHANGELOG.md
@@ -0,0 +1,435 @@
+# Changelog
+
+#### Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+ protocol and the necessary behavioural changes in the producer and consumer.
+ Transactions and idempotency are not yet supported, but producing and
+ consuming should work with all the existing bells and whistles (batching,
+ compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+ of Arista Networks for this work. Part of
+ ([#901](https://github.com/Shopify/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+ ([#970](https://github.com/Shopify/sarama/pull/970)).
+ - Return partial replicas list when we have it
+ ([#975](https://github.com/Shopify/sarama/pull/975)).
+
+#### Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+ ([#905](https://github.com/Shopify/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+ ([#939](https://github.com/Shopify/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+ extremely large clusters
+ ([#937](https://github.com/Shopify/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+ ([#932](https://github.com/Shopify/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+ ([#885](https://github.com/Shopify/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+ by the broker, which can be meaningful
+ ([#930](https://github.com/Shopify/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+ variance in the actual timeout
+ ([#933](https://github.com/Shopify/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+ ([#907](https://github.com/Shopify/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+ ([#940](https://github.com/Shopify/sarama/pull/940)).
+
+#### Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+ version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
+ that you still need to specify the Kafka version in the Sarama configuration
+ for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+ active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+ in-sync broker IDs for the given partition, now that the Kafka versions for
+ which this was misleading are no longer in our supported set
+ ([#872](https://github.com/Shopify/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+ partitioner with a custom hash method in case the default (FNV-1a) is not
+ suitable
+ ([#837](https://github.com/Shopify/sarama/pull/837),
+ [#841](https://github.com/Shopify/sarama/pull/841)).
+
+Improvements:
+ - Recognize more Kafka error codes
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+ correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+ transparent if you're using the `AddGroupProtocol` or
+ `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+ the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+ ([#812](https://github.com/Shopify/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+ ([#859](https://github.com/Shopify/sarama/pull/859)).
+
+#### Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+ ([#701](https://github.com/Shopify/sarama/pull/701),
+ [#746](https://github.com/Shopify/sarama/pull/746),
+ [#766](https://github.com/Shopify/sarama/pull/766)).
+ - Add support for LZ4 compression
+ ([#786](https://github.com/Shopify/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+ ([#775](https://github.com/Shopify/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+ `HighWaterMarkOffset` values of its child topic/partitions
+ ([#769](https://github.com/Shopify/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+ ([#759](https://github.com/Shopify/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+ ([#756](https://github.com/Shopify/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+ ([#787](https://github.com/Shopify/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+ ([#790](https://github.com/Shopify/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+ ([#795](https://github.com/Shopify/sarama/pull/795)).
+
+#### Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+ ([#717](https://github.com/Shopify/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+ ([#722](https://github.com/Shopify/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+ in v1.10.0. Go's timers are finicky
+ ([#730](https://github.com/Shopify/sarama/pull/730),
+ [#733](https://github.com/Shopify/sarama/pull/733),
+ [#734](https://github.com/Shopify/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+ ([#735](https://github.com/Shopify/sarama/pull/735)).
+
+#### Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
+[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
+
+New Features:
+ - Support for Kafka 0.10
+ ([#672](https://github.com/Shopify/sarama/pull/672),
+ [#678](https://github.com/Shopify/sarama/pull/678),
+ [#681](https://github.com/Shopify/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+ ([#676](https://github.com/Shopify/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+ ([#677](https://github.com/Shopify/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+ ([#667](https://github.com/Shopify/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+ ([#634](https://github.com/Shopify/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+ misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+ ([#707](https://github.com/Shopify/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+ ([#664](https://github.com/Shopify/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+ ([#685](https://github.com/Shopify/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+ ([#693](https://github.com/Shopify/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+ ([#705](https://github.com/Shopify/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+ ([#706](https://github.com/Shopify/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+ ([#709](https://github.com/Shopify/sarama/pull/709)).
+
+#### Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+ ([#602](https://github.com/Shopify/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+ implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
+ - Declare support for Golang 1.6
+ ([#611](https://github.com/Shopify/sarama/pull/611)).
+ - Support for SASL plain-text auth
+ ([#648](https://github.com/Shopify/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+ ([#604](https://github.com/Shopify/sarama/pull/604)).
+ - Documentation cleanup
+ ([#605](https://github.com/Shopify/sarama/pull/605),
+ [#621](https://github.com/Shopify/sarama/pull/621),
+ [#654](https://github.com/Shopify/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+ ([#658](https://github.com/Shopify/sarama/pull/658)).
+
+#### Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+ - All protocol messages and fields
+ ([#586](https://github.com/Shopify/sarama/pull/586),
+ [#588](https://github.com/Shopify/sarama/pull/588),
+ [#590](https://github.com/Shopify/sarama/pull/590)).
+ - Verified that TLS support works
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+ - Fixed the OffsetManager compatibility
+ ([#585](https://github.com/Shopify/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+ ([#584](https://github.com/Shopify/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+ ([#589](https://github.com/Shopify/sarama/pull/589)).
+
+#### Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+ ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
+ caveats:
+ - Protocol-layer support is mostly in place
+ ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
+    renamed some messages and fields, which we did not mirror in order to
+    preserve API compatibility.
+ - The producer and consumer work against 0.9, but the offset manager does
+ not ([#573](https://github.com/Shopify/sarama/pull/573)).
+ - TLS support may or may not work
+ ([#581](https://github.com/Shopify/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+ when the TCP connection is left hanging
+ ([#548](https://github.com/Shopify/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+ solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
+ slightly more efficient, and much more precise in calculating batch sizes
+ when compression is used
+ ([#549](https://github.com/Shopify/sarama/pull/549),
+ [#550](https://github.com/Shopify/sarama/pull/550),
+ [#551](https://github.com/Shopify/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+ ([#553](https://github.com/Shopify/sarama/pull/553)).
+
+#### Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+ ([#449](https://github.com/Shopify/sarama/pull/449)).
+
+#### Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+ Kafka 0.8.2. The API is designed mainly for integration into a future
+ high-level consumer, not for direct use, although it is *possible* to use it
+ directly.
+ ([#461](https://github.com/Shopify/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+ removing a major hotspot from most profiles
+ ([#255](https://github.com/Shopify/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+ by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
+ [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
+ ([#528](https://github.com/Shopify/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+ ([#529](https://github.com/Shopify/sarama/pull/529)).
+
+#### Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+ to change when Kafka releases built-in TLS support, but for now this is
+ enough to work with TLS-terminating proxies
+ ([#154](https://github.com/Shopify/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+ all other partitions will continue to consume normally
+ ([#485](https://github.com/Shopify/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+ ([#495](https://github.com/Shopify/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+ future work ([#300](https://github.com/Shopify/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+ ([#475](https://github.com/Shopify/sarama/pull/475)).
+
+#### Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+ circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+ the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
+ - Update the import path for snappy-go, it has moved again and the API has
+ changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
+
+#### Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go, it has moved from google code to github
+ ([#456](https://github.com/Shopify/sarama/pull/456)).
+
+#### Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+ ([#446](https://github.com/Shopify/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+ ([#450](https://github.com/Shopify/sarama/pull/450),
+ [#451](https://github.com/Shopify/sarama/pull/451)).
+
+#### Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+ users to dynamically choose what topics/partitions to consume without
+ instantiating a full client
+ ([#431](https://github.com/Shopify/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+ by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+ partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+ ([#439](https://github.com/Shopify/sarama/pull/439),
+ [#442](https://github.com/Shopify/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+ useful, and slightly less verbose
+ ([#429](https://github.com/Shopify/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+ thundering herd on the first broker in the list
+ ([#441](https://github.com/Shopify/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+ shutting down, fixing several instances of confusing behaviour and at least
+ one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+ making it much more resilient to specific user code ordering
+ ([#325](https://github.com/Shopify/sarama/pull/325)).
+
+#### Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+ ConsumerMetadataRequests similar to how it tracks partition leadership using
+ regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
+ This adds two methods to the client API:
+ - `Coordinator(consumerGroup string) (*Broker, error)`
+ - `RefreshCoordinator(consumerGroup string) error`
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+ ID/address/port combination for the Coordinator; accessing the fields
+ individually has been deprecated
+ ([#413](https://github.com/Shopify/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+ Consumers will fail to start if the provided offset is out of range
+ ([#418](https://github.com/Shopify/sarama/pull/418))
+ and they will automatically shut down if the offset falls out of range
+ ([#424](https://github.com/Shopify/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+ ([#427](https://github.com/Shopify/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+ it happens to be activated while the client is being closed
+ ([#422](https://github.com/Shopify/sarama/pull/422)).
+
+#### Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+ ([#389](https://github.com/Shopify/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+ messages due to an improved queue implementation
+ ([#396](https://github.com/Shopify/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+ changes ([#385](https://github.com/Shopify/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+ retry once in the event of stale information or similar
+ ([#394](https://github.com/Shopify/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+ ([#407](https://github.com/Shopify/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+ API versions ([#390](https://github.com/Shopify/sarama/pull/390),
+ [#400](https://github.com/Shopify/sarama/pull/400)).
+
+#### Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+ broken topics don't choke throughput
+ ([#373](https://github.com/Shopify/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+ ([#367](https://github.com/Shopify/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+ ([#369](https://github.com/Shopify/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+ gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+    metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
+
+
+#### Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
new file mode 100644
index 0000000..8121b63
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
new file mode 100644
index 0000000..58a39e4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Makefile
@@ -0,0 +1,29 @@
+default: fmt vet errcheck test
+
+# Taken from https://github.com/codecov/example-go#caveat-multiple-files
+test:
+ echo "" > coverage.txt
+ for d in `go list ./... | grep -v vendor`; do \
+ go test -v -timeout 60s -race -coverprofile=profile.out -covermode=atomic $$d; \
+ if [ -f profile.out ]; then \
+ cat profile.out >> coverage.txt; \
+ rm profile.out; \
+ fi \
+ done
+
+vet:
+ go vet ./...
+
+errcheck:
+ errcheck github.com/Shopify/sarama/...
+
+fmt:
+ @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
+
+install_dependencies: install_errcheck get
+
+install_errcheck:
+ go get github.com/kisielk/errcheck
+
+get:
+ go get -t
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
new file mode 100644
index 0000000..25eb0cf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/README.md
@@ -0,0 +1,39 @@
+sarama
+======
+
+[GoDoc](https://godoc.org/github.com/Shopify/sarama)
+[Build Status](https://travis-ci.org/Shopify/sarama)
+[Coverage](https://codecov.io/gh/Shopify/sarama)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
+
+### Getting started
+
+- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
+
+### Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. This means we currently officially support
+Go 1.9 through 1.7, and Kafka 1.0 through 0.10, although older releases are
+still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the gopkg.in service.
+You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
+A changelog is available [here](CHANGELOG.md).
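+
+For instance, a minimal import pinning to the stable v1 API might look like
+this (the package alias is illustrative):
+
+```go
+import sarama "gopkg.in/Shopify/sarama.v1"
+```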
+
+### Contributing
+
+* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
+* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
+ technical and design details.
+* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
+ contains a wealth of useful information.
+* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+* If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
new file mode 100644
index 0000000..f4b848a
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/Vagrantfile
@@ -0,0 +1,20 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION = "2"
+
+# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
+MEMORY = 3072
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.vm.box = "ubuntu/trusty64"
+
+ config.vm.provision :shell, path: "vagrant/provision.sh"
+
+ config.vm.network "private_network", ip: "192.168.100.67"
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = MEMORY
+ end
+end
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
new file mode 100644
index 0000000..ab65f01
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ApiVersionsRequest struct {
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+ return 18
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+ return 0
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
new file mode 100644
index 0000000..23bc326
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/api_versions_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+type ApiVersionsResponseBlock struct {
+ ApiKey int16
+ MinVersion int16
+ MaxVersion int16
+}
+
+func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
+ pe.putInt16(b.ApiKey)
+ pe.putInt16(b.MinVersion)
+ pe.putInt16(b.MaxVersion)
+ return nil
+}
+
+func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
+ var err error
+
+ if b.ApiKey, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MinVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.MaxVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type ApiVersionsResponse struct {
+ Err KError
+ ApiVersions []*ApiVersionsResponseBlock
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
+ return err
+ }
+ for _, apiVersion := range r.ApiVersions {
+ if err := apiVersion.encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
+ for i := 0; i < numBlocks; i++ {
+ block := new(ApiVersionsResponseBlock)
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+ r.ApiVersions[i] = block
+ }
+
+ return nil
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+ return 18
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+ return 0
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
new file mode 100644
index 0000000..1eff81c
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/async_producer.go
@@ -0,0 +1,921 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/eapache/go-resiliency/breaker"
+ "github.com/eapache/queue"
+)
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks: it will not be garbage-collected automatically when it passes out of
+// scope.
+type AsyncProducer interface {
+
+ // AsyncClose triggers a shutdown of the producer. The shutdown has completed
+ // when both the Errors and Successes channels have been closed. When calling
+ // AsyncClose, you *must* continue to read from those channels in order to
+ // drain the results of any messages in flight.
+ AsyncClose()
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+ // Input is the input channel for the user to write messages to that they
+ // wish to send.
+ Input() chan<- *ProducerMessage
+
+ // Successes is the success output channel back to the user when Return.Successes is
+ // enabled. If Return.Successes is true, you MUST read from this channel or the
+ // Producer will deadlock. It is suggested that you send and read messages
+ // together in a single select statement.
+ Successes() <-chan *ProducerMessage
+
+ // Errors is the error output channel back to the user. You MUST read from this
+ // channel or the Producer will deadlock when the channel is full. Alternatively,
+ // you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+ Errors() <-chan *ProducerError
+}
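+
+// A minimal send-and-confirm sketch (assumes Producer.Return.Successes is
+// enabled in the Config; the topic and value are illustrative):
+//
+//	producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")}
+//	select {
+//	case msg := <-producer.Successes():
+//		Logger.Printf("delivered to partition %d at offset %d\n", msg.Partition, msg.Offset)
+//	case err := <-producer.Errors():
+//		Logger.Println("delivery failed:", err)
+//	}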
+
+type asyncProducer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ errors chan *ProducerError
+ input, successes, retries chan *ProducerMessage
+ inFlight sync.WaitGroup
+
+ brokers map[*Broker]chan<- *ProducerMessage
+ brokerRefs map[chan<- *ProducerMessage]int
+ brokerLock sync.Mutex
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ p.(*asyncProducer).ownClient = true
+ return p, nil
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ p := &asyncProducer{
+ client: client,
+ conf: client.Config(),
+ errors: make(chan *ProducerError),
+ input: make(chan *ProducerMessage),
+ successes: make(chan *ProducerMessage),
+ retries: make(chan *ProducerMessage),
+ brokers: make(map[*Broker]chan<- *ProducerMessage),
+ brokerRefs: make(map[chan<- *ProducerMessage]int),
+ }
+
+ // launch our singleton dispatchers
+ go withRecover(p.dispatcher)
+ go withRecover(p.retryHandler)
+
+ return p, nil
+}
+
+type flagSet int8
+
+const (
+ syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+ fin // final message from partitionProducer to brokerProducer and back
+ shutdown // start the shutdown process
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+ Topic string // The Kafka topic for this message.
+ // The partitioning key for this message. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Key Encoder
+ // The actual message to store in Kafka. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Value Encoder
+
+ // The headers are key-value pairs that are transparently passed
+ // by Kafka between producers and consumers.
+ Headers []RecordHeader
+
+ // This field is used to hold arbitrary data you wish to include so it
+ // will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only to be used for
+	// pass-through data.
+ Metadata interface{}
+
+ // Below this point are filled in by the producer as the message is processed
+
+ // Offset is the offset of the message stored on the broker. This is only
+ // guaranteed to be defined if the message was successfully delivered and
+ // RequiredAcks is not NoResponse.
+ Offset int64
+ // Partition is the partition that the message was sent to. This is only
+ // guaranteed to be defined if the message was successfully delivered.
+ Partition int32
+ // Timestamp is the timestamp assigned to the message by the broker. This
+ // is only guaranteed to be defined if the message was successfully
+ // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
+ // least version 0.10.0.
+ Timestamp time.Time
+
+ retries int
+ flags flagSet
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) byteSize(version int) int {
+ var size int
+ if version >= 2 {
+ size = maximumRecordOverhead
+ for _, h := range m.Headers {
+ size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+ }
+ } else {
+ size = producerMessageOverhead
+ }
+ if m.Key != nil {
+ size += m.Key.Length()
+ }
+ if m.Value != nil {
+ size += m.Value.Length()
+ }
+ return size
+}
+
+func (m *ProducerMessage) clear() {
+ m.flags = 0
+ m.retries = 0
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+ Msg *ProducerMessage
+ Err error
+}
+
+func (pe ProducerError) Error() string {
+ return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+ return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
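+
+// For example, the error returned by Close can be unwrapped like this
+// (sketch only):
+//
+//	if err := producer.Close(); err != nil {
+//		if pErrs, ok := err.(ProducerErrors); ok {
+//			for _, pErr := range pErrs {
+//				Logger.Println(pErr.Msg.Topic, pErr.Err)
+//			}
+//		}
+//	}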
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+ return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+ return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+ return p.input
+}
+
+func (p *asyncProducer) Close() error {
+ p.AsyncClose()
+
+ if p.conf.Producer.Return.Successes {
+ go withRecover(func() {
+ for range p.successes {
+ }
+ })
+ }
+
+ var errors ProducerErrors
+ if p.conf.Producer.Return.Errors {
+ for event := range p.errors {
+ errors = append(errors, event)
+ }
+ } else {
+ <-p.errors
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+ go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+ handlers := make(map[string]chan<- *ProducerMessage)
+ shuttingDown := false
+
+ for msg := range p.input {
+ if msg == nil {
+			Logger.Println("Something tried to send a nil message; it was ignored.")
+ continue
+ }
+
+ if msg.flags&shutdown != 0 {
+ shuttingDown = true
+ p.inFlight.Done()
+ continue
+ } else if msg.retries == 0 {
+ if shuttingDown {
+ // we can't just call returnError here because that decrements the wait group,
+ // which hasn't been incremented yet for this message, and shouldn't be
+ pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ continue
+ }
+ p.inFlight.Add(1)
+ }
+
+ version := 1
+ if p.conf.Version.IsAtLeast(V0_11_0_0) {
+ version = 2
+ }
+ if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
+ p.returnError(msg, ErrMessageSizeTooLarge)
+ continue
+ }
+
+ handler := handlers[msg.Topic]
+ if handler == nil {
+ handler = p.newTopicProducer(msg.Topic)
+ handlers[msg.Topic] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range handlers {
+ close(handler)
+ }
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+ parent *asyncProducer
+ topic string
+ input <-chan *ProducerMessage
+
+ breaker *breaker.Breaker
+ handlers map[int32]chan<- *ProducerMessage
+ partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ tp := &topicProducer{
+ parent: p,
+ topic: topic,
+ input: input,
+ breaker: breaker.New(3, 1, 10*time.Second),
+ handlers: make(map[int32]chan<- *ProducerMessage),
+ partitioner: p.conf.Producer.Partitioner(topic),
+ }
+ go withRecover(tp.dispatch)
+ return input
+}
+
+func (tp *topicProducer) dispatch() {
+ for msg := range tp.input {
+ if msg.retries == 0 {
+ if err := tp.partitionMessage(msg); err != nil {
+ tp.parent.returnError(msg, err)
+ continue
+ }
+ }
+
+ handler := tp.handlers[msg.Partition]
+ if handler == nil {
+ handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+ tp.handlers[msg.Partition] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range tp.handlers {
+ close(handler)
+ }
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+ var partitions []int32
+
+ err := tp.breaker.Run(func() (err error) {
+ if tp.partitioner.RequiresConsistency() {
+ partitions, err = tp.parent.client.Partitions(msg.Topic)
+ } else {
+ partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+ }
+ return
+ })
+
+ if err != nil {
+ return err
+ }
+
+ numPartitions := int32(len(partitions))
+
+ if numPartitions == 0 {
+ return ErrLeaderNotAvailable
+ }
+
+ choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+ if err != nil {
+ return err
+ } else if choice < 0 || choice >= numPartitions {
+ return ErrInvalidPartition
+ }
+
+ msg.Partition = partitions[choice]
+
+ return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+ parent *asyncProducer
+ topic string
+ partition int32
+ input <-chan *ProducerMessage
+
+ leader *Broker
+ breaker *breaker.Breaker
+ output chan<- *ProducerMessage
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush).
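+	// For example, with highWatermark == 2 only messages at retries == 2 flow
+	// through; anything at retries 0 or 1 is buffered until the fin for level 2
+	// comes back, at which point the buffers are flushed one level at a time.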
+ highWatermark int
+ retryState []partitionRetryState
+}
+
+type partitionRetryState struct {
+ buf []*ProducerMessage
+ expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ pp := &partitionProducer{
+ parent: p,
+ topic: topic,
+ partition: partition,
+ input: input,
+
+ breaker: breaker.New(3, 1, 10*time.Second),
+ retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+ }
+ go withRecover(pp.dispatch)
+ return input
+}
+
+func (pp *partitionProducer) dispatch() {
+ // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+ // on the first message
+ pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+ if pp.leader != nil {
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+ }
+
+ for msg := range pp.input {
+ if msg.retries > pp.highWatermark {
+ // a new, higher, retry level; handle it and then back off
+ pp.newHighWatermark(msg.retries)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ } else if pp.highWatermark > 0 {
+ // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+ if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+ if msg.flags&fin == fin {
+ pp.retryState[msg.retries].expectChaser = false
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ } else {
+ pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+ }
+ continue
+ } else if msg.flags&fin == fin {
+ // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+ // meaning this retry level is done and we can go down (at least) one level and flush that
+ pp.retryState[pp.highWatermark].expectChaser = false
+ pp.flushRetryBuffers()
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ continue
+ }
+ }
+
+ // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+ // without breaking any of our ordering guarantees
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnError(msg, err)
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ continue
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ pp.output <- msg
+ }
+
+ if pp.output != nil {
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ }
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+ pp.highWatermark = hwm
+
+ // send off a fin so that we know when everything "in between" has made it
+ // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+ pp.retryState[pp.highWatermark].expectChaser = true
+ pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+ // a new HWM means that our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.output)
+ pp.output = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+ Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ for {
+ pp.highWatermark--
+
+ if pp.output == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+ goto flushDone
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ for _, msg := range pp.retryState[pp.highWatermark].buf {
+ pp.output <- msg
+ }
+
+ flushDone:
+ pp.retryState[pp.highWatermark].buf = nil
+ if pp.retryState[pp.highWatermark].expectChaser {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ break
+ } else if pp.highWatermark == 0 {
+ Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+ break
+ }
+ }
+}
+
+func (pp *partitionProducer) updateLeader() error {
+ return pp.breaker.Run(func() (err error) {
+ if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+ return err
+ }
+
+ if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+ return err
+ }
+
+ pp.output = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+ return nil
+ })
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ var (
+ input = make(chan *ProducerMessage)
+ bridge = make(chan *produceSet)
+ responses = make(chan *brokerProducerResponse)
+ )
+
+ bp := &brokerProducer{
+ parent: p,
+ broker: broker,
+ input: input,
+ output: bridge,
+ responses: responses,
+ buffer: newProduceSet(p),
+ currentRetries: make(map[string]map[int32]error),
+ }
+ go withRecover(bp.run)
+
+ // minimal bridge to make the network response `select`able
+ go withRecover(func() {
+ for set := range bridge {
+ request := set.buildRequest()
+
+ response, err := broker.Produce(request)
+
+ responses <- &brokerProducerResponse{
+ set: set,
+ err: err,
+ res: response,
+ }
+ }
+ close(responses)
+ })
+
+ return input
+}
+
+type brokerProducerResponse struct {
+ set *produceSet
+ err error
+ res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+ parent *asyncProducer
+ broker *Broker
+
+ input <-chan *ProducerMessage
+ output chan<- *produceSet
+ responses <-chan *brokerProducerResponse
+
+ buffer *produceSet
+ timer <-chan time.Time
+ timerFired bool
+
+ closing error
+ currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+ var output chan<- *produceSet
+ Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+ for {
+ select {
+ case msg := <-bp.input:
+ if msg == nil {
+ bp.shutdown()
+ return
+ }
+
+ if msg.flags&syn == syn {
+ Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ if bp.currentRetries[msg.Topic] == nil {
+ bp.currentRetries[msg.Topic] = make(map[int32]error)
+ }
+ bp.currentRetries[msg.Topic][msg.Partition] = nil
+ bp.parent.inFlight.Done()
+ continue
+ }
+
+ if reason := bp.needsRetry(msg); reason != nil {
+ bp.parent.retryMessage(msg, reason)
+
+ if bp.closing == nil && msg.flags&fin == fin {
+ // we were retrying this partition but we can start processing again
+ delete(bp.currentRetries[msg.Topic], msg.Partition)
+ Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ }
+
+ continue
+ }
+
+ if bp.buffer.wouldOverflow(msg) {
+ if err := bp.waitForSpace(msg); err != nil {
+ bp.parent.retryMessage(msg, err)
+ continue
+ }
+ }
+
+ if err := bp.buffer.add(msg); err != nil {
+ bp.parent.returnError(msg, err)
+ continue
+ }
+
+ if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+ bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
+ }
+ case <-bp.timer:
+ bp.timerFired = true
+ case output <- bp.buffer:
+ bp.rollOver()
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ }
+
+ if bp.timerFired || bp.buffer.readyToFlush() {
+ output = bp.output
+ } else {
+ output = nil
+ }
+ }
+}
+
+func (bp *brokerProducer) shutdown() {
+ for !bp.buffer.empty() {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ }
+ }
+ close(bp.output)
+ for response := range bp.responses {
+ bp.handleResponse(response)
+ }
+
+ Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+ if bp.closing != nil {
+ return bp.closing
+ }
+
+ return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
+ Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+
+ for {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ // handling a response can change our state, so re-check some things
+ if reason := bp.needsRetry(msg); reason != nil {
+ return reason
+ } else if !bp.buffer.wouldOverflow(msg) {
+ return nil
+ }
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ return nil
+ }
+ }
+}
+
+func (bp *brokerProducer) rollOver() {
+ bp.timer = nil
+ bp.timerFired = false
+ bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+ if response.err != nil {
+ bp.handleError(response.set, response.err)
+ } else {
+ bp.handleSuccess(response.set, response.res)
+ }
+
+ if bp.buffer.empty() {
+ bp.rollOver() // this can happen if the response invalidated our buffer
+ }
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+ // we iterate through the blocks in the request set, not the response, so that we notice
+ // if the response is missing a block completely
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ if response == nil {
+ // this only happens when RequiredAcks is NoResponse, so we have to assume success
+ bp.parent.returnSuccesses(msgs)
+ return
+ }
+
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ bp.parent.returnErrors(msgs, ErrIncompleteResponse)
+ return
+ }
+
+ switch block.Err {
+ // Success
+ case ErrNoError:
+ if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+ for _, msg := range msgs {
+ msg.Timestamp = block.Timestamp
+ }
+ }
+ for i, msg := range msgs {
+ msg.Offset = block.Offset + int64(i)
+ }
+ bp.parent.returnSuccesses(msgs)
+ // Retriable errors
+ case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
+ Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+ bp.broker.ID(), topic, partition, block.Err)
+ bp.currentRetries[topic][partition] = block.Err
+ bp.parent.retryMessages(msgs, block.Err)
+ bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+ // Other non-retriable errors
+ default:
+ bp.parent.returnErrors(msgs, block.Err)
+ }
+ })
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+ switch err.(type) {
+ case PacketEncodingError:
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.returnErrors(msgs, err)
+ })
+ default:
+ Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+ bp.parent.abandonBrokerConnection(bp.broker)
+ _ = bp.broker.Close()
+ bp.closing = err
+ sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
+ bp.parent.retryMessages(msgs, err)
+ })
+ bp.rollOver()
+ }
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+ var msg *ProducerMessage
+ buf := queue.New()
+
+ for {
+ if buf.Length() == 0 {
+ msg = <-p.retries
+ } else {
+ select {
+ case msg = <-p.retries:
+ case p.input <- buf.Peek().(*ProducerMessage):
+ buf.Remove()
+ continue
+ }
+ }
+
+ if msg == nil {
+ return
+ }
+
+ buf.Add(msg)
+ }
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+ Logger.Println("Producer shutting down.")
+ p.inFlight.Add(1)
+ p.input <- &ProducerMessage{flags: shutdown}
+
+ p.inFlight.Wait()
+
+ if p.ownClient {
+ err := p.client.Close()
+ if err != nil {
+ Logger.Println("producer/shutdown failed to close the embedded client:", err)
+ }
+ }
+
+ close(p.input)
+ close(p.retries)
+ close(p.errors)
+ close(p.successes)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+ msg.clear()
+ pErr := &ProducerError{Msg: msg, Err: err}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.returnError(msg, err)
+ }
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+ for _, msg := range batch {
+ if p.conf.Producer.Return.Successes {
+ msg.clear()
+ p.successes <- msg
+ }
+ p.inFlight.Done()
+ }
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnError(msg, err)
+ } else {
+ msg.retries++
+ p.retries <- msg
+ }
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.retryMessage(msg, err)
+ }
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bp := p.brokers[broker]
+
+ if bp == nil {
+ bp = p.newBrokerProducer(broker)
+ p.brokers[broker] = bp
+ p.brokerRefs[bp] = 0
+ }
+
+ p.brokerRefs[bp]++
+
+ return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ p.brokerRefs[bp]--
+ if p.brokerRefs[bp] == 0 {
+ close(bp)
+ delete(p.brokerRefs, bp)
+
+ if p.brokers[broker] == bp {
+ delete(p.brokers, broker)
+ }
+ }
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
new file mode 100644
index 0000000..923b07f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/broker.go
@@ -0,0 +1,692 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+ id int32
+ addr string
+
+ conf *Config
+ correlationID int32
+ conn net.Conn
+ connErr error
+ lock sync.Mutex
+ opened int32
+
+ responses chan responsePromise
+ done chan bool
+
+ incomingByteRate metrics.Meter
+ requestRate metrics.Meter
+ requestSize metrics.Histogram
+ requestLatency metrics.Histogram
+ outgoingByteRate metrics.Meter
+ responseRate metrics.Meter
+ responseSize metrics.Histogram
+ brokerIncomingByteRate metrics.Meter
+ brokerRequestRate metrics.Meter
+ brokerRequestSize metrics.Histogram
+ brokerRequestLatency metrics.Histogram
+ brokerOutgoingByteRate metrics.Meter
+ brokerResponseRate metrics.Meter
+ brokerResponseSize metrics.Histogram
+}
+
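+ // responsePromise pairs an in-flight request's correlation ID with the channels
+ // on which responseReceiver delivers the raw response packet or an error.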
+type responsePromise struct {
+ requestTime time.Time
+ correlationID int32
+ packets chan []byte
+ errors chan error
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+ return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// ErrAlreadyConnected. If conf is nil, the result of NewConfig() is used.
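+//
+// A minimal usage sketch (hypothetical address; error handling abbreviated):
+//
+//     b := NewBroker("localhost:9092")
+//     if err := b.Open(nil); err != nil {
+//         panic(err) // ErrAlreadyConnected or a configuration error
+//     }
+//     if ok, err := b.Connected(); err != nil || !ok {
+//         // the asynchronous dial failed
+//     }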
+func (b *Broker) Open(conf *Config) error {
+ if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
+ return ErrAlreadyConnected
+ }
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ err := conf.Validate()
+ if err != nil {
+ return err
+ }
+
+ b.lock.Lock()
+
+ go withRecover(func() {
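+ // Dial in a goroutine so Open returns immediately; the broker lock is
+ // held until the attempt resolves, which is what makes subsequent
+ // operations block until the connection succeeds or fails.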
+ defer b.lock.Unlock()
+
+ dialer := net.Dialer{
+ Timeout: conf.Net.DialTimeout,
+ KeepAlive: conf.Net.KeepAlive,
+ }
+
+ if conf.Net.TLS.Enable {
+ b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
+ } else {
+ b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+ }
+ if b.connErr != nil {
+ Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ b.conn = newBufConn(b.conn)
+
+ b.conf = conf
+
+ // Create or reuse the global metrics shared between brokers
+ b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
+ b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
+ b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
+ b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
+ b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
+ b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
+ b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
+ // Do not gather metrics for seeded broker (only used during bootstrap) because they share
+ // the same id (-1) and are already exposed through the global metrics above
+ if b.id >= 0 {
+ b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
+ b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
+ b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
+ b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
+ b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
+ b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
+ b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
+ }
+
+ if conf.Net.SASL.Enable {
+ b.connErr = b.sendAndReceiveSASLPlainAuth()
+ if b.connErr != nil {
+ err = b.conn.Close()
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+ b.conn = nil
+ atomic.StoreInt32(&b.opened, 0)
+ return
+ }
+ }
+
+ b.done = make(chan bool)
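+ // One promise is held by responseReceiver while it reads a response, so
+ // buffering MaxOpenRequests-1 promises caps the total number of
+ // outstanding requests at MaxOpenRequests.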
+ b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+ if b.id >= 0 {
+ Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+ } else {
+ Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+ }
+ go withRecover(b.responseReceiver)
+ })
+
+ return nil
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ return b.conn != nil, b.connErr
+}
+
+func (b *Broker) Close() error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ return ErrNotConnected
+ }
+
+ close(b.responses)
+ <-b.done
+
+ err := b.conn.Close()
+
+ b.conn = nil
+ b.connErr = nil
+ b.done = nil
+ b.responses = nil
+
+ if b.id >= 0 {
+ b.conf.MetricRegistry.Unregister(getMetricNameForBroker("incoming-byte-rate", b))
+ b.conf.MetricRegistry.Unregister(getMetricNameForBroker("request-rate", b))
+ b.conf.MetricRegistry.Unregister(getMetricNameForBroker("outgoing-byte-rate", b))
+ b.conf.MetricRegistry.Unregister(getMetricNameForBroker("response-rate", b))
+ }
+
+ if err == nil {
+ Logger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+
+ atomic.StoreInt32(&b.opened, 0)
+
+ return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+ return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+ return b.addr
+}
+
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+ response := new(MetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+ response := new(ConsumerMetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+ response := new(OffsetResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+ var response *ProduceResponse
+ var err error
+
+ if request.RequiredAcks == NoResponse {
+ err = b.sendAndReceive(request, nil)
+ } else {
+ response = new(ProduceResponse)
+ err = b.sendAndReceive(request, response)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+ response := new(FetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+ response := new(OffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+ response := new(OffsetFetchResponse)
+
+ err := b.sendAndReceive(request, response)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+ response := new(JoinGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+ response := new(SyncGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+ response := new(LeaveGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+ response := new(HeartbeatResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+ response := new(ListGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+ response := new(DescribeGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+ response := new(ApiVersionsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ if b.connErr != nil {
+ return nil, b.connErr
+ }
+ return nil, ErrNotConnected
+ }
+
+ if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+ return nil, ErrUnsupportedVersion
+ }
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return nil, err
+ }
+
+ err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ return nil, err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.conn.Write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ return nil, err
+ }
+ b.correlationID++
+
+ if !promiseResponse {
+ // Record request latency without the response
+ b.updateRequestLatencyMetrics(time.Since(requestTime))
+ return nil, nil
+ }
+
+ promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
+ b.responses <- promise
+
+ return &promise, nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
+ promise, err := b.send(req, res != nil)
+
+ if err != nil {
+ return err
+ }
+
+ if promise == nil {
+ return nil
+ }
+
+ select {
+ case buf := <-promise.packets:
+ return versionedDecode(buf, res, req.version())
+ case err = <-promise.errors:
+ return err
+ }
+}
+
+func (b *Broker) decode(pd packetDecoder) (err error) {
+ b.id, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ host, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ port, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+ if _, _, err := net.SplitHostPort(b.addr); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Broker) encode(pe packetEncoder) (err error) {
+ host, portstr, err := net.SplitHostPort(b.addr)
+ if err != nil {
+ return err
+ }
+ port, err := strconv.Atoi(portstr)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(b.id)
+
+ err = pe.putString(host)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(int32(port))
+
+ return nil
+}
+
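+ // responseReceiver runs in its own goroutine (started by Open), matching raw
+ // responses read from the connection against pending promises in FIFO order.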
+func (b *Broker) responseReceiver() {
+ var dead error
+ header := make([]byte, 8)
+ for response := range b.responses {
+ if dead != nil {
+ response.errors <- dead
+ continue
+ }
+
+ err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ bytesReadHeader, err := io.ReadFull(b.conn, header)
+ requestLatency := time.Since(response.requestTime)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ decodedHeader := responseHeader{}
+ err = decode(header, &decodedHeader)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ response.errors <- err
+ continue
+ }
+ if decodedHeader.correlationID != response.correlationID {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ // TODO if decoded ID < cur ID, discard until we catch up
+ // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+ dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
+ response.errors <- dead
+ continue
+ }
+
+ buf := make([]byte, decodedHeader.length-4)
+ bytesReadBody, err := io.ReadFull(b.conn, buf)
+ b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+ if err != nil {
+ dead = err
+ response.errors <- err
+ continue
+ }
+
+ response.packets <- buf
+ }
+ close(b.done)
+}
+
+func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
+ rb := &SaslHandshakeRequest{"PLAIN"}
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.conf.MetricRegistry)
+ if err != nil {
+ return err
+ }
+
+ err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ return err
+ }
+
+ requestTime := time.Now()
+ bytes, err := b.conn.Write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ b.correlationID++
+ // wait for the response
+ header := make([]byte, 8) // response header
+ _, err = io.ReadFull(b.conn, header)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+ return err
+ }
+ length := binary.BigEndian.Uint32(header[:4])
+ payload := make([]byte, length-4)
+ n, err := io.ReadFull(b.conn, payload)
+ if err != nil {
+ Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+ return err
+ }
+ b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+ res := &SaslHandshakeResponse{}
+ err = versionedDecode(payload, res, 0)
+ if err != nil {
+ Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+ return err
+ }
+ if res.Err != ErrNoError {
+ Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+ return res.Err
+ }
+ Logger.Print("Successful SASL handshake")
+ return nil
+}
+
+// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
+// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
+//
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+// message = [authzid] UTF8NUL authcid UTF8NUL passwd
+// authcid = 1*SAFE ; MUST accept up to 255 octets
+// authzid = 1*SAFE ; MUST accept up to 255 octets
+// passwd = 1*SAFE ; MUST accept up to 255 octets
+// UTF8NUL = %x00 ; UTF-8 encoded NUL character
+//
+// SAFE = UTF1 / UTF2 / UTF3 / UTF4
+// ;; any UTF-8 encoded Unicode character except NUL
+//
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection. This does not seem to be the
+// ideal way of responding to bad credentials, but that's how it's done today.
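+//
+// For example (illustrative credentials only), user "admin" with password
+// "admin-secret" and an empty authzid is sent as the bytes
+// "\x00admin\x00admin-secret", preceded by a four-byte big-endian length.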
+func (b *Broker) sendAndReceiveSASLPlainAuth() error {
+ if b.conf.Net.SASL.Handshake {
+ handshakeErr := b.sendAndReceiveSASLPlainHandshake()
+ if handshakeErr != nil {
+ Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
+ return handshakeErr
+ }
+ }
+ length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+ authBytes := make([]byte, length+4) //4 byte length header + auth data
+ binary.BigEndian.PutUint32(authBytes, uint32(length))
+ copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
+
+ err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
+ if err != nil {
+ Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ requestTime := time.Now()
+ bytesWritten, err := b.conn.Write(authBytes)
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+ if err != nil {
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ header := make([]byte, 4)
+ n, err := io.ReadFull(b.conn, header)
+ b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+ // If the credentials are valid, we would get a 4 byte response filled with null characters.
+ // Otherwise, the broker closes the connection and we get an EOF
+ if err != nil {
+ Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+ return nil
+}
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+ b.updateRequestLatencyMetrics(requestLatency)
+ b.responseRate.Mark(1)
+ if b.brokerResponseRate != nil {
+ b.brokerResponseRate.Mark(1)
+ }
+ responseSize := int64(bytes)
+ b.incomingByteRate.Mark(responseSize)
+ if b.brokerIncomingByteRate != nil {
+ b.brokerIncomingByteRate.Mark(responseSize)
+ }
+ b.responseSize.Update(responseSize)
+ if b.brokerResponseSize != nil {
+ b.brokerResponseSize.Update(responseSize)
+ }
+}
+
+func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
+ requestLatencyInMs := int64(requestLatency / time.Millisecond)
+ b.requestLatency.Update(requestLatencyInMs)
+ if b.brokerRequestLatency != nil {
+ b.brokerRequestLatency.Update(requestLatencyInMs)
+ }
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+ b.requestRate.Mark(1)
+ if b.brokerRequestRate != nil {
+ b.brokerRequestRate.Mark(1)
+ }
+ requestSize := int64(bytes)
+ b.outgoingByteRate.Mark(requestSize)
+ if b.brokerOutgoingByteRate != nil {
+ b.brokerOutgoingByteRate.Mark(requestSize)
+ }
+ b.requestSize.Update(requestSize)
+ if b.brokerRequestSize != nil {
+ b.brokerRequestSize.Update(requestSize)
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
new file mode 100644
index 0000000..3dbfc4b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/client.go
@@ -0,0 +1,794 @@
+package sarama
+
+import (
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users; however, Kafka will process requests from a single client strictly serially,
+// so it is generally more efficient to use the default of one client per producer/consumer.
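+//
+// A minimal lifecycle sketch (hypothetical broker address and topic; error
+// handling abbreviated):
+//
+//     client, err := NewClient([]string{"localhost:9092"}, nil)
+//     if err != nil {
+//         panic(err)
+//     }
+//     defer client.Close()
+//     partitions, err := client.Partitions("my-topic")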
+type Client interface {
+ // Config returns the Config struct of the client. This struct should not be
+ // altered after it has been created.
+ Config() *Config
+
+ // Brokers returns the current set of active brokers as retrieved from cluster metadata.
+ Brokers() []*Broker
+
+ // Topics returns the set of available topics as retrieved from cluster metadata.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ Partitions(topic string) ([]int32, error)
+
+ // WritablePartitions returns the sorted list of all writable partition IDs for
+ // the given topic, where "writable" means "having a valid leader accepting
+ // writes".
+ WritablePartitions(topic string) ([]int32, error)
+
+ // Leader returns the broker object that is the leader of the current
+ // topic/partition, as determined by querying the cluster metadata.
+ Leader(topic string, partitionID int32) (*Broker, error)
+
+ // Replicas returns the set of all replica IDs for the given partition.
+ Replicas(topic string, partitionID int32) ([]int32, error)
+
+ // InSyncReplicas returns the set of all in-sync replica IDs for the given
+ // partition. In-sync replicas are replicas which are fully caught up with
+ // the partition leader.
+ InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+ // RefreshMetadata takes a list of topics and queries the cluster to refresh the
+ // available metadata for those topics. If no topics are provided, it will refresh
+ // metadata for all topics.
+ RefreshMetadata(topics ...string) error
+
+ // GetOffset queries the cluster to get the most recent available offset at the
+ // given time (in milliseconds) on the topic/partition combination.
+ // Time should be OffsetOldest for the earliest available offset,
+ // OffsetNewest for the offset of the message that will be produced next,
+ // or a timestamp in milliseconds.
+ GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+ // Coordinator returns the coordinating broker for a consumer group. It will
+ // return a locally cached value if it's available. You can call
+ // RefreshCoordinator to update the cached value. This function only works on
+ // Kafka 0.8.2 and higher.
+ Coordinator(consumerGroup string) (*Broker, error)
+
+ // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+ // in local cache. This function only works on Kafka 0.8.2 and higher.
+ RefreshCoordinator(consumerGroup string) error
+
+ // Close shuts down all broker connections managed by this client. It is required
+ // to call this function before a client object passes out of scope, as it will
+ // otherwise leak memory. You must close any Producers or Consumers using a client
+ // before you close the client.
+ Close() error
+
+ // Closed returns true if the client has already had Close called on it
+ Closed() bool
+}
+
+const (
+ // OffsetNewest stands for the log head offset, i.e. the offset that will be
+ // assigned to the next message that will be produced to the partition. You
+ // can send this to a client's GetOffset method to get this offset, or when
+ // calling ConsumePartition to start consuming new messages.
+ OffsetNewest int64 = -1
+ // OffsetOldest stands for the oldest offset available on the broker for a
+ // partition. You can send this to a client's GetOffset method to get this
+ // offset, or when calling ConsumePartition to start consuming from the
+ // oldest offset that is still available on the broker.
+ OffsetOldest int64 = -2
+)
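+
+// For example (hypothetical topic), the offset that will be assigned to the
+// next message produced to partition 0 can be queried with:
+//
+//     offset, err := client.GetOffset("my-topic", 0, OffsetNewest)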
+
+type client struct {
+ conf *Config
+ closer, closed chan none // for shutting down background metadata updater
+
+ // the broker addresses given to us through the constructor are not guaranteed to be returned in
+ // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+ // so we store them separately
+ seedBrokers []*Broker
+ deadSeeds []*Broker
+
+ brokers map[int32]*Broker // maps broker ids to brokers
+ metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+ coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
+
+ // If the number of partitions is large, we can get some churn calling cachedPartitions,
+ // so the result is cached. It is important to update this value whenever metadata is changed
+ cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+ lock sync.RWMutex // protects access to the maps that hold cluster state.
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+ Logger.Println("Initializing new client")
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ if err := conf.Validate(); err != nil {
+ return nil, err
+ }
+
+ if len(addrs) < 1 {
+ return nil, ConfigurationError("You must provide at least one broker address")
+ }
+
+ client := &client{
+ conf: conf,
+ closer: make(chan none),
+ closed: make(chan none),
+ brokers: make(map[int32]*Broker),
+ metadata: make(map[string]map[int32]*PartitionMetadata),
+ cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+ coordinators: make(map[string]int32),
+ }
+
+ random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for _, index := range random.Perm(len(addrs)) {
+ client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+ }
+
+ if conf.Metadata.Full {
+ // do an initial fetch of all cluster metadata by specifying an empty list of topics
+ err := client.RefreshMetadata()
+ switch err {
+ case nil:
+ break
+ case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
+ // indicates that maybe part of the cluster is down, but is not fatal to creating the client
+ Logger.Println(err)
+ default:
+ close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+ _ = client.Close()
+ return nil, err
+ }
+ }
+ go withRecover(client.backgroundMetadataUpdater)
+
+ Logger.Println("Successfully initialized new client")
+
+ return client, nil
+}
+
+func (client *client) Config() *Config {
+ return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ brokers := make([]*Broker, 0)
+ for _, broker := range client.brokers {
+ brokers = append(brokers, broker)
+ }
+ return brokers
+}
+
+func (client *client) Close() error {
+ if client.Closed() {
+ // Chances are this is being called from a defer() and the error will go unobserved
+ // so we go ahead and log the event in this case.
+ Logger.Printf("Close() called on already closed client")
+ return ErrClosedClient
+ }
+
+ // shutdown and wait for the background thread before we take the lock, to avoid races
+ close(client.closer)
+ <-client.closed
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ Logger.Println("Closing Client")
+
+ for _, broker := range client.brokers {
+ safeAsyncClose(broker)
+ }
+
+ for _, broker := range client.seedBrokers {
+ safeAsyncClose(broker)
+ }
+
+ client.brokers = nil
+ client.metadata = nil
+
+ return nil
+}
+
+func (client *client) Closed() bool {
+ return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadata))
+ for topic := range client.metadata {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, allPartitions)
+
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, allPartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, writablePartitions)
+
+ // len==0 catches when it's nil (no such topic) and the odd case when every single
+ // partition is undergoing leader election simultaneously. Callers have to be able to
+ // handle this function returning an empty slice (which is a valid return value), but
+ // catching it here the first time (note we *don't* catch it below where we return
+ // ErrUnknownTopicOrPartition) triggers a metadata refresh as a nicety, so callers can
+ // just try again without manually triggering a refresh (otherwise they'd just keep
+ // getting a stale cached copy).
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, writablePartitions)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return dupInt32Slice(metadata.Replicas), metadata.Err
+ }
+ return dupInt32Slice(metadata.Replicas), nil
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ if metadata.Err == ErrReplicaNotAvailable {
+ return dupInt32Slice(metadata.Isr), metadata.Err
+ }
+ return dupInt32Slice(metadata.Isr), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ leader, err := client.cachedLeader(topic, partitionID)
+
+ if leader == nil {
+ err = client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ leader, err = client.cachedLeader(topic, partitionID)
+ }
+
+ return leader, err
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ // Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning
+ // a proper error. This handles the case by returning an error instead of sending it
+ // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
+ for _, topic := range topics {
+ if len(topic) == 0 {
+ return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+ }
+ }
+
+ return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
+ if client.Closed() {
+ return -1, ErrClosedClient
+ }
+
+ offset, err := client.getOffset(topic, partitionID, time)
+
+ if err != nil {
+ if err := client.RefreshMetadata(topic); err != nil {
+ return -1, err
+ }
+ return client.getOffset(topic, partitionID, time)
+ }
+
+ return offset, err
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ coordinator := client.cachedCoordinator(consumerGroup)
+
+ if coordinator == nil {
+ if err := client.RefreshCoordinator(consumerGroup); err != nil {
+ return nil, err
+ }
+ coordinator = client.cachedCoordinator(consumerGroup)
+ }
+
+ if coordinator == nil {
+ return nil, ErrConsumerCoordinatorNotAvailable
+ }
+
+ _ = coordinator.Open(client.conf)
+ return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
+ if err != nil {
+ return err
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ client.registerBroker(response.Coordinator)
+ client.coordinators[consumerGroup] = response.Coordinator.ID()
+ return nil
+}
+
+// private broker management helpers
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If the broker ID is already registered under a different address, the stale
+// entry is closed and replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+ if client.brokers[broker.ID()] == nil {
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+ } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+ safeAsyncClose(client.brokers[broker.ID()])
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+ }
+}
+
+// deregisterBroker moves the broker to the dead-seeds list if it is the current
+// seed broker; otherwise it removes the broker from the brokers map completely.
+func (client *client) deregisterBroker(broker *Broker) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+ client.deadSeeds = append(client.deadSeeds, broker)
+ client.seedBrokers = client.seedBrokers[1:]
+ } else {
+ // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
+ // but we really shouldn't have to; once that loop is made better this case can be
+ // removed, and the function generally can be renamed from `deregisterBroker` to
+ // `nextSeedBroker` or something
+ Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+ delete(client.brokers, broker.ID())
+ }
+}
+
+func (client *client) resurrectDeadBrokers() {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+ client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+ client.deadSeeds = nil
+}
+
+func (client *client) any() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ if len(client.seedBrokers) > 0 {
+ _ = client.seedBrokers[0].Open(client.conf)
+ return client.seedBrokers[0]
+ }
+
+ // not guaranteed to be random *or* deterministic
+ for _, broker := range client.brokers {
+ _ = broker.Open(client.conf)
+ return broker
+ }
+
+ return nil
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+ allPartitions partitionType = iota
+ writablePartitions
+ // If you add any more types, update the partition cache in updateMetadata()
+
+ // Ensure this is the last partition type value
+ maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ return partitions[partitionID]
+ }
+
+ return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions, exists := client.cachedPartitionsResults[topic]
+
+ if !exists {
+ return nil
+ }
+ return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+ partitions := client.metadata[topic]
+
+ if partitions == nil {
+ return nil
+ }
+
+ ret := make([]int32, 0, len(partitions))
+ for _, partition := range partitions {
+ if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
+ continue
+ }
+ ret = append(ret, partition.ID)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ metadata, ok := partitions[partitionID]
+ if ok {
+ if metadata.Err == ErrLeaderNotAvailable {
+ return nil, ErrLeaderNotAvailable
+ }
+ b := client.brokers[metadata.Leader]
+ if b == nil {
+ return nil, ErrLeaderNotAvailable
+ }
+ _ = b.Open(client.conf)
+ return b, nil
+ }
+ }
+
+ return nil, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
+ broker, err := client.Leader(topic, partitionID)
+ if err != nil {
+ return -1, err
+ }
+
+ request := &OffsetRequest{}
+ if client.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 1
+ }
+ request.AddBlock(topic, partitionID, time, 1)
+
+ response, err := broker.GetAvailableOffsets(request)
+ if err != nil {
+ _ = broker.Close()
+ return -1, err
+ }
+
+ block := response.GetBlock(topic, partitionID)
+ if block == nil {
+ _ = broker.Close()
+ return -1, ErrIncompleteResponse
+ }
+ if block.Err != ErrNoError {
+ return -1, block.Err
+ }
+ if len(block.Offsets) != 1 {
+ return -1, ErrOffsetOutOfRange
+ }
+
+ return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+ defer close(client.closed)
+
+ if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+ return
+ }
+
+ ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ topics := []string{}
+ if !client.conf.Metadata.Full {
+ if specificTopics, err := client.Topics(); err != nil {
+ Logger.Println("Client background metadata topic load:", err)
+ break
+ } else if len(specificTopics) == 0 {
+ Logger.Println("Client background metadata update: no specific topics to update")
+ break
+ } else {
+ topics = specificTopics
+ }
+ }
+
+ if err := client.RefreshMetadata(topics...); err != nil {
+ Logger.Println("Client background metadata update:", err)
+ }
+ case <-client.closer:
+ return
+ }
+ }
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
+ retry := func(err error) error {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.tryRefreshMetadata(topics, attemptsRemaining-1)
+ }
+ return err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ if len(topics) > 0 {
+ Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+ } else {
+ Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+ }
+ response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
+
+ switch err.(type) {
+ case nil:
+ // valid response, use it
+ shouldRetry, err := client.updateMetadata(response)
+ if shouldRetry {
+ Logger.Println("client/metadata found some partitions to be leaderless")
+ return retry(err) // note: err can be nil
+ }
+ return err
+
+ case PacketEncodingError:
+ // didn't even send, return the error
+ return err
+ default:
+ // some other error, remove that broker and try again
+ Logger.Println("client/metadata got error from broker while fetching metadata:", err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ }
+ }
+
+ Logger.Println("client/metadata no available broker to send metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
+
+// updateMetadata caches the metadata from the given response. It reports whether a retry
+// is warranted (e.g. some partitions returned ErrLeaderNotAvailable) and any topic-level error.
+func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ // For all the brokers we received:
+ // - if it is a new ID, save it
+ // - if it is an existing ID, but the address we have is stale, discard the old one and save it
+ // - otherwise ignore it, replacing our existing one would just bounce the connection
+ for _, broker := range data.Brokers {
+ client.registerBroker(broker)
+ }
+
+ for _, topic := range data.Topics {
+ delete(client.metadata, topic.Name)
+ delete(client.cachedPartitionsResults, topic.Name)
+
+ switch topic.Err {
+ case ErrNoError:
+ break
+ case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+ err = topic.Err
+ continue
+ case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+ err = topic.Err
+ retry = true
+ continue
+ case ErrLeaderNotAvailable: // retry, but store partial partition results
+ retry = true
+ break
+ default: // don't retry, don't store partial results
+ Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+ err = topic.Err
+ continue
+ }
+
+ client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+ for _, partition := range topic.Partitions {
+ client.metadata[topic.Name][partition.ID] = partition
+ if partition.Err == ErrLeaderNotAvailable {
+ retry = true
+ }
+ }
+
+ var partitionCache [maxPartitionIndex][]int32
+ partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+ partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+ client.cachedPartitionsResults[topic.Name] = partitionCache
+ }
+
+ return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+ return client.brokers[coordinatorID]
+ }
+ return nil
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
+ retry := func(err error) (*ConsumerMetadataResponse, error) {
+ if attemptsRemaining > 0 {
+ Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(client.conf.Metadata.Retry.Backoff)
+ return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+ }
+ return nil, err
+ }
+
+ for broker := client.any(); broker != nil; broker = client.any() {
+ Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+ request := new(ConsumerMetadataRequest)
+ request.ConsumerGroup = consumerGroup
+
+ response, err := broker.GetConsumerMetadata(request)
+
+ if err != nil {
+ Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+ switch err.(type) {
+ case PacketEncodingError:
+ return nil, err
+ default:
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ continue
+ }
+ }
+
+ switch response.Err {
+ case ErrNoError:
+ Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+ return response, nil
+
+ case ErrConsumerCoordinatorNotAvailable:
+ Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+ // This is very ugly, but this scenario will only happen once per cluster.
+ // The __consumer_offsets topic only has to be created one time.
+ // The number of partitions is not configurable, but partition 0 should always exist.
+ if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+ Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+ time.Sleep(2 * time.Second)
+ }
+
+ return retry(ErrConsumerCoordinatorNotAvailable)
+ default:
+ return nil, response.Err
+ }
+ }
+
+ Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(ErrOutOfBrokers)
+}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 0000000..e4ff680
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,442 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "regexp"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+ // Net is the namespace for network-level properties used by the Broker, and
+ // shared by the Client/Producer/Consumer.
+ Net struct {
+ // How many outstanding requests a connection is allowed to have before
+ // sending on it blocks (default 5).
+ MaxOpenRequests int
+
+ // All three of the below configurations are similar to the
+ // `socket.timeout.ms` setting in JVM kafka. All of them default
+ // to 30 seconds.
+ DialTimeout time.Duration // How long to wait for the initial connection.
+ ReadTimeout time.Duration // How long to wait for a response.
+ WriteTimeout time.Duration // How long to wait for a transmit.
+
+ TLS struct {
+ // Whether or not to use TLS when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The TLS configuration to use for secure connections if
+ // enabled (defaults to nil).
+ Config *tls.Config
+ }
+
+ // SASL based authentication with broker. While there are multiple SASL authentication methods
+ // the current implementation is limited to plaintext (SASL/PLAIN) authentication
+ SASL struct {
+ // Whether or not to use SASL authentication when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // Whether or not to send the Kafka SASL handshake first if enabled
+ // (defaults to true). You should only set this to false if you're using
+ // a non-Kafka SASL proxy.
+ Handshake bool
+ // Username and password for SASL/PLAIN authentication.
+ User string
+ Password string
+ }
+
+ // KeepAlive specifies the keep-alive period for an active network connection.
+ // If zero, keep-alives are disabled. (default is 0: disabled).
+ KeepAlive time.Duration
+ }
+
+ // Metadata is the namespace for metadata management properties used by the
+ // Client, and shared by the Producer/Consumer.
+ Metadata struct {
+ Retry struct {
+ // The total number of times to retry a metadata request when the
+ // cluster is in the middle of a leader election (default 3).
+ Max int
+ // How long to wait for leader election to occur before retrying
+ // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+ Backoff time.Duration
+ }
+ // How frequently to refresh the cluster metadata in the background.
+ // Defaults to 10 minutes. Set to 0 to disable. Similar to
+ // `topic.metadata.refresh.interval.ms` in the JVM version.
+ RefreshFrequency time.Duration
+
+ // Whether to maintain a full set of metadata for all topics, or just
+ // the minimal set that has been necessary so far. The full set is simpler
+ // and usually more convenient, but can take up a substantial amount of
+ // memory if you have many topics and partitions. Defaults to true.
+ Full bool
+ }
+
+ // Producer is the namespace for configuration related to producing messages,
+ // used by the Producer.
+ Producer struct {
+ // The maximum permitted size of a message (defaults to 1000000). Should be
+ // set equal to or smaller than the broker's `message.max.bytes`.
+ MaxMessageBytes int
+ // The level of acknowledgement reliability needed from the broker (defaults
+ // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+ // JVM producer.
+ RequiredAcks RequiredAcks
+ // The maximum duration the broker will wait for the receipt of the number
+ // of RequiredAcks (defaults to 10 seconds). This is only relevant when
+ // RequiredAcks is set to WaitForAll or a number > 1. Only supports
+ // millisecond resolution, nanoseconds will be truncated. Equivalent to
+ // the JVM producer's `request.timeout.ms` setting.
+ Timeout time.Duration
+ // The type of compression to use on messages (defaults to no compression).
+ // Similar to `compression.codec` setting of the JVM producer.
+ Compression CompressionCodec
+ // Generates partitioners for choosing the partition to send messages to
+ // (defaults to hashing the message key). Similar to the `partitioner.class`
+ // setting for the JVM producer.
+ Partitioner PartitionerConstructor
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from the respective channels to prevent deadlock. If,
+ // however, this config is used to create a `SyncProducer`, both must be set
+ // to true and you shall not read from the channels since the producer does
+ // this internally.
+ Return struct {
+ // If enabled, successfully delivered messages will be returned on the
+ // Successes channel (default disabled).
+ Successes bool
+
+ // If enabled, messages that failed to deliver will be returned on the
+ // Errors channel, including the error (default enabled).
+ Errors bool
+ }
+
+ // The following config options control how often messages are batched up and
+ // sent to the broker. By default, messages are sent as fast as possible, and
+ // all messages received while the current batch is in-flight are placed
+ // into the subsequent batch.
+ Flush struct {
+ // The best-effort number of bytes needed to trigger a flush. Use the
+ // global sarama.MaxRequestSize to set a hard upper limit.
+ Bytes int
+ // The best-effort number of messages needed to trigger a flush. Use
+ // `MaxMessages` to set a hard upper limit.
+ Messages int
+ // The best-effort frequency of flushes. Equivalent to
+ // `queue.buffering.max.ms` setting of JVM producer.
+ Frequency time.Duration
+ // The maximum number of messages the producer will send in a single
+ // broker request. Defaults to 0 for unlimited. Similar to
+ // `queue.buffering.max.messages` in the JVM producer.
+ MaxMessages int
+ }
+
+ Retry struct {
+ // The total number of times to retry sending a message (default 3).
+ // Similar to the `message.send.max.retries` setting of the JVM producer.
+ Max int
+ // How long to wait for the cluster to settle between retries
+ // (default 100ms). Similar to the `retry.backoff.ms` setting of the
+ // JVM producer.
+ Backoff time.Duration
+ }
+ }
+
+ // Consumer is the namespace for configuration related to consuming messages,
+ // used by the Consumer.
+ //
+ // Note that Sarama's Consumer type does not currently support automatic
+ // consumer-group rebalancing and offset tracking. For Zookeeper-based
+ // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
+ // library builds on Sarama to add this support. For Kafka-based tracking
+ // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
+ // builds on Sarama to add this support.
+ Consumer struct {
+ Retry struct {
+ // How long to wait after failing to read from a partition before
+ // trying again (default 2s).
+ Backoff time.Duration
+ }
+
+ // Fetch is the namespace for controlling how many bytes are retrieved by any
+ // given request.
+ Fetch struct {
+ // The minimum number of message bytes to fetch in a request - the broker
+ // will wait until at least this many are available. The default is 1,
+ // as 0 causes the consumer to spin when no messages are available.
+ // Equivalent to the JVM's `fetch.min.bytes`.
+ Min int32
+ // The default number of message bytes to fetch from the broker in each
+ // request (default 32768). This should be larger than the majority of
+ // your messages, or else the consumer will spend a lot of time
+ // negotiating sizes and not actually consuming. Similar to the JVM's
+ // `fetch.message.max.bytes`.
+ Default int32
+ // The maximum number of message bytes to fetch from the broker in a
+ // single request. Messages larger than this will return
+ // ErrMessageTooLarge and will not be consumable, so you must be sure
+ // this is at least as large as your largest message. Defaults to 0
+ // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+ // global `sarama.MaxResponseSize` still applies.
+ Max int32
+ }
+ // The maximum amount of time the broker will wait for Consumer.Fetch.Min
+ // bytes to become available before it returns fewer than that anyway. The
+ // default is 250ms, since 0 causes the consumer to spin when no events are
+ // available. 100-500ms is a reasonable range for most cases. Kafka only
+ // supports precision up to milliseconds; nanoseconds will be truncated.
+ // Equivalent to the JVM's `fetch.wait.max.ms`.
+ MaxWaitTime time.Duration
+
+ // The maximum amount of time the consumer expects a message takes to
+ // process for the user. If writing to the Messages channel takes longer
+ // than this, that partition will stop fetching more messages until it
+ // can proceed again.
+ // Note that, since the Messages channel is buffered, the actual grace time is
+ // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+ // If a message is not written to the Messages channel between two ticks
+ // of the expiryTicker then a timeout is detected.
+ // Using a ticker instead of a timer to detect timeouts should typically
+ // result in many fewer calls to Timer functions which may result in a
+ // significant performance improvement if many messages are being sent
+ // and timeouts are infrequent.
+ // The disadvantage of using a ticker instead of a timer is that
+ // timeouts will be less accurate. That is, the effective timeout could
+ // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+ // example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+ // between two messages being sent may not be recognized as a timeout.
+ MaxProcessingTime time.Duration
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from them to prevent deadlock.
+ Return struct {
+ // If enabled, any errors that occurred while consuming are returned on
+ // the Errors channel (default disabled).
+ Errors bool
+ }
+
+ // Offsets specifies configuration for how and when to commit consumed
+ // offsets. This currently requires the manual use of an OffsetManager
+ // but will eventually be automated.
+ Offsets struct {
+ // How frequently to commit updated offsets. Defaults to 1s.
+ CommitInterval time.Duration
+
+ // The initial offset to use if no offset was previously committed.
+ // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+ Initial int64
+
+ // The retention duration for committed offsets. If zero, disabled
+ // (in which case the `offsets.retention.minutes` option on the
+ // broker will be used). Kafka only supports precision up to
+ // milliseconds; nanoseconds will be truncated. Requires Kafka
+ // broker version 0.9.0 or later.
+ // (default is 0: disabled).
+ Retention time.Duration
+ }
+ }
+
+ // A user-provided string sent with every request to the brokers for logging,
+ // debugging, and auditing purposes. Defaults to "sarama", but you should
+ // probably set it to something specific to your application.
+ ClientID string
+ // The number of events to buffer in internal and external channels. This
+ // permits the producer and consumer to continue processing some messages
+ // in the background while user code is working, greatly improving throughput.
+ // Defaults to 256.
+ ChannelBufferSize int
+ // The version of Kafka that Sarama will assume it is running against.
+ // Defaults to the oldest supported stable version. Since Kafka provides
+ // backwards-compatibility, setting it to a version older than you have
+ // will not break anything, although it may prevent you from using the
+ // latest features. Setting it to a version greater than you are actually
+ // running may lead to random breakage.
+ Version KafkaVersion
+ // The registry to define metrics into.
+ // Defaults to a local registry.
+ // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+ // prior to starting Sarama.
+ // See Examples on how to use the metrics registry
+ MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+ c := &Config{}
+
+ c.Net.MaxOpenRequests = 5
+ c.Net.DialTimeout = 30 * time.Second
+ c.Net.ReadTimeout = 30 * time.Second
+ c.Net.WriteTimeout = 30 * time.Second
+ c.Net.SASL.Handshake = true
+
+ c.Metadata.Retry.Max = 3
+ c.Metadata.Retry.Backoff = 250 * time.Millisecond
+ c.Metadata.RefreshFrequency = 10 * time.Minute
+ c.Metadata.Full = true
+
+ c.Producer.MaxMessageBytes = 1000000
+ c.Producer.RequiredAcks = WaitForLocal
+ c.Producer.Timeout = 10 * time.Second
+ c.Producer.Partitioner = NewHashPartitioner
+ c.Producer.Retry.Max = 3
+ c.Producer.Retry.Backoff = 100 * time.Millisecond
+ c.Producer.Return.Errors = true
+
+ c.Consumer.Fetch.Min = 1
+ c.Consumer.Fetch.Default = 32768
+ c.Consumer.Retry.Backoff = 2 * time.Second
+ c.Consumer.MaxWaitTime = 250 * time.Millisecond
+ c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+ c.Consumer.Return.Errors = false
+ c.Consumer.Offsets.CommitInterval = 1 * time.Second
+ c.Consumer.Offsets.Initial = OffsetNewest
+
+ c.ClientID = defaultClientID
+ c.ChannelBufferSize = 256
+ c.Version = minVersion
+ c.MetricRegistry = metrics.NewRegistry()
+
+ return c
+}
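+
+// A minimal usage sketch (illustrative only, not part of the upstream API):
+// build a config, override the defaults you care about, and validate it
+// before handing it to a client. The ClientID below is a placeholder.
+//
+//	config := NewConfig()
+//	config.ClientID = "my-application"   // avoid the default "sarama"
+//	config.Consumer.Return.Errors = true // opt in to the Errors channel
+//	if err := config.Validate(); err != nil {
+//		panic(err) // a ConfigurationError describing the bad value
+//	}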
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+func (c *Config) Validate() error {
+ // some configuration values warrant a warning but should not fail validation outright; handle those first
+ if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+ Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+ }
+ if !c.Net.SASL.Enable {
+ if c.Net.SASL.User != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+ }
+ if c.Net.SASL.Password != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+ }
+ }
+ if c.Producer.RequiredAcks > 1 {
+ Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+ }
+ if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+ Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+ }
+ if c.Producer.Timeout%time.Millisecond != 0 {
+ Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+ }
+ if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+ Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+ }
+ if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+ Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+ Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.ClientID == defaultClientID {
+ Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+ }
+
+ // validate Net values
+ switch {
+ case c.Net.MaxOpenRequests <= 0:
+ return ConfigurationError("Net.MaxOpenRequests must be > 0")
+ case c.Net.DialTimeout <= 0:
+ return ConfigurationError("Net.DialTimeout must be > 0")
+ case c.Net.ReadTimeout <= 0:
+ return ConfigurationError("Net.ReadTimeout must be > 0")
+ case c.Net.WriteTimeout <= 0:
+ return ConfigurationError("Net.WriteTimeout must be > 0")
+ case c.Net.KeepAlive < 0:
+ return ConfigurationError("Net.KeepAlive must be >= 0")
+ case c.Net.SASL.Enable && c.Net.SASL.User == "":
+ return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+ case c.Net.SASL.Enable && c.Net.SASL.Password == "":
+ return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+ }
+
+ // validate the Metadata values
+ switch {
+ case c.Metadata.Retry.Max < 0:
+ return ConfigurationError("Metadata.Retry.Max must be >= 0")
+ case c.Metadata.Retry.Backoff < 0:
+ return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+ case c.Metadata.RefreshFrequency < 0:
+ return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+ }
+
+ // validate the Producer values
+ switch {
+ case c.Producer.MaxMessageBytes <= 0:
+ return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+ case c.Producer.RequiredAcks < -1:
+ return ConfigurationError("Producer.RequiredAcks must be >= -1")
+ case c.Producer.Timeout <= 0:
+ return ConfigurationError("Producer.Timeout must be > 0")
+ case c.Producer.Partitioner == nil:
+ return ConfigurationError("Producer.Partitioner must not be nil")
+ case c.Producer.Flush.Bytes < 0:
+ return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+ case c.Producer.Flush.Messages < 0:
+ return ConfigurationError("Producer.Flush.Messages must be >= 0")
+ case c.Producer.Flush.Frequency < 0:
+ return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+ case c.Producer.Flush.MaxMessages < 0:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+ case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+ case c.Producer.Retry.Max < 0:
+ return ConfigurationError("Producer.Retry.Max must be >= 0")
+ case c.Producer.Retry.Backoff < 0:
+ return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+ }
+
+ if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+ return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+ }
+
+ // validate the Consumer values
+ switch {
+ case c.Consumer.Fetch.Min <= 0:
+ return ConfigurationError("Consumer.Fetch.Min must be > 0")
+ case c.Consumer.Fetch.Default <= 0:
+ return ConfigurationError("Consumer.Fetch.Default must be > 0")
+ case c.Consumer.Fetch.Max < 0:
+ return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+ case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+ return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+ case c.Consumer.MaxProcessingTime <= 0:
+ return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+ case c.Consumer.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+ case c.Consumer.Offsets.CommitInterval <= 0:
+ return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
+ case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+ return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+ }
+
+ // validate misc shared values
+ switch {
+ case c.ChannelBufferSize < 0:
+ return ConfigurationError("ChannelBufferSize must be >= 0")
+ case !validID.MatchString(c.ClientID):
+ return ConfigurationError("ClientID is invalid")
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
new file mode 100644
index 0000000..1a07289
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer.go
@@ -0,0 +1,806 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+ Key, Value []byte
+ Topic string
+ Partition int32
+ Offset int64
+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
+ BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
+ Headers []*RecordHeader // only set if kafka is version 0.11+
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+ Topic string
+ Partition int32
+ Err error
+}
+
+func (ce ConsumerError) Error() string {
+ return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+ return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
+// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
+// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+type Consumer interface {
+
+ // Topics returns the set of available topics as retrieved from the cluster
+ // metadata. This method is the same as Client.Topics(), and is provided for
+ // convenience.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ // This method is the same as Client.Partitions(), and is provided for convenience.
+ Partitions(topic string) ([]int32, error)
+
+ // ConsumePartition creates a PartitionConsumer on the given topic/partition with
+ // the given offset. It will return an error if this Consumer is already consuming
+ // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+ // or OffsetOldest.
+ ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+ // HighWaterMarks returns the current high water marks for each topic and partition.
+ // Consistency between partitions is not guaranteed since high water marks are updated separately.
+ HighWaterMarks() map[string]map[int32]int64
+
+ // Close shuts down the consumer. It must be called after all child
+ // PartitionConsumers have already been closed.
+ Close() error
+}
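+
+// An illustrative sketch (not part of the upstream API) of consuming every
+// partition of a topic. The broker address and topic name are placeholders;
+// each PartitionConsumer must be closed before the Consumer itself, and the
+// shutdown pattern is sketched after the PartitionConsumer interface below:
+//
+//	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	partitions, err := consumer.Partitions("my-topic")
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, p := range partitions {
+//		pc, err := consumer.ConsumePartition("my-topic", p, OffsetNewest)
+//		if err != nil {
+//			panic(err)
+//		}
+//		go func() {
+//			for msg := range pc.Messages() {
+//				_ = msg // process the message
+//			}
+//		}()
+//	}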
+
+type consumer struct {
+ client Client
+ conf *Config
+ ownClient bool
+
+ lock sync.Mutex
+ children map[string]map[int32]*partitionConsumer
+ brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := NewConsumerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ c.(*consumer).ownClient = true
+ return c, nil
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ c := &consumer{
+ client: client,
+ conf: client.Config(),
+ children: make(map[string]map[int32]*partitionConsumer),
+ brokerConsumers: make(map[*Broker]*brokerConsumer),
+ }
+
+ return c, nil
+}
+
+func (c *consumer) Close() error {
+ if c.ownClient {
+ return c.client.Close()
+ }
+ return nil
+}
+
+func (c *consumer) Topics() ([]string, error) {
+ return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+ return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+ child := &partitionConsumer{
+ consumer: c,
+ conf: c.conf,
+ topic: topic,
+ partition: partition,
+ messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+ errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
+ feeder: make(chan *FetchResponse, 1),
+ trigger: make(chan none, 1),
+ dying: make(chan none),
+ fetchSize: c.conf.Consumer.Fetch.Default,
+ }
+
+ if err := child.chooseStartingOffset(offset); err != nil {
+ return nil, err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
+ return nil, err
+ }
+
+ if err := c.addChild(child); err != nil {
+ return nil, err
+ }
+
+ go withRecover(child.dispatcher)
+ go withRecover(child.responseFeeder)
+
+ child.broker = c.refBrokerConsumer(leader)
+ child.broker.input <- child
+
+ return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ hwms := make(map[string]map[int32]int64)
+ for topic, p := range c.children {
+ hwm := make(map[int32]int64, len(p))
+ for partition, pc := range p {
+ hwm[partition] = pc.HighWaterMarkOffset()
+ }
+ hwms[topic] = hwm
+ }
+
+ return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ topicChildren := c.children[child.topic]
+ if topicChildren == nil {
+ topicChildren = make(map[int32]*partitionConsumer)
+ c.children[child.topic] = topicChildren
+ }
+
+ if topicChildren[child.partition] != nil {
+ return ConfigurationError("That topic/partition is already being consumed")
+ }
+
+ topicChildren[child.partition] = child
+ return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ bc := c.brokerConsumers[broker]
+ if bc == nil {
+ bc = c.newBrokerConsumer(broker)
+ c.brokerConsumers[broker] = bc
+ }
+
+ bc.refs++
+
+ return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ brokerWorker.refs--
+
+ if brokerWorker.refs == 0 {
+ close(brokerWorker.input)
+ if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+ delete(c.brokerConsumers, brokerWorker.broker)
+ }
+ }
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// initiated by AsyncClose closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+
+ // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+ // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+ // function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+ // this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+ // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+ // the Messages channel when this function is called, you will be competing with Close for messages; consider
+ // calling AsyncClose instead. It is required to call this function (or AsyncClose) before a consumer object passes
+ // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+ Close() error
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker.
+ Messages() <-chan *ConsumerMessage
+
+ // Errors returns a read channel of errors that occurred during consuming, if
+ // enabled. By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // HighWaterMarkOffset returns the high water mark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+}
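+
+// A sketch of the for/range shutdown pattern described above (illustrative
+// only; `pc` is an assumed PartitionConsumer and `shutdown` an assumed
+// application-level channel, e.g. driven by an OS signal). If
+// Consumer.Return.Errors is enabled, drain Errors() in a separate goroutine
+// as well:
+//
+//	go func() {
+//		<-shutdown
+//		pc.AsyncClose()
+//	}()
+//	for msg := range pc.Messages() {
+//		_ = msg // process; the loop ends once teardown closes the channel
+//	}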
+
+type partitionConsumer struct {
+ highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ consumer *consumer
+ conf *Config
+ topic string
+ partition int32
+
+ broker *brokerConsumer
+ messages chan *ConsumerMessage
+ errors chan *ConsumerError
+ feeder chan *FetchResponse
+
+ trigger, dying chan none
+ responseResult error
+
+ fetchSize int32
+ offset int64
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+ cErr := &ConsumerError{
+ Topic: child.topic,
+ Partition: child.partition,
+ Err: err,
+ }
+
+ if child.conf.Consumer.Return.Errors {
+ child.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (child *partitionConsumer) dispatcher() {
+ for range child.trigger {
+ select {
+ case <-child.dying:
+ close(child.trigger)
+ case <-time.After(child.conf.Consumer.Retry.Backoff):
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ child.broker = nil
+ }
+
+ Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
+ if err := child.dispatch(); err != nil {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+ }
+
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ }
+ child.consumer.removeChild(child)
+ close(child.feeder)
+}
+
+func (child *partitionConsumer) dispatch() error {
+ if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+ return err
+ }
+
+ var leader *Broker
+ var err error
+ if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
+ return err
+ }
+
+ child.broker = child.consumer.refBrokerConsumer(leader)
+
+ child.broker.input <- child
+
+ return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+ newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+ if err != nil {
+ return err
+ }
+ oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case offset == OffsetNewest:
+ child.offset = newestOffset
+ case offset == OffsetOldest:
+ child.offset = oldestOffset
+ case offset >= oldestOffset && offset <= newestOffset:
+ child.offset = offset
+ default:
+ return ErrOffsetOutOfRange
+ }
+
+ return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+ return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+ return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+ // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+ // the dispatcher to exit its loop, which removes it from the consumer, then closes its 'messages' and
+ // 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
+ // also just close itself)
+ close(child.dying)
+}
+
+func (child *partitionConsumer) Close() error {
+ child.AsyncClose()
+
+ go withRecover(func() {
+ for range child.messages {
+ // drain
+ }
+ })
+
+ var errors ConsumerErrors
+ for err := range child.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+ return atomic.LoadInt64(&child.highWaterMarkOffset)
+}
+
+func (child *partitionConsumer) responseFeeder() {
+ var msgs []*ConsumerMessage
+ msgSent := false
+
+feederLoop:
+ for response := range child.feeder {
+ msgs, child.responseResult = child.parseResponse(response)
+ expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+
+ for i, msg := range msgs {
+ messageSelect:
+ select {
+ case child.messages <- msg:
+ msgSent = true
+ case <-expiryTicker.C:
+ if !msgSent {
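+ // nothing was delivered during the entire tick: the user is
+ // consuming too slowly. Mark the response as timed out, release
+ // the broker's ack, flush the remaining messages to the user,
+ // and re-subscribe once they have been drained.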
+ child.responseResult = errTimedOut
+ child.broker.acks.Done()
+ for _, msg = range msgs[i:] {
+ child.messages <- msg
+ }
+ child.broker.input <- child
+ continue feederLoop
+ } else {
+ // a message was delivered since the last tick, but the
+ // current one is still pending; reset the flag and retry
+ msgSent = false
+ goto messageSelect
+ }
+ }
+ }
+
+ expiryTicker.Stop()
+ child.broker.acks.Done()
+ }
+
+ close(child.messages)
+ close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+ var messages []*ConsumerMessage
+ var incomplete bool
+ prelude := true
+
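+ // compressed message sets may begin before the requested offset, so
+ // skip the prelude of older messages; once an in-range message has
+ // been seen, any later message below child.offset marks the response
+ // as incomplete.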
+ for _, msgBlock := range msgSet.Messages {
+ for _, msg := range msgBlock.Messages() {
+ offset := msg.Offset
+ if msg.Msg.Version >= 1 {
+ baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+ offset += baseOffset
+ }
+ if prelude && offset < child.offset {
+ continue
+ }
+ prelude = false
+
+ if offset >= child.offset {
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: msg.Msg.Key,
+ Value: msg.Msg.Value,
+ Offset: offset,
+ Timestamp: msg.Msg.Timestamp,
+ BlockTimestamp: msgBlock.Msg.Timestamp,
+ })
+ child.offset = offset + 1
+ } else {
+ incomplete = true
+ }
+ }
+ }
+
+ if incomplete || len(messages) == 0 {
+ return nil, ErrIncompleteResponse
+ }
+ return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+ var messages []*ConsumerMessage
+ var incomplete bool
+ prelude := true
+
+ for _, rec := range batch.Records {
+ offset := batch.FirstOffset + rec.OffsetDelta
+ if prelude && offset < child.offset {
+ continue
+ }
+ prelude = false
+
+ if offset >= child.offset {
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: rec.Key,
+ Value: rec.Value,
+ Offset: offset,
+ Timestamp: batch.FirstTimestamp.Add(rec.TimestampDelta),
+ Headers: rec.Headers,
+ })
+ child.offset = offset + 1
+ } else {
+ incomplete = true
+ }
+ }
+
+ if incomplete || len(messages) == 0 {
+ return nil, ErrIncompleteResponse
+ }
+ return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+ block := response.GetBlock(child.topic, child.partition)
+ if block == nil {
+ return nil, ErrIncompleteResponse
+ }
+
+ if block.Err != ErrNoError {
+ return nil, block.Err
+ }
+
+ nRecs, err := block.Records.numRecords()
+ if err != nil {
+ return nil, err
+ }
+ if nRecs == 0 {
+ partialTrailingMessage, err := block.Records.isPartial()
+ if err != nil {
+ return nil, err
+ }
+ // We got no messages. If we got a trailing one then we need to ask for more data.
+ // Otherwise we just poll again and wait for one to be produced...
+ if partialTrailingMessage {
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+ // we can't ask for more data, we've hit the configured limit
+ child.sendError(ErrMessageTooLarge)
+ child.offset++ // skip this one so we can keep processing future messages
+ } else {
+ child.fetchSize *= 2
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+ child.fetchSize = child.conf.Consumer.Fetch.Max
+ }
+ }
+ }
+
+ return nil, nil
+ }
+
+ // we got messages, reset our fetch size in case it was increased for a previous request
+ child.fetchSize = child.conf.Consumer.Fetch.Default
+ atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
+
+ if control, err := block.Records.isControl(); err != nil || control {
+ return nil, err
+ }
+
+ if block.Records.recordsType == legacyRecords {
+ return child.parseMessages(block.Records.msgSet)
+ }
+ return child.parseRecords(block.Records.recordBatch)
+}
+
+// brokerConsumer
+
+type brokerConsumer struct {
+ consumer *consumer
+ broker *Broker
+ input chan *partitionConsumer
+ newSubscriptions chan []*partitionConsumer
+ wait chan none
+ subscriptions map[*partitionConsumer]none
+ acks sync.WaitGroup
+ refs int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+ bc := &brokerConsumer{
+ consumer: c,
+ broker: broker,
+ input: make(chan *partitionConsumer),
+ newSubscriptions: make(chan []*partitionConsumer),
+ wait: make(chan none),
+ subscriptions: make(map[*partitionConsumer]none),
+ refs: 0,
+ }
+
+ go withRecover(bc.subscriptionManager)
+ go withRecover(bc.subscriptionConsumer)
+
+ return bc
+}
+
+func (bc *brokerConsumer) subscriptionManager() {
+ var buffer []*partitionConsumer
+
+ // The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+ // goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+ // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+ // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+ // so the main goroutine can block waiting for work if it has none.
+ for {
+ if len(buffer) > 0 {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- buffer:
+ buffer = nil
+ case bc.wait <- none{}:
+ }
+ } else {
+ select {
+ case event, ok := <-bc.input:
+ if !ok {
+ goto done
+ }
+ buffer = append(buffer, event)
+ case bc.newSubscriptions <- nil:
+ }
+ }
+ }
+
+done:
+ close(bc.wait)
+ if len(buffer) > 0 {
+ bc.newSubscriptions <- buffer
+ }
+ close(bc.newSubscriptions)
+}
+
+func (bc *brokerConsumer) subscriptionConsumer() {
+ <-bc.wait // wait for our first piece of work
+
+ // the subscriptionManager ensures we will get nil right away if no new subscriptions are available
+ for newSubscriptions := range bc.newSubscriptions {
+ bc.updateSubscriptions(newSubscriptions)
+
+ if len(bc.subscriptions) == 0 {
+ // We're about to be shut down or we're about to receive more subscriptions.
+ // Either way, the signal just hasn't propagated to our goroutine yet.
+ <-bc.wait
+ continue
+ }
+
+ response, err := bc.fetchNewMessages()
+
+ if err != nil {
+ Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+ bc.abort(err)
+ return
+ }
+
+ bc.acks.Add(len(bc.subscriptions))
+ for child := range bc.subscriptions {
+ child.feeder <- response
+ }
+ bc.acks.Wait()
+ bc.handleResponses()
+ }
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+ for _, child := range newSubscriptions {
+ bc.subscriptions[child] = none{}
+ Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ }
+
+ for child := range bc.subscriptions {
+ select {
+ case <-child.dying:
+ Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ default:
+ break
+ }
+ }
+}
+
+func (bc *brokerConsumer) handleResponses() {
+ // handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+ for child := range bc.subscriptions {
+ result := child.responseResult
+ child.responseResult = nil
+
+ switch result {
+ case nil:
+ break
+ case errTimedOut:
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+ bc.broker.ID(), child.topic, child.partition)
+ delete(bc.subscriptions, child)
+ case ErrOffsetOutOfRange:
+ // there's no point in retrying this; it will just fail the same way again,
+ // so shut it down and force the user to choose what to do
+ child.sendError(result)
+ Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
+ // not an error, but does need redispatching
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ default:
+ // dunno, tell the user and try redispatching
+ child.sendError(result)
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ }
+ }
+}
+
+func (bc *brokerConsumer) abort(err error) {
+ bc.consumer.abandonBrokerConsumer(bc)
+ _ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+ for child := range bc.subscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+
+ for newSubscriptions := range bc.newSubscriptions {
+ if len(newSubscriptions) == 0 {
+ <-bc.wait
+ continue
+ }
+ for _, child := range newSubscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+}
+
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+ request := &FetchRequest{
+ MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
+ MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+ request.Version = 2
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 3
+ request.MaxBytes = MaxResponseSize
+ }
+ if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 4
+ request.Isolation = ReadUncommitted // we don't yet support transactions
+ }
+
+ for child := range bc.subscriptions {
+ request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
+ }
+
+ return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
new file mode 100644
index 0000000..9d92d35
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go
@@ -0,0 +1,94 @@
+package sarama
+
+type ConsumerGroupMemberMetadata struct {
+ Version int16
+ Topics []string
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putStringArray(m.Topics); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ if m.Topics, err = pd.getStringArray(); err != nil {
+ return
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+type ConsumerGroupMemberAssignment struct {
+ Version int16
+ Topics map[string][]int32
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putArrayLength(len(m.Topics)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range m.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ var topicLen int
+ if topicLen, err = pd.getArrayLength(); err != nil {
+ return
+ }
+
+ m.Topics = make(map[string][]int32, topicLen)
+ for i := 0; i < topicLen; i++ {
+ var topic string
+ if topic, err = pd.getString(); err != nil {
+ return
+ }
+ if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+ return
+ }
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..483be33
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
@@ -0,0 +1,26 @@
+package sarama
+
+type ConsumerMetadataRequest struct {
+ ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+ return pe.putString(r.ConsumerGroup)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.ConsumerGroup, err = pd.getString()
+ return err
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..6b9632b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+import (
+ "net"
+ "strconv"
+)
+
+type ConsumerMetadataResponse struct {
+ Err KError
+ Coordinator *Broker
+ CoordinatorID int32 // deprecated: use Coordinator.ID()
+ CoordinatorHost string // deprecated: use Coordinator.Addr()
+ CoordinatorPort int32 // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(tmp)
+
+ coordinator := new(Broker)
+ if err := coordinator.decode(pd); err != nil {
+ return err
+ }
+ if coordinator.addr == ":0" {
+ return nil
+ }
+ r.Coordinator = coordinator
+
+ // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+ // backwards compatibility
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ r.CoordinatorID = r.Coordinator.ID()
+ r.CoordinatorHost = host
+ r.CoordinatorPort = int32(port)
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ if r.Coordinator != nil {
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ pe.putInt32(r.Coordinator.ID())
+ if err := pe.putString(host); err != nil {
+ return err
+ }
+ pe.putInt32(int32(port))
+ return nil
+ }
+ pe.putInt32(r.CoordinatorID)
+ if err := pe.putString(r.CoordinatorHost); err != nil {
+ return err
+ }
+ pe.putInt32(r.CoordinatorPort)
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+ return 10
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+ return V0_8_2_0
+}
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
new file mode 100644
index 0000000..1f14443
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/crc32_field.go
@@ -0,0 +1,69 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+)
+
+type crcPolynomial int8
+
+const (
+ crcIEEE crcPolynomial = iota
+ crcCastagnoli
+)
+
+var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
+type crc32Field struct {
+ startOffset int
+ polynomial crcPolynomial
+}
+
+func (c *crc32Field) saveOffset(in int) {
+ c.startOffset = in
+}
+
+func (c *crc32Field) reserveLength() int {
+ return 4
+}
+
+func newCRC32Field(polynomial crcPolynomial) *crc32Field {
+ return &crc32Field{polynomial: polynomial}
+}
+
+func (c *crc32Field) run(curOffset int, buf []byte) error {
+ crc, err := c.crc(curOffset, buf)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
+ return nil
+}
+
+func (c *crc32Field) check(curOffset int, buf []byte) error {
+ crc, err := c.crc(curOffset, buf)
+ if err != nil {
+ return err
+ }
+
+ expected := binary.BigEndian.Uint32(buf[c.startOffset:])
+ if crc != expected {
+ return PacketDecodingError{fmt.Sprintf("CRC didn't match expected %#x got %#x", expected, crc)}
+ }
+
+ return nil
+}
+func (c *crc32Field) crc(curOffset int, buf []byte) (uint32, error) {
+ var tab *crc32.Table
+ switch c.polynomial {
+ case crcIEEE:
+ tab = crc32.IEEETable
+ case crcCastagnoli:
+ tab = castagnoliTable
+ default:
+ return 0, PacketDecodingError{"invalid CRC type"}
+ }
+ return crc32.Checksum(buf[c.startOffset+4:curOffset], tab), nil
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
new file mode 100644
index 0000000..1fb3567
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go
@@ -0,0 +1,30 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+ Groups []string
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+ return pe.putStringArray(r.Groups)
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Groups, err = pd.getStringArray()
+ return
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
new file mode 100644
index 0000000..542b3a9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go
@@ -0,0 +1,187 @@
+package sarama
+
+type DescribeGroupsResponse struct {
+ Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+
+ for _, groupDescription := range r.Groups {
+ if err := groupDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Groups = make([]*GroupDescription, n)
+ for i := 0; i < n; i++ {
+ r.Groups[i] = new(GroupDescription)
+ if err := r.Groups[i].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+ return 15
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+type GroupDescription struct {
+ Err KError
+ GroupId string
+ State string
+ ProtocolType string
+ Protocol string
+ Members map[string]*GroupMemberDescription
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder) error {
+ pe.putInt16(int16(gd.Err))
+
+ if err := pe.putString(gd.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.State); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.ProtocolType); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.Protocol); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(gd.Members)); err != nil {
+ return err
+ }
+
+ for memberId, groupMemberDescription := range gd.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := groupMemberDescription.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ gd.Err = KError(kerr)
+
+ if gd.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.State, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+ if gd.Protocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ gd.Members = make(map[string]*GroupMemberDescription)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ gd.Members[memberId] = new(GroupMemberDescription)
+ if err := gd.Members[memberId].decode(pd); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type GroupMemberDescription struct {
+ ClientId string
+ ClientHost string
+ MemberMetadata []byte
+ MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
+ if err := pe.putString(gmd.ClientId); err != nil {
+ return err
+ }
+ if err := pe.putString(gmd.ClientHost); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
+ if gmd.ClientId, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.ClientHost, err = pd.getString(); err != nil {
+ return
+ }
+ if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+ return
+ }
+ if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(gmd.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+ metadata := new(ConsumerGroupMemberMetadata)
+ err := decode(gmd.MemberMetadata, metadata)
+ return metadata, err
+}
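+
+// An illustrative sketch (not part of the upstream API): given a
+// DescribeGroupsResponse `resp` obtained elsewhere, recover each member's
+// partition assignment from the raw bytes:
+//
+//	for _, group := range resp.Groups {
+//		for memberID, member := range group.Members {
+//			assignment, err := member.GetMemberAssignment()
+//			if err != nil {
+//				continue // assignment bytes may be absent or malformed
+//			}
+//			fmt.Println(memberID, assignment.Topics)
+//		}
+//	}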
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
new file mode 100644
index 0000000..294fcdb
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+ - go:
+ version: '1.9'
+
+commands:
+ test:
+ run: make test
+ desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
new file mode 100644
index 0000000..7ce3bc0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go
@@ -0,0 +1,89 @@
+package sarama
+
+import (
+ "fmt"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Encoder is the interface that wraps the basic Encode method.
+// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+ encode(pe packetEncoder) error
+}
+
+// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+ if e == nil {
+ return nil, nil
+ }
+
+ var prepEnc prepEncoder
+ var realEnc realEncoder
+
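+ // first pass: encode into prepEncoder, which writes nothing and
+ // only measures, so we learn the exact buffer size needed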
+ err := e.encode(&prepEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+ return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+ }
+
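+ // second pass: allocate exactly prepEnc.length bytes and encode
+ // again with realEncoder, which actually writes the wire format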
+ realEnc.raw = make([]byte, prepEnc.length)
+ realEnc.registry = metricRegistry
+ err = e.encode(&realEnc)
+ if err != nil {
+ return nil, err
+ }
+
+ return realEnc.raw, nil
+}
+
+// Decoder is the interface that wraps the basic Decode method.
+// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+ decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+ decode(pd packetDecoder, version int16) error
+}
+
+// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := realDecoder{raw: buf}
+ err := in.decode(&helper, version)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{"invalid length"}
+ }
+
+ return nil
+}
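+
+// An illustrative round-trip sketch using these package-internal helpers
+// (assumed caller inside the package; ConsumerMetadataRequest is defined
+// elsewhere in this package and uses no metrics, so a nil registry is safe):
+//
+//	req := &ConsumerMetadataRequest{ConsumerGroup: "my-group"}
+//	buf, err := encode(req, nil) // nil registry: no metrics recorded
+//	if err != nil {
+//		panic(err)
+//	}
+//	decoded := new(ConsumerMetadataRequest)
+//	err = versionedDecode(buf, decoded, req.version())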
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
new file mode 100644
index 0000000..b6242cd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/errors.go
@@ -0,0 +1,269 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+ Info string
+}
+
+func (err PacketEncodingError) Error() string {
+ return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+ Info string
+}
+
+func (err PacketDecodingError) Error() string {
+ return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+ return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+ ErrNoError KError = 0
+ ErrUnknown KError = -1
+ ErrOffsetOutOfRange KError = 1
+ ErrInvalidMessage KError = 2
+ ErrUnknownTopicOrPartition KError = 3
+ ErrInvalidMessageSize KError = 4
+ ErrLeaderNotAvailable KError = 5
+ ErrNotLeaderForPartition KError = 6
+ ErrRequestTimedOut KError = 7
+ ErrBrokerNotAvailable KError = 8
+ ErrReplicaNotAvailable KError = 9
+ ErrMessageSizeTooLarge KError = 10
+ ErrStaleControllerEpochCode KError = 11
+ ErrOffsetMetadataTooLarge KError = 12
+ ErrNetworkException KError = 13
+ ErrOffsetsLoadInProgress KError = 14
+ ErrConsumerCoordinatorNotAvailable KError = 15
+ ErrNotCoordinatorForConsumer KError = 16
+ ErrInvalidTopic KError = 17
+ ErrMessageSetSizeTooLarge KError = 18
+ ErrNotEnoughReplicas KError = 19
+ ErrNotEnoughReplicasAfterAppend KError = 20
+ ErrInvalidRequiredAcks KError = 21
+ ErrIllegalGeneration KError = 22
+ ErrInconsistentGroupProtocol KError = 23
+ ErrInvalidGroupId KError = 24
+ ErrUnknownMemberId KError = 25
+ ErrInvalidSessionTimeout KError = 26
+ ErrRebalanceInProgress KError = 27
+ ErrInvalidCommitOffsetSize KError = 28
+ ErrTopicAuthorizationFailed KError = 29
+ ErrGroupAuthorizationFailed KError = 30
+ ErrClusterAuthorizationFailed KError = 31
+ ErrInvalidTimestamp KError = 32
+ ErrUnsupportedSASLMechanism KError = 33
+ ErrIllegalSASLState KError = 34
+ ErrUnsupportedVersion KError = 35
+ ErrTopicAlreadyExists KError = 36
+ ErrInvalidPartitions KError = 37
+ ErrInvalidReplicationFactor KError = 38
+ ErrInvalidReplicaAssignment KError = 39
+ ErrInvalidConfig KError = 40
+ ErrNotController KError = 41
+ ErrInvalidRequest KError = 42
+ ErrUnsupportedForMessageFormat KError = 43
+ ErrPolicyViolation KError = 44
+ ErrOutOfOrderSequenceNumber KError = 45
+ ErrDuplicateSequenceNumber KError = 46
+ ErrInvalidProducerEpoch KError = 47
+ ErrInvalidTxnState KError = 48
+ ErrInvalidProducerIDMapping KError = 49
+ ErrInvalidTransactionTimeout KError = 50
+ ErrConcurrentTransactions KError = 51
+ ErrTransactionCoordinatorFenced KError = 52
+ ErrTransactionalIDAuthorizationFailed KError = 53
+ ErrSecurityDisabled KError = 54
+ ErrOperationNotAttempted KError = 55
+ ErrKafkaStorageError KError = 56
+ ErrLogDirNotFound KError = 57
+ ErrSASLAuthenticationFailed KError = 58
+ ErrUnknownProducerID KError = 59
+ ErrReassignmentInProgress KError = 60
+)
+
+func (err KError) Error() string {
+ // Error messages stolen/adapted from
+ // https://kafka.apache.org/protocol#protocol_error_codes
+ switch err {
+ case ErrNoError:
+ return "kafka server: Not an error, why are you printing me?"
+ case ErrUnknown:
+ return "kafka server: Unexpected (unknown?) server error."
+ case ErrOffsetOutOfRange:
+ return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
+ case ErrInvalidMessage:
+ return "kafka server: Message contents does not match its CRC."
+ case ErrUnknownTopicOrPartition:
+ return "kafka server: Request was for a topic or partition that does not exist on this broker."
+ case ErrInvalidMessageSize:
+ return "kafka server: The message has a negative size."
+ case ErrLeaderNotAvailable:
+ return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
+ case ErrNotLeaderForPartition:
+ return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
+ case ErrRequestTimedOut:
+ return "kafka server: Request exceeded the user-specified time limit in the request."
+ case ErrBrokerNotAvailable:
+ return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+ case ErrReplicaNotAvailable:
+ return "kafka server: Replica information not available, one or more brokers are down."
+ case ErrMessageSizeTooLarge:
+ return "kafka server: Message was too large, server rejected it to avoid allocation error."
+ case ErrStaleControllerEpochCode:
+ return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
+ case ErrOffsetMetadataTooLarge:
+ return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+ case ErrNetworkException:
+ return "kafka server: The server disconnected before a response was received."
+ case ErrOffsetsLoadInProgress:
+ return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+ case ErrConsumerCoordinatorNotAvailable:
+ return "kafka server: Offset's topic has not yet been created."
+ case ErrNotCoordinatorForConsumer:
+ return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+ case ErrInvalidTopic:
+ return "kafka server: The request attempted to perform an operation on an invalid topic."
+ case ErrMessageSetSizeTooLarge:
+ return "kafka server: The request included message batch larger than the configured segment size on the server."
+ case ErrNotEnoughReplicas:
+ return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+ case ErrNotEnoughReplicasAfterAppend:
+ return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+ case ErrInvalidRequiredAcks:
+ return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+ case ErrIllegalGeneration:
+ return "kafka server: The provided generation id is not the current generation."
+ case ErrInconsistentGroupProtocol:
+ return "kafka server: The provider group protocol type is incompatible with the other members."
+ case ErrInvalidGroupId:
+ return "kafka server: The provided group id was empty."
+ case ErrUnknownMemberId:
+ return "kafka server: The provided member is not known in the current generation."
+ case ErrInvalidSessionTimeout:
+ return "kafka server: The provided session timeout is outside the allowed range."
+ case ErrRebalanceInProgress:
+ return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+ case ErrInvalidCommitOffsetSize:
+ return "kafka server: The provided commit metadata was too large."
+ case ErrTopicAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this topic."
+ case ErrGroupAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this group."
+ case ErrClusterAuthorizationFailed:
+ return "kafka server: The client is not authorized to send this request type."
+ case ErrInvalidTimestamp:
+ return "kafka server: The timestamp of the message is out of acceptable range."
+ case ErrUnsupportedSASLMechanism:
+ return "kafka server: The broker does not support the requested SASL mechanism."
+ case ErrIllegalSASLState:
+ return "kafka server: Request is not valid given the current SASL state."
+ case ErrUnsupportedVersion:
+ return "kafka server: The version of API is not supported."
+ case ErrTopicAlreadyExists:
+ return "kafka server: Topic with this name already exists."
+ case ErrInvalidPartitions:
+ return "kafka server: Number of partitions is invalid."
+ case ErrInvalidReplicationFactor:
+ return "kafka server: Replication-factor is invalid."
+ case ErrInvalidReplicaAssignment:
+ return "kafka server: Replica assignment is invalid."
+ case ErrInvalidConfig:
+ return "kafka server: Configuration is invalid."
+ case ErrNotController:
+ return "kafka server: This is not the correct controller for this cluster."
+ case ErrInvalidRequest:
+ return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
+ case ErrUnsupportedForMessageFormat:
+ return "kafka server: The requested operation is not supported by the message format version."
+ case ErrPolicyViolation:
+ return "kafka server: Request parameters do not satisfy the configured policy."
+ case ErrOutOfOrderSequenceNumber:
+ return "kafka server: The broker received an out of order sequence number."
+ case ErrDuplicateSequenceNumber:
+ return "kafka server: The broker received a duplicate sequence number."
+ case ErrInvalidProducerEpoch:
+ return "kafka server: Producer attempted an operation with an old epoch."
+ case ErrInvalidTxnState:
+ return "kafka server: The producer attempted a transactional operation in an invalid state."
+ case ErrInvalidProducerIDMapping:
+ return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
+ case ErrInvalidTransactionTimeout:
+ return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
+ case ErrConcurrentTransactions:
+ return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
+ case ErrTransactionCoordinatorFenced:
+ return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
+ case ErrTransactionalIDAuthorizationFailed:
+ return "kafka server: Transactional ID authorization failed."
+ case ErrSecurityDisabled:
+ return "kafka server: Security features are disabled."
+ case ErrOperationNotAttempted:
+ return "kafka server: The broker did not attempt to execute this operation."
+ case ErrKafkaStorageError:
+ return "kafka server: Disk error when trying to access log file on the disk."
+ case ErrLogDirNotFound:
+ return "kafka server: The specified log directory is not found in the broker config."
+ case ErrSASLAuthenticationFailed:
+ return "kafka server: SASL Authentication failed."
+ case ErrUnknownProducerID:
+ return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
+ case ErrReassignmentInProgress:
+ return "kafka server: A partition reassignment is in progress."
+ }
+
+ return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
new file mode 100644
index 0000000..8c8e3a5
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_request.go
@@ -0,0 +1,170 @@
+package sarama
+
+type fetchRequestBlock struct {
+ fetchOffset int64
+ maxBytes int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder) error {
+ pe.putInt64(b.fetchOffset)
+ pe.putInt32(b.maxBytes)
+ return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
+ if b.fetchOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if b.maxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+ MaxWaitTime int32
+ MinBytes int32
+ MaxBytes int32
+ Version int16
+ Isolation IsolationLevel
+ blocks map[string]map[int32]*fetchRequestBlock
+}
+
+type IsolationLevel int8
+
+const (
+ ReadUncommitted IsolationLevel = 0
+ ReadCommitted IsolationLevel = 1
+)
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ pe.putInt32(r.MaxWaitTime)
+ pe.putInt32(r.MinBytes)
+ if r.Version >= 3 {
+ pe.putInt32(r.MaxBytes)
+ }
+ if r.Version >= 4 {
+ pe.putInt8(int8(r.Isolation))
+ }
+ err = pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, blocks := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(blocks))
+ if err != nil {
+ return err
+ }
+ for partition, block := range blocks {
+ pe.putInt32(partition)
+ err = block.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if _, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MinBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.Version >= 3 {
+ if r.MaxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ if r.Version >= 4 {
+ isolation, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Isolation = IsolationLevel(isolation)
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ fetchBlock := &fetchRequestBlock{}
+ if err = fetchBlock.decode(pd); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = fetchBlock
+ }
+ }
+ return nil
+}
+
+func (r *FetchRequest) key() int16 {
+ return 1
+}
+
+func (r *FetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_10_1_0
+ case 4:
+ return V0_11_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ }
+
+ tmp := new(fetchRequestBlock)
+ tmp.maxBytes = maxBytes
+ tmp.fetchOffset = fetchOffset
+
+ r.blocks[topic][partitionID] = tmp
+}
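+
+// Usage sketch (editor's note, not upstream code): building a v3 FetchRequest
+// for one partition; the topic name and offsets below are hypothetical.
+//
+//	req := &FetchRequest{
+//		Version:     3,
+//		MaxWaitTime: 250,     // broker may wait up to 250ms for MinBytes
+//		MinBytes:    1,
+//		MaxBytes:    1 << 20, // request-level cap, v3+ only (KIP-74)
+//	}
+//	req.AddBlock("my-topic", 0, 42, 32*1024) // partition 0 from offset 42, 32KiB per-partition cap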
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
new file mode 100644
index 0000000..3433bcf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/fetch_response.go
@@ -0,0 +1,315 @@
+package sarama
+
+import "time"
+
+type AbortedTransaction struct {
+ ProducerID int64
+ FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+ if t.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if t.FirstOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+ pe.putInt64(t.ProducerID)
+ pe.putInt64(t.FirstOffset)
+
+ return nil
+}
+
+type FetchResponseBlock struct {
+ Err KError
+ HighWaterMarkOffset int64
+ LastStableOffset int64
+ AbortedTransactions []*AbortedTransaction
+ Records Records
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.HighWaterMarkOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 4 {
+ b.LastStableOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ numTransact, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numTransact >= 0 {
+ b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+ }
+
+ for i := 0; i < numTransact; i++ {
+ transact := new(AbortedTransaction)
+ if err = transact.decode(pd); err != nil {
+ return err
+ }
+ b.AbortedTransactions[i] = transact
+ }
+ }
+
+ recordsSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ recordsDecoder, err := pd.getSubset(int(recordsSize))
+ if err != nil {
+ return err
+ }
+ if recordsSize > 0 {
+ if err = b.Records.decode(recordsDecoder); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ pe.putInt64(b.HighWaterMarkOffset)
+
+ if version >= 4 {
+ pe.putInt64(b.LastStableOffset)
+
+ if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+ return err
+ }
+ for _, transact := range b.AbortedTransactions {
+ if err = transact.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ pe.push(&lengthField{})
+ err = b.Records.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+type FetchResponse struct {
+ Blocks map[string]map[int32]*FetchResponseBlock
+ ThrottleTime time.Duration
+ Version int16 // v1 requires 0.9+, v2 requires 0.10+
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.Version >= 1 {
+ throttle, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.ThrottleTime = time.Duration(throttle) * time.Millisecond
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(FetchResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+
+ err = pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+
+ for id, block := range partitions {
+ pe.putInt32(id)
+ err = block.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+
+ }
+ return nil
+}
+
+func (r *FetchResponse) key() int16 {
+ return 1
+}
+
+func (r *FetchResponse) version() int16 {
+ return r.Version
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_10_1_0
+ case 4:
+ return V0_11_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+
+ return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+ var kb []byte
+ var vb []byte
+ if key != nil {
+ kb, _ = key.Encode()
+ }
+ if value != nil {
+ vb, _ = value.Encode()
+ }
+
+ return kb, vb
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+ msg := &Message{Key: kb, Value: vb}
+ msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+ set := frb.Records.msgSet
+ if set == nil {
+ set = &MessageSet{}
+ frb.Records = newLegacyRecords(set)
+ }
+ set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+ rec := &Record{Key: kb, Value: vb, OffsetDelta: offset}
+ batch := frb.Records.recordBatch
+ if batch == nil {
+ batch = &RecordBatch{Version: 2}
+ frb.Records = newDefaultRecords(batch)
+ }
+ batch.addRecord(rec)
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+ frb := r.getOrCreateBlock(topic, partition)
+ frb.LastStableOffset = offset
+}
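+
+// Usage sketch (editor's note): the Add* helpers above are aimed at tests.
+// Assuming the package's StringEncoder, a mixed mock response might be built as:
+//
+//	res := &FetchResponse{Version: 4}
+//	res.AddMessage("t", 0, nil, StringEncoder("legacy"), 5) // legacy MessageSet path
+//	res.AddRecord("t", 1, nil, StringEncoder("v2"), 0)      // v2 RecordBatch path
+//	res.SetLastStableOffset("t", 1, 10)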
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
new file mode 100644
index 0000000..ce49c47
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+type HeartbeatRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *HeartbeatRequest) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatRequest) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
new file mode 100644
index 0000000..766f5fd
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type HeartbeatResponse struct {
+ Err KError
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *HeartbeatResponse) key() int16 {
+ return 12
+}
+
+func (r *HeartbeatResponse) version() int16 {
+ return 0
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
new file mode 100644
index 0000000..3a7ba17
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_request.go
@@ -0,0 +1,143 @@
+package sarama
+
+type GroupProtocol struct {
+ Name string
+ Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+ p.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ p.Metadata, err = pd.getBytes()
+ return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+ if err := pe.putString(p.Name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(p.Metadata); err != nil {
+ return err
+ }
+ return nil
+}
+
+type JoinGroupRequest struct {
+ GroupId string
+ SessionTimeout int32
+ MemberId string
+ ProtocolType string
+ GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
+ OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ pe.putInt32(r.SessionTimeout)
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.ProtocolType); err != nil {
+ return err
+ }
+
+ if len(r.GroupProtocols) > 0 {
+ if len(r.OrderedGroupProtocols) > 0 {
+ return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+ }
+
+ if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+ return err
+ }
+ for name, metadata := range r.GroupProtocols {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(metadata); err != nil {
+ return err
+ }
+ }
+ } else {
+ if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+ return err
+ }
+ for _, protocol := range r.OrderedGroupProtocols {
+ if err := protocol.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.SessionTimeout, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupProtocols = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ protocol := &GroupProtocol{}
+ if err := protocol.decode(pd); err != nil {
+ return err
+ }
+ r.GroupProtocols[protocol.Name] = protocol.Metadata
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+ }
+
+ return nil
+}
+
+func (r *JoinGroupRequest) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+ Name: name,
+ Metadata: metadata,
+ })
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+ bin, err := encode(metadata, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupProtocol(name, bin)
+ return nil
+}
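+
+// Usage sketch (editor's note): joining with ordered consumer metadata; the
+// group and topic names are hypothetical.
+//
+//	req := &JoinGroupRequest{GroupId: "my-group", SessionTimeout: 30000, ProtocolType: "consumer"}
+//	err := req.AddGroupProtocolMetadata("range",
+//		&ConsumerGroupMemberMetadata{Version: 1, Topics: []string{"my-topic"}})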
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
new file mode 100644
index 0000000..6d35fe3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/join_group_response.go
@@ -0,0 +1,115 @@
+package sarama
+
+type JoinGroupResponse struct {
+ Err KError
+ GenerationId int32
+ GroupProtocol string
+ LeaderId string
+ MemberId string
+ Members map[string][]byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+ members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+ for id, bin := range r.Members {
+ meta := new(ConsumerGroupMemberMetadata)
+ if err := decode(bin, meta); err != nil {
+ return nil, err
+ }
+ members[id] = *meta
+ }
+ return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.GroupProtocol); err != nil {
+ return err
+ }
+ if err := pe.putString(r.LeaderId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+
+ for memberId, memberMetadata := range r.Members {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(memberMetadata); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.GroupProtocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.LeaderId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Members = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ memberMetadata, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.Members[memberId] = memberMetadata
+ }
+
+ return nil
+}
+
+func (r *JoinGroupResponse) key() int16 {
+ return 11
+}
+
+func (r *JoinGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
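+
+// Usage sketch (editor's note): a group leader typically decodes the member
+// metadata before computing assignments (resp and assign are hypothetical):
+//
+//	members, err := resp.GetMembers()
+//	if err != nil {
+//		return err
+//	}
+//	for id, meta := range members {
+//		assign(id, meta.Topics) // hand out partitions per member
+//	}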
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
new file mode 100644
index 0000000..e177427
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_request.go
@@ -0,0 +1,40 @@
+package sarama
+
+type LeaveGroupRequest struct {
+ GroupId string
+ MemberId string
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ return nil
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
new file mode 100644
index 0000000..d60c626
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/leave_group_response.go
@@ -0,0 +1,32 @@
+package sarama
+
+type LeaveGroupResponse struct {
+ Err KError
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Err = KError(kerr)
+
+ return nil
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+ return 13
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
new file mode 100644
index 0000000..576b1a6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/length_field.go
@@ -0,0 +1,69 @@
+package sarama
+
+import "encoding/binary"
+
+// lengthField implements the pushEncoder and pushDecoder interfaces for calculating 4-byte lengths.
+type lengthField struct {
+ startOffset int
+}
+
+func (l *lengthField) saveOffset(in int) {
+ l.startOffset = in
+}
+
+func (l *lengthField) reserveLength() int {
+ return 4
+}
+
+func (l *lengthField) run(curOffset int, buf []byte) error {
+ binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
+ return nil
+}
+
+func (l *lengthField) check(curOffset int, buf []byte) error {
+ if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
+ return PacketDecodingError{"length field invalid"}
+ }
+
+ return nil
+}
+
+type varintLengthField struct {
+ startOffset int
+ length int64
+}
+
+func (l *varintLengthField) decode(pd packetDecoder) error {
+ var err error
+ l.length, err = pd.getVarint()
+ return err
+}
+
+func (l *varintLengthField) saveOffset(in int) {
+ l.startOffset = in
+}
+
+func (l *varintLengthField) adjustLength(currOffset int) int {
+ oldFieldSize := l.reserveLength()
+ l.length = int64(currOffset - l.startOffset - oldFieldSize)
+
+ return l.reserveLength() - oldFieldSize
+}
+
+func (l *varintLengthField) reserveLength() int {
+ var tmp [binary.MaxVarintLen64]byte
+ return binary.PutVarint(tmp[:], l.length)
+}
+
+func (l *varintLengthField) run(curOffset int, buf []byte) error {
+ binary.PutVarint(buf[l.startOffset:], l.length)
+ return nil
+}
+
+func (l *varintLengthField) check(curOffset int, buf []byte) error {
+ if int64(curOffset-l.startOffset-l.reserveLength()) != l.length {
+ return PacketDecodingError{"length field invalid"}
+ }
+
+ return nil
+}
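+
+// Usage sketch (editor's note): encoders bracket a variable-length section
+// with push/pop, and lengthField back-fills the 4-byte length on pop:
+//
+//	pe.push(&lengthField{}) // reserves 4 bytes, records the start offset
+//	// ... encode the section ...
+//	pe.pop()                // run() writes curOffset-startOffset-4 into the gap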
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
new file mode 100644
index 0000000..3b16abf
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_request.go
@@ -0,0 +1,24 @@
+package sarama
+
+type ListGroupsRequest struct {
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+ return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ return nil
+}
+
+func (r *ListGroupsRequest) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsRequest) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
new file mode 100644
index 0000000..56115d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/list_groups_response.go
@@ -0,0 +1,69 @@
+package sarama
+
+type ListGroupsResponse struct {
+ Err KError
+ Groups map[string]string
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+ for groupId, protocolType := range r.Groups {
+ if err := pe.putString(groupId); err != nil {
+ return err
+ }
+ if err := pe.putString(protocolType); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.Groups = make(map[string]string)
+ for i := 0; i < n; i++ {
+ groupId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ protocolType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.Groups[groupId] = protocolType
+ }
+
+ return nil
+}
+
+func (r *ListGroupsResponse) key() int16 {
+ return 16
+}
+
+func (r *ListGroupsResponse) version() int16 {
+ return 0
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
new file mode 100644
index 0000000..bd5650b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message.go
@@ -0,0 +1,200 @@
+package sarama
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ "github.com/eapache/go-xerial-snappy"
+ "github.com/pierrec/lz4"
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+// only the last two bits are really used
+const compressionCodecMask int8 = 0x03
+
+const (
+ CompressionNone CompressionCodec = 0
+ CompressionGZIP CompressionCodec = 1
+ CompressionSnappy CompressionCodec = 2
+ CompressionLZ4 CompressionCodec = 3
+)
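+
+// Editor's note: only the low two attribute bits carry the codec, e.g.
+// int8(CompressionSnappy)&compressionCodecMask == 0x02&0x03 == 0x02.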
+
+type Message struct {
+ Codec CompressionCodec // codec used to compress the message contents
+ Key []byte // the message key, may be nil
+ Value []byte // the message contents
+ Set *MessageSet // the message set a message might wrap
+ Version int8 // v1 requires Kafka 0.10
+ Timestamp time.Time // the timestamp of the message (version 1+ only)
+
+ compressedCache []byte
+ compressedSize int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+ pe.push(newCRC32Field(crcIEEE))
+
+ pe.putInt8(m.Version)
+
+ attributes := int8(m.Codec) & compressionCodecMask
+ pe.putInt8(attributes)
+
+ if m.Version >= 1 {
+ if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+ return err
+ }
+ }
+
+ err := pe.putBytes(m.Key)
+ if err != nil {
+ return err
+ }
+
+ var payload []byte
+
+ if m.compressedCache != nil {
+ payload = m.compressedCache
+ m.compressedCache = nil
+ } else if m.Value != nil {
+ switch m.Codec {
+ case CompressionNone:
+ payload = m.Value
+ case CompressionGZIP:
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ if _, err = writer.Write(m.Value); err != nil {
+ return err
+ }
+ if err = writer.Close(); err != nil {
+ return err
+ }
+ m.compressedCache = buf.Bytes()
+ payload = m.compressedCache
+ case CompressionSnappy:
+ tmp := snappy.Encode(m.Value)
+ m.compressedCache = tmp
+ payload = m.compressedCache
+ case CompressionLZ4:
+ var buf bytes.Buffer
+ writer := lz4.NewWriter(&buf)
+ if _, err = writer.Write(m.Value); err != nil {
+ return err
+ }
+ if err = writer.Close(); err != nil {
+ return err
+ }
+ m.compressedCache = buf.Bytes()
+ payload = m.compressedCache
+
+ default:
+ return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
+ }
+ // Keep in mind the compressed payload size for metric gathering
+ m.compressedSize = len(payload)
+ }
+
+ if err = pe.putBytes(payload); err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+ err = pd.push(newCRC32Field(crcIEEE))
+ if err != nil {
+ return err
+ }
+
+ m.Version, err = pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ if m.Version > 1 {
+ return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+ }
+
+ attribute, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ m.Codec = CompressionCodec(attribute & compressionCodecMask)
+
+ if m.Version == 1 {
+ if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+ return err
+ }
+ }
+
+ m.Key, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ m.Value, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ // Required for deep equal assertion during tests but might be useful
+ // for future metrics about the compression ratio in fetch requests
+ m.compressedSize = len(m.Value)
+
+ switch m.Codec {
+ case CompressionNone:
+ // nothing to do
+ case CompressionGZIP:
+ if m.Value == nil {
+ break
+ }
+ reader, err := gzip.NewReader(bytes.NewReader(m.Value))
+ if err != nil {
+ return err
+ }
+ if m.Value, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ case CompressionSnappy:
+ if m.Value == nil {
+ break
+ }
+ if m.Value, err = snappy.Decode(m.Value); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ case CompressionLZ4:
+ if m.Value == nil {
+ break
+ }
+ reader := lz4.NewReader(bytes.NewReader(m.Value))
+ if m.Value, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+
+ default:
+ return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
+ }
+
+ return pd.pop()
+}
+
+// decodeSet decodes a message set from a previously encoded bulk message.
+func (m *Message) decodeSet() (err error) {
+ pd := realDecoder{raw: m.Value}
+ m.Set = &MessageSet{}
+ return m.Set.decode(&pd)
+}
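+
+// Editor's note: for compressed codecs the decompressed Value is itself an
+// encoded MessageSet, so decode() unwraps it via decodeSet() and the inner
+// messages surface through MessageBlock.Messages().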
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 0000000..f028784
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,89 @@
+package sarama
+
+type MessageBlock struct {
+ Offset int64
+ Msg *Message
+}
+
+// Messages is a convenience helper which returns all the messages wrapped in
+// this block's nested set, or the block itself if there is no nested set.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+ if msb.Msg.Set != nil {
+ return msb.Msg.Set.Messages
+ }
+ return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+ pe.putInt64(msb.Offset)
+ pe.push(&lengthField{})
+ err := msb.Msg.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+ if msb.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if err = pd.push(&lengthField{}); err != nil {
+ return err
+ }
+
+ msb.Msg = new(Message)
+ if err = msb.Msg.decode(pd); err != nil {
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type MessageSet struct {
+ PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+ Messages []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+ for i := range ms.Messages {
+ err := ms.Messages[i].encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+ ms.Messages = nil
+
+ for pd.remaining() > 0 {
+ msb := new(MessageBlock)
+ err = msb.decode(pd)
+ switch err {
+ case nil:
+ ms.Messages = append(ms.Messages, msb)
+ case ErrInsufficientData:
+ // As an optimization the server is allowed to return a partial message at
+ // the end of the message set; clients should handle this case, so we just
+ // flag the partial trailing message and stop decoding.
+ ms.PartialTrailingMessage = true
+ return nil
+ default:
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+ block := new(MessageBlock)
+ block.Msg = msg
+ ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
new file mode 100644
index 0000000..9a26b55
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_request.go
@@ -0,0 +1,52 @@
+package sarama
+
+type MetadataRequest struct {
+ Topics []string
+}
+
+func (r *MetadataRequest) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+
+ for i := range r.Topics {
+ err = pe.putString(r.Topics[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+
+ r.Topics = make([]string, topicCount)
+ for i := range r.Topics {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Topics[i] = topic
+ }
+ return nil
+}
+
+func (r *MetadataRequest) key() int16 {
+ return 3
+}
+
+func (r *MetadataRequest) version() int16 {
+ return 0
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+ return minVersion
+}
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
new file mode 100644
index 0000000..f9d6a42
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metadata_response.go
@@ -0,0 +1,239 @@
+package sarama
+
+type PartitionMetadata struct {
+ Err KError
+ ID int32
+ Leader int32
+ Replicas []int32
+ Isr []int32
+}
+
+func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ pm.Err = KError(tmp)
+
+ pm.ID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Leader, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ pm.Replicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ pm.Isr, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(pm.Err))
+ pe.putInt32(pm.ID)
+ pe.putInt32(pm.Leader)
+
+ err = pe.putInt32Array(pm.Replicas)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putInt32Array(pm.Isr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type TopicMetadata struct {
+ Err KError
+ Name string
+ Partitions []*PartitionMetadata
+}
+
+func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ tm.Err = KError(tmp)
+
+ tm.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ tm.Partitions = make([]*PartitionMetadata, n)
+ for i := 0; i < n; i++ {
+ tm.Partitions[i] = new(PartitionMetadata)
+ err = tm.Partitions[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
+ pe.putInt16(int16(tm.Err))
+
+ err = pe.putString(tm.Name)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(tm.Partitions))
+ if err != nil {
+ return err
+ }
+
+ for _, pm := range tm.Partitions {
+ err = pm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type MetadataResponse struct {
+ Brokers []*Broker
+ Topics []*TopicMetadata
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Brokers = make([]*Broker, n)
+ for i := 0; i < n; i++ {
+ r.Brokers[i] = new(Broker)
+ err = r.Brokers[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ n, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Topics = make([]*TopicMetadata, n)
+ for i := 0; i < n; i++ {
+ r.Topics[i] = new(TopicMetadata)
+ err = r.Topics[i].decode(pd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Brokers))
+ if err != nil {
+ return err
+ }
+ for _, broker := range r.Brokers {
+ err = broker.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+ for _, tm := range r.Topics {
+ err = tm.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+ return 3
+}
+
+func (r *MetadataResponse) version() int16 {
+ return 0
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+ r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+ var tmatch *TopicMetadata
+
+ for _, tm := range r.Topics {
+ if tm.Name == topic {
+ tmatch = tm
+ goto foundTopic
+ }
+ }
+
+ tmatch = new(TopicMetadata)
+ tmatch.Name = topic
+ r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+ tmatch.Err = err
+ return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
+ tmatch := r.AddTopic(topic, ErrNoError)
+ var pmatch *PartitionMetadata
+
+ for _, pm := range tmatch.Partitions {
+ if pm.ID == partition {
+ pmatch = pm
+ goto foundPartition
+ }
+ }
+
+ pmatch = new(PartitionMetadata)
+ pmatch.ID = partition
+ tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+
+ pmatch.Leader = brokerID
+ pmatch.Replicas = replicas
+ pmatch.Isr = isr
+ pmatch.Err = err
+
+}
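+
+// Usage sketch (editor's note): tests usually seed metadata in two steps; the
+// address and topic below are hypothetical.
+//
+//	res := new(MetadataResponse)
+//	res.AddBroker("localhost:9092", 1)
+//	res.AddTopicPartition("my-topic", 0, 1, []int32{1}, []int32{1}, ErrNoError)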
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
new file mode 100644
index 0000000..4869708
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/metrics.go
@@ -0,0 +1,51 @@
+package sarama
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+ metricsReservoirSize = 1028
+ metricsAlphaFactor = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+ return r.GetOrRegister(name, func() metrics.Histogram {
+ return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+ }).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+ // Use the broker id, like the Java client, as it does not contain '.' or ':'
+ // characters that can be interpreted as special characters by monitoring
+ // tools (e.g. Graphite)
+ return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
+}
+
+func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+ // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+ // cf. KAFKA-1902 and KAFKA-2337
+ return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
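+
+// Editor's note: for example, getMetricNameForTopic("record-send-rate", "my.topic")
+// yields "record-send-rate-for-topic-my_topic", keeping Graphite hierarchies intact.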
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 0000000..0734d34
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,324 @@
+package sarama
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+const (
+ expectationTimeout = 500 * time.Millisecond
+)
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and makes it the
+// test writer's responsibility to program MockBroker behaviour that is
+// correct according to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from those connections and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
+type MockBroker struct {
+ brokerID int32
+ port int32
+ closing chan none
+ stopper chan none
+ expectations chan encoder
+ listener net.Listener
+ t TestReporter
+ latency time.Duration
+ handler requestHandlerFunc
+ notifier RequestNotifierFunc
+ history []RequestResponse
+ lock sync.Mutex
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+ Request protocolBody
+ Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+ b.latency = latency
+}
+
+// SetHandlerByMap defines mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+ b.setHandler(func(req *request) (res encoder) {
+ reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+ mockResponse := handlerMap[reqTypeName]
+ if mockResponse == nil {
+ return nil
+ }
+ return mockResponse.For(req.body)
+ })
+}
+
+// SetNotifier sets a function that is invoked whenever a request has been
+// processed successfully and provides the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+ b.lock.Lock()
+ b.notifier = notifier
+ b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+ return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+ b.lock.Lock()
+ history := make([]RequestResponse, len(b.history))
+ copy(history, b.history)
+ b.lock.Unlock()
+ return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+ return b.port
+}
+
+// Addr returns the broker connection string in the form "<addr>:<port>".
+func (b *MockBroker) Addr() string {
+ return b.listener.Addr().String()
+}
+
+// Close terminates the broker blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+ close(b.expectations)
+ if len(b.expectations) > 0 {
+ buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+ for e := range b.expectations {
+ _, _ = buf.WriteString(spew.Sdump(e))
+ }
+ b.t.Error(buf.String())
+ }
+ close(b.closing)
+ <-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+ b.lock.Lock()
+ b.handler = handler
+ b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+ defer close(b.stopper)
+ var err error
+ var conn net.Conn
+
+ go func() {
+ <-b.closing
+ err := b.listener.Close()
+ if err != nil {
+ b.t.Error(err)
+ }
+ }()
+
+ wg := &sync.WaitGroup{}
+ i := 0
+ for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+ wg.Add(1)
+ go b.handleRequests(conn, i, wg)
+ i++
+ }
+ wg.Wait()
+ Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+}
+
+func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
+ defer wg.Done()
+ defer func() {
+ _ = conn.Close()
+ }()
+ Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+ var err error
+
+ abort := make(chan none)
+ defer close(abort)
+ go func() {
+ select {
+ case <-b.closing:
+ _ = conn.Close()
+ case <-abort:
+ }
+ }()
+
+ resHeader := make([]byte, 8)
+ for {
+ req, bytesRead, err := decodeRequest(conn)
+ if err != nil {
+ Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+ b.serverError(err)
+ break
+ }
+
+ if b.latency > 0 {
+ time.Sleep(b.latency)
+ }
+
+ b.lock.Lock()
+ res := b.handler(req)
+ b.history = append(b.history, RequestResponse{req.body, res})
+ b.lock.Unlock()
+
+ if res == nil {
+ Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+ continue
+ }
+ Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
+
+ encodedRes, err := encode(res, nil)
+ if err != nil {
+ b.serverError(err)
+ break
+ }
+ if len(encodedRes) == 0 {
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, 0)
+ }
+ b.lock.Unlock()
+ continue
+ }
+
+ binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
+ binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
+ if _, err = conn.Write(resHeader); err != nil {
+ b.serverError(err)
+ break
+ }
+ if _, err = conn.Write(encodedRes); err != nil {
+ b.serverError(err)
+ break
+ }
+
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, len(resHeader)+len(encodedRes))
+ }
+ b.lock.Unlock()
+ }
+ Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
+ select {
+ case res, ok := <-b.expectations:
+ if !ok {
+ return nil
+ }
+ return res
+ case <-time.After(expectationTimeout):
+ return nil
+ }
+}
+
+func (b *MockBroker) serverError(err error) {
+ isConnectionClosedError := false
+ if _, ok := err.(*net.OpError); ok {
+ isConnectionClosedError = true
+ } else if err == io.EOF {
+ isConnectionClosedError = true
+ } else if err.Error() == "use of closed network connection" {
+ isConnectionClosedError = true
+ }
+
+ if isConnectionClosedError {
+ return
+ }
+
+ b.t.Errorf("%v", err) // avoid treating the error text as a format string
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and the broker ID to assign. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+ return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+ var err error
+
+ broker := &MockBroker{
+ closing: make(chan none),
+ stopper: make(chan none),
+ t: t,
+ brokerID: brokerID,
+ expectations: make(chan encoder, 512),
+ }
+ broker.handler = broker.defaultRequestHandler
+
+ broker.listener, err = net.Listen("tcp", addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+ _, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmp, err := strconv.ParseInt(portStr, 10, 32)
+ if err != nil {
+ t.Fatal(err)
+ }
+ broker.port = int32(tmp)
+
+ go broker.serverLoop()
+
+ return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+ b.expectations <- e
+}
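+
+// Usage sketch (editor's note): a minimal test wiring, mirroring the package's
+// own client tests.
+//
+//	broker := NewMockBroker(t, 1)
+//	defer broker.Close()
+//	broker.Returns(new(MetadataResponse)) // answer the client's bootstrap metadata request
+//	client, err := NewClient([]string{broker.Addr()}, nil)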
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 0000000..9659757
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,469 @@
+package sarama
+
+import (
+ "fmt"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+ Error(...interface{})
+ Errorf(string, ...interface{})
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface: it defines one method that
+// generates a response based on a request body. MockResponses are used to
+// program the behavior of a MockBroker in tests.
+type MockResponse interface {
+ For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+ res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+ return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+ return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Each time a `MockBroker` calls its `For` method the
+// next response from the sequence is returned. Once the end of the sequence
+// is reached, the last element is returned for every subsequent call.
+type MockSequence struct {
+ responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+ ms := &MockSequence{}
+ ms.responses = make([]MockResponse, len(responses))
+ for i, res := range responses {
+ switch res := res.(type) {
+ case MockResponse:
+ ms.responses[i] = res
+ case encoder:
+ ms.responses[i] = NewMockWrapper(res)
+ default:
+ panic(fmt.Sprintf("Unexpected response type: %T", res))
+ }
+ }
+ return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
+ res = mc.responses[0].For(reqBody)
+ if len(mc.responses) > 1 {
+ mc.responses = mc.responses[1:]
+ }
+ return res
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+ leaders map[string]map[int32]int32
+ brokers map[string]int32
+ t TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+ return &MockMetadataResponse{
+ leaders: make(map[string]map[int32]int32),
+ brokers: make(map[string]int32),
+ t: t,
+ }
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+ partitions := mmr.leaders[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int32)
+ mmr.leaders[topic] = partitions
+ }
+ partitions[partition] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+ mmr.brokers[addr] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
+ metadataRequest := reqBody.(*MetadataRequest)
+ metadataResponse := &MetadataResponse{}
+ for addr, brokerID := range mmr.brokers {
+ metadataResponse.AddBroker(addr, brokerID)
+ }
+ if len(metadataRequest.Topics) == 0 {
+ for topic, partitions := range mmr.leaders {
+ for partition, brokerID := range partitions {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+ }
+ }
+ return metadataResponse
+ }
+ for _, topic := range metadataRequest.Topics {
+ for partition, brokerID := range mmr.leaders[topic] {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
+ }
+ }
+ return metadataResponse
+}
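+
+// Usage sketch (editor's note): programming a broker by request type name; the
+// topic is hypothetical.
+//
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()).
+//			SetLeader("my-topic", 0, broker.BrokerID()),
+//	})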
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+ offsets map[string]map[int32]map[int64]int64
+ t TestReporter
+ version int16
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+ return &MockOffsetResponse{
+ offsets: make(map[string]map[int32]map[int64]int64),
+ t: t,
+ }
+}
+
+func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
+ mor.version = version
+ return mor
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]int64)
+ mor.offsets[topic] = partitions
+ }
+ times := partitions[partition]
+ if times == nil {
+ times = make(map[int64]int64)
+ partitions[partition] = times
+ }
+ times[time] = offset
+ return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
+ offsetRequest := reqBody.(*OffsetRequest)
+ offsetResponse := &OffsetResponse{Version: mor.version}
+ for topic, partitions := range offsetRequest.blocks {
+ for partition, block := range partitions {
+ offset := mor.getOffset(topic, partition, block.time)
+ offsetResponse.AddTopicPartition(topic, partition, offset)
+ }
+ }
+ return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ mor.t.Errorf("missing topic: %s", topic)
+ }
+ times := partitions[partition]
+ if times == nil {
+ mor.t.Errorf("missing partition: %d", partition)
+ }
+ offset, ok := times[time]
+ if !ok {
+ mor.t.Errorf("missing time: %d", time)
+ }
+ return offset
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+ messages map[string]map[int32]map[int64]Encoder
+ highWaterMarks map[string]map[int32]int64
+ t TestReporter
+ batchSize int
+ version int16
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+ return &MockFetchResponse{
+ messages: make(map[string]map[int32]map[int64]Encoder),
+ highWaterMarks: make(map[string]map[int32]int64),
+ t: t,
+ batchSize: batchSize,
+ }
+}
+
+func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
+ mfr.version = version
+ return mfr
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]Encoder)
+ mfr.messages[topic] = partitions
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ messages = make(map[int64]Encoder)
+ partitions[partition] = messages
+ }
+ messages[offset] = msg
+ return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int64)
+ mfr.highWaterMarks[topic] = partitions
+ }
+ partitions[partition] = offset
+ return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
+ fetchRequest := reqBody.(*FetchRequest)
+ res := &FetchResponse{
+ Version: mfr.version,
+ }
+ for topic, partitions := range fetchRequest.blocks {
+ for partition, block := range partitions {
+ initialOffset := block.fetchOffset
+ offset := initialOffset
+ maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+ for i := 0; i < mfr.batchSize && offset < maxOffset; {
+ msg := mfr.getMessage(topic, partition, offset)
+ if msg != nil {
+ res.AddMessage(topic, partition, nil, msg, offset)
+ i++
+ }
+ offset++
+ }
+ fb := res.GetBlock(topic, partition)
+ if fb == nil {
+ res.AddError(topic, partition, ErrNoError)
+ fb = res.GetBlock(topic, partition)
+ }
+ fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+ }
+ }
+ return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return nil
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return nil
+ }
+ return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return 0
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return 0
+ }
+ return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ return 0
+ }
+ return partitions[partition]
+}
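+
+// Example (sketch): serve one message per fetch request (batchSize 1) and
+// advertise a high water mark of 1. StringEncoder is sarama's stock string
+// Encoder; `t` is a TestReporter.
+//
+//	NewMockFetchResponse(t, 1).
+//		SetMessage("my_topic", 0, 0, StringEncoder("hello")).
+//		SetHighWaterMark("my_topic", 0, 1)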
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+ coordinators map[string]interface{}
+ t TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+ return &MockConsumerMetadataResponse{
+ coordinators: make(map[string]interface{}),
+ t: t,
+ }
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = broker
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = kerror
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ConsumerMetadataRequest)
+ group := req.ConsumerGroup
+ res := &ConsumerMetadataResponse{}
+ v := mr.coordinators[group]
+ switch v := v.(type) {
+ case *MockBroker:
+ res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+ case KError:
+ res.Err = v
+ }
+ return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+ errors map[string]map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+ return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[string]map[int32]KError)
+ }
+ topics := mr.errors[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]KError)
+ mr.errors[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ topics[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetCommitRequest)
+ group := req.ConsumerGroup
+ res := &OffsetCommitResponse{}
+ for topic, partitions := range req.blocks {
+ for partition := range partitions {
+ res.AddError(topic, partition, mr.getError(group, topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+ topics := mr.errors[group]
+ if topics == nil {
+ return ErrNoError
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+ errors map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+ return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[int32]KError)
+ }
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ mr.errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*ProduceRequest)
+ res := &ProduceResponse{}
+ for topic, partitions := range req.records {
+ for partition := range partitions {
+ res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+ offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+ t TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+ return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+ if mr.offsets == nil {
+ mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ topics := mr.offsets[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ mr.offsets[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ topics[topic] = partitions
+ }
+ partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
+ return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
+ req := reqBody.(*OffsetFetchRequest)
+ group := req.ConsumerGroup
+ res := &OffsetFetchResponse{}
+ for topic, partitions := range mr.offsets[group] {
+ for partition, block := range partitions {
+ res.AddBlock(topic, partition, block)
+ }
+ }
+ return res
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
new file mode 100644
index 0000000..b21ea63
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go
@@ -0,0 +1,190 @@
+package sarama
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests
+// that tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
+
+type offsetCommitRequestBlock struct {
+ offset int64
+ timestamp int64
+ metadata string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(b.offset)
+ if version == 1 {
+ pe.putInt64(b.timestamp)
+ } else if b.timestamp != 0 {
+ Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+ }
+
+ return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 1 {
+ if b.timestamp, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+ b.metadata, err = pd.getString()
+ return err
+}
+
+type OffsetCommitRequest struct {
+ ConsumerGroup string
+ ConsumerGroupGeneration int32 // v1 or later
+ ConsumerID string // v1 or later
+ RetentionTime int64 // v2 or later
+
+ // Version can be:
+ // - 0 (kafka 0.8.1 and later)
+ // - 1 (kafka 0.8.2 and later)
+ // - 2 (kafka 0.9.0 and later)
+ Version int16
+ blocks map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+ if r.Version < 0 || r.Version > 2 {
+ return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+ }
+
+ if err := pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ConsumerGroupGeneration)
+ if err := pe.putString(r.ConsumerID); err != nil {
+ return err
+ }
+ } else {
+ if r.ConsumerGroupGeneration != 0 {
+ Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ if r.ConsumerID != "" {
+ Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ }
+
+ if r.Version >= 2 {
+ pe.putInt64(r.RetentionTime)
+ } else if r.RetentionTime != 0 {
+ Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+ }
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.ConsumerID, err = pd.getString(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ if r.RetentionTime, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetCommitRequestBlock{}
+ if err := block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ case 2:
+ return V0_9_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ }
+
+ r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
+}
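+
+// Example (sketch): commit offset 42 for my_topic/0 with the broker-side
+// receive timestamp, outside of Kafka-managed group membership. The group
+// name is illustrative.
+//
+//	req := &OffsetCommitRequest{
+//		Version:                 1,
+//		ConsumerGroup:           "my-group",
+//		ConsumerGroupGeneration: GroupGenerationUndefined,
+//	}
+//	req.AddBlock("my_topic", 0, 42, ReceiveTime, "")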
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
new file mode 100644
index 0000000..7f277e7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go
@@ -0,0 +1,85 @@
+package sarama
+
+type OffsetCommitResponse struct {
+ Errors map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]KError)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ r.Errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, kerror := range partitions {
+ pe.putInt32(partition)
+ pe.putInt16(int16(kerror))
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Errors = make(map[string]map[int32]KError, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numErrors, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name] = make(map[int32]KError, numErrors)
+
+ for j := 0; j < numErrors; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.Errors[name][id] = KError(tmp)
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+ return 8
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+ return 0
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..b19fe79
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
@@ -0,0 +1,81 @@
+package sarama
+
+type OffsetFetchRequest struct {
+ ConsumerGroup string
+ Version int16
+ partitions map[string][]int32
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+ if r.Version < 0 || r.Version > 1 {
+ return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+ }
+
+ if err = pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(r.partitions)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.partitions {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if partitionCount == 0 {
+ return nil
+ }
+ r.partitions = make(map[string][]int32)
+ for i := 0; i < partitionCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ r.partitions[topic] = partitions
+ }
+ return nil
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_8_2_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+ if r.partitions == nil {
+ r.partitions = make(map[string][]int32)
+ }
+
+ r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..323220e
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
@@ -0,0 +1,143 @@
+package sarama
+
+type OffsetFetchResponseBlock struct {
+ Offset int64
+ Metadata string
+ Err KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Metadata, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ return nil
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
+ pe.putInt64(b.Offset)
+
+ err = pe.putString(b.Metadata)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt16(int16(b.Err))
+
+ return nil
+}
+
+type OffsetFetchResponse struct {
+ Blocks map[string]map[int32]*OffsetFetchResponseBlock
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numBlocks == 0 {
+ r.Blocks[name] = nil
+ continue
+ }
+ r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetFetchResponseBlock)
+ err = block.decode(pd)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+ return 9
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+ return 0
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+ return minVersion
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ partitions := r.Blocks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ partitions[partition] = block
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
new file mode 100644
index 0000000..6c01f95
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_manager.go
@@ -0,0 +1,560 @@
+package sarama
+
+import (
+ "sync"
+ "time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+ // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+ // It will return an error if this OffsetManager is already managing the given
+ // topic/partition.
+ ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+ // Close stops the OffsetManager from managing offsets. It is required to call
+ // this function before an OffsetManager object passes out of scope, as it
+ // will otherwise leak memory. You must call this after all the
+ // PartitionOffsetManagers are closed.
+ Close() error
+}
+
+type offsetManager struct {
+ client Client
+ conf *Config
+ group string
+
+ lock sync.Mutex
+ poms map[string]map[int32]*partitionOffsetManager
+ boms map[*Broker]*brokerOffsetManager
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the offset manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ om := &offsetManager{
+ client: client,
+ conf: client.Config(),
+ group: group,
+ poms: make(map[string]map[int32]*partitionOffsetManager),
+ boms: make(map[*Broker]*brokerOffsetManager),
+ }
+
+ return om, nil
+}
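+
+// Example (sketch; `client` is an existing Client, error handling elided).
+// Note the close order: the PartitionOffsetManager is closed before the
+// OffsetManager, which in turn is closed before the client.
+//
+//	om, _ := NewOffsetManagerFromClient("my-group", client)
+//	defer om.Close()
+//	pom, _ := om.ManagePartition("my_topic", 0)
+//	defer pom.Close()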
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+ pom, err := om.newPartitionOffsetManager(topic, partition)
+ if err != nil {
+ return nil, err
+ }
+
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ topicManagers := om.poms[topic]
+ if topicManagers == nil {
+ topicManagers = make(map[int32]*partitionOffsetManager)
+ om.poms[topic] = topicManagers
+ }
+
+ if topicManagers[partition] != nil {
+ return nil, ConfigurationError("That topic/partition is already being managed")
+ }
+
+ topicManagers[partition] = pom
+ return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+ return nil
+}
+
+func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom := om.boms[broker]
+ if bom == nil {
+ bom = om.newBrokerOffsetManager(broker)
+ om.boms[broker] = bom
+ }
+
+ bom.refs++
+
+ return bom
+}
+
+func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ bom.refs--
+
+ if bom.refs == 0 {
+ close(bom.updateSubscriptions)
+ if om.boms[bom.broker] == bom {
+ delete(om.boms, bom.broker)
+ }
+ }
+}
+
+func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.boms, bom.broker)
+}
+
+func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
+ om.lock.Lock()
+ defer om.lock.Unlock()
+
+ delete(om.poms[pom.topic], pom.partition)
+ if len(om.poms[pom.topic]) == 0 {
+ delete(om.poms, pom.topic)
+ }
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+ // NextOffset returns the next offset that should be consumed for the managed
+ // partition, accompanied by metadata which can be used to reconstruct the state
+ // of the partition consumer when it resumes. NextOffset() will return
+ // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+ // was committed for this partition yet.
+ NextOffset() (int64, string)
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(offset int64, metadata string)
+
+ // ResetOffset resets to the provided offset, alongside a metadata string that
+ // represents the state of the partition consumer at that point in time. ResetOffset
+ // acts as a counterpart to MarkOffset, the difference being that it allows
+ // resetting an offset to an earlier or smaller value, where MarkOffset only
+ // allows incrementing the offset. See MarkOffset for more details.
+ ResetOffset(offset int64, metadata string)
+
+ // Errors returns a read channel of errors that occur during offset management, if
+ // enabled. By default, errors are logged and not returned over this channel. If
+ // you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+ // return immediately, after which you should wait until the 'errors' channel has
+ // been drained and closed. It is required to call this function (or Close) before
+ // a PartitionOffsetManager object passes out of scope, as it will otherwise leak
+ // memory. You must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionOffsetManager from managing offsets. It is required to
+ // call this function (or AsyncClose) before a PartitionOffsetManager object
+ // passes out of scope, as it will otherwise leak memory. You must call this
+ // before calling Close on the underlying client.
+ Close() error
+}
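+
+// Example (sketch): resume a partition consumer from the managed offset and
+// mark progress. `consumer` is an existing Consumer and `process` stands in
+// for application logic; note the +1 when marking, per the MarkOffset
+// convention above.
+//
+//	offset, _ := pom.NextOffset()
+//	pc, _ := consumer.ConsumePartition("my_topic", 0, offset)
+//	for msg := range pc.Messages() {
+//		process(msg)
+//		pom.MarkOffset(msg.Offset+1, "")
+//	}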
+
+type partitionOffsetManager struct {
+ parent *offsetManager
+ topic string
+ partition int32
+
+ lock sync.Mutex
+ offset int64
+ metadata string
+ dirty bool
+ clean sync.Cond
+ broker *brokerOffsetManager
+
+ errors chan *ConsumerError
+ rebalance chan none
+ dying chan none
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+ pom := &partitionOffsetManager{
+ parent: om,
+ topic: topic,
+ partition: partition,
+ errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
+ rebalance: make(chan none, 1),
+ dying: make(chan none),
+ }
+ pom.clean.L = &pom.lock
+
+ if err := pom.selectBroker(); err != nil {
+ return nil, err
+ }
+
+ if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
+ return nil, err
+ }
+
+ pom.broker.updateSubscriptions <- pom
+
+ go withRecover(pom.mainLoop)
+
+ return pom, nil
+}
+
+func (pom *partitionOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-pom.rebalance:
+ if err := pom.selectBroker(); err != nil {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ } else {
+ pom.broker.updateSubscriptions <- pom
+ }
+ case <-pom.dying:
+ if pom.broker != nil {
+ select {
+ case <-pom.rebalance:
+ case pom.broker.updateSubscriptions <- pom:
+ }
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ }
+ pom.parent.abandonPartitionOffsetManager(pom)
+ close(pom.errors)
+ return
+ }
+ }
+}
+
+func (pom *partitionOffsetManager) selectBroker() error {
+ if pom.broker != nil {
+ pom.parent.unrefBrokerOffsetManager(pom.broker)
+ pom.broker = nil
+ }
+
+ var broker *Broker
+ var err error
+
+ if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
+ return err
+ }
+
+ pom.broker = pom.parent.refBrokerOffsetManager(broker)
+ return nil
+}
+
+func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
+ request := new(OffsetFetchRequest)
+ request.Version = 1
+ request.ConsumerGroup = pom.parent.group
+ request.AddPartition(pom.topic, pom.partition)
+
+ response, err := pom.broker.broker.FetchOffset(request)
+ if err != nil {
+ return err
+ }
+
+ block := response.GetBlock(pom.topic, pom.partition)
+ if block == nil {
+ return ErrIncompleteResponse
+ }
+
+ switch block.Err {
+ case ErrNoError:
+ pom.offset = block.Offset
+ pom.metadata = block.Metadata
+ return nil
+ case ErrNotCoordinatorForConsumer:
+ if retries <= 0 {
+ return block.Err
+ }
+ if err := pom.selectBroker(); err != nil {
+ return err
+ }
+ return pom.fetchInitialOffset(retries - 1)
+ case ErrOffsetsLoadInProgress:
+ if retries <= 0 {
+ return block.Err
+ }
+ time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
+ return pom.fetchInitialOffset(retries - 1)
+ default:
+ return block.Err
+ }
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+ cErr := &ConsumerError{
+ Topic: pom.topic,
+ Partition: pom.partition,
+ Err: err,
+ }
+
+ if pom.parent.conf.Consumer.Return.Errors {
+ pom.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+ return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset > pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset <= pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset == offset && pom.metadata == metadata {
+ pom.dirty = false
+ pom.clean.Signal()
+ }
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset >= 0 {
+ return pom.offset, pom.metadata
+ }
+
+ return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+ go func() {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ for pom.dirty {
+ pom.clean.Wait()
+ }
+
+ close(pom.dying)
+ }()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+ pom.AsyncClose()
+
+ var errors ConsumerErrors
+ for err := range pom.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+// Broker Offset Manager
+
+type brokerOffsetManager struct {
+ parent *offsetManager
+ broker *Broker
+ timer *time.Ticker
+ updateSubscriptions chan *partitionOffsetManager
+ subscriptions map[*partitionOffsetManager]none
+ refs int
+}
+
+func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
+ bom := &brokerOffsetManager{
+ parent: om,
+ broker: broker,
+ timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
+ updateSubscriptions: make(chan *partitionOffsetManager),
+ subscriptions: make(map[*partitionOffsetManager]none),
+ }
+
+ go withRecover(bom.mainLoop)
+
+ return bom
+}
+
+func (bom *brokerOffsetManager) mainLoop() {
+ for {
+ select {
+ case <-bom.timer.C:
+ if len(bom.subscriptions) > 0 {
+ bom.flushToBroker()
+ }
+ case s, ok := <-bom.updateSubscriptions:
+ if !ok {
+ bom.timer.Stop()
+ return
+ }
+ if _, ok := bom.subscriptions[s]; ok {
+ delete(bom.subscriptions, s)
+ } else {
+ bom.subscriptions[s] = none{}
+ }
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) flushToBroker() {
+ request := bom.constructRequest()
+ if request == nil {
+ return
+ }
+
+ response, err := bom.broker.CommitOffset(request)
+
+ if err != nil {
+ bom.abort(err)
+ return
+ }
+
+ for s := range bom.subscriptions {
+ if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
+ continue
+ }
+
+ var err KError
+ var ok bool
+
+ if response.Errors[s.topic] == nil {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+ if err, ok = response.Errors[s.topic][s.partition]; !ok {
+ s.handleError(ErrIncompleteResponse)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ continue
+ }
+
+ switch err {
+ case ErrNoError:
+ block := request.blocks[s.topic][s.partition]
+ s.updateCommitted(block.offset, block.metadata)
+ case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+ ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+ // not a critical error, we just need to redispatch
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+ // nothing we can do about this, just tell the user and carry on
+ s.handleError(err)
+ case ErrOffsetsLoadInProgress:
+ // nothing wrong but we didn't commit, we'll get it next time round
+ break
+ case ErrUnknownTopicOrPartition:
+ // let the user know *and* try redispatching - if topic-auto-create is
+ // enabled, redispatching should trigger a metadata request and create the
+ // topic; if not then re-dispatching won't help, but we've let the user
+ // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
+ fallthrough
+ default:
+ // dunno, tell the user and try redispatching
+ s.handleError(err)
+ delete(bom.subscriptions, s)
+ s.rebalance <- none{}
+ }
+ }
+}
+
+func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
+ var r *OffsetCommitRequest
+ var perPartitionTimestamp int64
+ if bom.parent.conf.Consumer.Offsets.Retention == 0 {
+ perPartitionTimestamp = ReceiveTime
+ r = &OffsetCommitRequest{
+ Version: 1,
+ ConsumerGroup: bom.parent.group,
+ ConsumerGroupGeneration: GroupGenerationUndefined,
+ }
+ } else {
+ r = &OffsetCommitRequest{
+ Version: 2,
+ RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
+ ConsumerGroup: bom.parent.group,
+ ConsumerGroupGeneration: GroupGenerationUndefined,
+ }
+
+ }
+
+ for s := range bom.subscriptions {
+ s.lock.Lock()
+ if s.dirty {
+ r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
+ }
+ s.lock.Unlock()
+ }
+
+ if len(r.blocks) > 0 {
+ return r
+ }
+
+ return nil
+}
+
+func (bom *brokerOffsetManager) abort(err error) {
+ _ = bom.broker.Close() // we don't care about the error this might return, we already have one
+ bom.parent.abandonBroker(bom)
+
+ for pom := range bom.subscriptions {
+ pom.handleError(err)
+ pom.rebalance <- none{}
+ }
+
+ for s := range bom.updateSubscriptions {
+ if _, ok := bom.subscriptions[s]; !ok {
+ s.handleError(err)
+ s.rebalance <- none{}
+ }
+ }
+
+ bom.subscriptions = make(map[*partitionOffsetManager]none)
+}
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
new file mode 100644
index 0000000..6c26960
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_request.go
@@ -0,0 +1,132 @@
+package sarama
+
+type offsetRequestBlock struct {
+ time int64
+ maxOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(b.time)
+ if version == 0 {
+ pe.putInt32(b.maxOffsets)
+ }
+
+ return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.time, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 0 {
+ if b.maxOffsets, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type OffsetRequest struct {
+ Version int16
+ blocks map[string]map[int32]*offsetRequestBlock
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+ pe.putInt32(-1) // replica ID is always -1 for clients
+ err := pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ // Ignore replica ID
+ if _, err := pd.getInt32(); err != nil {
+ return err
+ }
+ blockCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if blockCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ for i := 0; i < blockCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetRequestBlock{}
+ if err := block.decode(pd, version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+ return 2
+}
+
+func (r *OffsetRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ }
+
+ tmp := new(offsetRequestBlock)
+ tmp.time = time
+ if r.Version == 0 {
+ tmp.maxOffsets = maxOffsets
+ }
+
+ r.blocks[topic][partitionID] = tmp
+}
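+
+// Example (sketch): request the newest available offset of my_topic/0;
+// OffsetNewest (-1) is the package-level constant defined elsewhere in
+// sarama.
+//
+//	req := &OffsetRequest{Version: 1}
+//	req.AddBlock("my_topic", 0, OffsetNewest, 1)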
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
new file mode 100644
index 0000000..9a9cfe9
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/offset_response.go
@@ -0,0 +1,174 @@
+package sarama
+
+type OffsetResponseBlock struct {
+ Err KError
+ Offsets []int64 // Version 0
+ Offset int64 // Version 1
+ Timestamp int64 // Version 1
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ if version == 0 {
+ b.Offsets, err = pd.getInt64Array()
+
+ return err
+ }
+
+ b.Timestamp, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // For backwards compatibility, put the offset in the offsets array too
+ b.Offsets = []int64{b.Offset}
+
+ return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+
+ if version == 0 {
+ return pe.putInt64Array(b.Offsets)
+ }
+
+ pe.putInt64(b.Timestamp)
+ pe.putInt64(b.Offset)
+
+ return nil
+}
+
+type OffsetResponse struct {
+ Version int16
+ Blocks map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+ if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.version()); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+ return 2
+}
+
+func (r *OffsetResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_10_1_0
+ default:
+ return minVersion
+ }
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*OffsetResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
new file mode 100644
index 0000000..f5b8b10
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_decoder.go
@@ -0,0 +1,59 @@
+package sarama
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+ // Primitives
+ getInt8() (int8, error)
+ getInt16() (int16, error)
+ getInt32() (int32, error)
+ getInt64() (int64, error)
+ getVarint() (int64, error)
+ getArrayLength() (int, error)
+
+ // Collections
+ getBytes() ([]byte, error)
+ getVarintBytes() ([]byte, error)
+ getRawBytes(length int) ([]byte, error)
+ getString() (string, error)
+ getNullableString() (*string, error)
+ getInt32Array() ([]int32, error)
+ getInt64Array() ([]int64, error)
+ getStringArray() ([]string, error)
+
+ // Subsets
+ remaining() int
+ getSubset(length int) (packetDecoder, error)
+ peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+
+ // Stacks, see PushDecoder
+ push(in pushDecoder) error
+ pop() error
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+ // Saves the offset into the input buffer as the location to actually read the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and check the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+ // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+ check(curOffset int, buf []byte) error
+}
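+
+// A typical push/pop sequence (sketch; lengthField is this package's 4-byte
+// length field, which is assumed here to implement pushDecoder as well as
+// pushEncoder):
+//
+//	if err := pd.push(&lengthField{}); err != nil {
+//		return err
+//	}
+//	// ... decode the length-delimited payload ...
+//	if err := pd.pop(); err != nil { // verifies the decoded length matches
+//		return err
+//	}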
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance, varint-encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+ pushDecoder
+ decoder
+}
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
new file mode 100644
index 0000000..aecd2b8
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/packet_encoder.go
@@ -0,0 +1,64 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+ // Primitives
+ putInt8(in int8)
+ putInt16(in int16)
+ putInt32(in int32)
+ putInt64(in int64)
+ putVarint(in int64)
+ putArrayLength(in int) error
+
+ // Collections
+ putBytes(in []byte) error
+ putVarintBytes(in []byte) error
+ putRawBytes(in []byte) error
+ putString(in string) error
+ putNullableString(in *string) error
+ putStringArray(in []string) error
+ putInt32Array(in []int32) error
+ putInt64Array(in []int64) error
+
+ // Provide the current offset to record the batch size metric
+ offset() int
+
+ // Stacks, see PushEncoder
+ push(in pushEncoder)
+ pop() error
+
+ // To record metrics when provided
+ metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+ // Saves the offset into the input buffer as the location to actually write the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and write the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+ // of data to the saved offset, based on the data between the saved offset and curOffset.
+ run(curOffset int, buf []byte) error
+}
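+
+// A typical push/pop sequence on the encoding side (sketch, mirroring the
+// produce-request encoder later in this package):
+//
+//	pe.push(&lengthField{}) // reserve 4 bytes for the length
+//	if err := records.encode(pe); err != nil {
+//		return err
+//	}
+//	if err := pe.pop(); err != nil { // back-fill the length now that it is known
+//		return err
+//	}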
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance, varint-encoded length
+// fields).
+type dynamicPushEncoder interface {
+ pushEncoder
+
+ // Called during pop() to adjust the length of the field.
+ // It should return the difference in bytes between the last computed length and current length.
+ adjustLength(currOffset int) int
+}
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
new file mode 100644
index 0000000..9729327
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/partitioner.go
@@ -0,0 +1,135 @@
+package sarama
+
+import (
+ "hash"
+ "hash/fnv"
+ "math/rand"
+ "time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+ // Partition takes a message and partition count and chooses a partition
+ Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+ // RequiresConsistency indicates to the user of the partitioner whether the
+ // mapping of key->partition is consistent or not. Specifically, if a
+ // partitioner requires consistency then it must be allowed to choose from all
+ // partitions (even ones known to be unavailable), and its choice must be
+ // respected by the caller. The obvious example is the HashPartitioner.
+ RequiresConsistency() bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
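+
+// Example (sketch): a partitioner is selected through the producer
+// configuration; the constructor is invoked once per topic.
+//
+//	cfg := NewConfig()
+//	cfg.Producer.Partitioner = NewHashPartitioner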
+
+type manualPartitioner struct{}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+ return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+type randomPartitioner struct {
+ generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+ p := new(randomPartitioner)
+ p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+ return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type roundRobinPartitioner struct {
+ partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+ return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if p.partition >= numPartitions {
+ p.partition = 0
+ }
+ ret := p.partition
+ p.partition++
+ return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type hashPartitioner struct {
+ random Partitioner
+ hasher hash.Hash32
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher.
+// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
+// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+ return func(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = hasher()
+ return p
+ }
+}
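+
+// Example (sketch): hashing with CRC32 instead of FNV-1a; crc32.NewIEEE from
+// hash/crc32 satisfies func() hash.Hash32, and `cfg` is a *Config as in the
+// previous example.
+//
+//	cfg.Producer.Partitioner = NewCustomHashPartitioner(crc32.NewIEEE)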
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulo the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if message.Key == nil {
+ return p.random.Partition(message, numPartitions)
+ }
+ bytes, err := message.Key.Encode()
+ if err != nil {
+ return -1, err
+ }
+ p.hasher.Reset()
+ _, err = p.hasher.Write(bytes)
+ if err != nil {
+ return -1, err
+ }
+ partition := int32(p.hasher.Sum32()) % numPartitions
+ if partition < 0 {
+ partition = -partition
+ }
+ return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+ return true
+}
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
new file mode 100644
index 0000000..d99cd71
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/prep_encoder.go
@@ -0,0 +1,149 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+ stack []pushEncoder
+ length int
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+ pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+ pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+ pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+ pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+ var buf [binary.MaxVarintLen64]byte
+ pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+ if in > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+ }
+ pe.length += 4
+ return nil
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+ pe.length += 4
+ if in == nil {
+ return nil
+ }
+ return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+ if in == nil {
+ pe.putVarint(-1)
+ return nil
+ }
+ pe.putVarint(int64(len(in)))
+ return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+ if in == nil {
+ pe.length += 2
+ return nil
+ }
+ return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+ pe.length += 2
+ if len(in) > math.MaxInt16 {
+ return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, str := range in {
+ if err := pe.putString(str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 8 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) offset() int {
+ return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+ in.saveOffset(pe.length)
+ pe.length += in.reserveLength()
+ pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+ in := pe.stack[len(pe.stack)-1]
+ pe.stack = pe.stack[:len(pe.stack)-1]
+ if dpe, ok := in.(dynamicPushEncoder); ok {
+ pe.length += dpe.adjustLength(pe.length)
+ }
+
+ return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
new file mode 100644
index 0000000..0ec4d8d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_request.go
@@ -0,0 +1,252 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements), but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+ // NoResponse doesn't send any response, the TCP ACK is all you get.
+ NoResponse RequiredAcks = 0
+ // WaitForLocal waits for only the local commit to succeed before responding.
+ WaitForLocal RequiredAcks = 1
+ // WaitForAll waits for all in-sync replicas to commit before responding.
+ // The minimum number of in-sync replicas is configured on the broker via
+ // the `min.insync.replicas` configuration key.
+ WaitForAll RequiredAcks = -1
+)
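+
+// Illustrative use (a sketch against sarama's Config type; the assignment is
+// an example, not a default):
+//
+//     cfg := sarama.NewConfig()
+//     cfg.Producer.RequiredAcks = sarama.WaitForAll // wait on the full ISR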
+
+type ProduceRequest struct {
+ TransactionalID *string
+ RequiredAcks RequiredAcks
+ Timeout int32
+ Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+ records map[string]map[int32]Records
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+ topicCompressionRatioMetric metrics.Histogram) int64 {
+ var topicRecordCount int64
+ for _, messageBlock := range msgSet.Messages {
+ // Is this a fake "message" wrapping real messages?
+ if messageBlock.Msg.Set != nil {
+ topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+ } else {
+ // A single uncompressed message
+ topicRecordCount++
+ }
+		// Better safe than sorry when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply by 100 for better precision
+ intCompressionRatio := int64(100 * compressionRatio)
+ compressionRatioMetric.Update(intCompressionRatio)
+ topicCompressionRatioMetric.Update(intCompressionRatio)
+ }
+ }
+ return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+ topicCompressionRatioMetric metrics.Histogram) int64 {
+ if recordBatch.compressedRecords != nil {
+ compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+ compressionRatioMetric.Update(compressionRatio)
+ topicCompressionRatioMetric.Update(compressionRatio)
+ }
+
+ return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+ if r.Version >= 3 {
+ if err := pe.putNullableString(r.TransactionalID); err != nil {
+ return err
+ }
+ }
+ pe.putInt16(int16(r.RequiredAcks))
+ pe.putInt32(r.Timeout)
+ metricRegistry := pe.metricRegistry()
+ var batchSizeMetric metrics.Histogram
+ var compressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+ compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+ }
+ totalRecordCount := int64(0)
+
+ err := pe.putArrayLength(len(r.records))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.records {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ topicRecordCount := int64(0)
+ var topicCompressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+ }
+ for id, records := range partitions {
+ startOffset := pe.offset()
+ pe.putInt32(id)
+ pe.push(&lengthField{})
+ err = records.encode(pe)
+ if err != nil {
+ return err
+ }
+ err = pe.pop()
+ if err != nil {
+ return err
+ }
+ if metricRegistry != nil {
+ if r.Version >= 3 {
+ topicRecordCount += updateBatchMetrics(records.recordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+ } else {
+ topicRecordCount += updateMsgSetMetrics(records.msgSet, compressionRatioMetric, topicCompressionRatioMetric)
+ }
+ batchSize := int64(pe.offset() - startOffset)
+ batchSizeMetric.Update(batchSize)
+ getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+ }
+ }
+ if topicRecordCount > 0 {
+ getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+ getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+ totalRecordCount += topicRecordCount
+ }
+ }
+ if totalRecordCount > 0 {
+ metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+ getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ if version >= 3 {
+ id, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ r.TransactionalID = id
+ }
+ requiredAcks, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.RequiredAcks = RequiredAcks(requiredAcks)
+ if r.Timeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+
+ r.records = make(map[string]map[int32]Records)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.records[topic] = make(map[int32]Records)
+
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ size, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ recordsDecoder, err := pd.getSubset(int(size))
+ if err != nil {
+ return err
+ }
+ var records Records
+ if err := records.decode(recordsDecoder); err != nil {
+ return err
+ }
+ r.records[topic][partition] = records
+ }
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+ return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_11_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+ if r.records == nil {
+ r.records = make(map[string]map[int32]Records)
+ }
+
+ if r.records[topic] == nil {
+ r.records[topic] = make(map[int32]Records)
+ }
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+ r.ensureRecords(topic, partition)
+ set := r.records[topic][partition].msgSet
+
+ if set == nil {
+ set = new(MessageSet)
+ r.records[topic][partition] = newLegacyRecords(set)
+ }
+
+ set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+ r.ensureRecords(topic, partition)
+ r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+ r.ensureRecords(topic, partition)
+ r.records[topic][partition] = newDefaultRecords(batch)
+}
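+
+// Illustrative sketch of assembling a request with the helpers above (topic,
+// partition, and payload are examples only):
+//
+//     req := &ProduceRequest{RequiredAcks: WaitForLocal, Timeout: 10000}
+//     req.AddMessage("my-topic", 0, &Message{Value: []byte("hello")})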
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode 100644
index 0000000..043c40f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_response.go
@@ -0,0 +1,183 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type ProduceResponseBlock struct {
+ Err KError
+ Offset int64
+ // only provided if Version >= 2 and the broker is configured with `LogAppendTime`
+ Timestamp time.Time
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ tmp, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Err = KError(tmp)
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 2 {
+ if millis, err := pd.getInt64(); err != nil {
+ return err
+ } else if millis != -1 {
+ b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+ }
+
+ return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(int16(b.Err))
+ pe.putInt64(b.Offset)
+
+ if version >= 2 {
+ timestamp := int64(-1)
+ if !b.Timestamp.Before(time.Unix(0, 0)) {
+ timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+ } else if !b.Timestamp.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+ }
+ pe.putInt64(timestamp)
+ }
+
+ return nil
+}
+
+type ProduceResponse struct {
+ Blocks map[string]map[int32]*ProduceResponseBlock
+ Version int16
+ ThrottleTime time.Duration // only provided if Version >= 1
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(ProduceResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ if r.Version >= 1 {
+ millis, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.ThrottleTime = time.Duration(millis) * time.Millisecond
+ }
+
+ return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for id, prb := range partitions {
+ pe.putInt32(id)
+ err = prb.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ if r.Version >= 1 {
+ pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
+ }
+ return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+ return 0
+}
+
+func (r *ProduceResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V0_9_0_0
+ case 2:
+ return V0_10_0_0
+ case 3:
+ return V0_11_0_0
+ default:
+ return minVersion
+ }
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
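+
+// Illustrative sketch: inspecting a per-partition result after a produce
+// round-trip (assumes the package's ErrNoError sentinel; names are examples):
+//
+//     if block := resp.GetBlock("my-topic", 0); block != nil && block.Err != ErrNoError {
+//         // handle the per-partition produce error
+//     }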
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*ProduceResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &ProduceResponseBlock{Err: err}
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
new file mode 100644
index 0000000..627fdf0
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/produce_set.go
@@ -0,0 +1,224 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "time"
+)
+
+type partitionSet struct {
+ msgs []*ProducerMessage
+ recordsToSend Records
+ bufferBytes int
+}
+
+type produceSet struct {
+ parent *asyncProducer
+ msgs map[string]map[int32]*partitionSet
+
+ bufferBytes int
+ bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+ return &produceSet{
+ msgs: make(map[string]map[int32]*partitionSet),
+ parent: parent,
+ }
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+ var err error
+ var key, val []byte
+
+ if msg.Key != nil {
+ if key, err = msg.Key.Encode(); err != nil {
+ return err
+ }
+ }
+
+ if msg.Value != nil {
+ if val, err = msg.Value.Encode(); err != nil {
+ return err
+ }
+ }
+
+ timestamp := msg.Timestamp
+ if msg.Timestamp.IsZero() {
+ timestamp = time.Now()
+ }
+
+ partitions := ps.msgs[msg.Topic]
+ if partitions == nil {
+ partitions = make(map[int32]*partitionSet)
+ ps.msgs[msg.Topic] = partitions
+ }
+
+ var size int
+
+ set := partitions[msg.Partition]
+ if set == nil {
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ batch := &RecordBatch{
+ FirstTimestamp: timestamp,
+ Version: 2,
+ ProducerID: -1, /* No producer id */
+ Codec: ps.parent.conf.Producer.Compression,
+ }
+ set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+ size = recordBatchOverhead
+ } else {
+ set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+ }
+ partitions[msg.Partition] = set
+ }
+
+ set.msgs = append(set.msgs, msg)
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ // We are being conservative here to avoid having to prep encode the record
+ size += maximumRecordOverhead
+ rec := &Record{
+ Key: key,
+ Value: val,
+ TimestampDelta: timestamp.Sub(set.recordsToSend.recordBatch.FirstTimestamp),
+ }
+ size += len(key) + len(val)
+ if len(msg.Headers) > 0 {
+ rec.Headers = make([]*RecordHeader, len(msg.Headers))
+ for i := range msg.Headers {
+ rec.Headers[i] = &msg.Headers[i]
+ size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+ }
+ }
+ set.recordsToSend.recordBatch.addRecord(rec)
+ } else {
+ msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ msgToSend.Timestamp = timestamp
+ msgToSend.Version = 1
+ }
+ set.recordsToSend.msgSet.addMessage(msgToSend)
+ size = producerMessageOverhead + len(key) + len(val)
+ }
+
+ set.bufferBytes += size
+ ps.bufferBytes += size
+ ps.bufferCount++
+
+ return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+ req := &ProduceRequest{
+ RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+ Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ req.Version = 2
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ req.Version = 3
+ }
+
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ if req.Version >= 3 {
+ req.AddBatch(topic, partition, set.recordsToSend.recordBatch)
+ continue
+ }
+ if ps.parent.conf.Producer.Compression == CompressionNone {
+ req.AddSet(topic, partition, set.recordsToSend.msgSet)
+ } else {
+ // When compression is enabled, the entire set for each partition is compressed
+ // and sent as the payload of a single fake "message" with the appropriate codec
+ // set and no key. When the server sees a message with a compression codec, it
+ // decompresses the payload and treats the result as its message set.
+ payload, err := encode(set.recordsToSend.msgSet, ps.parent.conf.MetricRegistry)
+ if err != nil {
+ Logger.Println(err) // if this happens, it's basically our fault.
+ panic(err)
+ }
+ compMsg := &Message{
+ Codec: ps.parent.conf.Producer.Compression,
+ Key: nil,
+ Value: payload,
+ Set: set.recordsToSend.msgSet, // Provide the underlying message set for accurate metrics
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ compMsg.Version = 1
+ compMsg.Timestamp = set.recordsToSend.msgSet.Messages[0].Msg.Timestamp
+ }
+ req.AddMessage(topic, partition, compMsg)
+ }
+ }
+ }
+
+ return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ cb(topic, partition, set.msgs)
+ }
+ }
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+ if ps.msgs[topic] == nil {
+ return nil
+ }
+ set := ps.msgs[topic][partition]
+ if set == nil {
+ return nil
+ }
+ ps.bufferBytes -= set.bufferBytes
+ ps.bufferCount -= len(set.msgs)
+ delete(ps.msgs[topic], partition)
+ return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+ version := 1
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ version = 2
+ }
+
+ switch {
+ // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+ case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
+ return true
+ // Would we overflow the size-limit of a compressed message-batch for this partition?
+ case ps.parent.conf.Producer.Compression != CompressionNone &&
+ ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+ ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+ return true
+ // Would we overflow simply in number of messages?
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) readyToFlush() bool {
+ switch {
+ // If we don't have any messages, nothing else matters
+ case ps.empty():
+ return false
+ // If all three config values are 0, we always flush as-fast-as-possible
+ case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+ return true
+ // If we've passed the message trigger-point
+ case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+ return true
+ // If we've passed the byte trigger-point
+ case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) empty() bool {
+ return ps.bufferCount == 0
+}
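+
+// Illustrative sketch of the flush trigger-points checked above, expressed as
+// producer configuration (values are examples only):
+//
+//     cfg := NewConfig()
+//     cfg.Producer.Flush.Messages = 100                   // flush every 100 messages...
+//     cfg.Producer.Flush.Bytes = 64 * 1024                // ...or 64 KiB buffered...
+//     cfg.Producer.Flush.Frequency = 5 * time.Millisecond // ...or every 5ms, whichever comes first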
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
new file mode 100644
index 0000000..7c3bc25
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_decoder.go
@@ -0,0 +1,306 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "math"
+)
+
+var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+var errInvalidByteSliceLengthType = PacketDecodingError{"invalid byteslice length type"}
+var errInvalidStringLength = PacketDecodingError{"invalid string length"}
+var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
+var errVarintOverflow = PacketDecodingError{"varint overflow"}
+
+type realDecoder struct {
+ raw []byte
+ off int
+ stack []pushDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+ if rd.remaining() < 1 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int8(rd.raw[rd.off])
+ rd.off++
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+ if rd.remaining() < 2 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+ rd.off += 2
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+ if rd.remaining() < 8 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ return tmp, nil
+}
+
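+// getVarint leans on binary.Varint's return conventions: n == 0 means the
+// buffer ended mid-varint and n < 0 means the value overflowed 64 bits, which
+// is why the two cases map to distinct errors below. As a worked example,
+// binary.Varint([]byte{0x03}) returns (-2, 1): zig-zag decoding maps 3 to -2.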
+func (rd *realDecoder) getVarint() (int64, error) {
+ tmp, n := binary.Varint(rd.raw[rd.off:])
+ if n == 0 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ if n < 0 {
+ rd.off -= n
+ return -1, errVarintOverflow
+ }
+ rd.off += n
+ return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+ rd.off += 4
+ if tmp > rd.remaining() {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ } else if tmp > 2*math.MaxUint16 {
+ return -1, errInvalidArrayLength
+ }
+ return tmp, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+ tmp, err := rd.getInt32()
+ if err != nil {
+ return nil, err
+ }
+ if tmp == -1 {
+ return nil, nil
+ }
+
+ return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+ tmp, err := rd.getVarint()
+ if err != nil {
+ return nil, err
+ }
+ if tmp == -1 {
+ return nil, nil
+ }
+
+ return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getString() (string, error) {
+ tmp, err := rd.getInt16()
+
+ if err != nil {
+ return "", err
+ }
+
+ n := int(tmp)
+
+ switch {
+ case n < -1:
+ return "", errInvalidStringLength
+ case n == -1:
+ return "", nil
+ case n == 0:
+ return "", nil
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return "", ErrInsufficientData
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+	tmp, err := rd.getInt16()
+	if err != nil || tmp == -1 {
+		return nil, err
+	}
+	// rewind the length prefix so getString can consume it itself; otherwise
+	// the first two bytes of the string body would be misread as a length
+	rd.off -= 2
+	str, err := rd.getString()
+	return &str, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 4*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int32, n)
+ for i := range ret {
+ ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if rd.remaining() < 8*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]int64, n)
+ for i := range ret {
+ ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+ n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ if n < 0 {
+ return nil, errInvalidArrayLength
+ }
+
+ ret := make([]string, n)
+ for i := range ret {
+ str, err := rd.getString()
+ if err != nil {
+ return nil, err
+ }
+
+ ret[i] = str
+ }
+ return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+ return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+ buf, err := rd.getRawBytes(length)
+ if err != nil {
+ return nil, err
+ }
+ return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+ if length < 0 {
+ return nil, errInvalidByteSliceLength
+ } else if length > rd.remaining() {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ start := rd.off
+ rd.off += length
+ return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+ if rd.remaining() < offset+length {
+ return nil, ErrInsufficientData
+ }
+ off := rd.off + offset
+ return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+ in.saveOffset(rd.off)
+
+ var reserve int
+ if dpd, ok := in.(dynamicPushDecoder); ok {
+ if err := dpd.decode(rd); err != nil {
+ return err
+ }
+ } else {
+ reserve = in.reserveLength()
+ if rd.remaining() < reserve {
+ rd.off = len(rd.raw)
+ return ErrInsufficientData
+ }
+ }
+
+ rd.stack = append(rd.stack, in)
+
+ rd.off += reserve
+
+ return nil
+}
+
+func (rd *realDecoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := rd.stack[len(rd.stack)-1]
+ rd.stack = rd.stack[:len(rd.stack)-1]
+
+ return in.check(rd.off, rd.raw)
+}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
new file mode 100644
index 0000000..51112e7
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/real_encoder.go
@@ -0,0 +1,148 @@
+package sarama
+
+import (
+ "encoding/binary"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+ raw []byte
+ off int
+ stack []pushEncoder
+ registry metrics.Registry
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+ re.raw[re.off] = byte(in)
+ re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+ binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+ re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+ binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+ re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+ binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+ re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+ re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+ re.putInt32(int32(in))
+ return nil
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+ if in == nil {
+ re.putInt32(-1)
+ return nil
+ }
+ re.putInt32(int32(len(in)))
+ return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+ if in == nil {
+ re.putVarint(-1)
+ return nil
+ }
+ re.putVarint(int64(len(in)))
+ return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+ re.putInt16(int16(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+ if in == nil {
+ re.putInt16(-1)
+ return nil
+ }
+ return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, val := range in {
+ if err := re.putString(val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt64(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) offset() int {
+ return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+ in.saveOffset(re.off)
+ re.off += in.reserveLength()
+ re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := re.stack[len(re.stack)-1]
+ re.stack = re.stack[:len(re.stack)-1]
+
+ return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+ return re.registry
+}
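+
+// Illustrative sketch of the two-pass encoding scheme realEncoder participates
+// in, mirroring this package's internal encode helper: a prepEncoder pass
+// computes the exact size, then a realEncoder pass fills the buffer.
+//
+//     func encodeSketch(e encoder, registry metrics.Registry) ([]byte, error) {
+//         var prep prepEncoder
+//         if err := e.encode(&prep); err != nil {
+//             return nil, err
+//         }
+//         re := realEncoder{raw: make([]byte, prep.length), registry: registry}
+//         if err := e.encode(&re); err != nil {
+//             return nil, err
+//         }
+//         return re.raw, nil
+//     }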
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/Shopify/sarama/record.go
new file mode 100644
index 0000000..cded308
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record.go
@@ -0,0 +1,113 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "time"
+)
+
+const (
+ controlMask = 0x20
+ maximumRecordOverhead = 5*binary.MaxVarintLen32 + binary.MaxVarintLen64 + 1
+)
+
+type RecordHeader struct {
+ Key []byte
+ Value []byte
+}
+
+func (h *RecordHeader) encode(pe packetEncoder) error {
+ if err := pe.putVarintBytes(h.Key); err != nil {
+ return err
+ }
+ return pe.putVarintBytes(h.Value)
+}
+
+func (h *RecordHeader) decode(pd packetDecoder) (err error) {
+ if h.Key, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+
+ if h.Value, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type Record struct {
+ Attributes int8
+ TimestampDelta time.Duration
+ OffsetDelta int64
+ Key []byte
+ Value []byte
+ Headers []*RecordHeader
+
+ length varintLengthField
+}
+
+func (r *Record) encode(pe packetEncoder) error {
+ pe.push(&r.length)
+ pe.putInt8(r.Attributes)
+ pe.putVarint(int64(r.TimestampDelta / time.Millisecond))
+ pe.putVarint(r.OffsetDelta)
+ if err := pe.putVarintBytes(r.Key); err != nil {
+ return err
+ }
+ if err := pe.putVarintBytes(r.Value); err != nil {
+ return err
+ }
+ pe.putVarint(int64(len(r.Headers)))
+
+ for _, h := range r.Headers {
+ if err := h.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return pe.pop()
+}
+
+func (r *Record) decode(pd packetDecoder) (err error) {
+ if err = pd.push(&r.length); err != nil {
+ return err
+ }
+
+ if r.Attributes, err = pd.getInt8(); err != nil {
+ return err
+ }
+
+ timestamp, err := pd.getVarint()
+ if err != nil {
+ return err
+ }
+ r.TimestampDelta = time.Duration(timestamp) * time.Millisecond
+
+ if r.OffsetDelta, err = pd.getVarint(); err != nil {
+ return err
+ }
+
+ if r.Key, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+
+ if r.Value, err = pd.getVarintBytes(); err != nil {
+ return err
+ }
+
+ numHeaders, err := pd.getVarint()
+ if err != nil {
+ return err
+ }
+
+ if numHeaders >= 0 {
+ r.Headers = make([]*RecordHeader, numHeaders)
+ }
+ for i := int64(0); i < numHeaders; i++ {
+ hdr := new(RecordHeader)
+ if err := hdr.decode(pd); err != nil {
+ return err
+ }
+ r.Headers[i] = hdr
+ }
+
+ return pd.pop()
+}
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
new file mode 100644
index 0000000..a8c533b
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/record_batch.go
@@ -0,0 +1,265 @@
+package sarama
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ "github.com/eapache/go-xerial-snappy"
+ "github.com/pierrec/lz4"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+ for _, r := range e {
+ if err := r.encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+ for i := range e {
+ rec := &Record{}
+ if err := rec.decode(pd); err != nil {
+ return err
+ }
+ e[i] = rec
+ }
+ return nil
+}
+
+type RecordBatch struct {
+ FirstOffset int64
+ PartitionLeaderEpoch int32
+ Version int8
+ Codec CompressionCodec
+ Control bool
+ LastOffsetDelta int32
+ FirstTimestamp time.Time
+ MaxTimestamp time.Time
+ ProducerID int64
+ ProducerEpoch int16
+ FirstSequence int32
+ Records []*Record
+ PartialTrailingRecord bool
+
+ compressedRecords []byte
+ recordsLen int // uncompressed records size
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+ if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+ }
+ pe.putInt64(b.FirstOffset)
+ pe.push(&lengthField{})
+ pe.putInt32(b.PartitionLeaderEpoch)
+ pe.putInt8(b.Version)
+ pe.push(newCRC32Field(crcCastagnoli))
+ pe.putInt16(b.computeAttributes())
+ pe.putInt32(b.LastOffsetDelta)
+
+ if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+ return err
+ }
+
+ if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+ return err
+ }
+
+ pe.putInt64(b.ProducerID)
+ pe.putInt16(b.ProducerEpoch)
+ pe.putInt32(b.FirstSequence)
+
+ if err := pe.putArrayLength(len(b.Records)); err != nil {
+ return err
+ }
+
+ if b.compressedRecords == nil {
+ if err := b.encodeRecords(pe); err != nil {
+ return err
+ }
+ }
+ if err := pe.putRawBytes(b.compressedRecords); err != nil {
+ return err
+ }
+
+ if err := pe.pop(); err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+ if b.FirstOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ batchLen, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if b.Version, err = pd.getInt8(); err != nil {
+ return err
+ }
+
+ if err = pd.push(&crc32Field{polynomial: crcCastagnoli}); err != nil {
+ return err
+ }
+
+ attributes, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+ b.Control = attributes&controlMask == controlMask
+
+ if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if b.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.FirstSequence, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ numRecs, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if numRecs >= 0 {
+ b.Records = make([]*Record, numRecs)
+ }
+
+ bufSize := int(batchLen) - recordBatchOverhead
+ recBuffer, err := pd.getRawBytes(bufSize)
+ if err != nil {
+ if err == ErrInsufficientData {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ switch b.Codec {
+ case CompressionNone:
+ case CompressionGZIP:
+ reader, err := gzip.NewReader(bytes.NewReader(recBuffer))
+ if err != nil {
+ return err
+ }
+ if recBuffer, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ case CompressionSnappy:
+ if recBuffer, err = snappy.Decode(recBuffer); err != nil {
+ return err
+ }
+ case CompressionLZ4:
+ reader := lz4.NewReader(bytes.NewReader(recBuffer))
+ if recBuffer, err = ioutil.ReadAll(reader); err != nil {
+ return err
+ }
+ default:
+ return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", b.Codec)}
+ }
+
+ b.recordsLen = len(recBuffer)
+ err = decode(recBuffer, recordsArray(b.Records))
+ if err == ErrInsufficientData {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+ var raw []byte
+ if b.Codec != CompressionNone {
+ var err error
+ if raw, err = encode(recordsArray(b.Records), nil); err != nil {
+ return err
+ }
+ b.recordsLen = len(raw)
+ }
+
+ switch b.Codec {
+ case CompressionNone:
+ offset := pe.offset()
+ if err := recordsArray(b.Records).encode(pe); err != nil {
+ return err
+ }
+ b.recordsLen = pe.offset() - offset
+ case CompressionGZIP:
+ var buf bytes.Buffer
+ writer := gzip.NewWriter(&buf)
+ if _, err := writer.Write(raw); err != nil {
+ return err
+ }
+ if err := writer.Close(); err != nil {
+ return err
+ }
+ b.compressedRecords = buf.Bytes()
+ case CompressionSnappy:
+ b.compressedRecords = snappy.Encode(raw)
+ case CompressionLZ4:
+ var buf bytes.Buffer
+ writer := lz4.NewWriter(&buf)
+ if _, err := writer.Write(raw); err != nil {
+ return err
+ }
+ if err := writer.Close(); err != nil {
+ return err
+ }
+ b.compressedRecords = buf.Bytes()
+ default:
+ return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", b.Codec)}
+ }
+
+ return nil
+}
+
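+// computeAttributes packs the codec into the low attribute bits and ORs in the
+// control flag; e.g. a gzip-compressed control batch (codec 1) yields
+// attributes 0x21 = 0x01 | 0x20.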
+func (b *RecordBatch) computeAttributes() int16 {
+ attr := int16(b.Codec) & int16(compressionCodecMask)
+ if b.Control {
+ attr |= controlMask
+ }
+ return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+ b.Records = append(b.Records, r)
+}
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
new file mode 100644
index 0000000..54ee7e3
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/records.go
@@ -0,0 +1,167 @@
+package sarama
+
+import "fmt"
+
+const (
+ unknownRecords = iota
+ legacyRecords
+ defaultRecords
+
+ magicOffset = 16
+ magicLength = 1
+)
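+
+// magicOffset works for both wire formats because each places the magic byte
+// at position 16: 8 bytes of offset and 4 bytes of length precede either the
+// legacy CRC (4 bytes) or the record batch's partition leader epoch (4 bytes).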
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+ recordsType int
+ msgSet *MessageSet
+ recordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+ return Records{recordsType: legacyRecords, msgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+ return Records{recordsType: defaultRecords, recordBatch: batch}
+}
+
+// setTypeFromFields sets type of Records depending on which of msgSet or recordBatch is not nil.
+// The first return value indicates whether both fields are nil (and the type is not set).
+// If both fields are not nil, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+ if r.msgSet == nil && r.recordBatch == nil {
+ return true, nil
+ }
+ if r.msgSet != nil && r.recordBatch != nil {
+ return false, fmt.Errorf("both msgSet and recordBatch are set, but record type is unknown")
+ }
+ r.recordsType = defaultRecords
+ if r.msgSet != nil {
+ r.recordsType = legacyRecords
+ }
+ return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ if r.msgSet == nil {
+ return nil
+ }
+ return r.msgSet.encode(pe)
+ case defaultRecords:
+ if r.recordBatch == nil {
+ return nil
+ }
+ return r.recordBatch.encode(pe)
+ }
+ return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+ dec, err := pd.peek(magicOffset, magicLength)
+ if err != nil {
+ return err
+ }
+
+ magic, err := dec.getInt8()
+ if err != nil {
+ return err
+ }
+
+ r.recordsType = defaultRecords
+ if magic < 2 {
+ r.recordsType = legacyRecords
+ }
+ return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+ if r.recordsType == unknownRecords {
+ if err := r.setTypeFromMagic(pd); err != nil {
+			return err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ r.msgSet = &MessageSet{}
+ return r.msgSet.decode(pd)
+ case defaultRecords:
+ r.recordBatch = &RecordBatch{}
+ return r.recordBatch.decode(pd)
+ }
+ return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return 0, err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ if r.msgSet == nil {
+ return 0, nil
+ }
+ return len(r.msgSet.Messages), nil
+ case defaultRecords:
+ if r.recordBatch == nil {
+ return 0, nil
+ }
+ return len(r.recordBatch.Records), nil
+ }
+ return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case unknownRecords:
+ return false, nil
+ case legacyRecords:
+ if r.msgSet == nil {
+ return false, nil
+ }
+ return r.msgSet.PartialTrailingMessage, nil
+ case defaultRecords:
+ if r.recordBatch == nil {
+ return false, nil
+ }
+ return r.recordBatch.PartialTrailingRecord, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ return false, nil
+ case defaultRecords:
+ if r.recordBatch == nil {
+ return false, nil
+ }
+ return r.recordBatch.Control, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 0000000..73310ca
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,119 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+type protocolBody interface {
+ encoder
+ versionedDecoder
+ key() int16
+ version() int16
+ requiredVersion() KafkaVersion
+}
+
+type request struct {
+ correlationID int32
+ clientID string
+ body protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+ pe.push(&lengthField{})
+ pe.putInt16(r.body.key())
+ pe.putInt16(r.body.version())
+ pe.putInt32(r.correlationID)
+ err = pe.putString(r.clientID)
+ if err != nil {
+ return err
+ }
+ err = r.body.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+ var key int16
+ if key, err = pd.getInt16(); err != nil {
+ return err
+ }
+ var version int16
+ if version, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if r.correlationID, err = pd.getInt32(); err != nil {
+ return err
+ }
+	r.clientID, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+ r.body = allocateBody(key, version)
+ if r.body == nil {
+ return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+ }
+ return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
+ lengthBytes := make([]byte, 4)
+ if _, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += len(lengthBytes)
+
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+ if length <= 4 || length > MaxRequestSize {
+ return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if _, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += len(encodedReq)
+
+ req = &request{}
+ if err := decode(encodedReq, req); err != nil {
+ return nil, bytesRead, err
+ }
+ return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+ switch key {
+ case 0:
+ return &ProduceRequest{}
+ case 1:
+ return &FetchRequest{}
+ case 2:
+ return &OffsetRequest{Version: version}
+ case 3:
+ return &MetadataRequest{}
+ case 8:
+ return &OffsetCommitRequest{Version: version}
+ case 9:
+ return &OffsetFetchRequest{}
+ case 10:
+ return &ConsumerMetadataRequest{}
+ case 11:
+ return &JoinGroupRequest{}
+ case 12:
+ return &HeartbeatRequest{}
+ case 13:
+ return &LeaveGroupRequest{}
+ case 14:
+ return &SyncGroupRequest{}
+ case 15:
+ return &DescribeGroupsRequest{}
+ case 16:
+ return &ListGroupsRequest{}
+ case 17:
+ return &SaslHandshakeRequest{}
+ case 18:
+ return &ApiVersionsRequest{}
+ }
+ return nil
+}
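+
+// Illustrative sketch (server side; the conn variable is assumed to be a
+// net.Conn or any io.Reader carrying a framed Kafka request):
+//
+//     req, n, err := decodeRequest(conn)
+//     if err != nil {
+//         // short read or malformed frame; n bytes were consumed
+//     }
+//     _ = req.body // e.g. a *ProduceRequest, selected by API key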
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 0000000..f3f4d27
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+ length int32
+ correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+ r.length, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if r.length <= 4 || r.length > MaxResponseSize {
+ return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+ }
+
+ r.correlationID, err = pd.getInt32()
+ return err
+}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
new file mode 100644
index 0000000..7d5dc60
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sarama.go
@@ -0,0 +1,99 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
+consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
+https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
+and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | Name | Type | Description |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+ | incoming-byte-rate | meter | Bytes/second read off all brokers |
+ | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker |
+ | outgoing-byte-rate | meter | Bytes/second written off all brokers |
+ | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker |
+ | request-rate | meter | Requests/second sent to all brokers |
+ | request-rate-for-broker- | meter | Requests/second sent to a given broker |
+ | request-size | histogram | Distribution of the request size in bytes for all brokers |
+ | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker |
+ | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
+ | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker |
+ | response-rate | meter | Responses/second received from all brokers |
+ | response-rate-for-broker- | meter | Responses/second received from a given broker |
+ | response-size | histogram | Distribution of the response size in bytes for all brokers |
+ | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker |
+ +----------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers, but they are included in the "all brokers" metrics.
+
+Producer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
+ | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
+ | record-send-rate | meter | Records/second sent to all topics |
+ | record-send-rate-for-topic- | meter | Records/second sent to a given topic |
+ | records-per-request | histogram | Distribution of the number of records sent per request for all topics |
+ | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic |
+ | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
+ | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+*/
+package sarama
+
+import (
+ "io/ioutil"
+ "log"
+)
+
+// Logger is the instance of a StdLogger interface that Sarama writes connection
+// management events to. By default it is set to discard all log messages via ioutil.Discard,
+// but you can set it to redirect wherever you want.
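+// For example:
+//
+//     sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)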
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..fbbc894
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,33 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+ Mechanism string
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.Mechanism); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.Mechanism, err = pd.getString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..ef290d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+ Err KError
+ EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+ return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+ return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 0000000..fe20708
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type SyncGroupRequest struct {
+ GroupId string
+ GenerationId int32
+ MemberId string
+ GroupAssignments map[string][]byte
+}
+
+func (r *SyncGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
+ return err
+ }
+ for memberId, memberAssignment := range r.GroupAssignments {
+ if err := pe.putString(memberId); err != nil {
+ return err
+ }
+ if err := pe.putBytes(memberAssignment); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupAssignments = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ memberAssignment, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+ }
+
+ return nil
+}
+
+func (r *SyncGroupRequest) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupRequest) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+ if r.GroupAssignments == nil {
+ r.GroupAssignments = make(map[string][]byte)
+ }
+
+ r.GroupAssignments[memberId] = memberAssignment
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
+ bin, err := encode(memberAssignment, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupAssignment(memberId, bin)
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
new file mode 100644
index 0000000..194b382
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_group_response.go
@@ -0,0 +1,41 @@
+package sarama
+
+type SyncGroupResponse struct {
+ Err KError
+ MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(r.MemberAssignment, assignment)
+ return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+ pe.putInt16(int16(r.Err))
+ return pe.putBytes(r.MemberAssignment)
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ kerr, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.Err = KError(kerr)
+
+ r.MemberAssignment, err = pd.getBytes()
+ return
+}
+
+func (r *SyncGroupResponse) key() int16 {
+ return 14
+}
+
+func (r *SyncGroupResponse) version() int16 {
+ return 0
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+ return V0_9_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
new file mode 100644
index 0000000..dd096b6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sync_producer.go
@@ -0,0 +1,164 @@
+package sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+ // SendMessage produces a given message, and returns only when it either has
+ // succeeded or failed to produce. It will return the partition and the offset
+ // of the produced message, or an error if the message failed to produce.
+ SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+ // SendMessages produces a given set of messages, and returns only when all
+ // messages in the set have either succeeded or failed. Note that messages
+ // can succeed and fail individually; if some succeed and some fail,
+ // SendMessages will return an error.
+ SendMessages(msgs []*ProducerMessage) error
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+}
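+
+// A minimal usage sketch (illustrative, not part of the upstream docs; the
+// broker address and topic are placeholders):
+//
+//	config := NewConfig()
+//	config.Producer.RequiredAcks = WaitForAll
+//	config.Producer.Return.Successes = true
+//	producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.Close()
+//	partition, offset, err := producer.SendMessage(&ProducerMessage{
+//		Topic: "my_topic",
+//		Value: StringEncoder("hello"),
+//	})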
+
+type syncProducer struct {
+ producer *asyncProducer
+ wg sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+ if config == nil {
+ config = NewConfig()
+ config.Producer.Return.Successes = true
+ }
+
+ if err := verifyProducerConfig(config); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducer(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+ if err := verifyProducerConfig(client.Config()); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+ sp := &syncProducer{producer: p}
+
+ sp.wg.Add(2)
+ go withRecover(sp.handleSuccesses)
+ go withRecover(sp.handleErrors)
+
+ return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+ if !config.Producer.Return.Errors {
+ return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+ }
+ if !config.Producer.Return.Successes {
+ return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+ }
+ return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+ oldMetadata := msg.Metadata
+ defer func() {
+ msg.Metadata = oldMetadata
+ }()
+
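+	// The Metadata field is borrowed as a reply channel: handleSuccesses or
+	// handleErrors sends on it exactly once, and the buffer of one means
+	// neither handler goroutine blocks on delivery.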
+ expectation := make(chan *ProducerError, 1)
+ msg.Metadata = expectation
+ sp.producer.Input() <- msg
+
+ if err := <-expectation; err != nil {
+ return -1, -1, err.Err
+ }
+
+ return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+ savedMetadata := make([]interface{}, len(msgs))
+ for i := range msgs {
+ savedMetadata[i] = msgs[i].Metadata
+ }
+ defer func() {
+ for i := range msgs {
+ msgs[i].Metadata = savedMetadata[i]
+ }
+ }()
+
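+	// Feed the messages in from a separate goroutine so that sending and the
+	// reply collection below proceed concurrently; each per-message channel
+	// is buffered, so the success/error handlers never block.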
+ expectations := make(chan chan *ProducerError, len(msgs))
+ go func() {
+ for _, msg := range msgs {
+ expectation := make(chan *ProducerError, 1)
+ msg.Metadata = expectation
+ sp.producer.Input() <- msg
+ expectations <- expectation
+ }
+ close(expectations)
+ }()
+
+ var errors ProducerErrors
+ for expectation := range expectations {
+ if err := <-expectation; err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+ defer sp.wg.Done()
+ for msg := range sp.producer.Successes() {
+ expectation := msg.Metadata.(chan *ProducerError)
+ expectation <- nil
+ }
+}
+
+func (sp *syncProducer) handleErrors() {
+ defer sp.wg.Done()
+ for err := range sp.producer.Errors() {
+ expectation := err.Msg.Metadata.(chan *ProducerError)
+ expectation <- err
+ }
+}
+
+func (sp *syncProducer) Close() error {
+ sp.producer.AsyncClose()
+ sp.wg.Wait()
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/Shopify/sarama/timestamp.go
new file mode 100644
index 0000000..372278d
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/timestamp.go
@@ -0,0 +1,40 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type Timestamp struct {
+ *time.Time
+}
+
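+// encode writes the time as an int64 count of milliseconds since the Unix
+// epoch, with -1 standing in for the zero time. For example (illustrative):
+// 2017-01-01T00:00:00Z encodes as 1483228800000, and time.Time{} encodes as -1.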
+func (t Timestamp) encode(pe packetEncoder) error {
+ timestamp := int64(-1)
+
+ if !t.Before(time.Unix(0, 0)) {
+ timestamp = t.UnixNano() / int64(time.Millisecond)
+ } else if !t.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", t)}
+ }
+
+ pe.putInt64(timestamp)
+ return nil
+}
+
+func (t Timestamp) decode(pd packetDecoder) error {
+ millis, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+	// negative timestamps are invalid; in these cases we should return
+ // a zero time
+ timestamp := time.Time{}
+ if millis >= 0 {
+ timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+
+ *t.Time = timestamp
+ return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
new file mode 100644
index 0000000..9d7b60f
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/utils.go
@@ -0,0 +1,184 @@
+package sarama
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "regexp"
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+ return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+ return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
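+
+// For example (illustrative): sort.Sort(int32Slice(partitions)) orders a
+// slice of partition IDs in ascending order.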
+
+func dupInt32Slice(input []int32) []int32 {
+ ret := make([]int32, 0, len(input))
+ for _, val := range input {
+ ret = append(ret, val)
+ }
+ return ret
+}
+
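+// withRecover runs fn, recovering from any panic and handing it to the
+// package-level PanicHandler when one is set (PanicHandler is declared
+// elsewhere in this package). A short wiring sketch (illustrative):
+//
+//	sarama.PanicHandler = func(err interface{}) {
+//		log.Println("sarama goroutine panicked:", err)
+//	}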
+func withRecover(fn func()) {
+ defer func() {
+ handler := PanicHandler
+ if handler != nil {
+ if err := recover(); err != nil {
+ handler(err)
+ }
+ }
+ }()
+
+ fn()
+}
+
+func safeAsyncClose(b *Broker) {
+ tmp := b // local var prevents clobbering in goroutine
+ go withRecover(func() {
+ if connected, _ := tmp.Connected(); connected {
+ if err := tmp.Close(); err != nil {
+ Logger.Println("Error closing broker", tmp.ID(), ":", err)
+ }
+ }
+ })
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+ Encode() ([]byte, error)
+ Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+ return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+ return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+ return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+ return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+ net.Conn
+ buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+ return &bufConn{
+ Conn: conn,
+ buf: bufio.NewReader(conn),
+ }
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+ return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+ // it's a struct rather than just typing the array directly to make it opaque and stop people
+ // generating their own arbitrary versions
+ version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+ return KafkaVersion{
+ version: [4]uint{major, minor, veryMinor, patch},
+ }
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+// V1.IsAtLeast(V2) // false
+// V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+ for i := range v.version {
+ if v.version[i] > other.version[i] {
+ return true
+ } else if v.version[i] < other.version[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Effective constants defining the supported kafka versions.
+var (
+ V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
+ V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
+ V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
+ V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
+ V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
+ V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+ V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+ V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+ V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+ V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+ V1_0_0_0 = newKafkaVersion(1, 0, 0, 0)
+ minVersion = V0_8_2_0
+)
+
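+// ParseKafkaVersion parses a version string into a KafkaVersion: 0.x
+// releases use four components ("0.10.2.1"), later releases three
+// ("1.0.0"). A short sketch (illustrative):
+//
+//	v, err := ParseKafkaVersion("0.11.0.0")
+//	if err == nil && v.IsAtLeast(V0_10_0_0) {
+//		// safe to rely on broker features introduced in 0.10
+//	}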
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+ var major, minor, veryMinor, patch uint
+ var err error
+ if s[0] == '0' {
+ err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+ } else {
+ err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+ }
+ if err != nil {
+ return minVersion, err
+ }
+ return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
+ if !regexp.MustCompile(pattern).MatchString(s) {
+ return fmt.Errorf("invalid version `%s`", s)
+ }
+ _, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+ return err
+}
+
+func (v KafkaVersion) String() string {
+ if v.version[0] == 0 {
+ return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+ } else {
+ return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+ }
+}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7f166c3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not compiled by
+// GopherJS, and "-tags safe" is not added to the go build command line.
+// The "disableunsafe" tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+ // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+ // internal reflect.Value fields. These values are valid before golang
+	// commit ecccf07e7f9d which changed the format. They are also valid
+ // after commit 82f48826c6c7 which changed the format again to mirror
+ // the original format. Code in the init function updates these offsets
+ // as necessary.
+ offsetPtr = ptrSize
+ offsetScalar = uintptr(0)
+ offsetFlag = ptrSize * 2
+
+ // flagKindWidth and flagKindShift indicate various bits that the
+ // reflect package uses internally to track kind information.
+ //
+ // flagRO indicates whether or not the value field of a reflect.Value is
+ // read-only.
+ //
+ // flagIndir indicates whether the value field of a reflect.Value is
+ // the actual data or a pointer to the data.
+ //
+ // These values are valid before golang commit 90a7c3c86944 which
+ // changed their positions. Code in the init function updates these
+ // flags as necessary.
+ flagKindWidth = uintptr(5)
+ flagKindShift = flagKindWidth - 1
+ flagRO = uintptr(1 << 0)
+ flagIndir = uintptr(1 << 1)
+)
+
+func init() {
+ // Older versions of reflect.Value stored small integers directly in the
+ // ptr field (which is named val in the older versions). Versions
+ // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+ // scalar for this purpose which unfortunately came before the flag
+ // field, so the offset of the flag field is different for those
+ // versions.
+ //
+ // This code constructs a new reflect.Value from a known small integer
+ // and checks if the size of the reflect.Value struct indicates it has
+ // the scalar field. When it does, the offsets are updated accordingly.
+ vv := reflect.ValueOf(0xf00)
+ if unsafe.Sizeof(vv) == (ptrSize * 4) {
+ offsetScalar = ptrSize * 2
+ offsetFlag = ptrSize * 3
+ }
+
+ // Commit 90a7c3c86944 changed the flag positions such that the low
+ // order bits are the kind. This code extracts the kind from the flags
+ // field and ensures it's the correct type. When it's not, the flag
+ // order has been changed to the newer format, so the flags are updated
+ // accordingly.
+ upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+ upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+ flagKindShift = 0
+ flagRO = 1 << 5
+ flagIndir = 1 << 6
+
+ // Commit adf9b30e5594 modified the flags to separate the
+ // flagRO flag into two bits which specifies whether or not the
+ // field is embedded. This causes flagIndir to move over a bit
+ // and means that flagRO is the combination of either of the
+ // original flagRO bit and the new bit.
+ //
+ // This code detects the change by extracting what used to be
+ // the indirect bit to ensure it's set. When it's not, the flag
+ // order has been changed to the newer format, so the flags are
+ // updated accordingly.
+ if upfv&flagIndir == 0 {
+ flagRO = 3 << 5
+ flagIndir = 1 << 7
+ }
+ }
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+ indirects := 1
+ vt := v.Type()
+ upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+ rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+ if rvf&flagIndir != 0 {
+ vt = reflect.PtrTo(v.Type())
+ indirects++
+ } else if offsetScalar != 0 {
+ // The value is in the scalar field when it's not one of the
+ // reference types.
+ switch vt.Kind() {
+ case reflect.Uintptr:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.UnsafePointer:
+ default:
+ upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+ offsetScalar)
+ }
+ }
+
+ pv := reflect.NewAt(vt, upv)
+ rv = pv
+ for i := 0; i < indirects; i++ {
+ rv = rv.Elem()
+ }
+ return rv
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..1fe3cf3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+	nilAngleBytes = []byte("<nil>")
+	maxNewlineBytes = []byte("<max depth reached>\n")
+	maxShortBytes = []byte("<max>")
+	circularBytes = []byte("<already shown>")
+	circularShortBytes = []byte("<shown>")
+	invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+ // Technically calling one of these methods with a pointer receiver can
+	// mutate the value; however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+ // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+	strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
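+// For example (illustrative): with key sorting enabled, the keys of a
+// map[string]int are printed in "a", "b", "c" order instead of Go's
+// randomized map iteration order.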
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use sets this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value; however,
+	// in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+ // SortKeys specifies map keys should be sorted before being printed. Use
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
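+// A minimal usage sketch (illustrative, not part of the upstream docs): a
+// local ConfigState tuned for diffable test output, leaving the global
+// spew.Config untouched:
+//
+//	scs := spew.ConfigState{
+//		Indent:                  "\t",
+//		SortKeys:                true,
+//		DisablePointerAddresses: true,
+//	}
+//	scs.Dump(myVar) // myVar is a placeholder
+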
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows concurrent configuration
+options. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+  data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	%v: <*>{1 <*><shown>}
+	%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+ // is enabled
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different from empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+ // No other types existed at the time this code was written, but fall
+ // back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
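+
+A minimal usage sketch (the pointer address shown is illustrative):
+
+ type pair struct{ A, B int }
+ spew.Dump(&pair{1, 2})
+ // Output, approximately:
+ // (*main.pair)(0xc000014090)({
+ //  A: (int) 1,
+ //  B: (int) 2
+ // })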
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as an argument
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from the map used to
+ // detect circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+ // nil maps should be indicated as different from empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // No other types existed at the time this code was written, but fall
+ // back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
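+
+For example (a minimal sketch; myVar is a placeholder):
+
+ fmt.Printf("%#v\n", spew.NewFormatter(myVar))
+ // is equivalent to:
+ spew.Printf("%#v\n", myVar)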
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE
new file mode 100644
index 0000000..698a3f5
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
new file mode 100644
index 0000000..2d1b3d9
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -0,0 +1,34 @@
+circuit-breaker
+===============
+
+[Build Status](https://travis-ci.org/eapache/go-resiliency)
+[GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker)
+[Code of Conduct](https://eapache.github.io/conduct.html)
+
+The circuit-breaker resiliency pattern for golang.
+
+Creating a breaker takes three parameters:
+- error threshold (for opening the breaker)
+- success threshold (for closing the breaker)
+- timeout (how long to keep the breaker open)
+
+```go
+b := breaker.New(3, 1, 5*time.Second)
+
+for {
+ result := b.Run(func() error {
+ // communicate with some external service and
+ // return an error if the communication failed
+ return nil
+ })
+
+ switch result {
+ case nil:
+ // success!
+ case breaker.ErrBreakerOpen:
+ // our function wasn't run because the breaker was open
+ default:
+ // some other error
+ }
+}
+```
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
new file mode 100644
index 0000000..f88ca72
--- /dev/null
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -0,0 +1,161 @@
+// Package breaker implements the circuit-breaker resiliency pattern for Go.
+package breaker
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ErrBreakerOpen is the error returned from Run() when the function is not executed
+// because the breaker is currently open.
+var ErrBreakerOpen = errors.New("circuit breaker is open")
+
+const (
+ closed uint32 = iota
+ open
+ halfOpen
+)
+
+// Breaker implements the circuit-breaker resiliency pattern
+type Breaker struct {
+ errorThreshold, successThreshold int
+ timeout time.Duration
+
+ lock sync.Mutex
+ state uint32
+ errors, successes int
+ lastError time.Time
+}
+
+// New constructs a new circuit-breaker that starts closed.
+// From closed, the breaker opens if "errorThreshold" errors are seen
+// without an error-free period of at least "timeout". From open, the
+// breaker half-closes after "timeout". From half-open, the breaker closes
+// after "successThreshold" consecutive successes, or opens on a single error.
+func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
+ return &Breaker{
+ errorThreshold: errorThreshold,
+ successThreshold: successThreshold,
+ timeout: timeout,
+ }
+}
+
+// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function and pass along its return
+// value. It is safe to call Run concurrently on the same Breaker.
+func (b *Breaker) Run(work func() error) error {
+ state := atomic.LoadUint32(&b.state)
+
+ if state == open {
+ return ErrBreakerOpen
+ }
+
+ return b.doWork(state, work)
+}
+
+// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
+// already open, or it will run the given function in a separate goroutine.
+// If the function is run, Go will return nil immediately, and will *not* return
+// the return value of the function. It is safe to call Go concurrently on the
+// same Breaker.
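+//
+// For example (a minimal sketch; doWork is a placeholder func() error):
+//
+// if err := b.Go(doWork); err == ErrBreakerOpen {
+//  // doWork was never started
+// }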
+func (b *Breaker) Go(work func() error) error {
+ state := atomic.LoadUint32(&b.state)
+
+ if state == open {
+ return ErrBreakerOpen
+ }
+
+ // errcheck complains about ignoring the error return value, but
+ // that's on purpose; if you want an error from a goroutine you have to
+ // get it over a channel or something
+ go b.doWork(state, work)
+
+ return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+ var panicValue interface{}
+
+ result := func() error {
+ defer func() {
+ panicValue = recover()
+ }()
+ return work()
+ }()
+
+ if result == nil && panicValue == nil && state == closed {
+ // short-circuit the normal, success path without contending
+ // on the lock
+ return nil
+ }
+
+ // oh well, I guess we have to contend on the lock
+ b.processResult(result, panicValue)
+
+ if panicValue != nil {
+ // as close as Go lets us come to a "rethrow", although unfortunately
+ // we lose the original panicking location
+ panic(panicValue)
+ }
+
+ return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if result == nil && panicValue == nil {
+ if b.state == halfOpen {
+ b.successes++
+ if b.successes == b.successThreshold {
+ b.closeBreaker()
+ }
+ }
+ } else {
+ if b.errors > 0 {
+ expiry := b.lastError.Add(b.timeout)
+ if time.Now().After(expiry) {
+ b.errors = 0
+ }
+ }
+
+ switch b.state {
+ case closed:
+ b.errors++
+ if b.errors == b.errorThreshold {
+ b.openBreaker()
+ } else {
+ b.lastError = time.Now()
+ }
+ case halfOpen:
+ b.openBreaker()
+ }
+ }
+}
+
+func (b *Breaker) openBreaker() {
+ b.changeState(open)
+ go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+ b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+ time.Sleep(b.timeout)
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+ b.errors = 0
+ b.successes = 0
+ atomic.StoreUint32(&b.state, newState)
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
new file mode 100644
index 0000000..d6cf4f1
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+- 1.5.4
+- 1.6.1
+
+sudo: false
diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
new file mode 100644
index 0000000..5bf3688
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md
new file mode 100644
index 0000000..3f2695c
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/README.md
@@ -0,0 +1,13 @@
+# go-xerial-snappy
+
+[Build Status](https://travis-ci.org/eapache/go-xerial-snappy)
+
+Xerial-compatible Snappy framing support for golang.
+
+Packages using Xerial for snappy encoding use a framing format incompatible with
+basically everything else in existence. This package wraps Go's built-in snappy
+package to support it.
+
+Apps that use this format include Apache Kafka (see
+https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for
+details).
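+
+A minimal usage sketch (note that `Decode` reads the first 8 bytes of its
+input to check for the xerial header, so it assumes inputs of at least 8
+bytes):
+
+```go
+compressed := snappy.Encode([]byte("hello, world"))
+decoded, err := snappy.Decode(compressed)
+if err != nil {
+ // handle malformed input
+}
+fmt.Printf("%s\n", decoded) // hello, world
+```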
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 0000000..b8f8b51
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,43 @@
+package snappy
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ master "github.com/golang/snappy"
+)
+
+var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+ return master.Encode(nil, src)
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+ if !bytes.Equal(src[:8], xerialHeader) {
+ return master.Decode(nil, src)
+ }
+
+ var (
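+ // pos starts at 16: past the 8-byte magic and the two 4-byte
+ // version fields of the xerial framing header.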
+ pos = uint32(16)
+ max = uint32(len(src))
+ dst = make([]byte, 0, len(src))
+ chunk []byte
+ err error
+ )
+ for pos < max {
+ size := binary.BigEndian.Uint32(src[pos : pos+4])
+ pos += 4
+
+ chunk, err = master.Decode(chunk, src[pos:pos+size])
+ if err != nil {
+ return nil, err
+ }
+ pos += size
+ dst = append(dst, chunk...)
+ }
+ return dst, nil
+}
diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore
new file mode 100644
index 0000000..8365624
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml
new file mode 100644
index 0000000..235a40a
--- /dev/null
+++ b/vendor/github.com/eapache/queue/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+sudo: false
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE
new file mode 100644
index 0000000..d5f36db
--- /dev/null
+++ b/vendor/github.com/eapache/queue/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Evan Huus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md
new file mode 100644
index 0000000..8e78233
--- /dev/null
+++ b/vendor/github.com/eapache/queue/README.md
@@ -0,0 +1,16 @@
+Queue
+=====
+
+[Build Status](https://travis-ci.org/eapache/queue)
+[GoDoc](https://godoc.org/github.com/eapache/queue)
+[Code of Conduct](https://eapache.github.io/conduct.html)
+
+A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is in part because it is *not* thread-safe.
+
+Follows semantic versioning using https://gopkg.in/ - import from
+[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
+for guaranteed API stability.
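+
+A minimal usage sketch:
+
+```go
+q := queue.New()
+q.Add("a")
+q.Add("b")
+fmt.Println(q.Length()) // 2
+fmt.Println(q.Remove()) // a
+```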
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
new file mode 100644
index 0000000..71d1acd
--- /dev/null
+++ b/vendor/github.com/eapache/queue/queue.go
@@ -0,0 +1,102 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+// minQueueLen is the smallest capacity that the queue may have.
+// Must be power of 2 for bitwise modulus: x % n == x & (n - 1).
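+// For example, 21 % 16 == 21 & 15 == 5.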
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+ buf []interface{}
+ head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+ return &Queue{
+ buf: make([]interface{}, minQueueLen),
+ }
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+ return q.count
+}
+
+// resizes the queue to fit exactly twice its current contents
+// this can result in shrinking if the queue is less than half-full
+func (q *Queue) resize() {
+ newBuf := make([]interface{}, q.count<<1)
+
+ if q.tail > q.head {
+ copy(newBuf, q.buf[q.head:q.tail])
+ } else {
+ n := copy(newBuf, q.buf[q.head:])
+ copy(newBuf[n:], q.buf[:q.tail])
+ }
+
+ q.head = 0
+ q.tail = q.count
+ q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+ if q.count == len(q.buf) {
+ q.resize()
+ }
+
+ q.buf[q.tail] = elem
+ // bitwise modulus
+ q.tail = (q.tail + 1) & (len(q.buf) - 1)
+ q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+ if q.count <= 0 {
+ panic("queue: Peek() called on empty queue")
+ }
+ return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic. This method accepts both positive and
+// negative index values. Index 0 refers to the first element, and
+// index -1 refers to the last.
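+// For example, on a queue holding a, b, c (front to back), Get(0)
+// returns a and Get(-1) returns c.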
+func (q *Queue) Get(i int) interface{} {
+ // If indexing backwards, convert to positive index.
+ if i < 0 {
+ i += q.count
+ }
+ if i < 0 || i >= q.count {
+ panic("queue: Get() called with index out of range")
+ }
+ // bitwise modulus
+ return q.buf[(q.head+i)&(len(q.buf)-1)]
+}
+
+// Remove removes and returns the element from the front of the queue. If the
+// queue is empty, the call will panic.
+func (q *Queue) Remove() interface{} {
+ if q.count <= 0 {
+ panic("queue: Remove() called on empty queue")
+ }
+ ret := q.buf[q.head]
+ q.buf[q.head] = nil
+ // bitwise modulus
+ q.head = (q.head + 1) & (len(q.buf) - 1)
+ q.count--
+ // Resize down if the buffer is only 1/4 full.
+ if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
+ q.resize()
+ }
+ return ret
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
new file mode 100644
index 0000000..2de28da
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -0,0 +1,9 @@
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+Icon?
+ehthumbs.db
+Thumbs.db
+.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml
new file mode 100644
index 0000000..6369281
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml
@@ -0,0 +1,94 @@
+sudo: false
+language: go
+go:
+ - 1.5
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+ - tip
+
+before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+
+before_script:
+ - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
+ - sudo service mysql restart
+ - .travis/wait_mysql.sh
+ - mysql -e 'create database gotest;'
+
+matrix:
+ include:
+ - env: DB=MYSQL57
+ sudo: required
+ dist: trusty
+ go: 1.9
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mysql:5.7
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB
+ - sleep 30
+ - cp .travis/docker.cnf ~/.my.cnf
+ - mysql --print-defaults
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA55
+ sudo: required
+ dist: trusty
+ go: 1.9
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:5.5
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB
+ - sleep 30
+ - cp .travis/docker.cnf ~/.my.cnf
+ - mysql --print-defaults
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA10_1
+ sudo: required
+ dist: trusty
+ go: 1.9
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:10.1
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB
+ - sleep 30
+ - cp .travis/docker.cnf ~/.my.cnf
+ - mysql --print-defaults
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+script:
+ - go test -v -covermode=count -coverprofile=coverage.out
+ - go vet ./...
+ - test -z "$(gofmt -d -s . | tee /dev/stderr)"
+after_script:
+ - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644
index 0000000..ac36be9
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -0,0 +1,75 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+# Name <email address>
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins
+Achille Roussel
+Arne Hormann
+Asta Xie
+Bulat Gaifullin
+Carlos Nieto
+Chris Moos
+Daniel Nichter
+Daniël van Eeden
+Dave Protasowski
+DisposaBoy
+Egor Smolyakov
+Evan Shaw
+Frederick Mayle
+Gustavo Kristic
+Hanno Braun
+Henri Yandell
+Hirotaka Yamamoto
+ICHINOSE Shogo
+INADA Naoki
+Jacek Szwec
+James Harr
+Jeff Hodges
+Jeffrey Charles
+Jian Zhen
+Joshua Prunier
+Julien Lefevre
+Julien Schmidt
+Justin Nuß
+Kamil Dziedzic
+Kevin Malachowski
+Lennart Rudolph
+Leonardo YongUk Kim
+Lion Yang
+Luca Looz
+Lucas Liu
+Luke Scott
+Maciej Zimnoch
+Michael Woolnough
+Nicola Peduzzi
+Olivier Mengué
+oscarzhao
+Paul Bonser
+Peter Schultz
+Rebecca Chin
+Runrioter Wung
+Shuode Li
+Soroush Pour
+Stan Putrya
+Stanley Gunawan
+Xiangyu Hu
+Xiaobing Jiang
+Xiuming Chen
+Zhenye Xie
+
+# Organizations
+
+Barracuda Networks, Inc.
+Google Inc.
+Keybase Inc.
+Pivotal Inc.
+Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644
index 0000000..6bcad7e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -0,0 +1,119 @@
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+ - Go-MySQL-Driver now requires Go 1.1
+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+ - Optimized the buffer for reading
+ - stmt.Query now caches column metadata
+ - New Logo
+ - Changed the copyright header to include all contributors
+ - Improved the LOAD INFILE documentation
+ - The driver struct is now exported to make the driver directly accessible
+ - Refactored the driver tests
+ - Added more benchmarks and moved all to a separate file
+ - Other small refactoring
+
+New Features:
+
+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+ - Convert to DB timezone when inserting `time.Time`
+ - Split packets (more than 16MB) are now merged correctly
+ - Fixed false positive `io.EOF` errors when the data was fully read
+ - Avoid panics on reuse of closed connections
+ - Fixed empty string producing false nil values
+ - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
new file mode 100644
index 0000000..8fe16bc
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# Contributing Guidelines
+
+## Reporting Issues
+
+Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed).
+
+## Contributing Code
+
+By contributing to this project, you share your code under the Mozilla Public License 2.0, as specified in the LICENSE file.
+Don't forget to add yourself to the AUTHORS file.
+
+### Code Review
+
+Everyone is invited to review and comment on pull requests.
+If it looks fine to you, comment with "LGTM" (Looks good to me).
+
+If changes are required, notify the reviewers with "PTAL" (Please take another look) after committing the fixes.
+
+Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM".
+
+## Development Ideas
+
+If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page.
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644
index 0000000..14e2f77
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 0000000..d24aaa0
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,476 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
+
+
+
+---------------------------------------
+ * [Features](#features)
+ * [Requirements](#requirements)
+ * [Installation](#installation)
+ * [Usage](#usage)
+ * [DSN (Data Source Name)](#dsn-data-source-name)
+ * [Password](#password)
+ * [Protocol](#protocol)
+ * [Address](#address)
+ * [Parameters](#parameters)
+ * [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [ColumnType Support](#columntype-support)
+ * [context.Context Support](#contextcontext-support)
+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+ * [time.Time support](#timetime-support)
+ * [Unicode support](#unicode-support)
+ * [Testing / Development](#testing--development)
+ * [License](#license)
+
+---------------------------------------
+
+## Features
+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+ * Native Go implementation. No C-bindings, just pure Go
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Automatic handling of broken connections
+ * Automatic Connection Pooling *(by database/sql package)*
+ * Supports queries larger than 16MB
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.5 or higher
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+```go
+import "database/sql"
+import _ "github.com/go-sql-driver/mysql"
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
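+
+As a quick, self-contained sketch (the DSN and query here are placeholders):
+```go
+package main
+
+import (
+	"database/sql"
+	"log"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	// sql.Open only validates the DSN; connections are established lazily.
+	db, err := sql.Open("mysql", "user:password@/dbname")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	// The first query forces an actual connection and round-trip.
+	var version string
+	if err := db.QueryRow("SELECT VERSION()").Scan(&version); err != nil {
+		log.Fatal(err)
+	}
+	log.Println("server version:", version)
+}
+```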
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, as used by e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts marked by squared brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
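+
+For example, a minimal sketch (all field values below are placeholders):
+```go
+import (
+	"database/sql"
+
+	"github.com/go-sql-driver/mysql"
+)
+
+cfg := mysql.Config{
+	User:   "user",
+	Passwd: "password",
+	Net:    "tcp",
+	Addr:   "127.0.0.1:3306",
+	DBName: "dbname",
+}
+// FormatDSN renders the struct as a DSN string.
+db, err := sql.Open("mysql", cfg.FormatDSN())
+```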
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information about which networks are available.
+In general you should use a Unix domain socket if available and TCP otherwise for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <name>"`). If multiple charsets are given (separated by commas), the next one in the list is used if setting a charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `collation`
+
+```
+Type: string
+Valid Values: <name>
+Default: utf8_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid collations for a server is retrievable with `SHOW COLLATION`.
+
+##### `clientFoundRows`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
+
+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type: string
+Valid Values: <escaped name>
+Default: UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 4194304
+```
+
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from the server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
+
+##### `parseTime`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
+
+
+##### `readTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However, the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `timeout`
+
+```
+Type: duration
+Default: OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type: bool / string
+Valid Values: true, false, skip-verify, <name>
+Default: false
+```
+
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
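+
+A sketch of registering and using a custom TLS config (the CA file path and the registration name `"custom"` are placeholders):
+```go
+rootCertPool := x509.NewCertPool()
+pem, err := ioutil.ReadFile("/path/to/ca-cert.pem") // placeholder path
+if err != nil {
+	log.Fatal(err)
+}
+if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+	log.Fatal("failed to append PEM")
+}
+// Register under a custom name, then reference it via ?tls=custom in the DSN.
+mysql.RegisterTLSConfig("custom", &tls.Config{
+	RootCAs: rootCertPool,
+})
+db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/dbname?tls=custom")
+```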
+
+
+##### `writeTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`tx_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `SET tx_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine (First Generation MySQL Server):
+```
+user@cloudsql(project-id:instance-name)/dbname
+```
+
+Google Cloud SQL on App Engine (Second Generation MySQL Server):
+```
+user@cloudsql(project-id:regionname:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
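+
+A short sketch, where the DSN is a placeholder and the limits are illustrative rather than recommendations:
+```go
+db, err := sql.Open("mysql", "user:password@/dbname")
+if err != nil {
+	log.Fatal(err)
+}
+db.SetConnMaxLifetime(3 * time.Minute) // recycle connections before server-side timeouts hit
+db.SetMaxOpenConns(10)                 // cap concurrent connections to the server
+db.SetMaxIdleConns(10)                 // keep idle connections available for reuse
+```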
+
+### `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+### `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
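+
+For instance, a per-query timeout can look like this (a sketch; the table and column names are made up, and `db` is assumed to be an open `*sql.DB`):
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+defer cancel()
+
+rows, err := db.QueryContext(ctx, "SELECT id FROM users WHERE active = ?", true)
+if err != nil {
+	// err is context.DeadlineExceeded if the timeout expired first
+	log.Fatal(err)
+}
+defer rows.Close()
+```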
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when a handler is no longer needed.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
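+
+A sketch of both variants (file path, handler name, and table name are placeholders; `db` is assumed to be an open `*sql.DB`):
+```go
+// Variant 1: whitelist a concrete file, then reference it in the query.
+mysql.RegisterLocalFile("/path/to/data.csv")
+_, err := db.Exec("LOAD DATA LOCAL INFILE '/path/to/data.csv' INTO TABLE mytable")
+
+// Variant 2: stream the data from an io.Reader via a registered handler.
+mysql.RegisterReaderHandler("data", func() io.Reader {
+	return strings.NewReader("1\n2\n3\n")
+})
+_, err = db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE mytable")
+```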
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
+
+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
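+
+A sketch of both approaches (the DSN, table, and column names are placeholders):
+```go
+db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
+
+// With parseTime=true, DATETIME values scan directly into time.Time.
+var created time.Time
+err = db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&created)
+
+// NullTime additionally handles NULL and works without parseTime.
+var deleted mysql.NullTime
+err = db.QueryRow("SELECT deleted_at FROM events WHERE id = ?", 1).Scan(&deleted)
+if deleted.Valid {
+	log.Println("deleted at:", deleted.Time)
+}
+```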
+
+
+### Unicode support
+Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.
+
+See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+
+
diff --git a/vendor/github.com/go-sql-driver/mysql/appengine.go b/vendor/github.com/go-sql-driver/mysql/appengine.go
new file mode 100644
index 0000000..565614e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/appengine.go
@@ -0,0 +1,19 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build appengine
+
+package mysql
+
+import (
+ "appengine/cloudsql"
+)
+
+func init() {
+ RegisterDial("cloudsql", cloudsql.Dial)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644
index 0000000..2001fea
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -0,0 +1,147 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "io"
+ "net"
+ "time"
+)
+
+const defaultBufSize = 4096
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish.
+// It is also highly optimized for this particular use case.
+type buffer struct {
+ buf []byte
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
+}
+
+func newBuffer(nc net.Conn) buffer {
+ var b [defaultBufSize]byte
+ return buffer{
+ buf: b[:],
+ nc: nc,
+ }
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it
+func (b *buffer) fill(need int) error {
+ n := b.length
+
+ // move existing data to the beginning
+ if n > 0 && b.idx > 0 {
+ copy(b.buf[0:n], b.buf[b.idx:])
+ }
+
+ // grow buffer if necessary
+ // TODO: let the buffer shrink again at some point
+ // Maybe keep the org buf slice and swap back?
+ if need > len(b.buf) {
+ // Round up to the next multiple of the default size
+ newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+
+ b.idx = 0
+
+ for {
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
+
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
+ continue
+ }
+ b.length = n
+ return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
+ }
+ }
+}
+
+// returns next N bytes from buffer.
+// The returned slice is only guaranteed to be valid until the next read
+func (b *buffer) readNext(need int) ([]byte, error) {
+ if b.length < need {
+ // refill
+ if err := b.fill(need); err != nil {
+ return nil, err
+ }
+ }
+
+ offset := b.idx
+ b.idx += need
+ b.length -= need
+ return b.buf[offset:b.idx], nil
+}
+
+// returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) []byte {
+ if b.length > 0 {
+ return nil
+ }
+
+ // test (cheap) general case first
+ if length <= defaultBufSize || length <= cap(b.buf) {
+ return b.buf[:length]
+ }
+
+ if length < maxPacketSize {
+ b.buf = make([]byte, length)
+ return b.buf
+ }
+ return make([]byte, length)
+}
+
+// shortcut which can be used if the requested buffer is guaranteed to be
+// smaller than defaultBufSize
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) []byte {
+ if b.length == 0 {
+ return b.buf[:length]
+ }
+ return nil
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() []byte {
+ if b.length == 0 {
+ return b.buf
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644
index 0000000..82079cf
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -0,0 +1,250 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const defaultCollation = "utf8_general_ci"
+
+// A list of available collations mapped to the internal ID.
+// To update this map use the following MySQL query:
+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS
+var collations = map[string]byte{
+ "big5_chinese_ci": 1,
+ "latin2_czech_cs": 2,
+ "dec8_swedish_ci": 3,
+ "cp850_general_ci": 4,
+ "latin1_german1_ci": 5,
+ "hp8_english_ci": 6,
+ "koi8r_general_ci": 7,
+ "latin1_swedish_ci": 8,
+ "latin2_general_ci": 9,
+ "swe7_swedish_ci": 10,
+ "ascii_general_ci": 11,
+ "ujis_japanese_ci": 12,
+ "sjis_japanese_ci": 13,
+ "cp1251_bulgarian_ci": 14,
+ "latin1_danish_ci": 15,
+ "hebrew_general_ci": 16,
+ "tis620_thai_ci": 18,
+ "euckr_korean_ci": 19,
+ "latin7_estonian_cs": 20,
+ "latin2_hungarian_ci": 21,
+ "koi8u_general_ci": 22,
+ "cp1251_ukrainian_ci": 23,
+ "gb2312_chinese_ci": 24,
+ "greek_general_ci": 25,
+ "cp1250_general_ci": 26,
+ "latin2_croatian_ci": 27,
+ "gbk_chinese_ci": 28,
+ "cp1257_lithuanian_ci": 29,
+ "latin5_turkish_ci": 30,
+ "latin1_german2_ci": 31,
+ "armscii8_general_ci": 32,
+ "utf8_general_ci": 33,
+ "cp1250_czech_cs": 34,
+ "ucs2_general_ci": 35,
+ "cp866_general_ci": 36,
+ "keybcs2_general_ci": 37,
+ "macce_general_ci": 38,
+ "macroman_general_ci": 39,
+ "cp852_general_ci": 40,
+ "latin7_general_ci": 41,
+ "latin7_general_cs": 42,
+ "macce_bin": 43,
+ "cp1250_croatian_ci": 44,
+ "utf8mb4_general_ci": 45,
+ "utf8mb4_bin": 46,
+ "latin1_bin": 47,
+ "latin1_general_ci": 48,
+ "latin1_general_cs": 49,
+ "cp1251_bin": 50,
+ "cp1251_general_ci": 51,
+ "cp1251_general_cs": 52,
+ "macroman_bin": 53,
+ "utf16_general_ci": 54,
+ "utf16_bin": 55,
+ "utf16le_general_ci": 56,
+ "cp1256_general_ci": 57,
+ "cp1257_bin": 58,
+ "cp1257_general_ci": 59,
+ "utf32_general_ci": 60,
+ "utf32_bin": 61,
+ "utf16le_bin": 62,
+ "binary": 63,
+ "armscii8_bin": 64,
+ "ascii_bin": 65,
+ "cp1250_bin": 66,
+ "cp1256_bin": 67,
+ "cp866_bin": 68,
+ "dec8_bin": 69,
+ "greek_bin": 70,
+ "hebrew_bin": 71,
+ "hp8_bin": 72,
+ "keybcs2_bin": 73,
+ "koi8r_bin": 74,
+ "koi8u_bin": 75,
+ "latin2_bin": 77,
+ "latin5_bin": 78,
+ "latin7_bin": 79,
+ "cp850_bin": 80,
+ "cp852_bin": 81,
+ "swe7_bin": 82,
+ "utf8_bin": 83,
+ "big5_bin": 84,
+ "euckr_bin": 85,
+ "gb2312_bin": 86,
+ "gbk_bin": 87,
+ "sjis_bin": 88,
+ "tis620_bin": 89,
+ "ucs2_bin": 90,
+ "ujis_bin": 91,
+ "geostd8_general_ci": 92,
+ "geostd8_bin": 93,
+ "latin1_spanish_ci": 94,
+ "cp932_japanese_ci": 95,
+ "cp932_bin": 96,
+ "eucjpms_japanese_ci": 97,
+ "eucjpms_bin": 98,
+ "cp1250_polish_ci": 99,
+ "utf16_unicode_ci": 101,
+ "utf16_icelandic_ci": 102,
+ "utf16_latvian_ci": 103,
+ "utf16_romanian_ci": 104,
+ "utf16_slovenian_ci": 105,
+ "utf16_polish_ci": 106,
+ "utf16_estonian_ci": 107,
+ "utf16_spanish_ci": 108,
+ "utf16_swedish_ci": 109,
+ "utf16_turkish_ci": 110,
+ "utf16_czech_ci": 111,
+ "utf16_danish_ci": 112,
+ "utf16_lithuanian_ci": 113,
+ "utf16_slovak_ci": 114,
+ "utf16_spanish2_ci": 115,
+ "utf16_roman_ci": 116,
+ "utf16_persian_ci": 117,
+ "utf16_esperanto_ci": 118,
+ "utf16_hungarian_ci": 119,
+ "utf16_sinhala_ci": 120,
+ "utf16_german2_ci": 121,
+ "utf16_croatian_ci": 122,
+ "utf16_unicode_520_ci": 123,
+ "utf16_vietnamese_ci": 124,
+ "ucs2_unicode_ci": 128,
+ "ucs2_icelandic_ci": 129,
+ "ucs2_latvian_ci": 130,
+ "ucs2_romanian_ci": 131,
+ "ucs2_slovenian_ci": 132,
+ "ucs2_polish_ci": 133,
+ "ucs2_estonian_ci": 134,
+ "ucs2_spanish_ci": 135,
+ "ucs2_swedish_ci": 136,
+ "ucs2_turkish_ci": 137,
+ "ucs2_czech_ci": 138,
+ "ucs2_danish_ci": 139,
+ "ucs2_lithuanian_ci": 140,
+ "ucs2_slovak_ci": 141,
+ "ucs2_spanish2_ci": 142,
+ "ucs2_roman_ci": 143,
+ "ucs2_persian_ci": 144,
+ "ucs2_esperanto_ci": 145,
+ "ucs2_hungarian_ci": 146,
+ "ucs2_sinhala_ci": 147,
+ "ucs2_german2_ci": 148,
+ "ucs2_croatian_ci": 149,
+ "ucs2_unicode_520_ci": 150,
+ "ucs2_vietnamese_ci": 151,
+ "ucs2_general_mysql500_ci": 159,
+ "utf32_unicode_ci": 160,
+ "utf32_icelandic_ci": 161,
+ "utf32_latvian_ci": 162,
+ "utf32_romanian_ci": 163,
+ "utf32_slovenian_ci": 164,
+ "utf32_polish_ci": 165,
+ "utf32_estonian_ci": 166,
+ "utf32_spanish_ci": 167,
+ "utf32_swedish_ci": 168,
+ "utf32_turkish_ci": 169,
+ "utf32_czech_ci": 170,
+ "utf32_danish_ci": 171,
+ "utf32_lithuanian_ci": 172,
+ "utf32_slovak_ci": 173,
+ "utf32_spanish2_ci": 174,
+ "utf32_roman_ci": 175,
+ "utf32_persian_ci": 176,
+ "utf32_esperanto_ci": 177,
+ "utf32_hungarian_ci": 178,
+ "utf32_sinhala_ci": 179,
+ "utf32_german2_ci": 180,
+ "utf32_croatian_ci": 181,
+ "utf32_unicode_520_ci": 182,
+ "utf32_vietnamese_ci": 183,
+ "utf8_unicode_ci": 192,
+ "utf8_icelandic_ci": 193,
+ "utf8_latvian_ci": 194,
+ "utf8_romanian_ci": 195,
+ "utf8_slovenian_ci": 196,
+ "utf8_polish_ci": 197,
+ "utf8_estonian_ci": 198,
+ "utf8_spanish_ci": 199,
+ "utf8_swedish_ci": 200,
+ "utf8_turkish_ci": 201,
+ "utf8_czech_ci": 202,
+ "utf8_danish_ci": 203,
+ "utf8_lithuanian_ci": 204,
+ "utf8_slovak_ci": 205,
+ "utf8_spanish2_ci": 206,
+ "utf8_roman_ci": 207,
+ "utf8_persian_ci": 208,
+ "utf8_esperanto_ci": 209,
+ "utf8_hungarian_ci": 210,
+ "utf8_sinhala_ci": 211,
+ "utf8_german2_ci": 212,
+ "utf8_croatian_ci": 213,
+ "utf8_unicode_520_ci": 214,
+ "utf8_vietnamese_ci": 215,
+ "utf8_general_mysql500_ci": 223,
+ "utf8mb4_unicode_ci": 224,
+ "utf8mb4_icelandic_ci": 225,
+ "utf8mb4_latvian_ci": 226,
+ "utf8mb4_romanian_ci": 227,
+ "utf8mb4_slovenian_ci": 228,
+ "utf8mb4_polish_ci": 229,
+ "utf8mb4_estonian_ci": 230,
+ "utf8mb4_spanish_ci": 231,
+ "utf8mb4_swedish_ci": 232,
+ "utf8mb4_turkish_ci": 233,
+ "utf8mb4_czech_ci": 234,
+ "utf8mb4_danish_ci": 235,
+ "utf8mb4_lithuanian_ci": 236,
+ "utf8mb4_slovak_ci": 237,
+ "utf8mb4_spanish2_ci": 238,
+ "utf8mb4_roman_ci": 239,
+ "utf8mb4_persian_ci": 240,
+ "utf8mb4_esperanto_ci": 241,
+ "utf8mb4_hungarian_ci": 242,
+ "utf8mb4_sinhala_ci": 243,
+ "utf8mb4_german2_ci": 244,
+ "utf8mb4_croatian_ci": 245,
+ "utf8mb4_unicode_520_ci": 246,
+ "utf8mb4_vietnamese_ci": 247,
+}
+
+// A blacklist of collations which are unsafe to interpolate parameters into.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644
index 0000000..e570614
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -0,0 +1,461 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// a copy of context.Context for Go 1.7 and earlier
+type mysqlContext interface {
+ Done() <-chan struct{}
+ Err() error
+
+ // defined in context.Context, but not used in this driver:
+ // Deadline() (deadline time.Time, ok bool)
+ // Value(key interface{}) interface{}
+}
+
+type mysqlConn struct {
+ buf buffer
+ netConn net.Conn
+ affectedRows uint64
+ insertId uint64
+ cfg *Config
+ maxAllowedPacket int
+ maxWriteSize int
+ writeTimeout time.Duration
+ flags clientFlag
+ status statusFlag
+ sequence uint8
+ parseTime bool
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- mysqlContext
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
+}
+
+// Handles parameters set in DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+ for param, val := range mc.cfg.Params {
+ switch param {
+ // Charset
+ case "charset":
+ charsets := strings.Split(val, ",")
+ for i := range charsets {
+ // ignore errors here - a charset may not exist
+ err = mc.exec("SET NAMES " + charsets[i])
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ // System Vars
+ default:
+			err = mc.exec("SET " + param + "=" + val)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
+ if err == nil {
+ return &mysqlTx{mc}, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+func (mc *mysqlConn) Close() (err error) {
+ // Makes Close idempotent
+ if !mc.closed.IsSet() {
+ err = mc.writeCommandPacket(comQuit)
+ }
+
+ mc.cleanup()
+
+ return
+}
+
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication, call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ if !mc.closed.TrySet(true) {
+ return
+ }
+
+ // Makes cleanup idempotent
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
+ }
+ return ErrInvalidConn
+ }
+ return nil
+}
+
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
+ if err != nil {
+ return nil, mc.markBadConn(err)
+ }
+
+ stmt := &mysqlStmt{
+ mc: mc,
+ }
+
+ // Read Result
+ columnCount, err := stmt.readPrepareResultPacket()
+ if err == nil {
+ if stmt.paramCount > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if columnCount > 0 {
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return stmt, err
+}
+
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+	// The number of ? placeholders must be equal to len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf := mc.buf.takeCompleteBuffer()
+ if buf == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return "", ErrInvalidConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ v := v.In(mc.cfg.Loc)
+ v = v.Add(time.Nanosecond * 500) // To round under microsecond
+ year := v.Year()
+ year100 := year / 100
+ year1 := year % 100
+ month := v.Month()
+ day := v.Day()
+ hour := v.Hour()
+ minute := v.Minute()
+ second := v.Second()
+ micro := v.Nanosecond() / 1000
+
+ buf = append(buf, []byte{
+ '\'',
+ digits10[year100], digits01[year100],
+ digits10[year1], digits01[year1],
+ '-',
+ digits10[month], digits01[month],
+ '-',
+ digits10[day], digits01[day],
+ ' ',
+ digits10[hour], digits01[hour],
+ ':',
+ digits10[minute], digits01[minute],
+ ':',
+ digits10[second], digits01[second],
+ }...)
+
+ if micro != 0 {
+ micro10000 := micro / 10000
+ micro100 := micro / 100 % 100
+ micro1 := micro % 100
+ buf = append(buf, []byte{
+ '.',
+ digits10[micro10000], digits01[micro10000],
+ digits10[micro100], digits01[micro100],
+ digits10[micro1], digits01[micro1],
+ }...)
+ }
+ buf = append(buf, '\'')
+ }
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Internal function to execute commands
+func (mc *mysqlConn) exec(query string) error {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+
+ return mc.discardResults()
+}
+
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try client-side prepare to reduce roundtrip
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
+ }
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Gets the value of the given MySQL System Variable
+// The returned byte slice is only valid until the next read
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+ return nil, err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+ if resLen > 0 {
+ // Columns
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ dest := make([]driver.Value, resLen)
+ if err = rows.readRow(dest); err == nil {
+ return dest[0].([]byte), mc.readUntilEOF()
+ }
+ }
+ return nil, err
+}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
new file mode 100644
index 0000000..48a9cca
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
@@ -0,0 +1,197 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+)
+
+// Ping implements driver.Pinger interface
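+//
+// A usage sketch through database/sql (db is an assumed *sql.DB, not part of
+// this package):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	if err := db.PingContext(ctx); err != nil {
+//		// the connection is broken or the server did not answer in time
+//	}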
+func (mc *mysqlConn) Ping(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return err
+ }
+ defer mc.finish()
+
+ if err := mc.writeCommandPacket(comPing); err != nil {
+ return err
+ }
+ if _, err := mc.readResultOK(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// BeginTx implements driver.ConnBeginTx interface
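+//
+// A usage sketch through database/sql (db and ctx are assumed):
+//
+//	tx, err := db.BeginTx(ctx, &sql.TxOptions{
+//		Isolation: sql.LevelRepeatableRead,
+//		ReadOnly:  true,
+//	})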
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
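+// watchCancel registers ctx with the connection's watcher goroutine so the
+// connection is torn down if the context is canceled mid-query. It returns
+// ctx.Err() immediately if ctx is already done, and is a no-op for contexts
+// that can never be canceled (ctx.Done() == nil).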
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // This point is only reached after a cancel,
+ // so the connection is already invalid.
+ mc.cleanup()
+ return nil
+ }
+ if ctx.Done() == nil {
+ return nil
+ }
+
+ mc.watching = true
+ select {
+ default:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watcher <- ctx
+
+ return nil
+}
+
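+// startWatcher spawns the per-connection watcher goroutine. It waits for a
+// context handed over by watchCancel, then either cancels the connection when
+// that context is done, resumes waiting once the query signals completion on
+// mc.finished, or exits when the connection is closed (mc.closech).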
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan mysqlContext, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx mysqlContext
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644
index 0000000..4a19ca5
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -0,0 +1,166 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const (
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
+ maxPacketSize = 1<<24 - 1
+ timeFormat = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+const (
+ iOK byte = 0x00
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+type clientFlag uint32
+
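+// The flags are single bits and are combined with bitwise OR when the client
+// handshake is built, e.g. (sketch):
+//
+//	flags := clientProtocol41 | clientSecureConn | clientLongPassword
+//	hasSSL := flags&clientSSL != 0 // testing an individual capability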
+const (
+ clientLongPassword clientFlag = 1 << iota
+ clientFoundRows
+ clientLongFlag
+ clientConnectWithDB
+ clientNoSchema
+ clientCompress
+ clientODBC
+ clientLocalFiles
+ clientIgnoreSpace
+ clientProtocol41
+ clientInteractive
+ clientSSL
+ clientIgnoreSIGPIPE
+ clientTransactions
+ clientReserved
+ clientSecureConn
+ clientMultiStatements
+ clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
+)
+
+const (
+ comQuit byte = iota + 1
+ comInitDB
+ comQuery
+ comFieldList
+ comCreateDB
+ comDropDB
+ comRefresh
+ comShutdown
+ comStatistics
+ comProcessInfo
+ comConnect
+ comProcessKill
+ comDebug
+ comPing
+ comTime
+ comDelayedInsert
+ comChangeUser
+ comBinlogDump
+ comTableDump
+ comConnectOut
+ comRegisterSlave
+ comStmtPrepare
+ comStmtExecute
+ comStmtSendLongData
+ comStmtClose
+ comStmtReset
+ comSetOption
+ comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
+const (
+ fieldTypeDecimal fieldType = iota
+ fieldTypeTiny
+ fieldTypeShort
+ fieldTypeLong
+ fieldTypeFloat
+ fieldTypeDouble
+ fieldTypeNULL
+ fieldTypeTimestamp
+ fieldTypeLongLong
+ fieldTypeInt24
+ fieldTypeDate
+ fieldTypeTime
+ fieldTypeDateTime
+ fieldTypeYear
+ fieldTypeNewDate
+ fieldTypeVarChar
+ fieldTypeBit
+)
+const (
+ fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeNewDecimal
+ fieldTypeEnum
+ fieldTypeSet
+ fieldTypeTinyBLOB
+ fieldTypeMediumBLOB
+ fieldTypeLongBLOB
+ fieldTypeBLOB
+ fieldTypeVarString
+ fieldTypeString
+ fieldTypeGeometry
+)
+
+type fieldFlag uint16
+
+const (
+ flagNotNULL fieldFlag = 1 << iota
+ flagPriKey
+ flagUniqueKey
+ flagMultipleKey
+ flagBLOB
+ flagUnsigned
+ flagZeroFill
+ flagBinary
+ flagEnum
+ flagAutoIncrement
+ flagTimestamp
+ flagSet
+ flagUnknown1
+ flagUnknown2
+ flagUnknown3
+ flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644
index 0000000..d42ce7a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -0,0 +1,193 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package.
+//
+// The driver should be used via the database/sql package:
+//
+// import "database/sql"
+// import _ "github.com/go-sql-driver/mysql"
+//
+// db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "net"
+)
+
+// watcher interface is used for context support (From Go 1.8)
+type watcher interface {
+ startWatcher()
+}
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+type DialFunc func(addr string) (net.Conn, error)
+
+var dials map[string]DialFunc
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered network name.
+// addr is passed as a parameter to the dial function.
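+//
+// A sketch with a hypothetical "tcp4only" network name:
+//
+//	mysql.RegisterDial("tcp4only", func(addr string) (net.Conn, error) {
+//		return net.Dial("tcp4", addr)
+//	})
+//	db, err := sql.Open("mysql", "user:password@tcp4only(localhost:3306)/dbname")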
+func RegisterDial(net string, dial DialFunc) {
+ if dials == nil {
+ dials = make(map[string]DialFunc)
+ }
+ dials[net] = dial
+}
+
+// Open new Connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+ var err error
+
+ // New mysqlConn
+ mc := &mysqlConn{
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
+ }
+ mc.cfg, err = ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ mc.parseTime = mc.cfg.ParseTime
+
+ // Connect to Server
+ if dial, ok := dials[mc.cfg.Net]; ok {
+ mc.netConn, err = dial(mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable TCP Keepalives on TCP connections
+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
+ if err := tc.SetKeepAlive(true); err != nil {
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
+ return nil, err
+ }
+ }
+
+ // Call startWatcher for context support (From Go 1.8)
+ if s, ok := interface{}(mc).(watcher); ok {
+ s.startWatcher()
+ }
+
+ mc.buf = newBuffer(mc.netConn)
+
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
+ // Reading Handshake Initialization Packet
+ cipher, err := mc.readInitPacket()
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Send Client Authentication Packet
+ if err = mc.writeAuthPacket(cipher); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Handle response to auth packet, switch methods if possible
+ if err = handleAuthResult(mc, cipher); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
+ }
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
+ }
+
+ // Handle DSN Params
+ err = mc.handleParams()
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+
+ return mc, nil
+}
+
+func handleAuthResult(mc *mysqlConn, oldCipher []byte) error {
+ // Read Result Packet
+ cipher, err := mc.readResultOK()
+ if err == nil {
+ return nil // auth successful
+ }
+
+ if mc.cfg == nil {
+ return err // auth failed and retry not possible
+ }
+
+ // Retry auth if configured to do so.
+ if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
+ // Retry with old authentication method. Note: there are edge cases
+ // where this should work but doesn't; this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if cipher == nil {
+ cipher = oldCipher
+ }
+
+ if err = mc.writeOldAuthPacket(cipher); err != nil {
+ return err
+ }
+ _, err = mc.readResultOK()
+ } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
+ // Retry with clear text password for
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ if err = mc.writeClearAuthPacket(); err != nil {
+ return err
+ }
+ _, err = mc.readResultOK()
+ } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
+ if err = mc.writeNativeAuthPacket(cipher); err != nil {
+ return err
+ }
+ _, err = mc.readResultOK()
+ }
+ return err
+}
+
+func init() {
+ sql.Register("mysql", &MySQLDriver{})
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 0000000..3ade963
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,587 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ TLSConfig string // TLS configuration name
+ tls *tls.Config // TLS configuration
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ }
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ return nil
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...&paramN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ if hasParam {
+ buf.WriteString("&allowCleartextPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowCleartextPasswords=true")
+ }
+ }
+
+ if !cfg.AllowNativePasswords {
+ if hasParam {
+ buf.WriteString("&allowNativePasswords=false")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowNativePasswords=false")
+ }
+ }
+
+ if cfg.AllowOldPasswords {
+ if hasParam {
+ buf.WriteString("&allowOldPasswords=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?allowOldPasswords=true")
+ }
+ }
+
+ if cfg.ClientFoundRows {
+ if hasParam {
+ buf.WriteString("&clientFoundRows=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?clientFoundRows=true")
+ }
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ if hasParam {
+ buf.WriteString("&collation=")
+ } else {
+ hasParam = true
+ buf.WriteString("?collation=")
+ }
+ buf.WriteString(col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ if hasParam {
+ buf.WriteString("&columnsWithAlias=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?columnsWithAlias=true")
+ }
+ }
+
+ if cfg.InterpolateParams {
+ if hasParam {
+ buf.WriteString("&interpolateParams=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?interpolateParams=true")
+ }
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ if hasParam {
+ buf.WriteString("&loc=")
+ } else {
+ hasParam = true
+ buf.WriteString("?loc=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ if hasParam {
+ buf.WriteString("&multiStatements=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?multiStatements=true")
+ }
+ }
+
+ if cfg.ParseTime {
+ if hasParam {
+ buf.WriteString("&parseTime=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?parseTime=true")
+ }
+ }
+
+ if cfg.ReadTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&readTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?readTimeout=")
+ }
+ buf.WriteString(cfg.ReadTimeout.String())
+ }
+
+ if cfg.RejectReadOnly {
+ if hasParam {
+ buf.WriteString("&rejectReadOnly=true")
+ } else {
+ hasParam = true
+ buf.WriteString("?rejectReadOnly=true")
+ }
+ }
+
+ if cfg.Timeout > 0 {
+ if hasParam {
+ buf.WriteString("&timeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?timeout=")
+ }
+ buf.WriteString(cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ if hasParam {
+ buf.WriteString("&tls=")
+ } else {
+ hasParam = true
+ buf.WriteString("?tls=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ if hasParam {
+ buf.WriteString("&writeTimeout=")
+ } else {
+ hasParam = true
+ buf.WriteString("?writeTimeout=")
+ }
+ buf.WriteString(cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
+ if hasParam {
+ buf.WriteString("&maxAllowedPacket=")
+ } else {
+ hasParam = true
+ buf.WriteString("?maxAllowedPacket=")
+ }
+ buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
+
+ }
+
+ // other params
+ if cfg.Params != nil {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
+ if hasParam {
+ buf.WriteByte('&')
+ } else {
+ hasParam = true
+ buf.WriteByte('?')
+ }
+
+ buf.WriteString(param)
+ buf.WriteByte('=')
+ buf.WriteString(url.QueryEscape(cfg.Params[param]))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
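+//
+// A round-trip sketch (values are illustrative):
+//
+//	cfg, err := mysql.ParseDSN("user:password@tcp(localhost:3306)/dbname?parseTime=true")
+//	if err == nil {
+//		cfg.Collation = "utf8mb4_general_ci"
+//		dsn := cfg.FormatDSN() // parseTime and collation are re-encoded as params
+//		_ = dsn
+//	}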
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = NewConfig()
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...&paramN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if err = cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return
+}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+
+ // Disable INFILE whitelist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Reject read-only connections
+ case "rejectReadOnly":
+ var isBool bool
+ cfg.RejectReadOnly, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ cfg.tls = &tls.Config{}
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" {
+ cfg.TLSConfig = vl
+ cfg.tls = &tls.Config{InsecureSkipVerify: true}
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+
+ if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
+ if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ tlsConfig.ServerName = host
+ }
+ }
+
+ cfg.TLSConfig = name
+ cfg.tls = tlsConfig
+ } else {
+ return errors.New("invalid value / unknown config name: " + name)
+ }
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644
index 0000000..760782f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -0,0 +1,65 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+)
+
+// Various errors the driver might return. Can change between driver versions.
+var (
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
+)
+
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+
+// Logger is used to log critical error messages.
+type Logger interface {
+ Print(v ...interface{})
+}
+
+// SetLogger is used to set the logger for critical errors.
+// The initial logger writes to os.Stderr.
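+//
+// A sketch that silences driver logging entirely (ioutil.Discard is from
+// io/ioutil):
+//
+//	mysql.SetLogger(log.New(ioutil.Discard, "", 0))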
+func SetLogger(logger Logger) error {
+ if logger == nil {
+ return errors.New("logger is nil")
+ }
+ errLog = logger
+ return nil
+}
+
+// MySQLError is an error type which represents a single MySQL error
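+//
+// Callers can recover the server error number with a type assertion, e.g.
+// (1062 is ER_DUP_ENTRY; a sketch, not part of this package):
+//
+//	if me, ok := err.(*mysql.MySQLError); ok && me.Number == 1062 {
+//		// handle duplicate-key error
+//	}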
+type MySQLError struct {
+ Number uint16
+ Message string
+}
+
+func (me *MySQLError) Error() string {
+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 0000000..cded986
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,140 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+var typeDatabaseName = map[fieldType]string{
+ fieldTypeBit: "BIT",
+ fieldTypeBLOB: "BLOB",
+ fieldTypeDate: "DATE",
+ fieldTypeDateTime: "DATETIME",
+ fieldTypeDecimal: "DECIMAL",
+ fieldTypeDouble: "DOUBLE",
+ fieldTypeEnum: "ENUM",
+ fieldTypeFloat: "FLOAT",
+ fieldTypeGeometry: "GEOMETRY",
+ fieldTypeInt24: "MEDIUMINT",
+ fieldTypeJSON: "JSON",
+ fieldTypeLong: "INT",
+ fieldTypeLongBLOB: "LONGBLOB",
+ fieldTypeLongLong: "BIGINT",
+ fieldTypeMediumBLOB: "MEDIUMBLOB",
+ fieldTypeNewDate: "DATE",
+ fieldTypeNewDecimal: "DECIMAL",
+ fieldTypeNULL: "NULL",
+ fieldTypeSet: "SET",
+ fieldTypeShort: "SMALLINT",
+ fieldTypeString: "CHAR",
+ fieldTypeTime: "TIME",
+ fieldTypeTimestamp: "TIMESTAMP",
+ fieldTypeTiny: "TINYINT",
+ fieldTypeTinyBLOB: "TINYBLOB",
+ fieldTypeVarChar: "VARCHAR",
+ fieldTypeVarString: "VARCHAR",
+ fieldTypeYear: "YEAR",
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+}
+
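+// scanType maps a MySQL column definition to the Go type database/sql uses
+// for scanning. It is surfaced via ColumnType.ScanType (Go 1.8+); a usage
+// sketch, assuming rows is an *sql.Rows:
+//
+//	cols, err := rows.ColumnTypes()
+//	if err == nil {
+//		t := cols[0].ScanType() // e.g. reflect.TypeOf(int32(0)) for a NOT NULL INT column
+//		_ = t
+//	}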
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless if the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644
index 0000000..4020f91
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -0,0 +1,183 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
+)
+
+// RegisterLocalFile adds the given file to the file whitelist,
+// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+// filePath := "/home/gopher/data.csv"
+// mysql.RegisterLocalFile(filePath)
+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ // lazy map init
+ if fileRegister == nil {
+ fileRegister = make(map[string]bool)
+ }
+
+ fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the whitelist.
+func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive an io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
+// If the handler returns an io.ReadCloser, Close() is called when the
+// request is finished.
+//
+// mysql.RegisterReaderHandler("data", func() io.Reader {
+// var csvReader io.Reader // Some Reader that returns CSV data
+// ... // Open Reader here
+// return csvReader
+// })
+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
+ // lazy map init
+ if readerRegister == nil {
+ readerRegister = make(map[string]func() io.Reader)
+ }
+
+ readerRegister[name] = handler
+ readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
+ delete(readerRegister, name)
+ readerRegisterLock.Unlock()
+}
+
+func deferredClose(err *error, closer io.Closer) {
+ closeErr := closer.Close()
+ if *err == nil {
+ *err = closeErr
+ }
+}
+
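+// handleInFileRequest answers the server's LOAD DATA LOCAL INFILE request by
+// streaming either a registered io.Reader ("Reader::<name>") or a whitelisted
+// local file to the server in packets of at most maxWriteSize bytes, followed
+// by an empty packet that terminates the transfer.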
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+ var rdr io.Reader
+ var data []byte
+ packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+ // The server might return an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
+
+ if inMap {
+ rdr = handler()
+ if rdr != nil {
+ if cl, ok := rdr.(io.Closer); ok {
+ defer deferredClose(&err, cl)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is <nil>", name)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is not registered", name)
+ }
+ } else { // File
+ name = strings.Trim(name, `"`)
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
+ var file *os.File
+ var fi os.FileInfo
+
+ if file, err = os.Open(name); err == nil {
+ defer deferredClose(&err, file)
+
+ // get file size
+ if fi, err = file.Stat(); err == nil {
+ rdr = file
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
+ }
+ }
+ }
+ } else {
+ err = fmt.Errorf("local file '%s' is not registered", name)
+ }
+ }
+
+ // send content packets
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
+ data := make([]byte, 4+packetSize)
+ var n int
+ for err == nil {
+ n, err = rdr.Read(data[4:])
+ if n > 0 {
+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ return ioErr
+ }
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }
+
+ // send empty packet (termination)
+ if data == nil {
+ data = make([]byte, 4)
+ }
+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ return ioErr
+ }
+
+ // read OK packet
+ if err == nil {
+ _, err = mc.readResultOK()
+ return err
+ }
+
+ mc.readPacket()
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644
index 0000000..f63d250
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -0,0 +1,1309 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
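+//
+// Each packet starts with a 4-byte header: a 3-byte little-endian payload
+// length and a 1-byte sequence id, followed by the payload. Decoding the
+// header (hdr is an assumed [4]byte, matching the code below):
+//
+//	pktLen := int(uint32(hdr[0]) | uint32(hdr[1])<<8 | uint32(hdr[2])<<16)
+//	seq := hdr[3]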
+
+// Read packet to buffer 'data'
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+ var prevData []byte
+ for {
+ // read packet header
+ data, err := mc.buf.readNext(4)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // packet length [24 bit]
+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+ // check packet sync [8 bit]
+ if data[3] != mc.sequence {
+ if data[3] > mc.sequence {
+ return nil, ErrPktSyncMul
+ }
+ return nil, ErrPktSync
+ }
+ mc.sequence++
+
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24)−1 bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
+ data, err = mc.buf.readNext(pktLen)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
+
+ return append(prevData, data...), nil
+ }
+
+ prevData = append(prevData, data...)
+ }
+}
+
+// Write packet buffer 'data'
+func (mc *mysqlConn) writePacket(data []byte) error {
+ pktLen := len(data) - 4
+
+ if pktLen > mc.maxAllowedPacket {
+ return ErrPktTooLarge
+ }
+
+ for {
+ var size int
+ if pktLen >= maxPacketSize {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+ size = maxPacketSize
+ } else {
+ data[0] = byte(pktLen)
+ data[1] = byte(pktLen >> 8)
+ data[2] = byte(pktLen >> 16)
+ size = pktLen
+ }
+ data[3] = mc.sequence
+
+ // Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
+ n, err := mc.netConn.Write(data[:4+size])
+ if err == nil && n == 4+size {
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
+ continue
+ }
+
+ // Handle error
+ if err == nil { // n != len(data)
+ mc.cleanup()
+ errLog.Print(ErrMalformPkt)
+ } else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
+ errLog.Print(err)
+ }
+ return ErrInvalidConn
+ }
+}
+
+/******************************************************************************
+* Initialisation Process *
+******************************************************************************/
+
+// Handshake Initialization Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readInitPacket() ([]byte, error) {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if data[0] == iERR {
+ return nil, mc.handleErrorPacket(data)
+ }
+
+ // protocol version [1 byte]
+ if data[0] < minProtocolVersion {
+ return nil, fmt.Errorf(
+ "unsupported protocol version %d. Version %d or higher is required",
+ data[0],
+ minProtocolVersion,
+ )
+ }
+
+ // server version [null terminated string]
+ // connection id [4 bytes]
+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+ // first part of the password cipher [8 bytes]
+ cipher := data[pos : pos+8]
+
+ // (filler) always 0x00 [1 byte]
+ pos += 8 + 1
+
+ // capability flags (lower 2 bytes) [2 bytes]
+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ if mc.flags&clientProtocol41 == 0 {
+ return nil, ErrOldProtocol
+ }
+ if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
+ return nil, ErrNoTLS
+ }
+ pos += 2
+
+ if len(data) > pos {
+ // character set [1 byte]
+ // status flags [2 bytes]
+ // capability flags (upper 2 bytes) [2 bytes]
+ // length of auth-plugin-data [1 byte]
+ // reserved (all [00]) [10 bytes]
+ pos += 1 + 2 + 2 + 1 + 10
+
+ // second part of the password cipher [minimum 13 bytes],
+ // where len=MAX(13, length of auth-plugin-data - 8)
+ //
+ // The web documentation is ambiguous about the length. However,
+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+ // the 13th byte is "\0 byte, terminating the second part of
+ // a scramble". So the second part of the password cipher is
+ // a NULL terminated string that's at least 13 bytes with the
+ // last byte being NULL.
+ //
+ // The official Python library uses the fixed length 12
+ // which seems to work but technically could have a hidden bug.
+ cipher = append(cipher, data[pos:pos+12]...)
+
+ // TODO: Verify string termination
+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+ // \NUL otherwise
+ //
+ //if data[len(data)-1] == 0 {
+ // return
+ //}
+ //return ErrMalformPkt
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], cipher)
+ return b[:], nil
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [8]byte
+ copy(b[:], cipher)
+ return b[:], nil
+}
+
+// Client Authentication Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
+ // Adjust client flags based on server support
+ clientFlags := clientProtocol41 |
+ clientSecureConn |
+ clientLongPassword |
+ clientTransactions |
+ clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
+ mc.flags&clientLongFlag
+
+ if mc.cfg.ClientFoundRows {
+ clientFlags |= clientFoundRows
+ }
+
+ // To enable TLS / SSL
+ if mc.cfg.tls != nil {
+ clientFlags |= clientSSL
+ }
+
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
+ // User Password
+ scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1
+
+ // To specify a db name
+ if n := len(mc.cfg.DBName); n > 0 {
+ clientFlags |= clientConnectWithDB
+ pktLen += n + 1
+ }
+
+ // Calculate packet length and get buffer with that size
+ data := mc.buf.takeSmallBuffer(pktLen + 4)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // ClientFlags [32 bit]
+ data[4] = byte(clientFlags)
+ data[5] = byte(clientFlags >> 8)
+ data[6] = byte(clientFlags >> 16)
+ data[7] = byte(clientFlags >> 24)
+
+ // MaxPacketSize [32 bit] (none)
+ data[8] = 0x00
+ data[9] = 0x00
+ data[10] = 0x00
+ data[11] = 0x00
+
+ // Charset [1 byte]
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note: this can produce a false negative. The error may be
+ // triggered even though the collation is valid if the collations
+ // map lacks an entry the server does support.
+ return errors.New("unknown collation")
+ }
+
+ // SSL Connection Request Packet
+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+ if mc.cfg.tls != nil {
+ // Send TLS / SSL request packet
+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+ return err
+ }
+
+ // Switch to TLS
+ tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ mc.netConn = tlsConn
+ mc.buf.nc = tlsConn
+ }
+
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
+ // User [null terminated string]
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
+ }
+ data[pos] = 0x00
+ pos++
+
+ // ScrambleBuffer [length encoded integer]
+ data[pos] = byte(len(scrambleBuff))
+ pos += 1 + copy(data[pos+1:], scrambleBuff)
+
+ // Database name [null terminated string]
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
+ data[pos] = 0x00
+ pos++
+ }
+
+ // Assume native client during response
+ pos += copy(data[pos:], "mysql_native_password")
+ data[pos] = 0x00
+
+ // Send Auth packet
+ return mc.writePacket(data)
+}
+
+// Client old authentication packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
+ // User password
+ // https://dev.mysql.com/doc/internals/en/old-password-authentication.html
+ // Old password authentication only needs, and will only ever need, an 8-byte challenge.
+ scrambleBuff := scrambleOldPassword(cipher[:8], []byte(mc.cfg.Passwd))
+
+ // Calculate the packet length and add a trailing 0
+ pktLen := len(scrambleBuff) + 1
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add the scrambled password [null terminated string]
+ copy(data[4:], scrambleBuff)
+ data[4+pktLen-1] = 0x00
+
+ return mc.writePacket(data)
+}
+
+// Client clear text authentication packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeClearAuthPacket() error {
+ // Calculate the packet length and add a trailing 0
+ pktLen := len(mc.cfg.Passwd) + 1
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add the clear password [null terminated string]
+ copy(data[4:], mc.cfg.Passwd)
+ data[4+pktLen-1] = 0x00
+
+ return mc.writePacket(data)
+}
+
+// Native password authentication method
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only needs, and will only ever need, a 20-byte challenge.
+ scrambleBuff := scramblePassword(cipher[0:20], []byte(mc.cfg.Passwd))
+
+ // Calculate the packet length (no trailing 0 for native auth)
+ pktLen := len(scrambleBuff)
+ data := mc.buf.takeSmallBuffer(4 + pktLen)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add the scramble
+ copy(data[4:], scrambleBuff)
+
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Command Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data := mc.buf.takeSmallBuffer(4 + 1)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ pktLen := 1 + len(arg)
+ data := mc.buf.takeBuffer(pktLen + 4)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg
+ copy(data[5:], arg)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data := mc.buf.takeSmallBuffer(4 + 1 + 4)
+ if data == nil {
+ // can not take the buffer. Something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg [32 bit]
+ data[5] = byte(arg)
+ data[6] = byte(arg >> 8)
+ data[7] = byte(arg >> 16)
+ data[8] = byte(arg >> 24)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Result Packets *
+******************************************************************************/
+
+// Returns an error if the packet is not a 'Result OK' packet
+func (mc *mysqlConn) readResultOK() ([]byte, error) {
+ data, err := mc.readPacket()
+ if err == nil {
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return nil, mc.handleOkPacket(data)
+
+ case iEOF:
+ if len(data) > 1 {
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ plugin := string(data[1:pluginEndIndex])
+ cipher := data[pluginEndIndex+1:]
+
+ switch plugin {
+ case "mysql_old_password":
+ // using old_passwords
+ return cipher, ErrOldPassword
+ case "mysql_clear_password":
+ // using clear text password
+ return cipher, ErrCleartextPassword
+ case "mysql_native_password":
+ // using mysql default authentication method
+ return cipher, ErrNativePassword
+ default:
+ return cipher, ErrUnknownPlugin
+ }
+ }
+
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, ErrOldPassword
+
+ default: // Error otherwise
+ return nil, mc.handleErrorPacket(data)
+ }
+ }
+ return nil, err
+}
+
+// Result Set Header Packet
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+ data, err := mc.readPacket()
+ if err == nil {
+ switch data[0] {
+
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ num, _, n := readLengthEncodedInteger(data)
+ if n-len(data) == 0 {
+ return int(num), nil
+ }
+
+ return 0, ErrMalformPkt
+ }
+ return 0, err
+}
+
+// Error Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+ if data[0] != iERR {
+ return ErrMalformPkt
+ }
+
+ // 0xff [1 byte]
+
+ // Error Number [16 bit uint]
+ errno := binary.LittleEndian.Uint16(data[1:3])
+
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection hoping this one would have write
+ // permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for next statement next time.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
+ pos := 3
+
+ // SQL State [optional: # + 5bytes string]
+ if data[3] == 0x23 {
+ //sqlstate := string(data[4 : 4+5])
+ pos = 9
+ }
+
+ // Error Message [string]
+ return &MySQLError{
+ Number: errno,
+ Message: string(data[pos:]),
+ }
+}
+
+func readStatus(b []byte) statusFlag {
+ return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// Ok Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+ var n, m int
+
+ // 0x00 [1 byte]
+
+ // Affected rows [Length Coded Binary]
+ mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+ // Insert id [Length Coded Binary]
+ mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // server_status [2 bytes]
+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
+ }
+
+ // warning count [2 bytes]
+
+ return nil
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+ columns := make([]mysqlField, count)
+
+ for i := 0; ; i++ {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+ if i == count {
+ return columns, nil
+ }
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+ }
+
+ // Catalog
+ pos, err := skipLengthEncodedString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Database [len coded string]
+ n, err := skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Table [len coded string]
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ }
+
+ // Original table [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Name [len coded string]
+ name, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ columns[i].name = string(name)
+ pos += n
+
+ // Original name [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+
+ // Filler [uint8]
+ // Charset [charset, collation uint8]
+ pos += n + 1 + 2
+
+ // Length [uint32]
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
+
+ // Field type [uint8]
+ columns[i].fieldType = fieldType(data[pos])
+ pos++
+
+ // Flags [uint16]
+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ pos += 2
+
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
+ //pos++
+
+ // Default value [len coded binary]
+ //if pos < len(data) {
+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+ //}
+ }
+}
+
+// Read a Text Resultset Row Packet, or the terminating EOF / Error Packet
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+ mc := rows.mc
+
+ if rows.rs.done {
+ return io.EOF
+ }
+
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
+ }
+
+ // RowSet Packet
+ var n int
+ var isNull bool
+ pos := 0
+
+ for i := range dest {
+ // Read bytes and convert to string
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ if !mc.parseTime {
+ continue
+ } else {
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeTimestamp, fieldTypeDateTime,
+ fieldTypeDate, fieldTypeNewDate:
+ dest[i], err = parseDateTime(
+ string(dest[i].([]byte)),
+ mc.cfg.Loc,
+ )
+ if err == nil {
+ continue
+ }
+ default:
+ continue
+ }
+ }
+
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err // err != nil
+ }
+
+ return nil
+}
+
+// Reads Packets until an EOF-Packet or an Error appears; returns the error, if any
+func (mc *mysqlConn) readUntilEOF() error {
+ for {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ switch data[0] {
+ case iERR:
+ return mc.handleErrorPacket(data)
+ case iEOF:
+ if len(data) == 5 {
+ mc.status = readStatus(data[3:])
+ }
+ return nil
+ }
+ }
+}
+
+/******************************************************************************
+* Prepared Statements *
+******************************************************************************/
+
+// Prepare Result Packets
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+ data, err := stmt.mc.readPacket()
+ if err == nil {
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ return 0, stmt.mc.handleErrorPacket(data)
+ }
+
+ // statement id [4 bytes]
+ stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+ // Column count [16 bit uint]
+ columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+ // Param count [16 bit uint]
+ stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+ // Reserved [8 bit]
+
+ // Warning count [16 bit uint]
+
+ return columnCount, nil
+ }
+ return 0, err
+}
+
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+ maxLen := stmt.mc.maxAllowedPacket - 1
+ pktLen := maxLen
+
+ // Between the header (bytes 0-3) and the data, the packet contains:
+ // 1 byte command
+ // 4 bytes stmtID
+ // 2 bytes paramID
+ const dataOffset = 1 + 4 + 2
+
+ // Cannot use the write buffer since
+ // a) the buffer is too small
+ // b) it is in use
+ data := make([]byte, 4+1+4+2+len(arg))
+
+ copy(data[4+dataOffset:], arg)
+
+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+ if dataOffset+argLen < maxLen {
+ pktLen = dataOffset + argLen
+ }
+
+ stmt.mc.sequence = 0
+ // Add command byte [1 byte]
+ data[4] = comStmtSendLongData
+
+ // Add stmtID [32 bit]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // Add paramID [16 bit]
+ data[9] = byte(paramID)
+ data[10] = byte(paramID >> 8)
+
+ // Send CMD packet
+ err := stmt.mc.writePacket(data[:4+pktLen])
+ if err == nil {
+ data = data[pktLen-dataOffset:]
+ continue
+ }
+ return err
+
+ }
+
+ // Reset Packet Sequence
+ stmt.mc.sequence = 0
+ return nil
+}
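+
+// As a rough example, assuming a maxAllowedPacket of 16 MiB, a 20 MiB
+// argument is sent as two COM_STMT_SEND_LONG_DATA packets (roughly 16 MiB
+// and 4 MiB), each repeating the command byte, statement id and param id.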
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+ if len(args) != stmt.paramCount {
+ return fmt.Errorf(
+ "argument count mismatch (got: %d; has: %d)",
+ len(args),
+ stmt.paramCount,
+ )
+ }
+
+ const minPktLen = 4 + 1 + 4 + 1 + 4
+ mc := stmt.mc
+
+ // Reset packet-sequence
+ mc.sequence = 0
+
+ var data []byte
+
+ if len(args) == 0 {
+ data = mc.buf.takeBuffer(minPktLen)
+ } else {
+ data = mc.buf.takeCompleteBuffer()
+ }
+ if data == nil {
+ // cannot take the buffer; something must be wrong with the connection
+ errLog.Print(ErrBusyBuffer)
+ return errBadConnNoWrite
+ }
+
+ // command [1 byte]
+ data[4] = comStmtExecute
+
+ // statement_id [4 bytes]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+ data[9] = 0x00
+
+ // iteration_count (uint32(1)) [4 bytes]
+ data[10] = 0x01
+ data[11] = 0x00
+ data[12] = 0x00
+ data[13] = 0x00
+
+ if len(args) > 0 {
+ pos := minPktLen
+
+ var nullMask []byte
+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) {
+ // The buffer has to be extended, but we don't know by how much,
+ // so we allocate room for everything with a known size up front
+ // and rely on append for the variable-length parameter values;
+ // with many parameters the final size is hard to guess.
+ tmp := make([]byte, pos+maskLen+typesLen)
+ copy(tmp[:pos], data[:pos])
+ data = tmp
+ nullMask = data[pos : pos+maskLen]
+ pos += maskLen
+ } else {
+ nullMask = data[pos : pos+maskLen]
+ for i := 0; i < maskLen; i++ {
+ nullMask[i] = 0
+ }
+ pos += maskLen
+ }
+
+ // newParameterBoundFlag 1 [1 byte]
+ data[pos] = 0x01
+ pos++
+
+ // type of each parameter [len(args)*2 bytes]
+ paramTypes := data[pos:]
+ pos += len(args) * 2
+
+ // value of each parameter [n bytes]
+ paramValues := data[pos:pos]
+ valuesCap := cap(paramValues)
+
+ for i, arg := range args {
+ // build NULL-bitmap
+ if arg == nil {
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+ continue
+ }
+
+ // cache types and values
+ switch v := arg.(type) {
+ case int64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case float64:
+ paramTypes[i+i] = byte(fieldTypeDouble)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ math.Float64bits(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(math.Float64bits(v))...,
+ )
+ }
+
+ case bool:
+ paramTypes[i+i] = byte(fieldTypeTiny)
+ paramTypes[i+i+1] = 0x00
+
+ if v {
+ paramValues = append(paramValues, 0x01)
+ } else {
+ paramValues = append(paramValues, 0x00)
+ }
+
+ case []byte:
+ // Common case (non-nil value) first
+ if v != nil {
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, v); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ // Handle []byte(nil) as a NULL value
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+
+ case string:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+ return err
+ }
+ }
+
+ case time.Time:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ var a [64]byte
+ var b = a[:0]
+
+ if v.IsZero() {
+ b = append(b, "0000-00-00"...)
+ } else {
+ b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
+ }
+
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(b)),
+ )
+ paramValues = append(paramValues, b...)
+
+ default:
+ return fmt.Errorf("can not convert type: %T", arg)
+ }
+ }
+
+ // Check if param values exceeded the available buffer
+ // In that case we must build the data packet with the new values buffer
+ if valuesCap != cap(paramValues) {
+ data = append(data[:pos], paramValues...)
+ mc.buf.buf = data
+ }
+
+ pos += len(paramValues)
+ data = data[:pos]
+ }
+
+ return mc.writePacket(data)
+}
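+
+// A worked example of the packet built above (values illustrative): for
+// stmt.id == 1 and args == []driver.Value{int64(42)} the body after the
+// 4-byte header is
+//
+//   0x17                 comStmtExecute
+//   0x01 0x00 0x00 0x00  statement id
+//   0x00                 flags (CURSOR_TYPE_NO_CURSOR)
+//   0x01 0x00 0x00 0x00  iteration count
+//   0x00                 NULL-bitmap (one byte for one parameter)
+//   0x01                 new-params-bound flag
+//   0x08 0x00            type: fieldTypeLongLong, signed
+//   0x2a 0x00 ... 0x00   value: 42 as a little-endian uint64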
+
+func (mc *mysqlConn) discardResults() error {
+ for mc.status&statusMoreResultsExists != 0 {
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+ data, err := rows.mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ mc := rows.mc
+ rows.mc = nil
+
+ // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+
+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+ pos := 1 + (len(dest)+7+2)>>3
+ nullMask := data[1:pos]
+
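+ // The binary protocol reserves the first two bits of the NULL-bitmap,
+ // so the flag for column i is bit i+2: with three columns, for example,
+ // the mask is a single byte and a NULL in column 0 sets bit 2
+ // (nullMask[0]&0x04 != 0).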
+ for i := range dest {
+ // Field is NULL
+ // (byte >> bit-pos) % 2 == 1
+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+ dest[i] = nil
+ continue
+ }
+
+ // Convert to byte-coded string
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeNULL:
+ dest[i] = nil
+ continue
+
+ // Numeric Types
+ case fieldTypeTiny:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(data[pos])
+ } else {
+ dest[i] = int64(int8(data[pos]))
+ }
+ pos++
+ continue
+
+ case fieldTypeShort, fieldTypeYear:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ } else {
+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case fieldTypeInt24, fieldTypeLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ } else {
+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ }
+ pos += 4
+ continue
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
+ if val > math.MaxInt64 {
+ dest[i] = uint64ToString(val)
+ } else {
+ dest[i] = int64(val)
+ }
+ } else {
+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case fieldTypeFloat:
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ pos += 4
+ continue
+
+ case fieldTypeDouble:
+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ continue
+
+ // Length coded Binary Strings
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ var isNull bool
+ var n int
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ continue
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err
+
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
+ num, isNull, n := readLengthEncodedInteger(data[pos:])
+ pos += n
+
+ switch {
+ case isNull:
+ dest[i] = nil
+ continue
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+ default:
+ var dstlen uint8
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false)
+ }
+
+ if err == nil {
+ pos += int(num)
+ continue
+ } else {
+ return err
+ }
+
+ // Please report if this happens!
+ default:
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644
index 0000000..c6438d0
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlResult struct {
+ affectedRows int64
+ insertId int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertId, nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644
index 0000000..18f4169
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -0,0 +1,219 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "math"
+ "reflect"
+)
+
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ rs resultSet
+ finish func()
+}
+
+type binaryRows struct {
+ mysqlRows
+}
+
+type textRows struct {
+ mysqlRows
+}
+
+func (rows *mysqlRows) Columns() []string {
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
+ } else {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+
+ rows.rs.columnNames = columns
+ return columns
+}
+
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ if name, ok := typeDatabaseName[rows.rs.columns[i].fieldType]; ok {
+ return name
+ }
+ return ""
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
+ mc := rows.mc
+ if mc == nil {
+ return nil
+ }
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
+ if err == nil {
+ if err = mc.discardResults(); err != nil {
+ return err
+ }
+ }
+
+ rows.mc = nil
+ return err
+}
+
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
+ }
+ rows.rs.done = true
+ }
+
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
+ }
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644
index 0000000..ae22350
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -0,0 +1,162 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+)
+
+type mysqlStmt struct {
+ mc *mysqlConn
+ id uint32
+ paramCount int
+}
+
+func (stmt *mysqlStmt) Close() error {
+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
+ // driver.Stmt.Close can be called more than once, thus this function
+ // has to be idempotent.
+ // See also Issue #450 and golang/go#16019.
+ //errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+ stmt.mc = nil
+ return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+ return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ rows := new(binaryRows)
+
+ if resLen > 0 {
+ rows.mc = mc
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ return rows, err
+}
+
+type converter struct{}
+
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ }
+ return c.ConvertValue(rv.Elem().Interface())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(rv.Uint()), nil
+ case reflect.Uint64:
+ u64 := rv.Uint()
+ if u64 >= 1<<63 {
+ return strconv.FormatUint(u64, 10), nil
+ }
+ return int64(u64), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644
index 0000000..417d727
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlTx struct {
+ mc *mysqlConn
+}
+
+func (tx *mysqlTx) Commit() (err error) {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("COMMIT")
+ tx.mc = nil
+ return
+}
+
+func (tx *mysqlTx) Rollback() (err error) {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("ROLLBACK")
+ tx.mc = nil
+ return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644
index 0000000..82da830
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -0,0 +1,822 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/sha1"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+var (
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// Note: The tls.Config provided needs to be exclusively owned by the driver after registering it.
+//
+// rootCertPool := x509.NewCertPool()
+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+// log.Fatal("Failed to append PEM.")
+// }
+// clientCert := make([]tls.Certificate, 0, 1)
+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// clientCert = append(clientCert, certs)
+// mysql.RegisterTLSConfig("custom", &tls.Config{
+// RootCAs: rootCertPool,
+// Certificates: clientCert,
+// })
+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+//
+func RegisterTLSConfig(key string, config *tls.Config) error {
+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" {
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ tlsConfigLock.Lock()
+ if tlsConfigRegister == nil {
+ tlsConfigRegister = make(map[string]*tls.Config)
+ }
+
+ tlsConfigRegister[key] = config
+ tlsConfigLock.Unlock()
+ return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
+ if tlsConfigRegister != nil {
+ delete(tlsConfigRegister, key)
+ }
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegister[key]; ok {
+ config = cloneTLSConfig(v)
+ }
+ tlsConfigLock.RUnlock()
+ return
+}
+
+// Returns the bool value of the input.
+// The 2nd return value indicates whether the input was a valid bool value
+func readBool(input string) (value bool, valid bool) {
+ switch input {
+ case "1", "true", "TRUE", "True":
+ return true, true
+ case "0", "false", "FALSE", "False":
+ return false, true
+ }
+
+ // Not a valid bool value
+ return
+}
+
+/******************************************************************************
+* Authentication *
+******************************************************************************/
+
+// Encrypt password using 4.1+ method
+func scramblePassword(scramble, password []byte) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write(password)
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Encrypt password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit (1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Encrypt password using insecure pre 4.1 method
+func scrambleOldPassword(scramble, password []byte) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ scramble = scramble[:8]
+
+ hashPw := pwHash(password)
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+/******************************************************************************
+* Time related utils *
+******************************************************************************/
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// This NullTime implementation is not driver-specific
+type NullTime struct {
+ Time time.Time
+ Valid bool // Valid is true if Time is not NULL
+}
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+ if value == nil {
+ nt.Time, nt.Valid = time.Time{}, false
+ return
+ }
+
+ switch v := value.(type) {
+ case time.Time:
+ nt.Time, nt.Valid = v, true
+ return
+ case []byte:
+ nt.Time, err = parseDateTime(string(v), time.UTC)
+ nt.Valid = (err == nil)
+ return
+ case string:
+ nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Valid = (err == nil)
+ return
+ }
+
+ nt.Valid = false
+ return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+ if !nt.Valid {
+ return nil, nil
+ }
+ return nt.Time, nil
+}
+
+func parseDateTime(str string, loc *time.Location) (t time.Time, err error) {
+ base := "0000-00-00 00:00:00.0000000"
+ switch len(str) {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if str == base[:len(str)] {
+ return
+ }
+ t, err = time.Parse(timeFormat[:len(str)], str)
+ default:
+ err = fmt.Errorf("invalid time string: %s", str)
+ return
+ }
+
+ // Adjust location
+ if err == nil && loc != time.UTC {
+ y, mo, d := t.Date()
+ h, mi, s := t.Clock()
+ t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil
+ }
+
+ return
+}
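+
+// For example (illustrative values), parseDateTime("2018-01-02 15:04:05",
+// time.UTC) returns the corresponding time.Time, while the zero-value
+// string "0000-00-00 00:00:00" returns the zero time.Time and a nil error.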
+
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
+ switch num {
+ case 0:
+ return time.Time{}, nil
+ case 4:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ 0, 0, 0, 0,
+ loc,
+ ), nil
+ case 7:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ 0,
+ loc,
+ ), nil
+ case 11:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+ loc,
+ ), nil
+ }
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
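+
+// As a worked example of the 4-byte DATE case above, the payload
+// {0xda, 0x07, 0x0c, 0x19} decodes to year 0x07da = 2010, month 12 and
+// day 25, i.e. 2010-12-25 00:00:00 in the given location.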
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
+ // length is the deterministic length of the zero value; room for a
+ // negative sign and 100+ hours is added automatically if needed
+ if len(src) == 0 {
+ if justTime {
+ return zeroDateTime[11 : 11+length], nil
+ }
+ return zeroDateTime[:length], nil
+ }
+ var dst []byte // return value
+ var pt, p1, p2, p3 byte // current digit pair
+ var zOffs byte // offset of value in zeroDateTime
+ if justTime {
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
+ default:
+ return nil, fmt.Errorf("illegal TIME length %d", length)
+ }
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ if src[1] != 0 {
+ hour := uint16(src[1])*24 + uint16(src[5])
+ pt = byte(hour / 100)
+ p1 = byte(hour - 100*uint16(pt))
+ dst = append(dst, digits01[pt])
+ } else {
+ p1 = src[5]
+ }
+ zOffs = 11
+ src = src[6:]
+ } else {
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
+ }
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
+ year := binary.LittleEndian.Uint16(src[:2])
+ pt = byte(year / 100)
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+ }
+ // p1 is 2-digit hour, src is after hour
+ p2, p3 = src[0], src[1]
+ dst = append(dst,
+ digits10[p1], digits01[p1], ':',
+ digits10[p2], digits01[p2], ':',
+ digits10[p3], digits01[p3],
+ )
+ if length <= byte(len(dst)) {
+ return dst, nil
+ }
+ src = src[2:]
+ if len(src) == 0 {
+ return append(dst, zeroDateTime[19:zOffs+length]...), nil
+ }
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 = byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 = byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 = byte(microsecs)
+ switch decimals := zOffs + length - 20; decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ ), nil
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ ), nil
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ ), nil
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ ), nil
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ ), nil
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ ), nil
+ }
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func uint64ToString(n uint64) []byte {
+ var a [20]byte
+ i := 20
+
+ // U+0030 = 0
+ // ...
+ // U+0039 = 9
+
+ var q uint64
+ for n >= 10 {
+ i--
+ q = n / 10
+ a[i] = uint8(n-q*10) + 0x30
+ n = q
+ }
+
+ i--
+ a[i] = uint8(n) + 0x30
+
+ return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+ val := 0
+ for i := range b {
+ val *= 10
+ val += int(b[i] - 0x30)
+ }
+ return val
+}
+
+// returns the string read as a byte slice, whether the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return b[n:n], isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
+ switch b[0] {
+
+ // 251: NULL
+ case 0xfb:
+ return 0, true, 1
+
+ // 252: value of following 2
+ case 0xfc:
+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+ // 253: value of following 3
+ case 0xfd:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+ // 254: value of following 8
+ case 0xfe:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
+ false, 9
+ }
+
+ // 0-250: value of first byte
+ return uint64(b[0]), false, 1
+}
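+
+// Worked examples of the encoding handled above (values illustrative):
+//
+//   {0xfa}             -> 250,  read as 1 byte
+//   {0xfc, 0xe8, 0x03} -> 1000, read as 3 bytes (0x03e8 little-endian)
+//   {0xfb}             -> NULL, isNull == true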
+
+// encodes a uint64 value and appends it to the given bytes slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+ switch {
+ case n <= 250:
+ return append(b, byte(n))
+
+ case n <= 0xffff:
+ return append(b, 0xfc, byte(n), byte(n>>8))
+
+ case n <= 0xffffff:
+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+ }
+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, it reallocates a new buffer.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+ newSize := len(buf) + appendSize
+ if cap(buf) < newSize {
+ // Grow buffer exponentially
+ newBuf := make([]byte, len(buf)*2+appendSize)
+ copy(newBuf, buf)
+ buf = newBuf
+ }
+ return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+func escapeBytesBackslash(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
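+
+// For example, escaping the input "it's" appends the bytes "it\'s" to buf.
+// reserveBuffer guarantees room for the worst case of 2*len(v) extra
+// bytes; the final re-slice trims the unused tail.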
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+func escapeStringBackslash(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
new file mode 100644
index 0000000..f595634
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.7
+// +build !go1.8
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
new file mode 100644
index 0000000..7d8c9b1
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
@@ -0,0 +1,49 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+)
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", errors.New("mysql: unsupported isolation level: " + string(level))
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_legacy.go b/vendor/github.com/go-sql-driver/mysql/utils_legacy.go
new file mode 100644
index 0000000..a03b10d
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_legacy.go
@@ -0,0 +1,18 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build !go1.7
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ clone := *c
+ return &clone
+}
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
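Reader's chunk-by-chunk loop above is what lets callers treat a framed Snappy stream as a plain io.Reader. A minimal decompression sketch (assuming the input was written in the framing format, e.g. by NewBufferedWriter):

```go
package main

import (
	"io"
	"os"

	"github.com/golang/snappy"
)

func main() {
	// Copy a framed Snappy stream from stdin to stdout, decompressed.
	r := snappy.NewReader(os.Stdin)
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}
```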
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
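The tag lines above (!appengine, gc, !noasm) select the assembly fast path, while decode_other.go below carries the logical inverse (!amd64 appengine !gc noasm), so exactly one decode is compiled into any build. A hypothetical minimal file showing the fallback half of that pattern (package and function names are illustrative, not from this repository):

```go
// +build !amd64 appengine !gc noasm

// Package tagdemo is a hypothetical illustration of the asm/pure-Go
// split used here: a .go stub under the asm build tags declares the
// function (with //go:noescape), a .s file defines it, and a file like
// this one, under the inverse tags, supplies the portable fallback.
package tagdemo

func sum(xs []int64) int64 {
	var s int64
	for _, x := range xs {
		s += x
	}
	return s
}
```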
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+	//	// The two previous lines together mean that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
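The makeOffsetAtLeast8 fragment is the subtlest part of the copy path above, and it translates almost line for line into Go. A hypothetical sketch, not part of the vendored package; bounds are assumed to have already been checked, as the asm does before jumping there:

```go
// expandPattern doubles a short repeating pattern in place until the
// forward copy's source and destination are at least 8 bytes apart,
// after which plain 8-byte load/stores are safe. Go's copy has memmove
// semantics, which matches the asm's load-into-register-then-store for
// each 8-byte span.
func expandPattern(dst []byte, d, offset, length int) (int, int, int) {
	for offset < 8 {
		copy(dst[d:d+8], dst[d-offset:d-offset+8])
		length -= offset
		d += offset
		offset += offset // d-offset, the copy source, stays fixed
	}
	return d, offset, length
}
```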
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
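As a concrete check of the tag layout this decode loop handles, a short literal block can be assembled by hand: one varint byte for the decoded length, then the tag byte (len-1)<<2 | tagLiteral, then the literal bytes themselves. A sketch using the package's public API:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// Varint decoded length 5, literal tag (5-1)<<2|0x00 = 0x10, payload.
	block := []byte{0x05, 0x10, 'a', 'b', 'c', 'd', 'e'}
	got, err := snappy.Decode(nil, block)
	fmt.Printf("%q %v\n", got, err) // "abcde" <nil>
}
```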
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
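Given the deprecation note on NewWriter, the intended streaming pattern is NewBufferedWriter plus an explicit Close, since the final partial block sits in ibuf until flushed. A hypothetical helper (compressTo is illustrative, not part of the package):

```go
package snappyutil

import (
	"io"

	"github.com/golang/snappy"
)

// compressTo writes data to dst in the framed Snappy format. Close
// (which calls Flush) must run so the last buffered block is emitted.
func compressTo(dst io.Writer, data []byte) error {
	w := snappy.NewBufferedWriter(dst)
	if _, err := w.Write(data); err != nil {
		w.Close() // best effort; Write's error takes precedence
		return err
	}
	return w.Close()
}
```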
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls, gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
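The IMULL $0x1e35a7bd / SHRL CX sequences above are the multiply-shift hash defined in encode_other.go; with the smallest table (1<<8 entries) the shift is 32-8 = 24, so results always fit in 8 bits. A quick sanity-check sketch:

```go
package main

import "fmt"

// hash mirrors the function used by encodeBlock: multiply by the magic
// constant, keep the top (32 - shift) bits.
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

func main() {
	const shift = 32 - 8 // table of 1<<8 entries
	fmt.Println(hash(0x12345678, shift) < 1<<8) // always true
}
```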
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
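+		// The match now covers src[base:s]; s points one past its last byte.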
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..c7f445f
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
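+
+// Worked example (illustrative, not part of the spec text above): the two
+// bytes 0x25 0x40 form a copy-1 tag. The first byte is 0b00100101, so l = 1
+// (tagCopy1) and m = 0b001001 = 9: the length is 4 + (9 & 7) = 5, and the
+// offset is (9>>3)<<8 | 0x40 = 320, i.e. "copy 5 bytes from 320 bytes ago".
+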
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
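+//
+// Per that spec, the CRC-32C value is masked (rotated by 15 bits, then offset
+// by a constant) because computing a checksum over data that contains its own
+// checksum can be problematic.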
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/google/gopacket/.gitignore b/vendor/github.com/google/gopacket/.gitignore
new file mode 100644
index 0000000..149266f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+#*
+*~
+
+# examples binaries
+examples/synscan/synscan
+examples/pfdump/pfdump
+examples/pcapdump/pcapdump
+examples/httpassembly/httpassembly
+examples/statsassembly/statsassembly
+examples/arpscan/arpscan
+examples/bidirectional/bidirectional
+examples/bytediff/bytediff
+examples/reassemblydump/reassemblydump
+layers/gen
+macs/gen
+pcap/pcap_tester
diff --git a/vendor/github.com/google/gopacket/.travis.gofmt.sh b/vendor/github.com/google/gopacket/.travis.gofmt.sh
new file mode 100755
index 0000000..e341a1c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.gofmt.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+if [ -n "$(go fmt ./...)" ]; then
+  echo "Go code is not formatted, run 'go fmt github.com/google/gopacket/...'" >&2
+ exit 1
+fi
diff --git a/vendor/github.com/google/gopacket/.travis.golint.sh b/vendor/github.com/google/gopacket/.travis.golint.sh
new file mode 100755
index 0000000..0e267f5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.golint.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+
+go get golang.org/x/lint/golint
+DIRS=". tcpassembly tcpassembly/tcpreader ip4defrag reassembly macs pcapgo pcap afpacket pfring routing defrag/lcmdefrag"
+# Add subdirectories here as we clean up golint on each.
+for subdir in $DIRS; do
+ pushd $subdir
+ if golint |
+ grep -v CannotSetRFMon | # pcap exported error name
+ grep -v DataLost | # tcpassembly/tcpreader exported error name
+ grep .; then
+ exit 1
+ fi
+ popd
+done
+
+pushd layers
+for file in *.go; do
+ if cat .lint_blacklist | grep -q $file; then
+ echo "Skipping lint of $file due to .lint_blacklist"
+ elif golint $file | grep .; then
+ echo "Lint error in file $file"
+ exit 1
+ fi
+done
+popd
diff --git a/vendor/github.com/google/gopacket/.travis.govet.sh b/vendor/github.com/google/gopacket/.travis.govet.sh
new file mode 100755
index 0000000..a5c1354
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.govet.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+DIRS=". layers pcap pcapgo tcpassembly tcpassembly/tcpreader routing ip4defrag bytediff macs defrag/lcmdefrag"
+set -e
+for subdir in $DIRS; do
+ pushd $subdir
+ go vet
+ popd
+done
diff --git a/vendor/github.com/google/gopacket/.travis.install.sh b/vendor/github.com/google/gopacket/.travis.install.sh
new file mode 100755
index 0000000..648c901
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.install.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -ev
+
+go get github.com/google/gopacket
+go get github.com/google/gopacket/layers
+go get github.com/google/gopacket/tcpassembly
+go get github.com/google/gopacket/reassembly
+go get github.com/google/gopacket/pcapgo
diff --git a/vendor/github.com/google/gopacket/.travis.script.sh b/vendor/github.com/google/gopacket/.travis.script.sh
new file mode 100755
index 0000000..a483f4f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.script.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -ev
+
+go test github.com/google/gopacket
+go test github.com/google/gopacket/layers
+go test github.com/google/gopacket/tcpassembly
+go test github.com/google/gopacket/reassembly
+go test github.com/google/gopacket/pcapgo
+go test github.com/google/gopacket/pcap
diff --git a/vendor/github.com/google/gopacket/.travis.yml b/vendor/github.com/google/gopacket/.travis.yml
new file mode 100644
index 0000000..8ebb01d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.yml
@@ -0,0 +1,55 @@
+language: go
+go:
+ - 1.11.x
+ - 1.12.x
+ - master
+
+addons:
+ apt:
+ packages:
+ libpcap-dev
+
+# use modules except for older versions (see below)
+install: true
+
+env:
+ - GO111MODULE=on
+
+script: ./.travis.script.sh
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: master
+
+jobs:
+ include:
+ - go: 1.5.x
+ install: ./.travis.install.sh
+ - go: 1.6.x
+ install: ./.travis.install.sh
+ - go: 1.7.x
+ install: ./.travis.install.sh
+ - go: 1.8.x
+ install: ./.travis.install.sh
+ - go: 1.9.x
+ install: ./.travis.install.sh
+ - go: 1.10.x
+ install: ./.travis.install.sh
+ - os: osx
+ go: 1.x
+ - os: windows
+ go: 1.x
+ # winpcap does not work on travis ci - so install nmap to get libpcap
+ before_install: choco install nmap
+ - stage: style
+ name: "fmt/vet/lint"
+ go: 1.x
+ script:
+ - ./.travis.gofmt.sh
+ - ./.travis.govet.sh
+ - ./.travis.golint.sh
+
+stages:
+ - style
+ - test
diff --git a/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/google/gopacket/AUTHORS
new file mode 100644
index 0000000..e8a225f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/AUTHORS
@@ -0,0 +1,53 @@
+AUTHORS AND MAINTAINERS:
+
+MAIN DEVELOPERS:
+Graeme Connell
+
+AUTHORS:
+Nigel Tao
+Cole Mickens
+Ben Daglish
+Luis Martinez
+Remco Verhoef
+Hiroaki Kawai
+Lukas Lueg
+Laurent Hausermann
+Bill Green
+Christian Mäder
+Gernot Vormayr
+Vitor Garcia Graveto
+Elias Chavarria Reyes
+Daniel Rittweiler
+
+CONTRIBUTORS:
+Attila Oláh
+Vittus Mikiassen
+Matthias Radestock
+Matthew Sackman
+Loic Prylli
+Alexandre Fiori
+Adrian Tam
+Satoshi Matsumoto
+David Stainton
+Jesse Ward
+Kane Mathers
+Jose Selvi
+Yerden Zhumabekov
+
+-----------------------------------------------
+FORKED FROM github.com/akrennmair/gopcap
+ALL THE FOLLOWING ARE FOR THAT PROJECT
+
+MAIN DEVELOPERS:
+Andreas Krennmair
+
+CONTRIBUTORS:
+Andrea Nall
+Daniel Arndt
+Dustin Sallings
+Graeme Connell
+Guillaume Savary
+Mark Smith
+Miek Gieben
+Mike Bell
+Trevor Strohman
diff --git a/vendor/github.com/google/gopacket/CONTRIBUTING.md b/vendor/github.com/google/gopacket/CONTRIBUTING.md
new file mode 100644
index 0000000..99ab7a2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/CONTRIBUTING.md
@@ -0,0 +1,215 @@
+Contributing To gopacket
+========================
+
+So you've got some code and you'd like it to be part of gopacket... wonderful!
+We're happy to accept contributions, whether they're fixes to old protocols, new
+protocols entirely, or anything else you think would improve the gopacket
+library. This document is designed to help you to do just that.
+
+The first section deals with the plumbing: how to actually get a change
+submitted.
+
+The second section deals with coding style... Go is great in that it
+has a uniform style implemented by 'go fmt', but there are still some decisions
+we've made that go above and beyond, and if you follow them, they won't come up
+in your code review.
+
+The third section deals with some of the implementation decisions we've made,
+which may help you to understand the current code and which we may ask you to
+conform to (or provide compelling reasons for ignoring).
+
+Overall, we hope this document will help you to understand our system and write
+great code which fits in, and help us to turn around on your code review quickly
+so the code can make it into the master branch as quickly as possible.
+
+
+How To Submit Code
+------------------
+
+We use github.com's Pull Request feature to receive code contributions from
+external contributors. See
+https://help.github.com/articles/creating-a-pull-request/ for details on
+how to create a request.
+
+Also, there's a local script `gc` in the base directory of GoPacket that
+runs a local set of checks, which should give you relatively high confidence
+that your pull won't fail github pull checks.
+
+```sh
+go get github.com/google/gopacket
+cd $GOPATH/src/github.com/google/gopacket
+git checkout -b <mybranch> # create a new branch to work from
+... code code code ...
+./gc # Run this to do local commits, it performs a number of checks
+```
+
+To sum up:
+
+* DO
+ + Pull down the latest version.
+ + Make a feature-specific branch.
+ + Code using the style and methods discussed in the rest of this document.
+ + Use the ./gc command to do local commits or check correctness.
+ + Push your new feature branch up to github.com, as a pull request.
+ + Handle comments and requests from reviewers, pushing new commits up to
+ your feature branch as problems are addressed.
+ + Put interesting comments and discussions into commit comments.
+* DON'T
+ + Push to someone else's branch without their permission.
+
+
+Coding Style
+------------
+
+* Go code must be run through `go fmt`, `go vet`, and `golint`
+* Follow http://golang.org/doc/effective_go.html as much as possible.
+ + In particular, http://golang.org/doc/effective_go.html#mixed-caps. Enums
+  should be CamelCase, with acronyms capitalized (TCPSourcePort, vs.
+ TcpSourcePort or TCP_SOURCE_PORT).
+* Bonus points for giving enum types a String() method.
+* Any exported types or functions should have commentary
+ (http://golang.org/doc/effective_go.html#commentary)
+
+
+Coding Methods And Implementation Notes
+---------------------------------------
+
+### Error Handling
+
+Many times, you'll be decoding a protocol and run across something bad, a packet
+corruption or the like. How do you handle this? First off, ALWAYS report the
+error. You can do this either by returning the error from the decode() function
+(most common), or if you're up for it you can implement and add an ErrorLayer
+through the packet builder (the first method is a simple shortcut that does
+exactly this, then stops any future decoding).
+
+Often, you'll already have decoded some part of your protocol by the time you hit
+your error. Use your own discretion to determine whether the stuff you've
+already decoded should be returned to the caller or not:
+
+```go
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+ prot := &MyProtocol{}
+ if len(data) < 10 {
+ // This error occurred before we did ANYTHING, so there's nothing in my
+ // protocol that the caller could possibly want. Just return the error.
+ return fmt.Errorf("Length %d less than 10", len(data))
+ }
+ prot.ImportantField1 = data[:5]
+ prot.ImportantField2 = data[5:10]
+ // At this point, we've already got enough information in 'prot' to
+ // warrant returning it to the caller, so we'll add it now.
+ p.AddLayer(prot)
+ if len(data) < 15 {
+ // We encountered an error later in the packet, but the caller already
+ // has the important info we've gleaned so far.
+ return fmt.Errorf("Length %d less than 15", len(data))
+ }
+ prot.ImportantField3 = data[10:15]
+ return nil // We've already added the layer, we can just return success.
+}
+```
+
+In general, our code follows the approach of returning the first error it
+encounters, and we don't trust any bytes after the first error we see.
+
+### What Is A Layer?
+
+The definition of a layer is up to the discretion of the coder. It should be
+something important enough that it's actually useful to the caller (IE: every
+TLV value should probably NOT be a layer). However, it can be more granular
+than a single protocol... IPv6 and SCTP both implement many layers to handle the
+various parts of the protocol. Use your best judgement, and prepare to defend
+your decisions during code review. ;)
+
+### Performance
+
+We strive to make gopacket as fast as possible while still providing lots of
+features. In general, this means:
+
+* Focus performance tuning on common protocols (IP4/6, TCP, etc), and optimize
+ others on an as-needed basis (tons of MPLS on your network? Time to optimize
+ MPLS!)
+* Use fast operations. See the toplevel benchmark_test for benchmarks of some
+ of Go's underlying features and types.
+* Test your performance changes! You should use the ./gc script's --benchmark
+ flag to submit any performance-related changes. Use pcap/gopacket_benchmark
+ to test your change against a PCAP file based on your traffic patterns.
+* Don't be TOO hacky. Sometimes, removing an unused field from a struct causes
+ a huge performance hit, due to the way that Go currently handles its segmented
+ stack... don't be afraid to clean it up anyway. We'll trust the Go compiler
+ to get good enough over time to handle this. Also, this type of
+ compiler-specific optimization is very fragile; someone adding a field to an
+ entirely different struct elsewhere in the codebase could reverse any gains
+ you might achieve by aligning your allocations.
+* Try to minimize memory allocations. If possible, use []byte to reference
+ pieces of the input, instead of using string, which requires copying the bytes
+ into a new memory allocation.
+* Think hard about what should be evaluated lazily vs. not. In general, a
+ layer's struct should almost exactly mirror the layer's frame. Anything
+ that's more interesting should be a function. This may not always be
+ possible, but it's a good rule of thumb.
+* Don't fear micro-optimizations. With the above in mind, we welcome
+ micro-optimizations that we think will have positive/neutral impacts on the
+ majority of workloads. A prime example of this is pre-allocating certain
+ structs within a larger one:
+
+```go
+type MyProtocol struct {
+ // Most packets have 1-4 of VeryCommon, so we preallocate it here.
+ initialAllocation [4]uint32
+ VeryCommon []uint32
+}
+
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+ prot := &MyProtocol{}
+	prot.VeryCommon = prot.initialAllocation[:0]
+ for len(data) > 4 {
+ field := binary.BigEndian.Uint32(data[:4])
+ data = data[4:]
+		// Since we're using the underlying initialAllocation, we won't need to
+		// allocate new memory for the following append unless we see more than
+		// 4 values (16 bytes) of data, which should be the uncommon case.
+ prot.VeryCommon = append(prot.VeryCommon, field)
+ }
+ p.AddLayer(prot)
+ if len(data) > 0 {
+ return fmt.Errorf("MyProtocol packet has %d bytes left after decoding", len(data))
+ }
+ return nil
+}
+```
+
+### Slices And Data
+
+If you're pulling a slice from the data you're decoding, don't copy it. Just
+use the slice itself.
+
+```go
+type MyProtocol struct {
+ A, B net.IP
+}
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+ p.AddLayer(&MyProtocol{
+ A: data[:4],
+ B: data[4:8],
+ })
+ return nil
+}
+```
+
+The caller has already agreed, by using this library, that they won't modify the
+set of bytes they pass in to the decoder, or the library has already copied the
+set of bytes to a read-only location. See DecodeOptions.NoCopy for more
+information.
+
+### Enums/Types
+
+If a protocol has an integer field (uint8, uint16, etc) with a couple of known
+values that mean something special, make it a type. This allows us to do really
+nice things like adding a String() function to them, so we can more easily
+display those to users. Check out layers/enums.go for one example, as well as
+layers/icmp.go for layer-specific enums.
+
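+A minimal sketch of this pattern (CongestionState and its values are
+hypothetical, not taken from the layers package):
+
+```go
+// CongestionState is a hypothetical one-byte protocol field with a few
+// well-known values.
+type CongestionState uint8
+
+const (
+	CongestionStateNone CongestionState = 0
+	CongestionStateMild CongestionState = 1
+)
+
+// String renders known values by name, falling back to the raw integer,
+// which makes packet dumps much easier to read.
+func (c CongestionState) String() string {
+	switch c {
+	case CongestionStateNone:
+		return "None"
+	case CongestionStateMild:
+		return "Mild"
+	}
+	return fmt.Sprintf("UnknownCongestionState(%d)", uint8(c))
+}
+```
+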
+When naming things, try for descriptiveness over succinctness. For example,
+choose DNSResponseRecord over DNSRR.
diff --git a/vendor/github.com/google/gopacket/LICENSE b/vendor/github.com/google/gopacket/LICENSE
new file mode 100644
index 0000000..2100d52
--- /dev/null
+++ b/vendor/github.com/google/gopacket/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Google, Inc. All rights reserved.
+Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Andreas Krennmair, Google, nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/gopacket/README.md b/vendor/github.com/google/gopacket/README.md
new file mode 100644
index 0000000..a2f48a9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/README.md
@@ -0,0 +1,12 @@
+# GoPacket
+
+This library provides packet decoding capabilities for Go.
+See [godoc](https://godoc.org/github.com/google/gopacket) for more details.
+
+[](https://travis-ci.org/google/gopacket)
+[](https://godoc.org/github.com/google/gopacket)
+
+Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+
+Originally forked from the gopcap project written by Andreas
+Krennmair (http://github.com/akrennmair/gopcap).
diff --git a/vendor/github.com/google/gopacket/base.go b/vendor/github.com/google/gopacket/base.go
new file mode 100644
index 0000000..91e150c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/base.go
@@ -0,0 +1,178 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// Layer represents a single decoded packet layer (using either the
+// OSI or TCP/IP definition of a layer). When decoding, a packet's data is
+// broken up into a number of layers. The caller may call LayerType() to
+// figure out which type of layer they've received from the packet. Optionally,
+// they may then use a type assertion to get the actual layer type for deep
+// inspection of the data.
+type Layer interface {
+ // LayerType is the gopacket type for this layer.
+ LayerType() LayerType
+ // LayerContents returns the set of bytes that make up this layer.
+ LayerContents() []byte
+ // LayerPayload returns the set of bytes contained within this layer, not
+ // including the layer itself.
+ LayerPayload() []byte
+}
+
+// Payload is a Layer containing the payload of a packet. The definition of
+// what constitutes the payload of a packet depends on previous layers; for
+// TCP and UDP, we stop decoding above layer 4 and return the remaining
+// bytes as a Payload. Payload is an ApplicationLayer.
+type Payload []byte
+
+// LayerType returns LayerTypePayload
+func (p Payload) LayerType() LayerType { return LayerTypePayload }
+
+// LayerContents returns the bytes making up this layer.
+func (p Payload) LayerContents() []byte { return []byte(p) }
+
+// LayerPayload returns the payload within this layer.
+func (p Payload) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as bytes.
+func (p Payload) Payload() []byte { return []byte(p) }
+
+// String implements fmt.Stringer.
+func (p Payload) String() string { return fmt.Sprintf("%d byte(s)", len(p)) }
+
+// GoString implements fmt.GoStringer.
+func (p Payload) GoString() string { return LongBytesGoString([]byte(p)) }
+
+// CanDecode implements DecodingLayer.
+func (p Payload) CanDecode() LayerClass { return LayerTypePayload }
+
+// NextLayerType implements DecodingLayer.
+func (p Payload) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Payload) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+ *p = Payload(data)
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p Payload) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+ bytes, err := b.PrependBytes(len(p))
+ if err != nil {
+ return err
+ }
+ copy(bytes, p)
+ return nil
+}
+
+// decodePayload decodes data by returning it all in a Payload layer.
+func decodePayload(data []byte, p PacketBuilder) error {
+ payload := &Payload{}
+ if err := payload.DecodeFromBytes(data, p); err != nil {
+ return err
+ }
+ p.AddLayer(payload)
+ p.SetApplicationLayer(payload)
+ return nil
+}
+
+// Fragment is a Layer containing a fragment of a larger frame, used by layers
+// like IPv4 and IPv6 that allow for fragmentation of their payloads.
+type Fragment []byte
+
+// LayerType returns LayerTypeFragment
+func (p *Fragment) LayerType() LayerType { return LayerTypeFragment }
+
+// LayerContents implements Layer.
+func (p *Fragment) LayerContents() []byte { return []byte(*p) }
+
+// LayerPayload implements Layer.
+func (p *Fragment) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as a byte slice.
+func (p *Fragment) Payload() []byte { return []byte(*p) }
+
+// String implements fmt.Stringer.
+func (p *Fragment) String() string { return fmt.Sprintf("%d byte(s)", len(*p)) }
+
+// CanDecode implements DecodingLayer.
+func (p *Fragment) CanDecode() LayerClass { return LayerTypeFragment }
+
+// NextLayerType implements DecodingLayer.
+func (p *Fragment) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Fragment) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+ *p = Fragment(data)
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *Fragment) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+ bytes, err := b.PrependBytes(len(*p))
+ if err != nil {
+ return err
+ }
+ copy(bytes, *p)
+ return nil
+}
+
+// decodeFragment decodes data by returning it all in a Fragment layer.
+func decodeFragment(data []byte, p PacketBuilder) error {
+ payload := &Fragment{}
+ if err := payload.DecodeFromBytes(data, p); err != nil {
+ return err
+ }
+ p.AddLayer(payload)
+ p.SetApplicationLayer(payload)
+ return nil
+}
+
+// These layers correspond to Internet Protocol Suite (TCP/IP) layers, and their
+// corresponding OSI layers, as best as possible.
+
+// LinkLayer is the packet layer corresponding to TCP/IP layer 1 (OSI layer 2)
+type LinkLayer interface {
+ Layer
+ LinkFlow() Flow
+}
+
+// NetworkLayer is the packet layer corresponding to TCP/IP layer 2 (OSI
+// layer 3)
+type NetworkLayer interface {
+ Layer
+ NetworkFlow() Flow
+}
+
+// TransportLayer is the packet layer corresponding to the TCP/IP layer 3 (OSI
+// layer 4)
+type TransportLayer interface {
+ Layer
+ TransportFlow() Flow
+}
+
+// ApplicationLayer is the packet layer corresponding to the TCP/IP layer 4 (OSI
+// layer 7), also known as the packet payload.
+type ApplicationLayer interface {
+ Layer
+ Payload() []byte
+}
+
+// ErrorLayer is a packet layer created when decoding of the packet has failed.
+// Its payload is all the bytes that we were unable to decode, and the returned
+// error details why the decoding failed.
+type ErrorLayer interface {
+ Layer
+ Error() error
+}
diff --git a/vendor/github.com/google/gopacket/decode.go b/vendor/github.com/google/gopacket/decode.go
new file mode 100644
index 0000000..2633f84
--- /dev/null
+++ b/vendor/github.com/google/gopacket/decode.go
@@ -0,0 +1,157 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "errors"
+)
+
+// DecodeFeedback is used by DecodingLayer layers to provide decoding metadata.
+type DecodeFeedback interface {
+ // SetTruncated should be called if during decoding you notice that a packet
+ // is shorter than internal layer variables (HeaderLength, or the like) say it
+ // should be. It sets packet.Metadata().Truncated.
+ SetTruncated()
+}
+
+type nilDecodeFeedback struct{}
+
+func (nilDecodeFeedback) SetTruncated() {}
+
+// NilDecodeFeedback implements DecodeFeedback by doing nothing.
+var NilDecodeFeedback DecodeFeedback = nilDecodeFeedback{}
+
+// PacketBuilder is used by layer decoders to store the layers they've decoded,
+// and to defer future decoding via NextDecoder.
+// Typically, the pattern for use is:
+// func (m *myDecoder) Decode(data []byte, p PacketBuilder) error {
+// if myLayer, err := myDecodingLogic(data); err != nil {
+// return err
+// } else {
+// p.AddLayer(myLayer)
+// }
+// // maybe do this, if myLayer is a LinkLayer
+// p.SetLinkLayer(myLayer)
+// return p.NextDecoder(nextDecoder)
+// }
+type PacketBuilder interface {
+ DecodeFeedback
+ // AddLayer should be called by a decoder immediately upon successful
+ // decoding of a layer.
+ AddLayer(l Layer)
+ // The following functions set the various specific layers in the final
+ // packet. Note that if many layers call SetX, the first call is kept and all
+ // other calls are ignored.
+ SetLinkLayer(LinkLayer)
+ SetNetworkLayer(NetworkLayer)
+ SetTransportLayer(TransportLayer)
+ SetApplicationLayer(ApplicationLayer)
+ SetErrorLayer(ErrorLayer)
+ // NextDecoder should be called by a decoder when they're done decoding a
+ // packet layer but not done with decoding the entire packet. The next
+ // decoder will be called to decode the last AddLayer's LayerPayload.
+ // Because of this, NextDecoder must only be called once all other
+ // PacketBuilder calls have been made. Set*Layer and AddLayer calls after
+ // NextDecoder calls will behave incorrectly.
+ NextDecoder(next Decoder) error
+	// DumpPacketData is used solely for debugging. If you come across an error
+ // you need to diagnose while processing a packet, call this and your packet's
+ // data will be dumped to stderr so you can create a test. This should never
+ // be called from a production decoder.
+ DumpPacketData()
+ // DecodeOptions returns the decode options
+ DecodeOptions() *DecodeOptions
+}
+
+// Decoder is an interface for logic to decode a packet layer. Users may
+// implement a Decoder to handle their own strange packet types, or may use one
+// of the many decoders available in the 'layers' subpackage to decode things
+// for them.
+type Decoder interface {
+ // Decode decodes the bytes of a packet, sending decoded values and other
+ // information to PacketBuilder, and returning an error if unsuccessful. See
+ // the PacketBuilder documentation for more details.
+ Decode([]byte, PacketBuilder) error
+}
+
+// DecodeFunc wraps a function to make it a Decoder.
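+// For example (illustrative only), a function literal can be used anywhere a
+// Decoder is expected:
+//	d := DecodeFunc(func(data []byte, p PacketBuilder) error {
+//		return p.NextDecoder(DecodePayload)
+//	})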
+type DecodeFunc func([]byte, PacketBuilder) error
+
+// Decode implements Decoder by calling itself.
+func (d DecodeFunc) Decode(data []byte, p PacketBuilder) error {
+ // function, call thyself.
+ return d(data, p)
+}
+
+// DecodePayload is a Decoder that returns a Payload layer containing all
+// remaining bytes.
+var DecodePayload Decoder = DecodeFunc(decodePayload)
+
+// DecodeUnknown is a Decoder that returns an Unknown layer containing all
+// remaining bytes, useful if you run up against a layer that you're unable to
+// decode yet. This layer is considered an ErrorLayer.
+var DecodeUnknown Decoder = DecodeFunc(decodeUnknown)
+
+// DecodeFragment is a Decoder that returns a Fragment layer containing all
+// remaining bytes.
+var DecodeFragment Decoder = DecodeFunc(decodeFragment)
+
+// LayerTypeZero is an invalid layer type, but can be used to determine whether
+// a layer type has actually been set correctly.
+var LayerTypeZero = RegisterLayerType(0, LayerTypeMetadata{Name: "Unknown", Decoder: DecodeUnknown})
+
+// LayerTypeDecodeFailure is the layer type for the default error layer.
+var LayerTypeDecodeFailure = RegisterLayerType(1, LayerTypeMetadata{Name: "DecodeFailure", Decoder: DecodeUnknown})
+
+// LayerTypePayload is the layer type for a payload that we don't try to decode
+// but treat as a success, IE: an application-level payload.
+var LayerTypePayload = RegisterLayerType(2, LayerTypeMetadata{Name: "Payload", Decoder: DecodePayload})
+
+// LayerTypeFragment is the layer type for a fragment of a layer transported
+// by an underlying layer that supports fragmentation.
+var LayerTypeFragment = RegisterLayerType(3, LayerTypeMetadata{Name: "Fragment", Decoder: DecodeFragment})
+
+// DecodeFailure is a packet layer created if decoding of the packet data failed
+// for some reason. It implements ErrorLayer. LayerContents will be the entire
+// set of bytes that failed to parse, and Error will return the reason parsing
+// failed.
+type DecodeFailure struct {
+ data []byte
+ err error
+ stack []byte
+}
+
+// Error returns the error encountered during decoding.
+func (d *DecodeFailure) Error() error { return d.err }
+
+// LayerContents implements Layer.
+func (d *DecodeFailure) LayerContents() []byte { return d.data }
+
+// LayerPayload implements Layer.
+func (d *DecodeFailure) LayerPayload() []byte { return nil }
+
+// String implements fmt.Stringer.
+func (d *DecodeFailure) String() string {
+ return "Packet decoding error: " + d.Error().Error()
+}
+
+// Dump implements Dumper.
+func (d *DecodeFailure) Dump() (s string) {
+ if d.stack != nil {
+ s = string(d.stack)
+ }
+ return
+}
+
+// LayerType returns LayerTypeDecodeFailure
+func (d *DecodeFailure) LayerType() LayerType { return LayerTypeDecodeFailure }
+
+// decodeUnknown "decodes" unsupported data types by returning an error.
+// This decoder will thus always return a DecodeFailure layer.
+func decodeUnknown(data []byte, p PacketBuilder) error {
+ return errors.New("Layer type not currently supported")
+}
diff --git a/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/google/gopacket/doc.go
new file mode 100644
index 0000000..5937fd1
--- /dev/null
+++ b/vendor/github.com/google/gopacket/doc.go
@@ -0,0 +1,432 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package gopacket provides packet decoding for the Go language.
+
+gopacket contains many sub-packages with additional functionality you may find
+useful, including:
+
+  * layers: You'll probably use this every time. This contains the logic
+ built into gopacket for decoding packet protocols. Note that all example
+ code below assumes that you have imported both gopacket and
+ gopacket/layers.
+ * pcap: C bindings to use libpcap to read packets off the wire.
+ * pfring: C bindings to use PF_RING to read packets off the wire.
+ * afpacket: C bindings for Linux's AF_PACKET to read packets off the wire.
+ * tcpassembly: TCP stream reassembly
+
+Also, if you're looking to dive right into code, see the examples subdirectory
+for numerous simple binaries built using gopacket libraries.
+
+Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket,
+and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+
+Basic Usage
+
+gopacket takes in packet data as a []byte and decodes it into a packet with
+a non-zero number of "layers". Each layer corresponds to a protocol
+within the bytes. Once a packet has been decoded, the layers of the packet
+can be requested from the packet.
+
+ // Decode a packet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ // Get the TCP layer from this packet
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+ fmt.Println("This is a TCP packet!")
+ // Get actual TCP data from this layer
+ tcp, _ := tcpLayer.(*layers.TCP)
+ fmt.Printf("From src port %d to dst port %d\n", tcp.SrcPort, tcp.DstPort)
+ }
+ // Iterate over all layers, printing out each layer type
+ for _, layer := range packet.Layers() {
+ fmt.Println("PACKET LAYER:", layer.LayerType())
+ }
+
+Packets can be decoded from a number of starting points. Many of our base
+types implement Decoder, which allows us to decode packets for which
+we don't have full data.
+
+ // Decode an ethernet packet
+ ethP := gopacket.NewPacket(p1, layers.LayerTypeEthernet, gopacket.Default)
+ // Decode an IPv6 header and everything it contains
+ ipP := gopacket.NewPacket(p2, layers.LayerTypeIPv6, gopacket.Default)
+ // Decode a TCP header and its payload
+ tcpP := gopacket.NewPacket(p3, layers.LayerTypeTCP, gopacket.Default)
+
+
+Reading Packets From A Source
+
+Most of the time, you won't just have a []byte of packet data lying around.
+Instead, you'll want to read packets in from somewhere (file, interface, etc)
+and process them. To do that, you'll want to build a PacketSource.
+
+First, you'll need to construct an object that implements the PacketDataSource
+interface. There are implementations of this interface bundled with gopacket
+in the gopacket/pcap and gopacket/pfring subpackages... see their documentation
+for more information on their usage. Once you have a PacketDataSource, you can
+pass it into NewPacketSource, along with a Decoder of your choice, to create
+a PacketSource.
+
+Once you have a PacketSource, you can read packets from it in multiple ways.
+See the docs for PacketSource for more details. The easiest method is the
+Packets function, which returns a channel, then asynchronously writes new
+packets into that channel, closing the channel if the packetSource hits an
+end-of-file.
+
+ packetSource := ... // construct using pcap or pfring
+ for packet := range packetSource.Packets() {
+ handlePacket(packet) // do something with each packet
+ }
+
+You can change the decoding options of the packetSource by setting fields in
+packetSource.DecodeOptions... see the following sections for more details.
+
+
+Lazy Decoding
+
+gopacket optionally decodes packet data lazily, meaning it
+only decodes a packet layer when it needs to handle a function call.
+
+ // Create a packet, but don't actually decode anything yet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Now, decode the packet up to the first IPv4 layer found but no further.
+ // If no IPv4 layer was found, the whole packet will be decoded looking for
+ // it.
+ ip4 := packet.Layer(layers.LayerTypeIPv4)
+ // Decode all layers and return them. The layers up to the first IPv4 layer
+ // are already decoded, and will not require decoding a second time.
+ layers := packet.Layers()
+
+Lazily-decoded packets are not concurrency-safe. Since layers have not all been
+decoded, each call to Layer() or Layers() has the potential to mutate the packet
+in order to decode the next layer. If a packet is used
+in multiple goroutines concurrently, don't use gopacket.Lazy: gopacket will
+then decode the packet fully up front, and future function calls won't mutate
+the object.
+
+
+NoCopy Decoding
+
+By default, gopacket will copy the slice passed to NewPacket and store the
+copy within the packet, so future mutations to the bytes underlying the slice
+don't affect the packet and its layers. If you can guarantee that the
+underlying slice bytes won't be changed, you can use NoCopy to tell
+gopacket.NewPacket, and it'll use the passed-in slice itself.
+
+ // This channel returns new byte slices, each of which points to a new
+ // memory location that's guaranteed immutable for the duration of the
+ // packet.
+ for data := range myByteSliceChannel {
+ p := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
+ doSomethingWithPacket(p)
+ }
+
+The fastest method of decoding is to use both Lazy and NoCopy, but note from
+the many caveats above that for some implementations either or both may be
+dangerous.
+
+
+Pointers To Known Layers
+
+During decoding, certain layers are stored in the packet as well-known
+layer types. For example, IPv4 and IPv6 are both considered NetworkLayer
+layers, while TCP and UDP are both TransportLayer layers. We support 4
+layers, corresponding to the 4 layers of the TCP/IP layering scheme (roughly
+analogous to layers 2, 3, 4, and 7 of the OSI model). To access these,
+you can use the packet.LinkLayer, packet.NetworkLayer,
+packet.TransportLayer, and packet.ApplicationLayer functions. Each of
+these functions returns a corresponding interface
+(gopacket.{Link,Network,Transport,Application}Layer). The first three
+provide methods for getting src/dst addresses for that particular layer,
+while the final layer provides a Payload function to get payload data.
+This is helpful, for example, to get payloads for all packets regardless
+of their underlying data type:
+
+ // Get packets from some source
+ for packet := range someSource {
+ if app := packet.ApplicationLayer(); app != nil {
+ if strings.Contains(string(app.Payload()), "magic string") {
+ fmt.Println("Found magic string in a packet!")
+ }
+ }
+ }
+
+A particularly useful layer is ErrorLayer, which is set whenever there's
+an error parsing part of the packet.
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ if err := packet.ErrorLayer(); err != nil {
+ fmt.Println("Error decoding some part of the packet:", err)
+ }
+
+Note that we don't return an error from NewPacket because we may have decoded
+a number of layers successfully before running into our erroneous layer. You
+may still be able to get your Ethernet and IPv4 layers correctly, even if
+your TCP layer is malformed.
+
+
+Flow And Endpoint
+
+gopacket has two useful objects, Flow and Endpoint, for communicating in a protocol
+independent manner the fact that a packet is coming from A and going to B.
+The general layer types LinkLayer, NetworkLayer, and TransportLayer all provide
+methods for extracting their flow information, without worrying about the type
+of the underlying Layer.
+
+A Flow is a simple object made up of a set of two Endpoints, one source and one
+destination. It details the sender and receiver of the Layer of the Packet.
+
+An Endpoint is a hashable representation of a source or destination. For
+example, for LayerTypeIPv4, an Endpoint contains the IP address bytes for a v4
+IP packet. A Flow can be broken into Endpoints, and Endpoints can be combined
+into Flows:
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ netFlow := packet.NetworkLayer().NetworkFlow()
+ src, dst := netFlow.Endpoints()
+ reverseFlow := gopacket.NewFlow(dst, src)
+
+Both Endpoint and Flow objects can be used as map keys, and the equality
+operator can compare them, so you can easily group together all packets
+based on endpoint criteria:
+
+	flows := make(map[gopacket.Endpoint]chan gopacket.Packet)
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Send all TCP packets to channels based on their destination port.
+	if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+		tcp := tcpLayer.(*layers.TCP)
+		flows[tcp.TransportFlow().Dst()] <- packet
+	}
+ // Look for all packets with the same source and destination network address
+ if net := packet.NetworkLayer(); net != nil {
+ src, dst := net.NetworkFlow().Endpoints()
+ if src == dst {
+			fmt.Printf("Fishy packet has same network source and dst: %s\n", src)
+ }
+ }
+ // Find all packets coming from UDP port 1000 to UDP port 500
+ interestingFlow := gopacket.NewFlow(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
+	if t := packet.TransportLayer(); t != nil && t.TransportFlow() == interestingFlow {
+ fmt.Println("Found that UDP flow I was looking for!")
+ }
+
+For load-balancing purposes, both Flow and Endpoint have FastHash() functions,
+which provide quick, non-cryptographic hashes of their contents. Of particular
+importance is the fact that Flow FastHash() is symmetric: A->B will have the same
+hash as B->A. An example usage could be:
+
+	var channels [8]chan gopacket.Packet
+ for i := 0; i < 8; i++ {
+ channels[i] = make(chan gopacket.Packet)
+ go packetHandler(channels[i])
+ }
+ for packet := range getPackets() {
+ if net := packet.NetworkLayer(); net != nil {
+ channels[int(net.NetworkFlow().FastHash()) & 0x7] <- packet
+ }
+ }
+
+This allows us to split up a packet stream while still making sure that each
+stream sees all packets for a flow (and its bidirectional opposite).
+
+
+Implementing Your Own Decoder
+
+If your network has some strange encapsulation, you can implement your own
+decoder. In this example, we handle Ethernet packets which are encapsulated
+in a 4-byte header.
+
+ // Create a layer type, should be unique and high, so it doesn't conflict,
+ // giving it a name and a decoder to use.
+ var MyLayerType = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{Name: "MyLayerType", Decoder: gopacket.DecodeFunc(decodeMyLayer)})
+
+ // Implement my layer
+ type MyLayer struct {
+ StrangeHeader []byte
+ payload []byte
+ }
+ func (m MyLayer) LayerType() gopacket.LayerType { return MyLayerType }
+ func (m MyLayer) LayerContents() []byte { return m.StrangeHeader }
+ func (m MyLayer) LayerPayload() []byte { return m.payload }
+
+ // Now implement a decoder... this one strips off the first 4 bytes of the
+ // packet.
+ func decodeMyLayer(data []byte, p gopacket.PacketBuilder) error {
+ // Create my layer
+ p.AddLayer(&MyLayer{data[:4], data[4:]})
+ // Determine how to handle the rest of the packet
+ return p.NextDecoder(layers.LayerTypeEthernet)
+ }
+
+ // Finally, decode your packets:
+ p := gopacket.NewPacket(data, MyLayerType, gopacket.Lazy)
+
+See the docs for Decoder and PacketBuilder for more details on how coding
+decoders works, or look at RegisterLayerType and RegisterEndpointType to see how
+to add layer/endpoint types to gopacket.
+
+
+Fast Decoding With DecodingLayerParser
+
+TLDR: DecodingLayerParser decodes packet data in about 10% of the time that
+NewPacket takes, but only works for known packet stacks.
+
+Basic decoding using gopacket.NewPacket or PacketSource.Packets is somewhat slow
+due to its need to allocate a new packet and every respective layer. It's very
+versatile and can handle all known layer types, but sometimes you really only
+care about a specific set of layers regardless, so that versatility is wasted.
+
+DecodingLayerParser avoids memory allocation altogether by decoding packet
+layers directly into preallocated objects, which you can then reference to get
+the packet's information. A quick example:
+
+ func main() {
+ var eth layers.Ethernet
+ var ip4 layers.IPv4
+ var ip6 layers.IPv6
+ var tcp layers.TCP
+		parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp)
+ decoded := []gopacket.LayerType{}
+ for packetData := range somehowGetPacketData() {
+ if err := parser.DecodeLayers(packetData, &decoded); err != nil {
+ fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+ continue
+ }
+ for _, layerType := range decoded {
+ switch layerType {
+ case layers.LayerTypeIPv6:
+ fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP)
+ case layers.LayerTypeIPv4:
+ fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP)
+ }
+ }
+ }
+ }
+
+The important thing to note here is that the parser is modifying the passed in
+layers (eth, ip4, ip6, tcp) instead of allocating new ones, thus greatly
+speeding up the decoding process. It's even branching based on layer type...
+it'll handle an (eth, ip4, tcp) or (eth, ip6, tcp) stack. However, it won't
+handle any other type... since no other decoders were passed in, an (eth, ip4,
+udp) stack will stop decoding after ip4, and only pass back [LayerTypeEthernet,
+LayerTypeIPv4] through the 'decoded' slice (along with an error saying it can't
+decode a UDP packet).
+
+Unfortunately, not all layers can be used by DecodingLayerParser... only those
+implementing the DecodingLayer interface are usable. Also, it's possible to
+create DecodingLayers that are not themselves Layers... see
+layers.IPv6ExtensionSkipper for an example of this.
+
+Faster And Customized Decoding with DecodingLayerContainer
+
+By default, DecodingLayerParser uses a native map to store and look up the
+layers it can decode. Though versatile, this is not always optimal: if you
+have only a few layers, sparse array indexing or a linear array scan may be
+faster.
+
+To accommodate these scenarios, the DecodingLayerContainer interface is
+provided along with its implementations: DecodingLayerSparse,
+DecodingLayerArray and DecodingLayerMap. You can specify a container
+implementation to DecodingLayerParser with the SetDecodingLayerContainer
+method. Example:
+
+ dlp := gopacket.NewDecodingLayerParser(LayerTypeEthernet)
+ dlp.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
+ var eth layers.Ethernet
+	dlp.AddDecodingLayer(&eth)
+ // ... add layers and use DecodingLayerParser as usual...
+
+To skip one level of indirection (though sacrificing some capabilities) you may
+also use a DecodingLayerContainer directly as a decoding tool. In this case you
+have to handle unknown layer types and layer panics yourself. Example:
+
+ func main() {
+ var eth layers.Ethernet
+ var ip4 layers.IPv4
+ var ip6 layers.IPv6
+ var tcp layers.TCP
+ dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerArray(nil))
+		dlc = dlc.Put(&eth)
+ dlc = dlc.Put(&ip4)
+ dlc = dlc.Put(&ip6)
+ dlc = dlc.Put(&tcp)
+ // you may specify some meaningful DecodeFeedback
+ decoder := dlc.LayersDecoder(LayerTypeEthernet, gopacket.NilDecodeFeedback)
+ decoded := make([]gopacket.LayerType, 0, 20)
+ for packetData := range somehowGetPacketData() {
+ lt, err := decoder(packetData, &decoded)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+ continue
+ }
+ if lt != gopacket.LayerTypeZero {
+ fmt.Fprintf(os.Stderr, "unknown layer type: %v\n", lt)
+ continue
+ }
+ for _, layerType := range decoded {
+ // examine decoded layertypes just as already shown above
+ }
+ }
+ }
+
+DecodingLayerSparse is the fastest, but it is most effective when the
+LayerType values the layers in use can decode are not large; otherwise it
+leads to a bigger memory footprint. DecodingLayerArray is very compact and
+primarily usable if the number of decoding layers is small (up to ~10-15, but
+please do your own benchmarks). DecodingLayerMap is the most versatile and is
+used by DecodingLayerParser by default. Please refer to the tests and
+benchmarks in the layers subpackage for further usage examples and performance
+measurements.
+
+You may also choose to implement your own DecodingLayerContainer if you want to
+make use of your own internal packet decoding logic.
+
+Creating Packet Data
+
+As well as offering the ability to decode packet data, gopacket will allow you
+to create packets from scratch, as well. A number of gopacket layers implement
+the SerializableLayer interface; these layers can be serialized to a []byte in
+the following manner:
+
+ ip := &layers.IPv4{
+ SrcIP: net.IP{1, 2, 3, 4},
+ DstIP: net.IP{5, 6, 7, 8},
+ // etc...
+ }
+ buf := gopacket.NewSerializeBuffer()
+ opts := gopacket.SerializeOptions{} // See SerializeOptions for more details.
+ err := ip.SerializeTo(buf, opts)
+ if err != nil { panic(err) }
+ fmt.Println(buf.Bytes()) // prints out a byte slice containing the serialized IPv4 layer.
+
+SerializeTo PREPENDS the given layer onto the SerializeBuffer, treating the
+current buffer's Bytes() slice as the payload of the serializing layer.
+Therefore, you can serialize an entire packet by serializing a set of layers in
+reverse order (Payload, then TCP, then IP, then Ethernet, for example). The
+package-level SerializeLayers function is a helper that does exactly that.
+
+To generate an (empty and useless, because no fields are set)
+Ethernet(IPv4(TCP(Payload))) packet, for example, you can run:
+
+ buf := gopacket.NewSerializeBuffer()
+ opts := gopacket.SerializeOptions{}
+ gopacket.SerializeLayers(buf, opts,
+ &layers.Ethernet{},
+ &layers.IPv4{},
+ &layers.TCP{},
+ gopacket.Payload([]byte{1, 2, 3, 4}))
+ packetData := buf.Bytes()
+
+A Final Note
+
+If you use gopacket, you'll almost definitely want to make sure gopacket/layers
+is imported, since when imported it sets all the LayerType variables and fills
+in a lot of interesting variables/maps (DecodersByLayerName, etc). Therefore,
+it's recommended that even if you don't use any layers functions directly, you still import with:
+
+ import (
+ _ "github.com/google/gopacket/layers"
+ )
+*/
+package gopacket
diff --git a/vendor/github.com/google/gopacket/flows.go b/vendor/github.com/google/gopacket/flows.go
new file mode 100644
index 0000000..a00c883
--- /dev/null
+++ b/vendor/github.com/google/gopacket/flows.go
@@ -0,0 +1,236 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+// MaxEndpointSize determines the maximum size in bytes of an endpoint address.
+//
+// Endpoints/Flows have a problem: They need to be hashable. Therefore, they
+// can't use a byte slice. The two obvious choices are to use a string or a
+// byte array. Strings work great, but string creation requires memory
+// allocation, which can be slow. Arrays work great, but have a fixed size. We
+// originally used the former, now we've switched to the latter. Use of a fixed
+// byte-array doubles the speed of constructing a flow (due to not needing to
+// allocate). This is a huge increase... too much for us to pass up.
+//
+// The end result of this, though, is that an endpoint/flow can't be created
+// using more than MaxEndpointSize bytes per address.
+const MaxEndpointSize = 16
+
+// Endpoint is the set of bytes used to address packets at various layers.
+// See LinkLayer, NetworkLayer, and TransportLayer specifications.
+// Endpoints are usable as map keys.
+type Endpoint struct {
+ typ EndpointType
+ len int
+ raw [MaxEndpointSize]byte
+}
+
+// EndpointType returns the endpoint type associated with this endpoint.
+func (a Endpoint) EndpointType() EndpointType { return a.typ }
+
+// Raw returns the raw bytes of this endpoint. These aren't human-readable
+// most of the time, but they are faster than calling String.
+func (a Endpoint) Raw() []byte { return a.raw[:a.len] }
+
+// LessThan provides a stable ordering for all endpoints. It sorts first based
+// on the EndpointType of an endpoint, then based on the raw bytes of that
+// endpoint.
+//
+// For some endpoints, the actual comparison may not make sense, however this
+// ordering does provide useful information for most Endpoint types. Raw
+// endpoint bytes are compared lexicographically.
+func (a Endpoint) LessThan(b Endpoint) bool {
+ return a.typ < b.typ || (a.typ == b.typ && bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0)
+}
+
+// fnvHash is used by our FastHash functions, and implements the FNV hash
+// created by Glenn Fowler, Landon Curt Noll, and Phong Vo.
+// See http://isthe.com/chongo/tech/comp/fnv/.
+func fnvHash(s []byte) (h uint64) {
+ h = fnvBasis
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= fnvPrime
+ }
+ return
+}
+
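+// These are the standard 64-bit FNV offset basis and prime.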
+const fnvBasis = 14695981039346656037
+const fnvPrime = 1099511628211
+
+// FastHash provides a quick hashing function for an endpoint, useful if you'd
+// like to split up endpoints by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (a Endpoint) FastHash() (h uint64) {
+ h = fnvHash(a.raw[:a.len])
+ h ^= uint64(a.typ)
+ h *= fnvPrime
+ return
+}
+
+// NewEndpoint creates a new Endpoint object.
+//
+// The size of raw must not exceed MaxEndpointSize, otherwise this function
+// will panic.
+func NewEndpoint(typ EndpointType, raw []byte) (e Endpoint) {
+ e.len = len(raw)
+ if e.len > MaxEndpointSize {
+ panic("raw byte length greater than MaxEndpointSize")
+ }
+ e.typ = typ
+ copy(e.raw[:], raw)
+ return
+}
+
+// EndpointTypeMetadata is used to register a new endpoint type.
+type EndpointTypeMetadata struct {
+ // Name is the string returned by an EndpointType's String function.
+ Name string
+ // Formatter is called from an Endpoint's String function to format the raw
+ // bytes in an Endpoint into a human-readable string.
+ Formatter func([]byte) string
+}
+
+// EndpointType is the type of a gopacket Endpoint. This type determines how
+// the bytes stored in the endpoint should be interpreted.
+type EndpointType int64
+
+var endpointTypes = map[EndpointType]EndpointTypeMetadata{}
+
+// RegisterEndpointType creates a new EndpointType and registers it globally.
+// It MUST be passed a unique number, or it will panic. Numbers 0-999 are
+// reserved for gopacket's use.
+func RegisterEndpointType(num int, meta EndpointTypeMetadata) EndpointType {
+ t := EndpointType(num)
+ if _, ok := endpointTypes[t]; ok {
+ panic("Endpoint type number already in use")
+ }
+ endpointTypes[t] = meta
+ return t
+}
+
+func (e EndpointType) String() string {
+ if t, ok := endpointTypes[e]; ok {
+ return t.Name
+ }
+ return strconv.Itoa(int(e))
+}
+
+func (a Endpoint) String() string {
+ if t, ok := endpointTypes[a.typ]; ok && t.Formatter != nil {
+ return t.Formatter(a.raw[:a.len])
+ }
+ return fmt.Sprintf("%v:%v", a.typ, a.raw)
+}
+
+// Flow represents the direction of traffic for a packet layer, as a source and destination Endpoint.
+// Flows are usable as map keys.
+type Flow struct {
+ typ EndpointType
+ slen, dlen int
+ src, dst [MaxEndpointSize]byte
+}
+
+// FlowFromEndpoints creates a new flow by pasting together two endpoints.
+// The endpoints must have the same EndpointType, or this function will return
+// an error.
+func FlowFromEndpoints(src, dst Endpoint) (_ Flow, err error) {
+ if src.typ != dst.typ {
+ err = fmt.Errorf("Mismatched endpoint types: %v->%v", src.typ, dst.typ)
+ return
+ }
+ return Flow{src.typ, src.len, dst.len, src.raw, dst.raw}, nil
+}
+
+// FastHash provides a quick hashing function for a flow, useful if you'd
+// like to split up flows by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing, and is guaranteed to collide
+// with its reverse flow. IE: the flow A->B will have the same hash as the flow
+// B->A.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (f Flow) FastHash() (h uint64) {
+ // This combination must be commutative. We don't use ^, since that would
+ // give the same hash for all A->A flows.
+ h = fnvHash(f.src[:f.slen]) + fnvHash(f.dst[:f.dlen])
+ h ^= uint64(f.typ)
+ h *= fnvPrime
+ return
+}
+
+// String returns a human-readable representation of this flow, in the form
+// "Src->Dst"
+func (f Flow) String() string {
+ s, d := f.Endpoints()
+ return fmt.Sprintf("%v->%v", s, d)
+}
+
+// EndpointType returns the EndpointType for this Flow.
+func (f Flow) EndpointType() EndpointType {
+ return f.typ
+}
+
+// Endpoints returns the two Endpoints for this flow.
+func (f Flow) Endpoints() (src, dst Endpoint) {
+ return Endpoint{f.typ, f.slen, f.src}, Endpoint{f.typ, f.dlen, f.dst}
+}
+
+// Src returns the source Endpoint for this flow.
+func (f Flow) Src() (src Endpoint) {
+ src, _ = f.Endpoints()
+ return
+}
+
+// Dst returns the destination Endpoint for this flow.
+func (f Flow) Dst() (dst Endpoint) {
+ _, dst = f.Endpoints()
+ return
+}
+
+// Reverse returns a new flow with endpoints reversed.
+func (f Flow) Reverse() Flow {
+ return Flow{f.typ, f.dlen, f.slen, f.dst, f.src}
+}
+
+// NewFlow creates a new flow.
+//
+// src and dst must have length <= MaxEndpointSize, otherwise NewFlow will
+// panic.
+func NewFlow(t EndpointType, src, dst []byte) (f Flow) {
+ f.slen = len(src)
+ f.dlen = len(dst)
+ if f.slen > MaxEndpointSize || f.dlen > MaxEndpointSize {
+ panic("flow raw byte length greater than MaxEndpointSize")
+ }
+ f.typ = t
+ copy(f.src[:], src)
+ copy(f.dst[:], dst)
+ return
+}
+
+// EndpointInvalid is an endpoint type used for invalid endpoints, IE endpoints
+// that are specified incorrectly during creation.
+var EndpointInvalid = RegisterEndpointType(0, EndpointTypeMetadata{Name: "invalid", Formatter: func(b []byte) string {
+ return fmt.Sprintf("%v", b)
+}})
+
+// InvalidEndpoint is a singleton Endpoint of type EndpointInvalid.
+var InvalidEndpoint = NewEndpoint(EndpointInvalid, nil)
+
+// InvalidFlow is a singleton Flow of type EndpointInvalid.
+var InvalidFlow = NewFlow(EndpointInvalid, nil, nil)
diff --git a/vendor/github.com/google/gopacket/gc b/vendor/github.com/google/gopacket/gc
new file mode 100755
index 0000000..b1d8d2e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/gc
@@ -0,0 +1,288 @@
+#!/bin/bash
+# Copyright 2012 Google, Inc. All rights reserved.
+
+# This script provides a simple way to run benchmarks against previous code and
+# keep a log of how benchmarks change over time. When used with the --benchmark
+# flag, it runs benchmarks from the current code and from the last commit run
+# with --benchmark, then stores the results in the git commit description. We
+# rerun the old benchmarks along with the new ones, since there's no guarantee
+# that git commits will happen on the same machine, so machine differences could
+# cause wildly inaccurate results.
+#
+# If you're making changes to 'gopacket' which could cause performance changes,
+# you may be requested to use this commit script to make sure your changes don't
+# have large detrimental effects (or to show off how awesome your performance
+# improvements are).
+#
+# If not run with the --benchmark flag, this script is still very useful... it
+# makes sure all the correct go formatting, building, and testing work as
+# expected.
+
+function Usage {
+ cat <<EOF
+USAGE:  $0 [--benchmark regexp] [--root] [--gen] <git commit flags...>
+
+--benchmark: Run benchmark comparisons against last benchmark'd commit
+--root: Run tests that require root privileges
+--gen: Generate code for MACs/ports by pulling down external data
+
+Note, some 'git commit' flags are necessary, if all else fails, pass in -a
+EOF
+ exit 1
+}
+
+BENCH=""
+GEN=""
+ROOT=""
+while [ ! -z "$1" ]; do
+ case "$1" in
+ "--benchmark")
+ BENCH="$2"
+ shift
+ shift
+ ;;
+ "--gen")
+ GEN="yes"
+ shift
+ ;;
+ "--root")
+ ROOT="yes"
+ shift
+ ;;
+ "--help")
+ Usage
+ ;;
+ "-h")
+ Usage
+ ;;
+ "help")
+ Usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+done
+
+function Root {
+ if [ ! -z "$ROOT" ]; then
+ local exec="$1"
+ # Some folks (like me) keep source code in places inaccessible by root (like
+ # NFS), so to make sure things run smoothly we copy them to a /tmp location.
+ local tmpfile="$(mktemp -t gopacket_XXXXXXXX)"
+ echo "Running root test executable $exec as $tmpfile"
+ cp "$exec" "$tmpfile"
+ chmod a+x "$tmpfile"
+ shift
+ sudo "$tmpfile" "$@"
+ fi
+}
+
+if [ "$#" -eq "0" ]; then
+ Usage
+fi
+
+cd $(dirname $0)
+
+# Check for copyright notices.
+for filename in $(find ./ -type f -name '*.go'); do
+ if ! head -n 1 "$filename" | grep -q Copyright; then
+ echo "File '$filename' may not have copyright notice"
+ exit 1
+ fi
+done
+
+set -e
+set -x
+
+if [ ! -z "$ROOT" ]; then
+ echo "Running SUDO to get root priviledges for root tests"
+ sudo echo "have root"
+fi
+
+if [ ! -z "$GEN" ]; then
+ pushd macs
+ go run gen.go | gofmt > valid_mac_prefixes.go
+ popd
+ pushd layers
+ go run gen.go | gofmt > iana_ports.go
+ go run gen2.go | gofmt > enums_generated.go
+ popd
+fi
+
+# Make sure everything is formatted, compiles, and tests pass.
+go fmt ./...
+go test -i ./... 2>/dev/null >/dev/null || true
+go test
+go build
+pushd examples/bytediff
+go build
+popd
+if [ -f /usr/include/pcap.h ]; then
+ pushd pcap
+ go test ./...
+ go build ./...
+ go build pcap_tester.go
+ Root pcap_tester --mode=basic
+ Root pcap_tester --mode=filtered
+ Root pcap_tester --mode=timestamp || echo "You might not support timestamp sources"
+ popd
+ pushd examples/afpacket
+ go build
+ popd
+ pushd examples/pcapdump
+ go build
+ popd
+ pushd examples/arpscan
+ go build
+ popd
+ pushd examples/bidirectional
+ go build
+ popd
+ pushd examples/synscan
+ go build
+ popd
+ pushd examples/httpassembly
+ go build
+ popd
+ pushd examples/statsassembly
+ go build
+ popd
+fi
+pushd macs
+go test ./...
+gofmt -w gen.go
+go build gen.go
+popd
+pushd tcpassembly
+go test ./...
+popd
+pushd reassembly
+go test ./...
+popd
+pushd layers
+gofmt -w gen.go
+go build gen.go
+go test ./...
+popd
+pushd pcapgo
+go test ./...
+go build ./...
+popd
+if [ -f /usr/include/linux/if_packet.h ]; then
+ if grep -q TPACKET_V3 /usr/include/linux/if_packet.h; then
+ pushd afpacket
+ go build ./...
+ go test ./...
+ popd
+ fi
+fi
+if [ -f /usr/include/pfring.h ]; then
+ pushd pfring
+ go test ./...
+ go build ./...
+ popd
+ pushd examples/pfdump
+ go build
+ popd
+fi
+pushd ip4defrag
+go test ./...
+popd
+pushd defrag
+go test ./...
+popd
+
+for travis_script in `ls .travis.*.sh`; do
+ ./$travis_script
+done
+
+# Run our initial commit
+git commit "$@"
+
+if [ -z "$BENCH" ]; then
+ set +x
+ echo "We're not benchmarking and we've committed... we're done!"
+ exit
+fi
+
+### If we get here, we want to run benchmarks from current commit, and compare
+### then to benchmarks from the last --benchmark commit.
+
+# Get our current branch.
+BRANCH="$(git branch | grep '^*' | awk '{print $2}')"
+
+# File we're going to build our commit description in.
+COMMIT_FILE="$(mktemp /tmp/tmp.XXXXXXXX)"
+
+# Add the word "BENCH" to the start of the git commit.
+echo -n "BENCH " > $COMMIT_FILE
+
+# Get the current description... there must be an easier way.
+git log -n 1 | grep '^    ' | sed 's/^    //' >> $COMMIT_FILE
+
+# Get the commit sha for the last benchmark commit
+PREV=$(git log -n 1 --grep='BENCHMARK_MARKER_DO_NOT_CHANGE' | head -n 1 | awk '{print $2}')
+
+## Run current benchmarks
+
+cat >> $COMMIT_FILE <<EOF
+
+----------------------------------------------------------
+BENCHMARK_MARKER_DO_NOT_CHANGE
+----------------------------------------------------------
+
+TEST BENCHMARKS "$BENCH"
+EOF
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+PCAP BENCHMARK
+EOF
+if [ "$BENCH" == "." ]; then
+go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset to last benchmark commit, run benchmarks
+
+git checkout $PREV
+
+cat >> $COMMIT_FILE <<EOF
+
+OLD TEST BENCHMARKS "$BENCH"
+EOF
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+OLD PCAP BENCHMARK
+EOF
+if [ "$BENCH" == "." ]; then
+go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset back to the most recent commit, edit the commit message by appending
+## benchmark results.
+git checkout $BRANCH
+git commit --amend -F $COMMIT_FILE
diff --git a/vendor/github.com/google/gopacket/gen.go b/vendor/github.com/google/gopacket/gen.go
new file mode 100644
index 0000000..dc75f94
--- /dev/null
+++ b/vendor/github.com/google/gopacket/gen.go
@@ -0,0 +1,75 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+// This file generates the LayersDecoder function for DecodingLayerContainer:
+// go run gen.go | gofmt > layers_decoder.go
+package main
+
+import (
+ "fmt"
+ "os"
+ "time"
+)
+
+const headerFmt = `// Copyright 2019 The GoPacket Authors. All rights reserved.
+
+package gopacket
+
+// Created by gen.go, don't edit manually
+// Generated at %s
+
+// LayersDecoder returns DecodingLayerFunc for specified
+// DecodingLayerContainer, LayerType value to start decoding with and
+// some DecodeFeedback.
+func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ firstDec, ok := dl.Decoder(first)
+ if !ok {
+ return func([]byte, *[]LayerType) (LayerType, error) {
+ return first, nil
+ }
+ }
+`
+
+var funcBody = `return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+ *decoded = (*decoded)[:0] // Truncate decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+}`
+
+func main() {
+ fmt.Fprintf(os.Stderr, "Writing results to stdout\n")
+ types := []string{
+ "DecodingLayerSparse",
+ "DecodingLayerArray",
+ "DecodingLayerMap",
+ }
+
+ fmt.Printf(headerFmt, time.Now())
+ for _, t := range types {
+ fmt.Printf("if dlc, ok := dl.(%s); ok {", t)
+ fmt.Println(funcBody)
+ fmt.Println("}")
+ }
+ fmt.Println("dlc := dl")
+ fmt.Println(funcBody)
+ fmt.Println("}")
+}
diff --git a/vendor/github.com/google/gopacket/go.mod b/vendor/github.com/google/gopacket/go.mod
new file mode 100644
index 0000000..99e99f4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.mod
@@ -0,0 +1,8 @@
+module github.com/google/gopacket
+
+go 1.12
+
+require (
+ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
+ golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67
+)
diff --git a/vendor/github.com/google/gopacket/go.sum b/vendor/github.com/google/gopacket/go.sum
new file mode 100644
index 0000000..2b28942
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.sum
@@ -0,0 +1,7 @@
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/google/gopacket/layerclass.go b/vendor/github.com/google/gopacket/layerclass.go
new file mode 100644
index 0000000..775cd09
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layerclass.go
@@ -0,0 +1,107 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+// LayerClass is a set of LayerTypes, used for grabbing one of a number of
+// different types from a packet.
+type LayerClass interface {
+ // Contains returns true if the given layer type should be considered part
+ // of this layer class.
+ Contains(LayerType) bool
+ // LayerTypes returns the set of all layer types in this layer class.
+ // Note that this may not be a fast operation on all LayerClass
+ // implementations.
+ LayerTypes() []LayerType
+}
+
+// Contains implements LayerClass.
+func (l LayerType) Contains(a LayerType) bool {
+ return l == a
+}
+
+// LayerTypes implements LayerClass.
+func (l LayerType) LayerTypes() []LayerType {
+ return []LayerType{l}
+}
+
+// LayerClassSlice implements a LayerClass with a slice.
+type LayerClassSlice []bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (s LayerClassSlice) Contains(t LayerType) bool {
+ return int(t) < len(s) && s[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassSlice.
+// Because of LayerClassSlice's implementation, this could be quite slow.
+func (s LayerClassSlice) LayerTypes() (all []LayerType) {
+ for i := 0; i < len(s); i++ {
+ if s[i] {
+ all = append(all, LayerType(i))
+ }
+ }
+ return
+}
+
+// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of
+// size max(types) and setting slice[t] to true for each type t. Note, if
+// you implement your own LayerType and give it a high value, this WILL create
+// a very large slice.
+func NewLayerClassSlice(types []LayerType) LayerClassSlice {
+ var max LayerType
+ for _, typ := range types {
+ if typ > max {
+ max = typ
+ }
+ }
+ t := make([]bool, int(max+1))
+ for _, typ := range types {
+ t[typ] = true
+ }
+ return t
+}
+
+// LayerClassMap implements a LayerClass with a map.
+type LayerClassMap map[LayerType]bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (m LayerClassMap) Contains(t LayerType) bool {
+ return m[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassMap.
+func (m LayerClassMap) LayerTypes() (all []LayerType) {
+ for t := range m {
+ all = append(all, t)
+ }
+ return
+}
+
+// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each
+// type in types.
+func NewLayerClassMap(types []LayerType) LayerClassMap {
+ m := LayerClassMap{}
+ for _, typ := range types {
+ m[typ] = true
+ }
+ return m
+}
+
+// NewLayerClass creates a LayerClass, attempting to be smart about which type
+// it creates based on which types are passed in.
+func NewLayerClass(types []LayerType) LayerClass {
+ for _, typ := range types {
+ if typ > maxLayerType {
+ // NewLayerClassSlice could create a very large object, so instead create
+ // a map.
+ return NewLayerClassMap(types)
+ }
+ }
+ return NewLayerClassSlice(types)
+}
diff --git a/vendor/github.com/google/gopacket/layers/.lint_blacklist b/vendor/github.com/google/gopacket/layers/.lint_blacklist
new file mode 100644
index 0000000..fded4f6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/.lint_blacklist
@@ -0,0 +1,39 @@
+dot11.go
+eap.go
+endpoints.go
+enums_generated.go
+enums.go
+ethernet.go
+geneve.go
+icmp4.go
+icmp6.go
+igmp.go
+ip4.go
+ip6.go
+layertypes.go
+linux_sll.go
+llc.go
+lldp.go
+mpls.go
+ndp.go
+ntp.go
+ospf.go
+pflog.go
+pppoe.go
+prism.go
+radiotap.go
+rudp.go
+sctp.go
+sflow.go
+tcp.go
+tcpip.go
+tls.go
+tls_alert.go
+tls_appdata.go
+tls_cipherspec.go
+tls_handshake.go
+tls_test.go
+udp.go
+udplite.go
+usb.go
+vrrp.go
diff --git a/vendor/github.com/google/gopacket/layers/arp.go b/vendor/github.com/google/gopacket/layers/arp.go
new file mode 100644
index 0000000..49e05ac
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/arp.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// Potential values for ARP.Operation.
+const (
+ ARPRequest = 1
+ ARPReply = 2
+)
+
+// ARP is an ARP packet header.
+type ARP struct {
+ BaseLayer
+ AddrType LinkType
+ Protocol EthernetType
+ HwAddressSize uint8
+ ProtAddressSize uint8
+ Operation uint16
+ SourceHwAddress []byte
+ SourceProtAddress []byte
+ DstHwAddress []byte
+ DstProtAddress []byte
+}
+
+// LayerType returns LayerTypeARP
+func (arp *ARP) LayerType() gopacket.LayerType { return LayerTypeARP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (arp *ARP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ // Bounds-check before slicing so truncated input returns an error
+ // instead of panicking. The length math is done in int to avoid
+ // uint8 overflow for large address sizes.
+ if len(data) < 8 {
+ df.SetTruncated()
+ return errors.New("ARP length exceeds bytes available")
+ }
+ arp.AddrType = LinkType(binary.BigEndian.Uint16(data[0:2]))
+ arp.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+ arp.HwAddressSize = data[4]
+ arp.ProtAddressSize = data[5]
+ arp.Operation = binary.BigEndian.Uint16(data[6:8])
+ arpLength := 8 + 2*int(arp.HwAddressSize) + 2*int(arp.ProtAddressSize)
+ if len(data) < arpLength {
+ df.SetTruncated()
+ return errors.New("ARP length exceeds bytes available")
+ }
+ arp.SourceHwAddress = data[8 : 8+arp.HwAddressSize]
+ arp.SourceProtAddress = data[8+arp.HwAddressSize : 8+arp.HwAddressSize+arp.ProtAddressSize]
+ arp.DstHwAddress = data[8+arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+arp.ProtAddressSize]
+ arp.DstProtAddress = data[8+2*arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+2*arp.ProtAddressSize]
+
+ arp.Contents = data[:arpLength]
+ arp.Payload = data[arpLength:]
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (arp *ARP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ size := 8 + len(arp.SourceHwAddress) + len(arp.SourceProtAddress) + len(arp.DstHwAddress) + len(arp.DstProtAddress)
+ bytes, err := b.PrependBytes(size)
+ if err != nil {
+ return err
+ }
+ if opts.FixLengths {
+ if len(arp.SourceHwAddress) != len(arp.DstHwAddress) {
+ return errors.New("mismatched hardware address sizes")
+ }
+ arp.HwAddressSize = uint8(len(arp.SourceHwAddress))
+ if len(arp.SourceProtAddress) != len(arp.DstProtAddress) {
+ return errors.New("mismatched prot address sizes")
+ }
+ arp.ProtAddressSize = uint8(len(arp.SourceProtAddress))
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(arp.AddrType))
+ binary.BigEndian.PutUint16(bytes[2:], uint16(arp.Protocol))
+ bytes[4] = arp.HwAddressSize
+ bytes[5] = arp.ProtAddressSize
+ binary.BigEndian.PutUint16(bytes[6:], arp.Operation)
+ start := 8
+ for _, addr := range [][]byte{
+ arp.SourceHwAddress,
+ arp.SourceProtAddress,
+ arp.DstHwAddress,
+ arp.DstProtAddress,
+ } {
+ copy(bytes[start:], addr)
+ start += len(addr)
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (arp *ARP) CanDecode() gopacket.LayerClass {
+ return LayerTypeARP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (arp *ARP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func decodeARP(data []byte, p gopacket.PacketBuilder) error {
+ arp := &ARP{}
+ return decodingLayerDecoder(arp, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/asf.go b/vendor/github.com/google/gopacket/layers/asf.go
new file mode 100644
index 0000000..d698bd0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/asf.go
@@ -0,0 +1,166 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+// This file implements the ASF RMCP payload specified in section 3.2.2.3 of
+// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+const (
+ // ASFRMCPEnterprise is the IANA-assigned Enterprise Number of the ASF-RMCP.
+ ASFRMCPEnterprise uint32 = 4542
+)
+
+// ASFDataIdentifier encapsulates fields used to uniquely identify the format of
+// the data block.
+//
+// While the enterprise number is almost always 4542 (ASF-RMCP), we support
+// registering layers using structs of this type as a key in case any users are
+// using OEM-extensions.
+type ASFDataIdentifier struct {
+ // Enterprise is the IANA Enterprise Number associated with the entity that
+ // defines the message type. A list can be found at
+ // https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers.
+ // This can be thought of as the namespace for the message type.
+ Enterprise uint32
+
+ // Type is the message type, defined by the entity associated with the
+ // enterprise above. No pressure, but in the context of EN 4542, 1 byte is
+ // the difference between sending a ping and telling a machine to do an
+ // unconditional power down (0x80 and 0x12 respectively).
+ Type uint8
+}
+
+// LayerType returns the payload layer type corresponding to an ASF message
+// type.
+func (a ASFDataIdentifier) LayerType() gopacket.LayerType {
+ if lt := asfDataLayerTypes[a]; lt != 0 {
+ return lt
+ }
+
+ // some layer types don't have a payload, e.g. ASF-RMCP Presence Ping.
+ return gopacket.LayerTypePayload
+}
+
+// RegisterASFLayerType allows specifying that the data block of ASF packets
+// with a given enterprise number and type should be processed by a given layer
+// type. This overrides any existing registrations, including defaults.
+func RegisterASFLayerType(a ASFDataIdentifier, l gopacket.LayerType) {
+ asfDataLayerTypes[a] = l
+}
+
+var (
+ // ASFDataIdentifierPresencePong is the message type of the response to a
+ // Presence Ping message. It indicates the sender is ASF-RMCP-aware.
+ ASFDataIdentifierPresencePong = ASFDataIdentifier{
+ Enterprise: ASFRMCPEnterprise,
+ Type: 0x40,
+ }
+
+ // ASFDataIdentifierPresencePing is a message type sent to a managed client
+ // to solicit a Presence Pong response. Clients may ignore this if the RMCP
+ // version is unsupported. Sending this message with a sequence number <255
+ // is the recommended way of finding out whether an implementation sends
+ // RMCP ACKs (e.g. iDRAC does, Super Micro does not).
+ //
+ // Systems implementing IPMI must respond to this ping to conform to the
+ // spec, so it is a good substitute for an ICMP ping.
+ ASFDataIdentifierPresencePing = ASFDataIdentifier{
+ Enterprise: ASFRMCPEnterprise,
+ Type: 0x80,
+ }
+
+ // asfDataLayerTypes is used to find the next layer for a given ASF header.
+ asfDataLayerTypes = map[ASFDataIdentifier]gopacket.LayerType{
+ ASFDataIdentifierPresencePong: LayerTypeASFPresencePong,
+ }
+)
+
+// ASF defines ASF's generic RMCP message Data block format. See section
+// 3.2.2.3.
+type ASF struct {
+ BaseLayer
+ ASFDataIdentifier
+
+ // Tag is used to match request/response pairs. The tag of a response is set
+ // to that of the message it is responding to. If a message is
+ // unidirectional, i.e. not part of a request/response pair, this is set to
+ // 255.
+ Tag uint8
+
+ // 1 byte reserved, set to 0x00.
+
+ // Length is the length of this layer's payload in bytes.
+ Length uint8
+}
+
+// LayerType returns LayerTypeASF. It partially satisfies Layer and
+// SerializableLayer.
+func (*ASF) LayerType() gopacket.LayerType {
+ return LayerTypeASF
+}
+
+// CanDecode returns LayerTypeASF. It partially satisfies DecodingLayer.
+func (a *ASF) CanDecode() gopacket.LayerClass {
+ return a.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (a *ASF) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ df.SetTruncated()
+ return fmt.Errorf("invalid ASF data header, length %v less than 8",
+ len(data))
+ }
+
+ a.BaseLayer.Contents = data[:8]
+ a.BaseLayer.Payload = data[8:]
+
+ a.Enterprise = binary.BigEndian.Uint32(data[:4])
+ a.Type = uint8(data[4])
+ a.Tag = uint8(data[5])
+ // 1 byte reserved
+ a.Length = uint8(data[7])
+ return nil
+}
+
+// NextLayerType returns the layer type corresponding to the message type of
+// this ASF data layer. This partially satisfies DecodingLayer.
+func (a *ASF) NextLayerType() gopacket.LayerType {
+ return a.ASFDataIdentifier.LayerType()
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (a *ASF) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ payload := b.Bytes()
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint32(bytes[:4], a.Enterprise)
+ bytes[4] = uint8(a.Type)
+ bytes[5] = a.Tag
+ bytes[6] = 0x00
+ if opts.FixLengths {
+ a.Length = uint8(len(payload))
+ }
+ bytes[7] = a.Length
+ return nil
+}
+
+// decodeASF decodes the byte slice into an RMCP-ASF data struct.
+func decodeASF(data []byte, p gopacket.PacketBuilder) error {
+ return decodingLayerDecoder(&ASF{}, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/asf_presencepong.go b/vendor/github.com/google/gopacket/layers/asf_presencepong.go
new file mode 100644
index 0000000..e9a8baf
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/asf_presencepong.go
@@ -0,0 +1,194 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+// This file implements the RMCP ASF Presence Pong message, specified in section
+// 3.2.4.3 of
+// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf. It
+// also contains non-competing elements from IPMI v2.0, specified in section
+// 13.2.4 of
+// https://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/ipmi-intelligent-platform-mgt-interface-spec-2nd-gen-v2-0-spec-update.pdf.
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+type (
+ // ASFEntity is the type of individual entities that a Presence Pong
+ // response can indicate support of. The entities currently implemented by
+ // the spec are IPMI and ASFv1.
+ ASFEntity uint8
+
+ // ASFInteraction is the type of individual interactions that a Presence
+ // Pong response can indicate support for. The interactions currently
+ // implemented by the spec are RMCP security extensions. Although not
+ // specified, IPMI uses this field to indicate support for DASH, which is
+ // supported as well.
+ ASFInteraction uint8
+)
+
+const (
+ // ASFDCMIEnterprise is the IANA-assigned Enterprise Number of the Data
+ // Center Manageability Interface Forum. The Presence Pong response's
+ // Enterprise field being set to this value indicates support for DCMI. The
+ // DCMI spec regards the OEM field as reserved, so these should be null.
+ ASFDCMIEnterprise uint32 = 36465
+
+ // ASFPresencePongEntityIPMI ANDs with Presence Pong's supported entities
+ // field if the managed system supports IPMI.
+ ASFPresencePongEntityIPMI ASFEntity = 1 << 7
+
+ // ASFPresencePongEntityASFv1 ANDs with Presence Pong's supported entities
+ // field if the managed system supports ASF v1.0.
+ ASFPresencePongEntityASFv1 ASFEntity = 1
+
+ // ASFPresencePongInteractionSecurityExtensions ANDs with Presence Pong's
+ // supported interactions field if the managed system supports RMCP v2.0
+ // security extensions. See section 3.2.3.
+ ASFPresencePongInteractionSecurityExtensions ASFInteraction = 1 << 7
+
+ // ASFPresencePongInteractionDASH ANDs with Presence Pong's supported
+ // interactions field if the managed system supports DMTF DASH. See
+ // https://www.dmtf.org/standards/dash.
+ ASFPresencePongInteractionDASH ASFInteraction = 1 << 5
+)
+
+// ASFPresencePong defines the structure of a Presence Pong message's payload.
+// See section 3.2.4.3.
+type ASFPresencePong struct {
+ BaseLayer
+
+ // Enterprise is the IANA Enterprise Number of an entity that has defined
+ // OEM-specific capabilities for the managed client. If no such capabilities
+ // exist, this is set to ASF's IANA Enterprise Number.
+ Enterprise uint32
+
+ // OEM identifies OEM-specific capabilities. Its structure is defined by the
+ // OEM. This is set to 0s if no OEM-specific capabilities exist. This
+ // implementation does not change byte order from the wire for this field.
+ OEM [4]byte
+
+ // We break out entities and interactions into separate booleans as
+ // discovery is the entire point of this type of message, so we assume they
+ // are accessed. It also makes gopacket's default layer printing more
+ // useful.
+
+ // IPMI is true if IPMI is supported by the managed system. There is no
+ // explicit version in the specification, however given the dates, this is
+ // assumed to be IPMI v1.0. Support for IPMI is contained in the "supported
+ // entities" field of the presence pong payload.
+ IPMI bool
+
+ // ASFv1 indicates support for ASF v1.0. This seems somewhat redundant as
+ // ASF must be supported in order to receive a response. This is contained
+ // in the "supported entities" field of the presence pong payload.
+ ASFv1 bool
+
+ // SecurityExtensions indicates support for RMCP Security Extensions,
+ // specified in ASF v2.0. This will always be false for v1.x
+ // implementations. This is contained in the "supported interactions" field
+ // of the presence pong payload. This field is defined in ASF v1.0, but has
+ // no useful value.
+ SecurityExtensions bool
+
+ // DASH is true if DMTF DASH is supported. This is not specified in ASF
+ // v2.0, but in IPMI v2.0, however the former does not preclude it, so we
+ // support it.
+ DASH bool
+
+ // 6 bytes reserved after the entities and interactions fields, set to 0s.
+}
+
+// SupportsDCMI returns whether the Presence Pong message indicates support for
+// the Data Center Management Interface, which is an extension of IPMI v2.0.
+func (a *ASFPresencePong) SupportsDCMI() bool {
+ return a.Enterprise == ASFDCMIEnterprise && a.IPMI && a.ASFv1
+}
+
+// LayerType returns LayerTypeASFPresencePong. It partially satisfies Layer and
+// SerializableLayer.
+func (*ASFPresencePong) LayerType() gopacket.LayerType {
+ return LayerTypeASFPresencePong
+}
+
+// CanDecode returns LayerTypeASFPresencePong. It partially satisfies
+// DecodingLayer.
+func (a *ASFPresencePong) CanDecode() gopacket.LayerClass {
+ return a.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (a *ASFPresencePong) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 16 {
+ df.SetTruncated()
+ return fmt.Errorf("invalid ASF presence pong payload, length %v less than 16",
+ len(data))
+ }
+
+ a.BaseLayer.Contents = data[:16]
+ a.BaseLayer.Payload = data[16:]
+
+ a.Enterprise = binary.BigEndian.Uint32(data[:4])
+ copy(a.OEM[:], data[4:8]) // N.B. no byte order change
+ a.IPMI = data[8]&uint8(ASFPresencePongEntityIPMI) != 0
+ a.ASFv1 = data[8]&uint8(ASFPresencePongEntityASFv1) != 0
+ a.SecurityExtensions = data[9]&uint8(ASFPresencePongInteractionSecurityExtensions) != 0
+ a.DASH = data[9]&uint8(ASFPresencePongInteractionDASH) != 0
+ // ignore remaining 6 bytes; should be set to 0s
+ return nil
+}
+
+// NextLayerType returns LayerTypePayload, as there are no further layers to
+// decode. This partially satisfies DecodingLayer.
+func (a *ASFPresencePong) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (a *ASFPresencePong) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(16)
+ if err != nil {
+ return err
+ }
+
+ binary.BigEndian.PutUint32(bytes[:4], a.Enterprise)
+
+ copy(bytes[4:8], a.OEM[:])
+
+ bytes[8] = 0
+ if a.IPMI {
+ bytes[8] |= uint8(ASFPresencePongEntityIPMI)
+ }
+ if a.ASFv1 {
+ bytes[8] |= uint8(ASFPresencePongEntityASFv1)
+ }
+
+ bytes[9] = 0
+ if a.SecurityExtensions {
+ bytes[9] |= uint8(ASFPresencePongInteractionSecurityExtensions)
+ }
+ if a.DASH {
+ bytes[9] |= uint8(ASFPresencePongInteractionDASH)
+ }
+
+ // zero-out remaining 6 bytes
+ for i := 10; i < len(bytes); i++ {
+ bytes[i] = 0x00
+ }
+
+ return nil
+}
+
+// decodeASFPresencePong decodes the byte slice into an RMCP-ASF Presence Pong
+// struct.
+func decodeASFPresencePong(data []byte, p gopacket.PacketBuilder) error {
+ return decodingLayerDecoder(&ASFPresencePong{}, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/base.go b/vendor/github.com/google/gopacket/layers/base.go
new file mode 100644
index 0000000..cd59b46
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/base.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
+// BaseLayer is a convenience struct which implements the LayerData and
+// LayerPayload functions of the Layer interface.
+type BaseLayer struct {
+ // Contents is the set of bytes that make up this layer. IE: for an
+ // Ethernet packet, this would be the set of bytes making up the
+ // Ethernet frame.
+ Contents []byte
+ // Payload is the set of bytes contained by (but not part of) this
+ // Layer. Again, to take Ethernet as an example, this would be the
+ // set of bytes encapsulated by the Ethernet protocol.
+ Payload []byte
+}
+
+// LayerContents returns the bytes of the packet layer.
+func (b *BaseLayer) LayerContents() []byte { return b.Contents }
+
+// LayerPayload returns the bytes contained within the packet layer.
+func (b *BaseLayer) LayerPayload() []byte { return b.Payload }
+
+type layerDecodingLayer interface {
+ gopacket.Layer
+ DecodeFromBytes([]byte, gopacket.DecodeFeedback) error
+ NextLayerType() gopacket.LayerType
+}
+
+func decodingLayerDecoder(d layerDecodingLayer, data []byte, p gopacket.PacketBuilder) error {
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(d)
+ next := d.NextLayerType()
+ if next == gopacket.LayerTypeZero {
+ return nil
+ }
+ return p.NextDecoder(next)
+}
+
+// hacky way to zero out memory... there must be a better way?
+var lotsOfZeros [1024]byte
diff --git a/vendor/github.com/google/gopacket/layers/bfd.go b/vendor/github.com/google/gopacket/layers/bfd.go
new file mode 100644
index 0000000..43030fb
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/bfd.go
@@ -0,0 +1,481 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// BFD Control Packet Format
+// -------------------------
+// The current version of BFD's RFC (RFC 5880) contains the following
+// diagram for the BFD Control packet format:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | My Discriminator |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Your Discriminator |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Desired Min TX Interval |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Required Min RX Interval |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Required Min Echo RX Interval |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// An optional Authentication Section MAY be present:
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Authentication Data... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Simple Password Authentication Section Format
+// ---------------------------------------------
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Auth Key ID | Password... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Keyed MD5 and Meticulous Keyed MD5 Authentication Section Format
+// ----------------------------------------------------------------
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Auth Key ID | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Sequence Number |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Key/Digest... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section Format
+// ------------------------------------------------------------------
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Type | Auth Len | Auth Key ID | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Sequence Number |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Auth Key/Hash... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ... |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// From https://tools.ietf.org/rfc/rfc5880.txt
+const bfdMinimumRecordSizeInBytes int = 24
+
+// BFDVersion represents the version as decoded from the BFD control message
+type BFDVersion uint8
+
+// BFDDiagnostic represents diagnostic information about a BFD session
+type BFDDiagnostic uint8
+
+// constants that define BFDDiagnostic flags
+const (
+ BFDDiagnosticNone BFDDiagnostic = 0 // No Diagnostic
+ BFDDiagnosticTimeExpired BFDDiagnostic = 1 // Control Detection Time Expired
+ BFDDiagnosticEchoFailed BFDDiagnostic = 2 // Echo Function Failed
+ BFDDiagnosticNeighborSignalDown BFDDiagnostic = 3 // Neighbor Signaled Session Down
+ BFDDiagnosticForwardPlaneReset BFDDiagnostic = 4 // Forwarding Plane Reset
+ BFDDiagnosticPathDown BFDDiagnostic = 5 // Path Down
+ BFDDiagnosticConcatPathDown BFDDiagnostic = 6 // Concatenated Path Down
+ BFDDiagnosticAdminDown BFDDiagnostic = 7 // Administratively Down
+ BFDDiagnosticRevConcatPathDown BFDDiagnostic = 8 // Reverse Concatenated Path Down
+)
+
+// String returns a string version of BFDDiagnostic
+func (bd BFDDiagnostic) String() string {
+ switch bd {
+ default:
+ return "Unknown"
+ case BFDDiagnosticNone:
+ return "None"
+ case BFDDiagnosticTimeExpired:
+ return "Control Detection Time Expired"
+ case BFDDiagnosticEchoFailed:
+ return "Echo Function Failed"
+ case BFDDiagnosticNeighborSignalDown:
+ return "Neighbor Signaled Session Down"
+ case BFDDiagnosticForwardPlaneReset:
+ return "Forwarding Plane Reset"
+ case BFDDiagnosticPathDown:
+ return "Path Down"
+ case BFDDiagnosticConcatPathDown:
+ return "Concatenated Path Down"
+ case BFDDiagnosticAdminDown:
+ return "Administratively Down"
+ case BFDDiagnosticRevConcatPathDown:
+ return "Reverse Concatenated Path Down"
+ }
+}
+
+// BFDState represents the state of a BFD session
+type BFDState uint8
+
+// constants that define BFDState
+const (
+ BFDStateAdminDown BFDState = 0
+ BFDStateDown BFDState = 1
+ BFDStateInit BFDState = 2
+ BFDStateUp BFDState = 3
+)
+
+// String returns a string version of BFDState
+func (s BFDState) String() string {
+ switch s {
+ default:
+ return "Unknown"
+ case BFDStateAdminDown:
+ return "Admin Down"
+ case BFDStateDown:
+ return "Down"
+ case BFDStateInit:
+ return "Init"
+ case BFDStateUp:
+ return "Up"
+ }
+}
+
+// BFDDetectMultiplier is the detection time multiplier: the negotiated
+// transmit interval, multiplied by this value, provides the Detection
+// Time for the receiving system in Asynchronous mode.
+type BFDDetectMultiplier uint8
+
+// BFDDiscriminator is a unique, nonzero discriminator value used
+// to demultiplex multiple BFD sessions between the same pair of systems.
+type BFDDiscriminator uint32
+
+// BFDTimeInterval represents a time interval in microseconds
+type BFDTimeInterval uint32
+
+// BFDAuthType represents the authentication used in the BFD session
+type BFDAuthType uint8
+
+// constants that define the BFDAuthType
+const (
+ BFDAuthTypeNone BFDAuthType = 0 // No Auth
+ BFDAuthTypePassword BFDAuthType = 1 // Simple Password
+ BFDAuthTypeKeyedMD5 BFDAuthType = 2 // Keyed MD5
+ BFDAuthTypeMeticulousKeyedMD5 BFDAuthType = 3 // Meticulous Keyed MD5
+ BFDAuthTypeKeyedSHA1 BFDAuthType = 4 // Keyed SHA1
+ BFDAuthTypeMeticulousKeyedSHA1 BFDAuthType = 5 // Meticulous Keyed SHA1
+)
+
+// String returns a string version of BFDAuthType
+func (at BFDAuthType) String() string {
+ switch at {
+ default:
+ return "Unknown"
+ case BFDAuthTypeNone:
+ return "No Authentication"
+ case BFDAuthTypePassword:
+ return "Simple Password"
+ case BFDAuthTypeKeyedMD5:
+ return "Keyed MD5"
+ case BFDAuthTypeMeticulousKeyedMD5:
+ return "Meticulous Keyed MD5"
+ case BFDAuthTypeKeyedSHA1:
+ return "Keyed SHA1"
+ case BFDAuthTypeMeticulousKeyedSHA1:
+ return "Meticulous Keyed SHA1"
+ }
+}
+
+// BFDAuthKeyID represents the authentication key ID in use for
+// this packet. This allows multiple keys to be active simultaneously.
+type BFDAuthKeyID uint8
+
+// BFDAuthSequenceNumber represents the sequence number for this packet.
+// For Keyed Authentication, this value is incremented occasionally. For
+// Meticulous Keyed Authentication, this value is incremented for each
+// successive packet transmitted for a session. This provides protection
+// against replay attacks.
+type BFDAuthSequenceNumber uint32
+
+// BFDAuthData represents the authentication key or digest
+type BFDAuthData []byte
+
+// BFDAuthHeader represents authentication data used in the BFD session
+type BFDAuthHeader struct {
+ AuthType BFDAuthType
+ KeyID BFDAuthKeyID
+ SequenceNumber BFDAuthSequenceNumber
+ Data BFDAuthData
+}
+
+// Length returns the data length of the BFDAuthHeader based on the
+// authentication type
+func (h *BFDAuthHeader) Length() int {
+ switch h.AuthType {
+ case BFDAuthTypePassword:
+ return 3 + len(h.Data)
+ case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+ return 8 + len(h.Data)
+ case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+ return 8 + len(h.Data)
+ default:
+ return 0
+ }
+}
+
+// BFD represents a BFD control message packet whose payload contains
+// the control information required for a BFD session.
+//
+// References
+// ----------
+//
+// Wikipedia's BFD entry:
+// https://en.wikipedia.org/wiki/Bidirectional_Forwarding_Detection
+// This is the best place to get an overview of BFD.
+//
+// RFC 5880 "Bidirectional Forwarding Detection (BFD)" (2010)
+// https://tools.ietf.org/html/rfc5880
+// This is the original BFD specification.
+//
+// RFC 5881 "Bidirectional Forwarding Detection (BFD) for IPv4 and IPv6 (Single Hop)" (2010)
+// https://tools.ietf.org/html/rfc5881
+// Describes the use of the Bidirectional Forwarding Detection (BFD)
+// protocol over IPv4 and IPv6 for single IP hops.
+type BFD struct {
+ BaseLayer // Stores the packet bytes and payload bytes.
+
+ Version BFDVersion // Version of the BFD protocol.
+ Diagnostic BFDDiagnostic // Diagnostic code for last state change
+ State BFDState // Current state
+ Poll bool // Requesting verification
+ Final bool // Responding to a received BFD Control packet that had the Poll (P) bit set.
+ ControlPlaneIndependent bool // BFD implementation does not share fate with its control plane
+ AuthPresent bool // Authentication Section is present and the session is to be authenticated
+ Demand bool // Demand mode is active
+ Multipoint bool // For future point-to-multipoint extensions. Must always be zero
+ DetectMultiplier BFDDetectMultiplier // Detection time multiplier
+ MyDiscriminator BFDDiscriminator // A unique, nonzero discriminator value
+ YourDiscriminator BFDDiscriminator // discriminator received from the remote system.
+ DesiredMinTxInterval BFDTimeInterval // Minimum interval, in microseconds, the local system would like to use when transmitting BFD Control packets
+ RequiredMinRxInterval BFDTimeInterval // Minimum interval, in microseconds, between received BFD Control packets that this system is capable of supporting
+ RequiredMinEchoRxInterval BFDTimeInterval // Minimum interval, in microseconds, between received BFD Echo packets that this system is capable of supporting
+ AuthHeader *BFDAuthHeader // Authentication data, variable length.
+}
+
+// Length returns the data length of a BFD Control message which
+// changes based on the presence and type of authentication
+// contained in the message
+func (d *BFD) Length() int {
+ if d.AuthPresent && (d.AuthHeader != nil) {
+ return bfdMinimumRecordSizeInBytes + d.AuthHeader.Length()
+ }
+
+ return bfdMinimumRecordSizeInBytes
+}
+
+// LayerType returns the layer type of the BFD object, which is LayerTypeBFD.
+func (d *BFD) LayerType() gopacket.LayerType {
+ return LayerTypeBFD
+}
+
+// decodeBFD analyses a byte slice and attempts to decode it as a BFD
+// control packet
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the BFD layer.
+func decodeBFD(data []byte, p gopacket.PacketBuilder) error {
+
+ // Attempt to decode the byte slice.
+ d := &BFD{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+
+ // If the decoding worked, add the layer to the packet and set it
+ // as the application layer too, if there isn't already one.
+ p.AddLayer(d)
+ p.SetApplicationLayer(d)
+
+ return nil
+}
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a BFD
+// control packet.
+//
+// Upon success, it loads the BFD object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non nil).
+func (d *BFD) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ // If the data block is too short to be a BFD record, then return an error.
+ if len(data) < bfdMinimumRecordSizeInBytes {
+ df.SetTruncated()
+ return errors.New("BFD packet too short")
+ }
+
+ pLen := uint8(data[3])
+ if len(data) != int(pLen) {
+ return errors.New("BFD packet length does not match")
+ }
+
+ // BFD type embeds type BaseLayer which contains two fields:
+ // Contents is supposed to contain the bytes of the data at this level.
+ // Payload is supposed to contain the payload of this level.
+ // Here we set the baselayer to be the bytes of the BFD record.
+ d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+ // Extract the fields from the block of bytes.
+ // To make sense of this, refer to the packet diagram
+ // above and the section on endian conventions.
+
+ // The first few fields are all packed into the first 32 bits. Unpack them.
+ d.Version = BFDVersion(((data[0] & 0xE0) >> 5))
+ d.Diagnostic = BFDDiagnostic(data[0] & 0x1F)
+ data = data[1:]
+
+ d.State = BFDState((data[0] & 0xC0) >> 6)
+ d.Poll = data[0]&0x20 != 0
+ d.Final = data[0]&0x10 != 0
+ d.ControlPlaneIndependent = data[0]&0x08 != 0
+ d.AuthPresent = data[0]&0x04 != 0
+ d.Demand = data[0]&0x02 != 0
+ d.Multipoint = data[0]&0x01 != 0
+ data = data[1:]
+
+ data, d.DetectMultiplier = data[1:], BFDDetectMultiplier(data[0])
+ data, _ = data[1:], uint8(data[0]) // Consume length
+
+ // The remaining fields can just be copied in big endian order.
+ data, d.MyDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+ data, d.YourDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+ data, d.DesiredMinTxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+ data, d.RequiredMinRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+ data, d.RequiredMinEchoRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+
+ if d.AuthPresent && (len(data) > 2) {
+ d.AuthHeader = &BFDAuthHeader{}
+ data, d.AuthHeader.AuthType = data[1:], BFDAuthType(data[0])
+ data, _ = data[1:], uint8(data[0]) // Consume length
+ data, d.AuthHeader.KeyID = data[1:], BFDAuthKeyID(data[0])
+
+ switch d.AuthHeader.AuthType {
+ case BFDAuthTypePassword:
+ d.AuthHeader.Data = BFDAuthData(data)
+ case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+ // Skipped reserved byte
+ data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+ d.AuthHeader.Data = BFDAuthData(data)
+ case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+ // Skipped reserved byte
+ data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+ d.AuthHeader.Data = BFDAuthData(data)
+ }
+ }
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *BFD) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ data, err := b.PrependBytes(bfdMinimumRecordSizeInBytes)
+ if err != nil {
+ return err
+ }
+
+ // Pack the first few fields into the first 32 bits.
+ data[0] = byte(byte(d.Version<<5) | byte(d.Diagnostic))
+ h := uint8(0)
+ h |= (uint8(d.State) << 6)
+ h |= (uint8(bool2uint8(d.Poll)) << 5)
+ h |= (uint8(bool2uint8(d.Final)) << 4)
+ h |= (uint8(bool2uint8(d.ControlPlaneIndependent)) << 3)
+ h |= (uint8(bool2uint8(d.AuthPresent)) << 2)
+ h |= (uint8(bool2uint8(d.Demand)) << 1)
+ h |= uint8(bool2uint8(d.Multipoint))
+ data[1] = byte(h)
+ data[2] = byte(d.DetectMultiplier)
+ data[3] = byte(d.Length())
+
+ // The remaining fields can just be copied in big endian order.
+ binary.BigEndian.PutUint32(data[4:], uint32(d.MyDiscriminator))
+ binary.BigEndian.PutUint32(data[8:], uint32(d.YourDiscriminator))
+ binary.BigEndian.PutUint32(data[12:], uint32(d.DesiredMinTxInterval))
+ binary.BigEndian.PutUint32(data[16:], uint32(d.RequiredMinRxInterval))
+ binary.BigEndian.PutUint32(data[20:], uint32(d.RequiredMinEchoRxInterval))
+
+ if d.AuthPresent && (d.AuthHeader != nil) {
+ auth, err := b.AppendBytes(int(d.AuthHeader.Length()))
+ if err != nil {
+ return err
+ }
+
+ auth[0] = byte(d.AuthHeader.AuthType)
+ auth[1] = byte(d.AuthHeader.Length())
+ auth[2] = byte(d.AuthHeader.KeyID)
+
+ switch d.AuthHeader.AuthType {
+ case BFDAuthTypePassword:
+ copy(auth[3:], d.AuthHeader.Data)
+ case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+ auth[3] = byte(0)
+ binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+ copy(auth[8:], d.AuthHeader.Data)
+ case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+ auth[3] = byte(0)
+ binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+ copy(auth[8:], d.AuthHeader.Data)
+ }
+ }
+
+ return nil
+}
+
+// CanDecode returns the set of layers that BFD objects can decode.
+// As BFD objects can only decode the BFD layer, we can return just that
+// layer; a single LayerType on its own implements LayerClass.
+func (d *BFD) CanDecode() gopacket.LayerClass {
+ return LayerTypeBFD
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (BFD) layer. As BFD packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *BFD) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// Payload returns an empty byte slice as BFD packets do not carry a payload
+func (d *BFD) Payload() []byte {
+ return nil
+}
+
+// bool2uint8 converts a bool to uint8
+func bool2uint8(b bool) uint8 {
+ if b {
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/github.com/google/gopacket/layers/cdp.go b/vendor/github.com/google/gopacket/layers/cdp.go
new file mode 100644
index 0000000..d67203e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/cdp.go
@@ -0,0 +1,651 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+// http://search.cpan.org/~mchapman/Net-CDP-0.09/lib/Net/CDP.pm
+// https://code.google.com/p/ladvd/
+// http://anonsvn.wireshark.org/viewvc/releases/wireshark-1.8.6/epan/dissectors/packet-cdp.c
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// CDPTLVType is the type of each TLV value in a CiscoDiscovery packet.
+type CDPTLVType uint16
+
+// CDPTLVType values.
+const (
+ CDPTLVDevID CDPTLVType = 0x0001
+ CDPTLVAddress CDPTLVType = 0x0002
+ CDPTLVPortID CDPTLVType = 0x0003
+ CDPTLVCapabilities CDPTLVType = 0x0004
+ CDPTLVVersion CDPTLVType = 0x0005
+ CDPTLVPlatform CDPTLVType = 0x0006
+ CDPTLVIPPrefix CDPTLVType = 0x0007
+ CDPTLVHello CDPTLVType = 0x0008
+ CDPTLVVTPDomain CDPTLVType = 0x0009
+ CDPTLVNativeVLAN CDPTLVType = 0x000a
+ CDPTLVFullDuplex CDPTLVType = 0x000b
+ CDPTLVVLANReply CDPTLVType = 0x000e
+ CDPTLVVLANQuery CDPTLVType = 0x000f
+ CDPTLVPower CDPTLVType = 0x0010
+ CDPTLVMTU CDPTLVType = 0x0011
+ CDPTLVExtendedTrust CDPTLVType = 0x0012
+ CDPTLVUntrustedCOS CDPTLVType = 0x0013
+ CDPTLVSysName CDPTLVType = 0x0014
+ CDPTLVSysOID CDPTLVType = 0x0015
+ CDPTLVMgmtAddresses CDPTLVType = 0x0016
+ CDPTLVLocation CDPTLVType = 0x0017
+ CDPTLVExternalPortID CDPTLVType = 0x0018
+ CDPTLVPowerRequested CDPTLVType = 0x0019
+ CDPTLVPowerAvailable CDPTLVType = 0x001a
+ CDPTLVPortUnidirectional CDPTLVType = 0x001b
+ CDPTLVEnergyWise CDPTLVType = 0x001d
+ CDPTLVSparePairPOE CDPTLVType = 0x001f
+)
+
+// CiscoDiscoveryValue is a TLV value inside a CiscoDiscovery packet layer.
+type CiscoDiscoveryValue struct {
+ Type CDPTLVType
+ Length uint16
+ Value []byte
+}
+
+// CiscoDiscovery is a packet layer containing the Cisco Discovery Protocol.
+// See http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#31885
+type CiscoDiscovery struct {
+ BaseLayer
+ Version byte
+ TTL byte
+ Checksum uint16
+ Values []CiscoDiscoveryValue
+}
+
+// CDPCapability is the set of capabilities advertised by a CDP device.
+type CDPCapability uint32
+
+// CDPCapability values.
+const (
+ CDPCapMaskRouter CDPCapability = 0x0001
+ CDPCapMaskTBBridge CDPCapability = 0x0002
+ CDPCapMaskSPBridge CDPCapability = 0x0004
+ CDPCapMaskSwitch CDPCapability = 0x0008
+ CDPCapMaskHost CDPCapability = 0x0010
+ CDPCapMaskIGMPFilter CDPCapability = 0x0020
+ CDPCapMaskRepeater CDPCapability = 0x0040
+ CDPCapMaskPhone CDPCapability = 0x0080
+ CDPCapMaskRemote CDPCapability = 0x0100
+)
+
+// CDPCapabilities represents the capabilities of a device
+type CDPCapabilities struct {
+ L3Router bool
+ TBBridge bool
+ SPBridge bool
+ L2Switch bool
+ IsHost bool
+ IGMPFilter bool
+ L1Repeater bool
+ IsPhone bool
+ RemotelyManaged bool
+}
+
+// CDP Power-over-Ethernet values.
+const (
+ CDPPoEFourWire byte = 0x01
+ CDPPoEPDArch byte = 0x02
+ CDPPoEPDRequest byte = 0x04
+ CDPPoEPSE byte = 0x08
+)
+
+// CDPSparePairPoE provides information on PoE.
+type CDPSparePairPoE struct {
+ PSEFourWire bool // Supported / Not supported
+ PDArchShared bool // Shared / Independent
+ PDRequestOn bool // On / Off
+ PSEOn bool // On / Off
+}
+
+// CDPVLANDialogue encapsulates a VLAN Query/Reply
+type CDPVLANDialogue struct {
+ ID uint8
+ VLAN uint16
+}
+
+// CDPPowerDialogue encapsulates a Power Query/Reply
+type CDPPowerDialogue struct {
+ ID uint16
+ MgmtID uint16
+ Values []uint32
+}
+
+// CDPLocation provides location information for a CDP device.
+type CDPLocation struct {
+ Type uint8 // Undocumented
+ Location string
+}
+
+// CDPHello is a Cisco Hello message (undocumented, hence the "Unknown" fields)
+type CDPHello struct {
+ OUI []byte
+ ProtocolID uint16
+ ClusterMaster net.IP
+ Unknown1 net.IP
+ Version byte
+ SubVersion byte
+ Status byte
+ Unknown2 byte
+ ClusterCommander net.HardwareAddr
+ SwitchMAC net.HardwareAddr
+ Unknown3 byte
+ ManagementVLAN uint16
+}
+
+// CDPEnergyWiseSubtype is used within CDP to define TLV values.
+type CDPEnergyWiseSubtype uint32
+
+// CDPEnergyWiseSubtype values.
+const (
+ CDPEnergyWiseRole CDPEnergyWiseSubtype = 0x00000007
+ CDPEnergyWiseDomain CDPEnergyWiseSubtype = 0x00000008
+ CDPEnergyWiseName CDPEnergyWiseSubtype = 0x00000009
+ CDPEnergyWiseReplyTo CDPEnergyWiseSubtype = 0x00000017
+)
+
+// CDPEnergyWise is used by CDP to monitor and control power usage.
+type CDPEnergyWise struct {
+ EncryptedData []byte
+ Unknown1 uint32
+ SequenceNumber uint32
+ ModelNumber string
+ Unknown2 uint16
+ HardwareID string
+ SerialNum string
+ Unknown3 []byte
+ Role string
+ Domain string
+ Name string
+ ReplyUnknown1 []byte
+ ReplyPort []byte
+ ReplyAddress []byte
+ ReplyUnknown2 []byte
+ ReplyUnknown3 []byte
+}
+
+// CiscoDiscoveryInfo represents the decoded details for a set of CiscoDiscoveryValues
+type CiscoDiscoveryInfo struct {
+ BaseLayer
+ CDPHello
+ DeviceID string
+ Addresses []net.IP
+ PortID string
+ Capabilities CDPCapabilities
+ Version string
+ Platform string
+ IPPrefixes []net.IPNet
+ VTPDomain string
+ NativeVLAN uint16
+ FullDuplex bool
+ VLANReply CDPVLANDialogue
+ VLANQuery CDPVLANDialogue
+ PowerConsumption uint16
+ MTU uint32
+ ExtendedTrust uint8
+ UntrustedCOS uint8
+ SysName string
+ SysOID string
+ MgmtAddresses []net.IP
+ Location CDPLocation
+ PowerRequest CDPPowerDialogue
+ PowerAvailable CDPPowerDialogue
+ SparePairPoe CDPSparePairPoE
+ EnergyWise CDPEnergyWise
+ Unknown []CiscoDiscoveryValue
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscovery.
+func (c *CiscoDiscovery) LayerType() gopacket.LayerType {
+ return LayerTypeCiscoDiscovery
+}
+
+func decodeCiscoDiscovery(data []byte, p gopacket.PacketBuilder) error {
+ c := &CiscoDiscovery{
+ Version: data[0],
+ TTL: data[1],
+ Checksum: binary.BigEndian.Uint16(data[2:4]),
+ }
+ if c.Version != 1 && c.Version != 2 {
+ return fmt.Errorf("Invalid CiscoDiscovery version number %d", c.Version)
+ }
+ var err error
+ c.Values, err = decodeCiscoDiscoveryTLVs(data[4:])
+ if err != nil {
+ return err
+ }
+ c.Contents = data[0:4]
+ c.Payload = data[4:]
+ p.AddLayer(c)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeCiscoDiscoveryInfo))
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscoveryInfo.
+func (c *CiscoDiscoveryInfo) LayerType() gopacket.LayerType {
+ return LayerTypeCiscoDiscoveryInfo
+}
+
+func decodeCiscoDiscoveryTLVs(data []byte) (values []CiscoDiscoveryValue, err error) {
+	for len(data) >= 4 { // a TLV header is a 2-byte type plus a 2-byte length
+		val := CiscoDiscoveryValue{
+			Type:   CDPTLVType(binary.BigEndian.Uint16(data[:2])),
+			Length: binary.BigEndian.Uint16(data[2:4]),
+		}
+		if val.Length < 4 || int(val.Length) > len(data) {
+			// Reject TLVs whose declared length is shorter than the header
+			// or longer than the remaining data.
+			err = fmt.Errorf("Invalid CiscoDiscovery value length %d", val.Length)
+			break
+		}
+ val.Value = data[4:val.Length]
+ values = append(values, val)
+ data = data[val.Length:]
+ }
+ return
+}
+
+func decodeCiscoDiscoveryInfo(data []byte, p gopacket.PacketBuilder) error {
+ var err error
+ info := &CiscoDiscoveryInfo{BaseLayer: BaseLayer{Contents: data}}
+ p.AddLayer(info)
+ values, err := decodeCiscoDiscoveryTLVs(data)
+ if err != nil { // Unlikely, as parent decode will fail, but better safe...
+ return err
+ }
+ for _, val := range values {
+ switch val.Type {
+ case CDPTLVDevID:
+ info.DeviceID = string(val.Value)
+ case CDPTLVAddress:
+ if err = checkCDPTLVLen(val, 4); err != nil {
+ return err
+ }
+ info.Addresses, err = decodeAddresses(val.Value)
+ if err != nil {
+ return err
+ }
+ case CDPTLVPortID:
+ info.PortID = string(val.Value)
+ case CDPTLVCapabilities:
+ if err = checkCDPTLVLen(val, 4); err != nil {
+ return err
+ }
+ val := CDPCapability(binary.BigEndian.Uint32(val.Value[0:4]))
+ info.Capabilities.L3Router = (val&CDPCapMaskRouter > 0)
+ info.Capabilities.TBBridge = (val&CDPCapMaskTBBridge > 0)
+ info.Capabilities.SPBridge = (val&CDPCapMaskSPBridge > 0)
+ info.Capabilities.L2Switch = (val&CDPCapMaskSwitch > 0)
+ info.Capabilities.IsHost = (val&CDPCapMaskHost > 0)
+ info.Capabilities.IGMPFilter = (val&CDPCapMaskIGMPFilter > 0)
+ info.Capabilities.L1Repeater = (val&CDPCapMaskRepeater > 0)
+ info.Capabilities.IsPhone = (val&CDPCapMaskPhone > 0)
+ info.Capabilities.RemotelyManaged = (val&CDPCapMaskRemote > 0)
+ case CDPTLVVersion:
+ info.Version = string(val.Value)
+ case CDPTLVPlatform:
+ info.Platform = string(val.Value)
+ case CDPTLVIPPrefix:
+ v := val.Value
+ l := len(v)
+ if l%5 == 0 && l >= 5 {
+ for len(v) > 0 {
+ _, ipnet, _ := net.ParseCIDR(fmt.Sprintf("%d.%d.%d.%d/%d", v[0], v[1], v[2], v[3], v[4]))
+ info.IPPrefixes = append(info.IPPrefixes, *ipnet)
+ v = v[5:]
+ }
+ } else {
+ return fmt.Errorf("Invalid TLV %v length %d", val.Type, len(val.Value))
+ }
+ case CDPTLVHello:
+ if err = checkCDPTLVLen(val, 32); err != nil {
+ return err
+ }
+ v := val.Value
+ info.CDPHello.OUI = v[0:3]
+ info.CDPHello.ProtocolID = binary.BigEndian.Uint16(v[3:5])
+ info.CDPHello.ClusterMaster = v[5:9]
+ info.CDPHello.Unknown1 = v[9:13]
+ info.CDPHello.Version = v[13]
+ info.CDPHello.SubVersion = v[14]
+ info.CDPHello.Status = v[15]
+ info.CDPHello.Unknown2 = v[16]
+ info.CDPHello.ClusterCommander = v[17:23]
+ info.CDPHello.SwitchMAC = v[23:29]
+ info.CDPHello.Unknown3 = v[29]
+ info.CDPHello.ManagementVLAN = binary.BigEndian.Uint16(v[30:32])
+ case CDPTLVVTPDomain:
+ info.VTPDomain = string(val.Value)
+ case CDPTLVNativeVLAN:
+ if err = checkCDPTLVLen(val, 2); err != nil {
+ return err
+ }
+ info.NativeVLAN = binary.BigEndian.Uint16(val.Value[0:2])
+ case CDPTLVFullDuplex:
+ if err = checkCDPTLVLen(val, 1); err != nil {
+ return err
+ }
+ info.FullDuplex = (val.Value[0] == 1)
+ case CDPTLVVLANReply:
+ if err = checkCDPTLVLen(val, 3); err != nil {
+ return err
+ }
+ info.VLANReply.ID = uint8(val.Value[0])
+ info.VLANReply.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
+ case CDPTLVVLANQuery:
+ if err = checkCDPTLVLen(val, 3); err != nil {
+ return err
+ }
+ info.VLANQuery.ID = uint8(val.Value[0])
+ info.VLANQuery.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
+ case CDPTLVPower:
+ if err = checkCDPTLVLen(val, 2); err != nil {
+ return err
+ }
+ info.PowerConsumption = binary.BigEndian.Uint16(val.Value[0:2])
+ case CDPTLVMTU:
+ if err = checkCDPTLVLen(val, 4); err != nil {
+ return err
+ }
+ info.MTU = binary.BigEndian.Uint32(val.Value[0:4])
+ case CDPTLVExtendedTrust:
+ if err = checkCDPTLVLen(val, 1); err != nil {
+ return err
+ }
+ info.ExtendedTrust = uint8(val.Value[0])
+ case CDPTLVUntrustedCOS:
+ if err = checkCDPTLVLen(val, 1); err != nil {
+ return err
+ }
+ info.UntrustedCOS = uint8(val.Value[0])
+ case CDPTLVSysName:
+ info.SysName = string(val.Value)
+ case CDPTLVSysOID:
+ info.SysOID = string(val.Value)
+ case CDPTLVMgmtAddresses:
+ if err = checkCDPTLVLen(val, 4); err != nil {
+ return err
+ }
+ info.MgmtAddresses, err = decodeAddresses(val.Value)
+ if err != nil {
+ return err
+ }
+ case CDPTLVLocation:
+ if err = checkCDPTLVLen(val, 2); err != nil {
+ return err
+ }
+ info.Location.Type = uint8(val.Value[0])
+ info.Location.Location = string(val.Value[1:])
+
+			// case CDPTLVExternalPortID:
+ // Undocumented
+ case CDPTLVPowerRequested:
+ if err = checkCDPTLVLen(val, 4); err != nil {
+ return err
+ }
+ info.PowerRequest.ID = binary.BigEndian.Uint16(val.Value[0:2])
+ info.PowerRequest.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
+ for n := 4; n < len(val.Value); n += 4 {
+ info.PowerRequest.Values = append(info.PowerRequest.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
+ }
+ case CDPTLVPowerAvailable:
+ if err = checkCDPTLVLen(val, 4); err != nil {
+ return err
+ }
+ info.PowerAvailable.ID = binary.BigEndian.Uint16(val.Value[0:2])
+ info.PowerAvailable.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
+ for n := 4; n < len(val.Value); n += 4 {
+ info.PowerAvailable.Values = append(info.PowerAvailable.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
+ }
+ // case CDPTLVPortUnidirectional
+ // Undocumented
+ case CDPTLVEnergyWise:
+ if err = checkCDPTLVLen(val, 72); err != nil {
+ return err
+ }
+ info.EnergyWise.EncryptedData = val.Value[0:20]
+ info.EnergyWise.Unknown1 = binary.BigEndian.Uint32(val.Value[20:24])
+ info.EnergyWise.SequenceNumber = binary.BigEndian.Uint32(val.Value[24:28])
+ info.EnergyWise.ModelNumber = string(val.Value[28:44])
+ info.EnergyWise.Unknown2 = binary.BigEndian.Uint16(val.Value[44:46])
+ info.EnergyWise.HardwareID = string(val.Value[46:49])
+ info.EnergyWise.SerialNum = string(val.Value[49:60])
+ info.EnergyWise.Unknown3 = val.Value[60:68]
+ tlvLen := binary.BigEndian.Uint16(val.Value[68:70])
+ tlvNum := binary.BigEndian.Uint16(val.Value[70:72])
+ data := val.Value[72:]
+ if len(data) < int(tlvLen) {
+ return fmt.Errorf("Invalid TLV length %d vs %d", tlvLen, len(data))
+ }
+ numSeen := 0
+ for len(data) > 8 {
+ numSeen++
+				if numSeen > int(tlvNum) { // too many TLVs?
+					return fmt.Errorf("Too many TLVs - wanted %d, saw %d", tlvNum, numSeen)
+ }
+ tType := CDPEnergyWiseSubtype(binary.BigEndian.Uint32(data[0:4]))
+ tLen := int(binary.BigEndian.Uint32(data[4:8]))
+ if tLen > len(data)-8 {
+ return fmt.Errorf("Invalid TLV length %d vs %d", tLen, len(data)-8)
+ }
+ data = data[8:]
+			switch tType {
+			case CDPEnergyWiseRole:
+				info.EnergyWise.Role = string(data[:tLen])
+			case CDPEnergyWiseDomain:
+				info.EnergyWise.Domain = string(data[:tLen])
+			case CDPEnergyWiseName:
+				info.EnergyWise.Name = string(data[:tLen])
+ case CDPEnergyWiseReplyTo:
+ if len(data) >= 18 {
+ info.EnergyWise.ReplyUnknown1 = data[0:2]
+ info.EnergyWise.ReplyPort = data[2:4]
+ info.EnergyWise.ReplyAddress = data[4:8]
+ info.EnergyWise.ReplyUnknown2 = data[8:10]
+ info.EnergyWise.ReplyUnknown3 = data[10:14]
+ }
+ }
+ data = data[tLen:]
+ }
+ case CDPTLVSparePairPOE:
+ if err = checkCDPTLVLen(val, 1); err != nil {
+ return err
+ }
+ v := val.Value[0]
+ info.SparePairPoe.PSEFourWire = (v&CDPPoEFourWire > 0)
+ info.SparePairPoe.PDArchShared = (v&CDPPoEPDArch > 0)
+ info.SparePairPoe.PDRequestOn = (v&CDPPoEPDRequest > 0)
+ info.SparePairPoe.PSEOn = (v&CDPPoEPSE > 0)
+ default:
+ info.Unknown = append(info.Unknown, val)
+ }
+ }
+ return nil
+}
+
+// CDP Protocol Types
+const (
+ CDPProtocolTypeNLPID byte = 1
+ CDPProtocolType802_2 byte = 2
+)
+
+// CDPAddressType is used to define TLV values within CDP addresses.
+type CDPAddressType uint64
+
+// CDP Address types.
+const (
+ CDPAddressTypeCLNP CDPAddressType = 0x81
+ CDPAddressTypeIPV4 CDPAddressType = 0xcc
+ CDPAddressTypeIPV6 CDPAddressType = 0xaaaa030000000800
+ CDPAddressTypeDECNET CDPAddressType = 0xaaaa030000006003
+ CDPAddressTypeAPPLETALK CDPAddressType = 0xaaaa03000000809b
+ CDPAddressTypeIPX CDPAddressType = 0xaaaa030000008137
+ CDPAddressTypeVINES CDPAddressType = 0xaaaa0300000080c4
+ CDPAddressTypeXNS CDPAddressType = 0xaaaa030000000600
+ CDPAddressTypeAPOLLO CDPAddressType = 0xaaaa030000008019
+)
+
+func decodeAddresses(v []byte) (addresses []net.IP, err error) {
+ numaddr := int(binary.BigEndian.Uint32(v[0:4]))
+ if numaddr < 1 {
+ return nil, fmt.Errorf("Invalid Address TLV number %d", numaddr)
+ }
+ v = v[4:]
+ if len(v) < numaddr*8 {
+ return nil, fmt.Errorf("Invalid Address TLV length %d", len(v))
+ }
+ for i := 0; i < numaddr; i++ {
+ prottype := v[0]
+ if prottype != CDPProtocolTypeNLPID && prottype != CDPProtocolType802_2 { // invalid protocol type
+ return nil, fmt.Errorf("Invalid Address Protocol %d", prottype)
+ }
+ protlen := int(v[1])
+ if (prottype == CDPProtocolTypeNLPID && protlen != 1) ||
+ (prottype == CDPProtocolType802_2 && protlen != 3 && protlen != 8) { // invalid length
+ return nil, fmt.Errorf("Invalid Address Protocol length %d", protlen)
+ }
+ plen := make([]byte, 8)
+ copy(plen[8-protlen:], v[2:2+protlen])
+ protocol := CDPAddressType(binary.BigEndian.Uint64(plen))
+ v = v[2+protlen:]
+ addrlen := binary.BigEndian.Uint16(v[0:2])
+ ab := v[2 : 2+addrlen]
+ if protocol == CDPAddressTypeIPV4 && addrlen == 4 {
+ addresses = append(addresses, net.IPv4(ab[0], ab[1], ab[2], ab[3]))
+ } else if protocol == CDPAddressTypeIPV6 && addrlen == 16 {
+ addresses = append(addresses, net.IP(ab))
+ } else {
+ // only handle IPV4 & IPV6 for now
+ }
+ v = v[2+addrlen:]
+ if len(v) < 8 {
+ break
+ }
+ }
+ return
+}
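
For reference, a single entry in the CDP Address TLV, as consumed by decodeAddresses above, lays out as a 4-byte count followed by {protocol type, protocol length, protocol, address length, address} records. A hand-assembled IPv4 example with illustrative values; since decodeAddresses is package-internal, this sketch only compiles inside the layers package (e.g. in a test file that imports fmt):

```go
// Inside package layers, e.g. in a test file:
func ExampleDecodeAddresses() {
	entry := []byte{
		0x00, 0x00, 0x00, 0x01, // one address follows
		0x01,       // protocol type: NLPID
		0x01,       // protocol length: 1
		0xcc,       // protocol: IPv4
		0x00, 0x04, // address length: 4
		192, 168, 1, 1, // the address itself
	}
	ips, err := decodeAddresses(entry)
	fmt.Println(ips, err)
	// Output: [192.168.1.1] <nil>
}
```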
+
+func (t CDPTLVType) String() (s string) {
+ switch t {
+ case CDPTLVDevID:
+ s = "Device ID"
+ case CDPTLVAddress:
+ s = "Addresses"
+ case CDPTLVPortID:
+ s = "Port ID"
+ case CDPTLVCapabilities:
+ s = "Capabilities"
+ case CDPTLVVersion:
+ s = "Software Version"
+ case CDPTLVPlatform:
+ s = "Platform"
+ case CDPTLVIPPrefix:
+ s = "IP Prefix"
+ case CDPTLVHello:
+ s = "Protocol Hello"
+ case CDPTLVVTPDomain:
+ s = "VTP Management Domain"
+ case CDPTLVNativeVLAN:
+ s = "Native VLAN"
+ case CDPTLVFullDuplex:
+ s = "Full Duplex"
+ case CDPTLVVLANReply:
+ s = "VoIP VLAN Reply"
+ case CDPTLVVLANQuery:
+		s = "VoIP VLAN Query"
+ case CDPTLVPower:
+ s = "Power consumption"
+ case CDPTLVMTU:
+ s = "MTU"
+ case CDPTLVExtendedTrust:
+ s = "Extended Trust Bitmap"
+ case CDPTLVUntrustedCOS:
+ s = "Untrusted Port CoS"
+ case CDPTLVSysName:
+ s = "System Name"
+ case CDPTLVSysOID:
+ s = "System OID"
+ case CDPTLVMgmtAddresses:
+ s = "Management Addresses"
+ case CDPTLVLocation:
+ s = "Location"
+ case CDPTLVExternalPortID:
+ s = "External Port ID"
+ case CDPTLVPowerRequested:
+ s = "Power Requested"
+ case CDPTLVPowerAvailable:
+ s = "Power Available"
+ case CDPTLVPortUnidirectional:
+ s = "Port Unidirectional"
+ case CDPTLVEnergyWise:
+ s = "Energy Wise"
+ case CDPTLVSparePairPOE:
+ s = "Spare Pair POE"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (a CDPAddressType) String() (s string) {
+ switch a {
+ case CDPAddressTypeCLNP:
+ s = "Connectionless Network Protocol"
+ case CDPAddressTypeIPV4:
+ s = "IPv4"
+ case CDPAddressTypeIPV6:
+ s = "IPv6"
+ case CDPAddressTypeDECNET:
+ s = "DECnet Phase IV"
+ case CDPAddressTypeAPPLETALK:
+ s = "Apple Talk"
+ case CDPAddressTypeIPX:
+ s = "Novell IPX"
+ case CDPAddressTypeVINES:
+ s = "Banyan VINES"
+ case CDPAddressTypeXNS:
+ s = "Xerox Network Systems"
+ case CDPAddressTypeAPOLLO:
+ s = "Apollo"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t CDPEnergyWiseSubtype) String() (s string) {
+ switch t {
+ case CDPEnergyWiseRole:
+ s = "Role"
+ case CDPEnergyWiseDomain:
+ s = "Domain"
+ case CDPEnergyWiseName:
+ s = "Name"
+ case CDPEnergyWiseReplyTo:
+ s = "ReplyTo"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func checkCDPTLVLen(v CiscoDiscoveryValue, l int) (err error) {
+ if len(v.Value) < l {
+ err = fmt.Errorf("Invalid TLV %v length %d", v.Type, len(v.Value))
+ }
+ return
+}
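
End to end, decoding a captured CDP frame yields both the raw CiscoDiscovery TLVs and the cooked CiscoDiscoveryInfo layer. A usage sketch, assuming `frame` holds an Ethernet frame carrying SNAP-encapsulated CDP:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	var frame []byte // assumption: an Ethernet frame carrying CDP
	packet := gopacket.NewPacket(frame, layers.LayerTypeEthernet, gopacket.Default)
	if l := packet.Layer(layers.LayerTypeCiscoDiscoveryInfo); l != nil {
		info := l.(*layers.CiscoDiscoveryInfo)
		fmt.Printf("%s (%s) on port %s\n", info.DeviceID, info.Platform, info.PortID)
		for _, addr := range info.Addresses {
			fmt.Println("address:", addr)
		}
	}
}
```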
diff --git a/vendor/github.com/google/gopacket/layers/ctp.go b/vendor/github.com/google/gopacket/layers/ctp.go
new file mode 100644
index 0000000..8287584
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ctp.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// EthernetCTPFunction is the function code used by the EthernetCTP protocol to identify each
+// EthernetCTP layer.
+type EthernetCTPFunction uint16
+
+// EthernetCTPFunction values.
+const (
+ EthernetCTPFunctionReply EthernetCTPFunction = 1
+ EthernetCTPFunctionForwardData EthernetCTPFunction = 2
+)
+
+// EthernetCTP implements the EthernetCTP protocol, see http://www.mit.edu/people/jhawk/ctp.html.
+// We split EthernetCTP up into the top-level EthernetCTP layer, followed by zero or more
+// EthernetCTPForwardData layers, followed by a final EthernetCTPReply layer.
+type EthernetCTP struct {
+ BaseLayer
+ SkipCount uint16
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTP.
+func (c *EthernetCTP) LayerType() gopacket.LayerType {
+ return LayerTypeEthernetCTP
+}
+
+// EthernetCTPForwardData is the ForwardData layer inside EthernetCTP. See EthernetCTP's docs for more
+// details.
+type EthernetCTPForwardData struct {
+ BaseLayer
+ Function EthernetCTPFunction
+ ForwardAddress []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPForwardData.
+func (c *EthernetCTPForwardData) LayerType() gopacket.LayerType {
+ return LayerTypeEthernetCTPForwardData
+}
+
+// ForwardEndpoint returns the EthernetCTPForwardData ForwardAddress as an endpoint.
+func (c *EthernetCTPForwardData) ForwardEndpoint() gopacket.Endpoint {
+ return gopacket.NewEndpoint(EndpointMAC, c.ForwardAddress)
+}
+
+// EthernetCTPReply is the Reply layer inside EthernetCTP. See EthernetCTP's docs for more details.
+type EthernetCTPReply struct {
+ BaseLayer
+ Function EthernetCTPFunction
+ ReceiptNumber uint16
+ Data []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPReply.
+func (c *EthernetCTPReply) LayerType() gopacket.LayerType {
+ return LayerTypeEthernetCTPReply
+}
+
+// Payload returns the EthernetCTP reply's Data bytes.
+func (c *EthernetCTPReply) Payload() []byte { return c.Data }
+
+func decodeEthernetCTP(data []byte, p gopacket.PacketBuilder) error {
+ c := &EthernetCTP{
+ SkipCount: binary.LittleEndian.Uint16(data[:2]),
+ BaseLayer: BaseLayer{data[:2], data[2:]},
+ }
+ if c.SkipCount%2 != 0 {
+ return fmt.Errorf("EthernetCTP skip count is odd: %d", c.SkipCount)
+ }
+ p.AddLayer(c)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+}
+
+// decodeEthernetCTPFromFunctionType reads in the first 2 bytes to determine the EthernetCTP
+// layer type to decode next, then decodes based on that.
+func decodeEthernetCTPFromFunctionType(data []byte, p gopacket.PacketBuilder) error {
+ function := EthernetCTPFunction(binary.LittleEndian.Uint16(data[:2]))
+ switch function {
+ case EthernetCTPFunctionReply:
+ reply := &EthernetCTPReply{
+ Function: function,
+ ReceiptNumber: binary.LittleEndian.Uint16(data[2:4]),
+ Data: data[4:],
+ BaseLayer: BaseLayer{data, nil},
+ }
+ p.AddLayer(reply)
+ p.SetApplicationLayer(reply)
+ return nil
+ case EthernetCTPFunctionForwardData:
+ forward := &EthernetCTPForwardData{
+ Function: function,
+ ForwardAddress: data[2:8],
+ BaseLayer: BaseLayer{data[:8], data[8:]},
+ }
+ p.AddLayer(forward)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+ }
+ return fmt.Errorf("Unknown EthernetCTP function type %v", function)
+}
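
Because decodeEthernetCTPFromFunctionType recurses through ForwardData layers until it reaches a Reply, a decoded packet may contain several CTP layers. A sketch of walking them, assuming `ctpPayload` is the payload of an Ethernet loopback (0x9000) frame:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	var ctpPayload []byte // assumption: payload of an Ethernet loopback frame
	p := gopacket.NewPacket(ctpPayload, layers.LayerTypeEthernetCTP, gopacket.Default)
	for _, l := range p.Layers() {
		switch v := l.(type) {
		case *layers.EthernetCTPForwardData:
			fmt.Println("forward to:", v.ForwardEndpoint())
		case *layers.EthernetCTPReply:
			fmt.Println("receipt number:", v.ReceiptNumber)
		}
	}
}
```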
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv4.go b/vendor/github.com/google/gopacket/layers/dhcpv4.go
new file mode 100644
index 0000000..3bbd036
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv4.go
@@ -0,0 +1,585 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// DHCPOp represents a BOOTP operation.
+type DHCPOp byte
+
+// BOOTP operations
+const (
+ DHCPOpRequest DHCPOp = 1
+ DHCPOpReply DHCPOp = 2
+)
+
+// String returns a string version of a DHCPOp.
+func (o DHCPOp) String() string {
+ switch o {
+ case DHCPOpRequest:
+ return "Request"
+ case DHCPOpReply:
+ return "Reply"
+ default:
+ return "Unknown"
+ }
+}
+
+// DHCPMsgType represents a DHCP operation
+type DHCPMsgType byte
+
+// Constants that represent DHCP operations
+const (
+ DHCPMsgTypeUnspecified DHCPMsgType = iota
+ DHCPMsgTypeDiscover
+ DHCPMsgTypeOffer
+ DHCPMsgTypeRequest
+ DHCPMsgTypeDecline
+ DHCPMsgTypeAck
+ DHCPMsgTypeNak
+ DHCPMsgTypeRelease
+ DHCPMsgTypeInform
+)
+
+// String returns a string version of a DHCPMsgType.
+func (o DHCPMsgType) String() string {
+ switch o {
+ case DHCPMsgTypeUnspecified:
+ return "Unspecified"
+ case DHCPMsgTypeDiscover:
+ return "Discover"
+ case DHCPMsgTypeOffer:
+ return "Offer"
+ case DHCPMsgTypeRequest:
+ return "Request"
+ case DHCPMsgTypeDecline:
+ return "Decline"
+ case DHCPMsgTypeAck:
+ return "Ack"
+ case DHCPMsgTypeNak:
+ return "Nak"
+ case DHCPMsgTypeRelease:
+ return "Release"
+ case DHCPMsgTypeInform:
+ return "Inform"
+ default:
+ return "Unknown"
+ }
+}
+
+// DHCPMagic is the RFC 2131 "magic cookie" for DHCP.
+var DHCPMagic uint32 = 0x63825363
+
+// DHCPv4 contains data for a single DHCP packet.
+type DHCPv4 struct {
+ BaseLayer
+ Operation DHCPOp
+ HardwareType LinkType
+ HardwareLen uint8
+ HardwareOpts uint8
+ Xid uint32
+ Secs uint16
+ Flags uint16
+ ClientIP net.IP
+ YourClientIP net.IP
+ NextServerIP net.IP
+ RelayAgentIP net.IP
+ ClientHWAddr net.HardwareAddr
+ ServerName []byte
+ File []byte
+ Options DHCPOptions
+}
+
+// DHCPOptions is used to get nicely printed option lists which would normally
+// be cut off after 5 options.
+type DHCPOptions []DHCPOption
+
+// String returns a string version of the options list.
+func (o DHCPOptions) String() string {
+ buf := &bytes.Buffer{}
+ buf.WriteByte('[')
+ for i, opt := range o {
+ buf.WriteString(opt.String())
+ if i+1 != len(o) {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv4
+func (d *DHCPv4) LayerType() gopacket.LayerType { return LayerTypeDHCPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *DHCPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 240 {
+		// The fixed BOOTP header plus the magic cookie occupies 240 bytes.
+		df.SetTruncated()
+		return fmt.Errorf("DHCPv4 packet too short: %d bytes", len(data))
+	}
+	d.Options = d.Options[:0]
+	d.Operation = DHCPOp(data[0])
+	d.HardwareType = LinkType(data[1])
+	d.HardwareLen = data[2]
+	if d.HardwareLen > 16 {
+		// chaddr is a fixed 16-byte field, so anything larger is malformed.
+		return fmt.Errorf("invalid DHCPv4 hardware address length %d", d.HardwareLen)
+	}
+	d.HardwareOpts = data[3]
+	d.Xid = binary.BigEndian.Uint32(data[4:8])
+	d.Secs = binary.BigEndian.Uint16(data[8:10])
+	d.Flags = binary.BigEndian.Uint16(data[10:12])
+	d.ClientIP = net.IP(data[12:16])
+	d.YourClientIP = net.IP(data[16:20])
+	d.NextServerIP = net.IP(data[20:24])
+	d.RelayAgentIP = net.IP(data[24:28])
+	d.ClientHWAddr = net.HardwareAddr(data[28 : 28+d.HardwareLen])
+	d.ServerName = data[44:108]
+	d.File = data[108:236]
+	if binary.BigEndian.Uint32(data[236:240]) != DHCPMagic {
+		return InvalidMagicCookie
+	}
+
+	if len(data) == 240 {
+		// A DHCP packet may legitimately carry no options at all.
+		return nil
+	}
+
+ options := data[240:]
+
+ stop := len(options)
+ start := 0
+ for start < stop {
+ o := DHCPOption{}
+ if err := o.decode(options[start:]); err != nil {
+ return err
+ }
+ if o.Type == DHCPOptEnd {
+ break
+ }
+ d.Options = append(d.Options, o)
+ // Check if the option is a single byte pad
+ if o.Type == DHCPOptPad {
+ start++
+ } else {
+ start += int(o.Length) + 2
+ }
+ }
+ return nil
+}
+
+// Len returns the length of a DHCPv4 packet.
+func (d *DHCPv4) Len() uint16 {
+ n := uint16(240)
+ for _, o := range d.Options {
+ if o.Type == DHCPOptPad {
+ n++
+ } else {
+ n += uint16(o.Length) + 2
+ }
+ }
+ n++ // for opt end
+ return n
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *DHCPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ plen := int(d.Len())
+
+ data, err := b.PrependBytes(plen)
+ if err != nil {
+ return err
+ }
+
+ data[0] = byte(d.Operation)
+ data[1] = byte(d.HardwareType)
+ if opts.FixLengths {
+ d.HardwareLen = uint8(len(d.ClientHWAddr))
+ }
+ data[2] = d.HardwareLen
+ data[3] = d.HardwareOpts
+ binary.BigEndian.PutUint32(data[4:8], d.Xid)
+ binary.BigEndian.PutUint16(data[8:10], d.Secs)
+ binary.BigEndian.PutUint16(data[10:12], d.Flags)
+ copy(data[12:16], d.ClientIP.To4())
+ copy(data[16:20], d.YourClientIP.To4())
+ copy(data[20:24], d.NextServerIP.To4())
+ copy(data[24:28], d.RelayAgentIP.To4())
+ copy(data[28:44], d.ClientHWAddr)
+ copy(data[44:108], d.ServerName)
+ copy(data[108:236], d.File)
+ binary.BigEndian.PutUint32(data[236:240], DHCPMagic)
+
+	// Always terminate the options field with an End option; Len() reserves
+	// one byte for it even when the options list is empty.
+	offset := 240
+	for _, o := range d.Options {
+		if err := o.encode(data[offset:]); err != nil {
+			return err
+		}
+		// A pad option is only a single byte
+		if o.Type == DHCPOptPad {
+			offset++
+		} else {
+			offset += 2 + len(o.Data)
+		}
+	}
+	optend := NewDHCPOption(DHCPOptEnd, nil)
+	if err := optend.encode(data[offset:]); err != nil {
+		return err
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv4) CanDecode() gopacket.LayerClass {
+ return LayerTypeDHCPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *DHCPv4) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func decodeDHCPv4(data []byte, p gopacket.PacketBuilder) error {
+ dhcp := &DHCPv4{}
+ err := dhcp.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(dhcp)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// DHCPOpt represents a DHCP option or parameter from RFC-2132
+type DHCPOpt byte
+
+// Constants for the DHCPOpt options.
+const (
+ DHCPOptPad DHCPOpt = 0
+ DHCPOptSubnetMask DHCPOpt = 1 // 4, net.IP
+ DHCPOptTimeOffset DHCPOpt = 2 // 4, int32 (signed seconds from UTC)
+ DHCPOptRouter DHCPOpt = 3 // n*4, [n]net.IP
+ DHCPOptTimeServer DHCPOpt = 4 // n*4, [n]net.IP
+ DHCPOptNameServer DHCPOpt = 5 // n*4, [n]net.IP
+ DHCPOptDNS DHCPOpt = 6 // n*4, [n]net.IP
+ DHCPOptLogServer DHCPOpt = 7 // n*4, [n]net.IP
+ DHCPOptCookieServer DHCPOpt = 8 // n*4, [n]net.IP
+ DHCPOptLPRServer DHCPOpt = 9 // n*4, [n]net.IP
+ DHCPOptImpressServer DHCPOpt = 10 // n*4, [n]net.IP
+ DHCPOptResLocServer DHCPOpt = 11 // n*4, [n]net.IP
+ DHCPOptHostname DHCPOpt = 12 // n, string
+ DHCPOptBootfileSize DHCPOpt = 13 // 2, uint16
+ DHCPOptMeritDumpFile DHCPOpt = 14 // >1, string
+ DHCPOptDomainName DHCPOpt = 15 // n, string
+ DHCPOptSwapServer DHCPOpt = 16 // n*4, [n]net.IP
+ DHCPOptRootPath DHCPOpt = 17 // n, string
+ DHCPOptExtensionsPath DHCPOpt = 18 // n, string
+ DHCPOptIPForwarding DHCPOpt = 19 // 1, bool
+ DHCPOptSourceRouting DHCPOpt = 20 // 1, bool
+ DHCPOptPolicyFilter DHCPOpt = 21 // 8*n, [n]{net.IP/net.IP}
+ DHCPOptDatagramMTU DHCPOpt = 22 // 2, uint16
+ DHCPOptDefaultTTL DHCPOpt = 23 // 1, byte
+ DHCPOptPathMTUAgingTimeout DHCPOpt = 24 // 4, uint32
+ DHCPOptPathPlateuTableOption DHCPOpt = 25 // 2*n, []uint16
+ DHCPOptInterfaceMTU DHCPOpt = 26 // 2, uint16
+ DHCPOptAllSubsLocal DHCPOpt = 27 // 1, bool
+ DHCPOptBroadcastAddr DHCPOpt = 28 // 4, net.IP
+ DHCPOptMaskDiscovery DHCPOpt = 29 // 1, bool
+ DHCPOptMaskSupplier DHCPOpt = 30 // 1, bool
+ DHCPOptRouterDiscovery DHCPOpt = 31 // 1, bool
+ DHCPOptSolicitAddr DHCPOpt = 32 // 4, net.IP
+ DHCPOptStaticRoute DHCPOpt = 33 // n*8, [n]{net.IP/net.IP} -- note the 2nd is router not mask
+ DHCPOptARPTrailers DHCPOpt = 34 // 1, bool
+ DHCPOptARPTimeout DHCPOpt = 35 // 4, uint32
+ DHCPOptEthernetEncap DHCPOpt = 36 // 1, bool
+ DHCPOptTCPTTL DHCPOpt = 37 // 1, byte
+ DHCPOptTCPKeepAliveInt DHCPOpt = 38 // 4, uint32
+ DHCPOptTCPKeepAliveGarbage DHCPOpt = 39 // 1, bool
+ DHCPOptNISDomain DHCPOpt = 40 // n, string
+ DHCPOptNISServers DHCPOpt = 41 // 4*n, [n]net.IP
+ DHCPOptNTPServers DHCPOpt = 42 // 4*n, [n]net.IP
+ DHCPOptVendorOption DHCPOpt = 43 // n, [n]byte // may be encapsulated.
+ DHCPOptNetBIOSTCPNS DHCPOpt = 44 // 4*n, [n]net.IP
+ DHCPOptNetBIOSTCPDDS DHCPOpt = 45 // 4*n, [n]net.IP
+ DHCPOptNETBIOSTCPNodeType DHCPOpt = 46 // 1, magic byte
+ DHCPOptNetBIOSTCPScope DHCPOpt = 47 // n, string
+ DHCPOptXFontServer DHCPOpt = 48 // n, string
+ DHCPOptXDisplayManager DHCPOpt = 49 // n, string
+ DHCPOptRequestIP DHCPOpt = 50 // 4, net.IP
+ DHCPOptLeaseTime DHCPOpt = 51 // 4, uint32
+ DHCPOptExtOptions DHCPOpt = 52 // 1, 1/2/3
+ DHCPOptMessageType DHCPOpt = 53 // 1, 1-7
+ DHCPOptServerID DHCPOpt = 54 // 4, net.IP
+ DHCPOptParamsRequest DHCPOpt = 55 // n, []byte
+	DHCPOptMessage               DHCPOpt = 56  // n, string
+ DHCPOptMaxMessageSize DHCPOpt = 57 // 2, uint16
+ DHCPOptT1 DHCPOpt = 58 // 4, uint32
+ DHCPOptT2 DHCPOpt = 59 // 4, uint32
+ DHCPOptClassID DHCPOpt = 60 // n, []byte
+ DHCPOptClientID DHCPOpt = 61 // n >= 2, []byte
+ DHCPOptDomainSearch DHCPOpt = 119 // n, string
+ DHCPOptSIPServers DHCPOpt = 120 // n, url
+ DHCPOptClasslessStaticRoute DHCPOpt = 121 //
+ DHCPOptEnd DHCPOpt = 255
+)
+
+// String returns a string version of a DHCPOpt.
+func (o DHCPOpt) String() string {
+ switch o {
+ case DHCPOptPad:
+ return "(padding)"
+ case DHCPOptSubnetMask:
+ return "SubnetMask"
+ case DHCPOptTimeOffset:
+ return "TimeOffset"
+ case DHCPOptRouter:
+ return "Router"
+ case DHCPOptTimeServer:
+ return "rfc868" // old time server protocol stringified to dissuade confusion w. NTP
+ case DHCPOptNameServer:
+ return "ien116" // obscure nameserver protocol stringified to dissuade confusion w. DNS
+ case DHCPOptDNS:
+ return "DNS"
+ case DHCPOptLogServer:
+		return "mitLCS" // MIT-LCS UDP log server, stringified to dissuade confusion w. Syslog
+ case DHCPOptCookieServer:
+ return "CookieServer"
+ case DHCPOptLPRServer:
+ return "LPRServer"
+ case DHCPOptImpressServer:
+ return "ImpressServer"
+ case DHCPOptResLocServer:
+ return "ResourceLocationServer"
+ case DHCPOptHostname:
+ return "Hostname"
+ case DHCPOptBootfileSize:
+ return "BootfileSize"
+ case DHCPOptMeritDumpFile:
+ return "MeritDumpFile"
+ case DHCPOptDomainName:
+ return "DomainName"
+ case DHCPOptSwapServer:
+ return "SwapServer"
+ case DHCPOptRootPath:
+ return "RootPath"
+ case DHCPOptExtensionsPath:
+ return "ExtensionsPath"
+ case DHCPOptIPForwarding:
+ return "IPForwarding"
+ case DHCPOptSourceRouting:
+ return "SourceRouting"
+ case DHCPOptPolicyFilter:
+ return "PolicyFilter"
+ case DHCPOptDatagramMTU:
+ return "DatagramMTU"
+ case DHCPOptDefaultTTL:
+ return "DefaultTTL"
+ case DHCPOptPathMTUAgingTimeout:
+ return "PathMTUAgingTimeout"
+ case DHCPOptPathPlateuTableOption:
+ return "PathPlateuTableOption"
+ case DHCPOptInterfaceMTU:
+ return "InterfaceMTU"
+ case DHCPOptAllSubsLocal:
+ return "AllSubsLocal"
+ case DHCPOptBroadcastAddr:
+ return "BroadcastAddress"
+ case DHCPOptMaskDiscovery:
+ return "MaskDiscovery"
+ case DHCPOptMaskSupplier:
+ return "MaskSupplier"
+ case DHCPOptRouterDiscovery:
+ return "RouterDiscovery"
+ case DHCPOptSolicitAddr:
+ return "SolicitAddr"
+ case DHCPOptStaticRoute:
+ return "StaticRoute"
+ case DHCPOptARPTrailers:
+ return "ARPTrailers"
+ case DHCPOptARPTimeout:
+ return "ARPTimeout"
+ case DHCPOptEthernetEncap:
+ return "EthernetEncap"
+ case DHCPOptTCPTTL:
+ return "TCPTTL"
+ case DHCPOptTCPKeepAliveInt:
+ return "TCPKeepAliveInt"
+ case DHCPOptTCPKeepAliveGarbage:
+ return "TCPKeepAliveGarbage"
+ case DHCPOptNISDomain:
+ return "NISDomain"
+ case DHCPOptNISServers:
+ return "NISServers"
+ case DHCPOptNTPServers:
+ return "NTPServers"
+ case DHCPOptVendorOption:
+ return "VendorOption"
+ case DHCPOptNetBIOSTCPNS:
+ return "NetBIOSOverTCPNS"
+ case DHCPOptNetBIOSTCPDDS:
+ return "NetBiosOverTCPDDS"
+ case DHCPOptNETBIOSTCPNodeType:
+ return "NetBIOSOverTCPNodeType"
+ case DHCPOptNetBIOSTCPScope:
+ return "NetBIOSOverTCPScope"
+ case DHCPOptXFontServer:
+ return "XFontServer"
+ case DHCPOptXDisplayManager:
+ return "XDisplayManager"
+ case DHCPOptEnd:
+ return "(end)"
+ case DHCPOptSIPServers:
+ return "SipServers"
+ case DHCPOptRequestIP:
+ return "RequestIP"
+ case DHCPOptLeaseTime:
+ return "LeaseTime"
+ case DHCPOptExtOptions:
+ return "ExtOpts"
+ case DHCPOptMessageType:
+ return "MessageType"
+ case DHCPOptServerID:
+ return "ServerID"
+ case DHCPOptParamsRequest:
+ return "ParamsRequest"
+ case DHCPOptMessage:
+ return "Message"
+ case DHCPOptMaxMessageSize:
+ return "MaxDHCPSize"
+ case DHCPOptT1:
+ return "Timer1"
+ case DHCPOptT2:
+ return "Timer2"
+ case DHCPOptClassID:
+ return "ClassID"
+ case DHCPOptClientID:
+ return "ClientID"
+ case DHCPOptDomainSearch:
+ return "DomainSearch"
+ case DHCPOptClasslessStaticRoute:
+ return "ClasslessStaticRoute"
+ default:
+ return "Unknown"
+ }
+}
+
+// DHCPOption represents a DHCP option.
+type DHCPOption struct {
+ Type DHCPOpt
+ Length uint8
+ Data []byte
+}
+
+// String returns a string version of a DHCP Option.
+func (o DHCPOption) String() string {
+ switch o.Type {
+
+ case DHCPOptHostname, DHCPOptMeritDumpFile, DHCPOptDomainName, DHCPOptRootPath,
+ DHCPOptExtensionsPath, DHCPOptNISDomain, DHCPOptNetBIOSTCPScope, DHCPOptXFontServer,
+ DHCPOptXDisplayManager, DHCPOptMessage, DHCPOptDomainSearch: // string
+ return fmt.Sprintf("Option(%s:%s)", o.Type, string(o.Data))
+
+ case DHCPOptMessageType:
+ if len(o.Data) != 1 {
+ return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+ }
+ return fmt.Sprintf("Option(%s:%s)", o.Type, DHCPMsgType(o.Data[0]))
+
+ case DHCPOptSubnetMask, DHCPOptServerID, DHCPOptBroadcastAddr,
+ DHCPOptSolicitAddr, DHCPOptRequestIP: // net.IP
+ if len(o.Data) < 4 {
+ return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+ }
+ return fmt.Sprintf("Option(%s:%s)", o.Type, net.IP(o.Data))
+
+ case DHCPOptT1, DHCPOptT2, DHCPOptLeaseTime, DHCPOptPathMTUAgingTimeout,
+ DHCPOptARPTimeout, DHCPOptTCPKeepAliveInt: // uint32
+ if len(o.Data) != 4 {
+ return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+ }
+ return fmt.Sprintf("Option(%s:%d)", o.Type,
+ uint32(o.Data[0])<<24|uint32(o.Data[1])<<16|uint32(o.Data[2])<<8|uint32(o.Data[3]))
+
+ case DHCPOptParamsRequest:
+ buf := &bytes.Buffer{}
+ buf.WriteString(fmt.Sprintf("Option(%s:", o.Type))
+ for i, v := range o.Data {
+ buf.WriteString(DHCPOpt(v).String())
+ if i+1 != len(o.Data) {
+ buf.WriteByte(',')
+ }
+ }
+ buf.WriteString(")")
+ return buf.String()
+
+ default:
+ return fmt.Sprintf("Option(%s:%v)", o.Type, o.Data)
+ }
+}
+
+// NewDHCPOption constructs a new DHCPOption with a given type and data.
+func NewDHCPOption(t DHCPOpt, data []byte) DHCPOption {
+ o := DHCPOption{Type: t}
+ if data != nil {
+ o.Data = data
+ o.Length = uint8(len(data))
+ }
+ return o
+}
+
+func (o *DHCPOption) encode(b []byte) error {
+ switch o.Type {
+ case DHCPOptPad, DHCPOptEnd:
+ b[0] = byte(o.Type)
+ default:
+ b[0] = byte(o.Type)
+ b[1] = o.Length
+ copy(b[2:], o.Data)
+ }
+ return nil
+}
+
+func (o *DHCPOption) decode(data []byte) error {
+ if len(data) < 1 {
+ // Pad/End have a length of 1
+ return DecOptionNotEnoughData
+ }
+ o.Type = DHCPOpt(data[0])
+ switch o.Type {
+ case DHCPOptPad, DHCPOptEnd:
+ o.Data = nil
+ default:
+ if len(data) < 2 {
+ return DecOptionNotEnoughData
+ }
+ o.Length = data[1]
+ if int(o.Length) > len(data[2:]) {
+ return DecOptionMalformed
+ }
+ o.Data = data[2 : 2+int(o.Length)]
+ }
+ return nil
+}
+
+// DHCPv4Error is used for constant errors for DHCPv4. It is needed for test asserts.
+type DHCPv4Error string
+
+// DHCPv4Error implements error interface.
+func (d DHCPv4Error) Error() string {
+ return string(d)
+}
+
+const (
+	// DecOptionNotEnoughData is returned when there is not enough data during an option's decode process
+ DecOptionNotEnoughData = DHCPv4Error("Not enough data to decode")
+ // DecOptionMalformed is returned when the option is malformed
+ DecOptionMalformed = DHCPv4Error("Option is malformed")
+	// InvalidMagicCookie is returned when the magic cookie is missing from the BOOTP header
+ InvalidMagicCookie = DHCPv4Error("Bad DHCP header")
+)
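
To exercise the encode path above end to end, here is a sketch that builds a minimal DHCP Discover and serializes it; the client MAC and transaction ID are placeholders, and the surrounding UDP/IP framing is omitted:

```go
package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	mac, _ := net.ParseMAC("00:11:22:33:44:55") // placeholder client MAC
	dhcp := &layers.DHCPv4{
		Operation:    layers.DHCPOpRequest,
		HardwareType: layers.LinkTypeEthernet,
		Xid:          0x12345678, // placeholder transaction ID
		ClientHWAddr: mac,
		Options: layers.DHCPOptions{
			layers.NewDHCPOption(layers.DHCPOptMessageType,
				[]byte{byte(layers.DHCPMsgTypeDiscover)}),
		},
	}
	buf := gopacket.NewSerializeBuffer()
	// FixLengths derives HardwareLen from ClientHWAddr, as SerializeTo shows.
	opts := gopacket.SerializeOptions{FixLengths: true}
	if err := dhcp.SerializeTo(buf, opts); err != nil {
		fmt.Println("serialize failed:", err)
		return
	}
	fmt.Printf("encoded %d bytes (Len() reports %d)\n", len(buf.Bytes()), dhcp.Len())
}
```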
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6.go b/vendor/github.com/google/gopacket/layers/dhcpv6.go
new file mode 100644
index 0000000..052b394
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6.go
@@ -0,0 +1,341 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// DHCPv6MsgType represents a DHCPv6 operation
+type DHCPv6MsgType byte
+
+// Constants that represent DHCP operations
+const (
+ DHCPv6MsgTypeUnspecified DHCPv6MsgType = iota
+ DHCPv6MsgTypeSolicit
+ DHCPv6MsgTypeAdverstise
+ DHCPv6MsgTypeRequest
+ DHCPv6MsgTypeConfirm
+ DHCPv6MsgTypeRenew
+ DHCPv6MsgTypeRebind
+ DHCPv6MsgTypeReply
+ DHCPv6MsgTypeRelease
+ DHCPv6MsgTypeDecline
+ DHCPv6MsgTypeReconfigure
+ DHCPv6MsgTypeInformationRequest
+ DHCPv6MsgTypeRelayForward
+ DHCPv6MsgTypeRelayReply
+)
+
+// String returns a string version of a DHCPv6MsgType.
+func (o DHCPv6MsgType) String() string {
+ switch o {
+ case DHCPv6MsgTypeUnspecified:
+ return "Unspecified"
+ case DHCPv6MsgTypeSolicit:
+ return "Solicit"
+ case DHCPv6MsgTypeAdverstise:
+		return "Advertise"
+ case DHCPv6MsgTypeRequest:
+ return "Request"
+ case DHCPv6MsgTypeConfirm:
+ return "Confirm"
+ case DHCPv6MsgTypeRenew:
+ return "Renew"
+ case DHCPv6MsgTypeRebind:
+ return "Rebind"
+ case DHCPv6MsgTypeReply:
+ return "Reply"
+ case DHCPv6MsgTypeRelease:
+ return "Release"
+ case DHCPv6MsgTypeDecline:
+ return "Decline"
+ case DHCPv6MsgTypeReconfigure:
+ return "Reconfigure"
+ case DHCPv6MsgTypeInformationRequest:
+ return "InformationRequest"
+ case DHCPv6MsgTypeRelayForward:
+ return "RelayForward"
+ case DHCPv6MsgTypeRelayReply:
+ return "RelayReply"
+ default:
+ return "Unknown"
+ }
+}
+
+// DHCPv6 contains data for a single DHCP packet.
+type DHCPv6 struct {
+ BaseLayer
+ MsgType DHCPv6MsgType
+ HopCount uint8
+ LinkAddr net.IP
+ PeerAddr net.IP
+ TransactionID []byte
+ Options DHCPv6Options
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv6
+func (d *DHCPv6) LayerType() gopacket.LayerType { return LayerTypeDHCPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *DHCPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		// Message type plus transaction ID is the smallest possible header.
+		df.SetTruncated()
+		return fmt.Errorf("DHCPv6 packet too short: %d bytes", len(data))
+	}
+	d.BaseLayer = BaseLayer{Contents: data}
+	d.Options = d.Options[:0]
+	d.MsgType = DHCPv6MsgType(data[0])
+
+	offset := 0
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		if len(data) < 34 {
+			// Relay messages carry a hop count plus two 16-byte addresses.
+			df.SetTruncated()
+			return fmt.Errorf("DHCPv6 relay message too short: %d bytes", len(data))
+		}
+		d.HopCount = data[1]
+		d.LinkAddr = net.IP(data[2:18])
+		d.PeerAddr = net.IP(data[18:34])
+		offset = 34
+	} else {
+		d.TransactionID = data[1:4]
+		offset = 4
+	}
+
+ stop := len(data)
+ for offset < stop {
+ o := DHCPv6Option{}
+ if err := o.decode(data[offset:]); err != nil {
+ return err
+ }
+ d.Options = append(d.Options, o)
+ offset += int(o.Length) + 4 // 2 from option code, 2 from option length
+ }
+
+ return nil
+}
+
+// Len returns the length of a DHCPv6 packet.
+func (d *DHCPv6) Len() int {
+ n := 1
+ if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+ n += 33
+ } else {
+ n += 3
+ }
+
+ for _, o := range d.Options {
+ n += int(o.Length) + 4 // 2 from option code, 2 from option length
+ }
+
+ return n
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *DHCPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ plen := int(d.Len())
+
+ data, err := b.PrependBytes(plen)
+ if err != nil {
+ return err
+ }
+
+ offset := 0
+ data[0] = byte(d.MsgType)
+ if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+ data[1] = byte(d.HopCount)
+ copy(data[2:18], d.LinkAddr.To16())
+ copy(data[18:34], d.PeerAddr.To16())
+ offset = 34
+ } else {
+ copy(data[1:4], d.TransactionID)
+ offset = 4
+ }
+
+	for _, o := range d.Options {
+		if err := o.encode(data[offset:], opts); err != nil {
+			return err
+		}
+		offset += int(o.Length) + 4 // 2 for the option code, 2 for the option length
+	}
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv6) CanDecode() gopacket.LayerClass {
+ return LayerTypeDHCPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *DHCPv6) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func decodeDHCPv6(data []byte, p gopacket.PacketBuilder) error {
+ dhcp := &DHCPv6{}
+ err := dhcp.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(dhcp)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// DHCPv6StatusCode represents a DHCP status code - RFC-3315
+type DHCPv6StatusCode uint16
+
+// Constants for the DHCPv6StatusCode.
+const (
+ DHCPv6StatusCodeSuccess DHCPv6StatusCode = iota
+ DHCPv6StatusCodeUnspecFail
+ DHCPv6StatusCodeNoAddrsAvail
+ DHCPv6StatusCodeNoBinding
+ DHCPv6StatusCodeNotOnLink
+ DHCPv6StatusCodeUseMulticast
+)
+
+// String returns a string version of a DHCPv6StatusCode.
+func (o DHCPv6StatusCode) String() string {
+ switch o {
+ case DHCPv6StatusCodeSuccess:
+ return "Success"
+ case DHCPv6StatusCodeUnspecFail:
+ return "UnspecifiedFailure"
+ case DHCPv6StatusCodeNoAddrsAvail:
+ return "NoAddressAvailable"
+ case DHCPv6StatusCodeNoBinding:
+ return "NoBinding"
+ case DHCPv6StatusCodeNotOnLink:
+ return "NotOnLink"
+ case DHCPv6StatusCodeUseMulticast:
+ return "UseMulticast"
+ default:
+ return "Unknown"
+ }
+}
+
+// DHCPv6DUIDType represents a DHCP DUID - RFC-3315
+type DHCPv6DUIDType uint16
+
+// Constants for the DHCPv6DUIDType.
+const (
+ DHCPv6DUIDTypeLLT DHCPv6DUIDType = iota + 1
+ DHCPv6DUIDTypeEN
+ DHCPv6DUIDTypeLL
+)
+
+// String returns a string version of a DHCPv6DUIDType.
+func (o DHCPv6DUIDType) String() string {
+ switch o {
+ case DHCPv6DUIDTypeLLT:
+ return "LLT"
+ case DHCPv6DUIDTypeEN:
+ return "EN"
+ case DHCPv6DUIDTypeLL:
+ return "LL"
+ default:
+ return "Unknown"
+ }
+}
+
+// DHCPv6DUID is the DHCP Unique Identifier defined in RFC 3315, section 9 (https://tools.ietf.org/html/rfc3315#page-19)
+type DHCPv6DUID struct {
+ Type DHCPv6DUIDType
+ // LLT, LL
+ HardwareType []byte
+ // EN
+ EnterpriseNumber []byte
+ // LLT
+ Time []byte
+ // LLT, LL
+ LinkLayerAddress net.HardwareAddr
+ // EN
+ Identifier []byte
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6DUID
+func (d *DHCPv6DUID) DecodeFromBytes(data []byte) error {
+ if len(data) < 2 {
+		// string(len(data)) would yield a rune, not a decimal count, so
+		// format the length explicitly.
+		return fmt.Errorf("Not enough bytes to decode DHCPv6DUID: %d", len(data))
+ }
+
+ d.Type = DHCPv6DUIDType(binary.BigEndian.Uint16(data[:2]))
+ if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+ d.HardwareType = data[2:4]
+ }
+
+ if d.Type == DHCPv6DUIDTypeLLT {
+ d.Time = data[4:8]
+ d.LinkLayerAddress = net.HardwareAddr(data[8:])
+ } else if d.Type == DHCPv6DUIDTypeEN {
+ d.EnterpriseNumber = data[2:6]
+ d.Identifier = data[6:]
+ } else { // DHCPv6DUIDTypeLL
+ d.LinkLayerAddress = net.HardwareAddr(data[4:])
+ }
+
+ return nil
+}
+
+// Encode encodes the DHCPv6DUID in a slice of bytes
+func (d *DHCPv6DUID) Encode() []byte {
+ length := d.Len()
+ data := make([]byte, length)
+ binary.BigEndian.PutUint16(data[0:2], uint16(d.Type))
+
+ if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+ copy(data[2:4], d.HardwareType)
+ }
+
+ if d.Type == DHCPv6DUIDTypeLLT {
+ copy(data[4:8], d.Time)
+ copy(data[8:], d.LinkLayerAddress)
+ } else if d.Type == DHCPv6DUIDTypeEN {
+ copy(data[2:6], d.EnterpriseNumber)
+ copy(data[6:], d.Identifier)
+ } else {
+ copy(data[4:], d.LinkLayerAddress)
+ }
+
+ return data
+}
+
+// Len returns the length of the DHCPv6DUID, respecting the type
+func (d *DHCPv6DUID) Len() int {
+ length := 2 // d.Type
+ if d.Type == DHCPv6DUIDTypeLLT {
+ length += 2 /*HardwareType*/ + 4 /*d.Time*/ + len(d.LinkLayerAddress)
+ } else if d.Type == DHCPv6DUIDTypeEN {
+ length += 4 /*d.EnterpriseNumber*/ + len(d.Identifier)
+ } else { // LL
+ length += 2 /*d.HardwareType*/ + len(d.LinkLayerAddress)
+ }
+
+ return length
+}
+
+func (d *DHCPv6DUID) String() string {
+ duid := "Type: " + d.Type.String() + ", "
+ if d.Type == DHCPv6DUIDTypeLLT {
+ duid += fmt.Sprintf("HardwareType: %v, Time: %v, LinkLayerAddress: %v", d.HardwareType, d.Time, d.LinkLayerAddress)
+ } else if d.Type == DHCPv6DUIDTypeEN {
+ duid += fmt.Sprintf("EnterpriseNumber: %v, Identifier: %v", d.EnterpriseNumber, d.Identifier)
+ } else { // DHCPv6DUIDTypeLL
+ duid += fmt.Sprintf("HardwareType: %v, LinkLayerAddress: %v", d.HardwareType, d.LinkLayerAddress)
+ }
+ return duid
+}
+
+func decodeDHCPv6DUID(data []byte) (*DHCPv6DUID, error) {
+ duid := &DHCPv6DUID{}
+ err := duid.DecodeFromBytes(data)
+ if err != nil {
+ return nil, err
+ }
+ return duid, nil
+}
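
As a round-trip illustration of the DUID layouts handled above, a DUID-LL built from a placeholder MAC can be encoded and decoded back:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	duid := &layers.DHCPv6DUID{
		Type:             layers.DHCPv6DUIDTypeLL,
		HardwareType:     []byte{0x00, 0x01},                         // IANA hardware type 1: Ethernet
		LinkLayerAddress: []byte{0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}, // placeholder MAC
	}
	raw := duid.Encode()

	decoded := &layers.DHCPv6DUID{}
	if err := decoded.DecodeFromBytes(raw); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(decoded) // Type: LL, HardwareType: ..., LinkLayerAddress: ...
	fmt.Println("round-trip ok:", bytes.Equal(raw, decoded.Encode()))
}
```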
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6_options.go b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
new file mode 100644
index 0000000..0c05e35
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// DHCPv6Opt represents a DHCP option or parameter from RFC-3315
+type DHCPv6Opt uint16
+
+// Constants for the DHCPv6Opt options.
+const (
+ DHCPv6OptClientID DHCPv6Opt = 1
+ DHCPv6OptServerID DHCPv6Opt = 2
+ DHCPv6OptIANA DHCPv6Opt = 3
+ DHCPv6OptIATA DHCPv6Opt = 4
+ DHCPv6OptIAAddr DHCPv6Opt = 5
+ DHCPv6OptOro DHCPv6Opt = 6
+ DHCPv6OptPreference DHCPv6Opt = 7
+ DHCPv6OptElapsedTime DHCPv6Opt = 8
+ DHCPv6OptRelayMessage DHCPv6Opt = 9
+ DHCPv6OptAuth DHCPv6Opt = 11
+ DHCPv6OptUnicast DHCPv6Opt = 12
+ DHCPv6OptStatusCode DHCPv6Opt = 13
+ DHCPv6OptRapidCommit DHCPv6Opt = 14
+ DHCPv6OptUserClass DHCPv6Opt = 15
+ DHCPv6OptVendorClass DHCPv6Opt = 16
+ DHCPv6OptVendorOpts DHCPv6Opt = 17
+ DHCPv6OptInterfaceID DHCPv6Opt = 18
+ DHCPv6OptReconfigureMessage DHCPv6Opt = 19
+ DHCPv6OptReconfigureAccept DHCPv6Opt = 20
+
+ // RFC 3319 Session Initiation Protocol (SIP)
+ DHCPv6OptSIPServersDomainList DHCPv6Opt = 21
+ DHCPv6OptSIPServersAddressList DHCPv6Opt = 22
+
+ // RFC 3646 DNS Configuration
+ DHCPv6OptDNSServers DHCPv6Opt = 23
+ DHCPv6OptDomainList DHCPv6Opt = 24
+
+ // RFC 3633 Prefix Delegation
+ DHCPv6OptIAPD DHCPv6Opt = 25
+ DHCPv6OptIAPrefix DHCPv6Opt = 26
+
+ // RFC 3898 Network Information Service (NIS)
+ DHCPv6OptNISServers DHCPv6Opt = 27
+ DHCPv6OptNISPServers DHCPv6Opt = 28
+ DHCPv6OptNISDomainName DHCPv6Opt = 29
+ DHCPv6OptNISPDomainName DHCPv6Opt = 30
+
+ // RFC 4075 Simple Network Time Protocol (SNTP)
+ DHCPv6OptSNTPServers DHCPv6Opt = 31
+
+ // RFC 4242 Information Refresh Time Option
+ DHCPv6OptInformationRefreshTime DHCPv6Opt = 32
+
+ // RFC 4280 Broadcast and Multicast Control Servers
+ DHCPv6OptBCMCSServerDomainNameList DHCPv6Opt = 33
+ DHCPv6OptBCMCSServerAddressList DHCPv6Opt = 34
+
+ // RFC 4776 Civic Address ConfigurationOption
+ DHCPv6OptGeoconfCivic DHCPv6Opt = 36
+
+ // RFC 4649 Relay Agent Remote-ID
+ DHCPv6OptRemoteID DHCPv6Opt = 37
+
+ // RFC 4580 Relay Agent Subscriber-ID
+ DHCPv6OptSubscriberID DHCPv6Opt = 38
+
+ // RFC 4704 Client Full Qualified Domain Name (FQDN)
+ DHCPv6OptClientFQDN DHCPv6Opt = 39
+
+ // RFC 5192 Protocol for Carrying Authentication for Network Access (PANA)
+ DHCPv6OptPanaAgent DHCPv6Opt = 40
+
+ // RFC 4833 Timezone Options
+ DHCPv6OptNewPOSIXTimezone DHCPv6Opt = 41
+ DHCPv6OptNewTZDBTimezone DHCPv6Opt = 42
+
+ // RFC 4994 Relay Agent Echo Request
+ DHCPv6OptEchoRequestOption DHCPv6Opt = 43
+
+ // RFC 5007 Leasequery
+ DHCPv6OptLQQuery DHCPv6Opt = 44
+ DHCPv6OptCLTTime DHCPv6Opt = 45
+ DHCPv6OptClientData DHCPv6Opt = 46
+ DHCPv6OptLQRelayData DHCPv6Opt = 47
+ DHCPv6OptLQClientLink DHCPv6Opt = 48
+
+ // RFC 6610 Home Information Discovery in Mobile IPv6 (MIPv6)
+ DHCPv6OptMIP6HNIDF DHCPv6Opt = 49
+ DHCPv6OptMIP6VDINF DHCPv6Opt = 50
+ DHCPv6OptMIP6IDINF DHCPv6Opt = 69
+ DHCPv6OptMIP6UDINF DHCPv6Opt = 70
+ DHCPv6OptMIP6HNP DHCPv6Opt = 71
+ DHCPv6OptMIP6HAA DHCPv6Opt = 72
+ DHCPv6OptMIP6HAF DHCPv6Opt = 73
+
+ // RFC 5223 Discovering Location-to-Service Translation (LoST) Servers
+ DHCPv6OptV6LOST DHCPv6Opt = 51
+
+ // RFC 5417 Control And Provisioning of Wireless Access Points (CAPWAP)
+ DHCPv6OptCAPWAPACV6 DHCPv6Opt = 52
+
+ // RFC 5460 Bulk Leasequery
+ DHCPv6OptRelayID DHCPv6Opt = 53
+
+ // RFC 5678 IEEE 802.21 Mobility Services (MoS) Discovery
+ DHCPv6OptIPv6AddressMoS DHCPv6Opt = 54
+ DHCPv6OptIPv6FQDNMoS DHCPv6Opt = 55
+
+ // RFC 5908 NTP Server Option
+ DHCPv6OptNTPServer DHCPv6Opt = 56
+
+ // RFC 5986 Discovering the Local Location Information Server (LIS)
+ DHCPv6OptV6AccessDomain DHCPv6Opt = 57
+
+ // RFC 5986 SIP User Agent
+ DHCPv6OptSIPUACSList DHCPv6Opt = 58
+
+ // RFC 5970 Options for Network Boot
+ DHCPv6OptBootFileURL DHCPv6Opt = 59
+ DHCPv6OptBootFileParam DHCPv6Opt = 60
+ DHCPv6OptClientArchType DHCPv6Opt = 61
+ DHCPv6OptNII DHCPv6Opt = 62
+
+ // RFC 6225 Coordinate-Based Location Configuration Information
+ DHCPv6OptGeolocation DHCPv6Opt = 63
+
+ // RFC 6334 Dual-Stack Lite
+ DHCPv6OptAFTRName DHCPv6Opt = 64
+
+ // RFC 6440 EAP Re-authentication Protocol (ERP)
+ DHCPv6OptERPLocalDomainName DHCPv6Opt = 65
+
+ // RFC 6422 Relay-Supplied DHCP Options
+ DHCPv6OptRSOO DHCPv6Opt = 66
+
+ // RFC 6603 Prefix Exclude Option for DHCPv6-based Prefix Delegation
+ DHCPv6OptPDExclude DHCPv6Opt = 67
+
+ // RFC 6607 Virtual Subnet Selection
+ DHCPv6OptVSS DHCPv6Opt = 68
+
+ // RFC 6731 Improved Recursive DNS Server Selection for Multi-Interfaced Nodes
+ DHCPv6OptRDNSSSelection DHCPv6Opt = 74
+
+ // RFC 6784 Kerberos Options for DHCPv6
+ DHCPv6OptKRBPrincipalName DHCPv6Opt = 75
+ DHCPv6OptKRBRealmName DHCPv6Opt = 76
+ DHCPv6OptKRBKDC DHCPv6Opt = 77
+
+ // RFC 6939 Client Link-Layer Address Option
+ DHCPv6OptClientLinkLayerAddress DHCPv6Opt = 79
+
+ // RFC 6977 Triggering DHCPv6 Reconfiguration from Relay Agents
+ DHCPv6OptLinkAddress DHCPv6Opt = 80
+
+ // RFC 7037 RADIUS Option for the DHCPv6 Relay Agent
+ DHCPv6OptRADIUS DHCPv6Opt = 81
+
+ // RFC 7083 Modification to Default Values of SOL_MAX_RT and INF_MAX_RT
+ DHCPv6OptSolMaxRt DHCPv6Opt = 82
+ DHCPv6OptInfMaxRt DHCPv6Opt = 83
+
+ // RFC 7078 Distributing Address Selection Policy
+ DHCPv6OptAddrSel DHCPv6Opt = 84
+ DHCPv6OptAddrSelTable DHCPv6Opt = 85
+
+ // RFC 7291 DHCP Options for the Port Control Protocol (PCP)
+ DHCPv6OptV6PCPServer DHCPv6Opt = 86
+
+ // RFC 7341 DHCPv4-over-DHCPv6 (DHCP 4o6) Transport
+ DHCPv6OptDHCPv4Message DHCPv6Opt = 87
+ DHCPv6OptDHCPv4OverDHCPv6Server DHCPv6Opt = 88
+
+ // RFC 7598 Configuration of Softwire Address and Port-Mapped Clients
+ DHCPv6OptS46Rule DHCPv6Opt = 89
+ DHCPv6OptS46BR DHCPv6Opt = 90
+ DHCPv6OptS46DMR DHCPv6Opt = 91
+ DHCPv6OptS46V4V4Bind DHCPv6Opt = 92
+ DHCPv6OptS46PortParameters DHCPv6Opt = 93
+ DHCPv6OptS46ContMAPE DHCPv6Opt = 94
+ DHCPv6OptS46ContMAPT DHCPv6Opt = 95
+ DHCPv6OptS46ContLW DHCPv6Opt = 96
+
+ // RFC 7600 IPv4 Residual Deployment via IPv6
+ DHCPv6Opt4RD DHCPv6Opt = 97
+ DHCPv6Opt4RDMapRule DHCPv6Opt = 98
+ DHCPv6Opt4RDNonMapRule DHCPv6Opt = 99
+
+ // RFC 7653 Active Leasequery
+ DHCPv6OptLQBaseTime DHCPv6Opt = 100
+ DHCPv6OptLQStartTime DHCPv6Opt = 101
+ DHCPv6OptLQEndTime DHCPv6Opt = 102
+
+ // RFC 7710 Captive-Portal Identification
+ DHCPv6OptCaptivePortal DHCPv6Opt = 103
+
+ // RFC 7774 Multicast Protocol for Low-Power and Lossy Networks (MPL) Parameter Configuration
+ DHCPv6OptMPLParameters DHCPv6Opt = 104
+
+ // RFC 7839 Access-Network-Identifier (ANI)
+ DHCPv6OptANIATT DHCPv6Opt = 105
+ DHCPv6OptANINetworkName DHCPv6Opt = 106
+ DHCPv6OptANIAPName DHCPv6Opt = 107
+ DHCPv6OptANIAPBSSID DHCPv6Opt = 108
+ DHCPv6OptANIOperatorID DHCPv6Opt = 109
+ DHCPv6OptANIOperatorRealm DHCPv6Opt = 110
+
+ // RFC 8026 Unified IPv4-in-IPv6 Softwire Customer Premises Equipment (CPE)
+ DHCPv6OptS46Priority DHCPv6Opt = 111
+
+ // draft-ietf-opsawg-mud-25 Manufacturer Usage Description (MUD)
+ DHCPv6OptMUDURLV6 DHCPv6Opt = 112
+
+ // RFC 8115 IPv4-Embedded Multicast and Unicast IPv6 Prefixes
+ DHCPv6OptV6Prefix64 DHCPv6Opt = 113
+
+ // RFC 8156 DHCPv6 Failover Protocol
+ DHCPv6OptFBindingStatus DHCPv6Opt = 114
+ DHCPv6OptFConnectFlags DHCPv6Opt = 115
+ DHCPv6OptFDNSRemovalInfo DHCPv6Opt = 116
+ DHCPv6OptFDNSHostName DHCPv6Opt = 117
+ DHCPv6OptFDNSZoneName DHCPv6Opt = 118
+ DHCPv6OptFDNSFlags DHCPv6Opt = 119
+ DHCPv6OptFExpirationTime DHCPv6Opt = 120
+ DHCPv6OptFMaxUnacknowledgedBNDUPD DHCPv6Opt = 121
+ DHCPv6OptFMCLT DHCPv6Opt = 122
+ DHCPv6OptFPartnerLifetime DHCPv6Opt = 123
+ DHCPv6OptFPartnerLifetimeSent DHCPv6Opt = 124
+ DHCPv6OptFPartnerDownTime DHCPv6Opt = 125
+ DHCPv6OptFPartnerRawCltTime DHCPv6Opt = 126
+ DHCPv6OptFProtocolVersion DHCPv6Opt = 127
+ DHCPv6OptFKeepaliveTime DHCPv6Opt = 128
+ DHCPv6OptFReconfigureData DHCPv6Opt = 129
+ DHCPv6OptFRelationshipName DHCPv6Opt = 130
+ DHCPv6OptFServerFlags DHCPv6Opt = 131
+ DHCPv6OptFServerState DHCPv6Opt = 132
+ DHCPv6OptFStartTimeOfState DHCPv6Opt = 133
+ DHCPv6OptFStateExpirationTime DHCPv6Opt = 134
+
+ // RFC 8357 Generalized UDP Source Port for DHCP Relay
+ DHCPv6OptRelayPort DHCPv6Opt = 135
+
+ // draft-ietf-netconf-zerotouch-25 Zero Touch Provisioning for Networking Devices
+ DHCPv6OptV6ZeroTouchRedirect DHCPv6Opt = 136
+
+ // RFC 6153 Access Network Discovery and Selection Function (ANDSF) Discovery
+ DHCPv6OptIPV6AddressANDSF DHCPv6Opt = 143
+)
+
+// String returns a string version of a DHCPv6Opt.
+func (o DHCPv6Opt) String() string {
+ switch o {
+ case DHCPv6OptClientID:
+ return "ClientID"
+ case DHCPv6OptServerID:
+ return "ServerID"
+ case DHCPv6OptIANA:
+ return "IA_NA"
+ case DHCPv6OptIATA:
+ return "IA_TA"
+ case DHCPv6OptIAAddr:
+ return "IAAddr"
+ case DHCPv6OptOro:
+ return "Oro"
+ case DHCPv6OptPreference:
+ return "Preference"
+ case DHCPv6OptElapsedTime:
+ return "ElapsedTime"
+ case DHCPv6OptRelayMessage:
+ return "RelayMessage"
+ case DHCPv6OptAuth:
+ return "Auth"
+ case DHCPv6OptUnicast:
+ return "Unicast"
+ case DHCPv6OptStatusCode:
+ return "StatusCode"
+ case DHCPv6OptRapidCommit:
+ return "RapidCommit"
+ case DHCPv6OptUserClass:
+ return "UserClass"
+ case DHCPv6OptVendorClass:
+ return "VendorClass"
+ case DHCPv6OptVendorOpts:
+ return "VendorOpts"
+ case DHCPv6OptInterfaceID:
+ return "InterfaceID"
+ case DHCPv6OptReconfigureMessage:
+ return "ReconfigureMessage"
+ case DHCPv6OptReconfigureAccept:
+ return "ReconfigureAccept"
+ case DHCPv6OptSIPServersDomainList:
+ return "SIPServersDomainList"
+ case DHCPv6OptSIPServersAddressList:
+ return "SIPServersAddressList"
+ case DHCPv6OptDNSServers:
+ return "DNSRecursiveNameServer"
+ case DHCPv6OptDomainList:
+ return "DomainSearchList"
+ case DHCPv6OptIAPD:
+ return "IdentityAssociationPrefixDelegation"
+ case DHCPv6OptIAPrefix:
+ return "IAPDPrefix"
+ case DHCPv6OptNISServers:
+ return "NISServers"
+ case DHCPv6OptNISPServers:
+ return "NISv2Servers"
+ case DHCPv6OptNISDomainName:
+ return "NISDomainName"
+ case DHCPv6OptNISPDomainName:
+ return "NISv2DomainName"
+ case DHCPv6OptSNTPServers:
+ return "SNTPServers"
+ case DHCPv6OptInformationRefreshTime:
+ return "InformationRefreshTime"
+ case DHCPv6OptBCMCSServerDomainNameList:
+ return "BCMCSControlServersDomainNameList"
+ case DHCPv6OptBCMCSServerAddressList:
+ return "BCMCSControlServersAddressList"
+ case DHCPv6OptGeoconfCivic:
+ return "CivicAddress"
+ case DHCPv6OptRemoteID:
+ return "RelayAgentRemoteID"
+ case DHCPv6OptSubscriberID:
+ return "RelayAgentSubscriberID"
+ case DHCPv6OptClientFQDN:
+ return "ClientFQDN"
+ case DHCPv6OptPanaAgent:
+ return "PANAAuthenticationAgent"
+ case DHCPv6OptNewPOSIXTimezone:
+ return "NewPOSIXTimezone"
+ case DHCPv6OptNewTZDBTimezone:
+ return "NewTZDBTimezone"
+ case DHCPv6OptEchoRequestOption:
+ return "EchoRequest"
+ case DHCPv6OptLQQuery:
+ return "LeasequeryQuery"
+ case DHCPv6OptClientData:
+ return "LeasequeryClientData"
+ case DHCPv6OptCLTTime:
+ return "LeasequeryClientLastTransactionTime"
+ case DHCPv6OptLQRelayData:
+ return "LeasequeryRelayData"
+ case DHCPv6OptLQClientLink:
+ return "LeasequeryClientLink"
+ case DHCPv6OptMIP6HNIDF:
+ return "MIPv6HomeNetworkIDFQDN"
+ case DHCPv6OptMIP6VDINF:
+ return "MIPv6VisitedHomeNetworkInformation"
+ case DHCPv6OptMIP6IDINF:
+ return "MIPv6IdentifiedHomeNetworkInformation"
+ case DHCPv6OptMIP6UDINF:
+ return "MIPv6UnrestrictedHomeNetworkInformation"
+ case DHCPv6OptMIP6HNP:
+ return "MIPv6HomeNetworkPrefix"
+ case DHCPv6OptMIP6HAA:
+ return "MIPv6HomeAgentAddress"
+ case DHCPv6OptMIP6HAF:
+ return "MIPv6HomeAgentFQDN"
+ case DHCPv6OptV6LOST:
+ return "LoST Server"
+ case DHCPv6OptCAPWAPACV6:
+ return "CAPWAPAccessControllerV6"
+ case DHCPv6OptRelayID:
+ return "LeasequeryRelayID"
+ case DHCPv6OptIPv6AddressMoS:
+ return "MoSIPv6Address"
+ case DHCPv6OptIPv6FQDNMoS:
+ return "MoSDomainNameList"
+ case DHCPv6OptNTPServer:
+ return "NTPServer"
+ case DHCPv6OptV6AccessDomain:
+ return "AccessNetworkDomainName"
+ case DHCPv6OptSIPUACSList:
+ return "SIPUserAgentConfigurationServiceDomains"
+ case DHCPv6OptBootFileURL:
+ return "BootFileURL"
+ case DHCPv6OptBootFileParam:
+ return "BootFileParameters"
+ case DHCPv6OptClientArchType:
+ return "ClientSystemArchitectureType"
+ case DHCPv6OptNII:
+ return "ClientNetworkInterfaceIdentifier"
+ case DHCPv6OptGeolocation:
+ return "Geolocation"
+ case DHCPv6OptAFTRName:
+ return "AFTRName"
+	case DHCPv6OptERPLocalDomainName:
+		return "ERPLocalDomainName"
+ case DHCPv6OptRSOO:
+ return "RSOOption"
+ case DHCPv6OptPDExclude:
+ return "PrefixExclude"
+ case DHCPv6OptVSS:
+ return "VirtualSubnetSelection"
+ case DHCPv6OptRDNSSSelection:
+ return "RDNSSSelection"
+ case DHCPv6OptKRBPrincipalName:
+ return "KerberosPrincipalName"
+ case DHCPv6OptKRBRealmName:
+ return "KerberosRealmName"
+ case DHCPv6OptKRBKDC:
+ return "KerberosKDC"
+ case DHCPv6OptClientLinkLayerAddress:
+ return "ClientLinkLayerAddress"
+ case DHCPv6OptLinkAddress:
+ return "LinkAddress"
+ case DHCPv6OptRADIUS:
+ return "RADIUS"
+ case DHCPv6OptSolMaxRt:
+ return "SolMaxRt"
+ case DHCPv6OptInfMaxRt:
+ return "InfMaxRt"
+ case DHCPv6OptAddrSel:
+ return "AddressSelection"
+ case DHCPv6OptAddrSelTable:
+ return "AddressSelectionTable"
+ case DHCPv6OptV6PCPServer:
+ return "PCPServer"
+ case DHCPv6OptDHCPv4Message:
+ return "DHCPv4Message"
+ case DHCPv6OptDHCPv4OverDHCPv6Server:
+ return "DHCP4o6ServerAddress"
+ case DHCPv6OptS46Rule:
+ return "S46Rule"
+ case DHCPv6OptS46BR:
+ return "S46BR"
+ case DHCPv6OptS46DMR:
+ return "S46DMR"
+ case DHCPv6OptS46V4V4Bind:
+ return "S46IPv4IPv6AddressBinding"
+ case DHCPv6OptS46PortParameters:
+ return "S46PortParameters"
+ case DHCPv6OptS46ContMAPE:
+ return "S46MAPEContainer"
+ case DHCPv6OptS46ContMAPT:
+ return "S46MAPTContainer"
+ case DHCPv6OptS46ContLW:
+ return "S46Lightweight4Over6Container"
+ case DHCPv6Opt4RD:
+ return "4RD"
+ case DHCPv6Opt4RDMapRule:
+ return "4RDMapRule"
+ case DHCPv6Opt4RDNonMapRule:
+ return "4RDNonMapRule"
+ case DHCPv6OptLQBaseTime:
+ return "LQBaseTime"
+ case DHCPv6OptLQStartTime:
+ return "LQStartTime"
+ case DHCPv6OptLQEndTime:
+ return "LQEndTime"
+ case DHCPv6OptCaptivePortal:
+ return "CaptivePortal"
+ case DHCPv6OptMPLParameters:
+ return "MPLParameterConfiguration"
+ case DHCPv6OptANIATT:
+ return "ANIAccessTechnologyType"
+ case DHCPv6OptANINetworkName:
+ return "ANINetworkName"
+ case DHCPv6OptANIAPName:
+ return "ANIAccessPointName"
+ case DHCPv6OptANIAPBSSID:
+ return "ANIAccessPointBSSID"
+ case DHCPv6OptANIOperatorID:
+ return "ANIOperatorIdentifier"
+ case DHCPv6OptANIOperatorRealm:
+ return "ANIOperatorRealm"
+	case DHCPv6OptS46Priority:
+		return "S46Priority"
+ case DHCPv6OptMUDURLV6:
+ return "ManufacturerUsageDescriptionURL"
+ case DHCPv6OptV6Prefix64:
+ return "V6Prefix64"
+ case DHCPv6OptFBindingStatus:
+ return "FailoverBindingStatus"
+ case DHCPv6OptFConnectFlags:
+ return "FailoverConnectFlags"
+ case DHCPv6OptFDNSRemovalInfo:
+ return "FailoverDNSRemovalInfo"
+ case DHCPv6OptFDNSHostName:
+ return "FailoverDNSHostName"
+ case DHCPv6OptFDNSZoneName:
+ return "FailoverDNSZoneName"
+ case DHCPv6OptFDNSFlags:
+ return "FailoverDNSFlags"
+ case DHCPv6OptFExpirationTime:
+ return "FailoverExpirationTime"
+ case DHCPv6OptFMaxUnacknowledgedBNDUPD:
+ return "FailoverMaxUnacknowledgedBNDUPDMessages"
+ case DHCPv6OptFMCLT:
+ return "FailoverMaximumClientLeadTime"
+ case DHCPv6OptFPartnerLifetime:
+ return "FailoverPartnerLifetime"
+ case DHCPv6OptFPartnerLifetimeSent:
+ return "FailoverPartnerLifetimeSent"
+ case DHCPv6OptFPartnerDownTime:
+ return "FailoverPartnerDownTime"
+ case DHCPv6OptFPartnerRawCltTime:
+ return "FailoverPartnerRawClientLeadTime"
+ case DHCPv6OptFProtocolVersion:
+ return "FailoverProtocolVersion"
+ case DHCPv6OptFKeepaliveTime:
+ return "FailoverKeepaliveTime"
+ case DHCPv6OptFReconfigureData:
+ return "FailoverReconfigureData"
+ case DHCPv6OptFRelationshipName:
+ return "FailoverRelationshipName"
+ case DHCPv6OptFServerFlags:
+ return "FailoverServerFlags"
+ case DHCPv6OptFServerState:
+ return "FailoverServerState"
+ case DHCPv6OptFStartTimeOfState:
+ return "FailoverStartTimeOfState"
+ case DHCPv6OptFStateExpirationTime:
+ return "FailoverStateExpirationTime"
+ case DHCPv6OptRelayPort:
+ return "RelayPort"
+ case DHCPv6OptV6ZeroTouchRedirect:
+ return "ZeroTouch"
+ case DHCPv6OptIPV6AddressANDSF:
+ return "ANDSFIPv6Address"
+ default:
+ return fmt.Sprintf("Unknown(%d)", uint16(o))
+ }
+}
+
+// DHCPv6Options is used to get nicely printed option lists which would normally
+// be cut off after 5 options.
+type DHCPv6Options []DHCPv6Option
+
+// String returns a string version of the options list.
+func (o DHCPv6Options) String() string {
+ buf := &bytes.Buffer{}
+ buf.WriteByte('[')
+ for i, opt := range o {
+ buf.WriteString(opt.String())
+ if i+1 != len(o) {
+ buf.WriteString(", ")
+ }
+ }
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+// DHCPv6Option represents a DHCP option.
+type DHCPv6Option struct {
+ Code DHCPv6Opt
+ Length uint16
+ Data []byte
+}
+
+// String returns a string version of a DHCP Option.
+func (o DHCPv6Option) String() string {
+ switch o.Code {
+ case DHCPv6OptClientID, DHCPv6OptServerID:
+ duid, err := decodeDHCPv6DUID(o.Data)
+ if err != nil {
+ return fmt.Sprintf("Option(%s:INVALID)", o.Code)
+ }
+ return fmt.Sprintf("Option(%s:[%s])", o.Code, duid.String())
+	case DHCPv6OptOro:
+		options := ""
+		// Each requested option is a two-byte code; bound the loop by the
+		// actual data so an odd or short payload cannot cause a panic.
+		for i := 0; i+2 <= len(o.Data); i += 2 {
+			if options != "" {
+				options += ","
+			}
+			option := DHCPv6Opt(binary.BigEndian.Uint16(o.Data[i : i+2]))
+			options += option.String()
+		}
+		return fmt.Sprintf("Option(%s:[%s])", o.Code, options)
+ default:
+ return fmt.Sprintf("Option(%s:%v)", o.Code, o.Data)
+ }
+}
+
+// NewDHCPv6Option constructs a new DHCPv6Option with a given type and data.
+func NewDHCPv6Option(code DHCPv6Opt, data []byte) DHCPv6Option {
+ o := DHCPv6Option{Code: code}
+ if data != nil {
+ o.Data = data
+ o.Length = uint16(len(data))
+ }
+
+ return o
+}
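+
+// A minimal usage sketch (illustrative, not upstream API documentation):
+// construct an option and rely on DHCPv6Option.String for display. The
+// `duid` slice here is an assumed, already-encoded DUID.
+//
+//	opt := NewDHCPv6Option(DHCPv6OptClientID, duid)
+//	fmt.Println(opt) // e.g. Option(ClientID:[...])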
+
+func (o *DHCPv6Option) encode(b []byte, opts gopacket.SerializeOptions) error {
+ binary.BigEndian.PutUint16(b[0:2], uint16(o.Code))
+ if opts.FixLengths {
+ binary.BigEndian.PutUint16(b[2:4], uint16(len(o.Data)))
+ } else {
+ binary.BigEndian.PutUint16(b[2:4], o.Length)
+ }
+ copy(b[4:], o.Data)
+
+ return nil
+}
+
+func (o *DHCPv6Option) decode(data []byte) error {
+	// A DHCPv6 option is a 2-byte code, a 2-byte length, then length bytes
+	// of data; validate both the fixed header and the declared length so a
+	// truncated option cannot cause an out-of-range slice.
+	if len(data) < 4 {
+		return errors.New("not enough data to decode")
+	}
+	o.Code = DHCPv6Opt(binary.BigEndian.Uint16(data[0:2]))
+	o.Length = binary.BigEndian.Uint16(data[2:4])
+	if len(data) < 4+int(o.Length) {
+		return errors.New("not enough data to decode")
+	}
+	o.Data = data[4 : 4+o.Length]
+	return nil
+}
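+
+// A hedged sketch (illustrative only) of how a raw options buffer could be
+// walked with decode; in gopacket proper this iteration happens inside the
+// DHCPv6 layer's DecodeFromBytes, which is not part of this file:
+//
+//	for len(buf) >= 4 {
+//		var opt DHCPv6Option
+//		if err := opt.decode(buf); err != nil {
+//			return err
+//		}
+//		opts = append(opts, opt)
+//		buf = buf[4+opt.Length:]
+//	}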
diff --git a/vendor/github.com/google/gopacket/layers/dns.go b/vendor/github.com/google/gopacket/layers/dns.go
new file mode 100644
index 0000000..a0b2d72
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dns.go
@@ -0,0 +1,1053 @@
+// Copyright 2014, 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
+// DNSClass defines the class associated with a request/response. Different DNS
+// classes can be thought of as an array of parallel namespace trees.
+type DNSClass uint16
+
+// DNSClass known values.
+const (
+ DNSClassIN DNSClass = 1 // Internet
+ DNSClassCS DNSClass = 2 // the CSNET class (Obsolete)
+ DNSClassCH DNSClass = 3 // the CHAOS class
+ DNSClassHS DNSClass = 4 // Hesiod [Dyer 87]
+ DNSClassAny DNSClass = 255 // AnyClass
+)
+
+func (dc DNSClass) String() string {
+ switch dc {
+ default:
+ return "Unknown"
+ case DNSClassIN:
+ return "IN"
+ case DNSClassCS:
+ return "CS"
+ case DNSClassCH:
+ return "CH"
+ case DNSClassHS:
+ return "HS"
+ case DNSClassAny:
+ return "Any"
+ }
+}
+
+// DNSType defines the type of data being requested/returned in a
+// question/answer.
+type DNSType uint16
+
+// DNSType known values.
+const (
+ DNSTypeA DNSType = 1 // a host address
+ DNSTypeNS DNSType = 2 // an authoritative name server
+ DNSTypeMD DNSType = 3 // a mail destination (Obsolete - use MX)
+ DNSTypeMF DNSType = 4 // a mail forwarder (Obsolete - use MX)
+ DNSTypeCNAME DNSType = 5 // the canonical name for an alias
+ DNSTypeSOA DNSType = 6 // marks the start of a zone of authority
+ DNSTypeMB DNSType = 7 // a mailbox domain name (EXPERIMENTAL)
+ DNSTypeMG DNSType = 8 // a mail group member (EXPERIMENTAL)
+ DNSTypeMR DNSType = 9 // a mail rename domain name (EXPERIMENTAL)
+ DNSTypeNULL DNSType = 10 // a null RR (EXPERIMENTAL)
+ DNSTypeWKS DNSType = 11 // a well known service description
+ DNSTypePTR DNSType = 12 // a domain name pointer
+ DNSTypeHINFO DNSType = 13 // host information
+ DNSTypeMINFO DNSType = 14 // mailbox or mail list information
+ DNSTypeMX DNSType = 15 // mail exchange
+ DNSTypeTXT DNSType = 16 // text strings
+	DNSTypeAAAA  DNSType = 28 // an IPv6 host address [RFC3596]
+ DNSTypeSRV DNSType = 33 // server discovery [RFC2782] [RFC6195]
+ DNSTypeOPT DNSType = 41 // OPT Pseudo-RR [RFC6891]
+)
+
+func (dt DNSType) String() string {
+ switch dt {
+ default:
+ return "Unknown"
+ case DNSTypeA:
+ return "A"
+ case DNSTypeNS:
+ return "NS"
+ case DNSTypeMD:
+ return "MD"
+ case DNSTypeMF:
+ return "MF"
+ case DNSTypeCNAME:
+ return "CNAME"
+ case DNSTypeSOA:
+ return "SOA"
+ case DNSTypeMB:
+ return "MB"
+ case DNSTypeMG:
+ return "MG"
+ case DNSTypeMR:
+ return "MR"
+ case DNSTypeNULL:
+ return "NULL"
+ case DNSTypeWKS:
+ return "WKS"
+ case DNSTypePTR:
+ return "PTR"
+ case DNSTypeHINFO:
+ return "HINFO"
+ case DNSTypeMINFO:
+ return "MINFO"
+ case DNSTypeMX:
+ return "MX"
+ case DNSTypeTXT:
+ return "TXT"
+ case DNSTypeAAAA:
+ return "AAAA"
+ case DNSTypeSRV:
+ return "SRV"
+ case DNSTypeOPT:
+ return "OPT"
+ }
+}
+
+// DNSResponseCode provides response codes for question answers.
+type DNSResponseCode uint8
+
+// DNSResponseCode known values.
+const (
+ DNSResponseCodeNoErr DNSResponseCode = 0 // No error
+ DNSResponseCodeFormErr DNSResponseCode = 1 // Format Error [RFC1035]
+ DNSResponseCodeServFail DNSResponseCode = 2 // Server Failure [RFC1035]
+ DNSResponseCodeNXDomain DNSResponseCode = 3 // Non-Existent Domain [RFC1035]
+ DNSResponseCodeNotImp DNSResponseCode = 4 // Not Implemented [RFC1035]
+ DNSResponseCodeRefused DNSResponseCode = 5 // Query Refused [RFC1035]
+ DNSResponseCodeYXDomain DNSResponseCode = 6 // Name Exists when it should not [RFC2136]
+ DNSResponseCodeYXRRSet DNSResponseCode = 7 // RR Set Exists when it should not [RFC2136]
+ DNSResponseCodeNXRRSet DNSResponseCode = 8 // RR Set that should exist does not [RFC2136]
+ DNSResponseCodeNotAuth DNSResponseCode = 9 // Server Not Authoritative for zone [RFC2136]
+ DNSResponseCodeNotZone DNSResponseCode = 10 // Name not contained in zone [RFC2136]
+ DNSResponseCodeBadVers DNSResponseCode = 16 // Bad OPT Version [RFC2671]
+	DNSResponseCodeBadSig   DNSResponseCode = 16 // TSIG Signature Failure [RFC2845]; intentionally shares value 16 with BadVers
+ DNSResponseCodeBadKey DNSResponseCode = 17 // Key not recognized [RFC2845]
+ DNSResponseCodeBadTime DNSResponseCode = 18 // Signature out of time window [RFC2845]
+ DNSResponseCodeBadMode DNSResponseCode = 19 // Bad TKEY Mode [RFC2930]
+ DNSResponseCodeBadName DNSResponseCode = 20 // Duplicate key name [RFC2930]
+ DNSResponseCodeBadAlg DNSResponseCode = 21 // Algorithm not supported [RFC2930]
+ DNSResponseCodeBadTruc DNSResponseCode = 22 // Bad Truncation [RFC4635]
+)
+
+func (drc DNSResponseCode) String() string {
+ switch drc {
+ default:
+ return "Unknown"
+ case DNSResponseCodeNoErr:
+ return "No Error"
+ case DNSResponseCodeFormErr:
+ return "Format Error"
+ case DNSResponseCodeServFail:
+		return "Server Failure"
+ case DNSResponseCodeNXDomain:
+ return "Non-Existent Domain"
+ case DNSResponseCodeNotImp:
+ return "Not Implemented"
+ case DNSResponseCodeRefused:
+ return "Query Refused"
+ case DNSResponseCodeYXDomain:
+ return "Name Exists when it should not"
+ case DNSResponseCodeYXRRSet:
+ return "RR Set Exists when it should not"
+ case DNSResponseCodeNXRRSet:
+ return "RR Set that should exist does not"
+ case DNSResponseCodeNotAuth:
+ return "Server Not Authoritative for zone"
+ case DNSResponseCodeNotZone:
+ return "Name not contained in zone"
+ case DNSResponseCodeBadVers:
+ return "Bad OPT Version"
+ case DNSResponseCodeBadKey:
+ return "Key not recognized"
+ case DNSResponseCodeBadTime:
+ return "Signature out of time window"
+ case DNSResponseCodeBadMode:
+ return "Bad TKEY Mode"
+ case DNSResponseCodeBadName:
+ return "Duplicate key name"
+ case DNSResponseCodeBadAlg:
+ return "Algorithm not supported"
+ case DNSResponseCodeBadTruc:
+ return "Bad Truncation"
+ }
+}
+
+// DNSOpCode defines a set of different operation types.
+type DNSOpCode uint8
+
+// DNSOpCode known values.
+const (
+ DNSOpCodeQuery DNSOpCode = 0 // Query [RFC1035]
+ DNSOpCodeIQuery DNSOpCode = 1 // Inverse Query Obsolete [RFC3425]
+ DNSOpCodeStatus DNSOpCode = 2 // Status [RFC1035]
+ DNSOpCodeNotify DNSOpCode = 4 // Notify [RFC1996]
+ DNSOpCodeUpdate DNSOpCode = 5 // Update [RFC2136]
+)
+
+func (doc DNSOpCode) String() string {
+ switch doc {
+ default:
+ return "Unknown"
+ case DNSOpCodeQuery:
+ return "Query"
+ case DNSOpCodeIQuery:
+ return "Inverse Query"
+ case DNSOpCodeStatus:
+ return "Status"
+ case DNSOpCodeNotify:
+ return "Notify"
+ case DNSOpCodeUpdate:
+ return "Update"
+ }
+}
+
+// DNS is specified in RFC 1034 / RFC 1035
+// +---------------------+
+// | Header |
+// +---------------------+
+// | Question | the question for the name server
+// +---------------------+
+// | Answer | RRs answering the question
+// +---------------------+
+// | Authority | RRs pointing toward an authority
+// +---------------------+
+// | Additional | RRs holding additional information
+// +---------------------+
+//
+// DNS Header
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | ID |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | QDCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | ANCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | NSCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | ARCOUNT |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
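+// Worked example (illustrative): with data[2] = 0x85 (binary 1000 0101) the
+// masks in DecodeFromBytes below yield QR=1 (response), Opcode=0 (query),
+// AA=1, TC=0 and RD=1; a data[3] of 0x80 would then set RA=1 with RCODE=0.
+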
+// DNS contains data from a single Domain Name Service packet.
+type DNS struct {
+ BaseLayer
+
+ // Header fields
+ ID uint16
+ QR bool
+ OpCode DNSOpCode
+
+ AA bool // Authoritative answer
+ TC bool // Truncated
+ RD bool // Recursion desired
+ RA bool // Recursion available
+ Z uint8 // Reserved for future use
+
+ ResponseCode DNSResponseCode
+ QDCount uint16 // Number of questions to expect
+ ANCount uint16 // Number of answers to expect
+ NSCount uint16 // Number of authorities to expect
+ ARCount uint16 // Number of additional records to expect
+
+ // Entries
+ Questions []DNSQuestion
+ Answers []DNSResourceRecord
+ Authorities []DNSResourceRecord
+ Additionals []DNSResourceRecord
+
+	// buffer for doing name decoding. We use a single reusable buffer so that
+	// repeated DecodeFromBytes calls on the same object do not constantly
+	// allocate small byte slices for each decoded name.
+ buffer []byte
+}
+
+// LayerType returns gopacket.LayerTypeDNS.
+func (d *DNS) LayerType() gopacket.LayerType { return LayerTypeDNS }
+
+// decodeDNS decodes the byte slice into a DNS type. It also
+// sets up the application layer in the PacketBuilder.
+func decodeDNS(data []byte, p gopacket.PacketBuilder) error {
+ d := &DNS{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(d)
+ p.SetApplicationLayer(d)
+ return nil
+}
+
+// DecodeFromBytes decodes the slice into the DNS struct.
+func (d *DNS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ d.buffer = d.buffer[:0]
+
+ if len(data) < 12 {
+ df.SetTruncated()
+ return errDNSPacketTooShort
+ }
+
+	// since there are no further layers, the baselayer's content is
+	// pointing to this layer
+	d.BaseLayer = BaseLayer{Contents: data}
+ d.ID = binary.BigEndian.Uint16(data[:2])
+ d.QR = data[2]&0x80 != 0
+ d.OpCode = DNSOpCode(data[2]>>3) & 0x0F
+ d.AA = data[2]&0x04 != 0
+ d.TC = data[2]&0x02 != 0
+ d.RD = data[2]&0x01 != 0
+ d.RA = data[3]&0x80 != 0
+ d.Z = uint8(data[3]>>4) & 0x7
+ d.ResponseCode = DNSResponseCode(data[3] & 0xF)
+ d.QDCount = binary.BigEndian.Uint16(data[4:6])
+ d.ANCount = binary.BigEndian.Uint16(data[6:8])
+ d.NSCount = binary.BigEndian.Uint16(data[8:10])
+ d.ARCount = binary.BigEndian.Uint16(data[10:12])
+
+ d.Questions = d.Questions[:0]
+ d.Answers = d.Answers[:0]
+ d.Authorities = d.Authorities[:0]
+ d.Additionals = d.Additionals[:0]
+
+ offset := 12
+ var err error
+ for i := 0; i < int(d.QDCount); i++ {
+ var q DNSQuestion
+ if offset, err = q.decode(data, offset, df, &d.buffer); err != nil {
+ return err
+ }
+ d.Questions = append(d.Questions, q)
+ }
+
+ // For some horrible reason, if we do the obvious thing in this loop:
+ // var r DNSResourceRecord
+ // if blah := r.decode(blah); err != nil {
+ // return err
+ // }
+ // d.Foo = append(d.Foo, r)
+ // the Go compiler thinks that 'r' escapes to the heap, causing a malloc for
+ // every Answer, Authority, and Additional. To get around this, we do
+ // something really silly: we append an empty resource record to our slice,
+ // then use the last value in the slice to call decode. Since the value is
+ // already in the slice, there's no WAY it can escape... on the other hand our
+ // code is MUCH uglier :(
+ for i := 0; i < int(d.ANCount); i++ {
+ d.Answers = append(d.Answers, DNSResourceRecord{})
+ if offset, err = d.Answers[i].decode(data, offset, df, &d.buffer); err != nil {
+ d.Answers = d.Answers[:i] // strip off erroneous value
+ return err
+ }
+ }
+ for i := 0; i < int(d.NSCount); i++ {
+ d.Authorities = append(d.Authorities, DNSResourceRecord{})
+ if offset, err = d.Authorities[i].decode(data, offset, df, &d.buffer); err != nil {
+ d.Authorities = d.Authorities[:i] // strip off erroneous value
+ return err
+ }
+ }
+ for i := 0; i < int(d.ARCount); i++ {
+ d.Additionals = append(d.Additionals, DNSResourceRecord{})
+ if offset, err = d.Additionals[i].decode(data, offset, df, &d.buffer); err != nil {
+ d.Additionals = d.Additionals[:i] // strip off erroneous value
+ return err
+ }
+ }
+
+ if uint16(len(d.Questions)) != d.QDCount {
+ return errDecodeQueryBadQDCount
+ } else if uint16(len(d.Answers)) != d.ANCount {
+ return errDecodeQueryBadANCount
+ } else if uint16(len(d.Authorities)) != d.NSCount {
+ return errDecodeQueryBadNSCount
+ } else if uint16(len(d.Additionals)) != d.ARCount {
+ return errDecodeQueryBadARCount
+ }
+ return nil
+}
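+
+// Typical decoding entry point (a hedged sketch; `raw` is an assumed byte
+// slice containing a complete DNS message):
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeDNS, gopacket.Default)
+//	if l := packet.Layer(LayerTypeDNS); l != nil {
+//		dns := l.(*DNS)
+//		fmt.Println(dns.ID, dns.ANCount)
+//	}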
+
+// CanDecode implements gopacket.DecodingLayer.
+func (d *DNS) CanDecode() gopacket.LayerClass {
+ return LayerTypeDNS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (d *DNS) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// Payload returns nil.
+func (d *DNS) Payload() []byte {
+ return nil
+}
+
+func b2i(b bool) int {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func recSize(rr *DNSResourceRecord) int {
+ switch rr.Type {
+ case DNSTypeA:
+ return 4
+ case DNSTypeAAAA:
+ return 16
+ case DNSTypeNS:
+ return len(rr.NS) + 2
+ case DNSTypeCNAME:
+ return len(rr.CNAME) + 2
+ case DNSTypePTR:
+ return len(rr.PTR) + 2
+ case DNSTypeSOA:
+ return len(rr.SOA.MName) + 2 + len(rr.SOA.RName) + 2 + 20
+ case DNSTypeMX:
+ return 2 + len(rr.MX.Name) + 2
+ case DNSTypeTXT:
+ l := len(rr.TXTs)
+ for _, txt := range rr.TXTs {
+ l += len(txt)
+ }
+ return l
+ case DNSTypeSRV:
+ return 6 + len(rr.SRV.Name) + 2
+ case DNSTypeOPT:
+ l := len(rr.OPT) * 4
+ for _, opt := range rr.OPT {
+ l += len(opt.Data)
+ }
+ return l
+ }
+
+ return 0
+}
+
+func computeSize(recs []DNSResourceRecord) int {
+ sz := 0
+ for _, rr := range recs {
+ v := len(rr.Name)
+
+ if v == 0 {
+ sz += v + 11
+ } else {
+ sz += v + 12
+ }
+
+ sz += recSize(&rr)
+ }
+ return sz
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (d *DNS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ dsz := 0
+ for _, q := range d.Questions {
+ dsz += len(q.Name) + 6
+ }
+ dsz += computeSize(d.Answers)
+ dsz += computeSize(d.Authorities)
+ dsz += computeSize(d.Additionals)
+
+ bytes, err := b.PrependBytes(12 + dsz)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, d.ID)
+ bytes[2] = byte((b2i(d.QR) << 7) | (int(d.OpCode) << 3) | (b2i(d.AA) << 2) | (b2i(d.TC) << 1) | b2i(d.RD))
+ bytes[3] = byte((b2i(d.RA) << 7) | (int(d.Z) << 4) | int(d.ResponseCode))
+
+ if opts.FixLengths {
+ d.QDCount = uint16(len(d.Questions))
+ d.ANCount = uint16(len(d.Answers))
+ d.NSCount = uint16(len(d.Authorities))
+ d.ARCount = uint16(len(d.Additionals))
+ }
+ binary.BigEndian.PutUint16(bytes[4:], d.QDCount)
+ binary.BigEndian.PutUint16(bytes[6:], d.ANCount)
+ binary.BigEndian.PutUint16(bytes[8:], d.NSCount)
+ binary.BigEndian.PutUint16(bytes[10:], d.ARCount)
+
+ off := 12
+ for _, qd := range d.Questions {
+ n := qd.encode(bytes, off)
+ off += n
+ }
+
+ for i := range d.Answers {
+ // done this way so we can modify DNSResourceRecord to fix
+ // lengths if requested
+ qa := &d.Answers[i]
+ n, err := qa.encode(bytes, off, opts)
+ if err != nil {
+ return err
+ }
+ off += n
+ }
+
+ for i := range d.Authorities {
+ qa := &d.Authorities[i]
+ n, err := qa.encode(bytes, off, opts)
+ if err != nil {
+ return err
+ }
+ off += n
+ }
+ for i := range d.Additionals {
+ qa := &d.Additionals[i]
+ n, err := qa.encode(bytes, off, opts)
+ if err != nil {
+ return err
+ }
+ off += n
+ }
+
+ return nil
+}
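+
+// Serialization sketch (illustrative): with FixLengths set, the count fields
+// are recomputed from the entry slices, so a hand-built query only needs its
+// Questions populated:
+//
+//	d := &DNS{ID: 1234, RD: true, OpCode: DNSOpCodeQuery}
+//	d.Questions = append(d.Questions, DNSQuestion{
+//		Name:  []byte("example.com"),
+//		Type:  DNSTypeA,
+//		Class: DNSClassIN,
+//	})
+//	buf := gopacket.NewSerializeBuffer()
+//	err := d.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true})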
+
+const maxRecursionLevel = 255
+
+func decodeName(data []byte, offset int, buffer *[]byte, level int) ([]byte, int, error) {
+ if level > maxRecursionLevel {
+ return nil, 0, errMaxRecursion
+ } else if offset >= len(data) {
+ return nil, 0, errDNSNameOffsetTooHigh
+ } else if offset < 0 {
+ return nil, 0, errDNSNameOffsetNegative
+ }
+ start := len(*buffer)
+ index := offset
+ if data[index] == 0x00 {
+ return nil, index + 1, nil
+ }
+loop:
+ for data[index] != 0x00 {
+ switch data[index] & 0xc0 {
+ default:
+ /* RFC 1035
+ A domain name represented as a sequence of labels, where
+ each label consists of a length octet followed by that
+ number of octets. The domain name terminates with the
+ zero length octet for the null label of the root. Note
+ that this field may be an odd number of octets; no
+ padding is used.
+ */
+ index2 := index + int(data[index]) + 1
+ if index2-offset > 255 {
+ return nil, 0, errDNSNameTooLong
+ } else if index2 < index+1 || index2 > len(data) {
+ return nil, 0, errDNSNameInvalidIndex
+ }
+ *buffer = append(*buffer, '.')
+ *buffer = append(*buffer, data[index+1:index2]...)
+ index = index2
+
+ case 0xc0:
+ /* RFC 1035
+ The pointer takes the form of a two octet sequence.
+
+ The first two bits are ones. This allows a pointer to
+ be distinguished from a label, since the label must
+ begin with two zero bits because labels are restricted
+ to 63 octets or less. (The 10 and 01 combinations are
+ reserved for future use.) The OFFSET field specifies
+ an offset from the start of the message (i.e., the
+ first octet of the ID field in the domain header). A
+ zero offset specifies the first byte of the ID field,
+ etc.
+
+ The compression scheme allows a domain name in a message to be
+ represented as either:
+ - a sequence of labels ending in a zero octet
+ - a pointer
+ - a sequence of labels ending with a pointer
+ */
+ if index+2 > len(data) {
+ return nil, 0, errDNSPointerOffsetTooHigh
+ }
+ offsetp := int(binary.BigEndian.Uint16(data[index:index+2]) & 0x3fff)
+ if offsetp > len(data) {
+ return nil, 0, errDNSPointerOffsetTooHigh
+ }
+ // This looks a little tricky, but actually isn't. Because of how
+ // decodeName is written, calling it appends the decoded name to the
+ // current buffer. We already have the start of the buffer, then, so
+ // once this call is done buffer[start:] will contain our full name.
+ _, _, err := decodeName(data, offsetp, buffer, level+1)
+ if err != nil {
+ return nil, 0, err
+ }
+ index++ // pointer is two bytes, so add an extra byte here.
+ break loop
+ /* EDNS, or other DNS option ? */
+ case 0x40: // RFC 2673
+ return nil, 0, fmt.Errorf("qname '0x40' - RFC 2673 unsupported yet (data=%x index=%d)",
+ data[index], index)
+
+ case 0x80:
+ return nil, 0, fmt.Errorf("qname '0x80' unsupported yet (data=%x index=%d)",
+ data[index], index)
+ }
+ if index >= len(data) {
+ return nil, 0, errDNSIndexOutOfRange
+ }
+ }
+ if len(*buffer) <= start {
+ return (*buffer)[start:], index + 1, nil
+ }
+ return (*buffer)[start+1:], index + 1, nil
+}
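+
+// Worked example (illustrative): if the bytes at offset 12 of a message are
+// 03 'w' 'w' 'w' 07 'e' 'x' 'a' 'm' 'p' 'l' 'e' 03 'c' 'o' 'm' 00, decodeName
+// appends "www.example.com" to the buffer. A later name consisting of the two
+// bytes 0xc0 0x0c is a compression pointer back to offset 12 (0xc00c & 0x3fff)
+// and decodes to the same string via the recursive call above.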
+
+// DNSQuestion wraps a single request (question) within a DNS query.
+type DNSQuestion struct {
+ Name []byte
+ Type DNSType
+ Class DNSClass
+}
+
+func (q *DNSQuestion) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+	name, endq, err := decodeName(data, offset, buffer, 1)
+	if err != nil {
+		return 0, err
+	}
+	// Guard the fixed 4-byte type/class trailer before slicing into it.
+	if endq+4 > len(data) {
+		return 0, errDNSPacketTooShort
+	}
+
+	q.Name = name
+	q.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+	q.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+
+	return endq + 4, nil
+}
+
+func (q *DNSQuestion) encode(data []byte, offset int) int {
+ noff := encodeName(q.Name, data, offset)
+ nSz := noff - offset
+ binary.BigEndian.PutUint16(data[noff:], uint16(q.Type))
+ binary.BigEndian.PutUint16(data[noff+2:], uint16(q.Class))
+ return nSz + 4
+}
+
+// DNSResourceRecord
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | |
+// / /
+// / NAME /
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TYPE |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | CLASS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TTL |
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | RDLENGTH |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
+// / RDATA /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// DNSResourceRecord wraps the data from a single DNS resource within a
+// response.
+type DNSResourceRecord struct {
+ // Header
+ Name []byte
+ Type DNSType
+ Class DNSClass
+ TTL uint32
+
+ // RDATA Raw Values
+ DataLength uint16
+ Data []byte
+
+ // RDATA Decoded Values
+ IP net.IP
+ NS, CNAME, PTR []byte
+ TXTs [][]byte
+ SOA DNSSOA
+ SRV DNSSRV
+ MX DNSMX
+ OPT []DNSOPT // See RFC 6891, section 6.1.2
+
+ // Undecoded TXT for backward compatibility
+ TXT []byte
+}
+
+// decode decodes the resource record, returning the offset in data at which
+// the next record begins.
+func (rr *DNSResourceRecord) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+ name, endq, err := decodeName(data, offset, buffer, 1)
+ if err != nil {
+ return 0, err
+ }
+
+	rr.Name = name
+	// Guard the 10-byte fixed RR header before slicing into it.
+	if endq+10 > len(data) {
+		return 0, errDecodeRecordLength
+	}
+	rr.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+	rr.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+	rr.TTL = binary.BigEndian.Uint32(data[endq+4 : endq+8])
+	rr.DataLength = binary.BigEndian.Uint16(data[endq+8 : endq+10])
+	end := endq + 10 + int(rr.DataLength)
+	if end > len(data) {
+		return 0, errDecodeRecordLength
+	}
+	rr.Data = data[endq+10 : end]
+
+ if err = rr.decodeRData(data, endq+10, buffer); err != nil {
+ return 0, err
+ }
+
+	return end, nil
+}
+
+func encodeName(name []byte, data []byte, offset int) int {
+ l := 0
+ for i := range name {
+ if name[i] == '.' {
+ data[offset+i-l] = byte(l)
+ l = 0
+ } else {
+ // skip one to write the length
+ data[offset+i+1] = name[i]
+ l++
+ }
+ }
+
+ if len(name) == 0 {
+ data[offset] = 0x00 // terminal
+ return offset + 1
+ }
+
+ // length for final portion
+ data[offset+len(name)-l] = byte(l)
+ data[offset+len(name)+1] = 0x00 // terminal
+ return offset + len(name) + 2
+}
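+
+// For example (illustrative), encodeName([]byte("www.example.com"), data, off)
+// writes 03 'w' 'w' 'w' 07 'e' 'x' 'a' 'm' 'p' 'l' 'e' 03 'c' 'o' 'm' 00 and
+// returns off+17. Note that the encode path never emits compression pointers.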
+
+func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.SerializeOptions) (int, error) {
+
+ noff := encodeName(rr.Name, data, offset)
+ nSz := noff - offset
+
+ binary.BigEndian.PutUint16(data[noff:], uint16(rr.Type))
+ binary.BigEndian.PutUint16(data[noff+2:], uint16(rr.Class))
+ binary.BigEndian.PutUint32(data[noff+4:], uint32(rr.TTL))
+
+ switch rr.Type {
+ case DNSTypeA:
+ copy(data[noff+10:], rr.IP.To4())
+ case DNSTypeAAAA:
+ copy(data[noff+10:], rr.IP)
+ case DNSTypeNS:
+ encodeName(rr.NS, data, noff+10)
+ case DNSTypeCNAME:
+ encodeName(rr.CNAME, data, noff+10)
+ case DNSTypePTR:
+ encodeName(rr.PTR, data, noff+10)
+ case DNSTypeSOA:
+ noff2 := encodeName(rr.SOA.MName, data, noff+10)
+ noff2 = encodeName(rr.SOA.RName, data, noff2)
+ binary.BigEndian.PutUint32(data[noff2:], rr.SOA.Serial)
+ binary.BigEndian.PutUint32(data[noff2+4:], rr.SOA.Refresh)
+ binary.BigEndian.PutUint32(data[noff2+8:], rr.SOA.Retry)
+ binary.BigEndian.PutUint32(data[noff2+12:], rr.SOA.Expire)
+ binary.BigEndian.PutUint32(data[noff2+16:], rr.SOA.Minimum)
+ case DNSTypeMX:
+ binary.BigEndian.PutUint16(data[noff+10:], rr.MX.Preference)
+ encodeName(rr.MX.Name, data, noff+12)
+ case DNSTypeTXT:
+ noff2 := noff + 10
+ for _, txt := range rr.TXTs {
+ data[noff2] = byte(len(txt))
+ copy(data[noff2+1:], txt)
+ noff2 += 1 + len(txt)
+ }
+ case DNSTypeSRV:
+ binary.BigEndian.PutUint16(data[noff+10:], rr.SRV.Priority)
+ binary.BigEndian.PutUint16(data[noff+12:], rr.SRV.Weight)
+ binary.BigEndian.PutUint16(data[noff+14:], rr.SRV.Port)
+ encodeName(rr.SRV.Name, data, noff+16)
+ case DNSTypeOPT:
+ noff2 := noff + 10
+ for _, opt := range rr.OPT {
+ binary.BigEndian.PutUint16(data[noff2:], uint16(opt.Code))
+ binary.BigEndian.PutUint16(data[noff2+2:], uint16(len(opt.Data)))
+ copy(data[noff2+4:], opt.Data)
+ noff2 += 4 + len(opt.Data)
+ }
+ default:
+ return 0, fmt.Errorf("serializing resource record of type %v not supported", rr.Type)
+ }
+
+ // DataLength
+ dSz := recSize(rr)
+ binary.BigEndian.PutUint16(data[noff+8:], uint16(dSz))
+
+ if opts.FixLengths {
+ rr.DataLength = uint16(dSz)
+ }
+
+ return nSz + 10 + dSz, nil
+}
+
+func (rr *DNSResourceRecord) String() string {
+
+ if rr.Type == DNSTypeOPT {
+ opts := make([]string, len(rr.OPT))
+ for i, opt := range rr.OPT {
+ opts[i] = opt.String()
+ }
+ return "OPT " + strings.Join(opts, ",")
+ }
+ if rr.Class == DNSClassIN {
+ switch rr.Type {
+ case DNSTypeA, DNSTypeAAAA:
+ return rr.IP.String()
+ case DNSTypeNS:
+ return "NS " + string(rr.NS)
+ case DNSTypeCNAME:
+ return "CNAME " + string(rr.CNAME)
+ case DNSTypePTR:
+ return "PTR " + string(rr.PTR)
+ case DNSTypeTXT:
+ return "TXT " + string(rr.TXT)
+ }
+ }
+
+ return fmt.Sprintf("<%v, %v>", rr.Class, rr.Type)
+}
+
+func decodeCharacterStrings(data []byte) ([][]byte, error) {
+ strings := make([][]byte, 0, 1)
+ end := len(data)
+ for index, index2 := 0, 0; index != end; index = index2 {
+ index2 = index + 1 + int(data[index]) // index increases by 1..256 and does not overflow
+ if index2 > end {
+ return nil, errCharStringMissData
+ }
+ strings = append(strings, data[index+1:index2])
+ }
+ return strings, nil
+}
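+
+// For example (illustrative), TXT RDATA of 05 'h' 'e' 'l' 'l' 'o' 02 'h' 'i'
+// decodes to the two character-strings "hello" and "hi"; a length byte that
+// points past the end of the data yields errCharStringMissData.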
+
+func decodeOPTs(data []byte, offset int) ([]DNSOPT, error) {
+ allOPT := []DNSOPT{}
+ end := len(data)
+
+ if offset == end {
+ return allOPT, nil // There is no data to read
+ }
+
+ if offset+4 > end {
+ return allOPT, fmt.Errorf("DNSOPT record is of length %d, it should be at least length 4", end-offset)
+ }
+
+	for i := offset; i < end; {
+		// Re-check the 4-byte option header on every iteration, not just
+		// the first: a trailing fragment shorter than a header would
+		// otherwise cause an out-of-range slice below.
+		if i+4 > end {
+			return allOPT, fmt.Errorf("Malformed DNSOPT record. Option header extends past the %d byte(s) remaining", end-i)
+		}
+		opt := DNSOPT{}
+		opt.Code = DNSOptionCode(binary.BigEndian.Uint16(data[i : i+2]))
+		l := binary.BigEndian.Uint16(data[i+2 : i+4])
+		if i+4+int(l) > end {
+			return allOPT, fmt.Errorf("Malformed DNSOPT record. The length (%d) field implies a packet larger than the one received", l)
+		}
+ opt.Data = data[i+4 : i+4+int(l)]
+ allOPT = append(allOPT, opt)
+ i += int(l) + 4
+ }
+ return allOPT, nil
+}
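+
+// For example (illustrative), OPT RDATA of 00 0a 00 02 de ad decodes to a
+// single DNSOPT{Code: DNSOptionCodeCookie, Data: []byte{0xde, 0xad}} (code 10
+// is Cookie; real DNS cookies carry at least 8 bytes of data).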
+
+func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte) error {
+ switch rr.Type {
+ case DNSTypeA:
+ rr.IP = rr.Data
+ case DNSTypeAAAA:
+ rr.IP = rr.Data
+ case DNSTypeTXT, DNSTypeHINFO:
+ rr.TXT = rr.Data
+ txts, err := decodeCharacterStrings(rr.Data)
+ if err != nil {
+ return err
+ }
+ rr.TXTs = txts
+ case DNSTypeNS:
+ name, _, err := decodeName(data, offset, buffer, 1)
+ if err != nil {
+ return err
+ }
+ rr.NS = name
+ case DNSTypeCNAME:
+ name, _, err := decodeName(data, offset, buffer, 1)
+ if err != nil {
+ return err
+ }
+ rr.CNAME = name
+ case DNSTypePTR:
+ name, _, err := decodeName(data, offset, buffer, 1)
+ if err != nil {
+ return err
+ }
+ rr.PTR = name
+ case DNSTypeSOA:
+ name, endq, err := decodeName(data, offset, buffer, 1)
+ if err != nil {
+ return err
+ }
+ rr.SOA.MName = name
+ name, endq, err = decodeName(data, endq, buffer, 1)
+ if err != nil {
+ return err
+ }
+ rr.SOA.RName = name
+ rr.SOA.Serial = binary.BigEndian.Uint32(data[endq : endq+4])
+ rr.SOA.Refresh = binary.BigEndian.Uint32(data[endq+4 : endq+8])
+ rr.SOA.Retry = binary.BigEndian.Uint32(data[endq+8 : endq+12])
+ rr.SOA.Expire = binary.BigEndian.Uint32(data[endq+12 : endq+16])
+ rr.SOA.Minimum = binary.BigEndian.Uint32(data[endq+16 : endq+20])
+ case DNSTypeMX:
+ rr.MX.Preference = binary.BigEndian.Uint16(data[offset : offset+2])
+ name, _, err := decodeName(data, offset+2, buffer, 1)
+ if err != nil {
+ return err
+ }
+ rr.MX.Name = name
+ case DNSTypeSRV:
+ rr.SRV.Priority = binary.BigEndian.Uint16(data[offset : offset+2])
+ rr.SRV.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+ rr.SRV.Port = binary.BigEndian.Uint16(data[offset+4 : offset+6])
+ name, _, err := decodeName(data, offset+6, buffer, 1)
+ if err != nil {
+ return err
+ }
+ rr.SRV.Name = name
+ case DNSTypeOPT:
+ allOPT, err := decodeOPTs(data, offset)
+ if err != nil {
+ return err
+ }
+ rr.OPT = allOPT
+ }
+ return nil
+}
+
+// DNSSOA is a Start of Authority record. Each domain requires a SOA record at
+// the cutover where a domain is delegated from its parent.
+type DNSSOA struct {
+ MName, RName []byte
+ Serial, Refresh, Retry, Expire, Minimum uint32
+}
+
+// DNSSRV is a Service record, defining a location (hostname/port) of a
+// server/service.
+type DNSSRV struct {
+ Priority, Weight, Port uint16
+ Name []byte
+}
+
+// DNSMX is a mail exchange record, defining a mail server for a recipient's
+// domain.
+type DNSMX struct {
+ Preference uint16
+ Name []byte
+}
+
+// DNSOptionCode represents the code of a DNS Option, see RFC6891, section 6.1.2
+type DNSOptionCode uint16
+
+func (doc DNSOptionCode) String() string {
+ switch doc {
+ default:
+ return "Unknown"
+ case DNSOptionCodeNSID:
+ return "NSID"
+ case DNSOptionCodeDAU:
+ return "DAU"
+ case DNSOptionCodeDHU:
+ return "DHU"
+ case DNSOptionCodeN3U:
+ return "N3U"
+ case DNSOptionCodeEDNSClientSubnet:
+ return "EDNSClientSubnet"
+ case DNSOptionCodeEDNSExpire:
+ return "EDNSExpire"
+ case DNSOptionCodeCookie:
+ return "Cookie"
+ case DNSOptionCodeEDNSKeepAlive:
+ return "EDNSKeepAlive"
+ case DNSOptionCodePadding:
+ return "CodePadding"
+ case DNSOptionCodeChain:
+ return "CodeChain"
+ case DNSOptionCodeEDNSKeyTag:
+ return "CodeEDNSKeyTag"
+ case DNSOptionCodeEDNSClientTag:
+ return "EDNSClientTag"
+ case DNSOptionCodeEDNSServerTag:
+ return "EDNSServerTag"
+ case DNSOptionCodeDeviceID:
+ return "DeviceID"
+ }
+}
+
+// DNSOptionCode known values. See IANA
+const (
+ DNSOptionCodeNSID DNSOptionCode = 3
+ DNSOptionCodeDAU DNSOptionCode = 5
+ DNSOptionCodeDHU DNSOptionCode = 6
+ DNSOptionCodeN3U DNSOptionCode = 7
+ DNSOptionCodeEDNSClientSubnet DNSOptionCode = 8
+ DNSOptionCodeEDNSExpire DNSOptionCode = 9
+ DNSOptionCodeCookie DNSOptionCode = 10
+ DNSOptionCodeEDNSKeepAlive DNSOptionCode = 11
+ DNSOptionCodePadding DNSOptionCode = 12
+ DNSOptionCodeChain DNSOptionCode = 13
+ DNSOptionCodeEDNSKeyTag DNSOptionCode = 14
+ DNSOptionCodeEDNSClientTag DNSOptionCode = 16
+ DNSOptionCodeEDNSServerTag DNSOptionCode = 17
+ DNSOptionCodeDeviceID DNSOptionCode = 26946
+)
+
+// DNSOPT is a DNS Option, see RFC6891, section 6.1.2
+type DNSOPT struct {
+ Code DNSOptionCode
+ Data []byte
+}
+
+func (opt DNSOPT) String() string {
+ return fmt.Sprintf("%s=%x", opt.Code, opt.Data)
+}
+
+var (
+ errMaxRecursion = errors.New("max DNS recursion level hit")
+
+ errDNSNameOffsetTooHigh = errors.New("dns name offset too high")
+ errDNSNameOffsetNegative = errors.New("dns name offset is negative")
+ errDNSPacketTooShort = errors.New("DNS packet too short")
+ errDNSNameTooLong = errors.New("dns name is too long")
+ errDNSNameInvalidIndex = errors.New("dns name uncomputable: invalid index")
+ errDNSPointerOffsetTooHigh = errors.New("dns offset pointer too high")
+ errDNSIndexOutOfRange = errors.New("dns index walked out of range")
+ errDNSNameHasNoData = errors.New("no dns data found for name")
+
+	errCharStringMissData = errors.New("Insufficient data for a <character-string>")
+
+ errDecodeRecordLength = errors.New("resource record length exceeds data")
+
+ errDecodeQueryBadQDCount = errors.New("Invalid query decoding, not the right number of questions")
+ errDecodeQueryBadANCount = errors.New("Invalid query decoding, not the right number of answers")
+ errDecodeQueryBadNSCount = errors.New("Invalid query decoding, not the right number of authorities")
+ errDecodeQueryBadARCount = errors.New("Invalid query decoding, not the right number of additionals info")
+)
diff --git a/vendor/github.com/google/gopacket/layers/doc.go b/vendor/github.com/google/gopacket/layers/doc.go
new file mode 100644
index 0000000..3c882c3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/doc.go
@@ -0,0 +1,61 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package layers provides decoding layers for many common protocols.
+
+The layers package contains decode implementations for a number of different
+types of packet layers. Users of gopacket will almost always want to also use
+layers to actually decode packet data into useful pieces. To see the set of
+protocols that gopacket/layers is currently able to decode,
+look at the set of LayerTypes defined in the Variables sections. The
+layers package also defines endpoints for many of the common packet layers
+that have source/destination addresses associated with them, for example IPv4/6
+(IPs) and TCP/UDP (ports).
+Finally, layers contains a number of useful enumerations (IPProtocol,
+EthernetType, LinkType, PPPType, etc...). Many of these implement the
+gopacket.Decoder interface, so they can be passed into gopacket as decoders.
+
+Most common protocol layers are named using acronyms or other industry-common
+names (IPv4, TCP, PPP). Some of the less common ones have their names expanded
+(CiscoDiscoveryProtocol).
+For certain protocols, sub-parts of the protocol are split out into their own
+layers (SCTP, for example). This is done mostly in cases where portions of the
+protocol may fulfill the capabilities of interesting layers (SCTPData implements
+ApplicationLayer, while base SCTP implements TransportLayer), or possibly
+because splitting a protocol into a few layers makes decoding easier.
+
+This package is meant to be used with its parent,
+http://github.com/google/gopacket.
+
+Port Types
+
+Instead of using raw uint16 or uint8 values for ports, we use a different port
+type for every protocol, for example TCPPort and UDPPort. This allows us to
+override string behavior for each port, which we do by setting up port name
+maps (TCPPortNames, UDPPortNames, etc...). Well-known ports are annotated with
+their protocol names, and their String function displays these names:
+
+ p := TCPPort(80)
+ fmt.Printf("Number: %d String: %v", p, p)
+ // Prints: "Number: 80 String: 80(http)"
+
+Modifying Decode Behavior
+
+layers links together decoding through its enumerations. For example, after
+decoding layer type Ethernet, it uses Ethernet.EthernetType as its next decoder.
+All enumerations that act as decoders, like EthernetType, can be modified by
+users depending on their preferences. For example, if you have a spiffy new
+IPv4 decoder that works way better than the one built into layers, you can do
+this:
+
+ var mySpiffyIPv4Decoder gopacket.Decoder = ...
+ layers.EthernetTypeMetadata[EthernetTypeIPv4].DecodeWith = mySpiffyIPv4Decoder
+
+This will make all future ethernet packets use your new decoder to decode IPv4
+packets, instead of the built-in decoder used by gopacket.
+*/
+package layers
diff --git a/vendor/github.com/google/gopacket/layers/dot11.go b/vendor/github.com/google/gopacket/layers/dot11.go
new file mode 100644
index 0000000..3843d70
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot11.go
@@ -0,0 +1,2105 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// See http://standards.ieee.org/findstds/standard/802.11-2012.html for info on
+// all of the layers in this file.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// Dot11Flags contains the set of 8 flags in the IEEE 802.11 frame control
+// header, all in one place.
+type Dot11Flags uint8
+
+const (
+ Dot11FlagsToDS Dot11Flags = 1 << iota
+ Dot11FlagsFromDS
+ Dot11FlagsMF
+ Dot11FlagsRetry
+ Dot11FlagsPowerManagement
+ Dot11FlagsMD
+ Dot11FlagsWEP
+ Dot11FlagsOrder
+)
+
+func (d Dot11Flags) ToDS() bool {
+ return d&Dot11FlagsToDS != 0
+}
+func (d Dot11Flags) FromDS() bool {
+ return d&Dot11FlagsFromDS != 0
+}
+func (d Dot11Flags) MF() bool {
+ return d&Dot11FlagsMF != 0
+}
+func (d Dot11Flags) Retry() bool {
+ return d&Dot11FlagsRetry != 0
+}
+func (d Dot11Flags) PowerManagement() bool {
+ return d&Dot11FlagsPowerManagement != 0
+}
+func (d Dot11Flags) MD() bool {
+ return d&Dot11FlagsMD != 0
+}
+func (d Dot11Flags) WEP() bool {
+ return d&Dot11FlagsWEP != 0
+}
+func (d Dot11Flags) Order() bool {
+ return d&Dot11FlagsOrder != 0
+}
+
+// String provides a human readable string for Dot11Flags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Flags value, not its string.
+func (a Dot11Flags) String() string {
+ var out bytes.Buffer
+ if a.ToDS() {
+ out.WriteString("TO-DS,")
+ }
+ if a.FromDS() {
+ out.WriteString("FROM-DS,")
+ }
+ if a.MF() {
+ out.WriteString("MF,")
+ }
+ if a.Retry() {
+ out.WriteString("Retry,")
+ }
+ if a.PowerManagement() {
+ out.WriteString("PowerManagement,")
+ }
+ if a.MD() {
+ out.WriteString("MD,")
+ }
+ if a.WEP() {
+ out.WriteString("WEP,")
+ }
+ if a.Order() {
+ out.WriteString("Order,")
+ }
+
+ if length := out.Len(); length > 0 {
+ return string(out.Bytes()[:length-1]) // strip final comma
+ }
+ return ""
+}
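+
+// For example (illustrative), Dot11Flags(0x41) has Dot11FlagsToDS (bit 0) and
+// Dot11FlagsWEP (bit 6) set, so its String method returns "TO-DS,WEP".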
+
+type Dot11Reason uint16
+
+// TODO: Verify these reasons, and append more reasons if necessary.
+
+const (
+ Dot11ReasonReserved Dot11Reason = 1
+ Dot11ReasonUnspecified Dot11Reason = 2
+ Dot11ReasonAuthExpired Dot11Reason = 3
+ Dot11ReasonDeauthStLeaving Dot11Reason = 4
+ Dot11ReasonInactivity Dot11Reason = 5
+ Dot11ReasonApFull Dot11Reason = 6
+ Dot11ReasonClass2FromNonAuth Dot11Reason = 7
+ Dot11ReasonClass3FromNonAss Dot11Reason = 8
+ Dot11ReasonDisasStLeaving Dot11Reason = 9
+ Dot11ReasonStNotAuth Dot11Reason = 10
+)
+
+// String provides a human readable string for Dot11Reason.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Reason value, not its string.
+func (a Dot11Reason) String() string {
+ switch a {
+ case Dot11ReasonReserved:
+ return "Reserved"
+ case Dot11ReasonUnspecified:
+ return "Unspecified"
+ case Dot11ReasonAuthExpired:
+ return "Auth. expired"
+ case Dot11ReasonDeauthStLeaving:
+ return "Deauth. st. leaving"
+ case Dot11ReasonInactivity:
+ return "Inactivity"
+ case Dot11ReasonApFull:
+ return "Ap. full"
+ case Dot11ReasonClass2FromNonAuth:
+ return "Class2 from non auth."
+ case Dot11ReasonClass3FromNonAss:
+ return "Class3 from non ass."
+ case Dot11ReasonDisasStLeaving:
+ return "Disass st. leaving"
+ case Dot11ReasonStNotAuth:
+ return "St. not auth."
+ default:
+ return "Unknown reason"
+ }
+}
+
+type Dot11Status uint16
+
+const (
+ Dot11StatusSuccess Dot11Status = 0
+ Dot11StatusFailure Dot11Status = 1 // Unspecified failure
+ Dot11StatusCannotSupportAllCapabilities Dot11Status = 10 // Cannot support all requested capabilities in the Capability Information field
+ Dot11StatusInabilityExistsAssociation Dot11Status = 11 // Reassociation denied due to inability to confirm that association exists
+ Dot11StatusAssociationDenied Dot11Status = 12 // Association denied due to reason outside the scope of this standard
+ Dot11StatusAlgorithmUnsupported Dot11Status = 13 // Responding station does not support the specified authentication algorithm
+ Dot11StatusOufOfExpectedSequence Dot11Status = 14 // Received an Authentication frame with authentication transaction sequence number out of expected sequence
+ Dot11StatusChallengeFailure Dot11Status = 15 // Authentication rejected because of challenge failure
+ Dot11StatusTimeout Dot11Status = 16 // Authentication rejected due to timeout waiting for next frame in sequence
+ Dot11StatusAPUnableToHandle Dot11Status = 17 // Association denied because AP is unable to handle additional associated stations
+ Dot11StatusRateUnsupported Dot11Status = 18 // Association denied due to requesting station not supporting all of the data rates in the BSSBasicRateSet parameter
+)
+
+// String provides a human readable string for Dot11Status.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Status value, not its string.
+func (a Dot11Status) String() string {
+ switch a {
+ case Dot11StatusSuccess:
+ return "success"
+ case Dot11StatusFailure:
+ return "failure"
+ case Dot11StatusCannotSupportAllCapabilities:
+ return "cannot-support-all-capabilities"
+ case Dot11StatusInabilityExistsAssociation:
+ return "inability-exists-association"
+ case Dot11StatusAssociationDenied:
+ return "association-denied"
+ case Dot11StatusAlgorithmUnsupported:
+ return "algorithm-unsupported"
+ case Dot11StatusOufOfExpectedSequence:
+ return "out-of-expected-sequence"
+ case Dot11StatusChallengeFailure:
+ return "challenge-failure"
+ case Dot11StatusTimeout:
+ return "timeout"
+ case Dot11StatusAPUnableToHandle:
+ return "ap-unable-to-handle"
+ case Dot11StatusRateUnsupported:
+ return "rate-unsupported"
+ default:
+ return "unknown status"
+ }
+}
+
+type Dot11AckPolicy uint8
+
+const (
+ Dot11AckPolicyNormal Dot11AckPolicy = 0
+ Dot11AckPolicyNone Dot11AckPolicy = 1
+ Dot11AckPolicyNoExplicit Dot11AckPolicy = 2
+ Dot11AckPolicyBlock Dot11AckPolicy = 3
+)
+
+// String provides a human readable string for Dot11AckPolicy.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11AckPolicy value, not its string.
+func (a Dot11AckPolicy) String() string {
+ switch a {
+ case Dot11AckPolicyNormal:
+ return "normal-ack"
+ case Dot11AckPolicyNone:
+ return "no-ack"
+ case Dot11AckPolicyNoExplicit:
+ return "no-explicit-ack"
+ case Dot11AckPolicyBlock:
+ return "block-ack"
+ default:
+ return "unknown-ack-policy"
+ }
+}
+
+type Dot11Algorithm uint16
+
+const (
+ Dot11AlgorithmOpen Dot11Algorithm = 0
+ Dot11AlgorithmSharedKey Dot11Algorithm = 1
+)
+
+// String provides a human readable string for Dot11Algorithm.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Algorithm value, not its string.
+func (a Dot11Algorithm) String() string {
+ switch a {
+ case Dot11AlgorithmOpen:
+ return "open"
+ case Dot11AlgorithmSharedKey:
+ return "shared-key"
+ default:
+ return "unknown-algorithm"
+ }
+}
+
+type Dot11InformationElementID uint8
+
+const (
+ Dot11InformationElementIDSSID Dot11InformationElementID = 0
+ Dot11InformationElementIDRates Dot11InformationElementID = 1
+ Dot11InformationElementIDFHSet Dot11InformationElementID = 2
+ Dot11InformationElementIDDSSet Dot11InformationElementID = 3
+ Dot11InformationElementIDCFSet Dot11InformationElementID = 4
+ Dot11InformationElementIDTIM Dot11InformationElementID = 5
+ Dot11InformationElementIDIBSSSet Dot11InformationElementID = 6
+ Dot11InformationElementIDCountryInfo Dot11InformationElementID = 7
+ Dot11InformationElementIDHoppingPatternParam Dot11InformationElementID = 8
+ Dot11InformationElementIDHoppingPatternTable Dot11InformationElementID = 9
+ Dot11InformationElementIDRequest Dot11InformationElementID = 10
+ Dot11InformationElementIDQBSSLoadElem Dot11InformationElementID = 11
+ Dot11InformationElementIDEDCAParamSet Dot11InformationElementID = 12
+ Dot11InformationElementIDTrafficSpec Dot11InformationElementID = 13
+ Dot11InformationElementIDTrafficClass Dot11InformationElementID = 14
+ Dot11InformationElementIDSchedule Dot11InformationElementID = 15
+ Dot11InformationElementIDChallenge Dot11InformationElementID = 16
+ Dot11InformationElementIDPowerConst Dot11InformationElementID = 32
+ Dot11InformationElementIDPowerCapability Dot11InformationElementID = 33
+ Dot11InformationElementIDTPCRequest Dot11InformationElementID = 34
+ Dot11InformationElementIDTPCReport Dot11InformationElementID = 35
+ Dot11InformationElementIDSupportedChannels Dot11InformationElementID = 36
+ Dot11InformationElementIDSwitchChannelAnnounce Dot11InformationElementID = 37
+ Dot11InformationElementIDMeasureRequest Dot11InformationElementID = 38
+ Dot11InformationElementIDMeasureReport Dot11InformationElementID = 39
+ Dot11InformationElementIDQuiet Dot11InformationElementID = 40
+ Dot11InformationElementIDIBSSDFS Dot11InformationElementID = 41
+ Dot11InformationElementIDERPInfo Dot11InformationElementID = 42
+ Dot11InformationElementIDTSDelay Dot11InformationElementID = 43
+ Dot11InformationElementIDTCLASProcessing Dot11InformationElementID = 44
+ Dot11InformationElementIDHTCapabilities Dot11InformationElementID = 45
+ Dot11InformationElementIDQOSCapability Dot11InformationElementID = 46
+ Dot11InformationElementIDERPInfo2 Dot11InformationElementID = 47
+ Dot11InformationElementIDRSNInfo Dot11InformationElementID = 48
+ Dot11InformationElementIDESRates Dot11InformationElementID = 50
+ Dot11InformationElementIDAPChannelReport Dot11InformationElementID = 51
+ Dot11InformationElementIDNeighborReport Dot11InformationElementID = 52
+ Dot11InformationElementIDRCPI Dot11InformationElementID = 53
+ Dot11InformationElementIDMobilityDomain Dot11InformationElementID = 54
+ Dot11InformationElementIDFastBSSTrans Dot11InformationElementID = 55
+ Dot11InformationElementIDTimeoutInt Dot11InformationElementID = 56
+ Dot11InformationElementIDRICData Dot11InformationElementID = 57
+ Dot11InformationElementIDDSERegisteredLoc Dot11InformationElementID = 58
+ Dot11InformationElementIDSuppOperatingClass Dot11InformationElementID = 59
+ Dot11InformationElementIDExtChanSwitchAnnounce Dot11InformationElementID = 60
+ Dot11InformationElementIDHTInfo Dot11InformationElementID = 61
+ Dot11InformationElementIDSecChanOffset Dot11InformationElementID = 62
+ Dot11InformationElementIDBSSAverageAccessDelay Dot11InformationElementID = 63
+ Dot11InformationElementIDAntenna Dot11InformationElementID = 64
+ Dot11InformationElementIDRSNI Dot11InformationElementID = 65
+ Dot11InformationElementIDMeasurePilotTrans Dot11InformationElementID = 66
+ Dot11InformationElementIDBSSAvailAdmCapacity Dot11InformationElementID = 67
+ Dot11InformationElementIDBSSACAccDelayWAPIParam Dot11InformationElementID = 68
+ Dot11InformationElementIDTimeAdvertisement Dot11InformationElementID = 69
+ Dot11InformationElementIDRMEnabledCapabilities Dot11InformationElementID = 70
+ Dot11InformationElementIDMultipleBSSID Dot11InformationElementID = 71
+ Dot11InformationElementID2040BSSCoExist Dot11InformationElementID = 72
+ Dot11InformationElementID2040BSSIntChanReport Dot11InformationElementID = 73
+ Dot11InformationElementIDOverlapBSSScanParam Dot11InformationElementID = 74
+ Dot11InformationElementIDRICDescriptor Dot11InformationElementID = 75
+ Dot11InformationElementIDManagementMIC Dot11InformationElementID = 76
+ Dot11InformationElementIDEventRequest Dot11InformationElementID = 78
+ Dot11InformationElementIDEventReport Dot11InformationElementID = 79
+ Dot11InformationElementIDDiagnosticRequest Dot11InformationElementID = 80
+ Dot11InformationElementIDDiagnosticReport Dot11InformationElementID = 81
+ Dot11InformationElementIDLocationParam Dot11InformationElementID = 82
+ Dot11InformationElementIDNonTransBSSIDCapability Dot11InformationElementID = 83
+ Dot11InformationElementIDSSIDList Dot11InformationElementID = 84
+ Dot11InformationElementIDMultipleBSSIDIndex Dot11InformationElementID = 85
+ Dot11InformationElementIDFMSDescriptor Dot11InformationElementID = 86
+ Dot11InformationElementIDFMSRequest Dot11InformationElementID = 87
+ Dot11InformationElementIDFMSResponse Dot11InformationElementID = 88
+ Dot11InformationElementIDQOSTrafficCapability Dot11InformationElementID = 89
+ Dot11InformationElementIDBSSMaxIdlePeriod Dot11InformationElementID = 90
+ Dot11InformationElementIDTFSRequest Dot11InformationElementID = 91
+ Dot11InformationElementIDTFSResponse Dot11InformationElementID = 92
+ Dot11InformationElementIDWNMSleepMode Dot11InformationElementID = 93
+ Dot11InformationElementIDTIMBroadcastRequest Dot11InformationElementID = 94
+ Dot11InformationElementIDTIMBroadcastResponse Dot11InformationElementID = 95
+ Dot11InformationElementIDCollInterferenceReport Dot11InformationElementID = 96
+ Dot11InformationElementIDChannelUsage Dot11InformationElementID = 97
+ Dot11InformationElementIDTimeZone Dot11InformationElementID = 98
+ Dot11InformationElementIDDMSRequest Dot11InformationElementID = 99
+ Dot11InformationElementIDDMSResponse Dot11InformationElementID = 100
+ Dot11InformationElementIDLinkIdentifier Dot11InformationElementID = 101
+ Dot11InformationElementIDWakeupSchedule Dot11InformationElementID = 102
+ Dot11InformationElementIDChannelSwitchTiming Dot11InformationElementID = 104
+ Dot11InformationElementIDPTIControl Dot11InformationElementID = 105
+ Dot11InformationElementIDPUBufferStatus Dot11InformationElementID = 106
+ Dot11InformationElementIDInterworking Dot11InformationElementID = 107
+ Dot11InformationElementIDAdvertisementProtocol Dot11InformationElementID = 108
+ Dot11InformationElementIDExpBWRequest Dot11InformationElementID = 109
+ Dot11InformationElementIDQOSMapSet Dot11InformationElementID = 110
+ Dot11InformationElementIDRoamingConsortium Dot11InformationElementID = 111
+ Dot11InformationElementIDEmergencyAlertIdentifier Dot11InformationElementID = 112
+ Dot11InformationElementIDMeshConfiguration Dot11InformationElementID = 113
+ Dot11InformationElementIDMeshID Dot11InformationElementID = 114
+ Dot11InformationElementIDMeshLinkMetricReport Dot11InformationElementID = 115
+ Dot11InformationElementIDCongestionNotification Dot11InformationElementID = 116
+ Dot11InformationElementIDMeshPeeringManagement Dot11InformationElementID = 117
+ Dot11InformationElementIDMeshChannelSwitchParam Dot11InformationElementID = 118
+ Dot11InformationElementIDMeshAwakeWindows Dot11InformationElementID = 119
+ Dot11InformationElementIDBeaconTiming Dot11InformationElementID = 120
+ Dot11InformationElementIDMCCAOPSetupRequest Dot11InformationElementID = 121
+ Dot11InformationElementIDMCCAOPSetupReply Dot11InformationElementID = 122
+ Dot11InformationElementIDMCCAOPAdvertisement Dot11InformationElementID = 123
+ Dot11InformationElementIDMCCAOPTeardown Dot11InformationElementID = 124
+ Dot11InformationElementIDGateAnnouncement Dot11InformationElementID = 125
+ Dot11InformationElementIDRootAnnouncement Dot11InformationElementID = 126
+ Dot11InformationElementIDExtCapability Dot11InformationElementID = 127
+ Dot11InformationElementIDAgereProprietary Dot11InformationElementID = 128
+ Dot11InformationElementIDPathRequest Dot11InformationElementID = 130
+ Dot11InformationElementIDPathReply Dot11InformationElementID = 131
+ Dot11InformationElementIDPathError Dot11InformationElementID = 132
+ Dot11InformationElementIDCiscoCCX1CKIPDeviceName Dot11InformationElementID = 133
+ Dot11InformationElementIDCiscoCCX2 Dot11InformationElementID = 136
+ Dot11InformationElementIDProxyUpdate Dot11InformationElementID = 137
+ Dot11InformationElementIDProxyUpdateConfirmation Dot11InformationElementID = 138
+ Dot11InformationElementIDAuthMeshPerringExch Dot11InformationElementID = 139
+ Dot11InformationElementIDMIC Dot11InformationElementID = 140
+ Dot11InformationElementIDDestinationURI Dot11InformationElementID = 141
+ Dot11InformationElementIDUAPSDCoexistence Dot11InformationElementID = 142
+ Dot11InformationElementIDWakeupSchedule80211ad Dot11InformationElementID = 143
+ Dot11InformationElementIDExtendedSchedule Dot11InformationElementID = 144
+ Dot11InformationElementIDSTAAvailability Dot11InformationElementID = 145
+ Dot11InformationElementIDDMGTSPEC Dot11InformationElementID = 146
+ Dot11InformationElementIDNextDMGATI Dot11InformationElementID = 147
+ Dot11InformationElementIDDMSCapabilities Dot11InformationElementID = 148
+ Dot11InformationElementIDCiscoUnknown95 Dot11InformationElementID = 149
+ Dot11InformationElementIDVendor2 Dot11InformationElementID = 150
+ Dot11InformationElementIDDMGOperating Dot11InformationElementID = 151
+ Dot11InformationElementIDDMGBSSParamChange Dot11InformationElementID = 152
+ Dot11InformationElementIDDMGBeamRefinement Dot11InformationElementID = 153
+ Dot11InformationElementIDChannelMeasFeedback Dot11InformationElementID = 154
+ Dot11InformationElementIDAwakeWindow Dot11InformationElementID = 157
+ Dot11InformationElementIDMultiBand Dot11InformationElementID = 158
+ Dot11InformationElementIDADDBAExtension Dot11InformationElementID = 159
+ Dot11InformationElementIDNEXTPCPList Dot11InformationElementID = 160
+ Dot11InformationElementIDPCPHandover Dot11InformationElementID = 161
+ Dot11InformationElementIDDMGLinkMargin Dot11InformationElementID = 162
+ Dot11InformationElementIDSwitchingStream Dot11InformationElementID = 163
+ Dot11InformationElementIDSessionTransmission Dot11InformationElementID = 164
+ Dot11InformationElementIDDynamicTonePairReport Dot11InformationElementID = 165
+ Dot11InformationElementIDClusterReport Dot11InformationElementID = 166
+ Dot11InformationElementIDRelayCapabilities Dot11InformationElementID = 167
+ Dot11InformationElementIDRelayTransferParameter Dot11InformationElementID = 168
+ Dot11InformationElementIDBeamlinkMaintenance Dot11InformationElementID = 169
+ Dot11InformationElementIDMultipleMacSublayers Dot11InformationElementID = 170
+ Dot11InformationElementIDUPID Dot11InformationElementID = 171
+ Dot11InformationElementIDDMGLinkAdaptionAck Dot11InformationElementID = 172
+ Dot11InformationElementIDSymbolProprietary Dot11InformationElementID = 173
+ Dot11InformationElementIDMCCAOPAdvertOverview Dot11InformationElementID = 174
+ Dot11InformationElementIDQuietPeriodRequest Dot11InformationElementID = 175
+ Dot11InformationElementIDQuietPeriodResponse Dot11InformationElementID = 177
+ Dot11InformationElementIDECPACPolicy Dot11InformationElementID = 182
+ Dot11InformationElementIDClusterTimeOffset Dot11InformationElementID = 183
+ Dot11InformationElementIDAntennaSectorID Dot11InformationElementID = 190
+ Dot11InformationElementIDVHTCapabilities Dot11InformationElementID = 191
+ Dot11InformationElementIDVHTOperation Dot11InformationElementID = 192
+ Dot11InformationElementIDExtendedBSSLoad Dot11InformationElementID = 193
+ Dot11InformationElementIDWideBWChannelSwitch Dot11InformationElementID = 194
+ Dot11InformationElementIDVHTTxPowerEnvelope Dot11InformationElementID = 195
+ Dot11InformationElementIDChannelSwitchWrapper Dot11InformationElementID = 196
+ Dot11InformationElementIDOperatingModeNotification Dot11InformationElementID = 199
+ Dot11InformationElementIDUPSIM Dot11InformationElementID = 200
+ Dot11InformationElementIDReducedNeighborReport Dot11InformationElementID = 201
+ Dot11InformationElementIDTVHTOperation Dot11InformationElementID = 202
+ Dot11InformationElementIDDeviceLocation Dot11InformationElementID = 204
+ Dot11InformationElementIDWhiteSpaceMap Dot11InformationElementID = 205
+ Dot11InformationElementIDFineTuningMeasureParams Dot11InformationElementID = 206
+ Dot11InformationElementIDVendor Dot11InformationElementID = 221
+)
+
+// String provides a human readable string for Dot11InformationElementID.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11InformationElementID value,
+// not its string.
+func (a Dot11InformationElementID) String() string {
+ switch a {
+ case Dot11InformationElementIDSSID:
+ return "SSID parameter set"
+ case Dot11InformationElementIDRates:
+ return "Supported Rates"
+ case Dot11InformationElementIDFHSet:
+ return "FH Parameter set"
+ case Dot11InformationElementIDDSSet:
+ return "DS Parameter set"
+ case Dot11InformationElementIDCFSet:
+ return "CF Parameter set"
+ case Dot11InformationElementIDTIM:
+ return "Traffic Indication Map (TIM)"
+ case Dot11InformationElementIDIBSSSet:
+ return "IBSS Parameter set"
+ case Dot11InformationElementIDCountryInfo:
+ return "Country Information"
+ case Dot11InformationElementIDHoppingPatternParam:
+ return "Hopping Pattern Parameters"
+ case Dot11InformationElementIDHoppingPatternTable:
+ return "Hopping Pattern Table"
+ case Dot11InformationElementIDRequest:
+ return "Request"
+ case Dot11InformationElementIDQBSSLoadElem:
+ return "QBSS Load Element"
+ case Dot11InformationElementIDEDCAParamSet:
+ return "EDCA Parameter Set"
+ case Dot11InformationElementIDTrafficSpec:
+ return "Traffic Specification"
+ case Dot11InformationElementIDTrafficClass:
+ return "Traffic Classification"
+ case Dot11InformationElementIDSchedule:
+ return "Schedule"
+ case Dot11InformationElementIDChallenge:
+ return "Challenge text"
+ case Dot11InformationElementIDPowerConst:
+ return "Power Constraint"
+ case Dot11InformationElementIDPowerCapability:
+ return "Power Capability"
+ case Dot11InformationElementIDTPCRequest:
+ return "TPC Request"
+ case Dot11InformationElementIDTPCReport:
+ return "TPC Report"
+ case Dot11InformationElementIDSupportedChannels:
+ return "Supported Channels"
+ case Dot11InformationElementIDSwitchChannelAnnounce:
+ return "Channel Switch Announcement"
+ case Dot11InformationElementIDMeasureRequest:
+ return "Measurement Request"
+ case Dot11InformationElementIDMeasureReport:
+ return "Measurement Report"
+ case Dot11InformationElementIDQuiet:
+ return "Quiet"
+ case Dot11InformationElementIDIBSSDFS:
+ return "IBSS DFS"
+ case Dot11InformationElementIDERPInfo:
+ return "ERP Information"
+ case Dot11InformationElementIDTSDelay:
+ return "TS Delay"
+ case Dot11InformationElementIDTCLASProcessing:
+ return "TCLAS Processing"
+ case Dot11InformationElementIDHTCapabilities:
+ return "HT Capabilities (802.11n D1.10)"
+ case Dot11InformationElementIDQOSCapability:
+ return "QOS Capability"
+ case Dot11InformationElementIDERPInfo2:
+ return "ERP Information-2"
+ case Dot11InformationElementIDRSNInfo:
+ return "RSN Information"
+ case Dot11InformationElementIDESRates:
+ return "Extended Supported Rates"
+ case Dot11InformationElementIDAPChannelReport:
+ return "AP Channel Report"
+ case Dot11InformationElementIDNeighborReport:
+ return "Neighbor Report"
+ case Dot11InformationElementIDRCPI:
+ return "RCPI"
+ case Dot11InformationElementIDMobilityDomain:
+ return "Mobility Domain"
+ case Dot11InformationElementIDFastBSSTrans:
+ return "Fast BSS Transition"
+ case Dot11InformationElementIDTimeoutInt:
+ return "Timeout Interval"
+ case Dot11InformationElementIDRICData:
+ return "RIC Data"
+ case Dot11InformationElementIDDSERegisteredLoc:
+ return "DSE Registered Location"
+ case Dot11InformationElementIDSuppOperatingClass:
+ return "Supported Operating Classes"
+ case Dot11InformationElementIDExtChanSwitchAnnounce:
+ return "Extended Channel Switch Announcement"
+ case Dot11InformationElementIDHTInfo:
+ return "HT Information (802.11n D1.10)"
+ case Dot11InformationElementIDSecChanOffset:
+ return "Secondary Channel Offset (802.11n D1.10)"
+ case Dot11InformationElementIDBSSAverageAccessDelay:
+ return "BSS Average Access Delay"
+ case Dot11InformationElementIDAntenna:
+ return "Antenna"
+ case Dot11InformationElementIDRSNI:
+ return "RSNI"
+ case Dot11InformationElementIDMeasurePilotTrans:
+ return "Measurement Pilot Transmission"
+ case Dot11InformationElementIDBSSAvailAdmCapacity:
+ return "BSS Available Admission Capacity"
+ case Dot11InformationElementIDBSSACAccDelayWAPIParam:
+ return "BSS AC Access Delay/WAPI Parameter Set"
+ case Dot11InformationElementIDTimeAdvertisement:
+ return "Time Advertisement"
+ case Dot11InformationElementIDRMEnabledCapabilities:
+ return "RM Enabled Capabilities"
+ case Dot11InformationElementIDMultipleBSSID:
+ return "Multiple BSSID"
+ case Dot11InformationElementID2040BSSCoExist:
+ return "20/40 BSS Coexistence"
+ case Dot11InformationElementID2040BSSIntChanReport:
+ return "20/40 BSS Intolerant Channel Report"
+ case Dot11InformationElementIDOverlapBSSScanParam:
+ return "Overlapping BSS Scan Parameters"
+ case Dot11InformationElementIDRICDescriptor:
+ return "RIC Descriptor"
+ case Dot11InformationElementIDManagementMIC:
+ return "Management MIC"
+ case Dot11InformationElementIDEventRequest:
+ return "Event Request"
+ case Dot11InformationElementIDEventReport:
+ return "Event Report"
+ case Dot11InformationElementIDDiagnosticRequest:
+ return "Diagnostic Request"
+ case Dot11InformationElementIDDiagnosticReport:
+ return "Diagnostic Report"
+ case Dot11InformationElementIDLocationParam:
+ return "Location Parameters"
+ case Dot11InformationElementIDNonTransBSSIDCapability:
+ return "Non Transmitted BSSID Capability"
+ case Dot11InformationElementIDSSIDList:
+ return "SSID List"
+ case Dot11InformationElementIDMultipleBSSIDIndex:
+ return "Multiple BSSID Index"
+ case Dot11InformationElementIDFMSDescriptor:
+ return "FMS Descriptor"
+ case Dot11InformationElementIDFMSRequest:
+ return "FMS Request"
+ case Dot11InformationElementIDFMSResponse:
+ return "FMS Response"
+ case Dot11InformationElementIDQOSTrafficCapability:
+ return "QoS Traffic Capability"
+ case Dot11InformationElementIDBSSMaxIdlePeriod:
+ return "BSS Max Idle Period"
+ case Dot11InformationElementIDTFSRequest:
+ return "TFS Request"
+ case Dot11InformationElementIDTFSResponse:
+ return "TFS Response"
+ case Dot11InformationElementIDWNMSleepMode:
+ return "WNM-Sleep Mode"
+ case Dot11InformationElementIDTIMBroadcastRequest:
+ return "TIM Broadcast Request"
+ case Dot11InformationElementIDTIMBroadcastResponse:
+ return "TIM Broadcast Response"
+ case Dot11InformationElementIDCollInterferenceReport:
+ return "Collocated Interference Report"
+ case Dot11InformationElementIDChannelUsage:
+ return "Channel Usage"
+ case Dot11InformationElementIDTimeZone:
+ return "Time Zone"
+ case Dot11InformationElementIDDMSRequest:
+ return "DMS Request"
+ case Dot11InformationElementIDDMSResponse:
+ return "DMS Response"
+ case Dot11InformationElementIDLinkIdentifier:
+ return "Link Identifier"
+ case Dot11InformationElementIDWakeupSchedule:
+ return "Wakeup Schedule"
+ case Dot11InformationElementIDChannelSwitchTiming:
+ return "Channel Switch Timing"
+ case Dot11InformationElementIDPTIControl:
+ return "PTI Control"
+ case Dot11InformationElementIDPUBufferStatus:
+ return "PU Buffer Status"
+ case Dot11InformationElementIDInterworking:
+ return "Interworking"
+ case Dot11InformationElementIDAdvertisementProtocol:
+ return "Advertisement Protocol"
+ case Dot11InformationElementIDExpBWRequest:
+ return "Expedited Bandwidth Request"
+ case Dot11InformationElementIDQOSMapSet:
+ return "QoS Map Set"
+ case Dot11InformationElementIDRoamingConsortium:
+ return "Roaming Consortium"
+ case Dot11InformationElementIDEmergencyAlertIdentifier:
+ return "Emergency Alert Identifier"
+ case Dot11InformationElementIDMeshConfiguration:
+ return "Mesh Configuration"
+ case Dot11InformationElementIDMeshID:
+ return "Mesh ID"
+ case Dot11InformationElementIDMeshLinkMetricReport:
+ return "Mesh Link Metric Report"
+ case Dot11InformationElementIDCongestionNotification:
+ return "Congestion Notification"
+ case Dot11InformationElementIDMeshPeeringManagement:
+ return "Mesh Peering Management"
+ case Dot11InformationElementIDMeshChannelSwitchParam:
+ return "Mesh Channel Switch Parameters"
+ case Dot11InformationElementIDMeshAwakeWindows:
+ return "Mesh Awake Windows"
+ case Dot11InformationElementIDBeaconTiming:
+ return "Beacon Timing"
+ case Dot11InformationElementIDMCCAOPSetupRequest:
+ return "MCCAOP Setup Request"
+ case Dot11InformationElementIDMCCAOPSetupReply:
+ return "MCCAOP SETUP Reply"
+ case Dot11InformationElementIDMCCAOPAdvertisement:
+ return "MCCAOP Advertisement"
+ case Dot11InformationElementIDMCCAOPTeardown:
+ return "MCCAOP Teardown"
+ case Dot11InformationElementIDGateAnnouncement:
+ return "Gate Announcement"
+ case Dot11InformationElementIDRootAnnouncement:
+ return "Root Announcement"
+ case Dot11InformationElementIDExtCapability:
+ return "Extended Capabilities"
+ case Dot11InformationElementIDAgereProprietary:
+ return "Agere Proprietary"
+ case Dot11InformationElementIDPathRequest:
+ return "Path Request"
+ case Dot11InformationElementIDPathReply:
+ return "Path Reply"
+ case Dot11InformationElementIDPathError:
+ return "Path Error"
+ case Dot11InformationElementIDCiscoCCX1CKIPDeviceName:
+ return "Cisco CCX1 CKIP + Device Name"
+ case Dot11InformationElementIDCiscoCCX2:
+ return "Cisco CCX2"
+ case Dot11InformationElementIDProxyUpdate:
+ return "Proxy Update"
+ case Dot11InformationElementIDProxyUpdateConfirmation:
+ return "Proxy Update Confirmation"
+ case Dot11InformationElementIDAuthMeshPerringExch:
+ return "Auhenticated Mesh Perring Exchange"
+ case Dot11InformationElementIDMIC:
+ return "MIC (Message Integrity Code)"
+ case Dot11InformationElementIDDestinationURI:
+ return "Destination URI"
+ case Dot11InformationElementIDUAPSDCoexistence:
+ return "U-APSD Coexistence"
+ case Dot11InformationElementIDWakeupSchedule80211ad:
+ return "Wakeup Schedule 802.11ad"
+ case Dot11InformationElementIDExtendedSchedule:
+ return "Extended Schedule"
+ case Dot11InformationElementIDSTAAvailability:
+ return "STA Availability"
+ case Dot11InformationElementIDDMGTSPEC:
+ return "DMG TSPEC"
+ case Dot11InformationElementIDNextDMGATI:
+ return "Next DMG ATI"
+ case Dot11InformationElementIDDMSCapabilities:
+ return "DMG Capabilities"
+ case Dot11InformationElementIDCiscoUnknown95:
+ return "Cisco Unknown 95"
+ case Dot11InformationElementIDVendor2:
+ return "Vendor Specific"
+ case Dot11InformationElementIDDMGOperating:
+ return "DMG Operating"
+ case Dot11InformationElementIDDMGBSSParamChange:
+ return "DMG BSS Parameter Change"
+ case Dot11InformationElementIDDMGBeamRefinement:
+ return "DMG Beam Refinement"
+ case Dot11InformationElementIDChannelMeasFeedback:
+ return "Channel Measurement Feedback"
+ case Dot11InformationElementIDAwakeWindow:
+ return "Awake Window"
+ case Dot11InformationElementIDMultiBand:
+ return "Multi Band"
+ case Dot11InformationElementIDADDBAExtension:
+ return "ADDBA Extension"
+ case Dot11InformationElementIDNEXTPCPList:
+ return "NEXTPCP List"
+ case Dot11InformationElementIDPCPHandover:
+ return "PCP Handover"
+ case Dot11InformationElementIDDMGLinkMargin:
+ return "DMG Link Margin"
+ case Dot11InformationElementIDSwitchingStream:
+ return "Switching Stream"
+ case Dot11InformationElementIDSessionTransmission:
+ return "Session Transmission"
+ case Dot11InformationElementIDDynamicTonePairReport:
+ return "Dynamic Tone Pairing Report"
+ case Dot11InformationElementIDClusterReport:
+ return "Cluster Report"
+ case Dot11InformationElementIDRelayCapabilities:
+ return "Relay Capabilities"
+ case Dot11InformationElementIDRelayTransferParameter:
+ return "Relay Transfer Parameter"
+ case Dot11InformationElementIDBeamlinkMaintenance:
+ return "Beamlink Maintenance"
+ case Dot11InformationElementIDMultipleMacSublayers:
+ return "Multiple MAC Sublayers"
+ case Dot11InformationElementIDUPID:
+ return "U-PID"
+ case Dot11InformationElementIDDMGLinkAdaptionAck:
+ return "DMG Link Adaption Acknowledgment"
+ case Dot11InformationElementIDSymbolProprietary:
+ return "Symbol Proprietary"
+ case Dot11InformationElementIDMCCAOPAdvertOverview:
+ return "MCCAOP Advertisement Overview"
+ case Dot11InformationElementIDQuietPeriodRequest:
+ return "Quiet Period Request"
+ case Dot11InformationElementIDQuietPeriodResponse:
+ return "Quiet Period Response"
+ case Dot11InformationElementIDECPACPolicy:
+ return "ECPAC Policy"
+ case Dot11InformationElementIDClusterTimeOffset:
+ return "Cluster Time Offset"
+ case Dot11InformationElementIDAntennaSectorID:
+ return "Antenna Sector ID"
+ case Dot11InformationElementIDVHTCapabilities:
+ return "VHT Capabilities (IEEE Std 802.11ac/D3.1)"
+ case Dot11InformationElementIDVHTOperation:
+ return "VHT Operation (IEEE Std 802.11ac/D3.1)"
+ case Dot11InformationElementIDExtendedBSSLoad:
+ return "Extended BSS Load"
+ case Dot11InformationElementIDWideBWChannelSwitch:
+ return "Wide Bandwidth Channel Switch"
+ case Dot11InformationElementIDVHTTxPowerEnvelope:
+ return "VHT Tx Power Envelope (IEEE Std 802.11ac/D5.0)"
+ case Dot11InformationElementIDChannelSwitchWrapper:
+ return "Channel Switch Wrapper"
+ case Dot11InformationElementIDOperatingModeNotification:
+ return "Operating Mode Notification"
+ case Dot11InformationElementIDUPSIM:
+ return "UP SIM"
+ case Dot11InformationElementIDReducedNeighborReport:
+ return "Reduced Neighbor Report"
+ case Dot11InformationElementIDTVHTOperation:
+ return "TVHT Op"
+ case Dot11InformationElementIDDeviceLocation:
+ return "Device Location"
+ case Dot11InformationElementIDWhiteSpaceMap:
+ return "White Space Map"
+ case Dot11InformationElementIDFineTuningMeasureParams:
+ return "Fine Tuning Measure Parameters"
+ case Dot11InformationElementIDVendor:
+ return "Vendor"
+ default:
+ return "Unknown information element id"
+ }
+}
+
+// Dot11 provides an IEEE 802.11 base packet header.
+// See http://standards.ieee.org/findstds/standard/802.11-2012.html
+// for excruciating detail.
+type Dot11 struct {
+ BaseLayer
+ Type Dot11Type
+ Proto uint8
+ Flags Dot11Flags
+ DurationID uint16
+ Address1 net.HardwareAddr
+ Address2 net.HardwareAddr
+ Address3 net.HardwareAddr
+ Address4 net.HardwareAddr
+ SequenceNumber uint16
+ FragmentNumber uint16
+ Checksum uint32
+ QOS *Dot11QOS
+ HTControl *Dot11HTControl
+ DataLayer gopacket.Layer
+}
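+
+// parseDot11Sketch is an illustrative helper added for exposition; it is
+// not part of the original library. It shows the minimal decode path for
+// this layer: feed raw frame bytes to DecodeFromBytes (defined below)
+// and read the parsed header. gopacket.NilDecodeFeedback is the
+// library's no-op feedback sink.
+func parseDot11Sketch(raw []byte) (Dot11Type, error) {
+ d := &Dot11{}
+ if err := d.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
+ return 0, err
+ }
+ // MainType collapses the subtype into the Mgmt/Ctrl/Data class.
+ return d.Type.MainType(), nil
+}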
+
+type Dot11QOS struct {
+ TID uint8 /* Traffic IDentifier */
+ EOSP bool /* End of service period */
+ AckPolicy Dot11AckPolicy
+ TXOP uint8
+}
+
+type Dot11HTControl struct {
+ ACConstraint bool
+ RDGMorePPDU bool
+
+ VHT *Dot11HTControlVHT
+ HT *Dot11HTControlHT
+}
+
+type Dot11HTControlHT struct {
+ LinkAdapationControl *Dot11LinkAdapationControl
+ CalibrationPosition uint8
+ CalibrationSequence uint8
+ CSISteering uint8
+ NDPAnnouncement bool
+ DEI bool
+}
+
+type Dot11HTControlVHT struct {
+ MRQ bool
+ UnsolicitedMFB bool
+ MSI *uint8
+ MFB Dot11HTControlMFB
+ CompressedMSI *uint8
+ STBCIndication bool
+ MFSI *uint8
+ GID *uint8
+ CodingType *Dot11CodingType
+ FbTXBeamformed bool
+}
+
+type Dot11HTControlMFB struct {
+ NumSTS uint8
+ VHTMCS uint8
+ BW uint8
+ SNR int8
+}
+
+type Dot11LinkAdapationControl struct {
+ TRQ bool
+ MRQ bool
+ MSI uint8
+ MFSI uint8
+ ASEL *Dot11ASEL
+ MFB *uint8
+}
+
+type Dot11ASEL struct {
+ Command uint8
+ Data uint8
+}
+
+type Dot11CodingType uint8
+
+const (
+ Dot11CodingTypeBCC = 0
+ Dot11CodingTypeLDPC = 1
+)
+
+func (a Dot11CodingType) String() string {
+ switch a {
+ case Dot11CodingTypeBCC:
+ return "BCC"
+ case Dot11CodingTypeLDPC:
+ return "LDPC"
+ default:
+ return "Unknown coding type"
+ }
+}
+
+func (m *Dot11HTControlMFB) NoFeedBackPresent() bool {
+ return m.VHTMCS == 15 && m.NumSTS == 7
+}
+
+func decodeDot11(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(d)
+ if d.DataLayer != nil {
+ p.AddLayer(d.DataLayer)
+ }
+ return p.NextDecoder(d.NextLayerType())
+}
+
+func (m *Dot11) LayerType() gopacket.LayerType { return LayerTypeDot11 }
+func (m *Dot11) CanDecode() gopacket.LayerClass { return LayerTypeDot11 }
+func (m *Dot11) NextLayerType() gopacket.LayerType {
+ if m.DataLayer != nil {
+ if m.Flags.WEP() {
+ return LayerTypeDot11WEP
+ }
+ return m.DataLayer.(gopacket.DecodingLayer).NextLayerType()
+ }
+ return m.Type.LayerType()
+}
+
+func createU8(x uint8) *uint8 {
+ return &x
+}
+
+var dataDecodeMap = map[Dot11Type]func() gopacket.DecodingLayer{
+ Dot11TypeData: func() gopacket.DecodingLayer { return &Dot11Data{} },
+ Dot11TypeDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataCFAck{} },
+ Dot11TypeDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataCFPoll{} },
+ Dot11TypeDataCFAckPoll: func() gopacket.DecodingLayer { return &Dot11DataCFAckPoll{} },
+ Dot11TypeDataNull: func() gopacket.DecodingLayer { return &Dot11DataNull{} },
+ Dot11TypeDataCFAckNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckNoData{} },
+ Dot11TypeDataCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFPollNoData{} },
+ Dot11TypeDataCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckPollNoData{} },
+ Dot11TypeDataQOSData: func() gopacket.DecodingLayer { return &Dot11DataQOSData{} },
+ Dot11TypeDataQOSDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAck{} },
+ Dot11TypeDataQOSDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFPoll{} },
+ Dot11TypeDataQOSDataCFAckPoll: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAckPoll{} },
+ Dot11TypeDataQOSNull: func() gopacket.DecodingLayer { return &Dot11DataQOSNull{} },
+ Dot11TypeDataQOSCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFPollNoData{} },
+ Dot11TypeDataQOSCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFAckPollNoData{} },
+}
+
+func (m *Dot11) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 10 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), 10)
+ }
+ m.Type = Dot11Type((data[0])&0xFC) >> 2
+
+ m.Proto = uint8(data[0]) & 0x0003
+ m.Flags = Dot11Flags(data[1])
+ m.DurationID = binary.LittleEndian.Uint16(data[2:4])
+ m.Address1 = net.HardwareAddr(data[4:10])
+
+ offset := 10
+
+ mainType := m.Type.MainType()
+
+ switch mainType {
+ case Dot11TypeCtrl:
+ switch m.Type {
+ case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
+ if len(data) < offset+6 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+ m.Address2 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+ }
+ case Dot11TypeMgmt, Dot11TypeData:
+ if len(data) < offset+14 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+14)
+ }
+ m.Address2 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+ m.Address3 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+
+ m.SequenceNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0xFFF0) >> 4
+ m.FragmentNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0x000F)
+ offset += 2
+ }
+
+ if mainType == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
+ if len(data) < offset+6 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+ m.Address4 = net.HardwareAddr(data[offset : offset+6])
+ offset += 6
+ }
+
+ if m.Type.QOS() {
+ if len(data) < offset+2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+ m.QOS = &Dot11QOS{
+ TID: (uint8(data[offset]) & 0x0F),
+ EOSP: (uint8(data[offset]) & 0x10) == 0x10,
+ AckPolicy: Dot11AckPolicy((uint8(data[offset]) & 0x60) >> 5),
+ TXOP: uint8(data[offset+1]),
+ }
+ offset += 2
+ }
+ if m.Flags.Order() && (m.Type.QOS() || mainType == Dot11TypeMgmt) {
+ if len(data) < offset+4 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+ }
+
+ htc := &Dot11HTControl{
+ ACConstraint: data[offset+3]&0x40 != 0,
+ RDGMorePPDU: data[offset+3]&0x80 != 0,
+ }
+ m.HTControl = htc
+
+ if data[offset]&0x1 != 0 { // VHT Variant
+ vht := &Dot11HTControlVHT{}
+ htc.VHT = vht
+ vht.MRQ = data[offset]&0x4 != 0
+ vht.UnsolicitedMFB = data[offset+3]&0x20 != 0
+ vht.MFB = Dot11HTControlMFB{
+ NumSTS: uint8(data[offset+1] >> 1 & 0x7),
+ VHTMCS: uint8(data[offset+1] >> 4 & 0xF),
+ BW: uint8(data[offset+2] & 0x3),
+ SNR: int8((-(data[offset+2] >> 2 & 0x20))+data[offset+2]>>2&0x1F) + 22,
+ }
+
+ if vht.UnsolicitedMFB {
+ if !vht.MFB.NoFeedBackPresent() {
+ vht.CompressedMSI = createU8(data[offset] >> 3 & 0x3)
+ vht.STBCIndication = data[offset]&0x20 != 0
+ vht.CodingType = (*Dot11CodingType)(createU8(data[offset+3] >> 3 & 0x1))
+ vht.FbTXBeamformed = data[offset+3]&0x10 != 0
+ vht.GID = createU8(
+ data[offset]>>6 +
+ (data[offset+1] & 0x1 << 2) +
+ data[offset+3]&0x7<<3)
+ }
+ } else {
+ if vht.MRQ {
+ vht.MSI = createU8((data[offset] >> 3) & 0x07)
+ }
+ vht.MFSI = createU8(data[offset]>>6 + (data[offset+1] & 0x1 << 2))
+ }
+
+ } else { // HT Variant
+ ht := &Dot11HTControlHT{}
+ htc.HT = ht
+
+ lac := &Dot11LinkAdapationControl{}
+ ht.LinkAdapationControl = lac
+ lac.TRQ = data[offset]&0x2 != 0
+ lac.MFSI = data[offset]>>6&0x3 + data[offset+1]&0x1<<3
+ if data[offset]&0x3C == 0x38 { // ASEL
+ lac.ASEL = &Dot11ASEL{
+ Command: data[offset+1] >> 1 & 0x7,
+ Data: data[offset+1] >> 4 & 0xF,
+ }
+ } else {
+ lac.MRQ = data[offset]&0x4 != 0
+ if lac.MRQ {
+ lac.MSI = data[offset] >> 3 & 0x7
+ }
+ lac.MFB = createU8(data[offset+1] >> 1)
+ }
+ ht.CalibrationPosition = data[offset+2] & 0x3
+ ht.CalibrationSequence = data[offset+2] >> 2 & 0x3
+ ht.CSISteering = data[offset+2] >> 6 & 0x3
+ ht.NDPAnnouncement = data[offset+3]&0x1 != 0
+ if mainType != Dot11TypeMgmt {
+ ht.DEI = data[offset+3]&0x20 != 0
+ }
+ }
+
+ offset += 4
+ }
+
+ if len(data) < offset+4 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+4)
+ }
+
+ m.BaseLayer = BaseLayer{
+ Contents: data[0:offset],
+ Payload: data[offset : len(data)-4],
+ }
+
+ if mainType == Dot11TypeData {
+ l := dataDecodeMap[m.Type]()
+ err := l.DecodeFromBytes(m.BaseLayer.Payload, df)
+ if err != nil {
+ return err
+ }
+ m.DataLayer = l.(gopacket.Layer)
+ }
+
+ m.Checksum = binary.LittleEndian.Uint32(data[len(data)-4 : len(data)])
+ return nil
+}
+
+// ChecksumValid recomputes the IEEE CRC-32 frame check sequence (FCS)
+// over the frame header and body and compares it with the checksum
+// parsed from the wire. It is only meaningful for control and
+// management frames.
+func (m *Dot11) ChecksumValid() bool {
+ h := crc32.NewIEEE()
+ h.Write(m.Contents)
+ h.Write(m.Payload)
+ return m.Checksum == h.Sum32()
+}
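+
+// fcsRoundTripSketch is an illustrative helper added for exposition (not
+// part of the original library): decode a frame, then validate its
+// trailing FCS in one step via ChecksumValid above.
+func fcsRoundTripSketch(raw []byte) (bool, error) {
+ d := &Dot11{}
+ if err := d.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
+ return false, err
+ }
+ return d.ChecksumValid(), nil
+}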
+
+func (m Dot11) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(24)
+
+ if err != nil {
+ return err
+ }
+
+ buf[0] = (uint8(m.Type) << 2) | m.Proto
+ buf[1] = uint8(m.Flags)
+
+ binary.LittleEndian.PutUint16(buf[2:4], m.DurationID)
+
+ copy(buf[4:10], m.Address1)
+
+ offset := 10
+
+ switch m.Type.MainType() {
+ case Dot11TypeCtrl:
+ switch m.Type {
+ case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
+ copy(buf[offset:offset+6], m.Address2)
+ offset += 6
+ }
+ case Dot11TypeMgmt, Dot11TypeData:
+ copy(buf[offset:offset+6], m.Address2)
+ offset += 6
+ copy(buf[offset:offset+6], m.Address3)
+ offset += 6
+
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], (m.SequenceNumber<<4)|m.FragmentNumber)
+ offset += 2
+ }
+
+ if m.Type.MainType() == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
+ copy(buf[offset:offset+6], m.Address4)
+ offset += 6
+ }
+
+ return nil
+}
+
+// Dot11Mgmt is a base for all IEEE 802.11 management layers.
+type Dot11Mgmt struct {
+ BaseLayer
+}
+
+func (m *Dot11Mgmt) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+func (m *Dot11Mgmt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+// Dot11Ctrl is a base for all IEEE 802.11 control layers.
+type Dot11Ctrl struct {
+ BaseLayer
+}
+
+func (m *Dot11Ctrl) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+func (m *Dot11Ctrl) LayerType() gopacket.LayerType { return LayerTypeDot11Ctrl }
+func (m *Dot11Ctrl) CanDecode() gopacket.LayerClass { return LayerTypeDot11Ctrl }
+func (m *Dot11Ctrl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeDot11Ctrl(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11Ctrl{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+// Dot11WEP contains WEP-encrypted IEEE 802.11 data.
+type Dot11WEP struct {
+ BaseLayer
+}
+
+func (m *Dot11WEP) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+func (m *Dot11WEP) LayerType() gopacket.LayerType { return LayerTypeDot11WEP }
+func (m *Dot11WEP) CanDecode() gopacket.LayerClass { return LayerTypeDot11WEP }
+func (m *Dot11WEP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeDot11WEP(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11WEP{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+// Dot11Data is a base for all IEEE 802.11 data layers.
+type Dot11Data struct {
+ BaseLayer
+}
+
+func (m *Dot11Data) NextLayerType() gopacket.LayerType {
+ return LayerTypeLLC
+}
+
+func (m *Dot11Data) LayerType() gopacket.LayerType { return LayerTypeDot11Data }
+func (m *Dot11Data) CanDecode() gopacket.LayerClass { return LayerTypeDot11Data }
+func (m *Dot11Data) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Payload = data
+ return nil
+}
+
+func decodeDot11Data(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11Data{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type Dot11DataCFAck struct {
+ Dot11Data
+}
+
+func decodeDot11DataCFAck(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataCFAck{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck }
+func (m *Dot11DataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAck }
+func (m *Dot11DataCFAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFPoll struct {
+ Dot11Data
+}
+
+func decodeDot11DataCFPoll(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataCFPoll{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll }
+func (m *Dot11DataCFPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPoll }
+func (m *Dot11DataCFPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckPoll struct {
+ Dot11Data
+}
+
+func decodeDot11DataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataCFAckPoll{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckPoll }
+func (m *Dot11DataCFAckPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckPoll }
+func (m *Dot11DataCFAckPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataNull struct {
+ Dot11Data
+}
+
+func decodeDot11DataNull(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataNull{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataNull }
+func (m *Dot11DataNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataNull }
+func (m *Dot11DataNull) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckNoData struct {
+ Dot11Data
+}
+
+func decodeDot11DataCFAckNoData(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataCFAckNoData{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckNoData }
+func (m *Dot11DataCFAckNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckNoData }
+func (m *Dot11DataCFAckNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFPollNoData struct {
+ Dot11Data
+}
+
+func decodeDot11DataCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataCFPollNoData{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFPollNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPollNoData }
+func (m *Dot11DataCFPollNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPollNoData }
+func (m *Dot11DataCFPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckPollNoData struct {
+ Dot11Data
+}
+
+func decodeDot11DataCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataCFAckPollNoData{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckPollNoData) LayerType() gopacket.LayerType {
+ return LayerTypeDot11DataCFAckPollNoData
+}
+func (m *Dot11DataCFAckPollNoData) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11DataCFAckPollNoData
+}
+func (m *Dot11DataCFAckPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataQOS struct {
+ Dot11Ctrl
+}
+
+func (m *Dot11DataQOS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.BaseLayer = BaseLayer{Payload: data}
+ return nil
+}
+
+type Dot11DataQOSData struct {
+ Dot11DataQOS
+}
+
+func decodeDot11DataQOSData(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataQOSData{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSData) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSData }
+func (m *Dot11DataQOSData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSData }
+
+func (m *Dot11DataQOSData) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11Data
+}
+
+type Dot11DataQOSDataCFAck struct {
+ Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFAck(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataQOSDataCFAck{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSDataCFAck }
+func (m *Dot11DataQOSDataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSDataCFAck }
+func (m *Dot11DataQOSDataCFAck) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck }
+
+type Dot11DataQOSDataCFPoll struct {
+ Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFPoll(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataQOSDataCFPoll{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFPoll) LayerType() gopacket.LayerType {
+ return LayerTypeDot11DataQOSDataCFPoll
+}
+func (m *Dot11DataQOSDataCFPoll) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11DataQOSDataCFPoll
+}
+func (m *Dot11DataQOSDataCFPoll) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll }
+
+type Dot11DataQOSDataCFAckPoll struct {
+ Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataQOSDataCFAckPoll{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFAckPoll) LayerType() gopacket.LayerType {
+ return LayerTypeDot11DataQOSDataCFAckPoll
+}
+func (m *Dot11DataQOSDataCFAckPoll) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11DataQOSDataCFAckPoll
+}
+func (m *Dot11DataQOSDataCFAckPoll) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11DataCFAckPoll
+}
+
+type Dot11DataQOSNull struct {
+ Dot11DataQOS
+}
+
+func decodeDot11DataQOSNull(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataQOSNull{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSNull }
+func (m *Dot11DataQOSNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSNull }
+func (m *Dot11DataQOSNull) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataNull }
+
+type Dot11DataQOSCFPollNoData struct {
+ Dot11DataQOS
+}
+
+func decodeDot11DataQOSCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataQOSCFPollNoData{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSCFPollNoData) LayerType() gopacket.LayerType {
+ return LayerTypeDot11DataQOSCFPollNoData
+}
+func (m *Dot11DataQOSCFPollNoData) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11DataQOSCFPollNoData
+}
+func (m *Dot11DataQOSCFPollNoData) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11DataCFPollNoData
+}
+
+type Dot11DataQOSCFAckPollNoData struct {
+ Dot11DataQOS
+}
+
+func decodeDot11DataQOSCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11DataQOSCFAckPollNoData{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSCFAckPollNoData) LayerType() gopacket.LayerType {
+ return LayerTypeDot11DataQOSCFAckPollNoData
+}
+func (m *Dot11DataQOSCFAckPollNoData) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11DataQOSCFAckPollNoData
+}
+func (m *Dot11DataQOSCFAckPollNoData) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11DataCFAckPollNoData
+}
+
+type Dot11InformationElement struct {
+ BaseLayer
+ ID Dot11InformationElementID
+ Length uint8
+ OUI []byte
+ Info []byte
+}
+
+func (m *Dot11InformationElement) LayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+func (m *Dot11InformationElement) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11InformationElement
+}
+
+func (m *Dot11InformationElement) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+
+func (m *Dot11InformationElement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), 2)
+ }
+ m.ID = Dot11InformationElementID(data[0])
+ m.Length = data[1]
+ offset := 2
+
+ if len(data) < offset+int(m.Length) {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), offset+int(m.Length))
+ }
+ if m.ID == Dot11InformationElementIDVendor {
+ // Vendor extension
+ m.OUI = data[offset : offset+4]
+ m.Info = data[offset+4 : offset+int(m.Length)]
+ } else {
+ m.Info = data[offset : offset+int(m.Length)]
+ }
+
+ offset += int(m.Length)
+
+ m.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]}
+ return nil
+}
+
+func (d *Dot11InformationElement) String() string {
+ if d.ID == 0 {
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, SSID: %v)", d.ID, d.Length, string(d.Info))
+ } else if d.ID == 1 {
+ rates := ""
+ for i := 0; i < len(d.Info); i++ {
+ if d.Info[i]&0x80 == 0 {
+ rates += fmt.Sprintf("%.1f ", float32(d.Info[i])*0.5)
+ } else {
+ rates += fmt.Sprintf("%.1f* ", float32(d.Info[i]&0x7F)*0.5)
+ }
+ }
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Rates: %s Mbit)", d.ID, d.Length, rates)
+ } else if d.ID == 221 {
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, OUI: %X, Info: %X)", d.ID, d.Length, d.OUI, d.Info)
+ } else {
+ return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Info: %X)", d.ID, d.Length, d.Info)
+ }
+}
+
+func (m Dot11InformationElement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ length := len(m.Info) + len(m.OUI)
+ buf, err := b.PrependBytes(2 + length)
+ if err != nil {
+ return err
+ }
+ buf[0] = uint8(m.ID)
+ buf[1] = uint8(length)
+ copy(buf[2:], m.OUI)
+ copy(buf[2+len(m.OUI):], m.Info)
+ return nil
+}
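+
+// buildSSIDElement is an illustrative sketch added for exposition, not
+// part of the original library. It shows the wire layout SerializeTo
+// produces: one ID byte, one length byte, then OUI (vendor elements
+// only) and Info.
+func buildSSIDElement(ssid string) ([]byte, error) {
+ ie := Dot11InformationElement{
+ ID: Dot11InformationElementIDSSID,
+ Info: []byte(ssid),
+ }
+ buf := gopacket.NewSerializeBuffer()
+ if err := ie.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil // e.g. "ap" -> 0x00 0x02 'a' 'p'
+}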
+
+func decodeDot11InformationElement(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11InformationElement{}
+ return decodingLayerDecoder(d, data, p)
+}
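+
+// walkInformationElements is an illustrative helper added for exposition
+// (not part of the original library). Management frame bodies carry a
+// chain of elements; each DecodeFromBytes call leaves the remaining
+// bytes in Payload, so the chain can be walked until it is exhausted.
+func walkInformationElements(body []byte, visit func(*Dot11InformationElement)) error {
+ for len(body) > 0 {
+ ie := &Dot11InformationElement{}
+ if err := ie.DecodeFromBytes(body, gopacket.NilDecodeFeedback); err != nil {
+ return err
+ }
+ visit(ie)
+ body = ie.Payload
+ }
+ return nil
+}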
+
+type Dot11CtrlCTS struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlCTS(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlCTS{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCTS) LayerType() gopacket.LayerType {
+ return LayerTypeDot11CtrlCTS
+}
+func (m *Dot11CtrlCTS) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11CtrlCTS
+}
+func (m *Dot11CtrlCTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlRTS struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlRTS(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlRTS{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlRTS) LayerType() gopacket.LayerType {
+ return LayerTypeDot11CtrlRTS
+}
+func (m *Dot11CtrlRTS) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11CtrlRTS
+}
+func (m *Dot11CtrlRTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlBlockAckReq struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlBlockAckReq(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlBlockAckReq{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlBlockAckReq) LayerType() gopacket.LayerType {
+ return LayerTypeDot11CtrlBlockAckReq
+}
+func (m *Dot11CtrlBlockAckReq) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11CtrlBlockAckReq
+}
+func (m *Dot11CtrlBlockAckReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlBlockAck struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlBlockAck(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlBlockAck{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlBlockAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlBlockAck }
+func (m *Dot11CtrlBlockAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlBlockAck }
+func (m *Dot11CtrlBlockAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlPowersavePoll struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlPowersavePoll(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlPowersavePoll{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlPowersavePoll) LayerType() gopacket.LayerType {
+ return LayerTypeDot11CtrlPowersavePoll
+}
+func (m *Dot11CtrlPowersavePoll) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11CtrlPowersavePoll
+}
+func (m *Dot11CtrlPowersavePoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlAck struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlAck(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlAck{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlAck }
+func (m *Dot11CtrlAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlAck }
+func (m *Dot11CtrlAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlCFEnd struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlCFEnd(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlCFEnd{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCFEnd) LayerType() gopacket.LayerType {
+ return LayerTypeDot11CtrlCFEnd
+}
+func (m *Dot11CtrlCFEnd) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11CtrlCFEnd
+}
+func (m *Dot11CtrlCFEnd) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlCFEndAck struct {
+ Dot11Ctrl
+}
+
+func decodeDot11CtrlCFEndAck(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11CtrlCFEndAck{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCFEndAck) LayerType() gopacket.LayerType {
+ return LayerTypeDot11CtrlCFEndAck
+}
+func (m *Dot11CtrlCFEndAck) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11CtrlCFEndAck
+}
+func (m *Dot11CtrlCFEndAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11MgmtAssociationReq struct {
+ Dot11Mgmt
+ CapabilityInfo uint16
+ ListenInterval uint16
+}
+
+func decodeDot11MgmtAssociationReq(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAssociationReq{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationReq) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAssociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtAssociationReq length %v too short, %v required", len(data), 4)
+ }
+ m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+ m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
+ m.Payload = data[4:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAssociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(4)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+ binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+ return nil
+}
+
+type Dot11MgmtAssociationResp struct {
+ Dot11Mgmt
+ CapabilityInfo uint16
+ Status Dot11Status
+ AID uint16
+}
+
+func decodeDot11MgmtAssociationResp(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAssociationResp{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationResp) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAssociationResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 6 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtAssociationResp length %v too short, %v required", len(data), 6)
+ }
+ m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+ m.Status = Dot11Status(binary.LittleEndian.Uint16(data[2:4]))
+ m.AID = binary.LittleEndian.Uint16(data[4:6])
+ m.Payload = data[6:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAssociationResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(6)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+ binary.LittleEndian.PutUint16(buf[2:4], uint16(m.Status))
+ binary.LittleEndian.PutUint16(buf[4:6], m.AID)
+
+ return nil
+}
+
+type Dot11MgmtReassociationReq struct {
+ Dot11Mgmt
+ CapabilityInfo uint16
+ ListenInterval uint16
+ CurrentApAddress net.HardwareAddr
+}
+
+func decodeDot11MgmtReassociationReq(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtReassociationReq{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationReq) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtReassociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 10 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtReassociationReq length %v too short, %v required", len(data), 10)
+ }
+ m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+ m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
+ m.CurrentApAddress = net.HardwareAddr(data[4:10])
+ m.Payload = data[10:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtReassociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(10)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+ binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+ copy(buf[4:10], m.CurrentApAddress)
+
+ return nil
+}
+
+type Dot11MgmtReassociationResp struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtReassociationResp(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtReassociationResp{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationResp) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeReq struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtProbeReq(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtProbeReq{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeReq) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeResp struct {
+ Dot11Mgmt
+ Timestamp uint64
+ Interval uint16
+ Flags uint16
+}
+
+func decodeDot11MgmtProbeResp(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtProbeResp{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeResp) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeResp }
+func (m *Dot11MgmtProbeResp) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeResp }
+func (m *Dot11MgmtProbeResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 12 {
+ df.SetTruncated()
+
+ return fmt.Errorf("Dot11MgmtProbeResp length %v too short, %v required", len(data), 12)
+ }
+
+ m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
+ m.Interval = binary.LittleEndian.Uint16(data[8:10])
+ m.Flags = binary.LittleEndian.Uint16(data[10:12])
+ m.Payload = data[12:]
+
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m *Dot11MgmtProbeResp) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+
+func (m Dot11MgmtProbeResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(12)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+ binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+ binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+ return nil
+}
+
+type Dot11MgmtMeasurementPilot struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtMeasurementPilot(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtMeasurementPilot{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtMeasurementPilot) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtMeasurementPilot
+}
+func (m *Dot11MgmtMeasurementPilot) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtMeasurementPilot
+}
+
+type Dot11MgmtBeacon struct {
+ Dot11Mgmt
+ Timestamp uint64
+ Interval uint16
+ Flags uint16
+}
+
+func decodeDot11MgmtBeacon(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtBeacon{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtBeacon) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 12 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtBeacon length %v too short, %v required", len(data), 12)
+ }
+ m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
+ m.Interval = binary.LittleEndian.Uint16(data[8:10])
+ m.Flags = binary.LittleEndian.Uint16(data[10:12])
+ m.Payload = data[12:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m *Dot11MgmtBeacon) NextLayerType() gopacket.LayerType { return LayerTypeDot11InformationElement }
+
+func (m Dot11MgmtBeacon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(12)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+ binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+ binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+ return nil
+}
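+
+// beaconFixedFieldsSketch is an illustrative helper added for exposition
+// (not part of the original library). It shows the fixed beacon prefix
+// SerializeTo emits: an 8-byte timestamp, a 2-byte beacon interval and a
+// 2-byte capability word, all little-endian, normally followed on the
+// wire by information elements.
+func beaconFixedFieldsSketch(ts uint64, interval, caps uint16) ([]byte, error) {
+ b := Dot11MgmtBeacon{Timestamp: ts, Interval: interval, Flags: caps}
+ buf := gopacket.NewSerializeBuffer()
+ if err := b.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil // always 12 bytes: 8 + 2 + 2
+}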
+
+type Dot11MgmtATIM struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtATIM(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtATIM{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtATIM) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtATIM }
+func (m *Dot11MgmtATIM) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtATIM }
+
+type Dot11MgmtDisassociation struct {
+ Dot11Mgmt
+ Reason Dot11Reason
+}
+
+func decodeDot11MgmtDisassociation(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtDisassociation{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDisassociation) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtDisassociation length %v too short, %v required", len(data), 2)
+ }
+ m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDisassociation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(2)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+ return nil
+}
+
+type Dot11MgmtAuthentication struct {
+ Dot11Mgmt
+ Algorithm Dot11Algorithm
+ Sequence uint16
+ Status Dot11Status
+}
+
+func decodeDot11MgmtAuthentication(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAuthentication{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAuthentication) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) NextLayerType() gopacket.LayerType {
+ return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAuthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 6 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtAuthentication length %v too short, %v required", len(data), 6)
+ }
+ m.Algorithm = Dot11Algorithm(binary.LittleEndian.Uint16(data[0:2]))
+ m.Sequence = binary.LittleEndian.Uint16(data[2:4])
+ m.Status = Dot11Status(binary.LittleEndian.Uint16(data[4:6]))
+ m.Payload = data[6:]
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAuthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(6)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Algorithm))
+ binary.LittleEndian.PutUint16(buf[2:4], m.Sequence)
+ binary.LittleEndian.PutUint16(buf[4:6], uint16(m.Status))
+
+ return nil
+}
+
+type Dot11MgmtDeauthentication struct {
+ Dot11Mgmt
+ Reason Dot11Reason
+}
+
+func decodeDot11MgmtDeauthentication(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtDeauthentication{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDeauthentication) LayerType() gopacket.LayerType {
+ return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Dot11MgmtDeauthentication length %v too short, %v required", len(data), 2)
+ }
+ m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+ return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDeauthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(2)
+
+ if err != nil {
+ return err
+ }
+
+ binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+ return nil
+}
+
+type Dot11MgmtAction struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtAction(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtAction{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAction) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtAction }
+func (m *Dot11MgmtAction) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtAction }
+
+type Dot11MgmtActionNoAck struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtActionNoAck(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtActionNoAck{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtActionNoAck) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtActionNoAck }
+func (m *Dot11MgmtActionNoAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtActionNoAck }
+
+type Dot11MgmtArubaWLAN struct {
+ Dot11Mgmt
+}
+
+func decodeDot11MgmtArubaWLAN(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot11MgmtArubaWLAN{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtArubaWLAN) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtArubaWLAN }
+func (m *Dot11MgmtArubaWLAN) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtArubaWLAN }
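As an illustrative aside (not part of the vendored diff): the management-frame layers above all follow the same pattern of little-endian fixed fields at the front of the frame body, with the remainder handed off to the information-element decoder. A minimal round trip of a beacon's 12-byte fixed header, sketched against the public gopacket entry points:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Serialize only the beacon's fixed header: timestamp, beacon
	// interval, and capability flags (values here are made up).
	beacon := &layers.Dot11MgmtBeacon{
		Timestamp: 0x0102030405060708,
		Interval:  100,    // beacon interval, in time units (TUs)
		Flags:     0x0431, // capability information bits
	}
	buf := gopacket.NewSerializeBuffer()
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, beacon); err != nil {
		panic(err)
	}

	// Decode the 12 bytes back into a fresh layer.
	var decoded layers.Dot11MgmtBeacon
	if err := decoded.DecodeFromBytes(buf.Bytes(), gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Printf("interval=%d flags=%#04x\n", decoded.Interval, decoded.Flags)
}
```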
diff --git a/vendor/github.com/google/gopacket/layers/dot1q.go b/vendor/github.com/google/gopacket/layers/dot1q.go
new file mode 100644
index 0000000..47f93d7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot1q.go
@@ -0,0 +1,71 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// Dot1Q is the packet layer for 802.1Q VLAN headers.
+type Dot1Q struct {
+ BaseLayer
+ Priority uint8
+ DropEligible bool
+ VLANIdentifier uint16
+ Type EthernetType
+}
+
+// LayerType returns gopacket.LayerTypeDot1Q
+func (d *Dot1Q) LayerType() gopacket.LayerType { return LayerTypeDot1Q }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *Dot1Q) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ d.Priority = (data[0] & 0xE0) >> 5
+ d.DropEligible = data[0]&0x10 != 0
+ d.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF
+ d.Type = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+ d.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]}
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *Dot1Q) CanDecode() gopacket.LayerClass {
+ return LayerTypeDot1Q
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *Dot1Q) NextLayerType() gopacket.LayerType {
+ return d.Type.LayerType()
+}
+
+func decodeDot1Q(data []byte, p gopacket.PacketBuilder) error {
+ d := &Dot1Q{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *Dot1Q) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ if d.VLANIdentifier > 0xFFF {
+ return fmt.Errorf("vlan identifier %v is too high", d.VLANIdentifier)
+ }
+ firstBytes := uint16(d.Priority)<<13 | d.VLANIdentifier
+ if d.DropEligible {
+ firstBytes |= 0x1000
+ }
+ binary.BigEndian.PutUint16(bytes, firstBytes)
+ binary.BigEndian.PutUint16(bytes[2:], uint16(d.Type))
+ return nil
+}
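SerializeTo above packs the 3-bit priority (PCP), the drop-eligible bit (DEI), and the 12-bit VLAN ID into the first two bytes, which is why VLANIdentifier is range-checked against 0xFFF. A round-trip sketch (illustrative only, not part of the vendored diff):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	tag := &layers.Dot1Q{
		Priority:       3,    // PCP, 3 bits
		DropEligible:   true, // DEI, 1 bit
		VLANIdentifier: 42,   // VID, 12 bits
		Type:           layers.EthernetTypeIPv4,
	}
	buf := gopacket.NewSerializeBuffer()
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, tag); err != nil {
		panic(err)
	}

	var decoded layers.Dot1Q
	if err := decoded.DecodeFromBytes(buf.Bytes(), gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Printf("PCP=%d DEI=%v VID=%d next=%v\n",
		decoded.Priority, decoded.DropEligible, decoded.VLANIdentifier, decoded.Type)
}
```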
diff --git a/vendor/github.com/google/gopacket/layers/eap.go b/vendor/github.com/google/gopacket/layers/eap.go
new file mode 100644
index 0000000..250f857
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eap.go
@@ -0,0 +1,106 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+type EAPCode uint8
+type EAPType uint8
+
+const (
+ EAPCodeRequest EAPCode = 1
+ EAPCodeResponse EAPCode = 2
+ EAPCodeSuccess EAPCode = 3
+ EAPCodeFailure EAPCode = 4
+
+ // EAPTypeNone means that this EAP layer has no Type or TypeData.
+ // Success and Failure EAPs will have this set.
+ EAPTypeNone EAPType = 0
+
+ EAPTypeIdentity EAPType = 1
+ EAPTypeNotification EAPType = 2
+ EAPTypeNACK EAPType = 3
+ EAPTypeOTP EAPType = 4
+ EAPTypeTokenCard EAPType = 5
+)
+
+// EAP defines an Extensible Authentication Protocol (rfc 3748) layer.
+type EAP struct {
+ BaseLayer
+ Code EAPCode
+ Id uint8
+ Length uint16
+ Type EAPType
+ TypeData []byte
+}
+
+// LayerType returns LayerTypeEAP.
+func (e *EAP) LayerType() gopacket.LayerType { return LayerTypeEAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ e.Code = EAPCode(data[0])
+ e.Id = data[1]
+ e.Length = binary.BigEndian.Uint16(data[2:4])
+ switch {
+ case e.Length > 4:
+ e.Type = EAPType(data[4])
+ e.TypeData = data[5:]
+ case e.Length == 4:
+ e.Type = 0
+ e.TypeData = nil
+ default:
+ return fmt.Errorf("invalid EAP length %d", e.Length)
+ }
+ e.BaseLayer.Contents = data[:e.Length]
+ e.BaseLayer.Payload = data[e.Length:] // Should be 0 bytes
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (e *EAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	size := len(e.TypeData) + 4
+	if size > 4 {
+		size++ // account for the Type byte
+	}
+	if opts.FixLengths {
+		// The Length field covers the whole packet (RFC 3748): Code, Id,
+		// Length, and, when present, Type plus TypeData. DecodeFromBytes
+		// relies on this, slicing Contents as data[:e.Length].
+		e.Length = uint16(size)
+	}
+ bytes, err := b.PrependBytes(size)
+ if err != nil {
+ return err
+ }
+ bytes[0] = byte(e.Code)
+ bytes[1] = e.Id
+ binary.BigEndian.PutUint16(bytes[2:], e.Length)
+ if size > 4 {
+ bytes[4] = byte(e.Type)
+ copy(bytes[5:], e.TypeData)
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAP) CanDecode() gopacket.LayerClass {
+ return LayerTypeEAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+func decodeEAP(data []byte, p gopacket.PacketBuilder) error {
+ e := &EAP{}
+ return decodingLayerDecoder(e, data, p)
+}
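Note that DecodeFromBytes treats Length as an offset into the packet (Contents = data[:Length]), so the field must cover the full header plus any Type and TypeData. An illustrative sketch that sets Length explicitly (not part of the vendored diff; the identity payload is made up):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	identity := []byte("hello") // made-up identity payload
	eap := &layers.EAP{
		Code:     layers.EAPCodeRequest,
		Id:       1,
		Length:   uint16(4 + 1 + len(identity)), // header + Type byte + data
		Type:     layers.EAPTypeIdentity,
		TypeData: identity,
	}
	buf := gopacket.NewSerializeBuffer()
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, eap); err != nil {
		panic(err)
	}

	var decoded layers.EAP
	if err := decoded.DecodeFromBytes(buf.Bytes(), gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Printf("code=%d type=%d identity=%q\n", decoded.Code, decoded.Type, decoded.TypeData)
}
```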
diff --git a/vendor/github.com/google/gopacket/layers/eapol.go b/vendor/github.com/google/gopacket/layers/eapol.go
new file mode 100644
index 0000000..12aa5ba
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eapol.go
@@ -0,0 +1,298 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+// EAPOL defines an EAP over LAN (802.1x) layer.
+type EAPOL struct {
+ BaseLayer
+ Version uint8
+ Type EAPOLType
+ Length uint16
+}
+
+// LayerType returns LayerTypeEAPOL.
+func (e *EAPOL) LayerType() gopacket.LayerType { return LayerTypeEAPOL }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAPOL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ e.Version = data[0]
+ e.Type = EAPOLType(data[1])
+ e.Length = binary.BigEndian.Uint16(data[2:4])
+ e.BaseLayer = BaseLayer{data[:4], data[4:]}
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer
+func (e *EAPOL) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+ bytes[0] = e.Version
+ bytes[1] = byte(e.Type)
+ binary.BigEndian.PutUint16(bytes[2:], e.Length)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAPOL) CanDecode() gopacket.LayerClass {
+ return LayerTypeEAPOL
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAPOL) NextLayerType() gopacket.LayerType {
+ return e.Type.LayerType()
+}
+
+func decodeEAPOL(data []byte, p gopacket.PacketBuilder) error {
+ e := &EAPOL{}
+ return decodingLayerDecoder(e, data, p)
+}
+
+// EAPOLKeyDescriptorType is an enumeration of key descriptor types
+// as specified by 802.1x in the EAPOL-Key frame
+type EAPOLKeyDescriptorType uint8
+
+// Enumeration of EAPOLKeyDescriptorType
+const (
+ EAPOLKeyDescriptorTypeRC4 EAPOLKeyDescriptorType = 1
+ EAPOLKeyDescriptorTypeDot11 EAPOLKeyDescriptorType = 2
+ EAPOLKeyDescriptorTypeWPA EAPOLKeyDescriptorType = 254
+)
+
+func (kdt EAPOLKeyDescriptorType) String() string {
+ switch kdt {
+ case EAPOLKeyDescriptorTypeRC4:
+ return "RC4"
+ case EAPOLKeyDescriptorTypeDot11:
+ return "802.11"
+ case EAPOLKeyDescriptorTypeWPA:
+ return "WPA"
+ default:
+ return fmt.Sprintf("unknown descriptor type %d", kdt)
+ }
+}
+
+// EAPOLKeyDescriptorVersion is an enumeration of versions specifying the
+// encryption algorithm for the key data and the authentication for the
+// message integrity code (MIC)
+type EAPOLKeyDescriptorVersion uint8
+
+// Enumeration of EAPOLKeyDescriptorVersion
+const (
+ EAPOLKeyDescriptorVersionOther EAPOLKeyDescriptorVersion = 0
+ EAPOLKeyDescriptorVersionRC4HMACMD5 EAPOLKeyDescriptorVersion = 1
+ EAPOLKeyDescriptorVersionAESHMACSHA1 EAPOLKeyDescriptorVersion = 2
+ EAPOLKeyDescriptorVersionAES128CMAC EAPOLKeyDescriptorVersion = 3
+)
+
+func (v EAPOLKeyDescriptorVersion) String() string {
+ switch v {
+ case EAPOLKeyDescriptorVersionOther:
+ return "Other"
+ case EAPOLKeyDescriptorVersionRC4HMACMD5:
+ return "RC4-HMAC-MD5"
+ case EAPOLKeyDescriptorVersionAESHMACSHA1:
+ return "AES-HMAC-SHA1-128"
+ case EAPOLKeyDescriptorVersionAES128CMAC:
+ return "AES-128-CMAC"
+ default:
+ return fmt.Sprintf("unknown version %d", v)
+ }
+}
+
+// EAPOLKeyType is an enumeration of key derivation types describing
+// the purpose of the keys being derived.
+type EAPOLKeyType uint8
+
+// Enumeration of EAPOLKeyType
+const (
+ EAPOLKeyTypeGroupSMK EAPOLKeyType = 0
+ EAPOLKeyTypePairwise EAPOLKeyType = 1
+)
+
+func (kt EAPOLKeyType) String() string {
+ switch kt {
+ case EAPOLKeyTypeGroupSMK:
+ return "Group/SMK"
+ case EAPOLKeyTypePairwise:
+ return "Pairwise"
+ default:
+ return fmt.Sprintf("unknown key type %d", kt)
+ }
+}
+
+// EAPOLKey defines an EAPOL-Key frame for 802.1x authentication
+type EAPOLKey struct {
+ BaseLayer
+ KeyDescriptorType EAPOLKeyDescriptorType
+ KeyDescriptorVersion EAPOLKeyDescriptorVersion
+ KeyType EAPOLKeyType
+ KeyIndex uint8
+ Install bool
+ KeyACK bool
+ KeyMIC bool
+ Secure bool
+ MICError bool
+ Request bool
+ HasEncryptedKeyData bool
+ SMKMessage bool
+ KeyLength uint16
+ ReplayCounter uint64
+ Nonce []byte
+ IV []byte
+ RSC uint64
+ ID uint64
+ MIC []byte
+ KeyDataLength uint16
+ EncryptedKeyData []byte
+}
+
+// LayerType returns LayerTypeEAPOLKey.
+func (ek *EAPOLKey) LayerType() gopacket.LayerType {
+ return LayerTypeEAPOLKey
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ek *EAPOLKey) CanDecode() gopacket.LayerClass {
+ return LayerTypeEAPOLKey
+}
+
+// NextLayerType returns layers.LayerTypeDot11InformationElement if the key
+// data exists and is unencrypted, otherwise it does not expect a next layer.
+func (ek *EAPOLKey) NextLayerType() gopacket.LayerType {
+ if !ek.HasEncryptedKeyData && ek.KeyDataLength > 0 {
+ return LayerTypeDot11InformationElement
+ }
+ return gopacket.LayerTypePayload
+}
+
+const eapolKeyFrameLen = 95
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (ek *EAPOLKey) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < eapolKeyFrameLen {
+ df.SetTruncated()
+ return fmt.Errorf("EAPOLKey length %v too short, %v required",
+ len(data), eapolKeyFrameLen)
+ }
+
+ ek.KeyDescriptorType = EAPOLKeyDescriptorType(data[0])
+
+ info := binary.BigEndian.Uint16(data[1:3])
+ ek.KeyDescriptorVersion = EAPOLKeyDescriptorVersion(info & 0x0007)
+ ek.KeyType = EAPOLKeyType((info & 0x0008) >> 3)
+ ek.KeyIndex = uint8((info & 0x0030) >> 4)
+ ek.Install = (info & 0x0040) != 0
+ ek.KeyACK = (info & 0x0080) != 0
+ ek.KeyMIC = (info & 0x0100) != 0
+ ek.Secure = (info & 0x0200) != 0
+ ek.MICError = (info & 0x0400) != 0
+ ek.Request = (info & 0x0800) != 0
+ ek.HasEncryptedKeyData = (info & 0x1000) != 0
+ ek.SMKMessage = (info & 0x2000) != 0
+
+ ek.KeyLength = binary.BigEndian.Uint16(data[3:5])
+ ek.ReplayCounter = binary.BigEndian.Uint64(data[5:13])
+
+ ek.Nonce = data[13:45]
+ ek.IV = data[45:61]
+ ek.RSC = binary.BigEndian.Uint64(data[61:69])
+ ek.ID = binary.BigEndian.Uint64(data[69:77])
+ ek.MIC = data[77:93]
+
+ ek.KeyDataLength = binary.BigEndian.Uint16(data[93:95])
+
+ totalLength := eapolKeyFrameLen + int(ek.KeyDataLength)
+ if len(data) < totalLength {
+ df.SetTruncated()
+ return fmt.Errorf("EAPOLKey data length %d too short, %d required",
+ len(data)-eapolKeyFrameLen, ek.KeyDataLength)
+ }
+
+ if ek.HasEncryptedKeyData {
+ ek.EncryptedKeyData = data[eapolKeyFrameLen:totalLength]
+ ek.BaseLayer = BaseLayer{
+ Contents: data[:totalLength],
+ Payload: data[totalLength:],
+ }
+ } else {
+ ek.BaseLayer = BaseLayer{
+ Contents: data[:eapolKeyFrameLen],
+ Payload: data[eapolKeyFrameLen:],
+ }
+ }
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (ek *EAPOLKey) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(eapolKeyFrameLen + len(ek.EncryptedKeyData))
+ if err != nil {
+ return err
+ }
+
+ buf[0] = byte(ek.KeyDescriptorType)
+
+ var info uint16
+ info |= uint16(ek.KeyDescriptorVersion)
+ info |= uint16(ek.KeyType) << 3
+ info |= uint16(ek.KeyIndex) << 4
+ if ek.Install {
+ info |= 0x0040
+ }
+ if ek.KeyACK {
+ info |= 0x0080
+ }
+ if ek.KeyMIC {
+ info |= 0x0100
+ }
+ if ek.Secure {
+ info |= 0x0200
+ }
+ if ek.MICError {
+ info |= 0x0400
+ }
+ if ek.Request {
+ info |= 0x0800
+ }
+ if ek.HasEncryptedKeyData {
+ info |= 0x1000
+ }
+ if ek.SMKMessage {
+ info |= 0x2000
+ }
+ binary.BigEndian.PutUint16(buf[1:3], info)
+
+ binary.BigEndian.PutUint16(buf[3:5], ek.KeyLength)
+ binary.BigEndian.PutUint64(buf[5:13], ek.ReplayCounter)
+
+ copy(buf[13:45], ek.Nonce)
+ copy(buf[45:61], ek.IV)
+ binary.BigEndian.PutUint64(buf[61:69], ek.RSC)
+ binary.BigEndian.PutUint64(buf[69:77], ek.ID)
+ copy(buf[77:93], ek.MIC)
+
+ binary.BigEndian.PutUint16(buf[93:95], ek.KeyDataLength)
+ if len(ek.EncryptedKeyData) > 0 {
+ copy(buf[95:95+len(ek.EncryptedKeyData)], ek.EncryptedKeyData)
+ }
+
+ return nil
+}
+
+func decodeEAPOLKey(data []byte, p gopacket.PacketBuilder) error {
+ ek := &EAPOLKey{}
+ return decodingLayerDecoder(ek, data, p)
+}
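The 16-bit key-information field multiplexes the descriptor version, the key type, the key index, and eight flag bits, so DecodeFromBytes and SerializeTo must stay bit-for-bit symmetric. An illustrative round trip shaped like message 1 of a WPA2 four-way handshake (field values are made up; not part of the vendored diff):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	key := &layers.EAPOLKey{
		KeyDescriptorType:    layers.EAPOLKeyDescriptorTypeDot11,
		KeyDescriptorVersion: layers.EAPOLKeyDescriptorVersionAESHMACSHA1,
		KeyType:              layers.EAPOLKeyTypePairwise,
		KeyACK:               true, // authenticator expects a reply
		KeyLength:            16,   // pairwise key length in bytes
		ReplayCounter:        1,
		Nonce:                make([]byte, 32), // the ANonce would go here
	}
	buf := gopacket.NewSerializeBuffer()
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, key); err != nil {
		panic(err)
	}

	var decoded layers.EAPOLKey
	if err := decoded.DecodeFromBytes(buf.Bytes(), gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Printf("%v %v ack=%v mic=%v\n",
		decoded.KeyType, decoded.KeyDescriptorVersion, decoded.KeyACK, decoded.KeyMIC)
}
```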
diff --git a/vendor/github.com/google/gopacket/layers/endpoints.go b/vendor/github.com/google/gopacket/layers/endpoints.go
new file mode 100644
index 0000000..4c91cc3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/endpoints.go
@@ -0,0 +1,97 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+ "net"
+ "strconv"
+)
+
+var (
+ // We use two different endpoint types for IPv4 vs IPv6 addresses, so that
+ // ordering with endpointA.LessThan(endpointB) sanely groups all IPv4
+ // addresses and all IPv6 addresses, such that IPv6 > IPv4 for all addresses.
+ EndpointIPv4 = gopacket.RegisterEndpointType(1, gopacket.EndpointTypeMetadata{Name: "IPv4", Formatter: func(b []byte) string {
+ return net.IP(b).String()
+ }})
+ EndpointIPv6 = gopacket.RegisterEndpointType(2, gopacket.EndpointTypeMetadata{Name: "IPv6", Formatter: func(b []byte) string {
+ return net.IP(b).String()
+ }})
+
+ EndpointMAC = gopacket.RegisterEndpointType(3, gopacket.EndpointTypeMetadata{Name: "MAC", Formatter: func(b []byte) string {
+ return net.HardwareAddr(b).String()
+ }})
+ EndpointTCPPort = gopacket.RegisterEndpointType(4, gopacket.EndpointTypeMetadata{Name: "TCP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointUDPPort = gopacket.RegisterEndpointType(5, gopacket.EndpointTypeMetadata{Name: "UDP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointSCTPPort = gopacket.RegisterEndpointType(6, gopacket.EndpointTypeMetadata{Name: "SCTP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointRUDPPort = gopacket.RegisterEndpointType(7, gopacket.EndpointTypeMetadata{Name: "RUDP", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(b[0]))
+ }})
+ EndpointUDPLitePort = gopacket.RegisterEndpointType(8, gopacket.EndpointTypeMetadata{Name: "UDPLite", Formatter: func(b []byte) string {
+ return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+ }})
+ EndpointPPP = gopacket.RegisterEndpointType(9, gopacket.EndpointTypeMetadata{Name: "PPP", Formatter: func([]byte) string {
+ return "point"
+ }})
+)
+
+// NewIPEndpoint creates a new IP (v4 or v6) endpoint from a net.IP address.
+// It returns gopacket.InvalidEndpoint if the IP address is invalid.
+func NewIPEndpoint(a net.IP) gopacket.Endpoint {
+ ipv4 := a.To4()
+ if ipv4 != nil {
+ return gopacket.NewEndpoint(EndpointIPv4, []byte(ipv4))
+ }
+
+ ipv6 := a.To16()
+ if ipv6 != nil {
+ return gopacket.NewEndpoint(EndpointIPv6, []byte(ipv6))
+ }
+
+ return gopacket.InvalidEndpoint
+}
+
+// NewMACEndpoint returns a new MAC address endpoint.
+func NewMACEndpoint(a net.HardwareAddr) gopacket.Endpoint {
+ return gopacket.NewEndpoint(EndpointMAC, []byte(a))
+}
+func newPortEndpoint(t gopacket.EndpointType, p uint16) gopacket.Endpoint {
+ return gopacket.NewEndpoint(t, []byte{byte(p >> 8), byte(p)})
+}
+
+// NewTCPPortEndpoint returns an endpoint based on a TCP port.
+func NewTCPPortEndpoint(p TCPPort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointTCPPort, uint16(p))
+}
+
+// NewUDPPortEndpoint returns an endpoint based on a UDP port.
+func NewUDPPortEndpoint(p UDPPort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointUDPPort, uint16(p))
+}
+
+// NewSCTPPortEndpoint returns an endpoint based on an SCTP port.
+func NewSCTPPortEndpoint(p SCTPPort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointSCTPPort, uint16(p))
+}
+
+// NewRUDPPortEndpoint returns an endpoint based on an RUDP port.
+func NewRUDPPortEndpoint(p RUDPPort) gopacket.Endpoint {
+ return gopacket.NewEndpoint(EndpointRUDPPort, []byte{byte(p)})
+}
+
+// NewUDPLitePortEndpoint returns an endpoint based on a UDPLite port.
+func NewUDPLitePortEndpoint(p UDPLitePort) gopacket.Endpoint {
+ return newPortEndpoint(EndpointUDPLitePort, uint16(p))
+}
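Because IPv4 and IPv6 are registered as distinct endpoint types (1 and 2), Endpoint.LessThan orders every IPv4 endpoint before every IPv6 endpoint, as the comment at the top of the file describes. A small sketch (illustrative only, not part of the vendored diff):

```go
package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket/layers"
)

func main() {
	v4 := layers.NewIPEndpoint(net.ParseIP("10.0.0.1"))
	v6 := layers.NewIPEndpoint(net.ParseIP("2001:db8::1"))
	tcp := layers.NewTCPPortEndpoint(layers.TCPPort(443))

	fmt.Println(v4, v6, tcp)     // 10.0.0.1 2001:db8::1 443
	fmt.Println(v4.LessThan(v6)) // true: all IPv4 endpoints sort below IPv6
}
```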
diff --git a/vendor/github.com/google/gopacket/layers/enums.go b/vendor/github.com/google/gopacket/layers/enums.go
new file mode 100644
index 0000000..fa443e6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums.go
@@ -0,0 +1,448 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+
+ "github.com/google/gopacket"
+)
+
+// EnumMetadata keeps track of a set of metadata for each enumeration value
+// for protocol enumerations.
+type EnumMetadata struct {
+ // DecodeWith is the decoder to use to decode this protocol's data.
+ DecodeWith gopacket.Decoder
+ // Name is the name of the enumeration value.
+ Name string
+ // LayerType is the layer type implied by the given enum.
+ LayerType gopacket.LayerType
+}
+
+// errorFunc returns a decoder that spits out a specific error message.
+func errorFunc(msg string) gopacket.Decoder {
+ var e = errors.New(msg)
+ return gopacket.DecodeFunc(func([]byte, gopacket.PacketBuilder) error {
+ return e
+ })
+}
+
+// EthernetType is an enumeration of ethernet type values, and acts as a decoder
+// for any type it supports.
+type EthernetType uint16
+
+const (
+ // EthernetTypeLLC is not an actual ethernet type. It is instead a
+ // placeholder we use in Ethernet frames that use the 802.3 standard of
+ // srcmac|dstmac|length|LLC instead of srcmac|dstmac|ethertype.
+ EthernetTypeLLC EthernetType = 0
+ EthernetTypeIPv4 EthernetType = 0x0800
+ EthernetTypeARP EthernetType = 0x0806
+ EthernetTypeIPv6 EthernetType = 0x86DD
+ EthernetTypeCiscoDiscovery EthernetType = 0x2000
+ EthernetTypeNortelDiscovery EthernetType = 0x01a2
+ EthernetTypeTransparentEthernetBridging EthernetType = 0x6558
+ EthernetTypeDot1Q EthernetType = 0x8100
+ EthernetTypePPP EthernetType = 0x880b
+ EthernetTypePPPoEDiscovery EthernetType = 0x8863
+ EthernetTypePPPoESession EthernetType = 0x8864
+ EthernetTypeMPLSUnicast EthernetType = 0x8847
+ EthernetTypeMPLSMulticast EthernetType = 0x8848
+ EthernetTypeEAPOL EthernetType = 0x888e
+ EthernetTypeQinQ EthernetType = 0x88a8
+ EthernetTypeLinkLayerDiscovery EthernetType = 0x88cc
+ EthernetTypeEthernetCTP EthernetType = 0x9000
+)
+
+// IPProtocol is an enumeration of IP protocol values, and acts as a decoder
+// for any type it supports.
+type IPProtocol uint8
+
+const (
+ IPProtocolIPv6HopByHop IPProtocol = 0
+ IPProtocolICMPv4 IPProtocol = 1
+ IPProtocolIGMP IPProtocol = 2
+ IPProtocolIPv4 IPProtocol = 4
+ IPProtocolTCP IPProtocol = 6
+ IPProtocolUDP IPProtocol = 17
+ IPProtocolRUDP IPProtocol = 27
+ IPProtocolIPv6 IPProtocol = 41
+ IPProtocolIPv6Routing IPProtocol = 43
+ IPProtocolIPv6Fragment IPProtocol = 44
+ IPProtocolGRE IPProtocol = 47
+ IPProtocolESP IPProtocol = 50
+ IPProtocolAH IPProtocol = 51
+ IPProtocolICMPv6 IPProtocol = 58
+ IPProtocolNoNextHeader IPProtocol = 59
+ IPProtocolIPv6Destination IPProtocol = 60
+ IPProtocolOSPF IPProtocol = 89
+ IPProtocolIPIP IPProtocol = 94
+ IPProtocolEtherIP IPProtocol = 97
+ IPProtocolVRRP IPProtocol = 112
+ IPProtocolSCTP IPProtocol = 132
+ IPProtocolUDPLite IPProtocol = 136
+ IPProtocolMPLSInIP IPProtocol = 137
+)
+
+// LinkType is an enumeration of link types, and acts as a decoder for any
+// link type it supports.
+type LinkType uint8
+
+const (
+ // According to pcap-linktype(7) and http://www.tcpdump.org/linktypes.html
+ LinkTypeNull LinkType = 0
+ LinkTypeEthernet LinkType = 1
+ LinkTypeAX25 LinkType = 3
+ LinkTypeTokenRing LinkType = 6
+ LinkTypeArcNet LinkType = 7
+ LinkTypeSLIP LinkType = 8
+ LinkTypePPP LinkType = 9
+ LinkTypeFDDI LinkType = 10
+ LinkTypePPP_HDLC LinkType = 50
+ LinkTypePPPEthernet LinkType = 51
+ LinkTypeATM_RFC1483 LinkType = 100
+ LinkTypeRaw LinkType = 101
+ LinkTypeC_HDLC LinkType = 104
+ LinkTypeIEEE802_11 LinkType = 105
+ LinkTypeFRelay LinkType = 107
+ LinkTypeLoop LinkType = 108
+ LinkTypeLinuxSLL LinkType = 113
+ LinkTypeLTalk LinkType = 114
+ LinkTypePFLog LinkType = 117
+ LinkTypePrismHeader LinkType = 119
+ LinkTypeIPOverFC LinkType = 122
+ LinkTypeSunATM LinkType = 123
+ LinkTypeIEEE80211Radio LinkType = 127
+ LinkTypeARCNetLinux LinkType = 129
+ LinkTypeIPOver1394 LinkType = 138
+ LinkTypeMTP2Phdr LinkType = 139
+ LinkTypeMTP2 LinkType = 140
+ LinkTypeMTP3 LinkType = 141
+ LinkTypeSCCP LinkType = 142
+ LinkTypeDOCSIS LinkType = 143
+ LinkTypeLinuxIRDA LinkType = 144
+ LinkTypeLinuxLAPD LinkType = 177
+ LinkTypeLinuxUSB LinkType = 220
+ LinkTypeIPv4 LinkType = 228
+ LinkTypeIPv6 LinkType = 229
+)
+
+// PPPoECode is the PPPoE code enum, taken from http://tools.ietf.org/html/rfc2516
+type PPPoECode uint8
+
+const (
+ PPPoECodePADI PPPoECode = 0x09
+ PPPoECodePADO PPPoECode = 0x07
+ PPPoECodePADR PPPoECode = 0x19
+ PPPoECodePADS PPPoECode = 0x65
+ PPPoECodePADT PPPoECode = 0xA7
+ PPPoECodeSession PPPoECode = 0x00
+)
+
+// PPPType is an enumeration of PPP type values, and acts as a decoder for any
+// type it supports.
+type PPPType uint16
+
+const (
+ PPPTypeIPv4 PPPType = 0x0021
+ PPPTypeIPv6 PPPType = 0x0057
+ PPPTypeMPLSUnicast PPPType = 0x0281
+ PPPTypeMPLSMulticast PPPType = 0x0283
+)
+
+// SCTPChunkType is an enumeration of chunk types inside SCTP packets.
+type SCTPChunkType uint8
+
+const (
+ SCTPChunkTypeData SCTPChunkType = 0
+ SCTPChunkTypeInit SCTPChunkType = 1
+ SCTPChunkTypeInitAck SCTPChunkType = 2
+ SCTPChunkTypeSack SCTPChunkType = 3
+ SCTPChunkTypeHeartbeat SCTPChunkType = 4
+ SCTPChunkTypeHeartbeatAck SCTPChunkType = 5
+ SCTPChunkTypeAbort SCTPChunkType = 6
+ SCTPChunkTypeShutdown SCTPChunkType = 7
+ SCTPChunkTypeShutdownAck SCTPChunkType = 8
+ SCTPChunkTypeError SCTPChunkType = 9
+ SCTPChunkTypeCookieEcho SCTPChunkType = 10
+ SCTPChunkTypeCookieAck SCTPChunkType = 11
+ SCTPChunkTypeShutdownComplete SCTPChunkType = 14
+)
+
+// FDDIFrameControl is an enumeration of FDDI frame control bytes.
+type FDDIFrameControl uint8
+
+const (
+ FDDIFrameControlLLC FDDIFrameControl = 0x50
+)
+
+// EAPOLType is an enumeration of EAPOL packet types.
+type EAPOLType uint8
+
+const (
+ EAPOLTypeEAP EAPOLType = 0
+ EAPOLTypeStart EAPOLType = 1
+ EAPOLTypeLogOff EAPOLType = 2
+ EAPOLTypeKey EAPOLType = 3
+ EAPOLTypeASFAlert EAPOLType = 4
+)
+
+// ProtocolFamily is the set of values defined as PF_* in sys/socket.h
+type ProtocolFamily uint8
+
+const (
+ ProtocolFamilyIPv4 ProtocolFamily = 2
+	// BSDs use different values for INET6... glory be. These values are
+	// taken from tcpdump 4.3.0.
+ ProtocolFamilyIPv6BSD ProtocolFamily = 24
+ ProtocolFamilyIPv6FreeBSD ProtocolFamily = 28
+ ProtocolFamilyIPv6Darwin ProtocolFamily = 30
+ ProtocolFamilyIPv6Linux ProtocolFamily = 10
+)
+
+// Dot11Type is a combination of IEEE 802.11 frame's Type and Subtype fields.
+// By combining these two fields together into a single type, we're able to
+// provide a String function that correctly displays the subtype given the
+// top-level type.
+//
+// If you just care about the top-level type, use the MainType function.
+type Dot11Type uint8
+
+// MainType strips the subtype information from the given type,
+// returning just the overarching type (Mgmt, Ctrl, Data, Reserved).
+func (d Dot11Type) MainType() Dot11Type {
+ return d & dot11TypeMask
+}
+
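+// QOS reports whether the type is one of the QoS data subtypes,
+// i.e. its main type is Data and the QoS bit (0x20) is set.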
+func (d Dot11Type) QOS() bool {
+ return d&dot11QOSMask == Dot11TypeDataQOSData
+}
+
+const (
+ Dot11TypeMgmt Dot11Type = 0x00
+ Dot11TypeCtrl Dot11Type = 0x01
+ Dot11TypeData Dot11Type = 0x02
+ Dot11TypeReserved Dot11Type = 0x03
+ dot11TypeMask = 0x03
+ dot11QOSMask = 0x23
+
+ // The following are type/subtype conglomerations.
+
+ // Management
+ Dot11TypeMgmtAssociationReq Dot11Type = 0x00
+ Dot11TypeMgmtAssociationResp Dot11Type = 0x04
+ Dot11TypeMgmtReassociationReq Dot11Type = 0x08
+ Dot11TypeMgmtReassociationResp Dot11Type = 0x0c
+ Dot11TypeMgmtProbeReq Dot11Type = 0x10
+ Dot11TypeMgmtProbeResp Dot11Type = 0x14
+ Dot11TypeMgmtMeasurementPilot Dot11Type = 0x18
+ Dot11TypeMgmtBeacon Dot11Type = 0x20
+ Dot11TypeMgmtATIM Dot11Type = 0x24
+ Dot11TypeMgmtDisassociation Dot11Type = 0x28
+ Dot11TypeMgmtAuthentication Dot11Type = 0x2c
+ Dot11TypeMgmtDeauthentication Dot11Type = 0x30
+ Dot11TypeMgmtAction Dot11Type = 0x34
+ Dot11TypeMgmtActionNoAck Dot11Type = 0x38
+
+ // Control
+ Dot11TypeCtrlWrapper Dot11Type = 0x1d
+ Dot11TypeCtrlBlockAckReq Dot11Type = 0x21
+ Dot11TypeCtrlBlockAck Dot11Type = 0x25
+ Dot11TypeCtrlPowersavePoll Dot11Type = 0x29
+ Dot11TypeCtrlRTS Dot11Type = 0x2d
+ Dot11TypeCtrlCTS Dot11Type = 0x31
+ Dot11TypeCtrlAck Dot11Type = 0x35
+ Dot11TypeCtrlCFEnd Dot11Type = 0x39
+ Dot11TypeCtrlCFEndAck Dot11Type = 0x3d
+
+ // Data
+ Dot11TypeDataCFAck Dot11Type = 0x06
+ Dot11TypeDataCFPoll Dot11Type = 0x0a
+ Dot11TypeDataCFAckPoll Dot11Type = 0x0e
+ Dot11TypeDataNull Dot11Type = 0x12
+ Dot11TypeDataCFAckNoData Dot11Type = 0x16
+ Dot11TypeDataCFPollNoData Dot11Type = 0x1a
+ Dot11TypeDataCFAckPollNoData Dot11Type = 0x1e
+ Dot11TypeDataQOSData Dot11Type = 0x22
+ Dot11TypeDataQOSDataCFAck Dot11Type = 0x26
+ Dot11TypeDataQOSDataCFPoll Dot11Type = 0x2a
+ Dot11TypeDataQOSDataCFAckPoll Dot11Type = 0x2e
+ Dot11TypeDataQOSNull Dot11Type = 0x32
+ Dot11TypeDataQOSCFPollNoData Dot11Type = 0x3a
+ Dot11TypeDataQOSCFAckPollNoData Dot11Type = 0x3e
+)
+
+// Decode a raw v4 or v6 IP packet.
+func decodeIPv4or6(data []byte, p gopacket.PacketBuilder) error {
+ version := data[0] >> 4
+ switch version {
+ case 4:
+ return decodeIPv4(data, p)
+ case 6:
+ return decodeIPv6(data, p)
+ }
+ return fmt.Errorf("Invalid IP packet version %v", version)
+}
+
+func initActualTypeData() {
+ // Each of the XXXTypeMetadata arrays contains mappings of how to handle enum
+ // values for various enum types in gopacket/layers.
+ // These arrays are actually created by gen2.go and stored in
+ // enums_generated.go.
+ //
+ // So, EthernetTypeMetadata[2] contains information on how to handle EthernetType
+ // 2, including which name to give it and which decoder to use to decode
+ // packet data of that type. These arrays are filled by default with all of the
+ // protocols gopacket/layers knows how to handle, but users of the library can
+ // add new decoders or override existing ones. For example, if you write a better
+ // TCP decoder, you can override IPProtocolMetadata[IPProtocolTCP].DecodeWith
+ // with your new decoder, and all gopacket/layers decoding will use your new
+ // decoder whenever they encounter that IPProtocol.
+
+ // Here we link up all enumerations with their respective names and decoders.
+ EthernetTypeMetadata[EthernetTypeLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC", LayerType: LayerTypeLLC}
+ EthernetTypeMetadata[EthernetTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ EthernetTypeMetadata[EthernetTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ EthernetTypeMetadata[EthernetTypeARP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeARP), Name: "ARP", LayerType: LayerTypeARP}
+ EthernetTypeMetadata[EthernetTypeDot1Q] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+ EthernetTypeMetadata[EthernetTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP", LayerType: LayerTypePPP}
+ EthernetTypeMetadata[EthernetTypePPPoEDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoEDiscovery", LayerType: LayerTypePPPoE}
+ EthernetTypeMetadata[EthernetTypePPPoESession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoESession", LayerType: LayerTypePPPoE}
+ EthernetTypeMetadata[EthernetTypeEthernetCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernetCTP), Name: "EthernetCTP", LayerType: LayerTypeEthernetCTP}
+ EthernetTypeMetadata[EthernetTypeCiscoDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeCiscoDiscovery), Name: "CiscoDiscovery", LayerType: LayerTypeCiscoDiscovery}
+ EthernetTypeMetadata[EthernetTypeNortelDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeNortelDiscovery), Name: "NortelDiscovery", LayerType: LayerTypeNortelDiscovery}
+ EthernetTypeMetadata[EthernetTypeLinkLayerDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinkLayerDiscovery), Name: "LinkLayerDiscovery", LayerType: LayerTypeLinkLayerDiscovery}
+ EthernetTypeMetadata[EthernetTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast", LayerType: LayerTypeMPLS}
+ EthernetTypeMetadata[EthernetTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast", LayerType: LayerTypeMPLS}
+ EthernetTypeMetadata[EthernetTypeEAPOL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOL), Name: "EAPOL", LayerType: LayerTypeEAPOL}
+ EthernetTypeMetadata[EthernetTypeQinQ] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+ EthernetTypeMetadata[EthernetTypeTransparentEthernetBridging] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "TransparentEthernetBridging", LayerType: LayerTypeEthernet}
+
+ IPProtocolMetadata[IPProtocolIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ IPProtocolMetadata[IPProtocolTCP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeTCP), Name: "TCP", LayerType: LayerTypeTCP}
+ IPProtocolMetadata[IPProtocolUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDP), Name: "UDP", LayerType: LayerTypeUDP}
+ IPProtocolMetadata[IPProtocolICMPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv4), Name: "ICMPv4", LayerType: LayerTypeICMPv4}
+ IPProtocolMetadata[IPProtocolICMPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv6), Name: "ICMPv6", LayerType: LayerTypeICMPv6}
+ IPProtocolMetadata[IPProtocolSCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTP), Name: "SCTP", LayerType: LayerTypeSCTP}
+ IPProtocolMetadata[IPProtocolIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ IPProtocolMetadata[IPProtocolIPIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ IPProtocolMetadata[IPProtocolEtherIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEtherIP), Name: "EtherIP", LayerType: LayerTypeEtherIP}
+ IPProtocolMetadata[IPProtocolRUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRUDP), Name: "RUDP", LayerType: LayerTypeRUDP}
+ IPProtocolMetadata[IPProtocolGRE] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeGRE), Name: "GRE", LayerType: LayerTypeGRE}
+ IPProtocolMetadata[IPProtocolIPv6HopByHop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6HopByHop), Name: "IPv6HopByHop", LayerType: LayerTypeIPv6HopByHop}
+ IPProtocolMetadata[IPProtocolIPv6Routing] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Routing), Name: "IPv6Routing", LayerType: LayerTypeIPv6Routing}
+ IPProtocolMetadata[IPProtocolIPv6Fragment] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Fragment), Name: "IPv6Fragment", LayerType: LayerTypeIPv6Fragment}
+ IPProtocolMetadata[IPProtocolIPv6Destination] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Destination), Name: "IPv6Destination", LayerType: LayerTypeIPv6Destination}
+ IPProtocolMetadata[IPProtocolOSPF] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeOSPF), Name: "OSPF", LayerType: LayerTypeOSPF}
+ IPProtocolMetadata[IPProtocolAH] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecAH), Name: "IPSecAH", LayerType: LayerTypeIPSecAH}
+ IPProtocolMetadata[IPProtocolESP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecESP), Name: "IPSecESP", LayerType: LayerTypeIPSecESP}
+ IPProtocolMetadata[IPProtocolUDPLite] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDPLite), Name: "UDPLite", LayerType: LayerTypeUDPLite}
+ IPProtocolMetadata[IPProtocolMPLSInIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLS", LayerType: LayerTypeMPLS}
+ IPProtocolMetadata[IPProtocolNoNextHeader] = EnumMetadata{DecodeWith: gopacket.DecodePayload, Name: "NoNextHeader", LayerType: gopacket.LayerTypePayload}
+ IPProtocolMetadata[IPProtocolIGMP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIGMP), Name: "IGMP", LayerType: LayerTypeIGMP}
+ IPProtocolMetadata[IPProtocolVRRP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeVRRP), Name: "VRRP", LayerType: LayerTypeVRRP}
+
+ SCTPChunkTypeMetadata[SCTPChunkTypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPData), Name: "Data"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeInit] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "Init"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeInitAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "InitAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeSack] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPSack), Name: "Sack"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeat] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "Heartbeat"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeatAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "HeartbeatAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeAbort] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Abort"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeError] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Error"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeShutdown] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdown), Name: "Shutdown"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeShutdownAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdownAck), Name: "ShutdownAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeCookieEcho] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPCookieEcho), Name: "CookieEcho"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeCookieAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "CookieAck"}
+ SCTPChunkTypeMetadata[SCTPChunkTypeShutdownComplete] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "ShutdownComplete"}
+
+ PPPTypeMetadata[PPPTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4"}
+ PPPTypeMetadata[PPPTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6"}
+ PPPTypeMetadata[PPPTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast"}
+ PPPTypeMetadata[PPPTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast"}
+
+ PPPoECodeMetadata[PPPoECodeSession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+
+ LinkTypeMetadata[LinkTypeEthernet] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "Ethernet"}
+ LinkTypeMetadata[LinkTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+ LinkTypeMetadata[LinkTypeFDDI] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeFDDI), Name: "FDDI"}
+ LinkTypeMetadata[LinkTypeNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Null"}
+	LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "802.11"}
+	LinkTypeMetadata[LinkTypeLoop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Loop"}
+ LinkTypeMetadata[LinkTypeRaw] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+ // See https://github.com/the-tcpdump-group/libpcap/blob/170f717e6e818cdc4bcbbfd906b63088eaa88fa0/pcap/dlt.h#L85
+ // Or https://github.com/wireshark/wireshark/blob/854cfe53efe44080609c78053ecfb2342ad84a08/wiretap/pcap-common.c#L508
+ if runtime.GOOS == "openbsd" {
+ LinkTypeMetadata[14] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+ } else {
+ LinkTypeMetadata[12] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+ }
+ LinkTypeMetadata[LinkTypePFLog] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePFLog), Name: "PFLog"}
+ LinkTypeMetadata[LinkTypeIEEE80211Radio] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRadioTap), Name: "RadioTap"}
+ LinkTypeMetadata[LinkTypeLinuxUSB] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSB), Name: "USB"}
+ LinkTypeMetadata[LinkTypeLinuxSLL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinuxSLL), Name: "Linux SLL"}
+ LinkTypeMetadata[LinkTypePrismHeader] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePrismHeader), Name: "Prism"}
+
+ FDDIFrameControlMetadata[FDDIFrameControlLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC"}
+
+ EAPOLTypeMetadata[EAPOLTypeEAP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAP), Name: "EAP", LayerType: LayerTypeEAP}
+ EAPOLTypeMetadata[EAPOLTypeKey] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOLKey), Name: "EAPOLKey", LayerType: LayerTypeEAPOLKey}
+
+ ProtocolFamilyMetadata[ProtocolFamilyIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6BSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6FreeBSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6Darwin] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+ ProtocolFamilyMetadata[ProtocolFamilyIPv6Linux] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+
+ Dot11TypeMetadata[Dot11TypeMgmtAssociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq), Name: "MgmtAssociationReq", LayerType: LayerTypeDot11MgmtAssociationReq}
+ Dot11TypeMetadata[Dot11TypeMgmtAssociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp), Name: "MgmtAssociationResp", LayerType: LayerTypeDot11MgmtAssociationResp}
+ Dot11TypeMetadata[Dot11TypeMgmtReassociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq), Name: "MgmtReassociationReq", LayerType: LayerTypeDot11MgmtReassociationReq}
+ Dot11TypeMetadata[Dot11TypeMgmtReassociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp), Name: "MgmtReassociationResp", LayerType: LayerTypeDot11MgmtReassociationResp}
+ Dot11TypeMetadata[Dot11TypeMgmtProbeReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeReq), Name: "MgmtProbeReq", LayerType: LayerTypeDot11MgmtProbeReq}
+ Dot11TypeMetadata[Dot11TypeMgmtProbeResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeResp), Name: "MgmtProbeResp", LayerType: LayerTypeDot11MgmtProbeResp}
+ Dot11TypeMetadata[Dot11TypeMgmtMeasurementPilot] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot), Name: "MgmtMeasurementPilot", LayerType: LayerTypeDot11MgmtMeasurementPilot}
+ Dot11TypeMetadata[Dot11TypeMgmtBeacon] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtBeacon), Name: "MgmtBeacon", LayerType: LayerTypeDot11MgmtBeacon}
+ Dot11TypeMetadata[Dot11TypeMgmtATIM] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtATIM), Name: "MgmtATIM", LayerType: LayerTypeDot11MgmtATIM}
+ Dot11TypeMetadata[Dot11TypeMgmtDisassociation] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDisassociation), Name: "MgmtDisassociation", LayerType: LayerTypeDot11MgmtDisassociation}
+ Dot11TypeMetadata[Dot11TypeMgmtAuthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAuthentication), Name: "MgmtAuthentication", LayerType: LayerTypeDot11MgmtAuthentication}
+ Dot11TypeMetadata[Dot11TypeMgmtDeauthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication), Name: "MgmtDeauthentication", LayerType: LayerTypeDot11MgmtDeauthentication}
+ Dot11TypeMetadata[Dot11TypeMgmtAction] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAction), Name: "MgmtAction", LayerType: LayerTypeDot11MgmtAction}
+ Dot11TypeMetadata[Dot11TypeMgmtActionNoAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck), Name: "MgmtActionNoAck", LayerType: LayerTypeDot11MgmtActionNoAck}
+ Dot11TypeMetadata[Dot11TypeCtrl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "Ctrl", LayerType: LayerTypeDot11Ctrl}
+ Dot11TypeMetadata[Dot11TypeCtrlWrapper] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "CtrlWrapper", LayerType: LayerTypeDot11Ctrl}
+ Dot11TypeMetadata[Dot11TypeCtrlBlockAckReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq), Name: "CtrlBlockAckReq", LayerType: LayerTypeDot11CtrlBlockAckReq}
+ Dot11TypeMetadata[Dot11TypeCtrlBlockAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAck), Name: "CtrlBlockAck", LayerType: LayerTypeDot11CtrlBlockAck}
+ Dot11TypeMetadata[Dot11TypeCtrlPowersavePoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll), Name: "CtrlPowersavePoll", LayerType: LayerTypeDot11CtrlPowersavePoll}
+ Dot11TypeMetadata[Dot11TypeCtrlRTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlRTS), Name: "CtrlRTS", LayerType: LayerTypeDot11CtrlRTS}
+ Dot11TypeMetadata[Dot11TypeCtrlCTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCTS), Name: "CtrlCTS", LayerType: LayerTypeDot11CtrlCTS}
+ Dot11TypeMetadata[Dot11TypeCtrlAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlAck), Name: "CtrlAck", LayerType: LayerTypeDot11CtrlAck}
+ Dot11TypeMetadata[Dot11TypeCtrlCFEnd] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEnd), Name: "CtrlCFEnd", LayerType: LayerTypeDot11CtrlCFEnd}
+ Dot11TypeMetadata[Dot11TypeCtrlCFEndAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck), Name: "CtrlCFEndAck", LayerType: LayerTypeDot11CtrlCFEndAck}
+ Dot11TypeMetadata[Dot11TypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Data), Name: "Data", LayerType: LayerTypeDot11Data}
+ Dot11TypeMetadata[Dot11TypeDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAck), Name: "DataCFAck", LayerType: LayerTypeDot11DataCFAck}
+ Dot11TypeMetadata[Dot11TypeDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPoll), Name: "DataCFPoll", LayerType: LayerTypeDot11DataCFPoll}
+ Dot11TypeMetadata[Dot11TypeDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPoll), Name: "DataCFAckPoll", LayerType: LayerTypeDot11DataCFAckPoll}
+ Dot11TypeMetadata[Dot11TypeDataNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataNull), Name: "DataNull", LayerType: LayerTypeDot11DataNull}
+ Dot11TypeMetadata[Dot11TypeDataCFAckNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckNoData), Name: "DataCFAckNoData", LayerType: LayerTypeDot11DataCFAckNoData}
+ Dot11TypeMetadata[Dot11TypeDataCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPollNoData), Name: "DataCFPollNoData", LayerType: LayerTypeDot11DataCFPollNoData}
+ Dot11TypeMetadata[Dot11TypeDataCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPollNoData), Name: "DataCFAckPollNoData", LayerType: LayerTypeDot11DataCFAckPollNoData}
+ Dot11TypeMetadata[Dot11TypeDataQOSData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSData), Name: "DataQOSData", LayerType: LayerTypeDot11DataQOSData}
+ Dot11TypeMetadata[Dot11TypeDataQOSDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck), Name: "DataQOSDataCFAck", LayerType: LayerTypeDot11DataQOSDataCFAck}
+ Dot11TypeMetadata[Dot11TypeDataQOSDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll), Name: "DataQOSDataCFPoll", LayerType: LayerTypeDot11DataQOSDataCFPoll}
+ Dot11TypeMetadata[Dot11TypeDataQOSDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll), Name: "DataQOSDataCFAckPoll", LayerType: LayerTypeDot11DataQOSDataCFAckPoll}
+ Dot11TypeMetadata[Dot11TypeDataQOSNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSNull), Name: "DataQOSNull", LayerType: LayerTypeDot11DataQOSNull}
+ Dot11TypeMetadata[Dot11TypeDataQOSCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData), Name: "DataQOSCFPollNoData", LayerType: LayerTypeDot11DataQOSCFPollNoData}
+ Dot11TypeMetadata[Dot11TypeDataQOSCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData), Name: "DataQOSCFAckPollNoData", LayerType: LayerTypeDot11DataQOSCFAckPollNoData}
+
+ USBTransportTypeMetadata[USBTransportTypeInterrupt] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBInterrupt), Name: "Interrupt", LayerType: LayerTypeUSBInterrupt}
+ USBTransportTypeMetadata[USBTransportTypeControl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBControl), Name: "Control", LayerType: LayerTypeUSBControl}
+ USBTransportTypeMetadata[USBTransportTypeBulk] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBBulk), Name: "Bulk", LayerType: LayerTypeUSBBulk}
+}
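As the comment in initActualTypeData notes, the metadata arrays are plain package-level variables, so callers can override an entry to swap in their own decoder. A hedged sketch of wrapping the stock TCP decoder with a packet counter (illustrative only; the names are ours, not part of the vendored diff):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Keep the original decoder so we can delegate to it.
	orig := layers.IPProtocolMetadata[layers.IPProtocolTCP].DecodeWith

	var tcpFrames int
	layers.IPProtocolMetadata[layers.IPProtocolTCP].DecodeWith = gopacket.DecodeFunc(
		func(data []byte, p gopacket.PacketBuilder) error {
			tcpFrames++ // count every TCP payload we are asked to decode
			return orig.Decode(data, p)
		})

	fmt.Println("TCP decoder wrapped; frames seen so far:", tcpFrames)
}
```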
diff --git a/vendor/github.com/google/gopacket/layers/enums_generated.go b/vendor/github.com/google/gopacket/layers/enums_generated.go
new file mode 100644
index 0000000..bf77aac
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums_generated.go
@@ -0,0 +1,434 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen2.go, don't edit manually
+// Generated at 2017-10-23 10:20:24.458771856 -0600 MDT m=+0.001159033
+
+import (
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+func init() {
+ initUnknownTypesForLinkType()
+ initUnknownTypesForEthernetType()
+ initUnknownTypesForPPPType()
+ initUnknownTypesForIPProtocol()
+ initUnknownTypesForSCTPChunkType()
+ initUnknownTypesForPPPoECode()
+ initUnknownTypesForFDDIFrameControl()
+ initUnknownTypesForEAPOLType()
+ initUnknownTypesForProtocolFamily()
+ initUnknownTypesForDot11Type()
+ initUnknownTypesForUSBTransportType()
+ initActualTypeData()
+}
+
+// Decode calls LinkTypeMetadata.DecodeWith's decoder.
+func (a LinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return LinkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns LinkTypeMetadata.Name.
+func (a LinkType) String() string {
+ return LinkTypeMetadata[a].Name
+}
+
+// LayerType returns LinkTypeMetadata.LayerType.
+func (a LinkType) LayerType() gopacket.LayerType {
+ return LinkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForLinkType int
+
+func (a *errorDecoderForLinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForLinkType) Error() string {
+ return fmt.Sprintf("Unable to decode LinkType %d", int(*a))
+}
+
+var errorDecodersForLinkType [256]errorDecoderForLinkType
+var LinkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForLinkType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForLinkType[i] = errorDecoderForLinkType(i)
+ LinkTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForLinkType[i],
+ Name: "UnknownLinkType",
+ }
+ }
+}
+
+// Decode calls EthernetTypeMetadata.DecodeWith's decoder.
+func (a EthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return EthernetTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EthernetTypeMetadata.Name.
+func (a EthernetType) String() string {
+ return EthernetTypeMetadata[a].Name
+}
+
+// LayerType returns EthernetTypeMetadata.LayerType.
+func (a EthernetType) LayerType() gopacket.LayerType {
+ return EthernetTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEthernetType int
+
+func (a *errorDecoderForEthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForEthernetType) Error() string {
+ return fmt.Sprintf("Unable to decode EthernetType %d", int(*a))
+}
+
+var errorDecodersForEthernetType [65536]errorDecoderForEthernetType
+var EthernetTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForEthernetType() {
+ for i := 0; i < 65536; i++ {
+ errorDecodersForEthernetType[i] = errorDecoderForEthernetType(i)
+ EthernetTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForEthernetType[i],
+ Name: "UnknownEthernetType",
+ }
+ }
+}
+
+// Decode calls PPPTypeMetadata.DecodeWith's decoder.
+func (a PPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return PPPTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPTypeMetadata.Name.
+func (a PPPType) String() string {
+ return PPPTypeMetadata[a].Name
+}
+
+// LayerType returns PPPTypeMetadata.LayerType.
+func (a PPPType) LayerType() gopacket.LayerType {
+ return PPPTypeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPType int
+
+func (a *errorDecoderForPPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForPPPType) Error() string {
+ return fmt.Sprintf("Unable to decode PPPType %d", int(*a))
+}
+
+var errorDecodersForPPPType [65536]errorDecoderForPPPType
+var PPPTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForPPPType() {
+ for i := 0; i < 65536; i++ {
+ errorDecodersForPPPType[i] = errorDecoderForPPPType(i)
+ PPPTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForPPPType[i],
+ Name: "UnknownPPPType",
+ }
+ }
+}
+
+// Decode calls IPProtocolMetadata.DecodeWith's decoder.
+func (a IPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return IPProtocolMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns IPProtocolMetadata.Name.
+func (a IPProtocol) String() string {
+ return IPProtocolMetadata[a].Name
+}
+
+// LayerType returns IPProtocolMetadata.LayerType.
+func (a IPProtocol) LayerType() gopacket.LayerType {
+ return IPProtocolMetadata[a].LayerType
+}
+
+type errorDecoderForIPProtocol int
+
+func (a *errorDecoderForIPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForIPProtocol) Error() string {
+ return fmt.Sprintf("Unable to decode IPProtocol %d", int(*a))
+}
+
+var errorDecodersForIPProtocol [256]errorDecoderForIPProtocol
+var IPProtocolMetadata [256]EnumMetadata
+
+func initUnknownTypesForIPProtocol() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForIPProtocol[i] = errorDecoderForIPProtocol(i)
+ IPProtocolMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForIPProtocol[i],
+ Name: "UnknownIPProtocol",
+ }
+ }
+}
+
+// Decode calls SCTPChunkTypeMetadata.DecodeWith's decoder.
+func (a SCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return SCTPChunkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns SCTPChunkTypeMetadata.Name.
+func (a SCTPChunkType) String() string {
+ return SCTPChunkTypeMetadata[a].Name
+}
+
+// LayerType returns SCTPChunkTypeMetadata.LayerType.
+func (a SCTPChunkType) LayerType() gopacket.LayerType {
+ return SCTPChunkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForSCTPChunkType int
+
+func (a *errorDecoderForSCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForSCTPChunkType) Error() string {
+ return fmt.Sprintf("Unable to decode SCTPChunkType %d", int(*a))
+}
+
+var errorDecodersForSCTPChunkType [256]errorDecoderForSCTPChunkType
+var SCTPChunkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForSCTPChunkType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForSCTPChunkType[i] = errorDecoderForSCTPChunkType(i)
+ SCTPChunkTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForSCTPChunkType[i],
+ Name: "UnknownSCTPChunkType",
+ }
+ }
+}
+
+// Decode calls PPPoECodeMetadata.DecodeWith's decoder.
+func (a PPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return PPPoECodeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPoECodeMetadata.Name.
+func (a PPPoECode) String() string {
+ return PPPoECodeMetadata[a].Name
+}
+
+// LayerType returns PPPoECodeMetadata.LayerType.
+func (a PPPoECode) LayerType() gopacket.LayerType {
+ return PPPoECodeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPoECode int
+
+func (a *errorDecoderForPPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForPPPoECode) Error() string {
+ return fmt.Sprintf("Unable to decode PPPoECode %d", int(*a))
+}
+
+var errorDecodersForPPPoECode [256]errorDecoderForPPPoECode
+var PPPoECodeMetadata [256]EnumMetadata
+
+func initUnknownTypesForPPPoECode() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForPPPoECode[i] = errorDecoderForPPPoECode(i)
+ PPPoECodeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForPPPoECode[i],
+ Name: "UnknownPPPoECode",
+ }
+ }
+}
+
+// Decode calls FDDIFrameControlMetadata.DecodeWith's decoder.
+func (a FDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return FDDIFrameControlMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns FDDIFrameControlMetadata.Name.
+func (a FDDIFrameControl) String() string {
+ return FDDIFrameControlMetadata[a].Name
+}
+
+// LayerType returns FDDIFrameControlMetadata.LayerType.
+func (a FDDIFrameControl) LayerType() gopacket.LayerType {
+ return FDDIFrameControlMetadata[a].LayerType
+}
+
+type errorDecoderForFDDIFrameControl int
+
+func (a *errorDecoderForFDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForFDDIFrameControl) Error() string {
+ return fmt.Sprintf("Unable to decode FDDIFrameControl %d", int(*a))
+}
+
+var errorDecodersForFDDIFrameControl [256]errorDecoderForFDDIFrameControl
+var FDDIFrameControlMetadata [256]EnumMetadata
+
+func initUnknownTypesForFDDIFrameControl() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForFDDIFrameControl[i] = errorDecoderForFDDIFrameControl(i)
+ FDDIFrameControlMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForFDDIFrameControl[i],
+ Name: "UnknownFDDIFrameControl",
+ }
+ }
+}
+
+// Decode calls EAPOLTypeMetadata.DecodeWith's decoder.
+func (a EAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return EAPOLTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EAPOLTypeMetadata.Name.
+func (a EAPOLType) String() string {
+ return EAPOLTypeMetadata[a].Name
+}
+
+// LayerType returns EAPOLTypeMetadata.LayerType.
+func (a EAPOLType) LayerType() gopacket.LayerType {
+ return EAPOLTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEAPOLType int
+
+func (a *errorDecoderForEAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForEAPOLType) Error() string {
+ return fmt.Sprintf("Unable to decode EAPOLType %d", int(*a))
+}
+
+var errorDecodersForEAPOLType [256]errorDecoderForEAPOLType
+var EAPOLTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForEAPOLType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForEAPOLType[i] = errorDecoderForEAPOLType(i)
+ EAPOLTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForEAPOLType[i],
+ Name: "UnknownEAPOLType",
+ }
+ }
+}
+
+// Decode calls ProtocolFamilyMetadata.DecodeWith's decoder.
+func (a ProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return ProtocolFamilyMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns ProtocolFamilyMetadata.Name.
+func (a ProtocolFamily) String() string {
+ return ProtocolFamilyMetadata[a].Name
+}
+
+// LayerType returns ProtocolFamilyMetadata.LayerType.
+func (a ProtocolFamily) LayerType() gopacket.LayerType {
+ return ProtocolFamilyMetadata[a].LayerType
+}
+
+type errorDecoderForProtocolFamily int
+
+func (a *errorDecoderForProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForProtocolFamily) Error() string {
+ return fmt.Sprintf("Unable to decode ProtocolFamily %d", int(*a))
+}
+
+var errorDecodersForProtocolFamily [256]errorDecoderForProtocolFamily
+var ProtocolFamilyMetadata [256]EnumMetadata
+
+func initUnknownTypesForProtocolFamily() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForProtocolFamily[i] = errorDecoderForProtocolFamily(i)
+ ProtocolFamilyMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForProtocolFamily[i],
+ Name: "UnknownProtocolFamily",
+ }
+ }
+}
+
+// Decode calls Dot11TypeMetadata.DecodeWith's decoder.
+func (a Dot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return Dot11TypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns Dot11TypeMetadata.Name.
+func (a Dot11Type) String() string {
+ return Dot11TypeMetadata[a].Name
+}
+
+// LayerType returns Dot11TypeMetadata.LayerType.
+func (a Dot11Type) LayerType() gopacket.LayerType {
+ return Dot11TypeMetadata[a].LayerType
+}
+
+type errorDecoderForDot11Type int
+
+func (a *errorDecoderForDot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForDot11Type) Error() string {
+ return fmt.Sprintf("Unable to decode Dot11Type %d", int(*a))
+}
+
+var errorDecodersForDot11Type [256]errorDecoderForDot11Type
+var Dot11TypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForDot11Type() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForDot11Type[i] = errorDecoderForDot11Type(i)
+ Dot11TypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForDot11Type[i],
+ Name: "UnknownDot11Type",
+ }
+ }
+}
+
+// Decode calls USBTransportTypeMetadata.DecodeWith's decoder.
+func (a USBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return USBTransportTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns USBTransportTypeMetadata.Name.
+func (a USBTransportType) String() string {
+ return USBTransportTypeMetadata[a].Name
+}
+
+// LayerType returns USBTransportTypeMetadata.LayerType.
+func (a USBTransportType) LayerType() gopacket.LayerType {
+ return USBTransportTypeMetadata[a].LayerType
+}
+
+type errorDecoderForUSBTransportType int
+
+func (a *errorDecoderForUSBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderForUSBTransportType) Error() string {
+ return fmt.Sprintf("Unable to decode USBTransportType %d", int(*a))
+}
+
+var errorDecodersForUSBTransportType [256]errorDecoderForUSBTransportType
+var USBTransportTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForUSBTransportType() {
+ for i := 0; i < 256; i++ {
+ errorDecodersForUSBTransportType[i] = errorDecoderForUSBTransportType(i)
+ USBTransportTypeMetadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersForUSBTransportType[i],
+ Name: "UnknownUSBTransportType",
+ }
+ }
+}
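The tables above give every enum value a decoder: initActualTypeData (whose call is emitted by gen2.go later in this diff) overwrites the entries for registered values, while every other index keeps the error decoder installed by the init loops. A minimal sketch of the resulting behaviour, assuming 0x1234 is an unregistered EthernetType:

package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	// 0x0800 is registered as IPv4, so String and LayerType are meaningful.
	fmt.Println(layers.EthernetType(0x0800))             // IPv4
	fmt.Println(layers.EthernetType(0x0800).LayerType()) // IPv4

	// An (assumed) unregistered value falls through to the error decoder:
	// String reports the placeholder name, and Decode would return
	// "Unable to decode EthernetType 4660".
	fmt.Println(layers.EthernetType(0x1234)) // UnknownEthernetType
}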
diff --git a/vendor/github.com/google/gopacket/layers/etherip.go b/vendor/github.com/google/gopacket/layers/etherip.go
new file mode 100644
index 0000000..5b7b722
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/etherip.go
@@ -0,0 +1,45 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+)
+
+// EtherIP is the struct for storing RFC 3378 EtherIP packet headers.
+type EtherIP struct {
+ BaseLayer
+ Version uint8
+ Reserved uint16
+}
+
+// LayerType returns gopacket.LayerTypeEtherIP.
+func (e *EtherIP) LayerType() gopacket.LayerType { return LayerTypeEtherIP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EtherIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ e.Version = data[0] >> 4
+ e.Reserved = binary.BigEndian.Uint16(data[:2]) & 0x0fff
+ e.BaseLayer = BaseLayer{data[:2], data[2:]}
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EtherIP) CanDecode() gopacket.LayerClass {
+ return LayerTypeEtherIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EtherIP) NextLayerType() gopacket.LayerType {
+ return LayerTypeEthernet
+}
+
+func decodeEtherIP(data []byte, p gopacket.PacketBuilder) error {
+ e := &EtherIP{}
+ return decodingLayerDecoder(e, data, p)
+}
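A small usage sketch for the layer above, with a hand-built two-byte header (version 3, reserved 0) in front of a zeroed inner Ethernet frame; real EtherIP traffic would come off a tunnel interface:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// 0x30 >> 4 == 3, so this decodes as EtherIP version 3.
	raw := append([]byte{0x30, 0x00}, make([]byte, 14)...)
	pkt := gopacket.NewPacket(raw, layers.LayerTypeEtherIP, gopacket.Default)
	if e, ok := pkt.Layer(layers.LayerTypeEtherIP).(*layers.EtherIP); ok {
		fmt.Println(e.Version, e.Reserved) // 3 0
	}
}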
diff --git a/vendor/github.com/google/gopacket/layers/ethernet.go b/vendor/github.com/google/gopacket/layers/ethernet.go
new file mode 100644
index 0000000..b73748f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ethernet.go
@@ -0,0 +1,123 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "github.com/google/gopacket"
+ "net"
+)
+
+// EthernetBroadcast is the broadcast MAC address used by Ethernet.
+var EthernetBroadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+// Ethernet is the layer for Ethernet frame headers.
+type Ethernet struct {
+ BaseLayer
+ SrcMAC, DstMAC net.HardwareAddr
+ EthernetType EthernetType
+	// Length is only set if a length field exists within this header. Ethernet
+	// headers follow two different standards: one uses an EthernetType, the
+	// other (802.3) defines a length that is followed by an LLC header. In the
+	// former case we set EthernetType and Length stays 0; in the latter case we
+	// set Length and EthernetType = EthernetTypeLLC.
+ Length uint16
+}
+
+// LayerType returns LayerTypeEthernet.
+func (e *Ethernet) LayerType() gopacket.LayerType { return LayerTypeEthernet }
+
+// LinkFlow returns a new flow of type EndpointMAC, from the source to the
+// destination MAC address.
+func (e *Ethernet) LinkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointMAC, e.SrcMAC, e.DstMAC)
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (eth *Ethernet) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 14 {
+ return errors.New("Ethernet packet too small")
+ }
+ eth.DstMAC = net.HardwareAddr(data[0:6])
+ eth.SrcMAC = net.HardwareAddr(data[6:12])
+ eth.EthernetType = EthernetType(binary.BigEndian.Uint16(data[12:14]))
+ eth.BaseLayer = BaseLayer{data[:14], data[14:]}
+ eth.Length = 0
+ if eth.EthernetType < 0x0600 {
+ eth.Length = uint16(eth.EthernetType)
+ eth.EthernetType = EthernetTypeLLC
+ if cmp := len(eth.Payload) - int(eth.Length); cmp < 0 {
+ df.SetTruncated()
+ } else if cmp > 0 {
+ // Strip off bytes at the end, since we have too many bytes
+ eth.Payload = eth.Payload[:len(eth.Payload)-cmp]
+ }
+ }
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (eth *Ethernet) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if len(eth.DstMAC) != 6 {
+ return fmt.Errorf("invalid dst MAC: %v", eth.DstMAC)
+ }
+ if len(eth.SrcMAC) != 6 {
+ return fmt.Errorf("invalid src MAC: %v", eth.SrcMAC)
+ }
+ payload := b.Bytes()
+ bytes, err := b.PrependBytes(14)
+ if err != nil {
+ return err
+ }
+ copy(bytes, eth.DstMAC)
+ copy(bytes[6:], eth.SrcMAC)
+ if eth.Length != 0 || eth.EthernetType == EthernetTypeLLC {
+ if opts.FixLengths {
+ eth.Length = uint16(len(payload))
+ }
+ if eth.EthernetType != EthernetTypeLLC {
+ return fmt.Errorf("ethernet type %v not compatible with length value %v", eth.EthernetType, eth.Length)
+ } else if eth.Length > 0x0600 {
+ return fmt.Errorf("invalid ethernet length %v", eth.Length)
+ }
+ binary.BigEndian.PutUint16(bytes[12:], eth.Length)
+ } else {
+ binary.BigEndian.PutUint16(bytes[12:], uint16(eth.EthernetType))
+ }
+ length := len(b.Bytes())
+ if length < 60 {
+ // Pad out to 60 bytes.
+ padding, err := b.AppendBytes(60 - length)
+ if err != nil {
+ return err
+ }
+ copy(padding, lotsOfZeros[:])
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (eth *Ethernet) CanDecode() gopacket.LayerClass {
+ return LayerTypeEthernet
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (eth *Ethernet) NextLayerType() gopacket.LayerType {
+ return eth.EthernetType.LayerType()
+}
+
+func decodeEthernet(data []byte, p gopacket.PacketBuilder) error {
+ eth := &Ethernet{}
+ err := eth.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(eth)
+ p.SetLinkLayer(eth)
+ return p.NextDecoder(eth.EthernetType)
+}
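To see SerializeTo's padding rule in action, here is a round-trip sketch (MAC addresses made up): with FixLengths set, the frame is padded out to Ethernet's 60-byte minimum.

package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	eth := &layers.Ethernet{
		SrcMAC:       net.HardwareAddr{0x02, 0x00, 0x00, 0x00, 0x00, 0x01},
		DstMAC:       net.HardwareAddr{0x02, 0x00, 0x00, 0x00, 0x00, 0x02},
		EthernetType: layers.EthernetTypeIPv4,
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true}
	// 14-byte header + 2-byte payload, then zero-padded by SerializeTo.
	if err := gopacket.SerializeLayers(buf, opts, eth, gopacket.Payload("hi")); err != nil {
		panic(err)
	}
	fmt.Println(len(buf.Bytes())) // 60: padded to the Ethernet minimum
}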
diff --git a/vendor/github.com/google/gopacket/layers/fddi.go b/vendor/github.com/google/gopacket/layers/fddi.go
new file mode 100644
index 0000000..ed9e195
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/fddi.go
@@ -0,0 +1,41 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+ "net"
+)
+
+// FDDI contains the header for FDDI frames.
+type FDDI struct {
+ BaseLayer
+ FrameControl FDDIFrameControl
+ Priority uint8
+ SrcMAC, DstMAC net.HardwareAddr
+}
+
+// LayerType returns LayerTypeFDDI.
+func (f *FDDI) LayerType() gopacket.LayerType { return LayerTypeFDDI }
+
+// LinkFlow returns a new flow of type EndpointMAC.
+func (f *FDDI) LinkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointMAC, f.SrcMAC, f.DstMAC)
+}
+
+func decodeFDDI(data []byte, p gopacket.PacketBuilder) error {
+ f := &FDDI{
+ FrameControl: FDDIFrameControl(data[0] & 0xF8),
+ Priority: data[0] & 0x07,
+ SrcMAC: net.HardwareAddr(data[1:7]),
+ DstMAC: net.HardwareAddr(data[7:13]),
+ BaseLayer: BaseLayer{data[:13], data[13:]},
+ }
+ p.SetLinkLayer(f)
+ p.AddLayer(f)
+ return p.NextDecoder(f.FrameControl)
+}
diff --git a/vendor/github.com/google/gopacket/layers/gen.go b/vendor/github.com/google/gopacket/layers/gen.go
new file mode 100644
index 0000000..ab7a0c0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+// This binary pulls known ports from IANA, and uses them to populate
+// iana_ports.go's TCPPortNames, UDPPortNames, and SCTPPortNames maps.
+//
+// go run gen.go | gofmt > iana_ports.go
+package main
+
+import (
+ "bytes"
+ "encoding/xml"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+)
+
+const fmtString = `// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen.go, don't edit manually
+// Generated at %s
+// Fetched from %q
+
+// TCPPortNames contains the port names for all TCP ports.
+var TCPPortNames = tcpPortNames
+
+// UDPPortNames contains the port names for all UDP ports.
+var UDPPortNames = udpPortNames
+
+// SCTPPortNames contains the port names for all SCTP ports.
+var SCTPPortNames = sctpPortNames
+
+var tcpPortNames = map[TCPPort]string{
+%s}
+var udpPortNames = map[UDPPort]string{
+%s}
+var sctpPortNames = map[SCTPPort]string{
+%s}
+`
+
+var url = flag.String("url", "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml", "URL to grab port numbers from")
+
+func main() {
+ fmt.Fprintf(os.Stderr, "Fetching ports from %q\n", *url)
+ resp, err := http.Get(*url)
+ if err != nil {
+ panic(err)
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Fprintln(os.Stderr, "Parsing XML")
+ var registry struct {
+ Records []struct {
+ Protocol string `xml:"protocol"`
+ Number string `xml:"number"`
+ Name string `xml:"name"`
+ } `xml:"record"`
+ }
+	xml.Unmarshal(body, &registry)
+ var tcpPorts bytes.Buffer
+ var udpPorts bytes.Buffer
+ var sctpPorts bytes.Buffer
+ done := map[string]map[int]bool{
+ "tcp": map[int]bool{},
+ "udp": map[int]bool{},
+ "sctp": map[int]bool{},
+ }
+ for _, r := range registry.Records {
+ port, err := strconv.Atoi(r.Number)
+ if err != nil {
+ continue
+ }
+ if r.Name == "" {
+ continue
+ }
+ var b *bytes.Buffer
+ switch r.Protocol {
+ case "tcp":
+ b = &tcpPorts
+ case "udp":
+ b = &udpPorts
+ case "sctp":
+ b = &sctpPorts
+ default:
+ continue
+ }
+ if done[r.Protocol][port] {
+ continue
+ }
+ done[r.Protocol][port] = true
+ fmt.Fprintf(b, "\t%d: %q,\n", port, r.Name)
+ }
+ fmt.Fprintln(os.Stderr, "Writing results to stdout")
+ fmt.Printf(fmtString, time.Now(), *url, tcpPorts.String(), udpPorts.String(), sctpPorts.String())
+}
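The generated maps are plain Go maps keyed by the port types, so lookups are direct; a quick sketch against two entries visible in iana_ports.go below:

package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	fmt.Println(layers.TCPPortNames[layers.TCPPort(80)])  // http
	fmt.Println(layers.TCPPortNames[layers.TCPPort(443)]) // https
}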
diff --git a/vendor/github.com/google/gopacket/layers/gen2.go b/vendor/github.com/google/gopacket/layers/gen2.go
new file mode 100644
index 0000000..150cad7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen2.go
@@ -0,0 +1,104 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+// This binary handles creating string constants and function templates for enums.
+//
+// go run gen2.go | gofmt > enums_generated.go
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "text/template"
+ "time"
+)
+
+const fmtString = `// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen2.go, don't edit manually
+// Generated at %s
+
+import (
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+`
+
+var funcsTmpl = template.Must(template.New("foo").Parse(`
+// Decode calls {{.Name}}Metadata.DecodeWith's decoder.
+func (a {{.Name}}) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return {{.Name}}Metadata[a].DecodeWith.Decode(data, p)
+}
+// String returns {{.Name}}Metadata.Name.
+func (a {{.Name}}) String() string {
+ return {{.Name}}Metadata[a].Name
+}
+// LayerType returns {{.Name}}Metadata.LayerType.
+func (a {{.Name}}) LayerType() gopacket.LayerType {
+ return {{.Name}}Metadata[a].LayerType
+}
+
+type errorDecoderFor{{.Name}} int
+func (a *errorDecoderFor{{.Name}}) Decode(data []byte, p gopacket.PacketBuilder) error {
+ return a
+}
+func (a *errorDecoderFor{{.Name}}) Error() string {
+ return fmt.Sprintf("Unable to decode {{.Name}} %d", int(*a))
+}
+
+var errorDecodersFor{{.Name}} [{{.Num}}]errorDecoderFor{{.Name}}
+var {{.Name}}Metadata [{{.Num}}]EnumMetadata
+
+func initUnknownTypesFor{{.Name}}() {
+ for i := 0; i < {{.Num}}; i++ {
+ errorDecodersFor{{.Name}}[i] = errorDecoderFor{{.Name}}(i)
+ {{.Name}}Metadata[i] = EnumMetadata{
+ DecodeWith: &errorDecodersFor{{.Name}}[i],
+ Name: "Unknown{{.Name}}",
+ }
+ }
+}
+`))
+
+func main() {
+ fmt.Fprintf(os.Stderr, "Writing results to stdout\n")
+ fmt.Printf(fmtString, time.Now())
+ types := []struct {
+ Name string
+ Num int
+ }{
+ {"LinkType", 256},
+ {"EthernetType", 65536},
+ {"PPPType", 65536},
+ {"IPProtocol", 256},
+ {"SCTPChunkType", 256},
+ {"PPPoECode", 256},
+ {"FDDIFrameControl", 256},
+ {"EAPOLType", 256},
+ {"ProtocolFamily", 256},
+ {"Dot11Type", 256},
+ {"USBTransportType", 256},
+ }
+
+ fmt.Println("func init() {")
+ for _, t := range types {
+ fmt.Printf("initUnknownTypesFor%s()\n", t.Name)
+ }
+ fmt.Println("initActualTypeData()")
+ fmt.Println("}")
+ for _, t := range types {
+ if err := funcsTmpl.Execute(os.Stdout, t); err != nil {
+ log.Fatalf("Failed to execute template %s: %v", t.Name, err)
+ }
+ }
+}
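gen2.go leans entirely on text/template; a self-contained sketch of the same pattern, substituting the Name and Num fields of a struct into a template body:

package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.Must(template.New("enum").Parse(
		"var {{.Name}}Metadata [{{.Num}}]EnumMetadata\n"))
	// Prints: var DemoTypeMetadata [256]EnumMetadata
	if err := t.Execute(os.Stdout, struct {
		Name string
		Num  int
	}{"DemoType", 256}); err != nil {
		panic(err)
	}
}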
diff --git a/vendor/github.com/google/gopacket/layers/gen_linted.sh b/vendor/github.com/google/gopacket/layers/gen_linted.sh
new file mode 100644
index 0000000..75c701f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen_linted.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+for i in *.go; do golint $i | grep -q . || echo $i; done > .linted
diff --git a/vendor/github.com/google/gopacket/layers/geneve.go b/vendor/github.com/google/gopacket/layers/geneve.go
new file mode 100644
index 0000000..72fe7c7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/geneve.go
@@ -0,0 +1,110 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// Geneve is specified here https://tools.ietf.org/html/draft-ietf-nvo3-geneve-03
+// Geneve Header:
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Ver| Opt Len |O|C| Rsvd. | Protocol Type |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Virtual Network Identifier (VNI) | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Variable Length Options |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type Geneve struct {
+ BaseLayer
+ Version uint8 // 2 bits
+ OptionsLength uint8 // 6 bits
+	OAMPacket      bool         // 1 bit
+	CriticalOption bool         // 1 bit
+	Protocol       EthernetType // 16 bits
+	VNI            uint32       // 24 bits
+ Options []*GeneveOption
+}
+
+// Geneve Tunnel Options
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Option Class | Type |R|R|R| Length |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Variable Option Data |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type GeneveOption struct {
+ Class uint16 // 16 bits
+ Type uint8 // 8 bits
+ Flags uint8 // 3 bits
+ Length uint8 // 5 bits
+ Data []byte
+}
+
+// LayerType returns LayerTypeGeneve.
+func (gn *Geneve) LayerType() gopacket.LayerType { return LayerTypeGeneve }
+
+func decodeGeneveOption(data []byte, gn *Geneve) (*GeneveOption, uint8) {
+ opt := &GeneveOption{}
+
+ opt.Class = binary.BigEndian.Uint16(data[0:2])
+ opt.Type = data[2]
+ opt.Flags = data[3] >> 4
+ opt.Length = (data[3]&0xf)*4 + 4
+
+ opt.Data = make([]byte, opt.Length-4)
+ copy(opt.Data, data[4:opt.Length])
+
+ return opt, opt.Length
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (gn *Geneve) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 7 {
+ df.SetTruncated()
+ return errors.New("geneve packet too short")
+ }
+
+ gn.Version = data[0] >> 7
+ gn.OptionsLength = (data[0] & 0x3f) * 4
+
+ gn.OAMPacket = data[1]&0x80 > 0
+ gn.CriticalOption = data[1]&0x40 > 0
+ gn.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+
+ var buf [4]byte
+ copy(buf[1:], data[4:7])
+ gn.VNI = binary.BigEndian.Uint32(buf[:])
+
+ offset, length := uint8(8), int32(gn.OptionsLength)
+ if len(data) < int(length+7) {
+ df.SetTruncated()
+ return errors.New("geneve packet too short")
+ }
+
+ for length > 0 {
+ opt, len := decodeGeneveOption(data[offset:], gn)
+ gn.Options = append(gn.Options, opt)
+
+ length -= int32(len)
+ offset += len
+ }
+
+ gn.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+
+ return nil
+}
+
+// NextLayerType returns the layer type indicated by the Geneve Protocol field.
+func (gn *Geneve) NextLayerType() gopacket.LayerType {
+ return gn.Protocol.LayerType()
+}
+
+func decodeGeneve(data []byte, p gopacket.PacketBuilder) error {
+ gn := &Geneve{}
+ return decodingLayerDecoder(gn, data, p)
+}
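A decode sketch for the layer above, with a hand-built 8-byte header and no options: VNI 0x0a0b0c, protocol 0x6558 (transparent Ethernet bridging). gopacket.NilDecodeFeedback stands in for a real packet builder.

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	raw := []byte{0x00, 0x00, 0x65, 0x58, 0x0a, 0x0b, 0x0c, 0x00}
	g := &layers.Geneve{}
	if err := g.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Printf("VNI=%#x next=%v\n", g.VNI, g.NextLayerType()) // VNI=0xa0b0c next=Ethernet
}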
diff --git a/vendor/github.com/google/gopacket/layers/gre.go b/vendor/github.com/google/gopacket/layers/gre.go
new file mode 100644
index 0000000..9c5e7d2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gre.go
@@ -0,0 +1,200 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+
+ "github.com/google/gopacket"
+)
+
+// GRE is a Generic Routing Encapsulation header.
+type GRE struct {
+ BaseLayer
+ ChecksumPresent, RoutingPresent, KeyPresent, SeqPresent, StrictSourceRoute, AckPresent bool
+ RecursionControl, Flags, Version uint8
+ Protocol EthernetType
+ Checksum, Offset uint16
+ Key, Seq, Ack uint32
+ *GRERouting
+}
+
+// GRERouting is GRE routing information, present if the RoutingPresent flag is
+// set.
+type GRERouting struct {
+ AddressFamily uint16
+ SREOffset, SRELength uint8
+ RoutingInformation []byte
+ Next *GRERouting
+}
+
+// LayerType returns gopacket.LayerTypeGRE.
+func (g *GRE) LayerType() gopacket.LayerType { return LayerTypeGRE }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (g *GRE) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ g.ChecksumPresent = data[0]&0x80 != 0
+ g.RoutingPresent = data[0]&0x40 != 0
+ g.KeyPresent = data[0]&0x20 != 0
+ g.SeqPresent = data[0]&0x10 != 0
+ g.StrictSourceRoute = data[0]&0x08 != 0
+ g.AckPresent = data[1]&0x80 != 0
+ g.RecursionControl = data[0] & 0x7
+ g.Flags = data[1] >> 3
+ g.Version = data[1] & 0x7
+ g.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+ offset := 4
+ if g.ChecksumPresent || g.RoutingPresent {
+ g.Checksum = binary.BigEndian.Uint16(data[offset : offset+2])
+ g.Offset = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+ offset += 4
+ }
+ if g.KeyPresent {
+ g.Key = binary.BigEndian.Uint32(data[offset : offset+4])
+ offset += 4
+ }
+ if g.SeqPresent {
+ g.Seq = binary.BigEndian.Uint32(data[offset : offset+4])
+ offset += 4
+ }
+ if g.RoutingPresent {
+ tail := &g.GRERouting
+ for {
+ sre := &GRERouting{
+ AddressFamily: binary.BigEndian.Uint16(data[offset : offset+2]),
+ SREOffset: data[offset+2],
+ SRELength: data[offset+3],
+ }
+ sre.RoutingInformation = data[offset+4 : offset+4+int(sre.SRELength)]
+ offset += 4 + int(sre.SRELength)
+ if sre.AddressFamily == 0 && sre.SRELength == 0 {
+ break
+ }
+ (*tail) = sre
+ tail = &sre.Next
+ }
+ }
+ if g.AckPresent {
+ g.Ack = binary.BigEndian.Uint32(data[offset : offset+4])
+ offset += 4
+ }
+ g.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializationBuffer,
+// implementing gopacket.SerializableLayer. See the docs for gopacket.SerializableLayer for more info.
+func (g *GRE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ size := 4
+ if g.ChecksumPresent || g.RoutingPresent {
+ size += 4
+ }
+ if g.KeyPresent {
+ size += 4
+ }
+ if g.SeqPresent {
+ size += 4
+ }
+ if g.RoutingPresent {
+ r := g.GRERouting
+ for r != nil {
+ size += 4 + int(r.SRELength)
+ r = r.Next
+ }
+ size += 4
+ }
+ if g.AckPresent {
+ size += 4
+ }
+ buf, err := b.PrependBytes(size)
+ if err != nil {
+ return err
+ }
+ // Reset any potentially dirty memory in the first 2 bytes, as these use OR to set flags.
+ buf[0] = 0
+ buf[1] = 0
+ if g.ChecksumPresent {
+ buf[0] |= 0x80
+ }
+ if g.RoutingPresent {
+ buf[0] |= 0x40
+ }
+ if g.KeyPresent {
+ buf[0] |= 0x20
+ }
+ if g.SeqPresent {
+ buf[0] |= 0x10
+ }
+ if g.StrictSourceRoute {
+ buf[0] |= 0x08
+ }
+ if g.AckPresent {
+ buf[1] |= 0x80
+ }
+ buf[0] |= g.RecursionControl
+ buf[1] |= g.Flags << 3
+ buf[1] |= g.Version
+ binary.BigEndian.PutUint16(buf[2:4], uint16(g.Protocol))
+ offset := 4
+ if g.ChecksumPresent || g.RoutingPresent {
+ // Don't write the checksum value yet, as we may need to compute it,
+ // which requires the entire header be complete.
+ // Instead we zeroize the memory in case it is dirty.
+ buf[offset] = 0
+ buf[offset+1] = 0
+ binary.BigEndian.PutUint16(buf[offset+2:offset+4], g.Offset)
+ offset += 4
+ }
+ if g.KeyPresent {
+ binary.BigEndian.PutUint32(buf[offset:offset+4], g.Key)
+ offset += 4
+ }
+ if g.SeqPresent {
+ binary.BigEndian.PutUint32(buf[offset:offset+4], g.Seq)
+ offset += 4
+ }
+ if g.RoutingPresent {
+ sre := g.GRERouting
+ for sre != nil {
+ binary.BigEndian.PutUint16(buf[offset:offset+2], sre.AddressFamily)
+ buf[offset+2] = sre.SREOffset
+ buf[offset+3] = sre.SRELength
+ copy(buf[offset+4:offset+4+int(sre.SRELength)], sre.RoutingInformation)
+ offset += 4 + int(sre.SRELength)
+ sre = sre.Next
+ }
+ // Terminate routing field with a "NULL" SRE.
+ binary.BigEndian.PutUint32(buf[offset:offset+4], 0)
+ }
+ if g.AckPresent {
+ binary.BigEndian.PutUint32(buf[offset:offset+4], g.Ack)
+ offset += 4
+ }
+ if g.ChecksumPresent {
+ if opts.ComputeChecksums {
+ g.Checksum = tcpipChecksum(b.Bytes(), 0)
+ }
+
+ binary.BigEndian.PutUint16(buf[4:6], g.Checksum)
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (g *GRE) CanDecode() gopacket.LayerClass {
+ return LayerTypeGRE
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (g *GRE) NextLayerType() gopacket.LayerType {
+ return g.Protocol.LayerType()
+}
+
+func decodeGRE(data []byte, p gopacket.PacketBuilder) error {
+ g := &GRE{}
+ return decodingLayerDecoder(g, data, p)
+}
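A serialization sketch for the checksum path above: with ChecksumPresent and ComputeChecksums set, SerializeTo leaves the checksum field zeroed while building the header, then fills it in over the finished buffer.

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	gre := &layers.GRE{
		ChecksumPresent: true,
		Protocol:        layers.EthernetTypeIPv4,
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{ComputeChecksums: true}
	payload := gopacket.Payload([]byte{0xde, 0xad, 0xbe, 0xef})
	if err := gopacket.SerializeLayers(buf, opts, gre, payload); err != nil {
		panic(err)
	}
	// Byte 0 carries the 0x80 checksum-present flag; bytes 4-5 the checksum.
	fmt.Printf("% x\n", buf.Bytes())
}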
diff --git a/vendor/github.com/google/gopacket/layers/gtp.go b/vendor/github.com/google/gopacket/layers/gtp.go
new file mode 100644
index 0000000..0ec8a6a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gtp.go
@@ -0,0 +1,181 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+const gtpMinimumSizeInBytes int = 8
+
+// GTPExtensionHeader is used to carry extra data and enable future extensions of the GTP without the need to use another version number.
+type GTPExtensionHeader struct {
+ Type uint8
+ Content []byte
+}
+
+// GTPv1U protocol is used to exchange user data over GTP tunnels across the Sx interfaces.
+// Defined in https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=1595
+type GTPv1U struct {
+ BaseLayer
+ Version uint8
+ ProtocolType uint8
+ Reserved uint8
+ ExtensionHeaderFlag bool
+ SequenceNumberFlag bool
+ NPDUFlag bool
+ MessageType uint8
+ MessageLength uint16
+ TEID uint32
+ SequenceNumber uint16
+ NPDU uint8
+ GTPExtensionHeaders []GTPExtensionHeader
+}
+
+// LayerType returns LayerTypeGTPv1U.
+func (g *GTPv1U) LayerType() gopacket.LayerType { return LayerTypeGTPv1U }
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a GTPv1U packet
+func (g *GTPv1U) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ hLen := gtpMinimumSizeInBytes
+ dLen := len(data)
+ if dLen < hLen {
+ return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+ }
+ g.Version = (data[0] >> 5) & 0x07
+ g.ProtocolType = (data[0] >> 4) & 0x01
+ g.Reserved = (data[0] >> 3) & 0x01
+ g.SequenceNumberFlag = ((data[0] >> 1) & 0x01) == 1
+ g.NPDUFlag = (data[0] & 0x01) == 1
+ g.ExtensionHeaderFlag = ((data[0] >> 2) & 0x01) == 1
+ g.MessageType = data[1]
+ g.MessageLength = binary.BigEndian.Uint16(data[2:4])
+ pLen := 8 + g.MessageLength
+ if uint16(dLen) < pLen {
+ return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+ }
+ // Field used to multiplex different connections in the same GTP tunnel.
+ g.TEID = binary.BigEndian.Uint32(data[4:8])
+ cIndex := uint16(hLen)
+ if g.SequenceNumberFlag || g.NPDUFlag || g.ExtensionHeaderFlag {
+ hLen += 4
+ cIndex += 4
+ if dLen < hLen {
+ return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+ }
+ if g.SequenceNumberFlag {
+ g.SequenceNumber = binary.BigEndian.Uint16(data[8:10])
+ }
+ if g.NPDUFlag {
+ g.NPDU = data[10]
+ }
+ if g.ExtensionHeaderFlag {
+ extensionFlag := true
+ for extensionFlag {
+ extensionType := uint8(data[cIndex-1])
+ extensionLength := uint(data[cIndex])
+ if extensionLength == 0 {
+ return fmt.Errorf("GTP packet with invalid extension header")
+ }
+ // extensionLength is in 4-octet units
+ lIndex := cIndex + (uint16(extensionLength) * 4)
+ if uint16(dLen) < lIndex {
+ return fmt.Errorf("GTP packet with small extension header: %d bytes", dLen)
+ }
+ content := data[cIndex+1 : lIndex-1]
+ eh := GTPExtensionHeader{Type: extensionType, Content: content}
+ g.GTPExtensionHeaders = append(g.GTPExtensionHeaders, eh)
+ cIndex = lIndex
+			// Check whether the following bytes belong to another extension header
+ extensionFlag = data[cIndex-1] != 0
+
+ }
+ }
+ }
+ g.BaseLayer = BaseLayer{Contents: data[:cIndex], Payload: data[cIndex:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (g *GTPv1U) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ data, err := b.PrependBytes(gtpMinimumSizeInBytes)
+ if err != nil {
+ return err
+ }
+ data[0] |= (g.Version << 5)
+ data[0] |= (1 << 4)
+ if len(g.GTPExtensionHeaders) > 0 {
+ data[0] |= 0x04
+ g.ExtensionHeaderFlag = true
+ }
+ if g.SequenceNumberFlag {
+ data[0] |= 0x02
+ }
+ if g.NPDUFlag {
+ data[0] |= 0x01
+ }
+ data[1] = g.MessageType
+ binary.BigEndian.PutUint16(data[2:4], g.MessageLength)
+ binary.BigEndian.PutUint32(data[4:8], g.TEID)
+ if g.ExtensionHeaderFlag || g.SequenceNumberFlag || g.NPDUFlag {
+ data, err := b.AppendBytes(4)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(data[:2], g.SequenceNumber)
+ data[2] = g.NPDU
+ for _, eh := range g.GTPExtensionHeaders {
+ data[len(data)-1] = eh.Type
+ lContent := len(eh.Content)
+ // extensionLength is in 4-octet units
+ extensionLength := (lContent + 2) / 4
+			// Reserve two extra bytes for the next extension header's type and length
+ data, err = b.AppendBytes(lContent + 2)
+ if err != nil {
+ return err
+ }
+ data[0] = byte(extensionLength)
+ copy(data[1:lContent+1], eh.Content)
+ }
+ }
+	return nil
+}
+
+// CanDecode returns a set of layers that GTP objects can decode.
+func (g *GTPv1U) CanDecode() gopacket.LayerClass {
+ return LayerTypeGTPv1U
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// decode after this one, based on the IP version nibble of the first
+// payload byte.
+func (g *GTPv1U) NextLayerType() gopacket.LayerType {
+ version := uint8(g.LayerPayload()[0]) >> 4
+ if version == 4 {
+ return LayerTypeIPv4
+ } else if version == 6 {
+ return LayerTypeIPv6
+ } else {
+ return LayerTypePPP
+ }
+}
+
+func decodeGTPv1u(data []byte, p gopacket.PacketBuilder) error {
+	gtp := &GTPv1U{}
+ err := gtp.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(gtp)
+ return p.NextDecoder(gtp.NextLayerType())
+}
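A decode sketch for the layer above, with a hand-built G-PDU: version 1, no optional fields, TEID 42, and a single payload byte 0x45 so NextLayerType infers IPv4 from the version nibble.

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// 0x30 = version 1, protocol type 1, no S/PN/E flags; message type 0xff
	// (G-PDU); length 1; TEID 0x0000002a; one payload byte.
	raw := []byte{0x30, 0xff, 0x00, 0x01, 0x00, 0x00, 0x00, 0x2a, 0x45}
	g := &layers.GTPv1U{}
	if err := g.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Println(g.Version, g.MessageType, g.TEID) // 1 255 42
	fmt.Println(g.NextLayerType())                // IPv4
}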
diff --git a/vendor/github.com/google/gopacket/layers/iana_ports.go b/vendor/github.com/google/gopacket/layers/iana_ports.go
new file mode 100644
index 0000000..ddcf3ec
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/iana_ports.go
@@ -0,0 +1,11351 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen.go, don't edit manually
+// Generated at 2017-10-23 09:57:28.214859163 -0600 MDT m=+1.011679290
+// Fetched from "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml"
+
+// TCPPortNames contains the port names for all TCP ports.
+var TCPPortNames = tcpPortNames
+
+// UDPPortNames contains the port names for all UDP ports.
+var UDPPortNames = udpPortNames
+
+// SCTPPortNames contains the port names for all SCTP ports.
+var SCTPPortNames = sctpPortNames
+
+var tcpPortNames = map[TCPPort]string{
+ 1: "tcpmux",
+ 2: "compressnet",
+ 3: "compressnet",
+ 5: "rje",
+ 7: "echo",
+ 9: "discard",
+ 11: "systat",
+ 13: "daytime",
+ 17: "qotd",
+ 18: "msp",
+ 19: "chargen",
+ 20: "ftp-data",
+ 21: "ftp",
+ 22: "ssh",
+ 23: "telnet",
+ 25: "smtp",
+ 27: "nsw-fe",
+ 29: "msg-icp",
+ 31: "msg-auth",
+ 33: "dsp",
+ 37: "time",
+ 38: "rap",
+ 39: "rlp",
+ 41: "graphics",
+ 42: "name",
+ 43: "nicname",
+ 44: "mpm-flags",
+ 45: "mpm",
+ 46: "mpm-snd",
+ 48: "auditd",
+ 49: "tacacs",
+ 50: "re-mail-ck",
+ 52: "xns-time",
+ 53: "domain",
+ 54: "xns-ch",
+ 55: "isi-gl",
+ 56: "xns-auth",
+ 58: "xns-mail",
+ 62: "acas",
+ 63: "whoispp",
+ 64: "covia",
+ 65: "tacacs-ds",
+ 66: "sql-net",
+ 67: "bootps",
+ 68: "bootpc",
+ 69: "tftp",
+ 70: "gopher",
+ 71: "netrjs-1",
+ 72: "netrjs-2",
+ 73: "netrjs-3",
+ 74: "netrjs-4",
+ 76: "deos",
+ 78: "vettcp",
+ 79: "finger",
+ 80: "http",
+ 82: "xfer",
+ 83: "mit-ml-dev",
+ 84: "ctf",
+ 85: "mit-ml-dev",
+ 86: "mfcobol",
+ 88: "kerberos",
+ 89: "su-mit-tg",
+ 90: "dnsix",
+ 91: "mit-dov",
+ 92: "npp",
+ 93: "dcp",
+ 94: "objcall",
+ 95: "supdup",
+ 96: "dixie",
+ 97: "swift-rvf",
+ 98: "tacnews",
+ 99: "metagram",
+ 101: "hostname",
+ 102: "iso-tsap",
+ 103: "gppitnp",
+ 104: "acr-nema",
+ 105: "cso",
+ 106: "3com-tsmux",
+ 107: "rtelnet",
+ 108: "snagas",
+ 109: "pop2",
+ 110: "pop3",
+ 111: "sunrpc",
+ 112: "mcidas",
+ 113: "ident",
+ 115: "sftp",
+ 116: "ansanotify",
+ 117: "uucp-path",
+ 118: "sqlserv",
+ 119: "nntp",
+ 120: "cfdptkt",
+ 121: "erpc",
+ 122: "smakynet",
+ 123: "ntp",
+ 124: "ansatrader",
+ 125: "locus-map",
+ 126: "nxedit",
+ 127: "locus-con",
+ 128: "gss-xlicen",
+ 129: "pwdgen",
+ 130: "cisco-fna",
+ 131: "cisco-tna",
+ 132: "cisco-sys",
+ 133: "statsrv",
+ 134: "ingres-net",
+ 135: "epmap",
+ 136: "profile",
+ 137: "netbios-ns",
+ 138: "netbios-dgm",
+ 139: "netbios-ssn",
+ 140: "emfis-data",
+ 141: "emfis-cntl",
+ 142: "bl-idm",
+ 143: "imap",
+ 144: "uma",
+ 145: "uaac",
+ 146: "iso-tp0",
+ 147: "iso-ip",
+ 148: "jargon",
+ 149: "aed-512",
+ 150: "sql-net",
+ 151: "hems",
+ 152: "bftp",
+ 153: "sgmp",
+ 154: "netsc-prod",
+ 155: "netsc-dev",
+ 156: "sqlsrv",
+ 157: "knet-cmp",
+ 158: "pcmail-srv",
+ 159: "nss-routing",
+ 160: "sgmp-traps",
+ 161: "snmp",
+ 162: "snmptrap",
+ 163: "cmip-man",
+ 164: "cmip-agent",
+ 165: "xns-courier",
+ 166: "s-net",
+ 167: "namp",
+ 168: "rsvd",
+ 169: "send",
+ 170: "print-srv",
+ 171: "multiplex",
+ 172: "cl-1",
+ 173: "xyplex-mux",
+ 174: "mailq",
+ 175: "vmnet",
+ 176: "genrad-mux",
+ 177: "xdmcp",
+ 178: "nextstep",
+ 179: "bgp",
+ 180: "ris",
+ 181: "unify",
+ 182: "audit",
+ 183: "ocbinder",
+ 184: "ocserver",
+ 185: "remote-kis",
+ 186: "kis",
+ 187: "aci",
+ 188: "mumps",
+ 189: "qft",
+ 190: "gacp",
+ 191: "prospero",
+ 192: "osu-nms",
+ 193: "srmp",
+ 194: "irc",
+ 195: "dn6-nlm-aud",
+ 196: "dn6-smm-red",
+ 197: "dls",
+ 198: "dls-mon",
+ 199: "smux",
+ 200: "src",
+ 201: "at-rtmp",
+ 202: "at-nbp",
+ 203: "at-3",
+ 204: "at-echo",
+ 205: "at-5",
+ 206: "at-zis",
+ 207: "at-7",
+ 208: "at-8",
+ 209: "qmtp",
+ 210: "z39-50",
+ 211: "914c-g",
+ 212: "anet",
+ 213: "ipx",
+ 214: "vmpwscs",
+ 215: "softpc",
+ 216: "CAIlic",
+ 217: "dbase",
+ 218: "mpp",
+ 219: "uarps",
+ 220: "imap3",
+ 221: "fln-spx",
+ 222: "rsh-spx",
+ 223: "cdc",
+ 224: "masqdialer",
+ 242: "direct",
+ 243: "sur-meas",
+ 244: "inbusiness",
+ 245: "link",
+ 246: "dsp3270",
+ 247: "subntbcst-tftp",
+ 248: "bhfhs",
+ 256: "rap",
+ 257: "set",
+ 259: "esro-gen",
+ 260: "openport",
+ 261: "nsiiops",
+ 262: "arcisdms",
+ 263: "hdap",
+ 264: "bgmp",
+ 265: "x-bone-ctl",
+ 266: "sst",
+ 267: "td-service",
+ 268: "td-replica",
+ 269: "manet",
+ 271: "pt-tls",
+ 280: "http-mgmt",
+ 281: "personal-link",
+ 282: "cableport-ax",
+ 283: "rescap",
+ 284: "corerjd",
+ 286: "fxp",
+ 287: "k-block",
+ 308: "novastorbakcup",
+ 309: "entrusttime",
+ 310: "bhmds",
+ 311: "asip-webadmin",
+ 312: "vslmp",
+ 313: "magenta-logic",
+ 314: "opalis-robot",
+ 315: "dpsi",
+ 316: "decauth",
+ 317: "zannet",
+ 318: "pkix-timestamp",
+ 319: "ptp-event",
+ 320: "ptp-general",
+ 321: "pip",
+ 322: "rtsps",
+ 323: "rpki-rtr",
+ 324: "rpki-rtr-tls",
+ 333: "texar",
+ 344: "pdap",
+ 345: "pawserv",
+ 346: "zserv",
+ 347: "fatserv",
+ 348: "csi-sgwp",
+ 349: "mftp",
+ 350: "matip-type-a",
+ 351: "matip-type-b",
+ 352: "dtag-ste-sb",
+ 353: "ndsauth",
+ 354: "bh611",
+ 355: "datex-asn",
+ 356: "cloanto-net-1",
+ 357: "bhevent",
+ 358: "shrinkwrap",
+ 359: "nsrmp",
+ 360: "scoi2odialog",
+ 361: "semantix",
+ 362: "srssend",
+ 363: "rsvp-tunnel",
+ 364: "aurora-cmgr",
+ 365: "dtk",
+ 366: "odmr",
+ 367: "mortgageware",
+ 368: "qbikgdp",
+ 369: "rpc2portmap",
+ 370: "codaauth2",
+ 371: "clearcase",
+ 372: "ulistproc",
+ 373: "legent-1",
+ 374: "legent-2",
+ 375: "hassle",
+ 376: "nip",
+ 377: "tnETOS",
+ 378: "dsETOS",
+ 379: "is99c",
+ 380: "is99s",
+ 381: "hp-collector",
+ 382: "hp-managed-node",
+ 383: "hp-alarm-mgr",
+ 384: "arns",
+ 385: "ibm-app",
+ 386: "asa",
+ 387: "aurp",
+ 388: "unidata-ldm",
+ 389: "ldap",
+ 390: "uis",
+ 391: "synotics-relay",
+ 392: "synotics-broker",
+ 393: "meta5",
+ 394: "embl-ndt",
+ 395: "netcp",
+ 396: "netware-ip",
+ 397: "mptn",
+ 398: "kryptolan",
+ 399: "iso-tsap-c2",
+ 400: "osb-sd",
+ 401: "ups",
+ 402: "genie",
+ 403: "decap",
+ 404: "nced",
+ 405: "ncld",
+ 406: "imsp",
+ 407: "timbuktu",
+ 408: "prm-sm",
+ 409: "prm-nm",
+ 410: "decladebug",
+ 411: "rmt",
+ 412: "synoptics-trap",
+ 413: "smsp",
+ 414: "infoseek",
+ 415: "bnet",
+ 416: "silverplatter",
+ 417: "onmux",
+ 418: "hyper-g",
+ 419: "ariel1",
+ 420: "smpte",
+ 421: "ariel2",
+ 422: "ariel3",
+ 423: "opc-job-start",
+ 424: "opc-job-track",
+ 425: "icad-el",
+ 426: "smartsdp",
+ 427: "svrloc",
+ 428: "ocs-cmu",
+ 429: "ocs-amu",
+ 430: "utmpsd",
+ 431: "utmpcd",
+ 432: "iasd",
+ 433: "nnsp",
+ 434: "mobileip-agent",
+ 435: "mobilip-mn",
+ 436: "dna-cml",
+ 437: "comscm",
+ 438: "dsfgw",
+ 439: "dasp",
+ 440: "sgcp",
+ 441: "decvms-sysmgt",
+ 442: "cvc-hostd",
+ 443: "https",
+ 444: "snpp",
+ 445: "microsoft-ds",
+ 446: "ddm-rdb",
+ 447: "ddm-dfm",
+ 448: "ddm-ssl",
+ 449: "as-servermap",
+ 450: "tserver",
+ 451: "sfs-smp-net",
+ 452: "sfs-config",
+ 453: "creativeserver",
+ 454: "contentserver",
+ 455: "creativepartnr",
+ 456: "macon-tcp",
+ 457: "scohelp",
+ 458: "appleqtc",
+ 459: "ampr-rcmd",
+ 460: "skronk",
+ 461: "datasurfsrv",
+ 462: "datasurfsrvsec",
+ 463: "alpes",
+ 464: "kpasswd",
+ 465: "urd",
+ 466: "digital-vrc",
+ 467: "mylex-mapd",
+ 468: "photuris",
+ 469: "rcp",
+ 470: "scx-proxy",
+ 471: "mondex",
+ 472: "ljk-login",
+ 473: "hybrid-pop",
+ 474: "tn-tl-w1",
+ 475: "tcpnethaspsrv",
+ 476: "tn-tl-fd1",
+ 477: "ss7ns",
+ 478: "spsc",
+ 479: "iafserver",
+ 480: "iafdbase",
+ 481: "ph",
+ 482: "bgs-nsi",
+ 483: "ulpnet",
+ 484: "integra-sme",
+ 485: "powerburst",
+ 486: "avian",
+ 487: "saft",
+ 488: "gss-http",
+ 489: "nest-protocol",
+ 490: "micom-pfs",
+ 491: "go-login",
+ 492: "ticf-1",
+ 493: "ticf-2",
+ 494: "pov-ray",
+ 495: "intecourier",
+ 496: "pim-rp-disc",
+ 497: "retrospect",
+ 498: "siam",
+ 499: "iso-ill",
+ 500: "isakmp",
+ 501: "stmf",
+ 502: "mbap",
+ 503: "intrinsa",
+ 504: "citadel",
+ 505: "mailbox-lm",
+ 506: "ohimsrv",
+ 507: "crs",
+ 508: "xvttp",
+ 509: "snare",
+ 510: "fcp",
+ 511: "passgo",
+ 512: "exec",
+ 513: "login",
+ 514: "shell",
+ 515: "printer",
+ 516: "videotex",
+ 517: "talk",
+ 518: "ntalk",
+ 519: "utime",
+ 520: "efs",
+ 521: "ripng",
+ 522: "ulp",
+ 523: "ibm-db2",
+ 524: "ncp",
+ 525: "timed",
+ 526: "tempo",
+ 527: "stx",
+ 528: "custix",
+ 529: "irc-serv",
+ 530: "courier",
+ 531: "conference",
+ 532: "netnews",
+ 533: "netwall",
+ 534: "windream",
+ 535: "iiop",
+ 536: "opalis-rdv",
+ 537: "nmsp",
+ 538: "gdomap",
+ 539: "apertus-ldp",
+ 540: "uucp",
+ 541: "uucp-rlogin",
+ 542: "commerce",
+ 543: "klogin",
+ 544: "kshell",
+ 545: "appleqtcsrvr",
+ 546: "dhcpv6-client",
+ 547: "dhcpv6-server",
+ 548: "afpovertcp",
+ 549: "idfp",
+ 550: "new-rwho",
+ 551: "cybercash",
+ 552: "devshr-nts",
+ 553: "pirp",
+ 554: "rtsp",
+ 555: "dsf",
+ 556: "remotefs",
+ 557: "openvms-sysipc",
+ 558: "sdnskmp",
+ 559: "teedtap",
+ 560: "rmonitor",
+ 561: "monitor",
+ 562: "chshell",
+ 563: "nntps",
+ 564: "9pfs",
+ 565: "whoami",
+ 566: "streettalk",
+ 567: "banyan-rpc",
+ 568: "ms-shuttle",
+ 569: "ms-rome",
+ 570: "meter",
+ 571: "meter",
+ 572: "sonar",
+ 573: "banyan-vip",
+ 574: "ftp-agent",
+ 575: "vemmi",
+ 576: "ipcd",
+ 577: "vnas",
+ 578: "ipdd",
+ 579: "decbsrv",
+ 580: "sntp-heartbeat",
+ 581: "bdp",
+ 582: "scc-security",
+ 583: "philips-vc",
+ 584: "keyserver",
+ 586: "password-chg",
+ 587: "submission",
+ 588: "cal",
+ 589: "eyelink",
+ 590: "tns-cml",
+ 591: "http-alt",
+ 592: "eudora-set",
+ 593: "http-rpc-epmap",
+ 594: "tpip",
+ 595: "cab-protocol",
+ 596: "smsd",
+ 597: "ptcnameservice",
+ 598: "sco-websrvrmg3",
+ 599: "acp",
+ 600: "ipcserver",
+ 601: "syslog-conn",
+ 602: "xmlrpc-beep",
+ 603: "idxp",
+ 604: "tunnel",
+ 605: "soap-beep",
+ 606: "urm",
+ 607: "nqs",
+ 608: "sift-uft",
+ 609: "npmp-trap",
+ 610: "npmp-local",
+ 611: "npmp-gui",
+ 612: "hmmp-ind",
+ 613: "hmmp-op",
+ 614: "sshell",
+ 615: "sco-inetmgr",
+ 616: "sco-sysmgr",
+ 617: "sco-dtmgr",
+ 618: "dei-icda",
+ 619: "compaq-evm",
+ 620: "sco-websrvrmgr",
+ 621: "escp-ip",
+ 622: "collaborator",
+ 623: "oob-ws-http",
+ 624: "cryptoadmin",
+ 625: "dec-dlm",
+ 626: "asia",
+ 627: "passgo-tivoli",
+ 628: "qmqp",
+ 629: "3com-amp3",
+ 630: "rda",
+ 631: "ipp",
+ 632: "bmpp",
+ 633: "servstat",
+ 634: "ginad",
+ 635: "rlzdbase",
+ 636: "ldaps",
+ 637: "lanserver",
+ 638: "mcns-sec",
+ 639: "msdp",
+ 640: "entrust-sps",
+ 641: "repcmd",
+ 642: "esro-emsdp",
+ 643: "sanity",
+ 644: "dwr",
+ 645: "pssc",
+ 646: "ldp",
+ 647: "dhcp-failover",
+ 648: "rrp",
+ 649: "cadview-3d",
+ 650: "obex",
+ 651: "ieee-mms",
+ 652: "hello-port",
+ 653: "repscmd",
+ 654: "aodv",
+ 655: "tinc",
+ 656: "spmp",
+ 657: "rmc",
+ 658: "tenfold",
+ 660: "mac-srvr-admin",
+ 661: "hap",
+ 662: "pftp",
+ 663: "purenoise",
+ 664: "oob-ws-https",
+ 665: "sun-dr",
+ 666: "mdqs",
+ 667: "disclose",
+ 668: "mecomm",
+ 669: "meregister",
+ 670: "vacdsm-sws",
+ 671: "vacdsm-app",
+ 672: "vpps-qua",
+ 673: "cimplex",
+ 674: "acap",
+ 675: "dctp",
+ 676: "vpps-via",
+ 677: "vpp",
+ 678: "ggf-ncp",
+ 679: "mrm",
+ 680: "entrust-aaas",
+ 681: "entrust-aams",
+ 682: "xfr",
+ 683: "corba-iiop",
+ 684: "corba-iiop-ssl",
+ 685: "mdc-portmapper",
+ 686: "hcp-wismar",
+ 687: "asipregistry",
+ 688: "realm-rusd",
+ 689: "nmap",
+ 690: "vatp",
+ 691: "msexch-routing",
+ 692: "hyperwave-isp",
+ 693: "connendp",
+ 694: "ha-cluster",
+ 695: "ieee-mms-ssl",
+ 696: "rushd",
+ 697: "uuidgen",
+ 698: "olsr",
+ 699: "accessnetwork",
+ 700: "epp",
+ 701: "lmp",
+ 702: "iris-beep",
+ 704: "elcsd",
+ 705: "agentx",
+ 706: "silc",
+ 707: "borland-dsj",
+ 709: "entrust-kmsh",
+ 710: "entrust-ash",
+ 711: "cisco-tdp",
+ 712: "tbrpf",
+ 713: "iris-xpc",
+ 714: "iris-xpcs",
+ 715: "iris-lwz",
+ 729: "netviewdm1",
+ 730: "netviewdm2",
+ 731: "netviewdm3",
+ 741: "netgw",
+ 742: "netrcs",
+ 744: "flexlm",
+ 747: "fujitsu-dev",
+ 748: "ris-cm",
+ 749: "kerberos-adm",
+ 750: "rfile",
+ 751: "pump",
+ 752: "qrh",
+ 753: "rrh",
+ 754: "tell",
+ 758: "nlogin",
+ 759: "con",
+ 760: "ns",
+ 761: "rxe",
+ 762: "quotad",
+ 763: "cycleserv",
+ 764: "omserv",
+ 765: "webster",
+ 767: "phonebook",
+ 769: "vid",
+ 770: "cadlock",
+ 771: "rtip",
+ 772: "cycleserv2",
+ 773: "submit",
+ 774: "rpasswd",
+ 775: "entomb",
+ 776: "wpages",
+ 777: "multiling-http",
+ 780: "wpgs",
+ 800: "mdbs-daemon",
+ 801: "device",
+ 802: "mbap-s",
+ 810: "fcp-udp",
+ 828: "itm-mcell-s",
+ 829: "pkix-3-ca-ra",
+ 830: "netconf-ssh",
+ 831: "netconf-beep",
+ 832: "netconfsoaphttp",
+ 833: "netconfsoapbeep",
+ 847: "dhcp-failover2",
+ 848: "gdoi",
+ 853: "domain-s",
+ 854: "dlep",
+ 860: "iscsi",
+ 861: "owamp-control",
+ 862: "twamp-control",
+ 873: "rsync",
+ 886: "iclcnet-locate",
+ 887: "iclcnet-svinfo",
+ 888: "accessbuilder",
+ 900: "omginitialrefs",
+ 901: "smpnameres",
+ 902: "ideafarm-door",
+ 903: "ideafarm-panic",
+ 910: "kink",
+ 911: "xact-backup",
+ 912: "apex-mesh",
+ 913: "apex-edge",
+ 953: "rndc",
+ 989: "ftps-data",
+ 990: "ftps",
+ 991: "nas",
+ 992: "telnets",
+ 993: "imaps",
+ 995: "pop3s",
+ 996: "vsinet",
+ 997: "maitrd",
+ 998: "busboy",
+ 999: "garcon",
+ 1000: "cadlock2",
+ 1001: "webpush",
+ 1010: "surf",
+ 1021: "exp1",
+ 1022: "exp2",
+ 1025: "blackjack",
+ 1026: "cap",
+ 1029: "solid-mux",
+ 1033: "netinfo-local",
+ 1034: "activesync",
+ 1035: "mxxrlogin",
+ 1036: "nsstp",
+ 1037: "ams",
+ 1038: "mtqp",
+ 1039: "sbl",
+ 1040: "netarx",
+ 1041: "danf-ak2",
+ 1042: "afrog",
+ 1043: "boinc-client",
+ 1044: "dcutility",
+ 1045: "fpitp",
+ 1046: "wfremotertm",
+ 1047: "neod1",
+ 1048: "neod2",
+ 1049: "td-postman",
+ 1050: "cma",
+ 1051: "optima-vnet",
+ 1052: "ddt",
+ 1053: "remote-as",
+ 1054: "brvread",
+ 1055: "ansyslmd",
+ 1056: "vfo",
+ 1057: "startron",
+ 1058: "nim",
+ 1059: "nimreg",
+ 1060: "polestar",
+ 1061: "kiosk",
+ 1062: "veracity",
+ 1063: "kyoceranetdev",
+ 1064: "jstel",
+ 1065: "syscomlan",
+ 1066: "fpo-fns",
+ 1067: "instl-boots",
+ 1068: "instl-bootc",
+ 1069: "cognex-insight",
+ 1070: "gmrupdateserv",
+ 1071: "bsquare-voip",
+ 1072: "cardax",
+ 1073: "bridgecontrol",
+ 1074: "warmspotMgmt",
+ 1075: "rdrmshc",
+ 1076: "dab-sti-c",
+ 1077: "imgames",
+ 1078: "avocent-proxy",
+ 1079: "asprovatalk",
+ 1080: "socks",
+ 1081: "pvuniwien",
+ 1082: "amt-esd-prot",
+ 1083: "ansoft-lm-1",
+ 1084: "ansoft-lm-2",
+ 1085: "webobjects",
+ 1086: "cplscrambler-lg",
+ 1087: "cplscrambler-in",
+ 1088: "cplscrambler-al",
+ 1089: "ff-annunc",
+ 1090: "ff-fms",
+ 1091: "ff-sm",
+ 1092: "obrpd",
+ 1093: "proofd",
+ 1094: "rootd",
+ 1095: "nicelink",
+ 1096: "cnrprotocol",
+ 1097: "sunclustermgr",
+ 1098: "rmiactivation",
+ 1099: "rmiregistry",
+ 1100: "mctp",
+ 1101: "pt2-discover",
+ 1102: "adobeserver-1",
+ 1103: "adobeserver-2",
+ 1104: "xrl",
+ 1105: "ftranhc",
+ 1106: "isoipsigport-1",
+ 1107: "isoipsigport-2",
+ 1108: "ratio-adp",
+ 1110: "webadmstart",
+ 1111: "lmsocialserver",
+ 1112: "icp",
+ 1113: "ltp-deepspace",
+ 1114: "mini-sql",
+ 1115: "ardus-trns",
+ 1116: "ardus-cntl",
+ 1117: "ardus-mtrns",
+ 1118: "sacred",
+ 1119: "bnetgame",
+ 1120: "bnetfile",
+ 1121: "rmpp",
+ 1122: "availant-mgr",
+ 1123: "murray",
+ 1124: "hpvmmcontrol",
+ 1125: "hpvmmagent",
+ 1126: "hpvmmdata",
+ 1127: "kwdb-commn",
+ 1128: "saphostctrl",
+ 1129: "saphostctrls",
+ 1130: "casp",
+ 1131: "caspssl",
+ 1132: "kvm-via-ip",
+ 1133: "dfn",
+ 1134: "aplx",
+ 1135: "omnivision",
+ 1136: "hhb-gateway",
+ 1137: "trim",
+ 1138: "encrypted-admin",
+ 1139: "evm",
+ 1140: "autonoc",
+ 1141: "mxomss",
+ 1142: "edtools",
+ 1143: "imyx",
+ 1144: "fuscript",
+ 1145: "x9-icue",
+ 1146: "audit-transfer",
+ 1147: "capioverlan",
+ 1148: "elfiq-repl",
+ 1149: "bvtsonar",
+ 1150: "blaze",
+ 1151: "unizensus",
+ 1152: "winpoplanmess",
+ 1153: "c1222-acse",
+ 1154: "resacommunity",
+ 1155: "nfa",
+ 1156: "iascontrol-oms",
+ 1157: "iascontrol",
+ 1158: "dbcontrol-oms",
+ 1159: "oracle-oms",
+ 1160: "olsv",
+ 1161: "health-polling",
+ 1162: "health-trap",
+ 1163: "sddp",
+ 1164: "qsm-proxy",
+ 1165: "qsm-gui",
+ 1166: "qsm-remote",
+ 1167: "cisco-ipsla",
+ 1168: "vchat",
+ 1169: "tripwire",
+ 1170: "atc-lm",
+ 1171: "atc-appserver",
+ 1172: "dnap",
+ 1173: "d-cinema-rrp",
+ 1174: "fnet-remote-ui",
+ 1175: "dossier",
+ 1176: "indigo-server",
+ 1177: "dkmessenger",
+ 1178: "sgi-storman",
+ 1179: "b2n",
+ 1180: "mc-client",
+ 1181: "3comnetman",
+ 1182: "accelenet",
+ 1183: "llsurfup-http",
+ 1184: "llsurfup-https",
+ 1185: "catchpole",
+ 1186: "mysql-cluster",
+ 1187: "alias",
+ 1188: "hp-webadmin",
+ 1189: "unet",
+ 1190: "commlinx-avl",
+ 1191: "gpfs",
+ 1192: "caids-sensor",
+ 1193: "fiveacross",
+ 1194: "openvpn",
+ 1195: "rsf-1",
+ 1196: "netmagic",
+ 1197: "carrius-rshell",
+ 1198: "cajo-discovery",
+ 1199: "dmidi",
+ 1200: "scol",
+ 1201: "nucleus-sand",
+ 1202: "caiccipc",
+ 1203: "ssslic-mgr",
+ 1204: "ssslog-mgr",
+ 1205: "accord-mgc",
+ 1206: "anthony-data",
+ 1207: "metasage",
+ 1208: "seagull-ais",
+ 1209: "ipcd3",
+ 1210: "eoss",
+ 1211: "groove-dpp",
+ 1212: "lupa",
+ 1213: "mpc-lifenet",
+ 1214: "kazaa",
+ 1215: "scanstat-1",
+ 1216: "etebac5",
+ 1217: "hpss-ndapi",
+ 1218: "aeroflight-ads",
+ 1219: "aeroflight-ret",
+ 1220: "qt-serveradmin",
+ 1221: "sweetware-apps",
+ 1222: "nerv",
+ 1223: "tgp",
+ 1224: "vpnz",
+ 1225: "slinkysearch",
+ 1226: "stgxfws",
+ 1227: "dns2go",
+ 1228: "florence",
+ 1229: "zented",
+ 1230: "periscope",
+ 1231: "menandmice-lpm",
+ 1232: "first-defense",
+ 1233: "univ-appserver",
+ 1234: "search-agent",
+ 1235: "mosaicsyssvc1",
+ 1236: "bvcontrol",
+ 1237: "tsdos390",
+ 1238: "hacl-qs",
+ 1239: "nmsd",
+ 1240: "instantia",
+ 1241: "nessus",
+ 1242: "nmasoverip",
+ 1243: "serialgateway",
+ 1244: "isbconference1",
+ 1245: "isbconference2",
+ 1246: "payrouter",
+ 1247: "visionpyramid",
+ 1248: "hermes",
+ 1249: "mesavistaco",
+ 1250: "swldy-sias",
+ 1251: "servergraph",
+ 1252: "bspne-pcc",
+ 1253: "q55-pcc",
+ 1254: "de-noc",
+ 1255: "de-cache-query",
+ 1256: "de-server",
+ 1257: "shockwave2",
+ 1258: "opennl",
+ 1259: "opennl-voice",
+ 1260: "ibm-ssd",
+ 1261: "mpshrsv",
+ 1262: "qnts-orb",
+ 1263: "dka",
+ 1264: "prat",
+ 1265: "dssiapi",
+ 1266: "dellpwrappks",
+ 1267: "epc",
+ 1268: "propel-msgsys",
+ 1269: "watilapp",
+ 1270: "opsmgr",
+ 1271: "excw",
+ 1272: "cspmlockmgr",
+ 1273: "emc-gateway",
+ 1274: "t1distproc",
+ 1275: "ivcollector",
+ 1277: "miva-mqs",
+ 1278: "dellwebadmin-1",
+ 1279: "dellwebadmin-2",
+ 1280: "pictrography",
+ 1281: "healthd",
+ 1282: "emperion",
+ 1283: "productinfo",
+ 1284: "iee-qfx",
+ 1285: "neoiface",
+ 1286: "netuitive",
+ 1287: "routematch",
+ 1288: "navbuddy",
+ 1289: "jwalkserver",
+ 1290: "winjaserver",
+ 1291: "seagulllms",
+ 1292: "dsdn",
+ 1293: "pkt-krb-ipsec",
+ 1294: "cmmdriver",
+ 1295: "ehtp",
+ 1296: "dproxy",
+ 1297: "sdproxy",
+ 1298: "lpcp",
+ 1299: "hp-sci",
+ 1300: "h323hostcallsc",
+ 1301: "ci3-software-1",
+ 1302: "ci3-software-2",
+ 1303: "sftsrv",
+ 1304: "boomerang",
+ 1305: "pe-mike",
+ 1306: "re-conn-proto",
+ 1307: "pacmand",
+ 1308: "odsi",
+ 1309: "jtag-server",
+ 1310: "husky",
+ 1311: "rxmon",
+ 1312: "sti-envision",
+ 1313: "bmc-patroldb",
+ 1314: "pdps",
+ 1315: "els",
+ 1316: "exbit-escp",
+ 1317: "vrts-ipcserver",
+ 1318: "krb5gatekeeper",
+ 1319: "amx-icsp",
+ 1320: "amx-axbnet",
+ 1321: "pip",
+ 1322: "novation",
+ 1323: "brcd",
+ 1324: "delta-mcp",
+ 1325: "dx-instrument",
+ 1326: "wimsic",
+ 1327: "ultrex",
+ 1328: "ewall",
+ 1329: "netdb-export",
+ 1330: "streetperfect",
+ 1331: "intersan",
+ 1332: "pcia-rxp-b",
+ 1333: "passwrd-policy",
+ 1334: "writesrv",
+ 1335: "digital-notary",
+ 1336: "ischat",
+ 1337: "menandmice-dns",
+ 1338: "wmc-log-svc",
+ 1339: "kjtsiteserver",
+ 1340: "naap",
+ 1341: "qubes",
+ 1342: "esbroker",
+ 1343: "re101",
+ 1344: "icap",
+ 1345: "vpjp",
+ 1346: "alta-ana-lm",
+ 1347: "bbn-mmc",
+ 1348: "bbn-mmx",
+ 1349: "sbook",
+ 1350: "editbench",
+ 1351: "equationbuilder",
+ 1352: "lotusnote",
+ 1353: "relief",
+ 1354: "XSIP-network",
+ 1355: "intuitive-edge",
+ 1356: "cuillamartin",
+ 1357: "pegboard",
+ 1358: "connlcli",
+ 1359: "ftsrv",
+ 1360: "mimer",
+ 1361: "linx",
+ 1362: "timeflies",
+ 1363: "ndm-requester",
+ 1364: "ndm-server",
+ 1365: "adapt-sna",
+ 1366: "netware-csp",
+ 1367: "dcs",
+ 1368: "screencast",
+ 1369: "gv-us",
+ 1370: "us-gv",
+ 1371: "fc-cli",
+ 1372: "fc-ser",
+ 1373: "chromagrafx",
+ 1374: "molly",
+ 1375: "bytex",
+ 1376: "ibm-pps",
+ 1377: "cichlid",
+ 1378: "elan",
+ 1379: "dbreporter",
+ 1380: "telesis-licman",
+ 1381: "apple-licman",
+ 1382: "udt-os",
+ 1383: "gwha",
+ 1384: "os-licman",
+ 1385: "atex-elmd",
+ 1386: "checksum",
+ 1387: "cadsi-lm",
+ 1388: "objective-dbc",
+ 1389: "iclpv-dm",
+ 1390: "iclpv-sc",
+ 1391: "iclpv-sas",
+ 1392: "iclpv-pm",
+ 1393: "iclpv-nls",
+ 1394: "iclpv-nlc",
+ 1395: "iclpv-wsm",
+ 1396: "dvl-activemail",
+ 1397: "audio-activmail",
+ 1398: "video-activmail",
+ 1399: "cadkey-licman",
+ 1400: "cadkey-tablet",
+ 1401: "goldleaf-licman",
+ 1402: "prm-sm-np",
+ 1403: "prm-nm-np",
+ 1404: "igi-lm",
+ 1405: "ibm-res",
+ 1406: "netlabs-lm",
+ 1407: "tibet-server",
+ 1408: "sophia-lm",
+ 1409: "here-lm",
+ 1410: "hiq",
+ 1411: "af",
+ 1412: "innosys",
+ 1413: "innosys-acl",
+ 1414: "ibm-mqseries",
+ 1415: "dbstar",
+ 1416: "novell-lu6-2",
+ 1417: "timbuktu-srv1",
+ 1418: "timbuktu-srv2",
+ 1419: "timbuktu-srv3",
+ 1420: "timbuktu-srv4",
+ 1421: "gandalf-lm",
+ 1422: "autodesk-lm",
+ 1423: "essbase",
+ 1424: "hybrid",
+ 1425: "zion-lm",
+ 1426: "sais",
+ 1427: "mloadd",
+ 1428: "informatik-lm",
+ 1429: "nms",
+ 1430: "tpdu",
+ 1431: "rgtp",
+ 1432: "blueberry-lm",
+ 1433: "ms-sql-s",
+ 1434: "ms-sql-m",
+ 1435: "ibm-cics",
+ 1436: "saism",
+ 1437: "tabula",
+ 1438: "eicon-server",
+ 1439: "eicon-x25",
+ 1440: "eicon-slp",
+ 1441: "cadis-1",
+ 1442: "cadis-2",
+ 1443: "ies-lm",
+ 1444: "marcam-lm",
+ 1445: "proxima-lm",
+ 1446: "ora-lm",
+ 1447: "apri-lm",
+ 1448: "oc-lm",
+ 1449: "peport",
+ 1450: "dwf",
+ 1451: "infoman",
+ 1452: "gtegsc-lm",
+ 1453: "genie-lm",
+ 1454: "interhdl-elmd",
+ 1455: "esl-lm",
+ 1456: "dca",
+ 1457: "valisys-lm",
+ 1458: "nrcabq-lm",
+ 1459: "proshare1",
+ 1460: "proshare2",
+ 1461: "ibm-wrless-lan",
+ 1462: "world-lm",
+ 1463: "nucleus",
+ 1464: "msl-lmd",
+ 1465: "pipes",
+ 1466: "oceansoft-lm",
+ 1467: "csdmbase",
+ 1468: "csdm",
+ 1469: "aal-lm",
+ 1470: "uaiact",
+ 1471: "csdmbase",
+ 1472: "csdm",
+ 1473: "openmath",
+ 1474: "telefinder",
+ 1475: "taligent-lm",
+ 1476: "clvm-cfg",
+ 1477: "ms-sna-server",
+ 1478: "ms-sna-base",
+ 1479: "dberegister",
+ 1480: "pacerforum",
+ 1481: "airs",
+ 1482: "miteksys-lm",
+ 1483: "afs",
+ 1484: "confluent",
+ 1485: "lansource",
+ 1486: "nms-topo-serv",
+ 1487: "localinfosrvr",
+ 1488: "docstor",
+ 1489: "dmdocbroker",
+ 1490: "insitu-conf",
+ 1492: "stone-design-1",
+ 1493: "netmap-lm",
+ 1494: "ica",
+ 1495: "cvc",
+ 1496: "liberty-lm",
+ 1497: "rfx-lm",
+ 1498: "sybase-sqlany",
+ 1499: "fhc",
+ 1500: "vlsi-lm",
+ 1501: "saiscm",
+ 1502: "shivadiscovery",
+ 1503: "imtc-mcs",
+ 1504: "evb-elm",
+ 1505: "funkproxy",
+ 1506: "utcd",
+ 1507: "symplex",
+ 1508: "diagmond",
+ 1509: "robcad-lm",
+ 1510: "mvx-lm",
+ 1511: "3l-l1",
+ 1512: "wins",
+ 1513: "fujitsu-dtc",
+ 1514: "fujitsu-dtcns",
+ 1515: "ifor-protocol",
+ 1516: "vpad",
+ 1517: "vpac",
+ 1518: "vpvd",
+ 1519: "vpvc",
+ 1520: "atm-zip-office",
+ 1521: "ncube-lm",
+ 1522: "ricardo-lm",
+ 1523: "cichild-lm",
+ 1524: "ingreslock",
+ 1525: "orasrv",
+ 1526: "pdap-np",
+ 1527: "tlisrv",
+ 1529: "coauthor",
+ 1530: "rap-service",
+ 1531: "rap-listen",
+ 1532: "miroconnect",
+ 1533: "virtual-places",
+ 1534: "micromuse-lm",
+ 1535: "ampr-info",
+ 1536: "ampr-inter",
+ 1537: "sdsc-lm",
+ 1538: "3ds-lm",
+ 1539: "intellistor-lm",
+ 1540: "rds",
+ 1541: "rds2",
+ 1542: "gridgen-elmd",
+ 1543: "simba-cs",
+ 1544: "aspeclmd",
+ 1545: "vistium-share",
+ 1546: "abbaccuray",
+ 1547: "laplink",
+ 1548: "axon-lm",
+ 1549: "shivahose",
+ 1550: "3m-image-lm",
+ 1551: "hecmtl-db",
+ 1552: "pciarray",
+ 1553: "sna-cs",
+ 1554: "caci-lm",
+ 1555: "livelan",
+ 1556: "veritas-pbx",
+ 1557: "arbortext-lm",
+ 1558: "xingmpeg",
+ 1559: "web2host",
+ 1560: "asci-val",
+ 1561: "facilityview",
+ 1562: "pconnectmgr",
+ 1563: "cadabra-lm",
+ 1564: "pay-per-view",
+ 1565: "winddlb",
+ 1566: "corelvideo",
+ 1567: "jlicelmd",
+ 1568: "tsspmap",
+ 1569: "ets",
+ 1570: "orbixd",
+ 1571: "rdb-dbs-disp",
+ 1572: "chip-lm",
+ 1573: "itscomm-ns",
+ 1574: "mvel-lm",
+ 1575: "oraclenames",
+ 1576: "moldflow-lm",
+ 1577: "hypercube-lm",
+ 1578: "jacobus-lm",
+ 1579: "ioc-sea-lm",
+ 1580: "tn-tl-r1",
+ 1581: "mil-2045-47001",
+ 1582: "msims",
+ 1583: "simbaexpress",
+ 1584: "tn-tl-fd2",
+ 1585: "intv",
+ 1586: "ibm-abtact",
+ 1587: "pra-elmd",
+ 1588: "triquest-lm",
+ 1589: "vqp",
+ 1590: "gemini-lm",
+ 1591: "ncpm-pm",
+ 1592: "commonspace",
+ 1593: "mainsoft-lm",
+ 1594: "sixtrak",
+ 1595: "radio",
+ 1596: "radio-sm",
+ 1597: "orbplus-iiop",
+ 1598: "picknfs",
+ 1599: "simbaservices",
+ 1600: "issd",
+ 1601: "aas",
+ 1602: "inspect",
+ 1603: "picodbc",
+ 1604: "icabrowser",
+ 1605: "slp",
+ 1606: "slm-api",
+ 1607: "stt",
+ 1608: "smart-lm",
+ 1609: "isysg-lm",
+ 1610: "taurus-wh",
+ 1611: "ill",
+ 1612: "netbill-trans",
+ 1613: "netbill-keyrep",
+ 1614: "netbill-cred",
+ 1615: "netbill-auth",
+ 1616: "netbill-prod",
+ 1617: "nimrod-agent",
+ 1618: "skytelnet",
+ 1619: "xs-openstorage",
+ 1620: "faxportwinport",
+ 1621: "softdataphone",
+ 1622: "ontime",
+ 1623: "jaleosnd",
+ 1624: "udp-sr-port",
+ 1625: "svs-omagent",
+ 1626: "shockwave",
+ 1627: "t128-gateway",
+ 1628: "lontalk-norm",
+ 1629: "lontalk-urgnt",
+ 1630: "oraclenet8cman",
+ 1631: "visitview",
+ 1632: "pammratc",
+ 1633: "pammrpc",
+ 1634: "loaprobe",
+ 1635: "edb-server1",
+ 1636: "isdc",
+ 1637: "islc",
+ 1638: "ismc",
+ 1639: "cert-initiator",
+ 1640: "cert-responder",
+ 1641: "invision",
+ 1642: "isis-am",
+ 1643: "isis-ambc",
+ 1644: "saiseh",
+ 1645: "sightline",
+ 1646: "sa-msg-port",
+ 1647: "rsap",
+ 1648: "concurrent-lm",
+ 1649: "kermit",
+ 1650: "nkd",
+ 1651: "shiva-confsrvr",
+ 1652: "xnmp",
+ 1653: "alphatech-lm",
+ 1654: "stargatealerts",
+ 1655: "dec-mbadmin",
+ 1656: "dec-mbadmin-h",
+ 1657: "fujitsu-mmpdc",
+ 1658: "sixnetudr",
+ 1659: "sg-lm",
+ 1660: "skip-mc-gikreq",
+ 1661: "netview-aix-1",
+ 1662: "netview-aix-2",
+ 1663: "netview-aix-3",
+ 1664: "netview-aix-4",
+ 1665: "netview-aix-5",
+ 1666: "netview-aix-6",
+ 1667: "netview-aix-7",
+ 1668: "netview-aix-8",
+ 1669: "netview-aix-9",
+ 1670: "netview-aix-10",
+ 1671: "netview-aix-11",
+ 1672: "netview-aix-12",
+ 1673: "proshare-mc-1",
+ 1674: "proshare-mc-2",
+ 1675: "pdp",
+ 1676: "netcomm1",
+ 1677: "groupwise",
+ 1678: "prolink",
+ 1679: "darcorp-lm",
+ 1680: "microcom-sbp",
+ 1681: "sd-elmd",
+ 1682: "lanyon-lantern",
+ 1683: "ncpm-hip",
+ 1684: "snaresecure",
+ 1685: "n2nremote",
+ 1686: "cvmon",
+ 1687: "nsjtp-ctrl",
+ 1688: "nsjtp-data",
+ 1689: "firefox",
+ 1690: "ng-umds",
+ 1691: "empire-empuma",
+ 1692: "sstsys-lm",
+ 1693: "rrirtr",
+ 1694: "rrimwm",
+ 1695: "rrilwm",
+ 1696: "rrifmm",
+ 1697: "rrisat",
+ 1698: "rsvp-encap-1",
+ 1699: "rsvp-encap-2",
+ 1700: "mps-raft",
+ 1701: "l2f",
+ 1702: "deskshare",
+ 1703: "hb-engine",
+ 1704: "bcs-broker",
+ 1705: "slingshot",
+ 1706: "jetform",
+ 1707: "vdmplay",
+ 1708: "gat-lmd",
+ 1709: "centra",
+ 1710: "impera",
+ 1711: "pptconference",
+ 1712: "registrar",
+ 1713: "conferencetalk",
+ 1714: "sesi-lm",
+ 1715: "houdini-lm",
+ 1716: "xmsg",
+ 1717: "fj-hdnet",
+ 1718: "h323gatedisc",
+ 1719: "h323gatestat",
+ 1720: "h323hostcall",
+ 1721: "caicci",
+ 1722: "hks-lm",
+ 1723: "pptp",
+ 1724: "csbphonemaster",
+ 1725: "iden-ralp",
+ 1726: "iberiagames",
+ 1727: "winddx",
+ 1728: "telindus",
+ 1729: "citynl",
+ 1730: "roketz",
+ 1731: "msiccp",
+ 1732: "proxim",
+ 1733: "siipat",
+ 1734: "cambertx-lm",
+ 1735: "privatechat",
+ 1736: "street-stream",
+ 1737: "ultimad",
+ 1738: "gamegen1",
+ 1739: "webaccess",
+ 1740: "encore",
+ 1741: "cisco-net-mgmt",
+ 1742: "3Com-nsd",
+ 1743: "cinegrfx-lm",
+ 1744: "ncpm-ft",
+ 1745: "remote-winsock",
+ 1746: "ftrapid-1",
+ 1747: "ftrapid-2",
+ 1748: "oracle-em1",
+ 1749: "aspen-services",
+ 1750: "sslp",
+ 1751: "swiftnet",
+ 1752: "lofr-lm",
+ 1753: "predatar-comms",
+ 1754: "oracle-em2",
+ 1755: "ms-streaming",
+ 1756: "capfast-lmd",
+ 1757: "cnhrp",
+ 1758: "tftp-mcast",
+ 1759: "spss-lm",
+ 1760: "www-ldap-gw",
+ 1761: "cft-0",
+ 1762: "cft-1",
+ 1763: "cft-2",
+ 1764: "cft-3",
+ 1765: "cft-4",
+ 1766: "cft-5",
+ 1767: "cft-6",
+ 1768: "cft-7",
+ 1769: "bmc-net-adm",
+ 1770: "bmc-net-svc",
+ 1771: "vaultbase",
+ 1772: "essweb-gw",
+ 1773: "kmscontrol",
+ 1774: "global-dtserv",
+ 1775: "vdab",
+ 1776: "femis",
+ 1777: "powerguardian",
+ 1778: "prodigy-intrnet",
+ 1779: "pharmasoft",
+ 1780: "dpkeyserv",
+ 1781: "answersoft-lm",
+ 1782: "hp-hcip",
+ 1784: "finle-lm",
+ 1785: "windlm",
+ 1786: "funk-logger",
+ 1787: "funk-license",
+ 1788: "psmond",
+ 1789: "hello",
+ 1790: "nmsp",
+ 1791: "ea1",
+ 1792: "ibm-dt-2",
+ 1793: "rsc-robot",
+ 1794: "cera-bcm",
+ 1795: "dpi-proxy",
+ 1796: "vocaltec-admin",
+ 1797: "uma",
+ 1798: "etp",
+ 1799: "netrisk",
+ 1800: "ansys-lm",
+ 1801: "msmq",
+ 1802: "concomp1",
+ 1803: "hp-hcip-gwy",
+ 1804: "enl",
+ 1805: "enl-name",
+ 1806: "musiconline",
+ 1807: "fhsp",
+ 1808: "oracle-vp2",
+ 1809: "oracle-vp1",
+ 1810: "jerand-lm",
+ 1811: "scientia-sdb",
+ 1812: "radius",
+ 1813: "radius-acct",
+ 1814: "tdp-suite",
+ 1815: "mmpft",
+ 1816: "harp",
+ 1817: "rkb-oscs",
+ 1818: "etftp",
+ 1819: "plato-lm",
+ 1820: "mcagent",
+ 1821: "donnyworld",
+ 1822: "es-elmd",
+ 1823: "unisys-lm",
+ 1824: "metrics-pas",
+ 1825: "direcpc-video",
+ 1826: "ardt",
+ 1827: "asi",
+ 1828: "itm-mcell-u",
+ 1829: "optika-emedia",
+ 1830: "net8-cman",
+ 1831: "myrtle",
+ 1832: "tht-treasure",
+ 1833: "udpradio",
+ 1834: "ardusuni",
+ 1835: "ardusmul",
+ 1836: "ste-smsc",
+ 1837: "csoft1",
+ 1838: "talnet",
+ 1839: "netopia-vo1",
+ 1840: "netopia-vo2",
+ 1841: "netopia-vo3",
+ 1842: "netopia-vo4",
+ 1843: "netopia-vo5",
+ 1844: "direcpc-dll",
+ 1845: "altalink",
+ 1846: "tunstall-pnc",
+ 1847: "slp-notify",
+ 1848: "fjdocdist",
+ 1849: "alpha-sms",
+ 1850: "gsi",
+ 1851: "ctcd",
+ 1852: "virtual-time",
+ 1853: "vids-avtp",
+ 1854: "buddy-draw",
+ 1855: "fiorano-rtrsvc",
+ 1856: "fiorano-msgsvc",
+ 1857: "datacaptor",
+ 1858: "privateark",
+ 1859: "gammafetchsvr",
+ 1860: "sunscalar-svc",
+ 1861: "lecroy-vicp",
+ 1862: "mysql-cm-agent",
+ 1863: "msnp",
+ 1864: "paradym-31port",
+ 1865: "entp",
+ 1866: "swrmi",
+ 1867: "udrive",
+ 1868: "viziblebrowser",
+ 1869: "transact",
+ 1870: "sunscalar-dns",
+ 1871: "canocentral0",
+ 1872: "canocentral1",
+ 1873: "fjmpjps",
+ 1874: "fjswapsnp",
+ 1875: "westell-stats",
+ 1876: "ewcappsrv",
+ 1877: "hp-webqosdb",
+ 1878: "drmsmc",
+ 1879: "nettgain-nms",
+ 1880: "vsat-control",
+ 1881: "ibm-mqseries2",
+ 1882: "ecsqdmn",
+ 1883: "mqtt",
+ 1884: "idmaps",
+ 1885: "vrtstrapserver",
+ 1886: "leoip",
+ 1887: "filex-lport",
+ 1888: "ncconfig",
+ 1889: "unify-adapter",
+ 1890: "wilkenlistener",
+ 1891: "childkey-notif",
+ 1892: "childkey-ctrl",
+ 1893: "elad",
+ 1894: "o2server-port",
+ 1896: "b-novative-ls",
+ 1897: "metaagent",
+ 1898: "cymtec-port",
+ 1899: "mc2studios",
+ 1900: "ssdp",
+ 1901: "fjicl-tep-a",
+ 1902: "fjicl-tep-b",
+ 1903: "linkname",
+ 1904: "fjicl-tep-c",
+ 1905: "sugp",
+ 1906: "tpmd",
+ 1907: "intrastar",
+ 1908: "dawn",
+ 1909: "global-wlink",
+ 1910: "ultrabac",
+ 1911: "mtp",
+ 1912: "rhp-iibp",
+ 1913: "armadp",
+ 1914: "elm-momentum",
+ 1915: "facelink",
+ 1916: "persona",
+ 1917: "noagent",
+ 1918: "can-nds",
+ 1919: "can-dch",
+ 1920: "can-ferret",
+ 1921: "noadmin",
+ 1922: "tapestry",
+ 1923: "spice",
+ 1924: "xiip",
+ 1925: "discovery-port",
+ 1926: "egs",
+ 1927: "videte-cipc",
+ 1928: "emsd-port",
+ 1929: "bandwiz-system",
+ 1930: "driveappserver",
+ 1931: "amdsched",
+ 1932: "ctt-broker",
+ 1933: "xmapi",
+ 1934: "xaapi",
+ 1935: "macromedia-fcs",
+ 1936: "jetcmeserver",
+ 1937: "jwserver",
+ 1938: "jwclient",
+ 1939: "jvserver",
+ 1940: "jvclient",
+ 1941: "dic-aida",
+ 1942: "res",
+ 1943: "beeyond-media",
+ 1944: "close-combat",
+ 1945: "dialogic-elmd",
+ 1946: "tekpls",
+ 1947: "sentinelsrm",
+ 1948: "eye2eye",
+ 1949: "ismaeasdaqlive",
+ 1950: "ismaeasdaqtest",
+ 1951: "bcs-lmserver",
+ 1952: "mpnjsc",
+ 1953: "rapidbase",
+ 1954: "abr-api",
+ 1955: "abr-secure",
+ 1956: "vrtl-vmf-ds",
+ 1957: "unix-status",
+ 1958: "dxadmind",
+ 1959: "simp-all",
+ 1960: "nasmanager",
+ 1961: "bts-appserver",
+ 1962: "biap-mp",
+ 1963: "webmachine",
+ 1964: "solid-e-engine",
+ 1965: "tivoli-npm",
+ 1966: "slush",
+ 1967: "sns-quote",
+ 1968: "lipsinc",
+ 1969: "lipsinc1",
+ 1970: "netop-rc",
+ 1971: "netop-school",
+ 1972: "intersys-cache",
+ 1973: "dlsrap",
+ 1974: "drp",
+ 1975: "tcoflashagent",
+ 1976: "tcoregagent",
+ 1977: "tcoaddressbook",
+ 1978: "unisql",
+ 1979: "unisql-java",
+ 1980: "pearldoc-xact",
+ 1981: "p2pq",
+ 1982: "estamp",
+ 1983: "lhtp",
+ 1984: "bb",
+ 1985: "hsrp",
+ 1986: "licensedaemon",
+ 1987: "tr-rsrb-p1",
+ 1988: "tr-rsrb-p2",
+ 1989: "tr-rsrb-p3",
+ 1990: "stun-p1",
+ 1991: "stun-p2",
+ 1992: "stun-p3",
+ 1993: "snmp-tcp-port",
+ 1994: "stun-port",
+ 1995: "perf-port",
+ 1996: "tr-rsrb-port",
+ 1997: "gdp-port",
+ 1998: "x25-svc-port",
+ 1999: "tcp-id-port",
+ 2000: "cisco-sccp",
+ 2001: "dc",
+ 2002: "globe",
+ 2003: "brutus",
+ 2004: "mailbox",
+ 2005: "berknet",
+ 2006: "invokator",
+ 2007: "dectalk",
+ 2008: "conf",
+ 2009: "news",
+ 2010: "search",
+ 2011: "raid-cc",
+ 2012: "ttyinfo",
+ 2013: "raid-am",
+ 2014: "troff",
+ 2015: "cypress",
+ 2016: "bootserver",
+ 2017: "cypress-stat",
+ 2018: "terminaldb",
+ 2019: "whosockami",
+ 2020: "xinupageserver",
+ 2021: "servexec",
+ 2022: "down",
+ 2023: "xinuexpansion3",
+ 2024: "xinuexpansion4",
+ 2025: "ellpack",
+ 2026: "scrabble",
+ 2027: "shadowserver",
+ 2028: "submitserver",
+ 2029: "hsrpv6",
+ 2030: "device2",
+ 2031: "mobrien-chat",
+ 2032: "blackboard",
+ 2033: "glogger",
+ 2034: "scoremgr",
+ 2035: "imsldoc",
+ 2036: "e-dpnet",
+ 2037: "applus",
+ 2038: "objectmanager",
+ 2039: "prizma",
+ 2040: "lam",
+ 2041: "interbase",
+ 2042: "isis",
+ 2043: "isis-bcast",
+ 2044: "rimsl",
+ 2045: "cdfunc",
+ 2046: "sdfunc",
+ 2047: "dls",
+ 2048: "dls-monitor",
+ 2049: "shilp",
+ 2050: "av-emb-config",
+ 2051: "epnsdp",
+ 2052: "clearvisn",
+ 2053: "lot105-ds-upd",
+ 2054: "weblogin",
+ 2055: "iop",
+ 2056: "omnisky",
+ 2057: "rich-cp",
+ 2058: "newwavesearch",
+ 2059: "bmc-messaging",
+ 2060: "teleniumdaemon",
+ 2061: "netmount",
+ 2062: "icg-swp",
+ 2063: "icg-bridge",
+ 2064: "icg-iprelay",
+ 2065: "dlsrpn",
+ 2066: "aura",
+ 2067: "dlswpn",
+ 2068: "avauthsrvprtcl",
+ 2069: "event-port",
+ 2070: "ah-esp-encap",
+ 2071: "acp-port",
+ 2072: "msync",
+ 2073: "gxs-data-port",
+ 2074: "vrtl-vmf-sa",
+ 2075: "newlixengine",
+ 2076: "newlixconfig",
+ 2077: "tsrmagt",
+ 2078: "tpcsrvr",
+ 2079: "idware-router",
+ 2080: "autodesk-nlm",
+ 2081: "kme-trap-port",
+ 2082: "infowave",
+ 2083: "radsec",
+ 2084: "sunclustergeo",
+ 2085: "ada-cip",
+ 2086: "gnunet",
+ 2087: "eli",
+ 2088: "ip-blf",
+ 2089: "sep",
+ 2090: "lrp",
+ 2091: "prp",
+ 2092: "descent3",
+ 2093: "nbx-cc",
+ 2094: "nbx-au",
+ 2095: "nbx-ser",
+ 2096: "nbx-dir",
+ 2097: "jetformpreview",
+ 2098: "dialog-port",
+ 2099: "h2250-annex-g",
+ 2100: "amiganetfs",
+ 2101: "rtcm-sc104",
+ 2102: "zephyr-srv",
+ 2103: "zephyr-clt",
+ 2104: "zephyr-hm",
+ 2105: "minipay",
+ 2106: "mzap",
+ 2107: "bintec-admin",
+ 2108: "comcam",
+ 2109: "ergolight",
+ 2110: "umsp",
+ 2111: "dsatp",
+ 2112: "idonix-metanet",
+ 2113: "hsl-storm",
+ 2114: "newheights",
+ 2115: "kdm",
+ 2116: "ccowcmr",
+ 2117: "mentaclient",
+ 2118: "mentaserver",
+ 2119: "gsigatekeeper",
+ 2120: "qencp",
+ 2121: "scientia-ssdb",
+ 2122: "caupc-remote",
+ 2123: "gtp-control",
+ 2124: "elatelink",
+ 2125: "lockstep",
+ 2126: "pktcable-cops",
+ 2127: "index-pc-wb",
+ 2128: "net-steward",
+ 2129: "cs-live",
+ 2130: "xds",
+ 2131: "avantageb2b",
+ 2132: "solera-epmap",
+ 2133: "zymed-zpp",
+ 2134: "avenue",
+ 2135: "gris",
+ 2136: "appworxsrv",
+ 2137: "connect",
+ 2138: "unbind-cluster",
+ 2139: "ias-auth",
+ 2140: "ias-reg",
+ 2141: "ias-admind",
+ 2142: "tdmoip",
+ 2143: "lv-jc",
+ 2144: "lv-ffx",
+ 2145: "lv-pici",
+ 2146: "lv-not",
+ 2147: "lv-auth",
+ 2148: "veritas-ucl",
+ 2149: "acptsys",
+ 2150: "dynamic3d",
+ 2151: "docent",
+ 2152: "gtp-user",
+ 2153: "ctlptc",
+ 2154: "stdptc",
+ 2155: "brdptc",
+ 2156: "trp",
+ 2157: "xnds",
+ 2158: "touchnetplus",
+ 2159: "gdbremote",
+ 2160: "apc-2160",
+ 2161: "apc-2161",
+ 2162: "navisphere",
+ 2163: "navisphere-sec",
+ 2164: "ddns-v3",
+ 2165: "x-bone-api",
+ 2166: "iwserver",
+ 2167: "raw-serial",
+ 2168: "easy-soft-mux",
+ 2169: "brain",
+ 2170: "eyetv",
+ 2171: "msfw-storage",
+ 2172: "msfw-s-storage",
+ 2173: "msfw-replica",
+ 2174: "msfw-array",
+ 2175: "airsync",
+ 2176: "rapi",
+ 2177: "qwave",
+ 2178: "bitspeer",
+ 2179: "vmrdp",
+ 2180: "mc-gt-srv",
+ 2181: "eforward",
+ 2182: "cgn-stat",
+ 2183: "cgn-config",
+ 2184: "nvd",
+ 2185: "onbase-dds",
+ 2186: "gtaua",
+ 2187: "ssmc",
+ 2188: "radware-rpm",
+ 2189: "radware-rpm-s",
+ 2190: "tivoconnect",
+ 2191: "tvbus",
+ 2192: "asdis",
+ 2193: "drwcs",
+ 2197: "mnp-exchange",
+ 2198: "onehome-remote",
+ 2199: "onehome-help",
+ 2200: "ici",
+ 2201: "ats",
+ 2202: "imtc-map",
+ 2203: "b2-runtime",
+ 2204: "b2-license",
+ 2205: "jps",
+ 2206: "hpocbus",
+ 2207: "hpssd",
+ 2208: "hpiod",
+ 2209: "rimf-ps",
+ 2210: "noaaport",
+ 2211: "emwin",
+ 2212: "leecoposserver",
+ 2213: "kali",
+ 2214: "rpi",
+ 2215: "ipcore",
+ 2216: "vtu-comms",
+ 2217: "gotodevice",
+ 2218: "bounzza",
+ 2219: "netiq-ncap",
+ 2220: "netiq",
+ 2221: "ethernet-ip-s",
+ 2222: "EtherNet-IP-1",
+ 2223: "rockwell-csp2",
+ 2224: "efi-mg",
+ 2225: "rcip-itu",
+ 2226: "di-drm",
+ 2227: "di-msg",
+ 2228: "ehome-ms",
+ 2229: "datalens",
+ 2230: "queueadm",
+ 2231: "wimaxasncp",
+ 2232: "ivs-video",
+ 2233: "infocrypt",
+ 2234: "directplay",
+ 2235: "sercomm-wlink",
+ 2236: "nani",
+ 2237: "optech-port1-lm",
+ 2238: "aviva-sna",
+ 2239: "imagequery",
+ 2240: "recipe",
+ 2241: "ivsd",
+ 2242: "foliocorp",
+ 2243: "magicom",
+ 2244: "nmsserver",
+ 2245: "hao",
+ 2246: "pc-mta-addrmap",
+ 2247: "antidotemgrsvr",
+ 2248: "ums",
+ 2249: "rfmp",
+ 2250: "remote-collab",
+ 2251: "dif-port",
+ 2252: "njenet-ssl",
+ 2253: "dtv-chan-req",
+ 2254: "seispoc",
+ 2255: "vrtp",
+ 2256: "pcc-mfp",
+ 2257: "simple-tx-rx",
+ 2258: "rcts",
+ 2260: "apc-2260",
+ 2261: "comotionmaster",
+ 2262: "comotionback",
+ 2263: "ecwcfg",
+ 2264: "apx500api-1",
+ 2265: "apx500api-2",
+ 2266: "mfserver",
+ 2267: "ontobroker",
+ 2268: "amt",
+ 2269: "mikey",
+ 2270: "starschool",
+ 2271: "mmcals",
+ 2272: "mmcal",
+ 2273: "mysql-im",
+ 2274: "pcttunnell",
+ 2275: "ibridge-data",
+ 2276: "ibridge-mgmt",
+ 2277: "bluectrlproxy",
+ 2278: "s3db",
+ 2279: "xmquery",
+ 2280: "lnvpoller",
+ 2281: "lnvconsole",
+ 2282: "lnvalarm",
+ 2283: "lnvstatus",
+ 2284: "lnvmaps",
+ 2285: "lnvmailmon",
+ 2286: "nas-metering",
+ 2287: "dna",
+ 2288: "netml",
+ 2289: "dict-lookup",
+ 2290: "sonus-logging",
+ 2291: "eapsp",
+ 2292: "mib-streaming",
+ 2293: "npdbgmngr",
+ 2294: "konshus-lm",
+ 2295: "advant-lm",
+ 2296: "theta-lm",
+ 2297: "d2k-datamover1",
+ 2298: "d2k-datamover2",
+ 2299: "pc-telecommute",
+ 2300: "cvmmon",
+ 2301: "cpq-wbem",
+ 2302: "binderysupport",
+ 2303: "proxy-gateway",
+ 2304: "attachmate-uts",
+ 2305: "mt-scaleserver",
+ 2306: "tappi-boxnet",
+ 2307: "pehelp",
+ 2308: "sdhelp",
+ 2309: "sdserver",
+ 2310: "sdclient",
+ 2311: "messageservice",
+ 2312: "wanscaler",
+ 2313: "iapp",
+ 2314: "cr-websystems",
+ 2315: "precise-sft",
+ 2316: "sent-lm",
+ 2317: "attachmate-g32",
+ 2318: "cadencecontrol",
+ 2319: "infolibria",
+ 2320: "siebel-ns",
+ 2321: "rdlap",
+ 2322: "ofsd",
+ 2323: "3d-nfsd",
+ 2324: "cosmocall",
+ 2325: "ansysli",
+ 2326: "idcp",
+ 2327: "xingcsm",
+ 2328: "netrix-sftm",
+ 2329: "nvd",
+ 2330: "tscchat",
+ 2331: "agentview",
+ 2332: "rcc-host",
+ 2333: "snapp",
+ 2334: "ace-client",
+ 2335: "ace-proxy",
+ 2336: "appleugcontrol",
+ 2337: "ideesrv",
+ 2338: "norton-lambert",
+ 2339: "3com-webview",
+ 2340: "wrs-registry",
+ 2341: "xiostatus",
+ 2342: "manage-exec",
+ 2343: "nati-logos",
+ 2344: "fcmsys",
+ 2345: "dbm",
+ 2346: "redstorm-join",
+ 2347: "redstorm-find",
+ 2348: "redstorm-info",
+ 2349: "redstorm-diag",
+ 2350: "psbserver",
+ 2351: "psrserver",
+ 2352: "pslserver",
+ 2353: "pspserver",
+ 2354: "psprserver",
+ 2355: "psdbserver",
+ 2356: "gxtelmd",
+ 2357: "unihub-server",
+ 2358: "futrix",
+ 2359: "flukeserver",
+ 2360: "nexstorindltd",
+ 2361: "tl1",
+ 2362: "digiman",
+ 2363: "mediacntrlnfsd",
+ 2364: "oi-2000",
+ 2365: "dbref",
+ 2366: "qip-login",
+ 2367: "service-ctrl",
+ 2368: "opentable",
+ 2370: "l3-hbmon",
+ 2371: "hp-rda",
+ 2372: "lanmessenger",
+ 2373: "remographlm",
+ 2374: "hydra",
+ 2375: "docker",
+ 2376: "docker-s",
+ 2377: "swarm",
+ 2379: "etcd-client",
+ 2380: "etcd-server",
+ 2381: "compaq-https",
+ 2382: "ms-olap3",
+ 2383: "ms-olap4",
+ 2384: "sd-request",
+ 2385: "sd-data",
+ 2386: "virtualtape",
+ 2387: "vsamredirector",
+ 2388: "mynahautostart",
+ 2389: "ovsessionmgr",
+ 2390: "rsmtp",
+ 2391: "3com-net-mgmt",
+ 2392: "tacticalauth",
+ 2393: "ms-olap1",
+ 2394: "ms-olap2",
+ 2395: "lan900-remote",
+ 2396: "wusage",
+ 2397: "ncl",
+ 2398: "orbiter",
+ 2399: "fmpro-fdal",
+ 2400: "opequus-server",
+ 2401: "cvspserver",
+ 2402: "taskmaster2000",
+ 2403: "taskmaster2000",
+ 2404: "iec-104",
+ 2405: "trc-netpoll",
+ 2406: "jediserver",
+ 2407: "orion",
+ 2408: "railgun-webaccl",
+ 2409: "sns-protocol",
+ 2410: "vrts-registry",
+ 2411: "netwave-ap-mgmt",
+ 2412: "cdn",
+ 2413: "orion-rmi-reg",
+ 2414: "beeyond",
+ 2415: "codima-rtp",
+ 2416: "rmtserver",
+ 2417: "composit-server",
+ 2418: "cas",
+ 2419: "attachmate-s2s",
+ 2420: "dslremote-mgmt",
+ 2421: "g-talk",
+ 2422: "crmsbits",
+ 2423: "rnrp",
+ 2424: "kofax-svr",
+ 2425: "fjitsuappmgr",
+ 2426: "vcmp",
+ 2427: "mgcp-gateway",
+ 2428: "ott",
+ 2429: "ft-role",
+ 2430: "venus",
+ 2431: "venus-se",
+ 2432: "codasrv",
+ 2433: "codasrv-se",
+ 2434: "pxc-epmap",
+ 2435: "optilogic",
+ 2436: "topx",
+ 2437: "unicontrol",
+ 2438: "msp",
+ 2439: "sybasedbsynch",
+ 2440: "spearway",
+ 2441: "pvsw-inet",
+ 2442: "netangel",
+ 2443: "powerclientcsf",
+ 2444: "btpp2sectrans",
+ 2445: "dtn1",
+ 2446: "bues-service",
+ 2447: "ovwdb",
+ 2448: "hpppssvr",
+ 2449: "ratl",
+ 2450: "netadmin",
+ 2451: "netchat",
+ 2452: "snifferclient",
+ 2453: "madge-ltd",
+ 2454: "indx-dds",
+ 2455: "wago-io-system",
+ 2456: "altav-remmgt",
+ 2457: "rapido-ip",
+ 2458: "griffin",
+ 2459: "community",
+ 2460: "ms-theater",
+ 2461: "qadmifoper",
+ 2462: "qadmifevent",
+ 2463: "lsi-raid-mgmt",
+ 2464: "direcpc-si",
+ 2465: "lbm",
+ 2466: "lbf",
+ 2467: "high-criteria",
+ 2468: "qip-msgd",
+ 2469: "mti-tcs-comm",
+ 2470: "taskman-port",
+ 2471: "seaodbc",
+ 2472: "c3",
+ 2473: "aker-cdp",
+ 2474: "vitalanalysis",
+ 2475: "ace-server",
+ 2476: "ace-svr-prop",
+ 2477: "ssm-cvs",
+ 2478: "ssm-cssps",
+ 2479: "ssm-els",
+ 2480: "powerexchange",
+ 2481: "giop",
+ 2482: "giop-ssl",
+ 2483: "ttc",
+ 2484: "ttc-ssl",
+ 2485: "netobjects1",
+ 2486: "netobjects2",
+ 2487: "pns",
+ 2488: "moy-corp",
+ 2489: "tsilb",
+ 2490: "qip-qdhcp",
+ 2491: "conclave-cpp",
+ 2492: "groove",
+ 2493: "talarian-mqs",
+ 2494: "bmc-ar",
+ 2495: "fast-rem-serv",
+ 2496: "dirgis",
+ 2497: "quaddb",
+ 2498: "odn-castraq",
+ 2499: "unicontrol",
+ 2500: "rtsserv",
+ 2501: "rtsclient",
+ 2502: "kentrox-prot",
+ 2503: "nms-dpnss",
+ 2504: "wlbs",
+ 2505: "ppcontrol",
+ 2506: "jbroker",
+ 2507: "spock",
+ 2508: "jdatastore",
+ 2509: "fjmpss",
+ 2510: "fjappmgrbulk",
+ 2511: "metastorm",
+ 2512: "citrixima",
+ 2513: "citrixadmin",
+ 2514: "facsys-ntp",
+ 2515: "facsys-router",
+ 2516: "maincontrol",
+ 2517: "call-sig-trans",
+ 2518: "willy",
+ 2519: "globmsgsvc",
+ 2520: "pvsw",
+ 2521: "adaptecmgr",
+ 2522: "windb",
+ 2523: "qke-llc-v3",
+ 2524: "optiwave-lm",
+ 2525: "ms-v-worlds",
+ 2526: "ema-sent-lm",
+ 2527: "iqserver",
+ 2528: "ncr-ccl",
+ 2529: "utsftp",
+ 2530: "vrcommerce",
+ 2531: "ito-e-gui",
+ 2532: "ovtopmd",
+ 2533: "snifferserver",
+ 2534: "combox-web-acc",
+ 2535: "madcap",
+ 2536: "btpp2audctr1",
+ 2537: "upgrade",
+ 2538: "vnwk-prapi",
+ 2539: "vsiadmin",
+ 2540: "lonworks",
+ 2541: "lonworks2",
+ 2542: "udrawgraph",
+ 2543: "reftek",
+ 2544: "novell-zen",
+ 2545: "sis-emt",
+ 2546: "vytalvaultbrtp",
+ 2547: "vytalvaultvsmp",
+ 2548: "vytalvaultpipe",
+ 2549: "ipass",
+ 2550: "ads",
+ 2551: "isg-uda-server",
+ 2552: "call-logging",
+ 2553: "efidiningport",
+ 2554: "vcnet-link-v10",
+ 2555: "compaq-wcp",
+ 2556: "nicetec-nmsvc",
+ 2557: "nicetec-mgmt",
+ 2558: "pclemultimedia",
+ 2559: "lstp",
+ 2560: "labrat",
+ 2561: "mosaixcc",
+ 2562: "delibo",
+ 2563: "cti-redwood",
+ 2564: "hp-3000-telnet",
+ 2565: "coord-svr",
+ 2566: "pcs-pcw",
+ 2567: "clp",
+ 2568: "spamtrap",
+ 2569: "sonuscallsig",
+ 2570: "hs-port",
+ 2571: "cecsvc",
+ 2572: "ibp",
+ 2573: "trustestablish",
+ 2574: "blockade-bpsp",
+ 2575: "hl7",
+ 2576: "tclprodebugger",
+ 2577: "scipticslsrvr",
+ 2578: "rvs-isdn-dcp",
+ 2579: "mpfoncl",
+ 2580: "tributary",
+ 2581: "argis-te",
+ 2582: "argis-ds",
+ 2583: "mon",
+ 2584: "cyaserv",
+ 2585: "netx-server",
+ 2586: "netx-agent",
+ 2587: "masc",
+ 2588: "privilege",
+ 2589: "quartus-tcl",
+ 2590: "idotdist",
+ 2591: "maytagshuffle",
+ 2592: "netrek",
+ 2593: "mns-mail",
+ 2594: "dts",
+ 2595: "worldfusion1",
+ 2596: "worldfusion2",
+ 2597: "homesteadglory",
+ 2598: "citriximaclient",
+ 2599: "snapd",
+ 2600: "hpstgmgr",
+ 2601: "discp-client",
+ 2602: "discp-server",
+ 2603: "servicemeter",
+ 2604: "nsc-ccs",
+ 2605: "nsc-posa",
+ 2606: "netmon",
+ 2607: "connection",
+ 2608: "wag-service",
+ 2609: "system-monitor",
+ 2610: "versa-tek",
+ 2611: "lionhead",
+ 2612: "qpasa-agent",
+ 2613: "smntubootstrap",
+ 2614: "neveroffline",
+ 2615: "firepower",
+ 2616: "appswitch-emp",
+ 2617: "cmadmin",
+ 2618: "priority-e-com",
+ 2619: "bruce",
+ 2620: "lpsrecommender",
+ 2621: "miles-apart",
+ 2622: "metricadbc",
+ 2623: "lmdp",
+ 2624: "aria",
+ 2625: "blwnkl-port",
+ 2626: "gbjd816",
+ 2627: "moshebeeri",
+ 2628: "dict",
+ 2629: "sitaraserver",
+ 2630: "sitaramgmt",
+ 2631: "sitaradir",
+ 2632: "irdg-post",
+ 2633: "interintelli",
+ 2634: "pk-electronics",
+ 2635: "backburner",
+ 2636: "solve",
+ 2637: "imdocsvc",
+ 2638: "sybaseanywhere",
+ 2639: "aminet",
+ 2640: "ami-control",
+ 2641: "hdl-srv",
+ 2642: "tragic",
+ 2643: "gte-samp",
+ 2644: "travsoft-ipx-t",
+ 2645: "novell-ipx-cmd",
+ 2646: "and-lm",
+ 2647: "syncserver",
+ 2648: "upsnotifyprot",
+ 2649: "vpsipport",
+ 2650: "eristwoguns",
+ 2651: "ebinsite",
+ 2652: "interpathpanel",
+ 2653: "sonus",
+ 2654: "corel-vncadmin",
+ 2655: "unglue",
+ 2656: "kana",
+ 2657: "sns-dispatcher",
+ 2658: "sns-admin",
+ 2659: "sns-query",
+ 2660: "gcmonitor",
+ 2661: "olhost",
+ 2662: "bintec-capi",
+ 2663: "bintec-tapi",
+ 2664: "patrol-mq-gm",
+ 2665: "patrol-mq-nm",
+ 2666: "extensis",
+ 2667: "alarm-clock-s",
+ 2668: "alarm-clock-c",
+ 2669: "toad",
+ 2670: "tve-announce",
+ 2671: "newlixreg",
+ 2672: "nhserver",
+ 2673: "firstcall42",
+ 2674: "ewnn",
+ 2675: "ttc-etap",
+ 2676: "simslink",
+ 2677: "gadgetgate1way",
+ 2678: "gadgetgate2way",
+ 2679: "syncserverssl",
+ 2680: "pxc-sapxom",
+ 2681: "mpnjsomb",
+ 2683: "ncdloadbalance",
+ 2684: "mpnjsosv",
+ 2685: "mpnjsocl",
+ 2686: "mpnjsomg",
+ 2687: "pq-lic-mgmt",
+ 2688: "md-cg-http",
+ 2689: "fastlynx",
+ 2690: "hp-nnm-data",
+ 2691: "itinternet",
+ 2692: "admins-lms",
+ 2694: "pwrsevent",
+ 2695: "vspread",
+ 2696: "unifyadmin",
+ 2697: "oce-snmp-trap",
+ 2698: "mck-ivpip",
+ 2699: "csoft-plusclnt",
+ 2700: "tqdata",
+ 2701: "sms-rcinfo",
+ 2702: "sms-xfer",
+ 2703: "sms-chat",
+ 2704: "sms-remctrl",
+ 2705: "sds-admin",
+ 2706: "ncdmirroring",
+ 2707: "emcsymapiport",
+ 2708: "banyan-net",
+ 2709: "supermon",
+ 2710: "sso-service",
+ 2711: "sso-control",
+ 2712: "aocp",
+ 2713: "raventbs",
+ 2714: "raventdm",
+ 2715: "hpstgmgr2",
+ 2716: "inova-ip-disco",
+ 2717: "pn-requester",
+ 2718: "pn-requester2",
+ 2719: "scan-change",
+ 2720: "wkars",
+ 2721: "smart-diagnose",
+ 2722: "proactivesrvr",
+ 2723: "watchdog-nt",
+ 2724: "qotps",
+ 2725: "msolap-ptp2",
+ 2726: "tams",
+ 2727: "mgcp-callagent",
+ 2728: "sqdr",
+ 2729: "tcim-control",
+ 2730: "nec-raidplus",
+ 2731: "fyre-messanger",
+ 2732: "g5m",
+ 2733: "signet-ctf",
+ 2734: "ccs-software",
+ 2735: "netiq-mc",
+ 2736: "radwiz-nms-srv",
+ 2737: "srp-feedback",
+ 2738: "ndl-tcp-ois-gw",
+ 2739: "tn-timing",
+ 2740: "alarm",
+ 2741: "tsb",
+ 2742: "tsb2",
+ 2743: "murx",
+ 2744: "honyaku",
+ 2745: "urbisnet",
+ 2746: "cpudpencap",
+ 2747: "fjippol-swrly",
+ 2748: "fjippol-polsvr",
+ 2749: "fjippol-cnsl",
+ 2750: "fjippol-port1",
+ 2751: "fjippol-port2",
+ 2752: "rsisysaccess",
+ 2753: "de-spot",
+ 2754: "apollo-cc",
+ 2755: "expresspay",
+ 2756: "simplement-tie",
+ 2757: "cnrp",
+ 2758: "apollo-status",
+ 2759: "apollo-gms",
+ 2760: "sabams",
+ 2761: "dicom-iscl",
+ 2762: "dicom-tls",
+ 2763: "desktop-dna",
+ 2764: "data-insurance",
+ 2765: "qip-audup",
+ 2766: "compaq-scp",
+ 2767: "uadtc",
+ 2768: "uacs",
+ 2769: "exce",
+ 2770: "veronica",
+ 2771: "vergencecm",
+ 2772: "auris",
+ 2773: "rbakcup1",
+ 2774: "rbakcup2",
+ 2775: "smpp",
+ 2776: "ridgeway1",
+ 2777: "ridgeway2",
+ 2778: "gwen-sonya",
+ 2779: "lbc-sync",
+ 2780: "lbc-control",
+ 2781: "whosells",
+ 2782: "everydayrc",
+ 2783: "aises",
+ 2784: "www-dev",
+ 2785: "aic-np",
+ 2786: "aic-oncrpc",
+ 2787: "piccolo",
+ 2788: "fryeserv",
+ 2789: "media-agent",
+ 2790: "plgproxy",
+ 2791: "mtport-regist",
+ 2792: "f5-globalsite",
+ 2793: "initlsmsad",
+ 2795: "livestats",
+ 2796: "ac-tech",
+ 2797: "esp-encap",
+ 2798: "tmesis-upshot",
+ 2799: "icon-discover",
+ 2800: "acc-raid",
+ 2801: "igcp",
+ 2802: "veritas-tcp1",
+ 2803: "btprjctrl",
+ 2804: "dvr-esm",
+ 2805: "wta-wsp-s",
+ 2806: "cspuni",
+ 2807: "cspmulti",
+ 2808: "j-lan-p",
+ 2809: "corbaloc",
+ 2810: "netsteward",
+ 2811: "gsiftp",
+ 2812: "atmtcp",
+ 2813: "llm-pass",
+ 2814: "llm-csv",
+ 2815: "lbc-measure",
+ 2816: "lbc-watchdog",
+ 2817: "nmsigport",
+ 2818: "rmlnk",
+ 2819: "fc-faultnotify",
+ 2820: "univision",
+ 2821: "vrts-at-port",
+ 2822: "ka0wuc",
+ 2823: "cqg-netlan",
+ 2824: "cqg-netlan-1",
+ 2826: "slc-systemlog",
+ 2827: "slc-ctrlrloops",
+ 2828: "itm-lm",
+ 2829: "silkp1",
+ 2830: "silkp2",
+ 2831: "silkp3",
+ 2832: "silkp4",
+ 2833: "glishd",
+ 2834: "evtp",
+ 2835: "evtp-data",
+ 2836: "catalyst",
+ 2837: "repliweb",
+ 2838: "starbot",
+ 2839: "nmsigport",
+ 2840: "l3-exprt",
+ 2841: "l3-ranger",
+ 2842: "l3-hawk",
+ 2843: "pdnet",
+ 2844: "bpcp-poll",
+ 2845: "bpcp-trap",
+ 2846: "aimpp-hello",
+ 2847: "aimpp-port-req",
+ 2848: "amt-blc-port",
+ 2849: "fxp",
+ 2850: "metaconsole",
+ 2851: "webemshttp",
+ 2852: "bears-01",
+ 2853: "ispipes",
+ 2854: "infomover",
+ 2855: "msrp",
+ 2856: "cesdinv",
+ 2857: "simctlp",
+ 2858: "ecnp",
+ 2859: "activememory",
+ 2860: "dialpad-voice1",
+ 2861: "dialpad-voice2",
+ 2862: "ttg-protocol",
+ 2863: "sonardata",
+ 2864: "astromed-main",
+ 2865: "pit-vpn",
+ 2866: "iwlistener",
+ 2867: "esps-portal",
+ 2868: "npep-messaging",
+ 2869: "icslap",
+ 2870: "daishi",
+ 2871: "msi-selectplay",
+ 2872: "radix",
+ 2874: "dxmessagebase1",
+ 2875: "dxmessagebase2",
+ 2876: "sps-tunnel",
+ 2877: "bluelance",
+ 2878: "aap",
+ 2879: "ucentric-ds",
+ 2880: "synapse",
+ 2881: "ndsp",
+ 2882: "ndtp",
+ 2883: "ndnp",
+ 2884: "flashmsg",
+ 2885: "topflow",
+ 2886: "responselogic",
+ 2887: "aironetddp",
+ 2888: "spcsdlobby",
+ 2889: "rsom",
+ 2890: "cspclmulti",
+ 2891: "cinegrfx-elmd",
+ 2892: "snifferdata",
+ 2893: "vseconnector",
+ 2894: "abacus-remote",
+ 2895: "natuslink",
+ 2896: "ecovisiong6-1",
+ 2897: "citrix-rtmp",
+ 2898: "appliance-cfg",
+ 2899: "powergemplus",
+ 2900: "quicksuite",
+ 2901: "allstorcns",
+ 2902: "netaspi",
+ 2903: "suitcase",
+ 2904: "m2ua",
+ 2905: "m3ua",
+ 2906: "caller9",
+ 2907: "webmethods-b2b",
+ 2908: "mao",
+ 2909: "funk-dialout",
+ 2910: "tdaccess",
+ 2911: "blockade",
+ 2912: "epicon",
+ 2913: "boosterware",
+ 2914: "gamelobby",
+ 2915: "tksocket",
+ 2916: "elvin-server",
+ 2917: "elvin-client",
+ 2918: "kastenchasepad",
+ 2919: "roboer",
+ 2920: "roboeda",
+ 2921: "cesdcdman",
+ 2922: "cesdcdtrn",
+ 2923: "wta-wsp-wtp-s",
+ 2924: "precise-vip",
+ 2926: "mobile-file-dl",
+ 2927: "unimobilectrl",
+ 2928: "redstone-cpss",
+ 2929: "amx-webadmin",
+ 2930: "amx-weblinx",
+ 2931: "circle-x",
+ 2932: "incp",
+ 2933: "4-tieropmgw",
+ 2934: "4-tieropmcli",
+ 2935: "qtp",
+ 2936: "otpatch",
+ 2937: "pnaconsult-lm",
+ 2938: "sm-pas-1",
+ 2939: "sm-pas-2",
+ 2940: "sm-pas-3",
+ 2941: "sm-pas-4",
+ 2942: "sm-pas-5",
+ 2943: "ttnrepository",
+ 2944: "megaco-h248",
+ 2945: "h248-binary",
+ 2946: "fjsvmpor",
+ 2947: "gpsd",
+ 2948: "wap-push",
+ 2949: "wap-pushsecure",
+ 2950: "esip",
+ 2951: "ottp",
+ 2952: "mpfwsas",
+ 2953: "ovalarmsrv",
+ 2954: "ovalarmsrv-cmd",
+ 2955: "csnotify",
+ 2956: "ovrimosdbman",
+ 2957: "jmact5",
+ 2958: "jmact6",
+ 2959: "rmopagt",
+ 2960: "dfoxserver",
+ 2961: "boldsoft-lm",
+ 2962: "iph-policy-cli",
+ 2963: "iph-policy-adm",
+ 2964: "bullant-srap",
+ 2965: "bullant-rap",
+ 2966: "idp-infotrieve",
+ 2967: "ssc-agent",
+ 2968: "enpp",
+ 2969: "essp",
+ 2970: "index-net",
+ 2971: "netclip",
+ 2972: "pmsm-webrctl",
+ 2973: "svnetworks",
+ 2974: "signal",
+ 2975: "fjmpcm",
+ 2976: "cns-srv-port",
+ 2977: "ttc-etap-ns",
+ 2978: "ttc-etap-ds",
+ 2979: "h263-video",
+ 2980: "wimd",
+ 2981: "mylxamport",
+ 2982: "iwb-whiteboard",
+ 2983: "netplan",
+ 2984: "hpidsadmin",
+ 2985: "hpidsagent",
+ 2986: "stonefalls",
+ 2987: "identify",
+ 2988: "hippad",
+ 2989: "zarkov",
+ 2990: "boscap",
+ 2991: "wkstn-mon",
+ 2992: "avenyo",
+ 2993: "veritas-vis1",
+ 2994: "veritas-vis2",
+ 2995: "idrs",
+ 2996: "vsixml",
+ 2997: "rebol",
+ 2998: "realsecure",
+ 2999: "remoteware-un",
+ 3000: "hbci",
+ 3001: "origo-native",
+ 3002: "exlm-agent",
+ 3003: "cgms",
+ 3004: "csoftragent",
+ 3005: "geniuslm",
+ 3006: "ii-admin",
+ 3007: "lotusmtap",
+ 3008: "midnight-tech",
+ 3009: "pxc-ntfy",
+ 3010: "gw",
+ 3011: "trusted-web",
+ 3012: "twsdss",
+ 3013: "gilatskysurfer",
+ 3014: "broker-service",
+ 3015: "nati-dstp",
+ 3016: "notify-srvr",
+ 3017: "event-listener",
+ 3018: "srvc-registry",
+ 3019: "resource-mgr",
+ 3020: "cifs",
+ 3021: "agriserver",
+ 3022: "csregagent",
+ 3023: "magicnotes",
+ 3024: "nds-sso",
+ 3025: "arepa-raft",
+ 3026: "agri-gateway",
+ 3027: "LiebDevMgmt-C",
+ 3028: "LiebDevMgmt-DM",
+ 3029: "LiebDevMgmt-A",
+ 3030: "arepa-cas",
+ 3031: "eppc",
+ 3032: "redwood-chat",
+ 3033: "pdb",
+ 3034: "osmosis-aeea",
+ 3035: "fjsv-gssagt",
+ 3036: "hagel-dump",
+ 3037: "hp-san-mgmt",
+ 3038: "santak-ups",
+ 3039: "cogitate",
+ 3040: "tomato-springs",
+ 3041: "di-traceware",
+ 3042: "journee",
+ 3043: "brp",
+ 3044: "epp",
+ 3045: "responsenet",
+ 3046: "di-ase",
+ 3047: "hlserver",
+ 3048: "pctrader",
+ 3049: "nsws",
+ 3050: "gds-db",
+ 3051: "galaxy-server",
+ 3052: "apc-3052",
+ 3053: "dsom-server",
+ 3054: "amt-cnf-prot",
+ 3055: "policyserver",
+ 3056: "cdl-server",
+ 3057: "goahead-fldup",
+ 3058: "videobeans",
+ 3059: "qsoft",
+ 3060: "interserver",
+ 3061: "cautcpd",
+ 3062: "ncacn-ip-tcp",
+ 3063: "ncadg-ip-udp",
+ 3064: "rprt",
+ 3065: "slinterbase",
+ 3066: "netattachsdmp",
+ 3067: "fjhpjp",
+ 3068: "ls3bcast",
+ 3069: "ls3",
+ 3070: "mgxswitch",
+ 3071: "xplat-replicate",
+ 3072: "csd-monitor",
+ 3073: "vcrp",
+ 3074: "xbox",
+ 3075: "orbix-locator",
+ 3076: "orbix-config",
+ 3077: "orbix-loc-ssl",
+ 3078: "orbix-cfg-ssl",
+ 3079: "lv-frontpanel",
+ 3080: "stm-pproc",
+ 3081: "tl1-lv",
+ 3082: "tl1-raw",
+ 3083: "tl1-telnet",
+ 3084: "itm-mccs",
+ 3085: "pcihreq",
+ 3086: "jdl-dbkitchen",
+ 3087: "asoki-sma",
+ 3088: "xdtp",
+ 3089: "ptk-alink",
+ 3090: "stss",
+ 3091: "1ci-smcs",
+ 3093: "rapidmq-center",
+ 3094: "rapidmq-reg",
+ 3095: "panasas",
+ 3096: "ndl-aps",
+ 3098: "umm-port",
+ 3099: "chmd",
+ 3100: "opcon-xps",
+ 3101: "hp-pxpib",
+ 3102: "slslavemon",
+ 3103: "autocuesmi",
+ 3104: "autocuelog",
+ 3105: "cardbox",
+ 3106: "cardbox-http",
+ 3107: "business",
+ 3108: "geolocate",
+ 3109: "personnel",
+ 3110: "sim-control",
+ 3111: "wsynch",
+ 3112: "ksysguard",
+ 3113: "cs-auth-svr",
+ 3114: "ccmad",
+ 3115: "mctet-master",
+ 3116: "mctet-gateway",
+ 3117: "mctet-jserv",
+ 3118: "pkagent",
+ 3119: "d2000kernel",
+ 3120: "d2000webserver",
+ 3121: "pcmk-remote",
+ 3122: "vtr-emulator",
+ 3123: "edix",
+ 3124: "beacon-port",
+ 3125: "a13-an",
+ 3127: "ctx-bridge",
+ 3128: "ndl-aas",
+ 3129: "netport-id",
+ 3130: "icpv2",
+ 3131: "netbookmark",
+ 3132: "ms-rule-engine",
+ 3133: "prism-deploy",
+ 3134: "ecp",
+ 3135: "peerbook-port",
+ 3136: "grubd",
+ 3137: "rtnt-1",
+ 3138: "rtnt-2",
+ 3139: "incognitorv",
+ 3140: "ariliamulti",
+ 3141: "vmodem",
+ 3142: "rdc-wh-eos",
+ 3143: "seaview",
+ 3144: "tarantella",
+ 3145: "csi-lfap",
+ 3146: "bears-02",
+ 3147: "rfio",
+ 3148: "nm-game-admin",
+ 3149: "nm-game-server",
+ 3150: "nm-asses-admin",
+ 3151: "nm-assessor",
+ 3152: "feitianrockey",
+ 3153: "s8-client-port",
+ 3154: "ccmrmi",
+ 3155: "jpegmpeg",
+ 3156: "indura",
+ 3157: "e3consultants",
+ 3158: "stvp",
+ 3159: "navegaweb-port",
+ 3160: "tip-app-server",
+ 3161: "doc1lm",
+ 3162: "sflm",
+ 3163: "res-sap",
+ 3164: "imprs",
+ 3165: "newgenpay",
+ 3166: "sossecollector",
+ 3167: "nowcontact",
+ 3168: "poweronnud",
+ 3169: "serverview-as",
+ 3170: "serverview-asn",
+ 3171: "serverview-gf",
+ 3172: "serverview-rm",
+ 3173: "serverview-icc",
+ 3174: "armi-server",
+ 3175: "t1-e1-over-ip",
+ 3176: "ars-master",
+ 3177: "phonex-port",
+ 3178: "radclientport",
+ 3179: "h2gf-w-2m",
+ 3180: "mc-brk-srv",
+ 3181: "bmcpatrolagent",
+ 3182: "bmcpatrolrnvu",
+ 3183: "cops-tls",
+ 3184: "apogeex-port",
+ 3185: "smpppd",
+ 3186: "iiw-port",
+ 3187: "odi-port",
+ 3188: "brcm-comm-port",
+ 3189: "pcle-infex",
+ 3190: "csvr-proxy",
+ 3191: "csvr-sslproxy",
+ 3192: "firemonrcc",
+ 3193: "spandataport",
+ 3194: "magbind",
+ 3195: "ncu-1",
+ 3196: "ncu-2",
+ 3197: "embrace-dp-s",
+ 3198: "embrace-dp-c",
+ 3199: "dmod-workspace",
+ 3200: "tick-port",
+ 3201: "cpq-tasksmart",
+ 3202: "intraintra",
+ 3203: "netwatcher-mon",
+ 3204: "netwatcher-db",
+ 3205: "isns",
+ 3206: "ironmail",
+ 3207: "vx-auth-port",
+ 3208: "pfu-prcallback",
+ 3209: "netwkpathengine",
+ 3210: "flamenco-proxy",
+ 3211: "avsecuremgmt",
+ 3212: "surveyinst",
+ 3213: "neon24x7",
+ 3214: "jmq-daemon-1",
+ 3215: "jmq-daemon-2",
+ 3216: "ferrari-foam",
+ 3217: "unite",
+ 3218: "smartpackets",
+ 3219: "wms-messenger",
+ 3220: "xnm-ssl",
+ 3221: "xnm-clear-text",
+ 3222: "glbp",
+ 3223: "digivote",
+ 3224: "aes-discovery",
+ 3225: "fcip-port",
+ 3226: "isi-irp",
+ 3227: "dwnmshttp",
+ 3228: "dwmsgserver",
+ 3229: "global-cd-port",
+ 3230: "sftdst-port",
+ 3231: "vidigo",
+ 3232: "mdtp",
+ 3233: "whisker",
+ 3234: "alchemy",
+ 3235: "mdap-port",
+ 3236: "apparenet-ts",
+ 3237: "apparenet-tps",
+ 3238: "apparenet-as",
+ 3239: "apparenet-ui",
+ 3240: "triomotion",
+ 3241: "sysorb",
+ 3242: "sdp-id-port",
+ 3243: "timelot",
+ 3244: "onesaf",
+ 3245: "vieo-fe",
+ 3246: "dvt-system",
+ 3247: "dvt-data",
+ 3248: "procos-lm",
+ 3249: "ssp",
+ 3250: "hicp",
+ 3251: "sysscanner",
+ 3252: "dhe",
+ 3253: "pda-data",
+ 3254: "pda-sys",
+ 3255: "semaphore",
+ 3256: "cpqrpm-agent",
+ 3257: "cpqrpm-server",
+ 3258: "ivecon-port",
+ 3259: "epncdp2",
+ 3260: "iscsi-target",
+ 3261: "winshadow",
+ 3262: "necp",
+ 3263: "ecolor-imager",
+ 3264: "ccmail",
+ 3265: "altav-tunnel",
+ 3266: "ns-cfg-server",
+ 3267: "ibm-dial-out",
+ 3268: "msft-gc",
+ 3269: "msft-gc-ssl",
+ 3270: "verismart",
+ 3271: "csoft-prev",
+ 3272: "user-manager",
+ 3273: "sxmp",
+ 3274: "ordinox-server",
+ 3275: "samd",
+ 3276: "maxim-asics",
+ 3277: "awg-proxy",
+ 3278: "lkcmserver",
+ 3279: "admind",
+ 3280: "vs-server",
+ 3281: "sysopt",
+ 3282: "datusorb",
+ 3283: "Apple Remote Desktop (Net Assistant)",
+ 3284: "4talk",
+ 3285: "plato",
+ 3286: "e-net",
+ 3287: "directvdata",
+ 3288: "cops",
+ 3289: "enpc",
+ 3290: "caps-lm",
+ 3291: "sah-lm",
+ 3292: "cart-o-rama",
+ 3293: "fg-fps",
+ 3294: "fg-gip",
+ 3295: "dyniplookup",
+ 3296: "rib-slm",
+ 3297: "cytel-lm",
+ 3298: "deskview",
+ 3299: "pdrncs",
+ 3300: "ceph",
+ 3302: "mcs-fastmail",
+ 3303: "opsession-clnt",
+ 3304: "opsession-srvr",
+ 3305: "odette-ftp",
+ 3306: "mysql",
+ 3307: "opsession-prxy",
+ 3308: "tns-server",
+ 3309: "tns-adv",
+ 3310: "dyna-access",
+ 3311: "mcns-tel-ret",
+ 3312: "appman-server",
+ 3313: "uorb",
+ 3314: "uohost",
+ 3315: "cdid",
+ 3316: "aicc-cmi",
+ 3317: "vsaiport",
+ 3318: "ssrip",
+ 3319: "sdt-lmd",
+ 3320: "officelink2000",
+ 3321: "vnsstr",
+ 3326: "sftu",
+ 3327: "bbars",
+ 3328: "egptlm",
+ 3329: "hp-device-disc",
+ 3330: "mcs-calypsoicf",
+ 3331: "mcs-messaging",
+ 3332: "mcs-mailsvr",
+ 3333: "dec-notes",
+ 3334: "directv-web",
+ 3335: "directv-soft",
+ 3336: "directv-tick",
+ 3337: "directv-catlg",
+ 3338: "anet-b",
+ 3339: "anet-l",
+ 3340: "anet-m",
+ 3341: "anet-h",
+ 3342: "webtie",
+ 3343: "ms-cluster-net",
+ 3344: "bnt-manager",
+ 3345: "influence",
+ 3346: "trnsprntproxy",
+ 3347: "phoenix-rpc",
+ 3348: "pangolin-laser",
+ 3349: "chevinservices",
+ 3350: "findviatv",
+ 3351: "btrieve",
+ 3352: "ssql",
+ 3353: "fatpipe",
+ 3354: "suitjd",
+ 3355: "ordinox-dbase",
+ 3356: "upnotifyps",
+ 3357: "adtech-test",
+ 3358: "mpsysrmsvr",
+ 3359: "wg-netforce",
+ 3360: "kv-server",
+ 3361: "kv-agent",
+ 3362: "dj-ilm",
+ 3363: "nati-vi-server",
+ 3364: "creativeserver",
+ 3365: "contentserver",
+ 3366: "creativepartnr",
+ 3372: "tip2",
+ 3373: "lavenir-lm",
+ 3374: "cluster-disc",
+ 3375: "vsnm-agent",
+ 3376: "cdbroker",
+ 3377: "cogsys-lm",
+ 3378: "wsicopy",
+ 3379: "socorfs",
+ 3380: "sns-channels",
+ 3381: "geneous",
+ 3382: "fujitsu-neat",
+ 3383: "esp-lm",
+ 3384: "hp-clic",
+ 3385: "qnxnetman",
+ 3386: "gprs-data",
+ 3387: "backroomnet",
+ 3388: "cbserver",
+ 3389: "ms-wbt-server",
+ 3390: "dsc",
+ 3391: "savant",
+ 3392: "efi-lm",
+ 3393: "d2k-tapestry1",
+ 3394: "d2k-tapestry2",
+ 3395: "dyna-lm",
+ 3396: "printer-agent",
+ 3397: "cloanto-lm",
+ 3398: "mercantile",
+ 3399: "csms",
+ 3400: "csms2",
+ 3401: "filecast",
+ 3402: "fxaengine-net",
+ 3405: "nokia-ann-ch1",
+ 3406: "nokia-ann-ch2",
+ 3407: "ldap-admin",
+ 3408: "BESApi",
+ 3409: "networklens",
+ 3410: "networklenss",
+ 3411: "biolink-auth",
+ 3412: "xmlblaster",
+ 3413: "svnet",
+ 3414: "wip-port",
+ 3415: "bcinameservice",
+ 3416: "commandport",
+ 3417: "csvr",
+ 3418: "rnmap",
+ 3419: "softaudit",
+ 3420: "ifcp-port",
+ 3421: "bmap",
+ 3422: "rusb-sys-port",
+ 3423: "xtrm",
+ 3424: "xtrms",
+ 3425: "agps-port",
+ 3426: "arkivio",
+ 3427: "websphere-snmp",
+ 3428: "twcss",
+ 3429: "gcsp",
+ 3430: "ssdispatch",
+ 3431: "ndl-als",
+ 3432: "osdcp",
+ 3433: "opnet-smp",
+ 3434: "opencm",
+ 3435: "pacom",
+ 3436: "gc-config",
+ 3437: "autocueds",
+ 3438: "spiral-admin",
+ 3439: "hri-port",
+ 3440: "ans-console",
+ 3441: "connect-client",
+ 3442: "connect-server",
+ 3443: "ov-nnm-websrv",
+ 3444: "denali-server",
+ 3445: "monp",
+ 3446: "3comfaxrpc",
+ 3447: "directnet",
+ 3448: "dnc-port",
+ 3449: "hotu-chat",
+ 3450: "castorproxy",
+ 3451: "asam",
+ 3452: "sabp-signal",
+ 3453: "pscupd",
+ 3454: "mira",
+ 3455: "prsvp",
+ 3456: "vat",
+ 3457: "vat-control",
+ 3458: "d3winosfi",
+ 3459: "integral",
+ 3460: "edm-manager",
+ 3461: "edm-stager",
+ 3462: "edm-std-notify",
+ 3463: "edm-adm-notify",
+ 3464: "edm-mgr-sync",
+ 3465: "edm-mgr-cntrl",
+ 3466: "workflow",
+ 3467: "rcst",
+ 3468: "ttcmremotectrl",
+ 3469: "pluribus",
+ 3470: "jt400",
+ 3471: "jt400-ssl",
+ 3472: "jaugsremotec-1",
+ 3473: "jaugsremotec-2",
+ 3474: "ttntspauto",
+ 3475: "genisar-port",
+ 3476: "nppmp",
+ 3477: "ecomm",
+ 3478: "stun",
+ 3479: "twrpc",
+ 3480: "plethora",
+ 3481: "cleanerliverc",
+ 3482: "vulture",
+ 3483: "slim-devices",
+ 3484: "gbs-stp",
+ 3485: "celatalk",
+ 3486: "ifsf-hb-port",
+ 3487: "ltctcp",
+ 3488: "fs-rh-srv",
+ 3489: "dtp-dia",
+ 3490: "colubris",
+ 3491: "swr-port",
+ 3492: "tvdumtray-port",
+ 3493: "nut",
+ 3494: "ibm3494",
+ 3495: "seclayer-tcp",
+ 3496: "seclayer-tls",
+ 3497: "ipether232port",
+ 3498: "dashpas-port",
+ 3499: "sccip-media",
+ 3500: "rtmp-port",
+ 3501: "isoft-p2p",
+ 3502: "avinstalldisc",
+ 3503: "lsp-ping",
+ 3504: "ironstorm",
+ 3505: "ccmcomm",
+ 3506: "apc-3506",
+ 3507: "nesh-broker",
+ 3508: "interactionweb",
+ 3509: "vt-ssl",
+ 3510: "xss-port",
+ 3511: "webmail-2",
+ 3512: "aztec",
+ 3513: "arcpd",
+ 3514: "must-p2p",
+ 3515: "must-backplane",
+ 3516: "smartcard-port",
+ 3517: "802-11-iapp",
+ 3518: "artifact-msg",
+ 3519: "nvmsgd",
+ 3520: "galileolog",
+ 3521: "mc3ss",
+ 3522: "nssocketport",
+ 3523: "odeumservlink",
+ 3524: "ecmport",
+ 3525: "eisport",
+ 3526: "starquiz-port",
+ 3527: "beserver-msg-q",
+ 3528: "jboss-iiop",
+ 3529: "jboss-iiop-ssl",
+ 3530: "gf",
+ 3531: "joltid",
+ 3532: "raven-rmp",
+ 3533: "raven-rdp",
+ 3534: "urld-port",
+ 3535: "ms-la",
+ 3536: "snac",
+ 3537: "ni-visa-remote",
+ 3538: "ibm-diradm",
+ 3539: "ibm-diradm-ssl",
+ 3540: "pnrp-port",
+ 3541: "voispeed-port",
+ 3542: "hacl-monitor",
+ 3543: "qftest-lookup",
+ 3544: "teredo",
+ 3545: "camac",
+ 3547: "symantec-sim",
+ 3548: "interworld",
+ 3549: "tellumat-nms",
+ 3550: "ssmpp",
+ 3551: "apcupsd",
+ 3552: "taserver",
+ 3553: "rbr-discovery",
+ 3554: "questnotify",
+ 3555: "razor",
+ 3556: "sky-transport",
+ 3557: "personalos-001",
+ 3558: "mcp-port",
+ 3559: "cctv-port",
+ 3560: "iniserve-port",
+ 3561: "bmc-onekey",
+ 3562: "sdbproxy",
+ 3563: "watcomdebug",
+ 3564: "esimport",
+ 3565: "m2pa",
+ 3566: "quest-data-hub",
+ 3567: "dof-eps",
+ 3568: "dof-tunnel-sec",
+ 3569: "mbg-ctrl",
+ 3570: "mccwebsvr-port",
+ 3571: "megardsvr-port",
+ 3572: "megaregsvrport",
+ 3573: "tag-ups-1",
+ 3574: "dmaf-server",
+ 3575: "ccm-port",
+ 3576: "cmc-port",
+ 3577: "config-port",
+ 3578: "data-port",
+ 3579: "ttat3lb",
+ 3580: "nati-svrloc",
+ 3581: "kfxaclicensing",
+ 3582: "press",
+ 3583: "canex-watch",
+ 3584: "u-dbap",
+ 3585: "emprise-lls",
+ 3586: "emprise-lsc",
+ 3587: "p2pgroup",
+ 3588: "sentinel",
+ 3589: "isomair",
+ 3590: "wv-csp-sms",
+ 3591: "gtrack-server",
+ 3592: "gtrack-ne",
+ 3593: "bpmd",
+ 3594: "mediaspace",
+ 3595: "shareapp",
+ 3596: "iw-mmogame",
+ 3597: "a14",
+ 3598: "a15",
+ 3599: "quasar-server",
+ 3600: "trap-daemon",
+ 3601: "visinet-gui",
+ 3602: "infiniswitchcl",
+ 3603: "int-rcv-cntrl",
+ 3604: "bmc-jmx-port",
+ 3605: "comcam-io",
+ 3606: "splitlock",
+ 3607: "precise-i3",
+ 3608: "trendchip-dcp",
+ 3609: "cpdi-pidas-cm",
+ 3610: "echonet",
+ 3611: "six-degrees",
+ 3612: "hp-dataprotect",
+ 3613: "alaris-disc",
+ 3614: "sigma-port",
+ 3615: "start-network",
+ 3616: "cd3o-protocol",
+ 3617: "sharp-server",
+ 3618: "aairnet-1",
+ 3619: "aairnet-2",
+ 3620: "ep-pcp",
+ 3621: "ep-nsp",
+ 3622: "ff-lr-port",
+ 3623: "haipe-discover",
+ 3624: "dist-upgrade",
+ 3625: "volley",
+ 3626: "bvcdaemon-port",
+ 3627: "jamserverport",
+ 3628: "ept-machine",
+ 3629: "escvpnet",
+ 3630: "cs-remote-db",
+ 3631: "cs-services",
+ 3632: "distcc",
+ 3633: "wacp",
+ 3634: "hlibmgr",
+ 3635: "sdo",
+ 3636: "servistaitsm",
+ 3637: "scservp",
+ 3638: "ehp-backup",
+ 3639: "xap-ha",
+ 3640: "netplay-port1",
+ 3641: "netplay-port2",
+ 3642: "juxml-port",
+ 3643: "audiojuggler",
+ 3644: "ssowatch",
+ 3645: "cyc",
+ 3646: "xss-srv-port",
+ 3647: "splitlock-gw",
+ 3648: "fjcp",
+ 3649: "nmmp",
+ 3650: "prismiq-plugin",
+ 3651: "xrpc-registry",
+ 3652: "vxcrnbuport",
+ 3653: "tsp",
+ 3654: "vaprtm",
+ 3655: "abatemgr",
+ 3656: "abatjss",
+ 3657: "immedianet-bcn",
+ 3658: "ps-ams",
+ 3659: "apple-sasl",
+ 3660: "can-nds-ssl",
+ 3661: "can-ferret-ssl",
+ 3662: "pserver",
+ 3663: "dtp",
+ 3664: "ups-engine",
+ 3665: "ent-engine",
+ 3666: "eserver-pap",
+ 3667: "infoexch",
+ 3668: "dell-rm-port",
+ 3669: "casanswmgmt",
+ 3670: "smile",
+ 3671: "efcp",
+ 3672: "lispworks-orb",
+ 3673: "mediavault-gui",
+ 3674: "wininstall-ipc",
+ 3675: "calltrax",
+ 3676: "va-pacbase",
+ 3677: "roverlog",
+ 3678: "ipr-dglt",
+ 3679: "Escale (Newton Dock)",
+ 3680: "npds-tracker",
+ 3681: "bts-x73",
+ 3682: "cas-mapi",
+ 3683: "bmc-ea",
+ 3684: "faxstfx-port",
+ 3685: "dsx-agent",
+ 3686: "tnmpv2",
+ 3687: "simple-push",
+ 3688: "simple-push-s",
+ 3689: "daap",
+ 3690: "svn",
+ 3691: "magaya-network",
+ 3692: "intelsync",
+ 3693: "easl",
+ 3695: "bmc-data-coll",
+ 3696: "telnetcpcd",
+ 3697: "nw-license",
+ 3698: "sagectlpanel",
+ 3699: "kpn-icw",
+ 3700: "lrs-paging",
+ 3701: "netcelera",
+ 3702: "ws-discovery",
+ 3703: "adobeserver-3",
+ 3704: "adobeserver-4",
+ 3705: "adobeserver-5",
+ 3706: "rt-event",
+ 3707: "rt-event-s",
+ 3708: "sun-as-iiops",
+ 3709: "ca-idms",
+ 3710: "portgate-auth",
+ 3711: "edb-server2",
+ 3712: "sentinel-ent",
+ 3713: "tftps",
+ 3714: "delos-dms",
+ 3715: "anoto-rendezv",
+ 3716: "wv-csp-sms-cir",
+ 3717: "wv-csp-udp-cir",
+ 3718: "opus-services",
+ 3719: "itelserverport",
+ 3720: "ufastro-instr",
+ 3721: "xsync",
+ 3722: "xserveraid",
+ 3723: "sychrond",
+ 3724: "blizwow",
+ 3725: "na-er-tip",
+ 3726: "array-manager",
+ 3727: "e-mdu",
+ 3728: "e-woa",
+ 3729: "fksp-audit",
+ 3730: "client-ctrl",
+ 3731: "smap",
+ 3732: "m-wnn",
+ 3733: "multip-msg",
+ 3734: "synel-data",
+ 3735: "pwdis",
+ 3736: "rs-rmi",
+ 3737: "xpanel",
+ 3738: "versatalk",
+ 3739: "launchbird-lm",
+ 3740: "heartbeat",
+ 3741: "wysdma",
+ 3742: "cst-port",
+ 3743: "ipcs-command",
+ 3744: "sasg",
+ 3745: "gw-call-port",
+ 3746: "linktest",
+ 3747: "linktest-s",
+ 3748: "webdata",
+ 3749: "cimtrak",
+ 3750: "cbos-ip-port",
+ 3751: "gprs-cube",
+ 3752: "vipremoteagent",
+ 3753: "nattyserver",
+ 3754: "timestenbroker",
+ 3755: "sas-remote-hlp",
+ 3756: "canon-capt",
+ 3757: "grf-port",
+ 3758: "apw-registry",
+ 3759: "exapt-lmgr",
+ 3760: "adtempusclient",
+ 3761: "gsakmp",
+ 3762: "gbs-smp",
+ 3763: "xo-wave",
+ 3764: "mni-prot-rout",
+ 3765: "rtraceroute",
+ 3766: "sitewatch-s",
+ 3767: "listmgr-port",
+ 3768: "rblcheckd",
+ 3769: "haipe-otnk",
+ 3770: "cindycollab",
+ 3771: "paging-port",
+ 3772: "ctp",
+ 3773: "ctdhercules",
+ 3774: "zicom",
+ 3775: "ispmmgr",
+ 3776: "dvcprov-port",
+ 3777: "jibe-eb",
+ 3778: "c-h-it-port",
+ 3779: "cognima",
+ 3780: "nnp",
+ 3781: "abcvoice-port",
+ 3782: "iso-tp0s",
+ 3783: "bim-pem",
+ 3784: "bfd-control",
+ 3785: "bfd-echo",
+ 3786: "upstriggervsw",
+ 3787: "fintrx",
+ 3788: "isrp-port",
+ 3789: "remotedeploy",
+ 3790: "quickbooksrds",
+ 3791: "tvnetworkvideo",
+ 3792: "sitewatch",
+ 3793: "dcsoftware",
+ 3794: "jaus",
+ 3795: "myblast",
+ 3796: "spw-dialer",
+ 3797: "idps",
+ 3798: "minilock",
+ 3799: "radius-dynauth",
+ 3800: "pwgpsi",
+ 3801: "ibm-mgr",
+ 3802: "vhd",
+ 3803: "soniqsync",
+ 3804: "iqnet-port",
+ 3805: "tcpdataserver",
+ 3806: "wsmlb",
+ 3807: "spugna",
+ 3808: "sun-as-iiops-ca",
+ 3809: "apocd",
+ 3810: "wlanauth",
+ 3811: "amp",
+ 3812: "neto-wol-server",
+ 3813: "rap-ip",
+ 3814: "neto-dcs",
+ 3815: "lansurveyorxml",
+ 3816: "sunlps-http",
+ 3817: "tapeware",
+ 3818: "crinis-hb",
+ 3819: "epl-slp",
+ 3820: "scp",
+ 3821: "pmcp",
+ 3822: "acp-discovery",
+ 3823: "acp-conduit",
+ 3824: "acp-policy",
+ 3825: "ffserver",
+ 3826: "warmux",
+ 3827: "netmpi",
+ 3828: "neteh",
+ 3829: "neteh-ext",
+ 3830: "cernsysmgmtagt",
+ 3831: "dvapps",
+ 3832: "xxnetserver",
+ 3833: "aipn-auth",
+ 3834: "spectardata",
+ 3835: "spectardb",
+ 3836: "markem-dcp",
+ 3837: "mkm-discovery",
+ 3838: "sos",
+ 3839: "amx-rms",
+ 3840: "flirtmitmir",
+ 3841: "shiprush-db-svr",
+ 3842: "nhci",
+ 3843: "quest-agent",
+ 3844: "rnm",
+ 3845: "v-one-spp",
+ 3846: "an-pcp",
+ 3847: "msfw-control",
+ 3848: "item",
+ 3849: "spw-dnspreload",
+ 3850: "qtms-bootstrap",
+ 3851: "spectraport",
+ 3852: "sse-app-config",
+ 3853: "sscan",
+ 3854: "stryker-com",
+ 3855: "opentrac",
+ 3856: "informer",
+ 3857: "trap-port",
+ 3858: "trap-port-mom",
+ 3859: "nav-port",
+ 3860: "sasp",
+ 3861: "winshadow-hd",
+ 3862: "giga-pocket",
+ 3863: "asap-tcp",
+ 3864: "asap-tcp-tls",
+ 3865: "xpl",
+ 3866: "dzdaemon",
+ 3867: "dzoglserver",
+ 3868: "diameter",
+ 3869: "ovsam-mgmt",
+ 3870: "ovsam-d-agent",
+ 3871: "avocent-adsap",
+ 3872: "oem-agent",
+ 3873: "fagordnc",
+ 3874: "sixxsconfig",
+ 3875: "pnbscada",
+ 3876: "dl-agent",
+ 3877: "xmpcr-interface",
+ 3878: "fotogcad",
+ 3879: "appss-lm",
+ 3880: "igrs",
+ 3881: "idac",
+ 3882: "msdts1",
+ 3883: "vrpn",
+ 3884: "softrack-meter",
+ 3885: "topflow-ssl",
+ 3886: "nei-management",
+ 3887: "ciphire-data",
+ 3888: "ciphire-serv",
+ 3889: "dandv-tester",
+ 3890: "ndsconnect",
+ 3891: "rtc-pm-port",
+ 3892: "pcc-image-port",
+ 3893: "cgi-starapi",
+ 3894: "syam-agent",
+ 3895: "syam-smc",
+ 3896: "sdo-tls",
+ 3897: "sdo-ssh",
+ 3898: "senip",
+ 3899: "itv-control",
+ 3900: "udt-os",
+ 3901: "nimsh",
+ 3902: "nimaux",
+ 3903: "charsetmgr",
+ 3904: "omnilink-port",
+ 3905: "mupdate",
+ 3906: "topovista-data",
+ 3907: "imoguia-port",
+ 3908: "hppronetman",
+ 3909: "surfcontrolcpa",
+ 3910: "prnrequest",
+ 3911: "prnstatus",
+ 3912: "gbmt-stars",
+ 3913: "listcrt-port",
+ 3914: "listcrt-port-2",
+ 3915: "agcat",
+ 3916: "wysdmc",
+ 3917: "aftmux",
+ 3918: "pktcablemmcops",
+ 3919: "hyperip",
+ 3920: "exasoftport1",
+ 3921: "herodotus-net",
+ 3922: "sor-update",
+ 3923: "symb-sb-port",
+ 3924: "mpl-gprs-port",
+ 3925: "zmp",
+ 3926: "winport",
+ 3927: "natdataservice",
+ 3928: "netboot-pxe",
+ 3929: "smauth-port",
+ 3930: "syam-webserver",
+ 3931: "msr-plugin-port",
+ 3932: "dyn-site",
+ 3933: "plbserve-port",
+ 3934: "sunfm-port",
+ 3935: "sdp-portmapper",
+ 3936: "mailprox",
+ 3937: "dvbservdsc",
+ 3938: "dbcontrol-agent",
+ 3939: "aamp",
+ 3940: "xecp-node",
+ 3941: "homeportal-web",
+ 3942: "srdp",
+ 3943: "tig",
+ 3944: "sops",
+ 3945: "emcads",
+ 3946: "backupedge",
+ 3947: "ccp",
+ 3948: "apdap",
+ 3949: "drip",
+ 3950: "namemunge",
+ 3951: "pwgippfax",
+ 3952: "i3-sessionmgr",
+ 3953: "xmlink-connect",
+ 3954: "adrep",
+ 3955: "p2pcommunity",
+ 3956: "gvcp",
+ 3957: "mqe-broker",
+ 3958: "mqe-agent",
+ 3959: "treehopper",
+ 3960: "bess",
+ 3961: "proaxess",
+ 3962: "sbi-agent",
+ 3963: "thrp",
+ 3964: "sasggprs",
+ 3965: "ati-ip-to-ncpe",
+ 3966: "bflckmgr",
+ 3967: "ppsms",
+ 3968: "ianywhere-dbns",
+ 3969: "landmarks",
+ 3970: "lanrevagent",
+ 3971: "lanrevserver",
+ 3972: "iconp",
+ 3973: "progistics",
+ 3974: "citysearch",
+ 3975: "airshot",
+ 3976: "opswagent",
+ 3977: "opswmanager",
+ 3978: "secure-cfg-svr",
+ 3979: "smwan",
+ 3980: "acms",
+ 3981: "starfish",
+ 3982: "eis",
+ 3983: "eisp",
+ 3984: "mapper-nodemgr",
+ 3985: "mapper-mapethd",
+ 3986: "mapper-ws-ethd",
+ 3987: "centerline",
+ 3988: "dcs-config",
+ 3989: "bv-queryengine",
+ 3990: "bv-is",
+ 3991: "bv-smcsrv",
+ 3992: "bv-ds",
+ 3993: "bv-agent",
+ 3995: "iss-mgmt-ssl",
+ 3996: "abcsoftware",
+ 3997: "agentsease-db",
+ 3998: "dnx",
+ 3999: "nvcnet",
+ 4000: "terabase",
+ 4001: "newoak",
+ 4002: "pxc-spvr-ft",
+ 4003: "pxc-splr-ft",
+ 4004: "pxc-roid",
+ 4005: "pxc-pin",
+ 4006: "pxc-spvr",
+ 4007: "pxc-splr",
+ 4008: "netcheque",
+ 4009: "chimera-hwm",
+ 4010: "samsung-unidex",
+ 4011: "altserviceboot",
+ 4012: "pda-gate",
+ 4013: "acl-manager",
+ 4014: "taiclock",
+ 4015: "talarian-mcast1",
+ 4016: "talarian-mcast2",
+ 4017: "talarian-mcast3",
+ 4018: "talarian-mcast4",
+ 4019: "talarian-mcast5",
+ 4020: "trap",
+ 4021: "nexus-portal",
+ 4022: "dnox",
+ 4023: "esnm-zoning",
+ 4024: "tnp1-port",
+ 4025: "partimage",
+ 4026: "as-debug",
+ 4027: "bxp",
+ 4028: "dtserver-port",
+ 4029: "ip-qsig",
+ 4030: "jdmn-port",
+ 4031: "suucp",
+ 4032: "vrts-auth-port",
+ 4033: "sanavigator",
+ 4034: "ubxd",
+ 4035: "wap-push-http",
+ 4036: "wap-push-https",
+ 4037: "ravehd",
+ 4038: "fazzt-ptp",
+ 4039: "fazzt-admin",
+ 4040: "yo-main",
+ 4041: "houston",
+ 4042: "ldxp",
+ 4043: "nirp",
+ 4044: "ltp",
+ 4045: "npp",
+ 4046: "acp-proto",
+ 4047: "ctp-state",
+ 4049: "wafs",
+ 4050: "cisco-wafs",
+ 4051: "cppdp",
+ 4052: "interact",
+ 4053: "ccu-comm-1",
+ 4054: "ccu-comm-2",
+ 4055: "ccu-comm-3",
+ 4056: "lms",
+ 4057: "wfm",
+ 4058: "kingfisher",
+ 4059: "dlms-cosem",
+ 4060: "dsmeter-iatc",
+ 4061: "ice-location",
+ 4062: "ice-slocation",
+ 4063: "ice-router",
+ 4064: "ice-srouter",
+ 4065: "avanti-cdp",
+ 4066: "pmas",
+ 4067: "idp",
+ 4068: "ipfltbcst",
+ 4069: "minger",
+ 4070: "tripe",
+ 4071: "aibkup",
+ 4072: "zieto-sock",
+ 4073: "iRAPP",
+ 4074: "cequint-cityid",
+ 4075: "perimlan",
+ 4076: "seraph",
+ 4078: "cssp",
+ 4079: "santools",
+ 4080: "lorica-in",
+ 4081: "lorica-in-sec",
+ 4082: "lorica-out",
+ 4083: "lorica-out-sec",
+ 4085: "ezmessagesrv",
+ 4087: "applusservice",
+ 4088: "npsp",
+ 4089: "opencore",
+ 4090: "omasgport",
+ 4091: "ewinstaller",
+ 4092: "ewdgs",
+ 4093: "pvxpluscs",
+ 4094: "sysrqd",
+ 4095: "xtgui",
+ 4096: "bre",
+ 4097: "patrolview",
+ 4098: "drmsfsd",
+ 4099: "dpcp",
+ 4100: "igo-incognito",
+ 4101: "brlp-0",
+ 4102: "brlp-1",
+ 4103: "brlp-2",
+ 4104: "brlp-3",
+ 4105: "shofar",
+ 4106: "synchronite",
+ 4107: "j-ac",
+ 4108: "accel",
+ 4109: "izm",
+ 4110: "g2tag",
+ 4111: "xgrid",
+ 4112: "apple-vpns-rp",
+ 4113: "aipn-reg",
+ 4114: "jomamqmonitor",
+ 4115: "cds",
+ 4116: "smartcard-tls",
+ 4117: "hillrserv",
+ 4118: "netscript",
+ 4119: "assuria-slm",
+ 4120: "minirem",
+ 4121: "e-builder",
+ 4122: "fprams",
+ 4123: "z-wave",
+ 4124: "tigv2",
+ 4125: "opsview-envoy",
+ 4126: "ddrepl",
+ 4127: "unikeypro",
+ 4128: "nufw",
+ 4129: "nuauth",
+ 4130: "fronet",
+ 4131: "stars",
+ 4132: "nuts-dem",
+ 4133: "nuts-bootp",
+ 4134: "nifty-hmi",
+ 4135: "cl-db-attach",
+ 4136: "cl-db-request",
+ 4137: "cl-db-remote",
+ 4138: "nettest",
+ 4139: "thrtx",
+ 4140: "cedros-fds",
+ 4141: "oirtgsvc",
+ 4142: "oidocsvc",
+ 4143: "oidsr",
+ 4145: "vvr-control",
+ 4146: "tgcconnect",
+ 4147: "vrxpservman",
+ 4148: "hhb-handheld",
+ 4149: "agslb",
+ 4150: "PowerAlert-nsa",
+ 4151: "menandmice-noh",
+ 4152: "idig-mux",
+ 4153: "mbl-battd",
+ 4154: "atlinks",
+ 4155: "bzr",
+ 4156: "stat-results",
+ 4157: "stat-scanner",
+ 4158: "stat-cc",
+ 4159: "nss",
+ 4160: "jini-discovery",
+ 4161: "omscontact",
+ 4162: "omstopology",
+ 4163: "silverpeakpeer",
+ 4164: "silverpeakcomm",
+ 4165: "altcp",
+ 4166: "joost",
+ 4167: "ddgn",
+ 4168: "pslicser",
+ 4169: "iadt",
+ 4170: "d-cinema-csp",
+ 4171: "ml-svnet",
+ 4172: "pcoip",
+ 4174: "smcluster",
+ 4175: "bccp",
+ 4176: "tl-ipcproxy",
+ 4177: "wello",
+ 4178: "storman",
+ 4179: "MaxumSP",
+ 4180: "httpx",
+ 4181: "macbak",
+ 4182: "pcptcpservice",
+ 4183: "cyborgnet",
+ 4184: "universe-suite",
+ 4185: "wcpp",
+ 4186: "boxbackupstore",
+ 4187: "csc-proxy",
+ 4188: "vatata",
+ 4189: "pcep",
+ 4190: "sieve",
+ 4192: "azeti",
+ 4193: "pvxplusio",
+ 4197: "hctl",
+ 4199: "eims-admin",
+ 4300: "corelccam",
+ 4301: "d-data",
+ 4302: "d-data-control",
+ 4303: "srcp",
+ 4304: "owserver",
+ 4305: "batman",
+ 4306: "pinghgl",
+ 4307: "trueconf",
+ 4308: "compx-lockview",
+ 4309: "dserver",
+ 4310: "mirrtex",
+ 4311: "p6ssmc",
+ 4312: "pscl-mgt",
+ 4313: "perrla",
+ 4314: "choiceview-agt",
+ 4316: "choiceview-clt",
+ 4320: "fdt-rcatp",
+ 4321: "rwhois",
+ 4322: "trim-event",
+ 4323: "trim-ice",
+ 4325: "geognosisman",
+ 4326: "geognosis",
+ 4327: "jaxer-web",
+ 4328: "jaxer-manager",
+ 4329: "publiqare-sync",
+ 4330: "dey-sapi",
+ 4331: "ktickets-rest",
+ 4333: "ahsp",
+ 4334: "netconf-ch-ssh",
+ 4335: "netconf-ch-tls",
+ 4336: "restconf-ch-tls",
+ 4340: "gaia",
+ 4341: "lisp-data",
+ 4342: "lisp-cons",
+ 4343: "unicall",
+ 4344: "vinainstall",
+ 4345: "m4-network-as",
+ 4346: "elanlm",
+ 4347: "lansurveyor",
+ 4348: "itose",
+ 4349: "fsportmap",
+ 4350: "net-device",
+ 4351: "plcy-net-svcs",
+ 4352: "pjlink",
+ 4353: "f5-iquery",
+ 4354: "qsnet-trans",
+ 4355: "qsnet-workst",
+ 4356: "qsnet-assist",
+ 4357: "qsnet-cond",
+ 4358: "qsnet-nucl",
+ 4359: "omabcastltkm",
+ 4360: "matrix-vnet",
+ 4368: "wxbrief",
+ 4369: "epmd",
+ 4370: "elpro-tunnel",
+ 4371: "l2c-control",
+ 4372: "l2c-data",
+ 4373: "remctl",
+ 4374: "psi-ptt",
+ 4375: "tolteces",
+ 4376: "bip",
+ 4377: "cp-spxsvr",
+ 4378: "cp-spxdpy",
+ 4379: "ctdb",
+ 4389: "xandros-cms",
+ 4390: "wiegand",
+ 4391: "apwi-imserver",
+ 4392: "apwi-rxserver",
+ 4393: "apwi-rxspooler",
+ 4395: "omnivisionesx",
+ 4396: "fly",
+ 4400: "ds-srv",
+ 4401: "ds-srvr",
+ 4402: "ds-clnt",
+ 4403: "ds-user",
+ 4404: "ds-admin",
+ 4405: "ds-mail",
+ 4406: "ds-slp",
+ 4407: "nacagent",
+ 4408: "slscc",
+ 4409: "netcabinet-com",
+ 4410: "itwo-server",
+ 4411: "found",
+ 4413: "avi-nms",
+ 4414: "updog",
+ 4415: "brcd-vr-req",
+ 4416: "pjj-player",
+ 4417: "workflowdir",
+ 4419: "cbp",
+ 4420: "nvm-express",
+ 4421: "scaleft",
+ 4422: "tsepisp",
+ 4423: "thingkit",
+ 4425: "netrockey6",
+ 4426: "beacon-port-2",
+ 4427: "drizzle",
+ 4428: "omviserver",
+ 4429: "omviagent",
+ 4430: "rsqlserver",
+ 4431: "wspipe",
+ 4432: "l-acoustics",
+ 4433: "vop",
+ 4442: "saris",
+ 4443: "pharos",
+ 4444: "krb524",
+ 4445: "upnotifyp",
+ 4446: "n1-fwp",
+ 4447: "n1-rmgmt",
+ 4448: "asc-slmd",
+ 4449: "privatewire",
+ 4450: "camp",
+ 4451: "ctisystemmsg",
+ 4452: "ctiprogramload",
+ 4453: "nssalertmgr",
+ 4454: "nssagentmgr",
+ 4455: "prchat-user",
+ 4456: "prchat-server",
+ 4457: "prRegister",
+ 4458: "mcp",
+ 4484: "hpssmgmt",
+ 4485: "assyst-dr",
+ 4486: "icms",
+ 4487: "prex-tcp",
+ 4488: "awacs-ice",
+ 4500: "ipsec-nat-t",
+ 4535: "ehs",
+ 4536: "ehs-ssl",
+ 4537: "wssauthsvc",
+ 4538: "swx-gate",
+ 4545: "worldscores",
+ 4546: "sf-lm",
+ 4547: "lanner-lm",
+ 4548: "synchromesh",
+ 4549: "aegate",
+ 4550: "gds-adppiw-db",
+ 4551: "ieee-mih",
+ 4552: "menandmice-mon",
+ 4553: "icshostsvc",
+ 4554: "msfrs",
+ 4555: "rsip",
+ 4556: "dtn-bundle",
+ 4559: "hylafax",
+ 4563: "amahi-anywhere",
+ 4566: "kwtc",
+ 4567: "tram",
+ 4568: "bmc-reporting",
+ 4569: "iax",
+ 4570: "deploymentmap",
+ 4573: "cardifftec-back",
+ 4590: "rid",
+ 4591: "l3t-at-an",
+ 4593: "ipt-anri-anri",
+ 4594: "ias-session",
+ 4595: "ias-paging",
+ 4596: "ias-neighbor",
+ 4597: "a21-an-1xbs",
+ 4598: "a16-an-an",
+ 4599: "a17-an-an",
+ 4600: "piranha1",
+ 4601: "piranha2",
+ 4602: "mtsserver",
+ 4603: "menandmice-upg",
+ 4604: "irp",
+ 4605: "sixchat",
+ 4658: "playsta2-app",
+ 4659: "playsta2-lob",
+ 4660: "smaclmgr",
+ 4661: "kar2ouche",
+ 4662: "oms",
+ 4663: "noteit",
+ 4664: "ems",
+ 4665: "contclientms",
+ 4666: "eportcomm",
+ 4667: "mmacomm",
+ 4668: "mmaeds",
+ 4669: "eportcommdata",
+ 4670: "light",
+ 4671: "acter",
+ 4672: "rfa",
+ 4673: "cxws",
+ 4674: "appiq-mgmt",
+ 4675: "dhct-status",
+ 4676: "dhct-alerts",
+ 4677: "bcs",
+ 4678: "traversal",
+ 4679: "mgesupervision",
+ 4680: "mgemanagement",
+ 4681: "parliant",
+ 4682: "finisar",
+ 4683: "spike",
+ 4684: "rfid-rp1",
+ 4685: "autopac",
+ 4686: "msp-os",
+ 4687: "nst",
+ 4688: "mobile-p2p",
+ 4689: "altovacentral",
+ 4690: "prelude",
+ 4691: "mtn",
+ 4692: "conspiracy",
+ 4700: "netxms-agent",
+ 4701: "netxms-mgmt",
+ 4702: "netxms-sync",
+ 4703: "npqes-test",
+ 4704: "assuria-ins",
+ 4711: "trinity-dist",
+ 4725: "truckstar",
+ 4727: "fcis",
+ 4728: "capmux",
+ 4730: "gearman",
+ 4731: "remcap",
+ 4733: "resorcs",
+ 4737: "ipdr-sp",
+ 4738: "solera-lpn",
+ 4739: "ipfix",
+ 4740: "ipfixs",
+ 4741: "lumimgrd",
+ 4742: "sicct",
+ 4743: "openhpid",
+ 4744: "ifsp",
+ 4745: "fmp",
+ 4749: "profilemac",
+ 4750: "ssad",
+ 4751: "spocp",
+ 4752: "snap",
+ 4753: "simon",
+ 4756: "RDCenter",
+ 4774: "converge",
+ 4784: "bfd-multi-ctl",
+ 4786: "smart-install",
+ 4787: "sia-ctrl-plane",
+ 4788: "xmcp",
+ 4800: "iims",
+ 4801: "iwec",
+ 4802: "ilss",
+ 4803: "notateit",
+ 4827: "htcp",
+ 4837: "varadero-0",
+ 4838: "varadero-1",
+ 4839: "varadero-2",
+ 4840: "opcua-tcp",
+ 4841: "quosa",
+ 4842: "gw-asv",
+ 4843: "opcua-tls",
+ 4844: "gw-log",
+ 4845: "wcr-remlib",
+ 4846: "contamac-icm",
+ 4847: "wfc",
+ 4848: "appserv-http",
+ 4849: "appserv-https",
+ 4850: "sun-as-nodeagt",
+ 4851: "derby-repli",
+ 4867: "unify-debug",
+ 4868: "phrelay",
+ 4869: "phrelaydbg",
+ 4870: "cc-tracking",
+ 4871: "wired",
+ 4876: "tritium-can",
+ 4877: "lmcs",
+ 4879: "wsdl-event",
+ 4880: "hislip",
+ 4883: "wmlserver",
+ 4884: "hivestor",
+ 4885: "abbs",
+ 4894: "lyskom",
+ 4899: "radmin-port",
+ 4900: "hfcs",
+ 4901: "flr-agent",
+ 4902: "magiccontrol",
+ 4912: "lutap",
+ 4913: "lutcp",
+ 4914: "bones",
+ 4915: "frcs",
+ 4940: "eq-office-4940",
+ 4941: "eq-office-4941",
+ 4942: "eq-office-4942",
+ 4949: "munin",
+ 4950: "sybasesrvmon",
+ 4951: "pwgwims",
+ 4952: "sagxtsds",
+ 4953: "dbsyncarbiter",
+ 4969: "ccss-qmm",
+ 4970: "ccss-qsm",
+ 4971: "burp",
+ 4984: "webyast",
+ 4985: "gerhcs",
+ 4986: "mrip",
+ 4987: "smar-se-port1",
+ 4988: "smar-se-port2",
+ 4989: "parallel",
+ 4990: "busycal",
+ 4991: "vrt",
+ 4999: "hfcs-manager",
+ 5000: "commplex-main",
+ 5001: "commplex-link",
+ 5002: "rfe",
+ 5003: "fmpro-internal",
+ 5004: "avt-profile-1",
+ 5005: "avt-profile-2",
+ 5006: "wsm-server",
+ 5007: "wsm-server-ssl",
+ 5008: "synapsis-edge",
+ 5009: "winfs",
+ 5010: "telelpathstart",
+ 5011: "telelpathattack",
+ 5012: "nsp",
+ 5013: "fmpro-v6",
+ 5015: "fmwp",
+ 5020: "zenginkyo-1",
+ 5021: "zenginkyo-2",
+ 5022: "mice",
+ 5023: "htuilsrv",
+ 5024: "scpi-telnet",
+ 5025: "scpi-raw",
+ 5026: "strexec-d",
+ 5027: "strexec-s",
+ 5028: "qvr",
+ 5029: "infobright",
+ 5030: "surfpass",
+ 5032: "signacert-agent",
+ 5033: "jtnetd-server",
+ 5034: "jtnetd-status",
+ 5042: "asnaacceler8db",
+ 5043: "swxadmin",
+ 5044: "lxi-evntsvc",
+ 5045: "osp",
+ 5048: "texai",
+ 5049: "ivocalize",
+ 5050: "mmcc",
+ 5051: "ita-agent",
+ 5052: "ita-manager",
+ 5053: "rlm",
+ 5054: "rlm-admin",
+ 5055: "unot",
+ 5056: "intecom-ps1",
+ 5057: "intecom-ps2",
+ 5059: "sds",
+ 5060: "sip",
+ 5061: "sips",
+ 5062: "na-localise",
+ 5063: "csrpc",
+ 5064: "ca-1",
+ 5065: "ca-2",
+ 5066: "stanag-5066",
+ 5067: "authentx",
+ 5068: "bitforestsrv",
+ 5069: "i-net-2000-npr",
+ 5070: "vtsas",
+ 5071: "powerschool",
+ 5072: "ayiya",
+ 5073: "tag-pm",
+ 5074: "alesquery",
+ 5075: "pvaccess",
+ 5080: "onscreen",
+ 5081: "sdl-ets",
+ 5082: "qcp",
+ 5083: "qfp",
+ 5084: "llrp",
+ 5085: "encrypted-llrp",
+ 5086: "aprigo-cs",
+ 5087: "biotic",
+ 5093: "sentinel-lm",
+ 5094: "hart-ip",
+ 5099: "sentlm-srv2srv",
+ 5100: "socalia",
+ 5101: "talarian-tcp",
+ 5102: "oms-nonsecure",
+ 5103: "actifio-c2c",
+ 5106: "actifioudsagent",
+ 5107: "actifioreplic",
+ 5111: "taep-as-svc",
+ 5112: "pm-cmdsvr",
+ 5114: "ev-services",
+ 5115: "autobuild",
+ 5117: "gradecam",
+ 5120: "barracuda-bbs",
+ 5133: "nbt-pc",
+ 5134: "ppactivation",
+ 5135: "erp-scale",
+ 5137: "ctsd",
+ 5145: "rmonitor-secure",
+ 5146: "social-alarm",
+ 5150: "atmp",
+ 5151: "esri-sde",
+ 5152: "sde-discovery",
+ 5153: "toruxserver",
+ 5154: "bzflag",
+ 5155: "asctrl-agent",
+ 5156: "rugameonline",
+ 5157: "mediat",
+ 5161: "snmpssh",
+ 5162: "snmpssh-trap",
+ 5163: "sbackup",
+ 5164: "vpa",
+ 5165: "ife-icorp",
+ 5166: "winpcs",
+ 5167: "scte104",
+ 5168: "scte30",
+ 5172: "pcoip-mgmt",
+ 5190: "aol",
+ 5191: "aol-1",
+ 5192: "aol-2",
+ 5193: "aol-3",
+ 5194: "cpscomm",
+ 5195: "ampl-lic",
+ 5196: "ampl-tableproxy",
+ 5197: "tunstall-lwp",
+ 5200: "targus-getdata",
+ 5201: "targus-getdata1",
+ 5202: "targus-getdata2",
+ 5203: "targus-getdata3",
+ 5209: "nomad",
+ 5215: "noteza",
+ 5221: "3exmp",
+ 5222: "xmpp-client",
+ 5223: "hpvirtgrp",
+ 5224: "hpvirtctrl",
+ 5225: "hp-server",
+ 5226: "hp-status",
+ 5227: "perfd",
+ 5228: "hpvroom",
+ 5229: "jaxflow",
+ 5230: "jaxflow-data",
+ 5231: "crusecontrol",
+ 5232: "csedaemon",
+ 5233: "enfs",
+ 5234: "eenet",
+ 5235: "galaxy-network",
+ 5236: "padl2sim",
+ 5237: "mnet-discovery",
+ 5245: "downtools",
+ 5248: "caacws",
+ 5249: "caaclang2",
+ 5250: "soagateway",
+ 5251: "caevms",
+ 5252: "movaz-ssc",
+ 5253: "kpdp",
+ 5254: "logcabin",
+ 5264: "3com-njack-1",
+ 5265: "3com-njack-2",
+ 5269: "xmpp-server",
+ 5270: "cartographerxmp",
+ 5271: "cuelink",
+ 5272: "pk",
+ 5280: "xmpp-bosh",
+ 5281: "undo-lm",
+ 5282: "transmit-port",
+ 5298: "presence",
+ 5299: "nlg-data",
+ 5300: "hacl-hb",
+ 5301: "hacl-gs",
+ 5302: "hacl-cfg",
+ 5303: "hacl-probe",
+ 5304: "hacl-local",
+ 5305: "hacl-test",
+ 5306: "sun-mc-grp",
+ 5307: "sco-aip",
+ 5308: "cfengine",
+ 5309: "jprinter",
+ 5310: "outlaws",
+ 5312: "permabit-cs",
+ 5313: "rrdp",
+ 5314: "opalis-rbt-ipc",
+ 5315: "hacl-poll",
+ 5316: "hpbladems",
+ 5317: "hpdevms",
+ 5318: "pkix-cmc",
+ 5320: "bsfserver-zn",
+ 5321: "bsfsvr-zn-ssl",
+ 5343: "kfserver",
+ 5344: "xkotodrcp",
+ 5349: "stuns",
+ 5352: "dns-llq",
+ 5353: "mdns",
+ 5354: "mdnsresponder",
+ 5355: "llmnr",
+ 5356: "ms-smlbiz",
+ 5357: "wsdapi",
+ 5358: "wsdapi-s",
+ 5359: "ms-alerter",
+ 5360: "ms-sideshow",
+ 5361: "ms-s-sideshow",
+ 5362: "serverwsd2",
+ 5363: "net-projection",
+ 5397: "stresstester",
+ 5398: "elektron-admin",
+ 5399: "securitychase",
+ 5400: "excerpt",
+ 5401: "excerpts",
+ 5402: "mftp",
+ 5403: "hpoms-ci-lstn",
+ 5404: "hpoms-dps-lstn",
+ 5405: "netsupport",
+ 5406: "systemics-sox",
+ 5407: "foresyte-clear",
+ 5408: "foresyte-sec",
+ 5409: "salient-dtasrv",
+ 5410: "salient-usrmgr",
+ 5411: "actnet",
+ 5412: "continuus",
+ 5413: "wwiotalk",
+ 5414: "statusd",
+ 5415: "ns-server",
+ 5416: "sns-gateway",
+ 5417: "sns-agent",
+ 5418: "mcntp",
+ 5419: "dj-ice",
+ 5420: "cylink-c",
+ 5421: "netsupport2",
+ 5422: "salient-mux",
+ 5423: "virtualuser",
+ 5424: "beyond-remote",
+ 5425: "br-channel",
+ 5426: "devbasic",
+ 5427: "sco-peer-tta",
+ 5428: "telaconsole",
+ 5429: "base",
+ 5430: "radec-corp",
+ 5431: "park-agent",
+ 5432: "postgresql",
+ 5433: "pyrrho",
+ 5434: "sgi-arrayd",
+ 5435: "sceanics",
+ 5443: "spss",
+ 5445: "smbdirect",
+ 5450: "tiepie",
+ 5453: "surebox",
+ 5454: "apc-5454",
+ 5455: "apc-5455",
+ 5456: "apc-5456",
+ 5461: "silkmeter",
+ 5462: "ttl-publisher",
+ 5463: "ttlpriceproxy",
+ 5464: "quailnet",
+ 5465: "netops-broker",
+ 5470: "apsolab-col",
+ 5471: "apsolab-cols",
+ 5472: "apsolab-tag",
+ 5473: "apsolab-tags",
+ 5475: "apsolab-data",
+ 5500: "fcp-addr-srvr1",
+ 5501: "fcp-addr-srvr2",
+ 5502: "fcp-srvr-inst1",
+ 5503: "fcp-srvr-inst2",
+ 5504: "fcp-cics-gw1",
+ 5505: "checkoutdb",
+ 5506: "amc",
+ 5507: "psl-management",
+ 5550: "cbus",
+ 5553: "sgi-eventmond",
+ 5554: "sgi-esphttp",
+ 5555: "personal-agent",
+ 5556: "freeciv",
+ 5557: "farenet",
+ 5565: "hpe-dp-bura",
+ 5566: "westec-connect",
+ 5567: "dof-dps-mc-sec",
+ 5568: "sdt",
+ 5569: "rdmnet-ctrl",
+ 5573: "sdmmp",
+ 5574: "lsi-bobcat",
+ 5575: "ora-oap",
+ 5579: "fdtracks",
+ 5580: "tmosms0",
+ 5581: "tmosms1",
+ 5582: "fac-restore",
+ 5583: "tmo-icon-sync",
+ 5584: "bis-web",
+ 5585: "bis-sync",
+ 5586: "att-mt-sms",
+ 5597: "ininmessaging",
+ 5598: "mctfeed",
+ 5599: "esinstall",
+ 5600: "esmmanager",
+ 5601: "esmagent",
+ 5602: "a1-msc",
+ 5603: "a1-bs",
+ 5604: "a3-sdunode",
+ 5605: "a4-sdunode",
+ 5618: "efr",
+ 5627: "ninaf",
+ 5628: "htrust",
+ 5629: "symantec-sfdb",
+ 5630: "precise-comm",
+ 5631: "pcanywheredata",
+ 5632: "pcanywherestat",
+ 5633: "beorl",
+ 5634: "xprtld",
+ 5635: "sfmsso",
+ 5636: "sfm-db-server",
+ 5637: "cssc",
+ 5638: "flcrs",
+ 5639: "ics",
+ 5646: "vfmobile",
+ 5666: "nrpe",
+ 5670: "filemq",
+ 5671: "amqps",
+ 5672: "amqp",
+ 5673: "jms",
+ 5674: "hyperscsi-port",
+ 5675: "v5ua",
+ 5676: "raadmin",
+ 5677: "questdb2-lnchr",
+ 5678: "rrac",
+ 5679: "dccm",
+ 5680: "auriga-router",
+ 5681: "ncxcp",
+ 5688: "ggz",
+ 5689: "qmvideo",
+ 5693: "rbsystem",
+ 5696: "kmip",
+ 5700: "supportassist",
+ 5705: "storageos",
+ 5713: "proshareaudio",
+ 5714: "prosharevideo",
+ 5715: "prosharedata",
+ 5716: "prosharerequest",
+ 5717: "prosharenotify",
+ 5718: "dpm",
+ 5719: "dpm-agent",
+ 5720: "ms-licensing",
+ 5721: "dtpt",
+ 5722: "msdfsr",
+ 5723: "omhs",
+ 5724: "omsdk",
+ 5725: "ms-ilm",
+ 5726: "ms-ilm-sts",
+ 5727: "asgenf",
+ 5728: "io-dist-data",
+ 5729: "openmail",
+ 5730: "unieng",
+ 5741: "ida-discover1",
+ 5742: "ida-discover2",
+ 5743: "watchdoc-pod",
+ 5744: "watchdoc",
+ 5745: "fcopy-server",
+ 5746: "fcopys-server",
+ 5747: "tunatic",
+ 5748: "tunalyzer",
+ 5750: "rscd",
+ 5755: "openmailg",
+ 5757: "x500ms",
+ 5766: "openmailns",
+ 5767: "s-openmail",
+ 5768: "openmailpxy",
+ 5769: "spramsca",
+ 5770: "spramsd",
+ 5771: "netagent",
+ 5777: "dali-port",
+ 5780: "vts-rpc",
+ 5781: "3par-evts",
+ 5782: "3par-mgmt",
+ 5783: "3par-mgmt-ssl",
+ 5785: "3par-rcopy",
+ 5793: "xtreamx",
+ 5813: "icmpd",
+ 5814: "spt-automation",
+ 5841: "shiprush-d-ch",
+ 5842: "reversion",
+ 5859: "wherehoo",
+ 5863: "ppsuitemsg",
+ 5868: "diameters",
+ 5883: "jute",
+ 5900: "rfb",
+ 5910: "cm",
+ 5911: "cpdlc",
+ 5912: "fis",
+ 5913: "ads-c",
+ 5963: "indy",
+ 5968: "mppolicy-v5",
+ 5969: "mppolicy-mgr",
+ 5984: "couchdb",
+ 5985: "wsman",
+ 5986: "wsmans",
+ 5987: "wbem-rmi",
+ 5988: "wbem-http",
+ 5989: "wbem-https",
+ 5990: "wbem-exp-https",
+ 5991: "nuxsl",
+ 5992: "consul-insight",
+ 5993: "cim-rs",
+ 5999: "cvsup",
+ 6064: "ndl-ahp-svc",
+ 6065: "winpharaoh",
+ 6066: "ewctsp",
+ 6068: "gsmp-ancp",
+ 6069: "trip",
+ 6070: "messageasap",
+ 6071: "ssdtp",
+ 6072: "diagnose-proc",
+ 6073: "directplay8",
+ 6074: "max",
+ 6075: "dpm-acm",
+ 6076: "msft-dpm-cert",
+ 6077: "iconstructsrv",
+ 6084: "reload-config",
+ 6085: "konspire2b",
+ 6086: "pdtp",
+ 6087: "ldss",
+ 6088: "doglms",
+ 6099: "raxa-mgmt",
+ 6100: "synchronet-db",
+ 6101: "synchronet-rtc",
+ 6102: "synchronet-upd",
+ 6103: "rets",
+ 6104: "dbdb",
+ 6105: "primaserver",
+ 6106: "mpsserver",
+ 6107: "etc-control",
+ 6108: "sercomm-scadmin",
+ 6109: "globecast-id",
+ 6110: "softcm",
+ 6111: "spc",
+ 6112: "dtspcd",
+ 6113: "dayliteserver",
+ 6114: "wrspice",
+ 6115: "xic",
+ 6116: "xtlserv",
+ 6117: "daylitetouch",
+ 6121: "spdy",
+ 6122: "bex-webadmin",
+ 6123: "backup-express",
+ 6124: "pnbs",
+ 6130: "damewaremobgtwy",
+ 6133: "nbt-wol",
+ 6140: "pulsonixnls",
+ 6141: "meta-corp",
+ 6142: "aspentec-lm",
+ 6143: "watershed-lm",
+ 6144: "statsci1-lm",
+ 6145: "statsci2-lm",
+ 6146: "lonewolf-lm",
+ 6147: "montage-lm",
+ 6148: "ricardo-lm",
+ 6149: "tal-pod",
+ 6159: "efb-aci",
+ 6160: "ecmp",
+ 6161: "patrol-ism",
+ 6162: "patrol-coll",
+ 6163: "pscribe",
+ 6200: "lm-x",
+ 6209: "qmtps",
+ 6222: "radmind",
+ 6241: "jeol-nsdtp-1",
+ 6242: "jeol-nsdtp-2",
+ 6243: "jeol-nsdtp-3",
+ 6244: "jeol-nsdtp-4",
+ 6251: "tl1-raw-ssl",
+ 6252: "tl1-ssh",
+ 6253: "crip",
+ 6267: "gld",
+ 6268: "grid",
+ 6269: "grid-alt",
+ 6300: "bmc-grx",
+ 6301: "bmc-ctd-ldap",
+ 6306: "ufmp",
+ 6315: "scup",
+ 6316: "abb-escp",
+ 6317: "nav-data-cmd",
+ 6320: "repsvc",
+ 6321: "emp-server1",
+ 6322: "emp-server2",
+ 6324: "hrd-ncs",
+ 6325: "dt-mgmtsvc",
+ 6326: "dt-vra",
+ 6343: "sflow",
+ 6344: "streletz",
+ 6346: "gnutella-svc",
+ 6347: "gnutella-rtr",
+ 6350: "adap",
+ 6355: "pmcs",
+ 6360: "metaedit-mu",
+ 6370: "metaedit-se",
+ 6379: "redis",
+ 6382: "metatude-mds",
+ 6389: "clariion-evr01",
+ 6390: "metaedit-ws",
+ 6417: "faxcomservice",
+ 6418: "syserverremote",
+ 6419: "svdrp",
+ 6420: "nim-vdrshell",
+ 6421: "nim-wan",
+ 6432: "pgbouncer",
+ 6442: "tarp",
+ 6443: "sun-sr-https",
+ 6444: "sge-qmaster",
+ 6445: "sge-execd",
+ 6446: "mysql-proxy",
+ 6455: "skip-cert-recv",
+ 6456: "skip-cert-send",
+ 6464: "ieee11073-20701",
+ 6471: "lvision-lm",
+ 6480: "sun-sr-http",
+ 6481: "servicetags",
+ 6482: "ldoms-mgmt",
+ 6483: "SunVTS-RMI",
+ 6484: "sun-sr-jms",
+ 6485: "sun-sr-iiop",
+ 6486: "sun-sr-iiops",
+ 6487: "sun-sr-iiop-aut",
+ 6488: "sun-sr-jmx",
+ 6489: "sun-sr-admin",
+ 6500: "boks",
+ 6501: "boks-servc",
+ 6502: "boks-servm",
+ 6503: "boks-clntd",
+ 6505: "badm-priv",
+ 6506: "badm-pub",
+ 6507: "bdir-priv",
+ 6508: "bdir-pub",
+ 6509: "mgcs-mfp-port",
+ 6510: "mcer-port",
+ 6513: "netconf-tls",
+ 6514: "syslog-tls",
+ 6515: "elipse-rec",
+ 6543: "lds-distrib",
+ 6544: "lds-dump",
+ 6547: "apc-6547",
+ 6548: "apc-6548",
+ 6549: "apc-6549",
+ 6550: "fg-sysupdate",
+ 6551: "sum",
+ 6558: "xdsxdm",
+ 6566: "sane-port",
+ 6568: "canit-store",
+ 6579: "affiliate",
+ 6580: "parsec-master",
+ 6581: "parsec-peer",
+ 6582: "parsec-game",
+ 6583: "joaJewelSuite",
+ 6600: "mshvlm",
+ 6601: "mstmg-sstp",
+ 6602: "wsscomfrmwk",
+ 6619: "odette-ftps",
+ 6620: "kftp-data",
+ 6621: "kftp",
+ 6622: "mcftp",
+ 6623: "ktelnet",
+ 6624: "datascaler-db",
+ 6625: "datascaler-ctl",
+ 6626: "wago-service",
+ 6627: "nexgen",
+ 6628: "afesc-mc",
+ 6629: "nexgen-aux",
+ 6632: "mxodbc-connect",
+ 6640: "ovsdb",
+ 6653: "openflow",
+ 6655: "pcs-sf-ui-man",
+ 6656: "emgmsg",
+ 6670: "vocaltec-gold",
+ 6671: "p4p-portal",
+ 6672: "vision-server",
+ 6673: "vision-elmd",
+ 6678: "vfbp",
+ 6679: "osaut",
+ 6687: "clever-ctrace",
+ 6688: "clever-tcpip",
+ 6689: "tsa",
+ 6690: "cleverdetect",
+ 6697: "ircs-u",
+ 6701: "kti-icad-srvr",
+ 6702: "e-design-net",
+ 6703: "e-design-web",
+ 6714: "ibprotocol",
+ 6715: "fibotrader-com",
+ 6716: "princity-agent",
+ 6767: "bmc-perf-agent",
+ 6768: "bmc-perf-mgrd",
+ 6769: "adi-gxp-srvprt",
+ 6770: "plysrv-http",
+ 6771: "plysrv-https",
+ 6777: "ntz-tracker",
+ 6778: "ntz-p2p-storage",
+ 6785: "dgpf-exchg",
+ 6786: "smc-jmx",
+ 6787: "smc-admin",
+ 6788: "smc-http",
+ 6789: "radg",
+ 6790: "hnmp",
+ 6791: "hnm",
+ 6801: "acnet",
+ 6817: "pentbox-sim",
+ 6831: "ambit-lm",
+ 6841: "netmo-default",
+ 6842: "netmo-http",
+ 6850: "iccrushmore",
+ 6868: "acctopus-cc",
+ 6888: "muse",
+ 6900: "rtimeviewer",
+ 6901: "jetstream",
+ 6935: "ethoscan",
+ 6936: "xsmsvc",
+ 6946: "bioserver",
+ 6951: "otlp",
+ 6961: "jmact3",
+ 6962: "jmevt2",
+ 6963: "swismgr1",
+ 6964: "swismgr2",
+ 6965: "swistrap",
+ 6966: "swispol",
+ 6969: "acmsoda",
+ 6970: "conductor",
+ 6997: "MobilitySrv",
+ 6998: "iatp-highpri",
+ 6999: "iatp-normalpri",
+ 7000: "afs3-fileserver",
+ 7001: "afs3-callback",
+ 7002: "afs3-prserver",
+ 7003: "afs3-vlserver",
+ 7004: "afs3-kaserver",
+ 7005: "afs3-volser",
+ 7006: "afs3-errors",
+ 7007: "afs3-bos",
+ 7008: "afs3-update",
+ 7009: "afs3-rmtsys",
+ 7010: "ups-onlinet",
+ 7011: "talon-disc",
+ 7012: "talon-engine",
+ 7013: "microtalon-dis",
+ 7014: "microtalon-com",
+ 7015: "talon-webserver",
+ 7016: "spg",
+ 7017: "grasp",
+ 7018: "fisa-svc",
+ 7019: "doceri-ctl",
+ 7020: "dpserve",
+ 7021: "dpserveadmin",
+ 7022: "ctdp",
+ 7023: "ct2nmcs",
+ 7024: "vmsvc",
+ 7025: "vmsvc-2",
+ 7030: "op-probe",
+ 7031: "iposplanet",
+ 7070: "arcp",
+ 7071: "iwg1",
+ 7073: "martalk",
+ 7080: "empowerid",
+ 7099: "lazy-ptop",
+ 7100: "font-service",
+ 7101: "elcn",
+ 7117: "rothaga",
+ 7121: "virprot-lm",
+ 7128: "scenidm",
+ 7129: "scenccs",
+ 7161: "cabsm-comm",
+ 7162: "caistoragemgr",
+ 7163: "cacsambroker",
+ 7164: "fsr",
+ 7165: "doc-server",
+ 7166: "aruba-server",
+ 7167: "casrmagent",
+ 7168: "cnckadserver",
+ 7169: "ccag-pib",
+ 7170: "nsrp",
+ 7171: "drm-production",
+ 7172: "metalbend",
+ 7173: "zsecure",
+ 7174: "clutild",
+ 7200: "fodms",
+ 7201: "dlip",
+ 7202: "pon-ictp",
+ 7215: "PS-Server",
+ 7216: "PS-Capture-Pro",
+ 7227: "ramp",
+ 7228: "citrixupp",
+ 7229: "citrixuppg",
+ 7236: "display",
+ 7237: "pads",
+ 7244: "frc-hicp",
+ 7262: "cnap",
+ 7272: "watchme-7272",
+ 7273: "oma-rlp",
+ 7274: "oma-rlp-s",
+ 7275: "oma-ulp",
+ 7276: "oma-ilp",
+ 7277: "oma-ilp-s",
+ 7278: "oma-dcdocbs",
+ 7279: "ctxlic",
+ 7280: "itactionserver1",
+ 7281: "itactionserver2",
+ 7282: "mzca-action",
+ 7283: "genstat",
+ 7365: "lcm-server",
+ 7391: "mindfilesys",
+ 7392: "mrssrendezvous",
+ 7393: "nfoldman",
+ 7394: "fse",
+ 7395: "winqedit",
+ 7397: "hexarc",
+ 7400: "rtps-discovery",
+ 7401: "rtps-dd-ut",
+ 7402: "rtps-dd-mt",
+ 7410: "ionixnetmon",
+ 7411: "daqstream",
+ 7421: "mtportmon",
+ 7426: "pmdmgr",
+ 7427: "oveadmgr",
+ 7428: "ovladmgr",
+ 7429: "opi-sock",
+ 7430: "xmpv7",
+ 7431: "pmd",
+ 7437: "faximum",
+ 7443: "oracleas-https",
+ 7471: "sttunnel",
+ 7473: "rise",
+ 7474: "neo4j",
+ 7478: "openit",
+ 7491: "telops-lmd",
+ 7500: "silhouette",
+ 7501: "ovbus",
+ 7508: "adcp",
+ 7509: "acplt",
+ 7510: "ovhpas",
+ 7511: "pafec-lm",
+ 7542: "saratoga",
+ 7543: "atul",
+ 7544: "nta-ds",
+ 7545: "nta-us",
+ 7546: "cfs",
+ 7547: "cwmp",
+ 7548: "tidp",
+ 7549: "nls-tl",
+ 7551: "controlone-con",
+ 7560: "sncp",
+ 7563: "cfw",
+ 7566: "vsi-omega",
+ 7569: "dell-eql-asm",
+ 7570: "aries-kfinder",
+ 7574: "coherence",
+ 7588: "sun-lm",
+ 7606: "mipi-debug",
+ 7624: "indi",
+ 7626: "simco",
+ 7627: "soap-http",
+ 7628: "zen-pawn",
+ 7629: "xdas",
+ 7630: "hawk",
+ 7631: "tesla-sys-msg",
+ 7633: "pmdfmgt",
+ 7648: "cuseeme",
+ 7672: "imqstomp",
+ 7673: "imqstomps",
+ 7674: "imqtunnels",
+ 7675: "imqtunnel",
+ 7676: "imqbrokerd",
+ 7677: "sun-user-https",
+ 7680: "pando-pub",
+ 7683: "dmt",
+ 7687: "bolt",
+ 7689: "collaber",
+ 7697: "klio",
+ 7700: "em7-secom",
+ 7707: "sync-em7",
+ 7708: "scinet",
+ 7720: "medimageportal",
+ 7724: "nsdeepfreezectl",
+ 7725: "nitrogen",
+ 7726: "freezexservice",
+ 7727: "trident-data",
+ 7728: "osvr",
+ 7734: "smip",
+ 7738: "aiagent",
+ 7741: "scriptview",
+ 7742: "msss",
+ 7743: "sstp-1",
+ 7744: "raqmon-pdu",
+ 7747: "prgp",
+ 7775: "inetfs",
+ 7777: "cbt",
+ 7778: "interwise",
+ 7779: "vstat",
+ 7781: "accu-lmgr",
+ 7786: "minivend",
+ 7787: "popup-reminders",
+ 7789: "office-tools",
+ 7794: "q3ade",
+ 7797: "pnet-conn",
+ 7798: "pnet-enc",
+ 7799: "altbsdp",
+ 7800: "asr",
+ 7801: "ssp-client",
+ 7810: "rbt-wanopt",
+ 7845: "apc-7845",
+ 7846: "apc-7846",
+ 7847: "csoauth",
+ 7869: "mobileanalyzer",
+ 7870: "rbt-smc",
+ 7871: "mdm",
+ 7878: "owms",
+ 7880: "pss",
+ 7887: "ubroker",
+ 7900: "mevent",
+ 7901: "tnos-sp",
+ 7902: "tnos-dp",
+ 7903: "tnos-dps",
+ 7913: "qo-secure",
+ 7932: "t2-drm",
+ 7933: "t2-brm",
+ 7962: "generalsync",
+ 7967: "supercell",
+ 7979: "micromuse-ncps",
+ 7980: "quest-vista",
+ 7981: "sossd-collect",
+ 7982: "sossd-agent",
+ 7997: "pushns",
+ 7999: "irdmi2",
+ 8000: "irdmi",
+ 8001: "vcom-tunnel",
+ 8002: "teradataordbms",
+ 8003: "mcreport",
+ 8005: "mxi",
+ 8006: "wpl-analytics",
+ 8007: "warppipe",
+ 8008: "http-alt",
+ 8019: "qbdb",
+ 8020: "intu-ec-svcdisc",
+ 8021: "intu-ec-client",
+ 8022: "oa-system",
+ 8025: "ca-audit-da",
+ 8026: "ca-audit-ds",
+ 8032: "pro-ed",
+ 8033: "mindprint",
+ 8034: "vantronix-mgmt",
+ 8040: "ampify",
+ 8041: "enguity-xccetp",
+ 8042: "fs-agent",
+ 8043: "fs-server",
+ 8044: "fs-mgmt",
+ 8051: "rocrail",
+ 8052: "senomix01",
+ 8053: "senomix02",
+ 8054: "senomix03",
+ 8055: "senomix04",
+ 8056: "senomix05",
+ 8057: "senomix06",
+ 8058: "senomix07",
+ 8059: "senomix08",
+ 8066: "toad-bi-appsrvr",
+ 8067: "infi-async",
+ 8070: "ucs-isc",
+ 8074: "gadugadu",
+ 8077: "mles",
+ 8080: "http-alt",
+ 8081: "sunproxyadmin",
+ 8082: "us-cli",
+ 8083: "us-srv",
+ 8086: "d-s-n",
+ 8087: "simplifymedia",
+ 8088: "radan-http",
+ 8090: "opsmessaging",
+ 8091: "jamlink",
+ 8097: "sac",
+ 8100: "xprint-server",
+ 8101: "ldoms-migr",
+ 8102: "kz-migr",
+ 8115: "mtl8000-matrix",
+ 8116: "cp-cluster",
+ 8117: "purityrpc",
+ 8118: "privoxy",
+ 8121: "apollo-data",
+ 8122: "apollo-admin",
+ 8128: "paycash-online",
+ 8129: "paycash-wbp",
+ 8130: "indigo-vrmi",
+ 8131: "indigo-vbcp",
+ 8132: "dbabble",
+ 8140: "puppet",
+ 8148: "isdd",
+ 8153: "quantastor",
+ 8160: "patrol",
+ 8161: "patrol-snmp",
+ 8162: "lpar2rrd",
+ 8181: "intermapper",
+ 8182: "vmware-fdm",
+ 8183: "proremote",
+ 8184: "itach",
+ 8190: "gcp-rphy",
+ 8191: "limnerpressure",
+ 8192: "spytechphone",
+ 8194: "blp1",
+ 8195: "blp2",
+ 8199: "vvr-data",
+ 8200: "trivnet1",
+ 8201: "trivnet2",
+ 8204: "lm-perfworks",
+ 8205: "lm-instmgr",
+ 8206: "lm-dta",
+ 8207: "lm-sserver",
+ 8208: "lm-webwatcher",
+ 8230: "rexecj",
+ 8243: "synapse-nhttps",
+ 8270: "robot-remote",
+ 8276: "pando-sec",
+ 8280: "synapse-nhttp",
+ 8282: "libelle",
+ 8292: "blp3",
+ 8293: "hiperscan-id",
+ 8294: "blp4",
+ 8300: "tmi",
+ 8301: "amberon",
+ 8313: "hub-open-net",
+ 8320: "tnp-discover",
+ 8321: "tnp",
+ 8322: "garmin-marine",
+ 8351: "server-find",
+ 8376: "cruise-enum",
+ 8377: "cruise-swroute",
+ 8378: "cruise-config",
+ 8379: "cruise-diags",
+ 8380: "cruise-update",
+ 8383: "m2mservices",
+ 8400: "cvd",
+ 8401: "sabarsd",
+ 8402: "abarsd",
+ 8403: "admind",
+ 8404: "svcloud",
+ 8405: "svbackup",
+ 8415: "dlpx-sp",
+ 8416: "espeech",
+ 8417: "espeech-rtp",
+ 8423: "aritts",
+ 8442: "cybro-a-bus",
+ 8443: "pcsync-https",
+ 8444: "pcsync-http",
+ 8445: "copy",
+ 8450: "npmp",
+ 8457: "nexentamv",
+ 8470: "cisco-avp",
+ 8471: "pim-port",
+ 8472: "otv",
+ 8473: "vp2p",
+ 8474: "noteshare",
+ 8500: "fmtp",
+ 8501: "cmtp-mgt",
+ 8502: "ftnmtp",
+ 8554: "rtsp-alt",
+ 8555: "d-fence",
+ 8567: "dof-tunnel",
+ 8600: "asterix",
+ 8610: "canon-mfnp",
+ 8611: "canon-bjnp1",
+ 8612: "canon-bjnp2",
+ 8613: "canon-bjnp3",
+ 8614: "canon-bjnp4",
+ 8615: "imink",
+ 8665: "monetra",
+ 8666: "monetra-admin",
+ 8675: "msi-cps-rm",
+ 8686: "sun-as-jmxrmi",
+ 8688: "openremote-ctrl",
+ 8699: "vnyx",
+ 8711: "nvc",
+ 8733: "ibus",
+ 8750: "dey-keyneg",
+ 8763: "mc-appserver",
+ 8764: "openqueue",
+ 8765: "ultraseek-http",
+ 8766: "amcs",
+ 8770: "dpap",
+ 8778: "uec",
+ 8786: "msgclnt",
+ 8787: "msgsrvr",
+ 8793: "acd-pm",
+ 8800: "sunwebadmin",
+ 8804: "truecm",
+ 8873: "dxspider",
+ 8880: "cddbp-alt",
+ 8881: "galaxy4d",
+ 8883: "secure-mqtt",
+ 8888: "ddi-tcp-1",
+ 8889: "ddi-tcp-2",
+ 8890: "ddi-tcp-3",
+ 8891: "ddi-tcp-4",
+ 8892: "ddi-tcp-5",
+ 8893: "ddi-tcp-6",
+ 8894: "ddi-tcp-7",
+ 8899: "ospf-lite",
+ 8900: "jmb-cds1",
+ 8901: "jmb-cds2",
+ 8910: "manyone-http",
+ 8911: "manyone-xml",
+ 8912: "wcbackup",
+ 8913: "dragonfly",
+ 8937: "twds",
+ 8953: "ub-dns-control",
+ 8954: "cumulus-admin",
+ 8980: "nod-provider",
+ 8989: "sunwebadmins",
+ 8990: "http-wmap",
+ 8991: "https-wmap",
+ 8997: "oracle-ms-ens",
+ 8998: "canto-roboflow",
+ 8999: "bctp",
+ 9000: "cslistener",
+ 9001: "etlservicemgr",
+ 9002: "dynamid",
+ 9005: "golem",
+ 9008: "ogs-server",
+ 9009: "pichat",
+ 9010: "sdr",
+ 9020: "tambora",
+ 9021: "panagolin-ident",
+ 9022: "paragent",
+ 9023: "swa-1",
+ 9024: "swa-2",
+ 9025: "swa-3",
+ 9026: "swa-4",
+ 9050: "versiera",
+ 9051: "fio-cmgmt",
+ 9060: "CardWeb-IO",
+ 9080: "glrpc",
+ 9083: "emc-pp-mgmtsvc",
+ 9084: "aurora",
+ 9085: "ibm-rsyscon",
+ 9086: "net2display",
+ 9087: "classic",
+ 9088: "sqlexec",
+ 9089: "sqlexec-ssl",
+ 9090: "websm",
+ 9091: "xmltec-xmlmail",
+ 9092: "XmlIpcRegSvc",
+ 9093: "copycat",
+ 9100: "hp-pdl-datastr",
+ 9101: "bacula-dir",
+ 9102: "bacula-fd",
+ 9103: "bacula-sd",
+ 9104: "peerwire",
+ 9105: "xadmin",
+ 9106: "astergate",
+ 9107: "astergatefax",
+ 9119: "mxit",
+ 9122: "grcmp",
+ 9123: "grcp",
+ 9131: "dddp",
+ 9160: "apani1",
+ 9161: "apani2",
+ 9162: "apani3",
+ 9163: "apani4",
+ 9164: "apani5",
+ 9191: "sun-as-jpda",
+ 9200: "wap-wsp",
+ 9201: "wap-wsp-wtp",
+ 9202: "wap-wsp-s",
+ 9203: "wap-wsp-wtp-s",
+ 9204: "wap-vcard",
+ 9205: "wap-vcal",
+ 9206: "wap-vcard-s",
+ 9207: "wap-vcal-s",
+ 9208: "rjcdb-vcards",
+ 9209: "almobile-system",
+ 9210: "oma-mlp",
+ 9211: "oma-mlp-s",
+ 9212: "serverviewdbms",
+ 9213: "serverstart",
+ 9214: "ipdcesgbs",
+ 9215: "insis",
+ 9216: "acme",
+ 9217: "fsc-port",
+ 9222: "teamcoherence",
+ 9255: "mon",
+ 9278: "pegasus",
+ 9279: "pegasus-ctl",
+ 9280: "pgps",
+ 9281: "swtp-port1",
+ 9282: "swtp-port2",
+ 9283: "callwaveiam",
+ 9284: "visd",
+ 9285: "n2h2server",
+ 9287: "cumulus",
+ 9292: "armtechdaemon",
+ 9293: "storview",
+ 9294: "armcenterhttp",
+ 9295: "armcenterhttps",
+ 9300: "vrace",
+ 9306: "sphinxql",
+ 9312: "sphinxapi",
+ 9318: "secure-ts",
+ 9321: "guibase",
+ 9343: "mpidcmgr",
+ 9344: "mphlpdmc",
+ 9345: "rancher",
+ 9346: "ctechlicensing",
+ 9374: "fjdmimgr",
+ 9380: "boxp",
+ 9387: "d2dconfig",
+ 9388: "d2ddatatrans",
+ 9389: "adws",
+ 9390: "otp",
+ 9396: "fjinvmgr",
+ 9397: "mpidcagt",
+ 9400: "sec-t4net-srv",
+ 9401: "sec-t4net-clt",
+ 9402: "sec-pc2fax-srv",
+ 9418: "git",
+ 9443: "tungsten-https",
+ 9444: "wso2esb-console",
+ 9445: "mindarray-ca",
+ 9450: "sntlkeyssrvr",
+ 9500: "ismserver",
+ 9535: "mngsuite",
+ 9536: "laes-bf",
+ 9555: "trispen-sra",
+ 9592: "ldgateway",
+ 9593: "cba8",
+ 9594: "msgsys",
+ 9595: "pds",
+ 9596: "mercury-disc",
+ 9597: "pd-admin",
+ 9598: "vscp",
+ 9599: "robix",
+ 9600: "micromuse-ncpw",
+ 9612: "streamcomm-ds",
+ 9614: "iadt-tls",
+ 9616: "erunbook-agent",
+ 9617: "erunbook-server",
+ 9618: "condor",
+ 9628: "odbcpathway",
+ 9629: "uniport",
+ 9630: "peoctlr",
+ 9631: "peocoll",
+ 9640: "pqsflows",
+ 9666: "zoomcp",
+ 9667: "xmms2",
+ 9668: "tec5-sdctp",
+ 9694: "client-wakeup",
+ 9695: "ccnx",
+ 9700: "board-roar",
+ 9747: "l5nas-parchan",
+ 9750: "board-voip",
+ 9753: "rasadv",
+ 9762: "tungsten-http",
+ 9800: "davsrc",
+ 9801: "sstp-2",
+ 9802: "davsrcs",
+ 9875: "sapv1",
+ 9876: "sd",
+ 9888: "cyborg-systems",
+ 9889: "gt-proxy",
+ 9898: "monkeycom",
+ 9900: "iua",
+ 9909: "domaintime",
+ 9911: "sype-transport",
+ 9925: "xybrid-cloud",
+ 9950: "apc-9950",
+ 9951: "apc-9951",
+ 9952: "apc-9952",
+ 9953: "acis",
+ 9954: "hinp",
+ 9955: "alljoyn-stm",
+ 9966: "odnsp",
+ 9978: "xybrid-rt",
+ 9979: "visweather",
+ 9981: "pumpkindb",
+ 9987: "dsm-scm-target",
+ 9988: "nsesrvr",
+ 9990: "osm-appsrvr",
+ 9991: "osm-oev",
+ 9992: "palace-1",
+ 9993: "palace-2",
+ 9994: "palace-3",
+ 9995: "palace-4",
+ 9996: "palace-5",
+ 9997: "palace-6",
+ 9998: "distinct32",
+ 9999: "distinct",
+ 10000: "ndmp",
+ 10001: "scp-config",
+ 10002: "documentum",
+ 10003: "documentum-s",
+ 10004: "emcrmirccd",
+ 10005: "emcrmird",
+ 10006: "netapp-sync",
+ 10007: "mvs-capacity",
+ 10008: "octopus",
+ 10009: "swdtp-sv",
+ 10010: "rxapi",
+ 10020: "abb-hw",
+ 10050: "zabbix-agent",
+ 10051: "zabbix-trapper",
+ 10055: "qptlmd",
+ 10080: "amanda",
+ 10081: "famdc",
+ 10100: "itap-ddtp",
+ 10101: "ezmeeting-2",
+ 10102: "ezproxy-2",
+ 10103: "ezrelay",
+ 10104: "swdtp",
+ 10107: "bctp-server",
+ 10110: "nmea-0183",
+ 10113: "netiq-endpoint",
+ 10114: "netiq-qcheck",
+ 10115: "netiq-endpt",
+ 10116: "netiq-voipa",
+ 10117: "iqrm",
+ 10125: "cimple",
+ 10128: "bmc-perf-sd",
+ 10129: "bmc-gms",
+ 10160: "qb-db-server",
+ 10161: "snmptls",
+ 10162: "snmptls-trap",
+ 10200: "trisoap",
+ 10201: "rsms",
+ 10252: "apollo-relay",
+ 10260: "axis-wimp-port",
+ 10261: "tile-ml",
+ 10288: "blocks",
+ 10321: "cosir",
+ 10540: "MOS-lower",
+ 10541: "MOS-upper",
+ 10542: "MOS-aux",
+ 10543: "MOS-soap",
+ 10544: "MOS-soap-opt",
+ 10548: "serverdocs",
+ 10631: "printopia",
+ 10800: "gap",
+ 10805: "lpdg",
+ 10809: "nbd",
+ 10860: "helix",
+ 10880: "bveapi",
+ 10933: "octopustentacle",
+ 10990: "rmiaux",
+ 11000: "irisa",
+ 11001: "metasys",
+ 11095: "weave",
+ 11103: "origo-sync",
+ 11104: "netapp-icmgmt",
+ 11105: "netapp-icdata",
+ 11106: "sgi-lk",
+ 11109: "sgi-dmfmgr",
+ 11110: "sgi-soap",
+ 11111: "vce",
+ 11112: "dicom",
+ 11161: "suncacao-snmp",
+ 11162: "suncacao-jmxmp",
+ 11163: "suncacao-rmi",
+ 11164: "suncacao-csa",
+ 11165: "suncacao-websvc",
+ 11172: "oemcacao-jmxmp",
+ 11173: "t5-straton",
+ 11174: "oemcacao-rmi",
+ 11175: "oemcacao-websvc",
+ 11201: "smsqp",
+ 11202: "dcsl-backup",
+ 11208: "wifree",
+ 11211: "memcache",
+ 11319: "imip",
+ 11320: "imip-channels",
+ 11321: "arena-server",
+ 11367: "atm-uhas",
+ 11371: "hkp",
+ 11489: "asgcypresstcps",
+ 11600: "tempest-port",
+ 11623: "emc-xsw-dconfig",
+ 11720: "h323callsigalt",
+ 11723: "emc-xsw-dcache",
+ 11751: "intrepid-ssl",
+ 11796: "lanschool",
+ 11876: "xoraya",
+ 11967: "sysinfo-sp",
+ 12000: "entextxid",
+ 12001: "entextnetwk",
+ 12002: "entexthigh",
+ 12003: "entextmed",
+ 12004: "entextlow",
+ 12005: "dbisamserver1",
+ 12006: "dbisamserver2",
+ 12007: "accuracer",
+ 12008: "accuracer-dbms",
+ 12010: "edbsrvr",
+ 12012: "vipera",
+ 12013: "vipera-ssl",
+ 12109: "rets-ssl",
+ 12121: "nupaper-ss",
+ 12168: "cawas",
+ 12172: "hivep",
+ 12300: "linogridengine",
+ 12302: "rads",
+ 12321: "warehouse-sss",
+ 12322: "warehouse",
+ 12345: "italk",
+ 12753: "tsaf",
+ 12865: "netperf",
+ 13160: "i-zipqd",
+ 13216: "bcslogc",
+ 13217: "rs-pias",
+ 13218: "emc-vcas-tcp",
+ 13223: "powwow-client",
+ 13224: "powwow-server",
+ 13400: "doip-data",
+ 13720: "bprd",
+ 13721: "bpdbm",
+ 13722: "bpjava-msvc",
+ 13724: "vnetd",
+ 13782: "bpcd",
+ 13783: "vopied",
+ 13785: "nbdb",
+ 13786: "nomdb",
+ 13818: "dsmcc-config",
+ 13819: "dsmcc-session",
+ 13820: "dsmcc-passthru",
+ 13821: "dsmcc-download",
+ 13822: "dsmcc-ccp",
+ 13823: "bmdss",
+ 13894: "ucontrol",
+ 13929: "dta-systems",
+ 13930: "medevolve",
+ 14000: "scotty-ft",
+ 14001: "sua",
+ 14033: "sage-best-com1",
+ 14034: "sage-best-com2",
+ 14141: "vcs-app",
+ 14142: "icpp",
+ 14143: "icpps",
+ 14145: "gcm-app",
+ 14149: "vrts-tdd",
+ 14150: "vcscmd",
+ 14154: "vad",
+ 14250: "cps",
+ 14414: "ca-web-update",
+ 14500: "xpra",
+ 14936: "hde-lcesrvr-1",
+ 14937: "hde-lcesrvr-2",
+ 15000: "hydap",
+ 15002: "onep-tls",
+ 15345: "xpilot",
+ 15363: "3link",
+ 15555: "cisco-snat",
+ 15660: "bex-xr",
+ 15740: "ptp",
+ 15999: "programmar",
+ 16000: "fmsas",
+ 16001: "fmsascon",
+ 16002: "gsms",
+ 16020: "jwpc",
+ 16021: "jwpc-bin",
+ 16161: "sun-sea-port",
+ 16162: "solaris-audit",
+ 16309: "etb4j",
+ 16310: "pduncs",
+ 16311: "pdefmns",
+ 16360: "netserialext1",
+ 16361: "netserialext2",
+ 16367: "netserialext3",
+ 16368: "netserialext4",
+ 16384: "connected",
+ 16385: "rdgs",
+ 16619: "xoms",
+ 16665: "axon-tunnel",
+ 16789: "cadsisvr",
+ 16900: "newbay-snc-mc",
+ 16950: "sgcip",
+ 16991: "intel-rci-mp",
+ 16992: "amt-soap-http",
+ 16993: "amt-soap-https",
+ 16994: "amt-redir-tcp",
+ 16995: "amt-redir-tls",
+ 17007: "isode-dua",
+ 17184: "vestasdlp",
+ 17185: "soundsvirtual",
+ 17219: "chipper",
+ 17220: "avtp",
+ 17221: "avdecc",
+ 17223: "isa100-gci",
+ 17225: "trdp-md",
+ 17234: "integrius-stp",
+ 17235: "ssh-mgmt",
+ 17500: "db-lsp",
+ 17555: "ailith",
+ 17729: "ea",
+ 17754: "zep",
+ 17755: "zigbee-ip",
+ 17756: "zigbee-ips",
+ 17777: "sw-orion",
+ 18000: "biimenu",
+ 18104: "radpdf",
+ 18136: "racf",
+ 18181: "opsec-cvp",
+ 18182: "opsec-ufp",
+ 18183: "opsec-sam",
+ 18184: "opsec-lea",
+ 18185: "opsec-omi",
+ 18186: "ohsc",
+ 18187: "opsec-ela",
+ 18241: "checkpoint-rtm",
+ 18242: "iclid",
+ 18243: "clusterxl",
+ 18262: "gv-pf",
+ 18463: "ac-cluster",
+ 18634: "rds-ib",
+ 18635: "rds-ip",
+ 18668: "vdmmesh",
+ 18769: "ique",
+ 18881: "infotos",
+ 18888: "apc-necmp",
+ 19000: "igrid",
+ 19007: "scintilla",
+ 19020: "j-link",
+ 19191: "opsec-uaa",
+ 19194: "ua-secureagent",
+ 19220: "cora",
+ 19283: "keysrvr",
+ 19315: "keyshadow",
+ 19398: "mtrgtrans",
+ 19410: "hp-sco",
+ 19411: "hp-sca",
+ 19412: "hp-sessmon",
+ 19539: "fxuptp",
+ 19540: "sxuptp",
+ 19541: "jcp",
+ 19998: "iec-104-sec",
+ 19999: "dnp-sec",
+ 20000: "dnp",
+ 20001: "microsan",
+ 20002: "commtact-http",
+ 20003: "commtact-https",
+ 20005: "openwebnet",
+ 20013: "ss-idi",
+ 20014: "opendeploy",
+ 20034: "nburn-id",
+ 20046: "tmophl7mts",
+ 20048: "mountd",
+ 20049: "nfsrdma",
+ 20057: "avesterra",
+ 20167: "tolfab",
+ 20202: "ipdtp-port",
+ 20222: "ipulse-ics",
+ 20480: "emwavemsg",
+ 20670: "track",
+ 20999: "athand-mmp",
+ 21000: "irtrans",
+ 21010: "notezilla-lan",
+ 21221: "aigairserver",
+ 21553: "rdm-tfs",
+ 21554: "dfserver",
+ 21590: "vofr-gateway",
+ 21800: "tvpm",
+ 21845: "webphone",
+ 21846: "netspeak-is",
+ 21847: "netspeak-cs",
+ 21848: "netspeak-acd",
+ 21849: "netspeak-cps",
+ 22000: "snapenetio",
+ 22001: "optocontrol",
+ 22002: "optohost002",
+ 22003: "optohost003",
+ 22004: "optohost004",
+ 22005: "optohost004",
+ 22125: "dcap",
+ 22128: "gsidcap",
+ 22222: "easyengine",
+ 22273: "wnn6",
+ 22305: "cis",
+ 22335: "shrewd-control",
+ 22343: "cis-secure",
+ 22347: "wibukey",
+ 22350: "codemeter",
+ 22351: "codemeter-cmwan",
+ 22537: "caldsoft-backup",
+ 22555: "vocaltec-wconf",
+ 22763: "talikaserver",
+ 22800: "aws-brf",
+ 22951: "brf-gw",
+ 23000: "inovaport1",
+ 23001: "inovaport2",
+ 23002: "inovaport3",
+ 23003: "inovaport4",
+ 23004: "inovaport5",
+ 23005: "inovaport6",
+ 23053: "gntp",
+ 23294: "5afe-dir",
+ 23333: "elxmgmt",
+ 23400: "novar-dbase",
+ 23401: "novar-alarm",
+ 23402: "novar-global",
+ 23456: "aequus",
+ 23457: "aequus-alt",
+ 23546: "areaguard-neo",
+ 24000: "med-ltp",
+ 24001: "med-fsp-rx",
+ 24002: "med-fsp-tx",
+ 24003: "med-supp",
+ 24004: "med-ovw",
+ 24005: "med-ci",
+ 24006: "med-net-svc",
+ 24242: "filesphere",
+ 24249: "vista-4gl",
+ 24321: "ild",
+ 24386: "intel-rci",
+ 24465: "tonidods",
+ 24554: "binkp",
+ 24577: "bilobit",
+ 24666: "sdtvwcam",
+ 24676: "canditv",
+ 24677: "flashfiler",
+ 24678: "proactivate",
+ 24680: "tcc-http",
+ 24754: "cslg",
+ 24922: "find",
+ 25000: "icl-twobase1",
+ 25001: "icl-twobase2",
+ 25002: "icl-twobase3",
+ 25003: "icl-twobase4",
+ 25004: "icl-twobase5",
+ 25005: "icl-twobase6",
+ 25006: "icl-twobase7",
+ 25007: "icl-twobase8",
+ 25008: "icl-twobase9",
+ 25009: "icl-twobase10",
+ 25576: "sauterdongle",
+ 25604: "idtp",
+ 25793: "vocaltec-hos",
+ 25900: "tasp-net",
+ 25901: "niobserver",
+ 25902: "nilinkanalyst",
+ 25903: "niprobe",
+ 26000: "quake",
+ 26133: "scscp",
+ 26208: "wnn6-ds",
+ 26257: "cockroach",
+ 26260: "ezproxy",
+ 26261: "ezmeeting",
+ 26262: "k3software-svr",
+ 26263: "k3software-cli",
+ 26486: "exoline-tcp",
+ 26487: "exoconfig",
+ 26489: "exonet",
+ 27345: "imagepump",
+ 27442: "jesmsjc",
+ 27504: "kopek-httphead",
+ 27782: "ars-vista",
+ 27876: "astrolink",
+ 27999: "tw-auth-key",
+ 28000: "nxlmd",
+ 28001: "pqsp",
+ 28200: "voxelstorm",
+ 28240: "siemensgsm",
+ 28589: "bosswave",
+ 29167: "otmp",
+ 29999: "bingbang",
+ 30000: "ndmps",
+ 30001: "pago-services1",
+ 30002: "pago-services2",
+ 30003: "amicon-fpsu-ra",
+ 30100: "rwp",
+ 30260: "kingdomsonline",
+ 30400: "gs-realtime",
+ 30999: "ovobs",
+ 31016: "ka-sddp",
+ 31020: "autotrac-acp",
+ 31400: "pace-licensed",
+ 31416: "xqosd",
+ 31457: "tetrinet",
+ 31620: "lm-mon",
+ 31685: "dsx-monitor",
+ 31765: "gamesmith-port",
+ 31948: "iceedcp-tx",
+ 31949: "iceedcp-rx",
+ 32034: "iracinghelper",
+ 32249: "t1distproc60",
+ 32400: "plex",
+ 32483: "apm-link",
+ 32635: "sec-ntb-clnt",
+ 32636: "DMExpress",
+ 32767: "filenet-powsrm",
+ 32768: "filenet-tms",
+ 32769: "filenet-rpc",
+ 32770: "filenet-nch",
+ 32771: "filenet-rmi",
+ 32772: "filenet-pa",
+ 32773: "filenet-cm",
+ 32774: "filenet-re",
+ 32775: "filenet-pch",
+ 32776: "filenet-peior",
+ 32777: "filenet-obrok",
+ 32801: "mlsn",
+ 32811: "retp",
+ 32896: "idmgratm",
+ 33060: "mysqlx",
+ 33123: "aurora-balaena",
+ 33331: "diamondport",
+ 33333: "dgi-serv",
+ 33334: "speedtrace",
+ 33434: "traceroute",
+ 33656: "snip-slave",
+ 34249: "turbonote-2",
+ 34378: "p-net-local",
+ 34379: "p-net-remote",
+ 34567: "dhanalakshmi",
+ 34962: "profinet-rt",
+ 34963: "profinet-rtm",
+ 34964: "profinet-cm",
+ 34980: "ethercat",
+ 35000: "heathview",
+ 35001: "rt-viewer",
+ 35002: "rt-sound",
+ 35003: "rt-devicemapper",
+ 35004: "rt-classmanager",
+ 35005: "rt-labtracker",
+ 35006: "rt-helper",
+ 35100: "axio-disc",
+ 35354: "kitim",
+ 35355: "altova-lm",
+ 35356: "guttersnex",
+ 35357: "openstack-id",
+ 36001: "allpeers",
+ 36524: "febooti-aw",
+ 36602: "observium-agent",
+ 36700: "mapx",
+ 36865: "kastenxpipe",
+ 37475: "neckar",
+ 37483: "gdrive-sync",
+ 37601: "eftp",
+ 37654: "unisys-eportal",
+ 38000: "ivs-database",
+ 38001: "ivs-insertion",
+ 38002: "cresco-control",
+ 38201: "galaxy7-data",
+ 38202: "fairview",
+ 38203: "agpolicy",
+ 38800: "sruth",
+ 38865: "secrmmsafecopya",
+ 39681: "turbonote-1",
+ 40000: "safetynetp",
+ 40404: "sptx",
+ 40841: "cscp",
+ 40842: "csccredir",
+ 40843: "csccfirewall",
+ 41111: "fs-qos",
+ 41121: "tentacle",
+ 41230: "z-wave-s",
+ 41794: "crestron-cip",
+ 41795: "crestron-ctp",
+ 41796: "crestron-cips",
+ 41797: "crestron-ctps",
+ 42508: "candp",
+ 42509: "candrp",
+ 42510: "caerpc",
+ 43000: "recvr-rc",
+ 43188: "reachout",
+ 43189: "ndm-agent-port",
+ 43190: "ip-provision",
+ 43191: "noit-transport",
+ 43210: "shaperai",
+ 43439: "eq3-update",
+ 43440: "ew-mgmt",
+ 43441: "ciscocsdb",
+ 44123: "z-wave-tunnel",
+ 44321: "pmcd",
+ 44322: "pmcdproxy",
+ 44323: "pmwebapi",
+ 44444: "cognex-dataman",
+ 44553: "rbr-debug",
+ 44818: "EtherNet-IP-2",
+ 44900: "m3da",
+ 45000: "asmp",
+ 45001: "asmps",
+ 45002: "rs-status",
+ 45045: "synctest",
+ 45054: "invision-ag",
+ 45514: "cloudcheck",
+ 45678: "eba",
+ 45824: "dai-shell",
+ 45825: "qdb2service",
+ 45966: "ssr-servermgr",
+ 46336: "inedo",
+ 46998: "spremotetablet",
+ 46999: "mediabox",
+ 47000: "mbus",
+ 47001: "winrm",
+ 47557: "dbbrowse",
+ 47624: "directplaysrvr",
+ 47806: "ap",
+ 47808: "bacnet",
+ 48000: "nimcontroller",
+ 48001: "nimspooler",
+ 48002: "nimhub",
+ 48003: "nimgtw",
+ 48004: "nimbusdb",
+ 48005: "nimbusdbctrl",
+ 48049: "3gpp-cbsp",
+ 48050: "weandsf",
+ 48128: "isnetserv",
+ 48129: "blp5",
+ 48556: "com-bardac-dw",
+ 48619: "iqobject",
+ 48653: "robotraconteur",
+ 49000: "matahari",
+ 49001: "nusrp",
+}
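// A standalone sketch (not part of the vendored file or of this patch): the
// generated tables in this file -- tcpPortNames, which closes above, and
// udpPortNames, which opens below -- are plain Go maps from a typed port
// number to its IANA-registered service name. The following minimal example
// shows how such a table is typically consulted; the names portNames and
// lookupPortName are hypothetical and exist only in this sketch.
package main

import "fmt"

// portNames mirrors the shape of the generated tables: a fixed map from a
// numeric port to its IANA-registered service name.
var portNames = map[uint16]string{
	53:   "domain",
	443:  "https",
	5432: "postgresql",
}

// lookupPortName returns "port(name)" for a registered port and falls back
// to the bare number when the port has no registered name.
func lookupPortName(p uint16) string {
	if name, ok := portNames[p]; ok {
		return fmt.Sprintf("%d(%s)", p, name)
	}
	return fmt.Sprintf("%d", p)
}

func main() {
	fmt.Println(lookupPortName(443)) // prints: 443(https)
	fmt.Println(lookupPortName(4))   // prints: 4 (no registered name)
}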
+var udpPortNames = map[UDPPort]string{
+ 1: "tcpmux",
+ 2: "compressnet",
+ 3: "compressnet",
+ 5: "rje",
+ 7: "echo",
+ 9: "discard",
+ 11: "systat",
+ 13: "daytime",
+ 17: "qotd",
+ 18: "msp",
+ 19: "chargen",
+ 20: "ftp-data",
+ 21: "ftp",
+ 22: "ssh",
+ 23: "telnet",
+ 25: "smtp",
+ 27: "nsw-fe",
+ 29: "msg-icp",
+ 31: "msg-auth",
+ 33: "dsp",
+ 37: "time",
+ 38: "rap",
+ 39: "rlp",
+ 41: "graphics",
+ 42: "name",
+ 43: "nicname",
+ 44: "mpm-flags",
+ 45: "mpm",
+ 46: "mpm-snd",
+ 48: "auditd",
+ 49: "tacacs",
+ 50: "re-mail-ck",
+ 52: "xns-time",
+ 53: "domain",
+ 54: "xns-ch",
+ 55: "isi-gl",
+ 56: "xns-auth",
+ 58: "xns-mail",
+ 62: "acas",
+ 63: "whoispp",
+ 64: "covia",
+ 65: "tacacs-ds",
+ 66: "sql-net",
+ 67: "bootps",
+ 68: "bootpc",
+ 69: "tftp",
+ 70: "gopher",
+ 71: "netrjs-1",
+ 72: "netrjs-2",
+ 73: "netrjs-3",
+ 74: "netrjs-4",
+ 76: "deos",
+ 78: "vettcp",
+ 79: "finger",
+ 80: "http",
+ 82: "xfer",
+ 83: "mit-ml-dev",
+ 84: "ctf",
+ 85: "mit-ml-dev",
+ 86: "mfcobol",
+ 88: "kerberos",
+ 89: "su-mit-tg",
+ 90: "dnsix",
+ 91: "mit-dov",
+ 92: "npp",
+ 93: "dcp",
+ 94: "objcall",
+ 95: "supdup",
+ 96: "dixie",
+ 97: "swift-rvf",
+ 98: "tacnews",
+ 99: "metagram",
+ 101: "hostname",
+ 102: "iso-tsap",
+ 103: "gppitnp",
+ 104: "acr-nema",
+ 105: "cso",
+ 106: "3com-tsmux",
+ 107: "rtelnet",
+ 108: "snagas",
+ 109: "pop2",
+ 110: "pop3",
+ 111: "sunrpc",
+ 112: "mcidas",
+ 113: "auth",
+ 115: "sftp",
+ 116: "ansanotify",
+ 117: "uucp-path",
+ 118: "sqlserv",
+ 119: "nntp",
+ 120: "cfdptkt",
+ 121: "erpc",
+ 122: "smakynet",
+ 123: "ntp",
+ 124: "ansatrader",
+ 125: "locus-map",
+ 126: "nxedit",
+ 127: "locus-con",
+ 128: "gss-xlicen",
+ 129: "pwdgen",
+ 130: "cisco-fna",
+ 131: "cisco-tna",
+ 132: "cisco-sys",
+ 133: "statsrv",
+ 134: "ingres-net",
+ 135: "epmap",
+ 136: "profile",
+ 137: "netbios-ns",
+ 138: "netbios-dgm",
+ 139: "netbios-ssn",
+ 140: "emfis-data",
+ 141: "emfis-cntl",
+ 142: "bl-idm",
+ 143: "imap",
+ 144: "uma",
+ 145: "uaac",
+ 146: "iso-tp0",
+ 147: "iso-ip",
+ 148: "jargon",
+ 149: "aed-512",
+ 150: "sql-net",
+ 151: "hems",
+ 152: "bftp",
+ 153: "sgmp",
+ 154: "netsc-prod",
+ 155: "netsc-dev",
+ 156: "sqlsrv",
+ 157: "knet-cmp",
+ 158: "pcmail-srv",
+ 159: "nss-routing",
+ 160: "sgmp-traps",
+ 161: "snmp",
+ 162: "snmptrap",
+ 163: "cmip-man",
+ 164: "cmip-agent",
+ 165: "xns-courier",
+ 166: "s-net",
+ 167: "namp",
+ 168: "rsvd",
+ 169: "send",
+ 170: "print-srv",
+ 171: "multiplex",
+ 172: "cl-1",
+ 173: "xyplex-mux",
+ 174: "mailq",
+ 175: "vmnet",
+ 176: "genrad-mux",
+ 177: "xdmcp",
+ 178: "nextstep",
+ 179: "bgp",
+ 180: "ris",
+ 181: "unify",
+ 182: "audit",
+ 183: "ocbinder",
+ 184: "ocserver",
+ 185: "remote-kis",
+ 186: "kis",
+ 187: "aci",
+ 188: "mumps",
+ 189: "qft",
+ 190: "gacp",
+ 191: "prospero",
+ 192: "osu-nms",
+ 193: "srmp",
+ 194: "irc",
+ 195: "dn6-nlm-aud",
+ 196: "dn6-smm-red",
+ 197: "dls",
+ 198: "dls-mon",
+ 199: "smux",
+ 200: "src",
+ 201: "at-rtmp",
+ 202: "at-nbp",
+ 203: "at-3",
+ 204: "at-echo",
+ 205: "at-5",
+ 206: "at-zis",
+ 207: "at-7",
+ 208: "at-8",
+ 209: "qmtp",
+ 210: "z39-50",
+ 211: "914c-g",
+ 212: "anet",
+ 213: "ipx",
+ 214: "vmpwscs",
+ 215: "softpc",
+ 216: "CAIlic",
+ 217: "dbase",
+ 218: "mpp",
+ 219: "uarps",
+ 220: "imap3",
+ 221: "fln-spx",
+ 222: "rsh-spx",
+ 223: "cdc",
+ 224: "masqdialer",
+ 242: "direct",
+ 243: "sur-meas",
+ 244: "inbusiness",
+ 245: "link",
+ 246: "dsp3270",
+ 247: "subntbcst-tftp",
+ 248: "bhfhs",
+ 256: "rap",
+ 257: "set",
+ 259: "esro-gen",
+ 260: "openport",
+ 261: "nsiiops",
+ 262: "arcisdms",
+ 263: "hdap",
+ 264: "bgmp",
+ 265: "x-bone-ctl",
+ 266: "sst",
+ 267: "td-service",
+ 268: "td-replica",
+ 269: "manet",
+ 270: "gist",
+ 280: "http-mgmt",
+ 281: "personal-link",
+ 282: "cableport-ax",
+ 283: "rescap",
+ 284: "corerjd",
+ 286: "fxp",
+ 287: "k-block",
+ 308: "novastorbakcup",
+ 309: "entrusttime",
+ 310: "bhmds",
+ 311: "asip-webadmin",
+ 312: "vslmp",
+ 313: "magenta-logic",
+ 314: "opalis-robot",
+ 315: "dpsi",
+ 316: "decauth",
+ 317: "zannet",
+ 318: "pkix-timestamp",
+ 319: "ptp-event",
+ 320: "ptp-general",
+ 321: "pip",
+ 322: "rtsps",
+ 333: "texar",
+ 344: "pdap",
+ 345: "pawserv",
+ 346: "zserv",
+ 347: "fatserv",
+ 348: "csi-sgwp",
+ 349: "mftp",
+ 350: "matip-type-a",
+ 351: "matip-type-b",
+ 352: "dtag-ste-sb",
+ 353: "ndsauth",
+ 354: "bh611",
+ 355: "datex-asn",
+ 356: "cloanto-net-1",
+ 357: "bhevent",
+ 358: "shrinkwrap",
+ 359: "nsrmp",
+ 360: "scoi2odialog",
+ 361: "semantix",
+ 362: "srssend",
+ 363: "rsvp-tunnel",
+ 364: "aurora-cmgr",
+ 365: "dtk",
+ 366: "odmr",
+ 367: "mortgageware",
+ 368: "qbikgdp",
+ 369: "rpc2portmap",
+ 370: "codaauth2",
+ 371: "clearcase",
+ 372: "ulistproc",
+ 373: "legent-1",
+ 374: "legent-2",
+ 375: "hassle",
+ 376: "nip",
+ 377: "tnETOS",
+ 378: "dsETOS",
+ 379: "is99c",
+ 380: "is99s",
+ 381: "hp-collector",
+ 382: "hp-managed-node",
+ 383: "hp-alarm-mgr",
+ 384: "arns",
+ 385: "ibm-app",
+ 386: "asa",
+ 387: "aurp",
+ 388: "unidata-ldm",
+ 389: "ldap",
+ 390: "uis",
+ 391: "synotics-relay",
+ 392: "synotics-broker",
+ 393: "meta5",
+ 394: "embl-ndt",
+ 395: "netcp",
+ 396: "netware-ip",
+ 397: "mptn",
+ 398: "kryptolan",
+ 399: "iso-tsap-c2",
+ 400: "osb-sd",
+ 401: "ups",
+ 402: "genie",
+ 403: "decap",
+ 404: "nced",
+ 405: "ncld",
+ 406: "imsp",
+ 407: "timbuktu",
+ 408: "prm-sm",
+ 409: "prm-nm",
+ 410: "decladebug",
+ 411: "rmt",
+ 412: "synoptics-trap",
+ 413: "smsp",
+ 414: "infoseek",
+ 415: "bnet",
+ 416: "silverplatter",
+ 417: "onmux",
+ 418: "hyper-g",
+ 419: "ariel1",
+ 420: "smpte",
+ 421: "ariel2",
+ 422: "ariel3",
+ 423: "opc-job-start",
+ 424: "opc-job-track",
+ 425: "icad-el",
+ 426: "smartsdp",
+ 427: "svrloc",
+ 428: "ocs-cmu",
+ 429: "ocs-amu",
+ 430: "utmpsd",
+ 431: "utmpcd",
+ 432: "iasd",
+ 433: "nnsp",
+ 434: "mobileip-agent",
+ 435: "mobilip-mn",
+ 436: "dna-cml",
+ 437: "comscm",
+ 438: "dsfgw",
+ 439: "dasp",
+ 440: "sgcp",
+ 441: "decvms-sysmgt",
+ 442: "cvc-hostd",
+ 443: "https",
+ 444: "snpp",
+ 445: "microsoft-ds",
+ 446: "ddm-rdb",
+ 447: "ddm-dfm",
+ 448: "ddm-ssl",
+ 449: "as-servermap",
+ 450: "tserver",
+ 451: "sfs-smp-net",
+ 452: "sfs-config",
+ 453: "creativeserver",
+ 454: "contentserver",
+ 455: "creativepartnr",
+ 456: "macon-udp",
+ 457: "scohelp",
+ 458: "appleqtc",
+ 459: "ampr-rcmd",
+ 460: "skronk",
+ 461: "datasurfsrv",
+ 462: "datasurfsrvsec",
+ 463: "alpes",
+ 464: "kpasswd",
+ 465: "igmpv3lite",
+ 466: "digital-vrc",
+ 467: "mylex-mapd",
+ 468: "photuris",
+ 469: "rcp",
+ 470: "scx-proxy",
+ 471: "mondex",
+ 472: "ljk-login",
+ 473: "hybrid-pop",
+ 474: "tn-tl-w2",
+ 475: "tcpnethaspsrv",
+ 476: "tn-tl-fd1",
+ 477: "ss7ns",
+ 478: "spsc",
+ 479: "iafserver",
+ 480: "iafdbase",
+ 481: "ph",
+ 482: "bgs-nsi",
+ 483: "ulpnet",
+ 484: "integra-sme",
+ 485: "powerburst",
+ 486: "avian",
+ 487: "saft",
+ 488: "gss-http",
+ 489: "nest-protocol",
+ 490: "micom-pfs",
+ 491: "go-login",
+ 492: "ticf-1",
+ 493: "ticf-2",
+ 494: "pov-ray",
+ 495: "intecourier",
+ 496: "pim-rp-disc",
+ 497: "retrospect",
+ 498: "siam",
+ 499: "iso-ill",
+ 500: "isakmp",
+ 501: "stmf",
+ 502: "mbap",
+ 503: "intrinsa",
+ 504: "citadel",
+ 505: "mailbox-lm",
+ 506: "ohimsrv",
+ 507: "crs",
+ 508: "xvttp",
+ 509: "snare",
+ 510: "fcp",
+ 511: "passgo",
+ 512: "comsat",
+ 513: "who",
+ 514: "syslog",
+ 515: "printer",
+ 516: "videotex",
+ 517: "talk",
+ 518: "ntalk",
+ 519: "utime",
+ 520: "router",
+ 521: "ripng",
+ 522: "ulp",
+ 523: "ibm-db2",
+ 524: "ncp",
+ 525: "timed",
+ 526: "tempo",
+ 527: "stx",
+ 528: "custix",
+ 529: "irc-serv",
+ 530: "courier",
+ 531: "conference",
+ 532: "netnews",
+ 533: "netwall",
+ 534: "windream",
+ 535: "iiop",
+ 536: "opalis-rdv",
+ 537: "nmsp",
+ 538: "gdomap",
+ 539: "apertus-ldp",
+ 540: "uucp",
+ 541: "uucp-rlogin",
+ 542: "commerce",
+ 543: "klogin",
+ 544: "kshell",
+ 545: "appleqtcsrvr",
+ 546: "dhcpv6-client",
+ 547: "dhcpv6-server",
+ 548: "afpovertcp",
+ 549: "idfp",
+ 550: "new-rwho",
+ 551: "cybercash",
+ 552: "devshr-nts",
+ 553: "pirp",
+ 554: "rtsp",
+ 555: "dsf",
+ 556: "remotefs",
+ 557: "openvms-sysipc",
+ 558: "sdnskmp",
+ 559: "teedtap",
+ 560: "rmonitor",
+ 561: "monitor",
+ 562: "chshell",
+ 563: "nntps",
+ 564: "9pfs",
+ 565: "whoami",
+ 566: "streettalk",
+ 567: "banyan-rpc",
+ 568: "ms-shuttle",
+ 569: "ms-rome",
+ 570: "meter",
+ 571: "meter",
+ 572: "sonar",
+ 573: "banyan-vip",
+ 574: "ftp-agent",
+ 575: "vemmi",
+ 576: "ipcd",
+ 577: "vnas",
+ 578: "ipdd",
+ 579: "decbsrv",
+ 580: "sntp-heartbeat",
+ 581: "bdp",
+ 582: "scc-security",
+ 583: "philips-vc",
+ 584: "keyserver",
+ 586: "password-chg",
+ 587: "submission",
+ 588: "cal",
+ 589: "eyelink",
+ 590: "tns-cml",
+ 591: "http-alt",
+ 592: "eudora-set",
+ 593: "http-rpc-epmap",
+ 594: "tpip",
+ 595: "cab-protocol",
+ 596: "smsd",
+ 597: "ptcnameservice",
+ 598: "sco-websrvrmg3",
+ 599: "acp",
+ 600: "ipcserver",
+ 601: "syslog-conn",
+ 602: "xmlrpc-beep",
+ 603: "idxp",
+ 604: "tunnel",
+ 605: "soap-beep",
+ 606: "urm",
+ 607: "nqs",
+ 608: "sift-uft",
+ 609: "npmp-trap",
+ 610: "npmp-local",
+ 611: "npmp-gui",
+ 612: "hmmp-ind",
+ 613: "hmmp-op",
+ 614: "sshell",
+ 615: "sco-inetmgr",
+ 616: "sco-sysmgr",
+ 617: "sco-dtmgr",
+ 618: "dei-icda",
+ 619: "compaq-evm",
+ 620: "sco-websrvrmgr",
+ 621: "escp-ip",
+ 622: "collaborator",
+ 623: "asf-rmcp",
+ 624: "cryptoadmin",
+ 625: "dec-dlm",
+ 626: "asia",
+ 627: "passgo-tivoli",
+ 628: "qmqp",
+ 629: "3com-amp3",
+ 630: "rda",
+ 631: "ipp",
+ 632: "bmpp",
+ 633: "servstat",
+ 634: "ginad",
+ 635: "rlzdbase",
+ 636: "ldaps",
+ 637: "lanserver",
+ 638: "mcns-sec",
+ 639: "msdp",
+ 640: "entrust-sps",
+ 641: "repcmd",
+ 642: "esro-emsdp",
+ 643: "sanity",
+ 644: "dwr",
+ 645: "pssc",
+ 646: "ldp",
+ 647: "dhcp-failover",
+ 648: "rrp",
+ 649: "cadview-3d",
+ 650: "obex",
+ 651: "ieee-mms",
+ 652: "hello-port",
+ 653: "repscmd",
+ 654: "aodv",
+ 655: "tinc",
+ 656: "spmp",
+ 657: "rmc",
+ 658: "tenfold",
+ 660: "mac-srvr-admin",
+ 661: "hap",
+ 662: "pftp",
+ 663: "purenoise",
+ 664: "asf-secure-rmcp",
+ 665: "sun-dr",
+ 666: "mdqs",
+ 667: "disclose",
+ 668: "mecomm",
+ 669: "meregister",
+ 670: "vacdsm-sws",
+ 671: "vacdsm-app",
+ 672: "vpps-qua",
+ 673: "cimplex",
+ 674: "acap",
+ 675: "dctp",
+ 676: "vpps-via",
+ 677: "vpp",
+ 678: "ggf-ncp",
+ 679: "mrm",
+ 680: "entrust-aaas",
+ 681: "entrust-aams",
+ 682: "xfr",
+ 683: "corba-iiop",
+ 684: "corba-iiop-ssl",
+ 685: "mdc-portmapper",
+ 686: "hcp-wismar",
+ 687: "asipregistry",
+ 688: "realm-rusd",
+ 689: "nmap",
+ 690: "vatp",
+ 691: "msexch-routing",
+ 692: "hyperwave-isp",
+ 693: "connendp",
+ 694: "ha-cluster",
+ 695: "ieee-mms-ssl",
+ 696: "rushd",
+ 697: "uuidgen",
+ 698: "olsr",
+ 699: "accessnetwork",
+ 700: "epp",
+ 701: "lmp",
+ 702: "iris-beep",
+ 704: "elcsd",
+ 705: "agentx",
+ 706: "silc",
+ 707: "borland-dsj",
+ 709: "entrust-kmsh",
+ 710: "entrust-ash",
+ 711: "cisco-tdp",
+ 712: "tbrpf",
+ 713: "iris-xpc",
+ 714: "iris-xpcs",
+ 715: "iris-lwz",
+ 716: "pana",
+ 729: "netviewdm1",
+ 730: "netviewdm2",
+ 731: "netviewdm3",
+ 741: "netgw",
+ 742: "netrcs",
+ 744: "flexlm",
+ 747: "fujitsu-dev",
+ 748: "ris-cm",
+ 749: "kerberos-adm",
+ 750: "loadav",
+ 751: "pump",
+ 752: "qrh",
+ 753: "rrh",
+ 754: "tell",
+ 758: "nlogin",
+ 759: "con",
+ 760: "ns",
+ 761: "rxe",
+ 762: "quotad",
+ 763: "cycleserv",
+ 764: "omserv",
+ 765: "webster",
+ 767: "phonebook",
+ 769: "vid",
+ 770: "cadlock",
+ 771: "rtip",
+ 772: "cycleserv2",
+ 773: "notify",
+ 774: "acmaint-dbd",
+ 775: "acmaint-transd",
+ 776: "wpages",
+ 777: "multiling-http",
+ 780: "wpgs",
+ 800: "mdbs-daemon",
+ 801: "device",
+ 802: "mbap-s",
+ 810: "fcp-udp",
+ 828: "itm-mcell-s",
+ 829: "pkix-3-ca-ra",
+ 830: "netconf-ssh",
+ 831: "netconf-beep",
+ 832: "netconfsoaphttp",
+ 833: "netconfsoapbeep",
+ 847: "dhcp-failover2",
+ 848: "gdoi",
+ 853: "domain-s",
+ 854: "dlep",
+ 860: "iscsi",
+ 861: "owamp-control",
+ 862: "twamp-control",
+ 873: "rsync",
+ 886: "iclcnet-locate",
+ 887: "iclcnet-svinfo",
+ 888: "accessbuilder",
+ 900: "omginitialrefs",
+ 901: "smpnameres",
+ 902: "ideafarm-door",
+ 903: "ideafarm-panic",
+ 910: "kink",
+ 911: "xact-backup",
+ 912: "apex-mesh",
+ 913: "apex-edge",
+ 989: "ftps-data",
+ 990: "ftps",
+ 991: "nas",
+ 992: "telnets",
+ 993: "imaps",
+ 995: "pop3s",
+ 996: "vsinet",
+ 997: "maitrd",
+ 998: "puparp",
+ 999: "applix",
+ 1000: "cadlock2",
+ 1010: "surf",
+ 1021: "exp1",
+ 1022: "exp2",
+ 1025: "blackjack",
+ 1026: "cap",
+ 1027: "6a44",
+ 1029: "solid-mux",
+ 1033: "netinfo-local",
+ 1034: "activesync",
+ 1035: "mxxrlogin",
+ 1036: "nsstp",
+ 1037: "ams",
+ 1038: "mtqp",
+ 1039: "sbl",
+ 1040: "netarx",
+ 1041: "danf-ak2",
+ 1042: "afrog",
+ 1043: "boinc-client",
+ 1044: "dcutility",
+ 1045: "fpitp",
+ 1046: "wfremotertm",
+ 1047: "neod1",
+ 1048: "neod2",
+ 1049: "td-postman",
+ 1050: "cma",
+ 1051: "optima-vnet",
+ 1052: "ddt",
+ 1053: "remote-as",
+ 1054: "brvread",
+ 1055: "ansyslmd",
+ 1056: "vfo",
+ 1057: "startron",
+ 1058: "nim",
+ 1059: "nimreg",
+ 1060: "polestar",
+ 1061: "kiosk",
+ 1062: "veracity",
+ 1063: "kyoceranetdev",
+ 1064: "jstel",
+ 1065: "syscomlan",
+ 1066: "fpo-fns",
+ 1067: "instl-boots",
+ 1068: "instl-bootc",
+ 1069: "cognex-insight",
+ 1070: "gmrupdateserv",
+ 1071: "bsquare-voip",
+ 1072: "cardax",
+ 1073: "bridgecontrol",
+ 1074: "warmspotMgmt",
+ 1075: "rdrmshc",
+ 1076: "dab-sti-c",
+ 1077: "imgames",
+ 1078: "avocent-proxy",
+ 1079: "asprovatalk",
+ 1080: "socks",
+ 1081: "pvuniwien",
+ 1082: "amt-esd-prot",
+ 1083: "ansoft-lm-1",
+ 1084: "ansoft-lm-2",
+ 1085: "webobjects",
+ 1086: "cplscrambler-lg",
+ 1087: "cplscrambler-in",
+ 1088: "cplscrambler-al",
+ 1089: "ff-annunc",
+ 1090: "ff-fms",
+ 1091: "ff-sm",
+ 1092: "obrpd",
+ 1093: "proofd",
+ 1094: "rootd",
+ 1095: "nicelink",
+ 1096: "cnrprotocol",
+ 1097: "sunclustermgr",
+ 1098: "rmiactivation",
+ 1099: "rmiregistry",
+ 1100: "mctp",
+ 1101: "pt2-discover",
+ 1102: "adobeserver-1",
+ 1103: "adobeserver-2",
+ 1104: "xrl",
+ 1105: "ftranhc",
+ 1106: "isoipsigport-1",
+ 1107: "isoipsigport-2",
+ 1108: "ratio-adp",
+ 1110: "nfsd-keepalive",
+ 1111: "lmsocialserver",
+ 1112: "icp",
+ 1113: "ltp-deepspace",
+ 1114: "mini-sql",
+ 1115: "ardus-trns",
+ 1116: "ardus-cntl",
+ 1117: "ardus-mtrns",
+ 1118: "sacred",
+ 1119: "bnetgame",
+ 1120: "bnetfile",
+ 1121: "rmpp",
+ 1122: "availant-mgr",
+ 1123: "murray",
+ 1124: "hpvmmcontrol",
+ 1125: "hpvmmagent",
+ 1126: "hpvmmdata",
+ 1127: "kwdb-commn",
+ 1128: "saphostctrl",
+ 1129: "saphostctrls",
+ 1130: "casp",
+ 1131: "caspssl",
+ 1132: "kvm-via-ip",
+ 1133: "dfn",
+ 1134: "aplx",
+ 1135: "omnivision",
+ 1136: "hhb-gateway",
+ 1137: "trim",
+ 1138: "encrypted-admin",
+ 1139: "evm",
+ 1140: "autonoc",
+ 1141: "mxomss",
+ 1142: "edtools",
+ 1143: "imyx",
+ 1144: "fuscript",
+ 1145: "x9-icue",
+ 1146: "audit-transfer",
+ 1147: "capioverlan",
+ 1148: "elfiq-repl",
+ 1149: "bvtsonar",
+ 1150: "blaze",
+ 1151: "unizensus",
+ 1152: "winpoplanmess",
+ 1153: "c1222-acse",
+ 1154: "resacommunity",
+ 1155: "nfa",
+ 1156: "iascontrol-oms",
+ 1157: "iascontrol",
+ 1158: "dbcontrol-oms",
+ 1159: "oracle-oms",
+ 1160: "olsv",
+ 1161: "health-polling",
+ 1162: "health-trap",
+ 1163: "sddp",
+ 1164: "qsm-proxy",
+ 1165: "qsm-gui",
+ 1166: "qsm-remote",
+ 1167: "cisco-ipsla",
+ 1168: "vchat",
+ 1169: "tripwire",
+ 1170: "atc-lm",
+ 1171: "atc-appserver",
+ 1172: "dnap",
+ 1173: "d-cinema-rrp",
+ 1174: "fnet-remote-ui",
+ 1175: "dossier",
+ 1176: "indigo-server",
+ 1177: "dkmessenger",
+ 1178: "sgi-storman",
+ 1179: "b2n",
+ 1180: "mc-client",
+ 1181: "3comnetman",
+ 1182: "accelenet-data",
+ 1183: "llsurfup-http",
+ 1184: "llsurfup-https",
+ 1185: "catchpole",
+ 1186: "mysql-cluster",
+ 1187: "alias",
+ 1188: "hp-webadmin",
+ 1189: "unet",
+ 1190: "commlinx-avl",
+ 1191: "gpfs",
+ 1192: "caids-sensor",
+ 1193: "fiveacross",
+ 1194: "openvpn",
+ 1195: "rsf-1",
+ 1196: "netmagic",
+ 1197: "carrius-rshell",
+ 1198: "cajo-discovery",
+ 1199: "dmidi",
+ 1200: "scol",
+ 1201: "nucleus-sand",
+ 1202: "caiccipc",
+ 1203: "ssslic-mgr",
+ 1204: "ssslog-mgr",
+ 1205: "accord-mgc",
+ 1206: "anthony-data",
+ 1207: "metasage",
+ 1208: "seagull-ais",
+ 1209: "ipcd3",
+ 1210: "eoss",
+ 1211: "groove-dpp",
+ 1212: "lupa",
+ 1213: "mpc-lifenet",
+ 1214: "kazaa",
+ 1215: "scanstat-1",
+ 1216: "etebac5",
+ 1217: "hpss-ndapi",
+ 1218: "aeroflight-ads",
+ 1219: "aeroflight-ret",
+ 1220: "qt-serveradmin",
+ 1221: "sweetware-apps",
+ 1222: "nerv",
+ 1223: "tgp",
+ 1224: "vpnz",
+ 1225: "slinkysearch",
+ 1226: "stgxfws",
+ 1227: "dns2go",
+ 1228: "florence",
+ 1229: "zented",
+ 1230: "periscope",
+ 1231: "menandmice-lpm",
+ 1232: "first-defense",
+ 1233: "univ-appserver",
+ 1234: "search-agent",
+ 1235: "mosaicsyssvc1",
+ 1236: "bvcontrol",
+ 1237: "tsdos390",
+ 1238: "hacl-qs",
+ 1239: "nmsd",
+ 1240: "instantia",
+ 1241: "nessus",
+ 1242: "nmasoverip",
+ 1243: "serialgateway",
+ 1244: "isbconference1",
+ 1245: "isbconference2",
+ 1246: "payrouter",
+ 1247: "visionpyramid",
+ 1248: "hermes",
+ 1249: "mesavistaco",
+ 1250: "swldy-sias",
+ 1251: "servergraph",
+ 1252: "bspne-pcc",
+ 1253: "q55-pcc",
+ 1254: "de-noc",
+ 1255: "de-cache-query",
+ 1256: "de-server",
+ 1257: "shockwave2",
+ 1258: "opennl",
+ 1259: "opennl-voice",
+ 1260: "ibm-ssd",
+ 1261: "mpshrsv",
+ 1262: "qnts-orb",
+ 1263: "dka",
+ 1264: "prat",
+ 1265: "dssiapi",
+ 1266: "dellpwrappks",
+ 1267: "epc",
+ 1268: "propel-msgsys",
+ 1269: "watilapp",
+ 1270: "opsmgr",
+ 1271: "excw",
+ 1272: "cspmlockmgr",
+ 1273: "emc-gateway",
+ 1274: "t1distproc",
+ 1275: "ivcollector",
+ 1277: "miva-mqs",
+ 1278: "dellwebadmin-1",
+ 1279: "dellwebadmin-2",
+ 1280: "pictrography",
+ 1281: "healthd",
+ 1282: "emperion",
+ 1283: "productinfo",
+ 1284: "iee-qfx",
+ 1285: "neoiface",
+ 1286: "netuitive",
+ 1287: "routematch",
+ 1288: "navbuddy",
+ 1289: "jwalkserver",
+ 1290: "winjaserver",
+ 1291: "seagulllms",
+ 1292: "dsdn",
+ 1293: "pkt-krb-ipsec",
+ 1294: "cmmdriver",
+ 1295: "ehtp",
+ 1296: "dproxy",
+ 1297: "sdproxy",
+ 1298: "lpcp",
+ 1299: "hp-sci",
+ 1300: "h323hostcallsc",
+ 1301: "ci3-software-1",
+ 1302: "ci3-software-2",
+ 1303: "sftsrv",
+ 1304: "boomerang",
+ 1305: "pe-mike",
+ 1306: "re-conn-proto",
+ 1307: "pacmand",
+ 1308: "odsi",
+ 1309: "jtag-server",
+ 1310: "husky",
+ 1311: "rxmon",
+ 1312: "sti-envision",
+ 1313: "bmc-patroldb",
+ 1314: "pdps",
+ 1315: "els",
+ 1316: "exbit-escp",
+ 1317: "vrts-ipcserver",
+ 1318: "krb5gatekeeper",
+ 1319: "amx-icsp",
+ 1320: "amx-axbnet",
+ 1321: "pip",
+ 1322: "novation",
+ 1323: "brcd",
+ 1324: "delta-mcp",
+ 1325: "dx-instrument",
+ 1326: "wimsic",
+ 1327: "ultrex",
+ 1328: "ewall",
+ 1329: "netdb-export",
+ 1330: "streetperfect",
+ 1331: "intersan",
+ 1332: "pcia-rxp-b",
+ 1333: "passwrd-policy",
+ 1334: "writesrv",
+ 1335: "digital-notary",
+ 1336: "ischat",
+ 1337: "menandmice-dns",
+ 1338: "wmc-log-svc",
+ 1339: "kjtsiteserver",
+ 1340: "naap",
+ 1341: "qubes",
+ 1342: "esbroker",
+ 1343: "re101",
+ 1344: "icap",
+ 1345: "vpjp",
+ 1346: "alta-ana-lm",
+ 1347: "bbn-mmc",
+ 1348: "bbn-mmx",
+ 1349: "sbook",
+ 1350: "editbench",
+ 1351: "equationbuilder",
+ 1352: "lotusnote",
+ 1353: "relief",
+ 1354: "XSIP-network",
+ 1355: "intuitive-edge",
+ 1356: "cuillamartin",
+ 1357: "pegboard",
+ 1358: "connlcli",
+ 1359: "ftsrv",
+ 1360: "mimer",
+ 1361: "linx",
+ 1362: "timeflies",
+ 1363: "ndm-requester",
+ 1364: "ndm-server",
+ 1365: "adapt-sna",
+ 1366: "netware-csp",
+ 1367: "dcs",
+ 1368: "screencast",
+ 1369: "gv-us",
+ 1370: "us-gv",
+ 1371: "fc-cli",
+ 1372: "fc-ser",
+ 1373: "chromagrafx",
+ 1374: "molly",
+ 1375: "bytex",
+ 1376: "ibm-pps",
+ 1377: "cichlid",
+ 1378: "elan",
+ 1379: "dbreporter",
+ 1380: "telesis-licman",
+ 1381: "apple-licman",
+ 1382: "udt-os",
+ 1383: "gwha",
+ 1384: "os-licman",
+ 1385: "atex-elmd",
+ 1386: "checksum",
+ 1387: "cadsi-lm",
+ 1388: "objective-dbc",
+ 1389: "iclpv-dm",
+ 1390: "iclpv-sc",
+ 1391: "iclpv-sas",
+ 1392: "iclpv-pm",
+ 1393: "iclpv-nls",
+ 1394: "iclpv-nlc",
+ 1395: "iclpv-wsm",
+ 1396: "dvl-activemail",
+ 1397: "audio-activmail",
+ 1398: "video-activmail",
+ 1399: "cadkey-licman",
+ 1400: "cadkey-tablet",
+ 1401: "goldleaf-licman",
+ 1402: "prm-sm-np",
+ 1403: "prm-nm-np",
+ 1404: "igi-lm",
+ 1405: "ibm-res",
+ 1406: "netlabs-lm",
+ 1408: "sophia-lm",
+ 1409: "here-lm",
+ 1410: "hiq",
+ 1411: "af",
+ 1412: "innosys",
+ 1413: "innosys-acl",
+ 1414: "ibm-mqseries",
+ 1415: "dbstar",
+ 1416: "novell-lu6-2",
+ 1417: "timbuktu-srv1",
+ 1418: "timbuktu-srv2",
+ 1419: "timbuktu-srv3",
+ 1420: "timbuktu-srv4",
+ 1421: "gandalf-lm",
+ 1422: "autodesk-lm",
+ 1423: "essbase",
+ 1424: "hybrid",
+ 1425: "zion-lm",
+ 1426: "sais",
+ 1427: "mloadd",
+ 1428: "informatik-lm",
+ 1429: "nms",
+ 1430: "tpdu",
+ 1431: "rgtp",
+ 1432: "blueberry-lm",
+ 1433: "ms-sql-s",
+ 1434: "ms-sql-m",
+ 1435: "ibm-cics",
+ 1436: "saism",
+ 1437: "tabula",
+ 1438: "eicon-server",
+ 1439: "eicon-x25",
+ 1440: "eicon-slp",
+ 1441: "cadis-1",
+ 1442: "cadis-2",
+ 1443: "ies-lm",
+ 1444: "marcam-lm",
+ 1445: "proxima-lm",
+ 1446: "ora-lm",
+ 1447: "apri-lm",
+ 1448: "oc-lm",
+ 1449: "peport",
+ 1450: "dwf",
+ 1451: "infoman",
+ 1452: "gtegsc-lm",
+ 1453: "genie-lm",
+ 1454: "interhdl-elmd",
+ 1455: "esl-lm",
+ 1456: "dca",
+ 1457: "valisys-lm",
+ 1458: "nrcabq-lm",
+ 1459: "proshare1",
+ 1460: "proshare2",
+ 1461: "ibm-wrless-lan",
+ 1462: "world-lm",
+ 1463: "nucleus",
+ 1464: "msl-lmd",
+ 1465: "pipes",
+ 1466: "oceansoft-lm",
+ 1467: "csdmbase",
+ 1468: "csdm",
+ 1469: "aal-lm",
+ 1470: "uaiact",
+ 1471: "csdmbase",
+ 1472: "csdm",
+ 1473: "openmath",
+ 1474: "telefinder",
+ 1475: "taligent-lm",
+ 1476: "clvm-cfg",
+ 1477: "ms-sna-server",
+ 1478: "ms-sna-base",
+ 1479: "dberegister",
+ 1480: "pacerforum",
+ 1481: "airs",
+ 1482: "miteksys-lm",
+ 1483: "afs",
+ 1484: "confluent",
+ 1485: "lansource",
+ 1486: "nms-topo-serv",
+ 1487: "localinfosrvr",
+ 1488: "docstor",
+ 1489: "dmdocbroker",
+ 1490: "insitu-conf",
+ 1492: "stone-design-1",
+ 1493: "netmap-lm",
+ 1494: "ica",
+ 1495: "cvc",
+ 1496: "liberty-lm",
+ 1497: "rfx-lm",
+ 1498: "sybase-sqlany",
+ 1499: "fhc",
+ 1500: "vlsi-lm",
+ 1501: "saiscm",
+ 1502: "shivadiscovery",
+ 1503: "imtc-mcs",
+ 1504: "evb-elm",
+ 1505: "funkproxy",
+ 1506: "utcd",
+ 1507: "symplex",
+ 1508: "diagmond",
+ 1509: "robcad-lm",
+ 1510: "mvx-lm",
+ 1511: "3l-l1",
+ 1512: "wins",
+ 1513: "fujitsu-dtc",
+ 1514: "fujitsu-dtcns",
+ 1515: "ifor-protocol",
+ 1516: "vpad",
+ 1517: "vpac",
+ 1518: "vpvd",
+ 1519: "vpvc",
+ 1520: "atm-zip-office",
+ 1521: "ncube-lm",
+ 1522: "ricardo-lm",
+ 1523: "cichild-lm",
+ 1524: "ingreslock",
+ 1525: "orasrv",
+ 1526: "pdap-np",
+ 1527: "tlisrv",
+ 1528: "ngr-t",
+ 1529: "coauthor",
+ 1530: "rap-service",
+ 1531: "rap-listen",
+ 1532: "miroconnect",
+ 1533: "virtual-places",
+ 1534: "micromuse-lm",
+ 1535: "ampr-info",
+ 1536: "ampr-inter",
+ 1537: "sdsc-lm",
+ 1538: "3ds-lm",
+ 1539: "intellistor-lm",
+ 1540: "rds",
+ 1541: "rds2",
+ 1542: "gridgen-elmd",
+ 1543: "simba-cs",
+ 1544: "aspeclmd",
+ 1545: "vistium-share",
+ 1546: "abbaccuray",
+ 1547: "laplink",
+ 1548: "axon-lm",
+ 1549: "shivasound",
+ 1550: "3m-image-lm",
+ 1551: "hecmtl-db",
+ 1552: "pciarray",
+ 1553: "sna-cs",
+ 1554: "caci-lm",
+ 1555: "livelan",
+ 1556: "veritas-pbx",
+ 1557: "arbortext-lm",
+ 1558: "xingmpeg",
+ 1559: "web2host",
+ 1560: "asci-val",
+ 1561: "facilityview",
+ 1562: "pconnectmgr",
+ 1563: "cadabra-lm",
+ 1564: "pay-per-view",
+ 1565: "winddlb",
+ 1566: "corelvideo",
+ 1567: "jlicelmd",
+ 1568: "tsspmap",
+ 1569: "ets",
+ 1570: "orbixd",
+ 1571: "rdb-dbs-disp",
+ 1572: "chip-lm",
+ 1573: "itscomm-ns",
+ 1574: "mvel-lm",
+ 1575: "oraclenames",
+ 1576: "moldflow-lm",
+ 1577: "hypercube-lm",
+ 1578: "jacobus-lm",
+ 1579: "ioc-sea-lm",
+ 1580: "tn-tl-r2",
+ 1581: "mil-2045-47001",
+ 1582: "msims",
+ 1583: "simbaexpress",
+ 1584: "tn-tl-fd2",
+ 1585: "intv",
+ 1586: "ibm-abtact",
+ 1587: "pra-elmd",
+ 1588: "triquest-lm",
+ 1589: "vqp",
+ 1590: "gemini-lm",
+ 1591: "ncpm-pm",
+ 1592: "commonspace",
+ 1593: "mainsoft-lm",
+ 1594: "sixtrak",
+ 1595: "radio",
+ 1596: "radio-bc",
+ 1597: "orbplus-iiop",
+ 1598: "picknfs",
+ 1599: "simbaservices",
+ 1600: "issd",
+ 1601: "aas",
+ 1602: "inspect",
+ 1603: "picodbc",
+ 1604: "icabrowser",
+ 1605: "slp",
+ 1606: "slm-api",
+ 1607: "stt",
+ 1608: "smart-lm",
+ 1609: "isysg-lm",
+ 1610: "taurus-wh",
+ 1611: "ill",
+ 1612: "netbill-trans",
+ 1613: "netbill-keyrep",
+ 1614: "netbill-cred",
+ 1615: "netbill-auth",
+ 1616: "netbill-prod",
+ 1617: "nimrod-agent",
+ 1618: "skytelnet",
+ 1619: "xs-openstorage",
+ 1620: "faxportwinport",
+ 1621: "softdataphone",
+ 1622: "ontime",
+ 1623: "jaleosnd",
+ 1624: "udp-sr-port",
+ 1625: "svs-omagent",
+ 1626: "shockwave",
+ 1627: "t128-gateway",
+ 1628: "lontalk-norm",
+ 1629: "lontalk-urgnt",
+ 1630: "oraclenet8cman",
+ 1631: "visitview",
+ 1632: "pammratc",
+ 1633: "pammrpc",
+ 1634: "loaprobe",
+ 1635: "edb-server1",
+ 1636: "isdc",
+ 1637: "islc",
+ 1638: "ismc",
+ 1639: "cert-initiator",
+ 1640: "cert-responder",
+ 1641: "invision",
+ 1642: "isis-am",
+ 1643: "isis-ambc",
+ 1644: "saiseh",
+ 1645: "sightline",
+ 1646: "sa-msg-port",
+ 1647: "rsap",
+ 1648: "concurrent-lm",
+ 1649: "kermit",
+ 1650: "nkd",
+ 1651: "shiva-confsrvr",
+ 1652: "xnmp",
+ 1653: "alphatech-lm",
+ 1654: "stargatealerts",
+ 1655: "dec-mbadmin",
+ 1656: "dec-mbadmin-h",
+ 1657: "fujitsu-mmpdc",
+ 1658: "sixnetudr",
+ 1659: "sg-lm",
+ 1660: "skip-mc-gikreq",
+ 1661: "netview-aix-1",
+ 1662: "netview-aix-2",
+ 1663: "netview-aix-3",
+ 1664: "netview-aix-4",
+ 1665: "netview-aix-5",
+ 1666: "netview-aix-6",
+ 1667: "netview-aix-7",
+ 1668: "netview-aix-8",
+ 1669: "netview-aix-9",
+ 1670: "netview-aix-10",
+ 1671: "netview-aix-11",
+ 1672: "netview-aix-12",
+ 1673: "proshare-mc-1",
+ 1674: "proshare-mc-2",
+ 1675: "pdp",
+ 1676: "netcomm2",
+ 1677: "groupwise",
+ 1678: "prolink",
+ 1679: "darcorp-lm",
+ 1680: "microcom-sbp",
+ 1681: "sd-elmd",
+ 1682: "lanyon-lantern",
+ 1683: "ncpm-hip",
+ 1684: "snaresecure",
+ 1685: "n2nremote",
+ 1686: "cvmon",
+ 1687: "nsjtp-ctrl",
+ 1688: "nsjtp-data",
+ 1689: "firefox",
+ 1690: "ng-umds",
+ 1691: "empire-empuma",
+ 1692: "sstsys-lm",
+ 1693: "rrirtr",
+ 1694: "rrimwm",
+ 1695: "rrilwm",
+ 1696: "rrifmm",
+ 1697: "rrisat",
+ 1698: "rsvp-encap-1",
+ 1699: "rsvp-encap-2",
+ 1700: "mps-raft",
+ 1701: "l2f",
+ 1702: "deskshare",
+ 1703: "hb-engine",
+ 1704: "bcs-broker",
+ 1705: "slingshot",
+ 1706: "jetform",
+ 1707: "vdmplay",
+ 1708: "gat-lmd",
+ 1709: "centra",
+ 1710: "impera",
+ 1711: "pptconference",
+ 1712: "registrar",
+ 1713: "conferencetalk",
+ 1714: "sesi-lm",
+ 1715: "houdini-lm",
+ 1716: "xmsg",
+ 1717: "fj-hdnet",
+ 1718: "h323gatedisc",
+ 1719: "h323gatestat",
+ 1720: "h323hostcall",
+ 1721: "caicci",
+ 1722: "hks-lm",
+ 1723: "pptp",
+ 1724: "csbphonemaster",
+ 1725: "iden-ralp",
+ 1726: "iberiagames",
+ 1727: "winddx",
+ 1728: "telindus",
+ 1729: "citynl",
+ 1730: "roketz",
+ 1731: "msiccp",
+ 1732: "proxim",
+ 1733: "siipat",
+ 1734: "cambertx-lm",
+ 1735: "privatechat",
+ 1736: "street-stream",
+ 1737: "ultimad",
+ 1738: "gamegen1",
+ 1739: "webaccess",
+ 1740: "encore",
+ 1741: "cisco-net-mgmt",
+ 1742: "3Com-nsd",
+ 1743: "cinegrfx-lm",
+ 1744: "ncpm-ft",
+ 1745: "remote-winsock",
+ 1746: "ftrapid-1",
+ 1747: "ftrapid-2",
+ 1748: "oracle-em1",
+ 1749: "aspen-services",
+ 1750: "sslp",
+ 1751: "swiftnet",
+ 1752: "lofr-lm",
+ 1754: "oracle-em2",
+ 1755: "ms-streaming",
+ 1756: "capfast-lmd",
+ 1757: "cnhrp",
+ 1758: "tftp-mcast",
+ 1759: "spss-lm",
+ 1760: "www-ldap-gw",
+ 1761: "cft-0",
+ 1762: "cft-1",
+ 1763: "cft-2",
+ 1764: "cft-3",
+ 1765: "cft-4",
+ 1766: "cft-5",
+ 1767: "cft-6",
+ 1768: "cft-7",
+ 1769: "bmc-net-adm",
+ 1770: "bmc-net-svc",
+ 1771: "vaultbase",
+ 1772: "essweb-gw",
+ 1773: "kmscontrol",
+ 1774: "global-dtserv",
+ 1776: "femis",
+ 1777: "powerguardian",
+ 1778: "prodigy-intrnet",
+ 1779: "pharmasoft",
+ 1780: "dpkeyserv",
+ 1781: "answersoft-lm",
+ 1782: "hp-hcip",
+ 1784: "finle-lm",
+ 1785: "windlm",
+ 1786: "funk-logger",
+ 1787: "funk-license",
+ 1788: "psmond",
+ 1789: "hello",
+ 1790: "nmsp",
+ 1791: "ea1",
+ 1792: "ibm-dt-2",
+ 1793: "rsc-robot",
+ 1794: "cera-bcm",
+ 1795: "dpi-proxy",
+ 1796: "vocaltec-admin",
+ 1797: "uma",
+ 1798: "etp",
+ 1799: "netrisk",
+ 1800: "ansys-lm",
+ 1801: "msmq",
+ 1802: "concomp1",
+ 1803: "hp-hcip-gwy",
+ 1804: "enl",
+ 1805: "enl-name",
+ 1806: "musiconline",
+ 1807: "fhsp",
+ 1808: "oracle-vp2",
+ 1809: "oracle-vp1",
+ 1810: "jerand-lm",
+ 1811: "scientia-sdb",
+ 1812: "radius",
+ 1813: "radius-acct",
+ 1814: "tdp-suite",
+ 1815: "mmpft",
+ 1816: "harp",
+ 1817: "rkb-oscs",
+ 1818: "etftp",
+ 1819: "plato-lm",
+ 1820: "mcagent",
+ 1821: "donnyworld",
+ 1822: "es-elmd",
+ 1823: "unisys-lm",
+ 1824: "metrics-pas",
+ 1825: "direcpc-video",
+ 1826: "ardt",
+ 1827: "asi",
+ 1828: "itm-mcell-u",
+ 1829: "optika-emedia",
+ 1830: "net8-cman",
+ 1831: "myrtle",
+ 1832: "tht-treasure",
+ 1833: "udpradio",
+ 1834: "ardusuni",
+ 1835: "ardusmul",
+ 1836: "ste-smsc",
+ 1837: "csoft1",
+ 1838: "talnet",
+ 1839: "netopia-vo1",
+ 1840: "netopia-vo2",
+ 1841: "netopia-vo3",
+ 1842: "netopia-vo4",
+ 1843: "netopia-vo5",
+ 1844: "direcpc-dll",
+ 1845: "altalink",
+ 1846: "tunstall-pnc",
+ 1847: "slp-notify",
+ 1848: "fjdocdist",
+ 1849: "alpha-sms",
+ 1850: "gsi",
+ 1851: "ctcd",
+ 1852: "virtual-time",
+ 1853: "vids-avtp",
+ 1854: "buddy-draw",
+ 1855: "fiorano-rtrsvc",
+ 1856: "fiorano-msgsvc",
+ 1857: "datacaptor",
+ 1858: "privateark",
+ 1859: "gammafetchsvr",
+ 1860: "sunscalar-svc",
+ 1861: "lecroy-vicp",
+ 1862: "mysql-cm-agent",
+ 1863: "msnp",
+ 1864: "paradym-31port",
+ 1865: "entp",
+ 1866: "swrmi",
+ 1867: "udrive",
+ 1868: "viziblebrowser",
+ 1869: "transact",
+ 1870: "sunscalar-dns",
+ 1871: "canocentral0",
+ 1872: "canocentral1",
+ 1873: "fjmpjps",
+ 1874: "fjswapsnp",
+ 1875: "westell-stats",
+ 1876: "ewcappsrv",
+ 1877: "hp-webqosdb",
+ 1878: "drmsmc",
+ 1879: "nettgain-nms",
+ 1880: "vsat-control",
+ 1881: "ibm-mqseries2",
+ 1882: "ecsqdmn",
+ 1883: "mqtt",
+ 1884: "idmaps",
+ 1885: "vrtstrapserver",
+ 1886: "leoip",
+ 1887: "filex-lport",
+ 1888: "ncconfig",
+ 1889: "unify-adapter",
+ 1890: "wilkenlistener",
+ 1891: "childkey-notif",
+ 1892: "childkey-ctrl",
+ 1893: "elad",
+ 1894: "o2server-port",
+ 1896: "b-novative-ls",
+ 1897: "metaagent",
+ 1898: "cymtec-port",
+ 1899: "mc2studios",
+ 1900: "ssdp",
+ 1901: "fjicl-tep-a",
+ 1902: "fjicl-tep-b",
+ 1903: "linkname",
+ 1904: "fjicl-tep-c",
+ 1905: "sugp",
+ 1906: "tpmd",
+ 1907: "intrastar",
+ 1908: "dawn",
+ 1909: "global-wlink",
+ 1910: "ultrabac",
+ 1911: "mtp",
+ 1912: "rhp-iibp",
+ 1913: "armadp",
+ 1914: "elm-momentum",
+ 1915: "facelink",
+ 1916: "persona",
+ 1917: "noagent",
+ 1918: "can-nds",
+ 1919: "can-dch",
+ 1920: "can-ferret",
+ 1921: "noadmin",
+ 1922: "tapestry",
+ 1923: "spice",
+ 1924: "xiip",
+ 1925: "discovery-port",
+ 1926: "egs",
+ 1927: "videte-cipc",
+ 1928: "emsd-port",
+ 1929: "bandwiz-system",
+ 1930: "driveappserver",
+ 1931: "amdsched",
+ 1932: "ctt-broker",
+ 1933: "xmapi",
+ 1934: "xaapi",
+ 1935: "macromedia-fcs",
+ 1936: "jetcmeserver",
+ 1937: "jwserver",
+ 1938: "jwclient",
+ 1939: "jvserver",
+ 1940: "jvclient",
+ 1941: "dic-aida",
+ 1942: "res",
+ 1943: "beeyond-media",
+ 1944: "close-combat",
+ 1945: "dialogic-elmd",
+ 1946: "tekpls",
+ 1947: "sentinelsrm",
+ 1948: "eye2eye",
+ 1949: "ismaeasdaqlive",
+ 1950: "ismaeasdaqtest",
+ 1951: "bcs-lmserver",
+ 1952: "mpnjsc",
+ 1953: "rapidbase",
+ 1954: "abr-api",
+ 1955: "abr-secure",
+ 1956: "vrtl-vmf-ds",
+ 1957: "unix-status",
+ 1958: "dxadmind",
+ 1959: "simp-all",
+ 1960: "nasmanager",
+ 1961: "bts-appserver",
+ 1962: "biap-mp",
+ 1963: "webmachine",
+ 1964: "solid-e-engine",
+ 1965: "tivoli-npm",
+ 1966: "slush",
+ 1967: "sns-quote",
+ 1968: "lipsinc",
+ 1969: "lipsinc1",
+ 1970: "netop-rc",
+ 1971: "netop-school",
+ 1972: "intersys-cache",
+ 1973: "dlsrap",
+ 1974: "drp",
+ 1975: "tcoflashagent",
+ 1976: "tcoregagent",
+ 1977: "tcoaddressbook",
+ 1978: "unisql",
+ 1979: "unisql-java",
+ 1980: "pearldoc-xact",
+ 1981: "p2pq",
+ 1982: "estamp",
+ 1983: "lhtp",
+ 1984: "bb",
+ 1985: "hsrp",
+ 1986: "licensedaemon",
+ 1987: "tr-rsrb-p1",
+ 1988: "tr-rsrb-p2",
+ 1989: "tr-rsrb-p3",
+ 1990: "stun-p1",
+ 1991: "stun-p2",
+ 1992: "stun-p3",
+ 1993: "snmp-tcp-port",
+ 1994: "stun-port",
+ 1995: "perf-port",
+ 1996: "tr-rsrb-port",
+ 1997: "gdp-port",
+ 1998: "x25-svc-port",
+ 1999: "tcp-id-port",
+ 2000: "cisco-sccp",
+ 2001: "wizard",
+ 2002: "globe",
+ 2003: "brutus",
+ 2004: "emce",
+ 2005: "oracle",
+ 2006: "raid-cd",
+ 2007: "raid-am",
+ 2008: "terminaldb",
+ 2009: "whosockami",
+ 2010: "pipe-server",
+ 2011: "servserv",
+ 2012: "raid-ac",
+ 2013: "raid-cd",
+ 2014: "raid-sf",
+ 2015: "raid-cs",
+ 2016: "bootserver",
+ 2017: "bootclient",
+ 2018: "rellpack",
+ 2019: "about",
+ 2020: "xinupageserver",
+ 2021: "xinuexpansion1",
+ 2022: "xinuexpansion2",
+ 2023: "xinuexpansion3",
+ 2024: "xinuexpansion4",
+ 2025: "xribs",
+ 2026: "scrabble",
+ 2027: "shadowserver",
+ 2028: "submitserver",
+ 2029: "hsrpv6",
+ 2030: "device2",
+ 2031: "mobrien-chat",
+ 2032: "blackboard",
+ 2033: "glogger",
+ 2034: "scoremgr",
+ 2035: "imsldoc",
+ 2036: "e-dpnet",
+ 2037: "applus",
+ 2038: "objectmanager",
+ 2039: "prizma",
+ 2040: "lam",
+ 2041: "interbase",
+ 2042: "isis",
+ 2043: "isis-bcast",
+ 2044: "rimsl",
+ 2045: "cdfunc",
+ 2046: "sdfunc",
+ 2047: "dls",
+ 2048: "dls-monitor",
+ 2049: "shilp",
+ 2050: "av-emb-config",
+ 2051: "epnsdp",
+ 2052: "clearvisn",
+ 2053: "lot105-ds-upd",
+ 2054: "weblogin",
+ 2055: "iop",
+ 2056: "omnisky",
+ 2057: "rich-cp",
+ 2058: "newwavesearch",
+ 2059: "bmc-messaging",
+ 2060: "teleniumdaemon",
+ 2061: "netmount",
+ 2062: "icg-swp",
+ 2063: "icg-bridge",
+ 2064: "icg-iprelay",
+ 2065: "dlsrpn",
+ 2066: "aura",
+ 2067: "dlswpn",
+ 2068: "avauthsrvprtcl",
+ 2069: "event-port",
+ 2070: "ah-esp-encap",
+ 2071: "acp-port",
+ 2072: "msync",
+ 2073: "gxs-data-port",
+ 2074: "vrtl-vmf-sa",
+ 2075: "newlixengine",
+ 2076: "newlixconfig",
+ 2077: "tsrmagt",
+ 2078: "tpcsrvr",
+ 2079: "idware-router",
+ 2080: "autodesk-nlm",
+ 2081: "kme-trap-port",
+ 2082: "infowave",
+ 2083: "radsec",
+ 2084: "sunclustergeo",
+ 2085: "ada-cip",
+ 2086: "gnunet",
+ 2087: "eli",
+ 2088: "ip-blf",
+ 2089: "sep",
+ 2090: "lrp",
+ 2091: "prp",
+ 2092: "descent3",
+ 2093: "nbx-cc",
+ 2094: "nbx-au",
+ 2095: "nbx-ser",
+ 2096: "nbx-dir",
+ 2097: "jetformpreview",
+ 2098: "dialog-port",
+ 2099: "h2250-annex-g",
+ 2100: "amiganetfs",
+ 2101: "rtcm-sc104",
+ 2102: "zephyr-srv",
+ 2103: "zephyr-clt",
+ 2104: "zephyr-hm",
+ 2105: "minipay",
+ 2106: "mzap",
+ 2107: "bintec-admin",
+ 2108: "comcam",
+ 2109: "ergolight",
+ 2110: "umsp",
+ 2111: "dsatp",
+ 2112: "idonix-metanet",
+ 2113: "hsl-storm",
+ 2114: "newheights",
+ 2115: "kdm",
+ 2116: "ccowcmr",
+ 2117: "mentaclient",
+ 2118: "mentaserver",
+ 2119: "gsigatekeeper",
+ 2120: "qencp",
+ 2121: "scientia-ssdb",
+ 2122: "caupc-remote",
+ 2123: "gtp-control",
+ 2124: "elatelink",
+ 2125: "lockstep",
+ 2126: "pktcable-cops",
+ 2127: "index-pc-wb",
+ 2128: "net-steward",
+ 2129: "cs-live",
+ 2130: "xds",
+ 2131: "avantageb2b",
+ 2132: "solera-epmap",
+ 2133: "zymed-zpp",
+ 2134: "avenue",
+ 2135: "gris",
+ 2136: "appworxsrv",
+ 2137: "connect",
+ 2138: "unbind-cluster",
+ 2139: "ias-auth",
+ 2140: "ias-reg",
+ 2141: "ias-admind",
+ 2142: "tdmoip",
+ 2143: "lv-jc",
+ 2144: "lv-ffx",
+ 2145: "lv-pici",
+ 2146: "lv-not",
+ 2147: "lv-auth",
+ 2148: "veritas-ucl",
+ 2149: "acptsys",
+ 2150: "dynamic3d",
+ 2151: "docent",
+ 2152: "gtp-user",
+ 2153: "ctlptc",
+ 2154: "stdptc",
+ 2155: "brdptc",
+ 2156: "trp",
+ 2157: "xnds",
+ 2158: "touchnetplus",
+ 2159: "gdbremote",
+ 2160: "apc-2160",
+ 2161: "apc-2161",
+ 2162: "navisphere",
+ 2163: "navisphere-sec",
+ 2164: "ddns-v3",
+ 2165: "x-bone-api",
+ 2166: "iwserver",
+ 2167: "raw-serial",
+ 2168: "easy-soft-mux",
+ 2169: "brain",
+ 2170: "eyetv",
+ 2171: "msfw-storage",
+ 2172: "msfw-s-storage",
+ 2173: "msfw-replica",
+ 2174: "msfw-array",
+ 2175: "airsync",
+ 2176: "rapi",
+ 2177: "qwave",
+ 2178: "bitspeer",
+ 2179: "vmrdp",
+ 2180: "mc-gt-srv",
+ 2181: "eforward",
+ 2182: "cgn-stat",
+ 2183: "cgn-config",
+ 2184: "nvd",
+ 2185: "onbase-dds",
+ 2186: "gtaua",
+ 2187: "ssmd",
+ 2190: "tivoconnect",
+ 2191: "tvbus",
+ 2192: "asdis",
+ 2193: "drwcs",
+ 2197: "mnp-exchange",
+ 2198: "onehome-remote",
+ 2199: "onehome-help",
+ 2200: "ici",
+ 2201: "ats",
+ 2202: "imtc-map",
+ 2203: "b2-runtime",
+ 2204: "b2-license",
+ 2205: "jps",
+ 2206: "hpocbus",
+ 2207: "hpssd",
+ 2208: "hpiod",
+ 2209: "rimf-ps",
+ 2210: "noaaport",
+ 2211: "emwin",
+ 2212: "leecoposserver",
+ 2213: "kali",
+ 2214: "rpi",
+ 2215: "ipcore",
+ 2216: "vtu-comms",
+ 2217: "gotodevice",
+ 2218: "bounzza",
+ 2219: "netiq-ncap",
+ 2220: "netiq",
+ 2221: "ethernet-ip-s",
+ 2222: "EtherNet-IP-1",
+ 2223: "rockwell-csp2",
+ 2224: "efi-mg",
+ 2226: "di-drm",
+ 2227: "di-msg",
+ 2228: "ehome-ms",
+ 2229: "datalens",
+ 2230: "queueadm",
+ 2231: "wimaxasncp",
+ 2232: "ivs-video",
+ 2233: "infocrypt",
+ 2234: "directplay",
+ 2235: "sercomm-wlink",
+ 2236: "nani",
+ 2237: "optech-port1-lm",
+ 2238: "aviva-sna",
+ 2239: "imagequery",
+ 2240: "recipe",
+ 2241: "ivsd",
+ 2242: "foliocorp",
+ 2243: "magicom",
+ 2244: "nmsserver",
+ 2245: "hao",
+ 2246: "pc-mta-addrmap",
+ 2247: "antidotemgrsvr",
+ 2248: "ums",
+ 2249: "rfmp",
+ 2250: "remote-collab",
+ 2251: "dif-port",
+ 2252: "njenet-ssl",
+ 2253: "dtv-chan-req",
+ 2254: "seispoc",
+ 2255: "vrtp",
+ 2256: "pcc-mfp",
+ 2257: "simple-tx-rx",
+ 2258: "rcts",
+ 2260: "apc-2260",
+ 2261: "comotionmaster",
+ 2262: "comotionback",
+ 2263: "ecwcfg",
+ 2264: "apx500api-1",
+ 2265: "apx500api-2",
+ 2266: "mfserver",
+ 2267: "ontobroker",
+ 2268: "amt",
+ 2269: "mikey",
+ 2270: "starschool",
+ 2271: "mmcals",
+ 2272: "mmcal",
+ 2273: "mysql-im",
+ 2274: "pcttunnell",
+ 2275: "ibridge-data",
+ 2276: "ibridge-mgmt",
+ 2277: "bluectrlproxy",
+ 2278: "s3db",
+ 2279: "xmquery",
+ 2280: "lnvpoller",
+ 2281: "lnvconsole",
+ 2282: "lnvalarm",
+ 2283: "lnvstatus",
+ 2284: "lnvmaps",
+ 2285: "lnvmailmon",
+ 2286: "nas-metering",
+ 2287: "dna",
+ 2288: "netml",
+ 2289: "dict-lookup",
+ 2290: "sonus-logging",
+ 2291: "eapsp",
+ 2292: "mib-streaming",
+ 2293: "npdbgmngr",
+ 2294: "konshus-lm",
+ 2295: "advant-lm",
+ 2296: "theta-lm",
+ 2297: "d2k-datamover1",
+ 2298: "d2k-datamover2",
+ 2299: "pc-telecommute",
+ 2300: "cvmmon",
+ 2301: "cpq-wbem",
+ 2302: "binderysupport",
+ 2303: "proxy-gateway",
+ 2304: "attachmate-uts",
+ 2305: "mt-scaleserver",
+ 2306: "tappi-boxnet",
+ 2307: "pehelp",
+ 2308: "sdhelp",
+ 2309: "sdserver",
+ 2310: "sdclient",
+ 2311: "messageservice",
+ 2312: "wanscaler",
+ 2313: "iapp",
+ 2314: "cr-websystems",
+ 2315: "precise-sft",
+ 2316: "sent-lm",
+ 2317: "attachmate-g32",
+ 2318: "cadencecontrol",
+ 2319: "infolibria",
+ 2320: "siebel-ns",
+ 2321: "rdlap",
+ 2322: "ofsd",
+ 2323: "3d-nfsd",
+ 2324: "cosmocall",
+ 2325: "ansysli",
+ 2326: "idcp",
+ 2327: "xingcsm",
+ 2328: "netrix-sftm",
+ 2329: "nvd",
+ 2330: "tscchat",
+ 2331: "agentview",
+ 2332: "rcc-host",
+ 2333: "snapp",
+ 2334: "ace-client",
+ 2335: "ace-proxy",
+ 2336: "appleugcontrol",
+ 2337: "ideesrv",
+ 2338: "norton-lambert",
+ 2339: "3com-webview",
+ 2340: "wrs-registry",
+ 2341: "xiostatus",
+ 2342: "manage-exec",
+ 2343: "nati-logos",
+ 2344: "fcmsys",
+ 2345: "dbm",
+ 2346: "redstorm-join",
+ 2347: "redstorm-find",
+ 2348: "redstorm-info",
+ 2349: "redstorm-diag",
+ 2350: "psbserver",
+ 2351: "psrserver",
+ 2352: "pslserver",
+ 2353: "pspserver",
+ 2354: "psprserver",
+ 2355: "psdbserver",
+ 2356: "gxtelmd",
+ 2357: "unihub-server",
+ 2358: "futrix",
+ 2359: "flukeserver",
+ 2360: "nexstorindltd",
+ 2361: "tl1",
+ 2362: "digiman",
+ 2363: "mediacntrlnfsd",
+ 2364: "oi-2000",
+ 2365: "dbref",
+ 2366: "qip-login",
+ 2367: "service-ctrl",
+ 2368: "opentable",
+ 2370: "l3-hbmon",
+ 2372: "lanmessenger",
+ 2381: "compaq-https",
+ 2382: "ms-olap3",
+ 2383: "ms-olap4",
+ 2384: "sd-capacity",
+ 2385: "sd-data",
+ 2386: "virtualtape",
+ 2387: "vsamredirector",
+ 2388: "mynahautostart",
+ 2389: "ovsessionmgr",
+ 2390: "rsmtp",
+ 2391: "3com-net-mgmt",
+ 2392: "tacticalauth",
+ 2393: "ms-olap1",
+ 2394: "ms-olap2",
+ 2395: "lan900-remote",
+ 2396: "wusage",
+ 2397: "ncl",
+ 2398: "orbiter",
+ 2399: "fmpro-fdal",
+ 2400: "opequus-server",
+ 2401: "cvspserver",
+ 2402: "taskmaster2000",
+ 2403: "taskmaster2000",
+ 2404: "iec-104",
+ 2405: "trc-netpoll",
+ 2406: "jediserver",
+ 2407: "orion",
+ 2409: "sns-protocol",
+ 2410: "vrts-registry",
+ 2411: "netwave-ap-mgmt",
+ 2412: "cdn",
+ 2413: "orion-rmi-reg",
+ 2414: "beeyond",
+ 2415: "codima-rtp",
+ 2416: "rmtserver",
+ 2417: "composit-server",
+ 2418: "cas",
+ 2419: "attachmate-s2s",
+ 2420: "dslremote-mgmt",
+ 2421: "g-talk",
+ 2422: "crmsbits",
+ 2423: "rnrp",
+ 2424: "kofax-svr",
+ 2425: "fjitsuappmgr",
+ 2426: "vcmp",
+ 2427: "mgcp-gateway",
+ 2428: "ott",
+ 2429: "ft-role",
+ 2430: "venus",
+ 2431: "venus-se",
+ 2432: "codasrv",
+ 2433: "codasrv-se",
+ 2434: "pxc-epmap",
+ 2435: "optilogic",
+ 2436: "topx",
+ 2437: "unicontrol",
+ 2438: "msp",
+ 2439: "sybasedbsynch",
+ 2440: "spearway",
+ 2441: "pvsw-inet",
+ 2442: "netangel",
+ 2443: "powerclientcsf",
+ 2444: "btpp2sectrans",
+ 2445: "dtn1",
+ 2446: "bues-service",
+ 2447: "ovwdb",
+ 2448: "hpppssvr",
+ 2449: "ratl",
+ 2450: "netadmin",
+ 2451: "netchat",
+ 2452: "snifferclient",
+ 2453: "madge-ltd",
+ 2454: "indx-dds",
+ 2455: "wago-io-system",
+ 2456: "altav-remmgt",
+ 2457: "rapido-ip",
+ 2458: "griffin",
+ 2459: "community",
+ 2460: "ms-theater",
+ 2461: "qadmifoper",
+ 2462: "qadmifevent",
+ 2463: "lsi-raid-mgmt",
+ 2464: "direcpc-si",
+ 2465: "lbm",
+ 2466: "lbf",
+ 2467: "high-criteria",
+ 2468: "qip-msgd",
+ 2469: "mti-tcs-comm",
+ 2470: "taskman-port",
+ 2471: "seaodbc",
+ 2472: "c3",
+ 2473: "aker-cdp",
+ 2474: "vitalanalysis",
+ 2475: "ace-server",
+ 2476: "ace-svr-prop",
+ 2477: "ssm-cvs",
+ 2478: "ssm-cssps",
+ 2479: "ssm-els",
+ 2480: "powerexchange",
+ 2481: "giop",
+ 2482: "giop-ssl",
+ 2483: "ttc",
+ 2484: "ttc-ssl",
+ 2485: "netobjects1",
+ 2486: "netobjects2",
+ 2487: "pns",
+ 2488: "moy-corp",
+ 2489: "tsilb",
+ 2490: "qip-qdhcp",
+ 2491: "conclave-cpp",
+ 2492: "groove",
+ 2493: "talarian-mqs",
+ 2494: "bmc-ar",
+ 2495: "fast-rem-serv",
+ 2496: "dirgis",
+ 2497: "quaddb",
+ 2498: "odn-castraq",
+ 2499: "unicontrol",
+ 2500: "rtsserv",
+ 2501: "rtsclient",
+ 2502: "kentrox-prot",
+ 2503: "nms-dpnss",
+ 2504: "wlbs",
+ 2505: "ppcontrol",
+ 2506: "jbroker",
+ 2507: "spock",
+ 2508: "jdatastore",
+ 2509: "fjmpss",
+ 2510: "fjappmgrbulk",
+ 2511: "metastorm",
+ 2512: "citrixima",
+ 2513: "citrixadmin",
+ 2514: "facsys-ntp",
+ 2515: "facsys-router",
+ 2516: "maincontrol",
+ 2517: "call-sig-trans",
+ 2518: "willy",
+ 2519: "globmsgsvc",
+ 2520: "pvsw",
+ 2521: "adaptecmgr",
+ 2522: "windb",
+ 2523: "qke-llc-v3",
+ 2524: "optiwave-lm",
+ 2525: "ms-v-worlds",
+ 2526: "ema-sent-lm",
+ 2527: "iqserver",
+ 2528: "ncr-ccl",
+ 2529: "utsftp",
+ 2530: "vrcommerce",
+ 2531: "ito-e-gui",
+ 2532: "ovtopmd",
+ 2533: "snifferserver",
+ 2534: "combox-web-acc",
+ 2535: "madcap",
+ 2536: "btpp2audctr1",
+ 2537: "upgrade",
+ 2538: "vnwk-prapi",
+ 2539: "vsiadmin",
+ 2540: "lonworks",
+ 2541: "lonworks2",
+ 2542: "udrawgraph",
+ 2543: "reftek",
+ 2544: "novell-zen",
+ 2545: "sis-emt",
+ 2546: "vytalvaultbrtp",
+ 2547: "vytalvaultvsmp",
+ 2548: "vytalvaultpipe",
+ 2549: "ipass",
+ 2550: "ads",
+ 2551: "isg-uda-server",
+ 2552: "call-logging",
+ 2553: "efidiningport",
+ 2554: "vcnet-link-v10",
+ 2555: "compaq-wcp",
+ 2556: "nicetec-nmsvc",
+ 2557: "nicetec-mgmt",
+ 2558: "pclemultimedia",
+ 2559: "lstp",
+ 2560: "labrat",
+ 2561: "mosaixcc",
+ 2562: "delibo",
+ 2563: "cti-redwood",
+ 2564: "hp-3000-telnet",
+ 2565: "coord-svr",
+ 2566: "pcs-pcw",
+ 2567: "clp",
+ 2568: "spamtrap",
+ 2569: "sonuscallsig",
+ 2570: "hs-port",
+ 2571: "cecsvc",
+ 2572: "ibp",
+ 2573: "trustestablish",
+ 2574: "blockade-bpsp",
+ 2575: "hl7",
+ 2576: "tclprodebugger",
+ 2577: "scipticslsrvr",
+ 2578: "rvs-isdn-dcp",
+ 2579: "mpfoncl",
+ 2580: "tributary",
+ 2581: "argis-te",
+ 2582: "argis-ds",
+ 2583: "mon",
+ 2584: "cyaserv",
+ 2585: "netx-server",
+ 2586: "netx-agent",
+ 2587: "masc",
+ 2588: "privilege",
+ 2589: "quartus-tcl",
+ 2590: "idotdist",
+ 2591: "maytagshuffle",
+ 2592: "netrek",
+ 2593: "mns-mail",
+ 2594: "dts",
+ 2595: "worldfusion1",
+ 2596: "worldfusion2",
+ 2597: "homesteadglory",
+ 2598: "citriximaclient",
+ 2599: "snapd",
+ 2600: "hpstgmgr",
+ 2601: "discp-client",
+ 2602: "discp-server",
+ 2603: "servicemeter",
+ 2604: "nsc-ccs",
+ 2605: "nsc-posa",
+ 2606: "netmon",
+ 2607: "connection",
+ 2608: "wag-service",
+ 2609: "system-monitor",
+ 2610: "versa-tek",
+ 2611: "lionhead",
+ 2612: "qpasa-agent",
+ 2613: "smntubootstrap",
+ 2614: "neveroffline",
+ 2615: "firepower",
+ 2616: "appswitch-emp",
+ 2617: "cmadmin",
+ 2618: "priority-e-com",
+ 2619: "bruce",
+ 2620: "lpsrecommender",
+ 2621: "miles-apart",
+ 2622: "metricadbc",
+ 2623: "lmdp",
+ 2624: "aria",
+ 2625: "blwnkl-port",
+ 2626: "gbjd816",
+ 2627: "moshebeeri",
+ 2628: "dict",
+ 2629: "sitaraserver",
+ 2630: "sitaramgmt",
+ 2631: "sitaradir",
+ 2632: "irdg-post",
+ 2633: "interintelli",
+ 2634: "pk-electronics",
+ 2635: "backburner",
+ 2636: "solve",
+ 2637: "imdocsvc",
+ 2638: "sybaseanywhere",
+ 2639: "aminet",
+ 2640: "ami-control",
+ 2641: "hdl-srv",
+ 2642: "tragic",
+ 2643: "gte-samp",
+ 2644: "travsoft-ipx-t",
+ 2645: "novell-ipx-cmd",
+ 2646: "and-lm",
+ 2647: "syncserver",
+ 2648: "upsnotifyprot",
+ 2649: "vpsipport",
+ 2650: "eristwoguns",
+ 2651: "ebinsite",
+ 2652: "interpathpanel",
+ 2653: "sonus",
+ 2654: "corel-vncadmin",
+ 2655: "unglue",
+ 2656: "kana",
+ 2657: "sns-dispatcher",
+ 2658: "sns-admin",
+ 2659: "sns-query",
+ 2660: "gcmonitor",
+ 2661: "olhost",
+ 2662: "bintec-capi",
+ 2663: "bintec-tapi",
+ 2664: "patrol-mq-gm",
+ 2665: "patrol-mq-nm",
+ 2666: "extensis",
+ 2667: "alarm-clock-s",
+ 2668: "alarm-clock-c",
+ 2669: "toad",
+ 2670: "tve-announce",
+ 2671: "newlixreg",
+ 2672: "nhserver",
+ 2673: "firstcall42",
+ 2674: "ewnn",
+ 2675: "ttc-etap",
+ 2676: "simslink",
+ 2677: "gadgetgate1way",
+ 2678: "gadgetgate2way",
+ 2679: "syncserverssl",
+ 2680: "pxc-sapxom",
+ 2681: "mpnjsomb",
+ 2683: "ncdloadbalance",
+ 2684: "mpnjsosv",
+ 2685: "mpnjsocl",
+ 2686: "mpnjsomg",
+ 2687: "pq-lic-mgmt",
+ 2688: "md-cg-http",
+ 2689: "fastlynx",
+ 2690: "hp-nnm-data",
+ 2691: "itinternet",
+ 2692: "admins-lms",
+ 2694: "pwrsevent",
+ 2695: "vspread",
+ 2696: "unifyadmin",
+ 2697: "oce-snmp-trap",
+ 2698: "mck-ivpip",
+ 2699: "csoft-plusclnt",
+ 2700: "tqdata",
+ 2701: "sms-rcinfo",
+ 2702: "sms-xfer",
+ 2703: "sms-chat",
+ 2704: "sms-remctrl",
+ 2705: "sds-admin",
+ 2706: "ncdmirroring",
+ 2707: "emcsymapiport",
+ 2708: "banyan-net",
+ 2709: "supermon",
+ 2710: "sso-service",
+ 2711: "sso-control",
+ 2712: "aocp",
+ 2713: "raventbs",
+ 2714: "raventdm",
+ 2715: "hpstgmgr2",
+ 2716: "inova-ip-disco",
+ 2717: "pn-requester",
+ 2718: "pn-requester2",
+ 2719: "scan-change",
+ 2720: "wkars",
+ 2721: "smart-diagnose",
+ 2722: "proactivesrvr",
+ 2723: "watchdog-nt",
+ 2724: "qotps",
+ 2725: "msolap-ptp2",
+ 2726: "tams",
+ 2727: "mgcp-callagent",
+ 2728: "sqdr",
+ 2729: "tcim-control",
+ 2730: "nec-raidplus",
+ 2731: "fyre-messanger",
+ 2732: "g5m",
+ 2733: "signet-ctf",
+ 2734: "ccs-software",
+ 2735: "netiq-mc",
+ 2736: "radwiz-nms-srv",
+ 2737: "srp-feedback",
+ 2738: "ndl-tcp-ois-gw",
+ 2739: "tn-timing",
+ 2740: "alarm",
+ 2741: "tsb",
+ 2742: "tsb2",
+ 2743: "murx",
+ 2744: "honyaku",
+ 2745: "urbisnet",
+ 2746: "cpudpencap",
+ 2747: "fjippol-swrly",
+ 2748: "fjippol-polsvr",
+ 2749: "fjippol-cnsl",
+ 2750: "fjippol-port1",
+ 2751: "fjippol-port2",
+ 2752: "rsisysaccess",
+ 2753: "de-spot",
+ 2754: "apollo-cc",
+ 2755: "expresspay",
+ 2756: "simplement-tie",
+ 2757: "cnrp",
+ 2758: "apollo-status",
+ 2759: "apollo-gms",
+ 2760: "sabams",
+ 2761: "dicom-iscl",
+ 2762: "dicom-tls",
+ 2763: "desktop-dna",
+ 2764: "data-insurance",
+ 2765: "qip-audup",
+ 2766: "compaq-scp",
+ 2767: "uadtc",
+ 2768: "uacs",
+ 2769: "exce",
+ 2770: "veronica",
+ 2771: "vergencecm",
+ 2772: "auris",
+ 2773: "rbakcup1",
+ 2774: "rbakcup2",
+ 2775: "smpp",
+ 2776: "ridgeway1",
+ 2777: "ridgeway2",
+ 2778: "gwen-sonya",
+ 2779: "lbc-sync",
+ 2780: "lbc-control",
+ 2781: "whosells",
+ 2782: "everydayrc",
+ 2783: "aises",
+ 2784: "www-dev",
+ 2785: "aic-np",
+ 2786: "aic-oncrpc",
+ 2787: "piccolo",
+ 2788: "fryeserv",
+ 2789: "media-agent",
+ 2790: "plgproxy",
+ 2791: "mtport-regist",
+ 2792: "f5-globalsite",
+ 2793: "initlsmsad",
+ 2795: "livestats",
+ 2796: "ac-tech",
+ 2797: "esp-encap",
+ 2798: "tmesis-upshot",
+ 2799: "icon-discover",
+ 2800: "acc-raid",
+ 2801: "igcp",
+ 2802: "veritas-udp1",
+ 2803: "btprjctrl",
+ 2804: "dvr-esm",
+ 2805: "wta-wsp-s",
+ 2806: "cspuni",
+ 2807: "cspmulti",
+ 2808: "j-lan-p",
+ 2809: "corbaloc",
+ 2810: "netsteward",
+ 2811: "gsiftp",
+ 2812: "atmtcp",
+ 2813: "llm-pass",
+ 2814: "llm-csv",
+ 2815: "lbc-measure",
+ 2816: "lbc-watchdog",
+ 2817: "nmsigport",
+ 2818: "rmlnk",
+ 2819: "fc-faultnotify",
+ 2820: "univision",
+ 2821: "vrts-at-port",
+ 2822: "ka0wuc",
+ 2823: "cqg-netlan",
+ 2824: "cqg-netlan-1",
+ 2826: "slc-systemlog",
+ 2827: "slc-ctrlrloops",
+ 2828: "itm-lm",
+ 2829: "silkp1",
+ 2830: "silkp2",
+ 2831: "silkp3",
+ 2832: "silkp4",
+ 2833: "glishd",
+ 2834: "evtp",
+ 2835: "evtp-data",
+ 2836: "catalyst",
+ 2837: "repliweb",
+ 2838: "starbot",
+ 2839: "nmsigport",
+ 2840: "l3-exprt",
+ 2841: "l3-ranger",
+ 2842: "l3-hawk",
+ 2843: "pdnet",
+ 2844: "bpcp-poll",
+ 2845: "bpcp-trap",
+ 2846: "aimpp-hello",
+ 2847: "aimpp-port-req",
+ 2848: "amt-blc-port",
+ 2849: "fxp",
+ 2850: "metaconsole",
+ 2851: "webemshttp",
+ 2852: "bears-01",
+ 2853: "ispipes",
+ 2854: "infomover",
+ 2856: "cesdinv",
+ 2857: "simctlp",
+ 2858: "ecnp",
+ 2859: "activememory",
+ 2860: "dialpad-voice1",
+ 2861: "dialpad-voice2",
+ 2862: "ttg-protocol",
+ 2863: "sonardata",
+ 2864: "astromed-main",
+ 2865: "pit-vpn",
+ 2866: "iwlistener",
+ 2867: "esps-portal",
+ 2868: "npep-messaging",
+ 2869: "icslap",
+ 2870: "daishi",
+ 2871: "msi-selectplay",
+ 2872: "radix",
+ 2874: "dxmessagebase1",
+ 2875: "dxmessagebase2",
+ 2876: "sps-tunnel",
+ 2877: "bluelance",
+ 2878: "aap",
+ 2879: "ucentric-ds",
+ 2880: "synapse",
+ 2881: "ndsp",
+ 2882: "ndtp",
+ 2883: "ndnp",
+ 2884: "flashmsg",
+ 2885: "topflow",
+ 2886: "responselogic",
+ 2887: "aironetddp",
+ 2888: "spcsdlobby",
+ 2889: "rsom",
+ 2890: "cspclmulti",
+ 2891: "cinegrfx-elmd",
+ 2892: "snifferdata",
+ 2893: "vseconnector",
+ 2894: "abacus-remote",
+ 2895: "natuslink",
+ 2896: "ecovisiong6-1",
+ 2897: "citrix-rtmp",
+ 2898: "appliance-cfg",
+ 2899: "powergemplus",
+ 2900: "quicksuite",
+ 2901: "allstorcns",
+ 2902: "netaspi",
+ 2903: "suitcase",
+ 2904: "m2ua",
+ 2906: "caller9",
+ 2907: "webmethods-b2b",
+ 2908: "mao",
+ 2909: "funk-dialout",
+ 2910: "tdaccess",
+ 2911: "blockade",
+ 2912: "epicon",
+ 2913: "boosterware",
+ 2914: "gamelobby",
+ 2915: "tksocket",
+ 2916: "elvin-server",
+ 2917: "elvin-client",
+ 2918: "kastenchasepad",
+ 2919: "roboer",
+ 2920: "roboeda",
+ 2921: "cesdcdman",
+ 2922: "cesdcdtrn",
+ 2923: "wta-wsp-wtp-s",
+ 2924: "precise-vip",
+ 2926: "mobile-file-dl",
+ 2927: "unimobilectrl",
+ 2928: "redstone-cpss",
+ 2929: "amx-webadmin",
+ 2930: "amx-weblinx",
+ 2931: "circle-x",
+ 2932: "incp",
+ 2933: "4-tieropmgw",
+ 2934: "4-tieropmcli",
+ 2935: "qtp",
+ 2936: "otpatch",
+ 2937: "pnaconsult-lm",
+ 2938: "sm-pas-1",
+ 2939: "sm-pas-2",
+ 2940: "sm-pas-3",
+ 2941: "sm-pas-4",
+ 2942: "sm-pas-5",
+ 2943: "ttnrepository",
+ 2944: "megaco-h248",
+ 2945: "h248-binary",
+ 2946: "fjsvmpor",
+ 2947: "gpsd",
+ 2948: "wap-push",
+ 2949: "wap-pushsecure",
+ 2950: "esip",
+ 2951: "ottp",
+ 2952: "mpfwsas",
+ 2953: "ovalarmsrv",
+ 2954: "ovalarmsrv-cmd",
+ 2955: "csnotify",
+ 2956: "ovrimosdbman",
+ 2957: "jmact5",
+ 2958: "jmact6",
+ 2959: "rmopagt",
+ 2960: "dfoxserver",
+ 2961: "boldsoft-lm",
+ 2962: "iph-policy-cli",
+ 2963: "iph-policy-adm",
+ 2964: "bullant-srap",
+ 2965: "bullant-rap",
+ 2966: "idp-infotrieve",
+ 2967: "ssc-agent",
+ 2968: "enpp",
+ 2969: "essp",
+ 2970: "index-net",
+ 2971: "netclip",
+ 2972: "pmsm-webrctl",
+ 2973: "svnetworks",
+ 2974: "signal",
+ 2975: "fjmpcm",
+ 2976: "cns-srv-port",
+ 2977: "ttc-etap-ns",
+ 2978: "ttc-etap-ds",
+ 2979: "h263-video",
+ 2980: "wimd",
+ 2981: "mylxamport",
+ 2982: "iwb-whiteboard",
+ 2983: "netplan",
+ 2984: "hpidsadmin",
+ 2985: "hpidsagent",
+ 2986: "stonefalls",
+ 2987: "identify",
+ 2988: "hippad",
+ 2989: "zarkov",
+ 2990: "boscap",
+ 2991: "wkstn-mon",
+ 2992: "avenyo",
+ 2993: "veritas-vis1",
+ 2994: "veritas-vis2",
+ 2995: "idrs",
+ 2996: "vsixml",
+ 2997: "rebol",
+ 2998: "realsecure",
+ 2999: "remoteware-un",
+ 3000: "hbci",
+ 3002: "exlm-agent",
+ 3003: "cgms",
+ 3004: "csoftragent",
+ 3005: "geniuslm",
+ 3006: "ii-admin",
+ 3007: "lotusmtap",
+ 3008: "midnight-tech",
+ 3009: "pxc-ntfy",
+ 3010: "ping-pong",
+ 3011: "trusted-web",
+ 3012: "twsdss",
+ 3013: "gilatskysurfer",
+ 3014: "broker-service",
+ 3015: "nati-dstp",
+ 3016: "notify-srvr",
+ 3017: "event-listener",
+ 3018: "srvc-registry",
+ 3019: "resource-mgr",
+ 3020: "cifs",
+ 3021: "agriserver",
+ 3022: "csregagent",
+ 3023: "magicnotes",
+ 3024: "nds-sso",
+ 3025: "arepa-raft",
+ 3026: "agri-gateway",
+ 3027: "LiebDevMgmt-C",
+ 3028: "LiebDevMgmt-DM",
+ 3029: "LiebDevMgmt-A",
+ 3030: "arepa-cas",
+ 3031: "eppc",
+ 3032: "redwood-chat",
+ 3033: "pdb",
+ 3034: "osmosis-aeea",
+ 3035: "fjsv-gssagt",
+ 3036: "hagel-dump",
+ 3037: "hp-san-mgmt",
+ 3038: "santak-ups",
+ 3039: "cogitate",
+ 3040: "tomato-springs",
+ 3041: "di-traceware",
+ 3042: "journee",
+ 3043: "brp",
+ 3044: "epp",
+ 3045: "responsenet",
+ 3046: "di-ase",
+ 3047: "hlserver",
+ 3048: "pctrader",
+ 3049: "nsws",
+ 3050: "gds-db",
+ 3051: "galaxy-server",
+ 3052: "apc-3052",
+ 3053: "dsom-server",
+ 3054: "amt-cnf-prot",
+ 3055: "policyserver",
+ 3056: "cdl-server",
+ 3057: "goahead-fldup",
+ 3058: "videobeans",
+ 3059: "qsoft",
+ 3060: "interserver",
+ 3061: "cautcpd",
+ 3062: "ncacn-ip-tcp",
+ 3063: "ncadg-ip-udp",
+ 3064: "rprt",
+ 3065: "slinterbase",
+ 3066: "netattachsdmp",
+ 3067: "fjhpjp",
+ 3068: "ls3bcast",
+ 3069: "ls3",
+ 3070: "mgxswitch",
+ 3072: "csd-monitor",
+ 3073: "vcrp",
+ 3074: "xbox",
+ 3075: "orbix-locator",
+ 3076: "orbix-config",
+ 3077: "orbix-loc-ssl",
+ 3078: "orbix-cfg-ssl",
+ 3079: "lv-frontpanel",
+ 3080: "stm-pproc",
+ 3081: "tl1-lv",
+ 3082: "tl1-raw",
+ 3083: "tl1-telnet",
+ 3084: "itm-mccs",
+ 3085: "pcihreq",
+ 3086: "jdl-dbkitchen",
+ 3087: "asoki-sma",
+ 3088: "xdtp",
+ 3089: "ptk-alink",
+ 3090: "stss",
+ 3091: "1ci-smcs",
+ 3093: "rapidmq-center",
+ 3094: "rapidmq-reg",
+ 3095: "panasas",
+ 3096: "ndl-aps",
+ 3098: "umm-port",
+ 3099: "chmd",
+ 3100: "opcon-xps",
+ 3101: "hp-pxpib",
+ 3102: "slslavemon",
+ 3103: "autocuesmi",
+ 3104: "autocuetime",
+ 3105: "cardbox",
+ 3106: "cardbox-http",
+ 3107: "business",
+ 3108: "geolocate",
+ 3109: "personnel",
+ 3110: "sim-control",
+ 3111: "wsynch",
+ 3112: "ksysguard",
+ 3113: "cs-auth-svr",
+ 3114: "ccmad",
+ 3115: "mctet-master",
+ 3116: "mctet-gateway",
+ 3117: "mctet-jserv",
+ 3118: "pkagent",
+ 3119: "d2000kernel",
+ 3120: "d2000webserver",
+ 3122: "vtr-emulator",
+ 3123: "edix",
+ 3124: "beacon-port",
+ 3125: "a13-an",
+ 3127: "ctx-bridge",
+ 3128: "ndl-aas",
+ 3129: "netport-id",
+ 3130: "icpv2",
+ 3131: "netbookmark",
+ 3132: "ms-rule-engine",
+ 3133: "prism-deploy",
+ 3134: "ecp",
+ 3135: "peerbook-port",
+ 3136: "grubd",
+ 3137: "rtnt-1",
+ 3138: "rtnt-2",
+ 3139: "incognitorv",
+ 3140: "ariliamulti",
+ 3141: "vmodem",
+ 3142: "rdc-wh-eos",
+ 3143: "seaview",
+ 3144: "tarantella",
+ 3145: "csi-lfap",
+ 3146: "bears-02",
+ 3147: "rfio",
+ 3148: "nm-game-admin",
+ 3149: "nm-game-server",
+ 3150: "nm-asses-admin",
+ 3151: "nm-assessor",
+ 3152: "feitianrockey",
+ 3153: "s8-client-port",
+ 3154: "ccmrmi",
+ 3155: "jpegmpeg",
+ 3156: "indura",
+ 3157: "e3consultants",
+ 3158: "stvp",
+ 3159: "navegaweb-port",
+ 3160: "tip-app-server",
+ 3161: "doc1lm",
+ 3162: "sflm",
+ 3163: "res-sap",
+ 3164: "imprs",
+ 3165: "newgenpay",
+ 3166: "sossecollector",
+ 3167: "nowcontact",
+ 3168: "poweronnud",
+ 3169: "serverview-as",
+ 3170: "serverview-asn",
+ 3171: "serverview-gf",
+ 3172: "serverview-rm",
+ 3173: "serverview-icc",
+ 3174: "armi-server",
+ 3175: "t1-e1-over-ip",
+ 3176: "ars-master",
+ 3177: "phonex-port",
+ 3178: "radclientport",
+ 3179: "h2gf-w-2m",
+ 3180: "mc-brk-srv",
+ 3181: "bmcpatrolagent",
+ 3182: "bmcpatrolrnvu",
+ 3183: "cops-tls",
+ 3184: "apogeex-port",
+ 3185: "smpppd",
+ 3186: "iiw-port",
+ 3187: "odi-port",
+ 3188: "brcm-comm-port",
+ 3189: "pcle-infex",
+ 3190: "csvr-proxy",
+ 3191: "csvr-sslproxy",
+ 3192: "firemonrcc",
+ 3193: "spandataport",
+ 3194: "magbind",
+ 3195: "ncu-1",
+ 3196: "ncu-2",
+ 3197: "embrace-dp-s",
+ 3198: "embrace-dp-c",
+ 3199: "dmod-workspace",
+ 3200: "tick-port",
+ 3201: "cpq-tasksmart",
+ 3202: "intraintra",
+ 3203: "netwatcher-mon",
+ 3204: "netwatcher-db",
+ 3205: "isns",
+ 3206: "ironmail",
+ 3207: "vx-auth-port",
+ 3208: "pfu-prcallback",
+ 3209: "netwkpathengine",
+ 3210: "flamenco-proxy",
+ 3211: "avsecuremgmt",
+ 3212: "surveyinst",
+ 3213: "neon24x7",
+ 3214: "jmq-daemon-1",
+ 3215: "jmq-daemon-2",
+ 3216: "ferrari-foam",
+ 3217: "unite",
+ 3218: "smartpackets",
+ 3219: "wms-messenger",
+ 3220: "xnm-ssl",
+ 3221: "xnm-clear-text",
+ 3222: "glbp",
+ 3223: "digivote",
+ 3224: "aes-discovery",
+ 3225: "fcip-port",
+ 3226: "isi-irp",
+ 3227: "dwnmshttp",
+ 3228: "dwmsgserver",
+ 3229: "global-cd-port",
+ 3230: "sftdst-port",
+ 3231: "vidigo",
+ 3232: "mdtp",
+ 3233: "whisker",
+ 3234: "alchemy",
+ 3235: "mdap-port",
+ 3236: "apparenet-ts",
+ 3237: "apparenet-tps",
+ 3238: "apparenet-as",
+ 3239: "apparenet-ui",
+ 3240: "triomotion",
+ 3241: "sysorb",
+ 3242: "sdp-id-port",
+ 3243: "timelot",
+ 3244: "onesaf",
+ 3245: "vieo-fe",
+ 3246: "dvt-system",
+ 3247: "dvt-data",
+ 3248: "procos-lm",
+ 3249: "ssp",
+ 3250: "hicp",
+ 3251: "sysscanner",
+ 3252: "dhe",
+ 3253: "pda-data",
+ 3254: "pda-sys",
+ 3255: "semaphore",
+ 3256: "cpqrpm-agent",
+ 3257: "cpqrpm-server",
+ 3258: "ivecon-port",
+ 3259: "epncdp2",
+ 3260: "iscsi-target",
+ 3261: "winshadow",
+ 3262: "necp",
+ 3263: "ecolor-imager",
+ 3264: "ccmail",
+ 3265: "altav-tunnel",
+ 3266: "ns-cfg-server",
+ 3267: "ibm-dial-out",
+ 3268: "msft-gc",
+ 3269: "msft-gc-ssl",
+ 3270: "verismart",
+ 3271: "csoft-prev",
+ 3272: "user-manager",
+ 3273: "sxmp",
+ 3274: "ordinox-server",
+ 3275: "samd",
+ 3276: "maxim-asics",
+ 3277: "awg-proxy",
+ 3278: "lkcmserver",
+ 3279: "admind",
+ 3280: "vs-server",
+ 3281: "sysopt",
+ 3282: "datusorb",
+ 3283: "Apple Remote Desktop (Net Assistant)",
+ 3284: "4talk",
+ 3285: "plato",
+ 3286: "e-net",
+ 3287: "directvdata",
+ 3288: "cops",
+ 3289: "enpc",
+ 3290: "caps-lm",
+ 3291: "sah-lm",
+ 3292: "cart-o-rama",
+ 3293: "fg-fps",
+ 3294: "fg-gip",
+ 3295: "dyniplookup",
+ 3296: "rib-slm",
+ 3297: "cytel-lm",
+ 3298: "deskview",
+ 3299: "pdrncs",
+ 3302: "mcs-fastmail",
+ 3303: "opsession-clnt",
+ 3304: "opsession-srvr",
+ 3305: "odette-ftp",
+ 3306: "mysql",
+ 3307: "opsession-prxy",
+ 3308: "tns-server",
+ 3309: "tns-adv",
+ 3310: "dyna-access",
+ 3311: "mcns-tel-ret",
+ 3312: "appman-server",
+ 3313: "uorb",
+ 3314: "uohost",
+ 3315: "cdid",
+ 3316: "aicc-cmi",
+ 3317: "vsaiport",
+ 3318: "ssrip",
+ 3319: "sdt-lmd",
+ 3320: "officelink2000",
+ 3321: "vnsstr",
+ 3326: "sftu",
+ 3327: "bbars",
+ 3328: "egptlm",
+ 3329: "hp-device-disc",
+ 3330: "mcs-calypsoicf",
+ 3331: "mcs-messaging",
+ 3332: "mcs-mailsvr",
+ 3333: "dec-notes",
+ 3334: "directv-web",
+ 3335: "directv-soft",
+ 3336: "directv-tick",
+ 3337: "directv-catlg",
+ 3338: "anet-b",
+ 3339: "anet-l",
+ 3340: "anet-m",
+ 3341: "anet-h",
+ 3342: "webtie",
+ 3343: "ms-cluster-net",
+ 3344: "bnt-manager",
+ 3345: "influence",
+ 3346: "trnsprntproxy",
+ 3347: "phoenix-rpc",
+ 3348: "pangolin-laser",
+ 3349: "chevinservices",
+ 3350: "findviatv",
+ 3351: "btrieve",
+ 3352: "ssql",
+ 3353: "fatpipe",
+ 3354: "suitjd",
+ 3355: "ordinox-dbase",
+ 3356: "upnotifyps",
+ 3357: "adtech-test",
+ 3358: "mpsysrmsvr",
+ 3359: "wg-netforce",
+ 3360: "kv-server",
+ 3361: "kv-agent",
+ 3362: "dj-ilm",
+ 3363: "nati-vi-server",
+ 3364: "creativeserver",
+ 3365: "contentserver",
+ 3366: "creativepartnr",
+ 3372: "tip2",
+ 3373: "lavenir-lm",
+ 3374: "cluster-disc",
+ 3375: "vsnm-agent",
+ 3376: "cdbroker",
+ 3377: "cogsys-lm",
+ 3378: "wsicopy",
+ 3379: "socorfs",
+ 3380: "sns-channels",
+ 3381: "geneous",
+ 3382: "fujitsu-neat",
+ 3383: "esp-lm",
+ 3384: "hp-clic",
+ 3385: "qnxnetman",
+ 3386: "gprs-sig",
+ 3387: "backroomnet",
+ 3388: "cbserver",
+ 3389: "ms-wbt-server",
+ 3390: "dsc",
+ 3391: "savant",
+ 3392: "efi-lm",
+ 3393: "d2k-tapestry1",
+ 3394: "d2k-tapestry2",
+ 3395: "dyna-lm",
+ 3396: "printer-agent",
+ 3397: "cloanto-lm",
+ 3398: "mercantile",
+ 3399: "csms",
+ 3400: "csms2",
+ 3401: "filecast",
+ 3402: "fxaengine-net",
+ 3405: "nokia-ann-ch1",
+ 3406: "nokia-ann-ch2",
+ 3407: "ldap-admin",
+ 3408: "BESApi",
+ 3409: "networklens",
+ 3410: "networklenss",
+ 3411: "biolink-auth",
+ 3412: "xmlblaster",
+ 3413: "svnet",
+ 3414: "wip-port",
+ 3415: "bcinameservice",
+ 3416: "commandport",
+ 3417: "csvr",
+ 3418: "rnmap",
+ 3419: "softaudit",
+ 3420: "ifcp-port",
+ 3421: "bmap",
+ 3422: "rusb-sys-port",
+ 3423: "xtrm",
+ 3424: "xtrms",
+ 3425: "agps-port",
+ 3426: "arkivio",
+ 3427: "websphere-snmp",
+ 3428: "twcss",
+ 3429: "gcsp",
+ 3430: "ssdispatch",
+ 3431: "ndl-als",
+ 3432: "osdcp",
+ 3433: "opnet-smp",
+ 3434: "opencm",
+ 3435: "pacom",
+ 3436: "gc-config",
+ 3437: "autocueds",
+ 3438: "spiral-admin",
+ 3439: "hri-port",
+ 3440: "ans-console",
+ 3441: "connect-client",
+ 3442: "connect-server",
+ 3443: "ov-nnm-websrv",
+ 3444: "denali-server",
+ 3445: "monp",
+ 3446: "3comfaxrpc",
+ 3447: "directnet",
+ 3448: "dnc-port",
+ 3449: "hotu-chat",
+ 3450: "castorproxy",
+ 3451: "asam",
+ 3452: "sabp-signal",
+ 3453: "pscupd",
+ 3454: "mira",
+ 3455: "prsvp",
+ 3456: "vat",
+ 3457: "vat-control",
+ 3458: "d3winosfi",
+ 3459: "integral",
+ 3460: "edm-manager",
+ 3461: "edm-stager",
+ 3462: "edm-std-notify",
+ 3463: "edm-adm-notify",
+ 3464: "edm-mgr-sync",
+ 3465: "edm-mgr-cntrl",
+ 3466: "workflow",
+ 3467: "rcst",
+ 3468: "ttcmremotectrl",
+ 3469: "pluribus",
+ 3470: "jt400",
+ 3471: "jt400-ssl",
+ 3472: "jaugsremotec-1",
+ 3473: "jaugsremotec-2",
+ 3474: "ttntspauto",
+ 3475: "genisar-port",
+ 3476: "nppmp",
+ 3477: "ecomm",
+ 3478: "stun",
+ 3479: "twrpc",
+ 3480: "plethora",
+ 3481: "cleanerliverc",
+ 3482: "vulture",
+ 3483: "slim-devices",
+ 3484: "gbs-stp",
+ 3485: "celatalk",
+ 3486: "ifsf-hb-port",
+ 3487: "ltcudp",
+ 3488: "fs-rh-srv",
+ 3489: "dtp-dia",
+ 3490: "colubris",
+ 3491: "swr-port",
+ 3492: "tvdumtray-port",
+ 3493: "nut",
+ 3494: "ibm3494",
+ 3495: "seclayer-tcp",
+ 3496: "seclayer-tls",
+ 3497: "ipether232port",
+ 3498: "dashpas-port",
+ 3499: "sccip-media",
+ 3500: "rtmp-port",
+ 3501: "isoft-p2p",
+ 3502: "avinstalldisc",
+ 3503: "lsp-ping",
+ 3504: "ironstorm",
+ 3505: "ccmcomm",
+ 3506: "apc-3506",
+ 3507: "nesh-broker",
+ 3508: "interactionweb",
+ 3509: "vt-ssl",
+ 3510: "xss-port",
+ 3511: "webmail-2",
+ 3512: "aztec",
+ 3513: "arcpd",
+ 3514: "must-p2p",
+ 3515: "must-backplane",
+ 3516: "smartcard-port",
+ 3517: "802-11-iapp",
+ 3518: "artifact-msg",
+ 3519: "galileo",
+ 3520: "galileolog",
+ 3521: "mc3ss",
+ 3522: "nssocketport",
+ 3523: "odeumservlink",
+ 3524: "ecmport",
+ 3525: "eisport",
+ 3526: "starquiz-port",
+ 3527: "beserver-msg-q",
+ 3528: "jboss-iiop",
+ 3529: "jboss-iiop-ssl",
+ 3530: "gf",
+ 3531: "joltid",
+ 3532: "raven-rmp",
+ 3533: "raven-rdp",
+ 3534: "urld-port",
+ 3535: "ms-la",
+ 3536: "snac",
+ 3537: "ni-visa-remote",
+ 3538: "ibm-diradm",
+ 3539: "ibm-diradm-ssl",
+ 3540: "pnrp-port",
+ 3541: "voispeed-port",
+ 3542: "hacl-monitor",
+ 3543: "qftest-lookup",
+ 3544: "teredo",
+ 3545: "camac",
+ 3547: "symantec-sim",
+ 3548: "interworld",
+ 3549: "tellumat-nms",
+ 3550: "ssmpp",
+ 3551: "apcupsd",
+ 3552: "taserver",
+ 3553: "rbr-discovery",
+ 3554: "questnotify",
+ 3555: "razor",
+ 3556: "sky-transport",
+ 3557: "personalos-001",
+ 3558: "mcp-port",
+ 3559: "cctv-port",
+ 3560: "iniserve-port",
+ 3561: "bmc-onekey",
+ 3562: "sdbproxy",
+ 3563: "watcomdebug",
+ 3564: "esimport",
+ 3567: "dof-eps",
+ 3568: "dof-tunnel-sec",
+ 3569: "mbg-ctrl",
+ 3570: "mccwebsvr-port",
+ 3571: "megardsvr-port",
+ 3572: "megaregsvrport",
+ 3573: "tag-ups-1",
+ 3574: "dmaf-caster",
+ 3575: "ccm-port",
+ 3576: "cmc-port",
+ 3577: "config-port",
+ 3578: "data-port",
+ 3579: "ttat3lb",
+ 3580: "nati-svrloc",
+ 3581: "kfxaclicensing",
+ 3582: "press",
+ 3583: "canex-watch",
+ 3584: "u-dbap",
+ 3585: "emprise-lls",
+ 3586: "emprise-lsc",
+ 3587: "p2pgroup",
+ 3588: "sentinel",
+ 3589: "isomair",
+ 3590: "wv-csp-sms",
+ 3591: "gtrack-server",
+ 3592: "gtrack-ne",
+ 3593: "bpmd",
+ 3594: "mediaspace",
+ 3595: "shareapp",
+ 3596: "iw-mmogame",
+ 3597: "a14",
+ 3598: "a15",
+ 3599: "quasar-server",
+ 3600: "trap-daemon",
+ 3601: "visinet-gui",
+ 3602: "infiniswitchcl",
+ 3603: "int-rcv-cntrl",
+ 3604: "bmc-jmx-port",
+ 3605: "comcam-io",
+ 3606: "splitlock",
+ 3607: "precise-i3",
+ 3608: "trendchip-dcp",
+ 3609: "cpdi-pidas-cm",
+ 3610: "echonet",
+ 3611: "six-degrees",
+ 3612: "hp-dataprotect",
+ 3613: "alaris-disc",
+ 3614: "sigma-port",
+ 3615: "start-network",
+ 3616: "cd3o-protocol",
+ 3617: "sharp-server",
+ 3618: "aairnet-1",
+ 3619: "aairnet-2",
+ 3620: "ep-pcp",
+ 3621: "ep-nsp",
+ 3622: "ff-lr-port",
+ 3623: "haipe-discover",
+ 3624: "dist-upgrade",
+ 3625: "volley",
+ 3626: "bvcdaemon-port",
+ 3627: "jamserverport",
+ 3628: "ept-machine",
+ 3629: "escvpnet",
+ 3630: "cs-remote-db",
+ 3631: "cs-services",
+ 3632: "distcc",
+ 3633: "wacp",
+ 3634: "hlibmgr",
+ 3635: "sdo",
+ 3636: "servistaitsm",
+ 3637: "scservp",
+ 3638: "ehp-backup",
+ 3639: "xap-ha",
+ 3640: "netplay-port1",
+ 3641: "netplay-port2",
+ 3642: "juxml-port",
+ 3643: "audiojuggler",
+ 3644: "ssowatch",
+ 3645: "cyc",
+ 3646: "xss-srv-port",
+ 3647: "splitlock-gw",
+ 3648: "fjcp",
+ 3649: "nmmp",
+ 3650: "prismiq-plugin",
+ 3651: "xrpc-registry",
+ 3652: "vxcrnbuport",
+ 3653: "tsp",
+ 3654: "vaprtm",
+ 3655: "abatemgr",
+ 3656: "abatjss",
+ 3657: "immedianet-bcn",
+ 3658: "ps-ams",
+ 3659: "apple-sasl",
+ 3660: "can-nds-ssl",
+ 3661: "can-ferret-ssl",
+ 3662: "pserver",
+ 3663: "dtp",
+ 3664: "ups-engine",
+ 3665: "ent-engine",
+ 3666: "eserver-pap",
+ 3667: "infoexch",
+ 3668: "dell-rm-port",
+ 3669: "casanswmgmt",
+ 3670: "smile",
+ 3671: "efcp",
+ 3672: "lispworks-orb",
+ 3673: "mediavault-gui",
+ 3674: "wininstall-ipc",
+ 3675: "calltrax",
+ 3676: "va-pacbase",
+ 3677: "roverlog",
+ 3678: "ipr-dglt",
+ 3679: "Escale (Newton Dock)",
+ 3680: "npds-tracker",
+ 3681: "bts-x73",
+ 3682: "cas-mapi",
+ 3683: "bmc-ea",
+ 3684: "faxstfx-port",
+ 3685: "dsx-agent",
+ 3686: "tnmpv2",
+ 3687: "simple-push",
+ 3688: "simple-push-s",
+ 3689: "daap",
+ 3690: "svn",
+ 3691: "magaya-network",
+ 3692: "intelsync",
+ 3695: "bmc-data-coll",
+ 3696: "telnetcpcd",
+ 3697: "nw-license",
+ 3698: "sagectlpanel",
+ 3699: "kpn-icw",
+ 3700: "lrs-paging",
+ 3701: "netcelera",
+ 3702: "ws-discovery",
+ 3703: "adobeserver-3",
+ 3704: "adobeserver-4",
+ 3705: "adobeserver-5",
+ 3706: "rt-event",
+ 3707: "rt-event-s",
+ 3708: "sun-as-iiops",
+ 3709: "ca-idms",
+ 3710: "portgate-auth",
+ 3711: "edb-server2",
+ 3712: "sentinel-ent",
+ 3713: "tftps",
+ 3714: "delos-dms",
+ 3715: "anoto-rendezv",
+ 3716: "wv-csp-sms-cir",
+ 3717: "wv-csp-udp-cir",
+ 3718: "opus-services",
+ 3719: "itelserverport",
+ 3720: "ufastro-instr",
+ 3721: "xsync",
+ 3722: "xserveraid",
+ 3723: "sychrond",
+ 3724: "blizwow",
+ 3725: "na-er-tip",
+ 3726: "array-manager",
+ 3727: "e-mdu",
+ 3728: "e-woa",
+ 3729: "fksp-audit",
+ 3730: "client-ctrl",
+ 3731: "smap",
+ 3732: "m-wnn",
+ 3733: "multip-msg",
+ 3734: "synel-data",
+ 3735: "pwdis",
+ 3736: "rs-rmi",
+ 3738: "versatalk",
+ 3739: "launchbird-lm",
+ 3740: "heartbeat",
+ 3741: "wysdma",
+ 3742: "cst-port",
+ 3743: "ipcs-command",
+ 3744: "sasg",
+ 3745: "gw-call-port",
+ 3746: "linktest",
+ 3747: "linktest-s",
+ 3748: "webdata",
+ 3749: "cimtrak",
+ 3750: "cbos-ip-port",
+ 3751: "gprs-cube",
+ 3752: "vipremoteagent",
+ 3753: "nattyserver",
+ 3754: "timestenbroker",
+ 3755: "sas-remote-hlp",
+ 3756: "canon-capt",
+ 3757: "grf-port",
+ 3758: "apw-registry",
+ 3759: "exapt-lmgr",
+ 3760: "adtempusclient",
+ 3761: "gsakmp",
+ 3762: "gbs-smp",
+ 3763: "xo-wave",
+ 3764: "mni-prot-rout",
+ 3765: "rtraceroute",
+ 3767: "listmgr-port",
+ 3768: "rblcheckd",
+ 3769: "haipe-otnk",
+ 3770: "cindycollab",
+ 3771: "paging-port",
+ 3772: "ctp",
+ 3773: "ctdhercules",
+ 3774: "zicom",
+ 3775: "ispmmgr",
+ 3776: "dvcprov-port",
+ 3777: "jibe-eb",
+ 3778: "c-h-it-port",
+ 3779: "cognima",
+ 3780: "nnp",
+ 3781: "abcvoice-port",
+ 3782: "iso-tp0s",
+ 3783: "bim-pem",
+ 3784: "bfd-control",
+ 3785: "bfd-echo",
+ 3786: "upstriggervsw",
+ 3787: "fintrx",
+ 3788: "isrp-port",
+ 3789: "remotedeploy",
+ 3790: "quickbooksrds",
+ 3791: "tvnetworkvideo",
+ 3792: "sitewatch",
+ 3793: "dcsoftware",
+ 3794: "jaus",
+ 3795: "myblast",
+ 3796: "spw-dialer",
+ 3797: "idps",
+ 3798: "minilock",
+ 3799: "radius-dynauth",
+ 3800: "pwgpsi",
+ 3801: "ibm-mgr",
+ 3802: "vhd",
+ 3803: "soniqsync",
+ 3804: "iqnet-port",
+ 3805: "tcpdataserver",
+ 3806: "wsmlb",
+ 3807: "spugna",
+ 3808: "sun-as-iiops-ca",
+ 3809: "apocd",
+ 3810: "wlanauth",
+ 3811: "amp",
+ 3812: "neto-wol-server",
+ 3813: "rap-ip",
+ 3814: "neto-dcs",
+ 3815: "lansurveyorxml",
+ 3816: "sunlps-http",
+ 3817: "tapeware",
+ 3818: "crinis-hb",
+ 3819: "epl-slp",
+ 3820: "scp",
+ 3821: "pmcp",
+ 3822: "acp-discovery",
+ 3823: "acp-conduit",
+ 3824: "acp-policy",
+ 3825: "ffserver",
+ 3826: "warmux",
+ 3827: "netmpi",
+ 3828: "neteh",
+ 3829: "neteh-ext",
+ 3830: "cernsysmgmtagt",
+ 3831: "dvapps",
+ 3832: "xxnetserver",
+ 3833: "aipn-auth",
+ 3834: "spectardata",
+ 3835: "spectardb",
+ 3836: "markem-dcp",
+ 3837: "mkm-discovery",
+ 3838: "sos",
+ 3839: "amx-rms",
+ 3840: "flirtmitmir",
+ 3842: "nhci",
+ 3843: "quest-agent",
+ 3844: "rnm",
+ 3845: "v-one-spp",
+ 3846: "an-pcp",
+ 3847: "msfw-control",
+ 3848: "item",
+ 3849: "spw-dnspreload",
+ 3850: "qtms-bootstrap",
+ 3851: "spectraport",
+ 3852: "sse-app-config",
+ 3853: "sscan",
+ 3854: "stryker-com",
+ 3855: "opentrac",
+ 3856: "informer",
+ 3857: "trap-port",
+ 3858: "trap-port-mom",
+ 3859: "nav-port",
+ 3860: "sasp",
+ 3861: "winshadow-hd",
+ 3862: "giga-pocket",
+ 3863: "asap-udp",
+ 3865: "xpl",
+ 3866: "dzdaemon",
+ 3867: "dzoglserver",
+ 3869: "ovsam-mgmt",
+ 3870: "ovsam-d-agent",
+ 3871: "avocent-adsap",
+ 3872: "oem-agent",
+ 3873: "fagordnc",
+ 3874: "sixxsconfig",
+ 3875: "pnbscada",
+ 3876: "dl-agent",
+ 3877: "xmpcr-interface",
+ 3878: "fotogcad",
+ 3879: "appss-lm",
+ 3880: "igrs",
+ 3881: "idac",
+ 3882: "msdts1",
+ 3883: "vrpn",
+ 3884: "softrack-meter",
+ 3885: "topflow-ssl",
+ 3886: "nei-management",
+ 3887: "ciphire-data",
+ 3888: "ciphire-serv",
+ 3889: "dandv-tester",
+ 3890: "ndsconnect",
+ 3891: "rtc-pm-port",
+ 3892: "pcc-image-port",
+ 3893: "cgi-starapi",
+ 3894: "syam-agent",
+ 3895: "syam-smc",
+ 3896: "sdo-tls",
+ 3897: "sdo-ssh",
+ 3898: "senip",
+ 3899: "itv-control",
+ 3900: "udt-os",
+ 3901: "nimsh",
+ 3902: "nimaux",
+ 3903: "charsetmgr",
+ 3904: "omnilink-port",
+ 3905: "mupdate",
+ 3906: "topovista-data",
+ 3907: "imoguia-port",
+ 3908: "hppronetman",
+ 3909: "surfcontrolcpa",
+ 3910: "prnrequest",
+ 3911: "prnstatus",
+ 3912: "gbmt-stars",
+ 3913: "listcrt-port",
+ 3914: "listcrt-port-2",
+ 3915: "agcat",
+ 3916: "wysdmc",
+ 3917: "aftmux",
+ 3918: "pktcablemmcops",
+ 3919: "hyperip",
+ 3920: "exasoftport1",
+ 3921: "herodotus-net",
+ 3922: "sor-update",
+ 3923: "symb-sb-port",
+ 3924: "mpl-gprs-port",
+ 3925: "zmp",
+ 3926: "winport",
+ 3927: "natdataservice",
+ 3928: "netboot-pxe",
+ 3929: "smauth-port",
+ 3930: "syam-webserver",
+ 3931: "msr-plugin-port",
+ 3932: "dyn-site",
+ 3933: "plbserve-port",
+ 3934: "sunfm-port",
+ 3935: "sdp-portmapper",
+ 3936: "mailprox",
+ 3937: "dvbservdsc",
+ 3938: "dbcontrol-agent",
+ 3939: "aamp",
+ 3940: "xecp-node",
+ 3941: "homeportal-web",
+ 3942: "srdp",
+ 3943: "tig",
+ 3944: "sops",
+ 3945: "emcads",
+ 3946: "backupedge",
+ 3947: "ccp",
+ 3948: "apdap",
+ 3949: "drip",
+ 3950: "namemunge",
+ 3951: "pwgippfax",
+ 3952: "i3-sessionmgr",
+ 3953: "xmlink-connect",
+ 3954: "adrep",
+ 3955: "p2pcommunity",
+ 3956: "gvcp",
+ 3957: "mqe-broker",
+ 3958: "mqe-agent",
+ 3959: "treehopper",
+ 3960: "bess",
+ 3961: "proaxess",
+ 3962: "sbi-agent",
+ 3963: "thrp",
+ 3964: "sasggprs",
+ 3965: "ati-ip-to-ncpe",
+ 3966: "bflckmgr",
+ 3967: "ppsms",
+ 3968: "ianywhere-dbns",
+ 3969: "landmarks",
+ 3970: "lanrevagent",
+ 3971: "lanrevserver",
+ 3972: "iconp",
+ 3973: "progistics",
+ 3974: "citysearch",
+ 3975: "airshot",
+ 3976: "opswagent",
+ 3977: "opswmanager",
+ 3978: "secure-cfg-svr",
+ 3979: "smwan",
+ 3980: "acms",
+ 3981: "starfish",
+ 3982: "eis",
+ 3983: "eisp",
+ 3984: "mapper-nodemgr",
+ 3985: "mapper-mapethd",
+ 3986: "mapper-ws-ethd",
+ 3987: "centerline",
+ 3988: "dcs-config",
+ 3989: "bv-queryengine",
+ 3990: "bv-is",
+ 3991: "bv-smcsrv",
+ 3992: "bv-ds",
+ 3993: "bv-agent",
+ 3995: "iss-mgmt-ssl",
+ 3996: "abcsoftware",
+ 3997: "agentsease-db",
+ 3998: "dnx",
+ 3999: "nvcnet",
+ 4000: "terabase",
+ 4001: "newoak",
+ 4002: "pxc-spvr-ft",
+ 4003: "pxc-splr-ft",
+ 4004: "pxc-roid",
+ 4005: "pxc-pin",
+ 4006: "pxc-spvr",
+ 4007: "pxc-splr",
+ 4008: "netcheque",
+ 4009: "chimera-hwm",
+ 4010: "samsung-unidex",
+ 4011: "altserviceboot",
+ 4012: "pda-gate",
+ 4013: "acl-manager",
+ 4014: "taiclock",
+ 4015: "talarian-mcast1",
+ 4016: "talarian-mcast2",
+ 4017: "talarian-mcast3",
+ 4018: "talarian-mcast4",
+ 4019: "talarian-mcast5",
+ 4020: "trap",
+ 4021: "nexus-portal",
+ 4022: "dnox",
+ 4023: "esnm-zoning",
+ 4024: "tnp1-port",
+ 4025: "partimage",
+ 4026: "as-debug",
+ 4027: "bxp",
+ 4028: "dtserver-port",
+ 4029: "ip-qsig",
+ 4030: "jdmn-port",
+ 4031: "suucp",
+ 4032: "vrts-auth-port",
+ 4033: "sanavigator",
+ 4034: "ubxd",
+ 4035: "wap-push-http",
+ 4036: "wap-push-https",
+ 4037: "ravehd",
+ 4038: "fazzt-ptp",
+ 4039: "fazzt-admin",
+ 4040: "yo-main",
+ 4041: "houston",
+ 4042: "ldxp",
+ 4043: "nirp",
+ 4044: "ltp",
+ 4045: "npp",
+ 4046: "acp-proto",
+ 4047: "ctp-state",
+ 4049: "wafs",
+ 4050: "cisco-wafs",
+ 4051: "cppdp",
+ 4052: "interact",
+ 4053: "ccu-comm-1",
+ 4054: "ccu-comm-2",
+ 4055: "ccu-comm-3",
+ 4056: "lms",
+ 4057: "wfm",
+ 4058: "kingfisher",
+ 4059: "dlms-cosem",
+ 4060: "dsmeter-iatc",
+ 4061: "ice-location",
+ 4062: "ice-slocation",
+ 4063: "ice-router",
+ 4064: "ice-srouter",
+ 4065: "avanti-cdp",
+ 4066: "pmas",
+ 4067: "idp",
+ 4068: "ipfltbcst",
+ 4069: "minger",
+ 4070: "tripe",
+ 4071: "aibkup",
+ 4072: "zieto-sock",
+ 4073: "iRAPP",
+ 4074: "cequint-cityid",
+ 4075: "perimlan",
+ 4076: "seraph",
+ 4077: "ascomalarm",
+ 4079: "santools",
+ 4080: "lorica-in",
+ 4081: "lorica-in-sec",
+ 4082: "lorica-out",
+ 4083: "lorica-out-sec",
+ 4084: "fortisphere-vm",
+ 4086: "ftsync",
+ 4089: "opencore",
+ 4090: "omasgport",
+ 4091: "ewinstaller",
+ 4092: "ewdgs",
+ 4093: "pvxpluscs",
+ 4094: "sysrqd",
+ 4095: "xtgui",
+ 4096: "bre",
+ 4097: "patrolview",
+ 4098: "drmsfsd",
+ 4099: "dpcp",
+ 4100: "igo-incognito",
+ 4101: "brlp-0",
+ 4102: "brlp-1",
+ 4103: "brlp-2",
+ 4104: "brlp-3",
+ 4105: "shofar",
+ 4106: "synchronite",
+ 4107: "j-ac",
+ 4108: "accel",
+ 4109: "izm",
+ 4110: "g2tag",
+ 4111: "xgrid",
+ 4112: "apple-vpns-rp",
+ 4113: "aipn-reg",
+ 4114: "jomamqmonitor",
+ 4115: "cds",
+ 4116: "smartcard-tls",
+ 4117: "hillrserv",
+ 4118: "netscript",
+ 4119: "assuria-slm",
+ 4121: "e-builder",
+ 4122: "fprams",
+ 4123: "z-wave",
+ 4124: "tigv2",
+ 4125: "opsview-envoy",
+ 4126: "ddrepl",
+ 4127: "unikeypro",
+ 4128: "nufw",
+ 4129: "nuauth",
+ 4130: "fronet",
+ 4131: "stars",
+ 4132: "nuts-dem",
+ 4133: "nuts-bootp",
+ 4134: "nifty-hmi",
+ 4135: "cl-db-attach",
+ 4136: "cl-db-request",
+ 4137: "cl-db-remote",
+ 4138: "nettest",
+ 4139: "thrtx",
+ 4140: "cedros-fds",
+ 4141: "oirtgsvc",
+ 4142: "oidocsvc",
+ 4143: "oidsr",
+ 4145: "vvr-control",
+ 4146: "tgcconnect",
+ 4147: "vrxpservman",
+ 4148: "hhb-handheld",
+ 4149: "agslb",
+ 4150: "PowerAlert-nsa",
+ 4151: "menandmice-noh",
+ 4152: "idig-mux",
+ 4153: "mbl-battd",
+ 4154: "atlinks",
+ 4155: "bzr",
+ 4156: "stat-results",
+ 4157: "stat-scanner",
+ 4158: "stat-cc",
+ 4159: "nss",
+ 4160: "jini-discovery",
+ 4161: "omscontact",
+ 4162: "omstopology",
+ 4163: "silverpeakpeer",
+ 4164: "silverpeakcomm",
+ 4165: "altcp",
+ 4166: "joost",
+ 4167: "ddgn",
+ 4168: "pslicser",
+ 4169: "iadt-disc",
+ 4172: "pcoip",
+ 4173: "mma-discovery",
+ 4174: "sm-disc",
+ 4177: "wello",
+ 4178: "storman",
+ 4179: "MaxumSP",
+ 4180: "httpx",
+ 4181: "macbak",
+ 4182: "pcptcpservice",
+ 4183: "cyborgnet",
+ 4184: "universe-suite",
+ 4185: "wcpp",
+ 4188: "vatata",
+ 4191: "dsmipv6",
+ 4192: "azeti-bd",
+ 4197: "hctl",
+ 4199: "eims-admin",
+ 4300: "corelccam",
+ 4301: "d-data",
+ 4302: "d-data-control",
+ 4303: "srcp",
+ 4304: "owserver",
+ 4305: "batman",
+ 4306: "pinghgl",
+ 4307: "trueconf",
+ 4308: "compx-lockview",
+ 4309: "dserver",
+ 4310: "mirrtex",
+ 4320: "fdt-rcatp",
+ 4321: "rwhois",
+ 4322: "trim-event",
+ 4323: "trim-ice",
+ 4325: "geognosisman",
+ 4326: "geognosis",
+ 4327: "jaxer-web",
+ 4328: "jaxer-manager",
+ 4333: "ahsp",
+ 4340: "gaia",
+ 4341: "lisp-data",
+ 4342: "lisp-control",
+ 4343: "unicall",
+ 4344: "vinainstall",
+ 4345: "m4-network-as",
+ 4346: "elanlm",
+ 4347: "lansurveyor",
+ 4348: "itose",
+ 4349: "fsportmap",
+ 4350: "net-device",
+ 4351: "plcy-net-svcs",
+ 4352: "pjlink",
+ 4353: "f5-iquery",
+ 4354: "qsnet-trans",
+ 4355: "qsnet-workst",
+ 4356: "qsnet-assist",
+ 4357: "qsnet-cond",
+ 4358: "qsnet-nucl",
+ 4359: "omabcastltkm",
+ 4361: "nacnl",
+ 4362: "afore-vdp-disc",
+ 4366: "shadowstream",
+ 4368: "wxbrief",
+ 4369: "epmd",
+ 4370: "elpro-tunnel",
+ 4371: "l2c-disc",
+ 4372: "l2c-data",
+ 4373: "remctl",
+ 4375: "tolteces",
+ 4376: "bip",
+ 4377: "cp-spxsvr",
+ 4378: "cp-spxdpy",
+ 4379: "ctdb",
+ 4389: "xandros-cms",
+ 4390: "wiegand",
+ 4394: "apwi-disc",
+ 4395: "omnivisionesx",
+ 4400: "ds-srv",
+ 4401: "ds-srvr",
+ 4402: "ds-clnt",
+ 4403: "ds-user",
+ 4404: "ds-admin",
+ 4405: "ds-mail",
+ 4406: "ds-slp",
+ 4412: "smallchat",
+ 4413: "avi-nms-disc",
+ 4416: "pjj-player-disc",
+ 4418: "axysbridge",
+ 4420: "nvm-express",
+ 4425: "netrockey6",
+ 4426: "beacon-port-2",
+ 4430: "rsqlserver",
+ 4432: "l-acoustics",
+ 4441: "netblox",
+ 4442: "saris",
+ 4443: "pharos",
+ 4444: "krb524",
+ 4445: "upnotifyp",
+ 4446: "n1-fwp",
+ 4447: "n1-rmgmt",
+ 4448: "asc-slmd",
+ 4449: "privatewire",
+ 4450: "camp",
+ 4451: "ctisystemmsg",
+ 4452: "ctiprogramload",
+ 4453: "nssalertmgr",
+ 4454: "nssagentmgr",
+ 4455: "prchat-user",
+ 4456: "prchat-server",
+ 4457: "prRegister",
+ 4458: "mcp",
+ 4484: "hpssmgmt",
+ 4486: "icms",
+ 4488: "awacs-ice",
+ 4500: "ipsec-nat-t",
+ 4534: "armagetronad",
+ 4535: "ehs",
+ 4536: "ehs-ssl",
+ 4537: "wssauthsvc",
+ 4538: "swx-gate",
+ 4545: "worldscores",
+ 4546: "sf-lm",
+ 4547: "lanner-lm",
+ 4548: "synchromesh",
+ 4549: "aegate",
+ 4550: "gds-adppiw-db",
+ 4551: "ieee-mih",
+ 4552: "menandmice-mon",
+ 4554: "msfrs",
+ 4555: "rsip",
+ 4556: "dtn-bundle",
+ 4557: "mtcevrunqss",
+ 4558: "mtcevrunqman",
+ 4559: "hylafax",
+ 4566: "kwtc",
+ 4567: "tram",
+ 4568: "bmc-reporting",
+ 4569: "iax",
+ 4591: "l3t-at-an",
+ 4592: "hrpd-ith-at-an",
+ 4593: "ipt-anri-anri",
+ 4594: "ias-session",
+ 4595: "ias-paging",
+ 4596: "ias-neighbor",
+ 4597: "a21-an-1xbs",
+ 4598: "a16-an-an",
+ 4599: "a17-an-an",
+ 4600: "piranha1",
+ 4601: "piranha2",
+ 4621: "ventoso",
+ 4658: "playsta2-app",
+ 4659: "playsta2-lob",
+ 4660: "smaclmgr",
+ 4661: "kar2ouche",
+ 4662: "oms",
+ 4663: "noteit",
+ 4664: "ems",
+ 4665: "contclientms",
+ 4666: "eportcomm",
+ 4667: "mmacomm",
+ 4668: "mmaeds",
+ 4669: "eportcommdata",
+ 4670: "light",
+ 4671: "acter",
+ 4672: "rfa",
+ 4673: "cxws",
+ 4674: "appiq-mgmt",
+ 4675: "dhct-status",
+ 4676: "dhct-alerts",
+ 4677: "bcs",
+ 4678: "traversal",
+ 4679: "mgesupervision",
+ 4680: "mgemanagement",
+ 4681: "parliant",
+ 4682: "finisar",
+ 4683: "spike",
+ 4684: "rfid-rp1",
+ 4685: "autopac",
+ 4686: "msp-os",
+ 4687: "nst",
+ 4688: "mobile-p2p",
+ 4689: "altovacentral",
+ 4690: "prelude",
+ 4691: "mtn",
+ 4692: "conspiracy",
+ 4700: "netxms-agent",
+ 4701: "netxms-mgmt",
+ 4702: "netxms-sync",
+ 4711: "trinity-dist",
+ 4725: "truckstar",
+ 4726: "a26-fap-fgw",
+ 4727: "fcis-disc",
+ 4728: "capmux",
+ 4729: "gsmtap",
+ 4730: "gearman",
+ 4732: "ohmtrigger",
+ 4737: "ipdr-sp",
+ 4738: "solera-lpn",
+ 4739: "ipfix",
+ 4740: "ipfixs",
+ 4741: "lumimgrd",
+ 4742: "sicct-sdp",
+ 4743: "openhpid",
+ 4744: "ifsp",
+ 4745: "fmp",
+ 4746: "intelliadm-disc",
+ 4747: "buschtrommel",
+ 4749: "profilemac",
+ 4750: "ssad",
+ 4751: "spocp",
+ 4752: "snap",
+ 4753: "simon-disc",
+ 4754: "gre-in-udp",
+ 4755: "gre-udp-dtls",
+ 4784: "bfd-multi-ctl",
+ 4785: "cncp",
+ 4789: "vxlan",
+ 4790: "vxlan-gpe",
+ 4791: "roce",
+ 4800: "iims",
+ 4801: "iwec",
+ 4802: "ilss",
+ 4803: "notateit-disc",
+ 4804: "aja-ntv4-disc",
+ 4827: "htcp",
+ 4837: "varadero-0",
+ 4838: "varadero-1",
+ 4839: "varadero-2",
+ 4840: "opcua-udp",
+ 4841: "quosa",
+ 4842: "gw-asv",
+ 4843: "opcua-tls",
+ 4844: "gw-log",
+ 4845: "wcr-remlib",
+ 4846: "contamac-icm",
+ 4847: "wfc",
+ 4848: "appserv-http",
+ 4849: "appserv-https",
+ 4850: "sun-as-nodeagt",
+ 4851: "derby-repli",
+ 4867: "unify-debug",
+ 4868: "phrelay",
+ 4869: "phrelaydbg",
+ 4870: "cc-tracking",
+ 4871: "wired",
+ 4876: "tritium-can",
+ 4877: "lmcs",
+ 4878: "inst-discovery",
+ 4881: "socp-t",
+ 4882: "socp-c",
+ 4884: "hivestor",
+ 4885: "abbs",
+ 4894: "lyskom",
+ 4899: "radmin-port",
+ 4900: "hfcs",
+ 4914: "bones",
+ 4936: "an-signaling",
+ 4937: "atsc-mh-ssc",
+ 4940: "eq-office-4940",
+ 4941: "eq-office-4941",
+ 4942: "eq-office-4942",
+ 4949: "munin",
+ 4950: "sybasesrvmon",
+ 4951: "pwgwims",
+ 4952: "sagxtsds",
+ 4969: "ccss-qmm",
+ 4970: "ccss-qsm",
+ 4980: "ctxs-vpp",
+ 4986: "mrip",
+ 4987: "smar-se-port1",
+ 4988: "smar-se-port2",
+ 4989: "parallel",
+ 4990: "busycal",
+ 4991: "vrt",
+ 4999: "hfcs-manager",
+ 5000: "commplex-main",
+ 5001: "commplex-link",
+ 5002: "rfe",
+ 5003: "fmpro-internal",
+ 5004: "avt-profile-1",
+ 5005: "avt-profile-2",
+ 5006: "wsm-server",
+ 5007: "wsm-server-ssl",
+ 5008: "synapsis-edge",
+ 5009: "winfs",
+ 5010: "telelpathstart",
+ 5011: "telelpathattack",
+ 5012: "nsp",
+ 5013: "fmpro-v6",
+ 5014: "onpsocket",
+ 5020: "zenginkyo-1",
+ 5021: "zenginkyo-2",
+ 5022: "mice",
+ 5023: "htuilsrv",
+ 5024: "scpi-telnet",
+ 5025: "scpi-raw",
+ 5026: "strexec-d",
+ 5027: "strexec-s",
+ 5029: "infobright",
+ 5030: "surfpass",
+ 5031: "dmp",
+ 5042: "asnaacceler8db",
+ 5043: "swxadmin",
+ 5044: "lxi-evntsvc",
+ 5046: "vpm-udp",
+ 5047: "iscape",
+ 5049: "ivocalize",
+ 5050: "mmcc",
+ 5051: "ita-agent",
+ 5052: "ita-manager",
+ 5053: "rlm-disc",
+ 5055: "unot",
+ 5056: "intecom-ps1",
+ 5057: "intecom-ps2",
+ 5058: "locus-disc",
+ 5059: "sds",
+ 5060: "sip",
+ 5061: "sips",
+ 5062: "na-localise",
+ 5064: "ca-1",
+ 5065: "ca-2",
+ 5066: "stanag-5066",
+ 5067: "authentx",
+ 5069: "i-net-2000-npr",
+ 5070: "vtsas",
+ 5071: "powerschool",
+ 5072: "ayiya",
+ 5073: "tag-pm",
+ 5074: "alesquery",
+ 5078: "pixelpusher",
+ 5079: "cp-spxrpts",
+ 5080: "onscreen",
+ 5081: "sdl-ets",
+ 5082: "qcp",
+ 5083: "qfp",
+ 5084: "llrp",
+ 5085: "encrypted-llrp",
+ 5092: "magpie",
+ 5093: "sentinel-lm",
+ 5094: "hart-ip",
+ 5099: "sentlm-srv2srv",
+ 5100: "socalia",
+ 5101: "talarian-udp",
+ 5102: "oms-nonsecure",
+ 5104: "tinymessage",
+ 5105: "hughes-ap",
+ 5111: "taep-as-svc",
+ 5112: "pm-cmdsvr",
+ 5116: "emb-proj-cmd",
+ 5120: "barracuda-bbs",
+ 5133: "nbt-pc",
+ 5136: "minotaur-sa",
+ 5137: "ctsd",
+ 5145: "rmonitor-secure",
+ 5150: "atmp",
+ 5151: "esri-sde",
+ 5152: "sde-discovery",
+ 5154: "bzflag",
+ 5155: "asctrl-agent",
+ 5164: "vpa-disc",
+ 5165: "ife-icorp",
+ 5166: "winpcs",
+ 5167: "scte104",
+ 5168: "scte30",
+ 5190: "aol",
+ 5191: "aol-1",
+ 5192: "aol-2",
+ 5193: "aol-3",
+ 5200: "targus-getdata",
+ 5201: "targus-getdata1",
+ 5202: "targus-getdata2",
+ 5203: "targus-getdata3",
+ 5223: "hpvirtgrp",
+ 5224: "hpvirtctrl",
+ 5225: "hp-server",
+ 5226: "hp-status",
+ 5227: "perfd",
+ 5234: "eenet",
+ 5235: "galaxy-network",
+ 5236: "padl2sim",
+ 5237: "mnet-discovery",
+ 5245: "downtools-disc",
+ 5246: "capwap-control",
+ 5247: "capwap-data",
+ 5248: "caacws",
+ 5249: "caaclang2",
+ 5250: "soagateway",
+ 5251: "caevms",
+ 5252: "movaz-ssc",
+ 5264: "3com-njack-1",
+ 5265: "3com-njack-2",
+ 5270: "cartographerxmp",
+ 5271: "cuelink-disc",
+ 5272: "pk",
+ 5282: "transmit-port",
+ 5298: "presence",
+ 5299: "nlg-data",
+ 5300: "hacl-hb",
+ 5301: "hacl-gs",
+ 5302: "hacl-cfg",
+ 5303: "hacl-probe",
+ 5304: "hacl-local",
+ 5305: "hacl-test",
+ 5306: "sun-mc-grp",
+ 5307: "sco-aip",
+ 5308: "cfengine",
+ 5309: "jprinter",
+ 5310: "outlaws",
+ 5312: "permabit-cs",
+ 5313: "rrdp",
+ 5314: "opalis-rbt-ipc",
+ 5315: "hacl-poll",
+ 5343: "kfserver",
+ 5344: "xkotodrcp",
+ 5349: "stuns",
+ 5350: "pcp-multicast",
+ 5351: "pcp",
+ 5352: "dns-llq",
+ 5353: "mdns",
+ 5354: "mdnsresponder",
+ 5355: "llmnr",
+ 5356: "ms-smlbiz",
+ 5357: "wsdapi",
+ 5358: "wsdapi-s",
+ 5359: "ms-alerter",
+ 5360: "ms-sideshow",
+ 5361: "ms-s-sideshow",
+ 5362: "serverwsd2",
+ 5363: "net-projection",
+ 5364: "kdnet",
+ 5397: "stresstester",
+ 5398: "elektron-admin",
+ 5399: "securitychase",
+ 5400: "excerpt",
+ 5401: "excerpts",
+ 5402: "mftp",
+ 5403: "hpoms-ci-lstn",
+ 5404: "hpoms-dps-lstn",
+ 5405: "netsupport",
+ 5406: "systemics-sox",
+ 5407: "foresyte-clear",
+ 5408: "foresyte-sec",
+ 5409: "salient-dtasrv",
+ 5410: "salient-usrmgr",
+ 5411: "actnet",
+ 5412: "continuus",
+ 5413: "wwiotalk",
+ 5414: "statusd",
+ 5415: "ns-server",
+ 5416: "sns-gateway",
+ 5417: "sns-agent",
+ 5418: "mcntp",
+ 5419: "dj-ice",
+ 5420: "cylink-c",
+ 5421: "netsupport2",
+ 5422: "salient-mux",
+ 5423: "virtualuser",
+ 5424: "beyond-remote",
+ 5425: "br-channel",
+ 5426: "devbasic",
+ 5427: "sco-peer-tta",
+ 5428: "telaconsole",
+ 5429: "base",
+ 5430: "radec-corp",
+ 5431: "park-agent",
+ 5432: "postgresql",
+ 5433: "pyrrho",
+ 5434: "sgi-arrayd",
+ 5435: "sceanics",
+ 5436: "pmip6-cntl",
+ 5437: "pmip6-data",
+ 5443: "spss",
+ 5450: "tiepie-disc",
+ 5453: "surebox",
+ 5454: "apc-5454",
+ 5455: "apc-5455",
+ 5456: "apc-5456",
+ 5461: "silkmeter",
+ 5462: "ttl-publisher",
+ 5463: "ttlpriceproxy",
+ 5464: "quailnet",
+ 5465: "netops-broker",
+ 5474: "apsolab-rpc",
+ 5500: "fcp-addr-srvr1",
+ 5501: "fcp-addr-srvr2",
+ 5502: "fcp-srvr-inst1",
+ 5503: "fcp-srvr-inst2",
+ 5504: "fcp-cics-gw1",
+ 5505: "checkoutdb",
+ 5506: "amc",
+ 5553: "sgi-eventmond",
+ 5554: "sgi-esphttp",
+ 5555: "personal-agent",
+ 5556: "freeciv",
+ 5567: "dof-dps-mc-sec",
+ 5568: "sdt",
+ 5569: "rdmnet-device",
+ 5573: "sdmmp",
+ 5580: "tmosms0",
+ 5581: "tmosms1",
+ 5582: "fac-restore",
+ 5583: "tmo-icon-sync",
+ 5584: "bis-web",
+ 5585: "bis-sync",
+ 5597: "ininmessaging",
+ 5598: "mctfeed",
+ 5599: "esinstall",
+ 5600: "esmmanager",
+ 5601: "esmagent",
+ 5602: "a1-msc",
+ 5603: "a1-bs",
+ 5604: "a3-sdunode",
+ 5605: "a4-sdunode",
+ 5627: "ninaf",
+ 5628: "htrust",
+ 5629: "symantec-sfdb",
+ 5630: "precise-comm",
+ 5631: "pcanywheredata",
+ 5632: "pcanywherestat",
+ 5633: "beorl",
+ 5634: "xprtld",
+ 5670: "zre-disc",
+ 5671: "amqps",
+ 5672: "amqp",
+ 5673: "jms",
+ 5674: "hyperscsi-port",
+ 5675: "v5ua",
+ 5676: "raadmin",
+ 5677: "questdb2-lnchr",
+ 5678: "rrac",
+ 5679: "dccm",
+ 5680: "auriga-router",
+ 5681: "ncxcp",
+ 5682: "brightcore",
+ 5683: "coap",
+ 5684: "coaps",
+ 5687: "gog-multiplayer",
+ 5688: "ggz",
+ 5689: "qmvideo",
+ 5713: "proshareaudio",
+ 5714: "prosharevideo",
+ 5715: "prosharedata",
+ 5716: "prosharerequest",
+ 5717: "prosharenotify",
+ 5718: "dpm",
+ 5719: "dpm-agent",
+ 5720: "ms-licensing",
+ 5721: "dtpt",
+ 5722: "msdfsr",
+ 5723: "omhs",
+ 5724: "omsdk",
+ 5728: "io-dist-group",
+ 5729: "openmail",
+ 5730: "unieng",
+ 5741: "ida-discover1",
+ 5742: "ida-discover2",
+ 5743: "watchdoc-pod",
+ 5744: "watchdoc",
+ 5745: "fcopy-server",
+ 5746: "fcopys-server",
+ 5747: "tunatic",
+ 5748: "tunalyzer",
+ 5750: "rscd",
+ 5755: "openmailg",
+ 5757: "x500ms",
+ 5766: "openmailns",
+ 5767: "s-openmail",
+ 5768: "openmailpxy",
+ 5769: "spramsca",
+ 5770: "spramsd",
+ 5771: "netagent",
+ 5777: "dali-port",
+ 5781: "3par-evts",
+ 5782: "3par-mgmt",
+ 5783: "3par-mgmt-ssl",
+ 5784: "ibar",
+ 5785: "3par-rcopy",
+ 5786: "cisco-redu",
+ 5787: "waascluster",
+ 5793: "xtreamx",
+ 5794: "spdp",
+ 5813: "icmpd",
+ 5814: "spt-automation",
+ 5859: "wherehoo",
+ 5863: "ppsuitemsg",
+ 5900: "rfb",
+ 5910: "cm",
+ 5911: "cpdlc",
+ 5912: "fis",
+ 5913: "ads-c",
+ 5963: "indy",
+ 5968: "mppolicy-v5",
+ 5969: "mppolicy-mgr",
+ 5984: "couchdb",
+ 5985: "wsman",
+ 5986: "wsmans",
+ 5987: "wbem-rmi",
+ 5988: "wbem-http",
+ 5989: "wbem-https",
+ 5990: "wbem-exp-https",
+ 5991: "nuxsl",
+ 5992: "consul-insight",
+ 5999: "cvsup",
+ 6064: "ndl-ahp-svc",
+ 6065: "winpharaoh",
+ 6066: "ewctsp",
+ 6069: "trip",
+ 6070: "messageasap",
+ 6071: "ssdtp",
+ 6072: "diagnose-proc",
+ 6073: "directplay8",
+ 6074: "max",
+ 6080: "gue",
+ 6081: "geneve",
+ 6082: "p25cai",
+ 6083: "miami-bcast",
+ 6085: "konspire2b",
+ 6086: "pdtp",
+ 6087: "ldss",
+ 6088: "doglms-notify",
+ 6100: "synchronet-db",
+ 6101: "synchronet-rtc",
+ 6102: "synchronet-upd",
+ 6103: "rets",
+ 6104: "dbdb",
+ 6105: "primaserver",
+ 6106: "mpsserver",
+ 6107: "etc-control",
+ 6108: "sercomm-scadmin",
+ 6109: "globecast-id",
+ 6110: "softcm",
+ 6111: "spc",
+ 6112: "dtspcd",
+ 6118: "tipc",
+ 6122: "bex-webadmin",
+ 6123: "backup-express",
+ 6124: "pnbs",
+ 6133: "nbt-wol",
+ 6140: "pulsonixnls",
+ 6141: "meta-corp",
+ 6142: "aspentec-lm",
+ 6143: "watershed-lm",
+ 6144: "statsci1-lm",
+ 6145: "statsci2-lm",
+ 6146: "lonewolf-lm",
+ 6147: "montage-lm",
+ 6148: "ricardo-lm",
+ 6149: "tal-pod",
+ 6160: "ecmp-data",
+ 6161: "patrol-ism",
+ 6162: "patrol-coll",
+ 6163: "pscribe",
+ 6200: "lm-x",
+ 6201: "thermo-calc",
+ 6209: "qmtps",
+ 6222: "radmind",
+ 6241: "jeol-nsddp-1",
+ 6242: "jeol-nsddp-2",
+ 6243: "jeol-nsddp-3",
+ 6244: "jeol-nsddp-4",
+ 6251: "tl1-raw-ssl",
+ 6252: "tl1-ssh",
+ 6253: "crip",
+ 6268: "grid",
+ 6269: "grid-alt",
+ 6300: "bmc-grx",
+ 6301: "bmc-ctd-ldap",
+ 6306: "ufmp",
+ 6315: "scup-disc",
+ 6316: "abb-escp",
+ 6317: "nav-data",
+ 6320: "repsvc",
+ 6321: "emp-server1",
+ 6322: "emp-server2",
+ 6324: "hrd-ns-disc",
+ 6343: "sflow",
+ 6346: "gnutella-svc",
+ 6347: "gnutella-rtr",
+ 6350: "adap",
+ 6355: "pmcs",
+ 6360: "metaedit-mu",
+ 6363: "ndn",
+ 6370: "metaedit-se",
+ 6382: "metatude-mds",
+ 6389: "clariion-evr01",
+ 6390: "metaedit-ws",
+ 6417: "faxcomservice",
+ 6419: "svdrp-disc",
+ 6420: "nim-vdrshell",
+ 6421: "nim-wan",
+ 6443: "sun-sr-https",
+ 6444: "sge-qmaster",
+ 6445: "sge-execd",
+ 6446: "mysql-proxy",
+ 6455: "skip-cert-recv",
+ 6456: "skip-cert-send",
+ 6464: "ieee11073-20701",
+ 6471: "lvision-lm",
+ 6480: "sun-sr-http",
+ 6481: "servicetags",
+ 6482: "ldoms-mgmt",
+ 6483: "SunVTS-RMI",
+ 6484: "sun-sr-jms",
+ 6485: "sun-sr-iiop",
+ 6486: "sun-sr-iiops",
+ 6487: "sun-sr-iiop-aut",
+ 6488: "sun-sr-jmx",
+ 6489: "sun-sr-admin",
+ 6500: "boks",
+ 6501: "boks-servc",
+ 6502: "boks-servm",
+ 6503: "boks-clntd",
+ 6505: "badm-priv",
+ 6506: "badm-pub",
+ 6507: "bdir-priv",
+ 6508: "bdir-pub",
+ 6509: "mgcs-mfp-port",
+ 6510: "mcer-port",
+ 6511: "dccp-udp",
+ 6514: "syslog-tls",
+ 6515: "elipse-rec",
+ 6543: "lds-distrib",
+ 6544: "lds-dump",
+ 6547: "apc-6547",
+ 6548: "apc-6548",
+ 6549: "apc-6549",
+ 6550: "fg-sysupdate",
+ 6551: "sum",
+ 6558: "xdsxdm",
+ 6566: "sane-port",
+ 6568: "rp-reputation",
+ 6579: "affiliate",
+ 6580: "parsec-master",
+ 6581: "parsec-peer",
+ 6582: "parsec-game",
+ 6583: "joaJewelSuite",
+ 6619: "odette-ftps",
+ 6620: "kftp-data",
+ 6621: "kftp",
+ 6622: "mcftp",
+ 6623: "ktelnet",
+ 6626: "wago-service",
+ 6627: "nexgen",
+ 6628: "afesc-mc",
+ 6629: "nexgen-aux",
+ 6633: "cisco-vpath-tun",
+ 6634: "mpls-pm",
+ 6635: "mpls-udp",
+ 6636: "mpls-udp-dtls",
+ 6653: "openflow",
+ 6657: "palcom-disc",
+ 6670: "vocaltec-gold",
+ 6671: "p4p-portal",
+ 6672: "vision-server",
+ 6673: "vision-elmd",
+ 6678: "vfbp-disc",
+ 6679: "osaut",
+ 6689: "tsa",
+ 6696: "babel",
+ 6701: "kti-icad-srvr",
+ 6702: "e-design-net",
+ 6703: "e-design-web",
+ 6714: "ibprotocol",
+ 6715: "fibotrader-com",
+ 6767: "bmc-perf-agent",
+ 6768: "bmc-perf-mgrd",
+ 6769: "adi-gxp-srvprt",
+ 6770: "plysrv-http",
+ 6771: "plysrv-https",
+ 6784: "bfd-lag",
+ 6785: "dgpf-exchg",
+ 6786: "smc-jmx",
+ 6787: "smc-admin",
+ 6788: "smc-http",
+ 6790: "hnmp",
+ 6791: "hnm",
+ 6801: "acnet",
+ 6831: "ambit-lm",
+ 6841: "netmo-default",
+ 6842: "netmo-http",
+ 6850: "iccrushmore",
+ 6868: "acctopus-st",
+ 6888: "muse",
+ 6935: "ethoscan",
+ 6936: "xsmsvc",
+ 6946: "bioserver",
+ 6951: "otlp",
+ 6961: "jmact3",
+ 6962: "jmevt2",
+ 6963: "swismgr1",
+ 6964: "swismgr2",
+ 6965: "swistrap",
+ 6966: "swispol",
+ 6969: "acmsoda",
+ 6997: "MobilitySrv",
+ 6998: "iatp-highpri",
+ 6999: "iatp-normalpri",
+ 7000: "afs3-fileserver",
+ 7001: "afs3-callback",
+ 7002: "afs3-prserver",
+ 7003: "afs3-vlserver",
+ 7004: "afs3-kaserver",
+ 7005: "afs3-volser",
+ 7006: "afs3-errors",
+ 7007: "afs3-bos",
+ 7008: "afs3-update",
+ 7009: "afs3-rmtsys",
+ 7010: "ups-onlinet",
+ 7011: "talon-disc",
+ 7012: "talon-engine",
+ 7013: "microtalon-dis",
+ 7014: "microtalon-com",
+ 7015: "talon-webserver",
+ 7016: "spg",
+ 7017: "grasp",
+ 7019: "doceri-view",
+ 7020: "dpserve",
+ 7021: "dpserveadmin",
+ 7022: "ctdp",
+ 7023: "ct2nmcs",
+ 7024: "vmsvc",
+ 7025: "vmsvc-2",
+ 7030: "op-probe",
+ 7040: "quest-disc",
+ 7070: "arcp",
+ 7071: "iwg1",
+ 7080: "empowerid",
+ 7088: "zixi-transport",
+ 7095: "jdp-disc",
+ 7099: "lazy-ptop",
+ 7100: "font-service",
+ 7101: "elcn",
+ 7107: "aes-x170",
+ 7121: "virprot-lm",
+ 7128: "scenidm",
+ 7129: "scenccs",
+ 7161: "cabsm-comm",
+ 7162: "caistoragemgr",
+ 7163: "cacsambroker",
+ 7164: "fsr",
+ 7165: "doc-server",
+ 7166: "aruba-server",
+ 7169: "ccag-pib",
+ 7170: "nsrp",
+ 7171: "drm-production",
+ 7174: "clutild",
+ 7181: "janus-disc",
+ 7200: "fodms",
+ 7201: "dlip",
+ 7227: "ramp",
+ 7235: "aspcoordination",
+ 7244: "frc-hicp-disc",
+ 7262: "cnap",
+ 7272: "watchme-7272",
+ 7273: "oma-rlp",
+ 7274: "oma-rlp-s",
+ 7275: "oma-ulp",
+ 7276: "oma-ilp",
+ 7277: "oma-ilp-s",
+ 7278: "oma-dcdocbs",
+ 7279: "ctxlic",
+ 7280: "itactionserver1",
+ 7281: "itactionserver2",
+ 7282: "mzca-alert",
+ 7365: "lcm-server",
+ 7391: "mindfilesys",
+ 7392: "mrssrendezvous",
+ 7393: "nfoldman",
+ 7394: "fse",
+ 7395: "winqedit",
+ 7397: "hexarc",
+ 7400: "rtps-discovery",
+ 7401: "rtps-dd-ut",
+ 7402: "rtps-dd-mt",
+ 7410: "ionixnetmon",
+ 7411: "daqstream",
+ 7421: "mtportmon",
+ 7426: "pmdmgr",
+ 7427: "oveadmgr",
+ 7428: "ovladmgr",
+ 7429: "opi-sock",
+ 7430: "xmpv7",
+ 7431: "pmd",
+ 7437: "faximum",
+ 7443: "oracleas-https",
+ 7473: "rise",
+ 7491: "telops-lmd",
+ 7500: "silhouette",
+ 7501: "ovbus",
+ 7510: "ovhpas",
+ 7511: "pafec-lm",
+ 7542: "saratoga",
+ 7543: "atul",
+ 7544: "nta-ds",
+ 7545: "nta-us",
+ 7546: "cfs",
+ 7547: "cwmp",
+ 7548: "tidp",
+ 7549: "nls-tl",
+ 7550: "cloudsignaling",
+ 7560: "sncp",
+ 7566: "vsi-omega",
+ 7570: "aries-kfinder",
+ 7574: "coherence-disc",
+ 7588: "sun-lm",
+ 7606: "mipi-debug",
+ 7624: "indi",
+ 7627: "soap-http",
+ 7628: "zen-pawn",
+ 7629: "xdas",
+ 7633: "pmdfmgt",
+ 7648: "cuseeme",
+ 7674: "imqtunnels",
+ 7675: "imqtunnel",
+ 7676: "imqbrokerd",
+ 7677: "sun-user-https",
+ 7680: "pando-pub",
+ 7689: "collaber",
+ 7697: "klio",
+ 7707: "sync-em7",
+ 7708: "scinet",
+ 7720: "medimageportal",
+ 7724: "nsdeepfreezectl",
+ 7725: "nitrogen",
+ 7726: "freezexservice",
+ 7727: "trident-data",
+ 7728: "osvr",
+ 7734: "smip",
+ 7738: "aiagent",
+ 7741: "scriptview",
+ 7743: "sstp-1",
+ 7744: "raqmon-pdu",
+ 7747: "prgp",
+ 7777: "cbt",
+ 7778: "interwise",
+ 7779: "vstat",
+ 7781: "accu-lmgr",
+ 7784: "s-bfd",
+ 7786: "minivend",
+ 7787: "popup-reminders",
+ 7789: "office-tools",
+ 7794: "q3ade",
+ 7797: "pnet-conn",
+ 7798: "pnet-enc",
+ 7799: "altbsdp",
+ 7800: "asr",
+ 7801: "ssp-client",
+ 7802: "vns-tp",
+ 7810: "rbt-wanopt",
+ 7845: "apc-7845",
+ 7846: "apc-7846",
+ 7872: "mipv6tls",
+ 7880: "pss",
+ 7887: "ubroker",
+ 7900: "mevent",
+ 7901: "tnos-sp",
+ 7902: "tnos-dp",
+ 7903: "tnos-dps",
+ 7913: "qo-secure",
+ 7932: "t2-drm",
+ 7933: "t2-brm",
+ 7962: "generalsync",
+ 7967: "supercell",
+ 7979: "micromuse-ncps",
+ 7980: "quest-vista",
+ 7982: "sossd-disc",
+ 7998: "usicontentpush",
+ 7999: "irdmi2",
+ 8000: "irdmi",
+ 8001: "vcom-tunnel",
+ 8002: "teradataordbms",
+ 8003: "mcreport",
+ 8005: "mxi",
+ 8006: "wpl-disc",
+ 8007: "warppipe",
+ 8008: "http-alt",
+ 8019: "qbdb",
+ 8020: "intu-ec-svcdisc",
+ 8021: "intu-ec-client",
+ 8022: "oa-system",
+ 8025: "ca-audit-da",
+ 8026: "ca-audit-ds",
+ 8032: "pro-ed",
+ 8033: "mindprint",
+ 8034: "vantronix-mgmt",
+ 8040: "ampify",
+ 8041: "enguity-xccetp",
+ 8052: "senomix01",
+ 8053: "senomix02",
+ 8054: "senomix03",
+ 8055: "senomix04",
+ 8056: "senomix05",
+ 8057: "senomix06",
+ 8058: "senomix07",
+ 8059: "senomix08",
+ 8060: "aero",
+ 8074: "gadugadu",
+ 8080: "http-alt",
+ 8081: "sunproxyadmin",
+ 8082: "us-cli",
+ 8083: "us-srv",
+ 8086: "d-s-n",
+ 8087: "simplifymedia",
+ 8088: "radan-http",
+ 8097: "sac",
+ 8100: "xprint-server",
+ 8115: "mtl8000-matrix",
+ 8116: "cp-cluster",
+ 8118: "privoxy",
+ 8121: "apollo-data",
+ 8122: "apollo-admin",
+ 8128: "paycash-online",
+ 8129: "paycash-wbp",
+ 8130: "indigo-vrmi",
+ 8131: "indigo-vbcp",
+ 8132: "dbabble",
+ 8148: "isdd",
+ 8149: "eor-game",
+ 8160: "patrol",
+ 8161: "patrol-snmp",
+ 8182: "vmware-fdm",
+ 8184: "itach",
+ 8192: "spytechphone",
+ 8194: "blp1",
+ 8195: "blp2",
+ 8199: "vvr-data",
+ 8200: "trivnet1",
+ 8201: "trivnet2",
+ 8202: "aesop",
+ 8204: "lm-perfworks",
+ 8205: "lm-instmgr",
+ 8206: "lm-dta",
+ 8207: "lm-sserver",
+ 8208: "lm-webwatcher",
+ 8230: "rexecj",
+ 8231: "hncp-udp-port",
+ 8232: "hncp-dtls-port",
+ 8243: "synapse-nhttps",
+ 8276: "pando-sec",
+ 8280: "synapse-nhttp",
+ 8282: "libelle-disc",
+ 8292: "blp3",
+ 8294: "blp4",
+ 8300: "tmi",
+ 8301: "amberon",
+ 8320: "tnp-discover",
+ 8321: "tnp",
+ 8322: "garmin-marine",
+ 8351: "server-find",
+ 8376: "cruise-enum",
+ 8377: "cruise-swroute",
+ 8378: "cruise-config",
+ 8379: "cruise-diags",
+ 8380: "cruise-update",
+ 8383: "m2mservices",
+ 8384: "marathontp",
+ 8400: "cvd",
+ 8401: "sabarsd",
+ 8402: "abarsd",
+ 8403: "admind",
+ 8416: "espeech",
+ 8417: "espeech-rtp",
+ 8442: "cybro-a-bus",
+ 8443: "pcsync-https",
+ 8444: "pcsync-http",
+ 8445: "copy-disc",
+ 8450: "npmp",
+ 8472: "otv",
+ 8473: "vp2p",
+ 8474: "noteshare",
+ 8500: "fmtp",
+ 8501: "cmtp-av",
+ 8503: "lsp-self-ping",
+ 8554: "rtsp-alt",
+ 8555: "d-fence",
+ 8567: "dof-tunnel",
+ 8600: "asterix",
+ 8609: "canon-cpp-disc",
+ 8610: "canon-mfnp",
+ 8611: "canon-bjnp1",
+ 8612: "canon-bjnp2",
+ 8613: "canon-bjnp3",
+ 8614: "canon-bjnp4",
+ 8675: "msi-cps-rm-disc",
+ 8686: "sun-as-jmxrmi",
+ 8732: "dtp-net",
+ 8733: "ibus",
+ 8763: "mc-appserver",
+ 8764: "openqueue",
+ 8765: "ultraseek-http",
+ 8766: "amcs",
+ 8770: "dpap",
+ 8786: "msgclnt",
+ 8787: "msgsrvr",
+ 8793: "acd-pm",
+ 8800: "sunwebadmin",
+ 8804: "truecm",
+ 8805: "pfcp",
+ 8808: "ssports-bcast",
+ 8873: "dxspider",
+ 8880: "cddbp-alt",
+ 8883: "secure-mqtt",
+ 8888: "ddi-udp-1",
+ 8889: "ddi-udp-2",
+ 8890: "ddi-udp-3",
+ 8891: "ddi-udp-4",
+ 8892: "ddi-udp-5",
+ 8893: "ddi-udp-6",
+ 8894: "ddi-udp-7",
+ 8899: "ospf-lite",
+ 8900: "jmb-cds1",
+ 8901: "jmb-cds2",
+ 8910: "manyone-http",
+ 8911: "manyone-xml",
+ 8912: "wcbackup",
+ 8913: "dragonfly",
+ 8954: "cumulus-admin",
+ 8980: "nod-provider",
+ 8981: "nod-client",
+ 8989: "sunwebadmins",
+ 8990: "http-wmap",
+ 8991: "https-wmap",
+ 8999: "bctp",
+ 9000: "cslistener",
+ 9001: "etlservicemgr",
+ 9002: "dynamid",
+ 9007: "ogs-client",
+ 9009: "pichat",
+ 9020: "tambora",
+ 9021: "panagolin-ident",
+ 9022: "paragent",
+ 9023: "swa-1",
+ 9024: "swa-2",
+ 9025: "swa-3",
+ 9026: "swa-4",
+ 9060: "CardWeb-RT",
+ 9080: "glrpc",
+ 9084: "aurora",
+ 9085: "ibm-rsyscon",
+ 9086: "net2display",
+ 9087: "classic",
+ 9088: "sqlexec",
+ 9089: "sqlexec-ssl",
+ 9090: "websm",
+ 9091: "xmltec-xmlmail",
+ 9092: "XmlIpcRegSvc",
+ 9100: "hp-pdl-datastr",
+ 9101: "bacula-dir",
+ 9102: "bacula-fd",
+ 9103: "bacula-sd",
+ 9104: "peerwire",
+ 9105: "xadmin",
+ 9106: "astergate-disc",
+ 9119: "mxit",
+ 9131: "dddp",
+ 9160: "apani1",
+ 9161: "apani2",
+ 9162: "apani3",
+ 9163: "apani4",
+ 9164: "apani5",
+ 9191: "sun-as-jpda",
+ 9200: "wap-wsp",
+ 9201: "wap-wsp-wtp",
+ 9202: "wap-wsp-s",
+ 9203: "wap-wsp-wtp-s",
+ 9204: "wap-vcard",
+ 9205: "wap-vcal",
+ 9206: "wap-vcard-s",
+ 9207: "wap-vcal-s",
+ 9208: "rjcdb-vcards",
+ 9209: "almobile-system",
+ 9210: "oma-mlp",
+ 9211: "oma-mlp-s",
+ 9212: "serverviewdbms",
+ 9213: "serverstart",
+ 9214: "ipdcesgbs",
+ 9215: "insis",
+ 9216: "acme",
+ 9217: "fsc-port",
+ 9222: "teamcoherence",
+ 9255: "mon",
+ 9277: "traingpsdata",
+ 9278: "pegasus",
+ 9279: "pegasus-ctl",
+ 9280: "pgps",
+ 9281: "swtp-port1",
+ 9282: "swtp-port2",
+ 9283: "callwaveiam",
+ 9284: "visd",
+ 9285: "n2h2server",
+ 9286: "n2receive",
+ 9287: "cumulus",
+ 9292: "armtechdaemon",
+ 9293: "storview",
+ 9294: "armcenterhttp",
+ 9295: "armcenterhttps",
+ 9300: "vrace",
+ 9318: "secure-ts",
+ 9321: "guibase",
+ 9343: "mpidcmgr",
+ 9344: "mphlpdmc",
+ 9346: "ctechlicensing",
+ 9374: "fjdmimgr",
+ 9380: "boxp",
+ 9396: "fjinvmgr",
+ 9397: "mpidcagt",
+ 9400: "sec-t4net-srv",
+ 9401: "sec-t4net-clt",
+ 9402: "sec-pc2fax-srv",
+ 9418: "git",
+ 9443: "tungsten-https",
+ 9444: "wso2esb-console",
+ 9450: "sntlkeyssrvr",
+ 9500: "ismserver",
+ 9522: "sma-spw",
+ 9535: "mngsuite",
+ 9536: "laes-bf",
+ 9555: "trispen-sra",
+ 9592: "ldgateway",
+ 9593: "cba8",
+ 9594: "msgsys",
+ 9595: "pds",
+ 9596: "mercury-disc",
+ 9597: "pd-admin",
+ 9598: "vscp",
+ 9599: "robix",
+ 9600: "micromuse-ncpw",
+ 9612: "streamcomm-ds",
+ 9618: "condor",
+ 9628: "odbcpathway",
+ 9629: "uniport",
+ 9632: "mc-comm",
+ 9667: "xmms2",
+ 9668: "tec5-sdctp",
+ 9694: "client-wakeup",
+ 9695: "ccnx",
+ 9700: "board-roar",
+ 9747: "l5nas-parchan",
+ 9750: "board-voip",
+ 9753: "rasadv",
+ 9762: "tungsten-http",
+ 9800: "davsrc",
+ 9801: "sstp-2",
+ 9802: "davsrcs",
+ 9875: "sapv1",
+ 9878: "kca-service",
+ 9888: "cyborg-systems",
+ 9889: "gt-proxy",
+ 9898: "monkeycom",
+ 9899: "sctp-tunneling",
+ 9900: "iua",
+ 9901: "enrp",
+ 9903: "multicast-ping",
+ 9909: "domaintime",
+ 9911: "sype-transport",
+ 9950: "apc-9950",
+ 9951: "apc-9951",
+ 9952: "apc-9952",
+ 9953: "acis",
+ 9955: "alljoyn-mcm",
+ 9956: "alljoyn",
+ 9966: "odnsp",
+ 9987: "dsm-scm-target",
+ 9990: "osm-appsrvr",
+ 9991: "osm-oev",
+ 9992: "palace-1",
+ 9993: "palace-2",
+ 9994: "palace-3",
+ 9995: "palace-4",
+ 9996: "palace-5",
+ 9997: "palace-6",
+ 9998: "distinct32",
+ 9999: "distinct",
+ 10000: "ndmp",
+ 10001: "scp-config",
+ 10002: "documentum",
+ 10003: "documentum-s",
+ 10007: "mvs-capacity",
+ 10008: "octopus",
+ 10009: "swdtp-sv",
+ 10023: "cefd-vmp",
+ 10050: "zabbix-agent",
+ 10051: "zabbix-trapper",
+ 10080: "amanda",
+ 10081: "famdc",
+ 10100: "itap-ddtp",
+ 10101: "ezmeeting-2",
+ 10102: "ezproxy-2",
+ 10103: "ezrelay",
+ 10104: "swdtp",
+ 10107: "bctp-server",
+ 10110: "nmea-0183",
+ 10111: "nmea-onenet",
+ 10113: "netiq-endpoint",
+ 10114: "netiq-qcheck",
+ 10115: "netiq-endpt",
+ 10116: "netiq-voipa",
+ 10117: "iqrm",
+ 10128: "bmc-perf-sd",
+ 10160: "qb-db-server",
+ 10161: "snmpdtls",
+ 10162: "snmpdtls-trap",
+ 10200: "trisoap",
+ 10201: "rscs",
+ 10252: "apollo-relay",
+ 10253: "eapol-relay",
+ 10260: "axis-wimp-port",
+ 10288: "blocks",
+ 10439: "bngsync",
+ 10500: "hip-nat-t",
+ 10540: "MOS-lower",
+ 10541: "MOS-upper",
+ 10542: "MOS-aux",
+ 10543: "MOS-soap",
+ 10544: "MOS-soap-opt",
+ 10800: "gap",
+ 10805: "lpdg",
+ 10810: "nmc-disc",
+ 10860: "helix",
+ 10880: "bveapi",
+ 10990: "rmiaux",
+ 11000: "irisa",
+ 11001: "metasys",
+ 11095: "weave",
+ 11106: "sgi-lk",
+ 11108: "myq-termlink",
+ 11111: "vce",
+ 11112: "dicom",
+ 11161: "suncacao-snmp",
+ 11162: "suncacao-jmxmp",
+ 11163: "suncacao-rmi",
+ 11164: "suncacao-csa",
+ 11165: "suncacao-websvc",
+ 11171: "snss",
+ 11201: "smsqp",
+ 11208: "wifree",
+ 11211: "memcache",
+ 11319: "imip",
+ 11320: "imip-channels",
+ 11321: "arena-server",
+ 11367: "atm-uhas",
+ 11371: "hkp",
+ 11430: "lsdp",
+ 11600: "tempest-port",
+ 11720: "h323callsigalt",
+ 11723: "emc-xsw-dcache",
+ 11751: "intrepid-ssl",
+ 11796: "lanschool-mpt",
+ 11876: "xoraya",
+ 11877: "x2e-disc",
+ 11967: "sysinfo-sp",
+ 12000: "entextxid",
+ 12001: "entextnetwk",
+ 12002: "entexthigh",
+ 12003: "entextmed",
+ 12004: "entextlow",
+ 12005: "dbisamserver1",
+ 12006: "dbisamserver2",
+ 12007: "accuracer",
+ 12008: "accuracer-dbms",
+ 12009: "ghvpn",
+ 12012: "vipera",
+ 12013: "vipera-ssl",
+ 12109: "rets-ssl",
+ 12121: "nupaper-ss",
+ 12168: "cawas",
+ 12172: "hivep",
+ 12300: "linogridengine",
+ 12321: "warehouse-sss",
+ 12322: "warehouse",
+ 12345: "italk",
+ 12753: "tsaf",
+ 13160: "i-zipqd",
+ 13216: "bcslogc",
+ 13217: "rs-pias",
+ 13218: "emc-vcas-udp",
+ 13223: "powwow-client",
+ 13224: "powwow-server",
+ 13400: "doip-disc",
+ 13720: "bprd",
+ 13721: "bpdbm",
+ 13722: "bpjava-msvc",
+ 13724: "vnetd",
+ 13782: "bpcd",
+ 13783: "vopied",
+ 13785: "nbdb",
+ 13786: "nomdb",
+ 13818: "dsmcc-config",
+ 13819: "dsmcc-session",
+ 13820: "dsmcc-passthru",
+ 13821: "dsmcc-download",
+ 13822: "dsmcc-ccp",
+ 13894: "ucontrol",
+ 13929: "dta-systems",
+ 14000: "scotty-ft",
+ 14001: "sua",
+ 14002: "scotty-disc",
+ 14033: "sage-best-com1",
+ 14034: "sage-best-com2",
+ 14141: "vcs-app",
+ 14142: "icpp",
+ 14145: "gcm-app",
+ 14149: "vrts-tdd",
+ 14154: "vad",
+ 14250: "cps",
+ 14414: "ca-web-update",
+ 14936: "hde-lcesrvr-1",
+ 14937: "hde-lcesrvr-2",
+ 15000: "hydap",
+ 15118: "v2g-secc",
+ 15345: "xpilot",
+ 15363: "3link",
+ 15555: "cisco-snat",
+ 15660: "bex-xr",
+ 15740: "ptp",
+ 15998: "2ping",
+ 16003: "alfin",
+ 16161: "sun-sea-port",
+ 16309: "etb4j",
+ 16310: "pduncs",
+ 16311: "pdefmns",
+ 16360: "netserialext1",
+ 16361: "netserialext2",
+ 16367: "netserialext3",
+ 16368: "netserialext4",
+ 16384: "connected",
+ 16666: "vtp",
+ 16900: "newbay-snc-mc",
+ 16950: "sgcip",
+ 16991: "intel-rci-mp",
+ 16992: "amt-soap-http",
+ 16993: "amt-soap-https",
+ 16994: "amt-redir-tcp",
+ 16995: "amt-redir-tls",
+ 17007: "isode-dua",
+ 17185: "soundsvirtual",
+ 17219: "chipper",
+ 17220: "avtp",
+ 17221: "avdecc",
+ 17222: "cpsp",
+ 17224: "trdp-pd",
+ 17225: "trdp-md",
+ 17234: "integrius-stp",
+ 17235: "ssh-mgmt",
+ 17500: "db-lsp-disc",
+ 17729: "ea",
+ 17754: "zep",
+ 17755: "zigbee-ip",
+ 17756: "zigbee-ips",
+ 18000: "biimenu",
+ 18181: "opsec-cvp",
+ 18182: "opsec-ufp",
+ 18183: "opsec-sam",
+ 18184: "opsec-lea",
+ 18185: "opsec-omi",
+ 18186: "ohsc",
+ 18187: "opsec-ela",
+ 18241: "checkpoint-rtm",
+ 18262: "gv-pf",
+ 18463: "ac-cluster",
+ 18634: "rds-ib",
+ 18635: "rds-ip",
+ 18668: "vdmmesh-disc",
+ 18769: "ique",
+ 18881: "infotos",
+ 18888: "apc-necmp",
+ 19000: "igrid",
+ 19007: "scintilla",
+ 19191: "opsec-uaa",
+ 19194: "ua-secureagent",
+ 19220: "cora-disc",
+ 19283: "keysrvr",
+ 19315: "keyshadow",
+ 19398: "mtrgtrans",
+ 19410: "hp-sco",
+ 19411: "hp-sca",
+ 19412: "hp-sessmon",
+ 19539: "fxuptp",
+ 19540: "sxuptp",
+ 19541: "jcp",
+ 19788: "mle",
+ 19999: "dnp-sec",
+ 20000: "dnp",
+ 20001: "microsan",
+ 20002: "commtact-http",
+ 20003: "commtact-https",
+ 20005: "openwebnet",
+ 20012: "ss-idi-disc",
+ 20014: "opendeploy",
+ 20034: "nburn-id",
+ 20046: "tmophl7mts",
+ 20048: "mountd",
+ 20049: "nfsrdma",
+ 20167: "tolfab",
+ 20202: "ipdtp-port",
+ 20222: "ipulse-ics",
+ 20480: "emwavemsg",
+ 20670: "track",
+ 20999: "athand-mmp",
+ 21000: "irtrans",
+ 21554: "dfserver",
+ 21590: "vofr-gateway",
+ 21800: "tvpm",
+ 21845: "webphone",
+ 21846: "netspeak-is",
+ 21847: "netspeak-cs",
+ 21848: "netspeak-acd",
+ 21849: "netspeak-cps",
+ 22000: "snapenetio",
+ 22001: "optocontrol",
+ 22002: "optohost002",
+ 22003: "optohost003",
+ 22004: "optohost004",
+ 22005: "optohost004",
+ 22273: "wnn6",
+ 22305: "cis",
+ 22335: "shrewd-stream",
+ 22343: "cis-secure",
+ 22347: "wibukey",
+ 22350: "codemeter",
+ 22555: "vocaltec-phone",
+ 22763: "talikaserver",
+ 22800: "aws-brf",
+ 22951: "brf-gw",
+ 23000: "inovaport1",
+ 23001: "inovaport2",
+ 23002: "inovaport3",
+ 23003: "inovaport4",
+ 23004: "inovaport5",
+ 23005: "inovaport6",
+ 23272: "s102",
+ 23294: "5afe-disc",
+ 23333: "elxmgmt",
+ 23400: "novar-dbase",
+ 23401: "novar-alarm",
+ 23402: "novar-global",
+ 24000: "med-ltp",
+ 24001: "med-fsp-rx",
+ 24002: "med-fsp-tx",
+ 24003: "med-supp",
+ 24004: "med-ovw",
+ 24005: "med-ci",
+ 24006: "med-net-svc",
+ 24242: "filesphere",
+ 24249: "vista-4gl",
+ 24321: "ild",
+ 24322: "hid",
+ 24386: "intel-rci",
+ 24465: "tonidods",
+ 24554: "binkp",
+ 24577: "bilobit-update",
+ 24676: "canditv",
+ 24677: "flashfiler",
+ 24678: "proactivate",
+ 24680: "tcc-http",
+ 24850: "assoc-disc",
+ 24922: "find",
+ 25000: "icl-twobase1",
+ 25001: "icl-twobase2",
+ 25002: "icl-twobase3",
+ 25003: "icl-twobase4",
+ 25004: "icl-twobase5",
+ 25005: "icl-twobase6",
+ 25006: "icl-twobase7",
+ 25007: "icl-twobase8",
+ 25008: "icl-twobase9",
+ 25009: "icl-twobase10",
+ 25793: "vocaltec-hos",
+ 25900: "tasp-net",
+ 25901: "niobserver",
+ 25902: "nilinkanalyst",
+ 25903: "niprobe",
+ 25954: "bf-game",
+ 25955: "bf-master",
+ 26000: "quake",
+ 26133: "scscp",
+ 26208: "wnn6-ds",
+ 26260: "ezproxy",
+ 26261: "ezmeeting",
+ 26262: "k3software-svr",
+ 26263: "k3software-cli",
+ 26486: "exoline-udp",
+ 26487: "exoconfig",
+ 26489: "exonet",
+ 27345: "imagepump",
+ 27442: "jesmsjc",
+ 27504: "kopek-httphead",
+ 27782: "ars-vista",
+ 27999: "tw-auth-key",
+ 28000: "nxlmd",
+ 28119: "a27-ran-ran",
+ 28200: "voxelstorm",
+ 28240: "siemensgsm",
+ 29167: "otmp",
+ 30001: "pago-services1",
+ 30002: "pago-services2",
+ 30003: "amicon-fpsu-ra",
+ 30004: "amicon-fpsu-s",
+ 30260: "kingdomsonline",
+ 30832: "samsung-disc",
+ 30999: "ovobs",
+ 31016: "ka-kdp",
+ 31029: "yawn",
+ 31416: "xqosd",
+ 31457: "tetrinet",
+ 31620: "lm-mon",
+ 31765: "gamesmith-port",
+ 31948: "iceedcp-tx",
+ 31949: "iceedcp-rx",
+ 32034: "iracinghelper",
+ 32249: "t1distproc60",
+ 32483: "apm-link",
+ 32635: "sec-ntb-clnt",
+ 32636: "DMExpress",
+ 32767: "filenet-powsrm",
+ 32768: "filenet-tms",
+ 32769: "filenet-rpc",
+ 32770: "filenet-nch",
+ 32771: "filenet-rmi",
+ 32772: "filenet-pa",
+ 32773: "filenet-cm",
+ 32774: "filenet-re",
+ 32775: "filenet-pch",
+ 32776: "filenet-peior",
+ 32777: "filenet-obrok",
+ 32801: "mlsn",
+ 32896: "idmgratm",
+ 33123: "aurora-balaena",
+ 33331: "diamondport",
+ 33334: "speedtrace-disc",
+ 33434: "traceroute",
+ 33656: "snip-slave",
+ 34249: "turbonote-2",
+ 34378: "p-net-local",
+ 34379: "p-net-remote",
+ 34567: "edi_service",
+ 34962: "profinet-rt",
+ 34963: "profinet-rtm",
+ 34964: "profinet-cm",
+ 34980: "ethercat",
+ 35001: "rt-viewer",
+ 35004: "rt-classmanager",
+ 35100: "axio-disc",
+ 35355: "altova-lm-disc",
+ 36001: "allpeers",
+ 36411: "wlcp",
+ 36865: "kastenxpipe",
+ 37475: "neckar",
+ 37654: "unisys-eportal",
+ 38002: "crescoctrl-disc",
+ 38201: "galaxy7-data",
+ 38202: "fairview",
+ 38203: "agpolicy",
+ 39681: "turbonote-1",
+ 40000: "safetynetp",
+ 40023: "k-patentssensor",
+ 40841: "cscp",
+ 40842: "csccredir",
+ 40843: "csccfirewall",
+ 40853: "ortec-disc",
+ 41111: "fs-qos",
+ 41230: "z-wave-s",
+ 41794: "crestron-cip",
+ 41795: "crestron-ctp",
+ 42508: "candp",
+ 42509: "candrp",
+ 42510: "caerpc",
+ 43000: "recvr-rc-disc",
+ 43188: "reachout",
+ 43189: "ndm-agent-port",
+ 43190: "ip-provision",
+ 43210: "shaperai-disc",
+ 43439: "eq3-config",
+ 43440: "ew-disc-cmd",
+ 43441: "ciscocsdb",
+ 44321: "pmcd",
+ 44322: "pmcdproxy",
+ 44544: "domiq",
+ 44553: "rbr-debug",
+ 44600: "asihpi",
+ 44818: "EtherNet-IP-2",
+ 44900: "m3da-disc",
+ 45000: "asmp-mon",
+ 45054: "invision-ag",
+ 45514: "cloudcheck-ping",
+ 45678: "eba",
+ 45825: "qdb2service",
+ 45966: "ssr-servermgr",
+ 46999: "mediabox",
+ 47000: "mbus",
+ 47100: "jvl-mactalk",
+ 47557: "dbbrowse",
+ 47624: "directplaysrvr",
+ 47806: "ap",
+ 47808: "bacnet",
+ 47809: "presonus-ucnet",
+ 48000: "nimcontroller",
+ 48001: "nimspooler",
+ 48002: "nimhub",
+ 48003: "nimgtw",
+ 48128: "isnetserv",
+ 48129: "blp5",
+ 48556: "com-bardac-dw",
+ 48619: "iqobject",
+ 48653: "robotraconteur",
+ 49001: "nusdp-disc",
+}
+var sctpPortNames = map[SCTPPort]string{
+ 9: "discard",
+ 20: "ftp-data",
+ 21: "ftp",
+ 22: "ssh",
+ 80: "http",
+ 179: "bgp",
+ 443: "https",
+ 1021: "exp1",
+ 1022: "exp2",
+ 1167: "cisco-ipsla",
+ 1720: "h323hostcall",
+ 2049: "nfs",
+ 2225: "rcip-itu",
+ 2904: "m2ua",
+ 2905: "m3ua",
+ 2944: "megaco-h248",
+ 2945: "h248-binary",
+ 3097: "itu-bicc-stc",
+ 3565: "m2pa",
+ 3863: "asap-sctp",
+ 3864: "asap-sctp-tls",
+ 3868: "diameter",
+ 4333: "ahsp",
+ 4502: "a25-fap-fgw",
+ 4711: "trinity-dist",
+ 4739: "ipfix",
+ 4740: "ipfixs",
+ 5060: "sip",
+ 5061: "sips",
+ 5090: "car",
+ 5091: "cxtp",
+ 5215: "noteza",
+ 5445: "smbdirect",
+ 5672: "amqp",
+ 5675: "v5ua",
+ 5868: "diameters",
+ 5910: "cm",
+ 5911: "cpdlc",
+ 5912: "fis",
+ 5913: "ads-c",
+ 6704: "frc-hp",
+ 6705: "frc-mp",
+ 6706: "frc-lp",
+ 6970: "conductor-mpx",
+ 7626: "simco",
+ 7701: "nfapi",
+ 7728: "osvr",
+ 8471: "pim-port",
+ 9082: "lcs-ap",
+ 9084: "aurora",
+ 9900: "iua",
+ 9901: "enrp-sctp",
+ 9902: "enrp-sctp-tls",
+ 11997: "wmereceiving",
+ 11998: "wmedistribution",
+ 11999: "wmereporting",
+ 14001: "sua",
+ 20049: "nfsrdma",
+ 25471: "rna",
+ 29118: "sgsap",
+ 29168: "sbcap",
+ 29169: "iuhsctpassoc",
+ 30100: "rwp",
+ 36412: "s1-control",
+ 36422: "x2-control",
+ 36423: "slmap",
+ 36424: "nq-ap",
+ 36443: "m2ap",
+ 36444: "m3ap",
+ 36462: "xw-control",
+ 38412: "ng-control",
+ 38422: "xn-control",
+ 38472: "f1-control",
+}
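+
+// A minimal lookup sketch (illustrative only, not part of the generated API;
+// the helper name is hypothetical): the maps above are keyed by the typed
+// port values defined elsewhere in this package, so a caller can resolve a
+// registered name and fall back to the numeric value otherwise.
+//
+//	func sctpPortName(p SCTPPort) string {
+//		if name, ok := sctpPortNames[p]; ok {
+//			return name
+//		}
+//		return strconv.Itoa(int(p))
+//	}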
diff --git a/vendor/github.com/google/gopacket/layers/icmp4.go b/vendor/github.com/google/gopacket/layers/icmp4.go
new file mode 100644
index 0000000..bd3f03f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp4.go
@@ -0,0 +1,267 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/gopacket"
+)
+
+const (
+ ICMPv4TypeEchoReply = 0
+ ICMPv4TypeDestinationUnreachable = 3
+ ICMPv4TypeSourceQuench = 4
+ ICMPv4TypeRedirect = 5
+ ICMPv4TypeEchoRequest = 8
+ ICMPv4TypeRouterAdvertisement = 9
+ ICMPv4TypeRouterSolicitation = 10
+ ICMPv4TypeTimeExceeded = 11
+ ICMPv4TypeParameterProblem = 12
+ ICMPv4TypeTimestampRequest = 13
+ ICMPv4TypeTimestampReply = 14
+ ICMPv4TypeInfoRequest = 15
+ ICMPv4TypeInfoReply = 16
+ ICMPv4TypeAddressMaskRequest = 17
+ ICMPv4TypeAddressMaskReply = 18
+)
+
+const (
+ // DestinationUnreachable
+ ICMPv4CodeNet = 0
+ ICMPv4CodeHost = 1
+ ICMPv4CodeProtocol = 2
+ ICMPv4CodePort = 3
+ ICMPv4CodeFragmentationNeeded = 4
+ ICMPv4CodeSourceRoutingFailed = 5
+ ICMPv4CodeNetUnknown = 6
+ ICMPv4CodeHostUnknown = 7
+ ICMPv4CodeSourceIsolated = 8
+ ICMPv4CodeNetAdminProhibited = 9
+ ICMPv4CodeHostAdminProhibited = 10
+ ICMPv4CodeNetTOS = 11
+ ICMPv4CodeHostTOS = 12
+ ICMPv4CodeCommAdminProhibited = 13
+ ICMPv4CodeHostPrecedence = 14
+ ICMPv4CodePrecedenceCutoff = 15
+
+ // TimeExceeded
+ ICMPv4CodeTTLExceeded = 0
+ ICMPv4CodeFragmentReassemblyTimeExceeded = 1
+
+ // ParameterProblem
+ ICMPv4CodePointerIndicatesError = 0
+ ICMPv4CodeMissingOption = 1
+ ICMPv4CodeBadLength = 2
+
+ // Redirect
+ // ICMPv4CodeNet = same as for DestinationUnreachable
+ // ICMPv4CodeHost = same as for DestinationUnreachable
+ ICMPv4CodeTOSNet = 2
+ ICMPv4CodeTOSHost = 3
+)
+
+type icmpv4TypeCodeInfoStruct struct {
+ typeStr string
+ codeStr *map[uint8]string
+}
+
+var (
+ icmpv4TypeCodeInfo = map[uint8]icmpv4TypeCodeInfoStruct{
+ ICMPv4TypeDestinationUnreachable: icmpv4TypeCodeInfoStruct{
+ "DestinationUnreachable", &map[uint8]string{
+ ICMPv4CodeNet: "Net",
+ ICMPv4CodeHost: "Host",
+ ICMPv4CodeProtocol: "Protocol",
+ ICMPv4CodePort: "Port",
+ ICMPv4CodeFragmentationNeeded: "FragmentationNeeded",
+ ICMPv4CodeSourceRoutingFailed: "SourceRoutingFailed",
+ ICMPv4CodeNetUnknown: "NetUnknown",
+ ICMPv4CodeHostUnknown: "HostUnknown",
+ ICMPv4CodeSourceIsolated: "SourceIsolated",
+ ICMPv4CodeNetAdminProhibited: "NetAdminProhibited",
+ ICMPv4CodeHostAdminProhibited: "HostAdminProhibited",
+ ICMPv4CodeNetTOS: "NetTOS",
+ ICMPv4CodeHostTOS: "HostTOS",
+ ICMPv4CodeCommAdminProhibited: "CommAdminProhibited",
+ ICMPv4CodeHostPrecedence: "HostPrecedence",
+ ICMPv4CodePrecedenceCutoff: "PrecedenceCutoff",
+ },
+ },
+ ICMPv4TypeTimeExceeded: icmpv4TypeCodeInfoStruct{
+ "TimeExceeded", &map[uint8]string{
+ ICMPv4CodeTTLExceeded: "TTLExceeded",
+ ICMPv4CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+ },
+ },
+ ICMPv4TypeParameterProblem: icmpv4TypeCodeInfoStruct{
+ "ParameterProblem", &map[uint8]string{
+ ICMPv4CodePointerIndicatesError: "PointerIndicatesError",
+ ICMPv4CodeMissingOption: "MissingOption",
+ ICMPv4CodeBadLength: "BadLength",
+ },
+ },
+ ICMPv4TypeSourceQuench: icmpv4TypeCodeInfoStruct{
+ "SourceQuench", nil,
+ },
+ ICMPv4TypeRedirect: icmpv4TypeCodeInfoStruct{
+ "Redirect", &map[uint8]string{
+ ICMPv4CodeNet: "Net",
+ ICMPv4CodeHost: "Host",
+ ICMPv4CodeTOSNet: "TOS+Net",
+ ICMPv4CodeTOSHost: "TOS+Host",
+ },
+ },
+ ICMPv4TypeEchoRequest: icmpv4TypeCodeInfoStruct{
+ "EchoRequest", nil,
+ },
+ ICMPv4TypeEchoReply: icmpv4TypeCodeInfoStruct{
+ "EchoReply", nil,
+ },
+ ICMPv4TypeTimestampRequest: icmpv4TypeCodeInfoStruct{
+ "TimestampRequest", nil,
+ },
+ ICMPv4TypeTimestampReply: icmpv4TypeCodeInfoStruct{
+ "TimestampReply", nil,
+ },
+ ICMPv4TypeInfoRequest: icmpv4TypeCodeInfoStruct{
+ "InfoRequest", nil,
+ },
+ ICMPv4TypeInfoReply: icmpv4TypeCodeInfoStruct{
+ "InfoReply", nil,
+ },
+ ICMPv4TypeRouterSolicitation: icmpv4TypeCodeInfoStruct{
+ "RouterSolicitation", nil,
+ },
+ ICMPv4TypeRouterAdvertisement: icmpv4TypeCodeInfoStruct{
+ "RouterAdvertisement", nil,
+ },
+ ICMPv4TypeAddressMaskRequest: icmpv4TypeCodeInfoStruct{
+ "AddressMaskRequest", nil,
+ },
+ ICMPv4TypeAddressMaskReply: icmpv4TypeCodeInfoStruct{
+ "AddressMaskReply", nil,
+ },
+ }
+)
+
+type ICMPv4TypeCode uint16
+
+// Type returns the ICMPv4 type field.
+func (a ICMPv4TypeCode) Type() uint8 {
+ return uint8(a >> 8)
+}
+
+// Code returns the ICMPv4 code field.
+func (a ICMPv4TypeCode) Code() uint8 {
+ return uint8(a)
+}
+
+func (a ICMPv4TypeCode) String() string {
+ t, c := a.Type(), a.Code()
+ strInfo, ok := icmpv4TypeCodeInfo[t]
+ if !ok {
+ // Unknown ICMPv4 type field
+ return fmt.Sprintf("%d(%d)", t, c)
+ }
+ typeStr := strInfo.typeStr
+ if strInfo.codeStr == nil && c == 0 {
+ // The ICMPv4 type does not make use of the code field
+ return fmt.Sprintf("%s", strInfo.typeStr)
+ }
+ if strInfo.codeStr == nil && c != 0 {
+ // The ICMPv4 type does not make use of the code field, but it is present anyway
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ codeStr, ok := (*strInfo.codeStr)[c]
+ if !ok {
+ // We don't know this ICMPv4 code; print the numerical value
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+func (a ICMPv4TypeCode) GoString() string {
+ t := reflect.TypeOf(a)
+ return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv4TypeCode value to the 'bytes' buffer.
+func (a ICMPv4TypeCode) SerializeTo(bytes []byte) {
+ binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv4TypeCode is a convenience function to create an ICMPv4TypeCode
+// gopacket type from the ICMPv4 type and code values.
+func CreateICMPv4TypeCode(typ uint8, code uint8) ICMPv4TypeCode {
+ return ICMPv4TypeCode(binary.BigEndian.Uint16([]byte{typ, code}))
+}
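+
+// A small usage sketch (comments only; the values follow directly from the
+// definitions above): the type and code occupy the high and low byte of the
+// combined value.
+//
+//	tc := CreateICMPv4TypeCode(ICMPv4TypeEchoRequest, 0)
+//	tc.Type()   // 8
+//	tc.Code()   // 0
+//	tc.String() // "EchoRequest"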
+
+// ICMPv4 is the layer for IPv4 ICMP packet data.
+type ICMPv4 struct {
+ BaseLayer
+ TypeCode ICMPv4TypeCode
+ Checksum uint16
+ Id uint16
+ Seq uint16
+}
+
+// LayerType returns LayerTypeICMPv4.
+func (i *ICMPv4) LayerType() gopacket.LayerType { return LayerTypeICMPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less then 8 bytes for ICMPv4 packet")
+ }
+ i.TypeCode = CreateICMPv4TypeCode(data[0], data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.Id = binary.BigEndian.Uint16(data[4:6])
+ i.Seq = binary.BigEndian.Uint16(data[6:8])
+ i.BaseLayer = BaseLayer{data[:8], data[8:]}
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ i.TypeCode.SerializeTo(bytes)
+ binary.BigEndian.PutUint16(bytes[4:], i.Id)
+ binary.BigEndian.PutUint16(bytes[6:], i.Seq)
+ if opts.ComputeChecksums {
+ bytes[2] = 0
+ bytes[3] = 0
+ i.Checksum = tcpipChecksum(b.Bytes(), 0)
+ }
+ binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+ return nil
+}
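+
+// A minimal serialization sketch (comments only, assuming the gopacket
+// SerializeLayers helper; the Id/Seq values are arbitrary): with
+// ComputeChecksums set, SerializeTo fills in the checksum over the whole
+// ICMPv4 message, including the payload already in the buffer.
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{ComputeChecksums: true}
+//	icmp := &ICMPv4{TypeCode: CreateICMPv4TypeCode(ICMPv4TypeEchoRequest, 0), Id: 1, Seq: 1}
+//	err := gopacket.SerializeLayers(buf, opts, icmp, gopacket.Payload([]byte("ping")))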
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv4) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv4) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func decodeICMPv4(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv4{}
+ return decodingLayerDecoder(i, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/icmp6.go b/vendor/github.com/google/gopacket/layers/icmp6.go
new file mode 100644
index 0000000..09afd11
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6.go
@@ -0,0 +1,266 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/gopacket"
+)
+
+const (
+ // The following are from RFC 4443
+ ICMPv6TypeDestinationUnreachable = 1
+ ICMPv6TypePacketTooBig = 2
+ ICMPv6TypeTimeExceeded = 3
+ ICMPv6TypeParameterProblem = 4
+ ICMPv6TypeEchoRequest = 128
+ ICMPv6TypeEchoReply = 129
+
+ // The following are from RFC 4861
+ ICMPv6TypeRouterSolicitation = 133
+ ICMPv6TypeRouterAdvertisement = 134
+ ICMPv6TypeNeighborSolicitation = 135
+ ICMPv6TypeNeighborAdvertisement = 136
+ ICMPv6TypeRedirect = 137
+
+ // The following are from RFC 2710
+ ICMPv6TypeMLDv1MulticastListenerQueryMessage = 130
+ ICMPv6TypeMLDv1MulticastListenerReportMessage = 131
+ ICMPv6TypeMLDv1MulticastListenerDoneMessage = 132
+
+ // The following are from RFC 3810
+ ICMPv6TypeMLDv2MulticastListenerReportMessageV2 = 143
+)
+
+const (
+ // DestinationUnreachable
+ ICMPv6CodeNoRouteToDst = 0
+ ICMPv6CodeAdminProhibited = 1
+ ICMPv6CodeBeyondScopeOfSrc = 2
+ ICMPv6CodeAddressUnreachable = 3
+ ICMPv6CodePortUnreachable = 4
+ ICMPv6CodeSrcAddressFailedPolicy = 5
+ ICMPv6CodeRejectRouteToDst = 6
+
+ // TimeExceeded
+ ICMPv6CodeHopLimitExceeded = 0
+ ICMPv6CodeFragmentReassemblyTimeExceeded = 1
+
+ // ParameterProblem
+ ICMPv6CodeErroneousHeaderField = 0
+ ICMPv6CodeUnrecognizedNextHeader = 1
+ ICMPv6CodeUnrecognizedIPv6Option = 2
+)
+
+type icmpv6TypeCodeInfoStruct struct {
+ typeStr string
+ codeStr *map[uint8]string
+}
+
+var (
+ icmpv6TypeCodeInfo = map[uint8]icmpv6TypeCodeInfoStruct{
+ ICMPv6TypeDestinationUnreachable: icmpv6TypeCodeInfoStruct{
+ "DestinationUnreachable", &map[uint8]string{
+ ICMPv6CodeNoRouteToDst: "NoRouteToDst",
+ ICMPv6CodeAdminProhibited: "AdminProhibited",
+ ICMPv6CodeBeyondScopeOfSrc: "BeyondScopeOfSrc",
+ ICMPv6CodeAddressUnreachable: "AddressUnreachable",
+ ICMPv6CodePortUnreachable: "PortUnreachable",
+ ICMPv6CodeSrcAddressFailedPolicy: "SrcAddressFailedPolicy",
+ ICMPv6CodeRejectRouteToDst: "RejectRouteToDst",
+ },
+ },
+ ICMPv6TypePacketTooBig: icmpv6TypeCodeInfoStruct{
+ "PacketTooBig", nil,
+ },
+ ICMPv6TypeTimeExceeded: icmpv6TypeCodeInfoStruct{
+ "TimeExceeded", &map[uint8]string{
+ ICMPv6CodeHopLimitExceeded: "HopLimitExceeded",
+ ICMPv6CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+ },
+ },
+ ICMPv6TypeParameterProblem: icmpv6TypeCodeInfoStruct{
+ "ParameterProblem", &map[uint8]string{
+ ICMPv6CodeErroneousHeaderField: "ErroneousHeaderField",
+ ICMPv6CodeUnrecognizedNextHeader: "UnrecognizedNextHeader",
+ ICMPv6CodeUnrecognizedIPv6Option: "UnrecognizedIPv6Option",
+ },
+ },
+ ICMPv6TypeEchoRequest: icmpv6TypeCodeInfoStruct{
+ "EchoRequest", nil,
+ },
+ ICMPv6TypeEchoReply: icmpv6TypeCodeInfoStruct{
+ "EchoReply", nil,
+ },
+ ICMPv6TypeRouterSolicitation: icmpv6TypeCodeInfoStruct{
+ "RouterSolicitation", nil,
+ },
+ ICMPv6TypeRouterAdvertisement: icmpv6TypeCodeInfoStruct{
+ "RouterAdvertisement", nil,
+ },
+ ICMPv6TypeNeighborSolicitation: icmpv6TypeCodeInfoStruct{
+ "NeighborSolicitation", nil,
+ },
+ ICMPv6TypeNeighborAdvertisement: icmpv6TypeCodeInfoStruct{
+ "NeighborAdvertisement", nil,
+ },
+ ICMPv6TypeRedirect: icmpv6TypeCodeInfoStruct{
+ "Redirect", nil,
+ },
+ }
+)
+
+type ICMPv6TypeCode uint16
+
+// Type returns the ICMPv6 type field.
+func (a ICMPv6TypeCode) Type() uint8 {
+ return uint8(a >> 8)
+}
+
+// Code returns the ICMPv6 code field.
+func (a ICMPv6TypeCode) Code() uint8 {
+ return uint8(a)
+}
+
+func (a ICMPv6TypeCode) String() string {
+ t, c := a.Type(), a.Code()
+ strInfo, ok := icmpv6TypeCodeInfo[t]
+ if !ok {
+ // Unknown ICMPv6 type field
+ return fmt.Sprintf("%d(%d)", t, c)
+ }
+ typeStr := strInfo.typeStr
+ if strInfo.codeStr == nil && c == 0 {
+ // The ICMPv6 type does not make use of the code field
+ return fmt.Sprintf("%s", strInfo.typeStr)
+ }
+ if strInfo.codeStr == nil && c != 0 {
+ // The ICMPv6 type does not make use of the code field, but it is present anyway
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ codeStr, ok := (*strInfo.codeStr)[c]
+ if !ok {
+ // We don't know this ICMPv6 code; print the numerical value
+ return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+ }
+ return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+func (a ICMPv6TypeCode) GoString() string {
+ t := reflect.TypeOf(a)
+ return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv6TypeCode value to the 'bytes' buffer.
+func (a ICMPv6TypeCode) SerializeTo(bytes []byte) {
+ binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv6TypeCode is a convenience function to create an ICMPv6TypeCode
+// gopacket type from the ICMPv6 type and code values.
+func CreateICMPv6TypeCode(typ uint8, code uint8) ICMPv6TypeCode {
+ return ICMPv6TypeCode(binary.BigEndian.Uint16([]byte{typ, code}))
+}
+
+// ICMPv6 is the layer for IPv6 ICMP packet data
+type ICMPv6 struct {
+ BaseLayer
+ TypeCode ICMPv6TypeCode
+ Checksum uint16
+ // TypeBytes is deprecated and always nil. See the different ICMPv6 message types
+ // instead (e.g. ICMPv6TypeRouterSolicitation).
+ TypeBytes []byte
+ tcpipchecksum
+}
+
+// LayerType returns LayerTypeICMPv6.
+func (i *ICMPv6) LayerType() gopacket.LayerType { return LayerTypeICMPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 packet")
+ }
+ i.TypeCode = CreateICMPv6TypeCode(data[0], data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.BaseLayer = BaseLayer{data[:4], data[4:]}
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ i.TypeCode.SerializeTo(bytes)
+
+ if opts.ComputeChecksums {
+ bytes[2] = 0
+ bytes[3] = 0
+ csum, err := i.computeChecksum(b.Bytes(), IPProtocolICMPv6)
+ if err != nil {
+ return err
+ }
+ i.Checksum = csum
+ }
+ binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6) NextLayerType() gopacket.LayerType {
+ switch i.TypeCode.Type() {
+ case ICMPv6TypeEchoRequest:
+ return LayerTypeICMPv6Echo
+ case ICMPv6TypeEchoReply:
+ return LayerTypeICMPv6Echo
+ case ICMPv6TypeRouterSolicitation:
+ return LayerTypeICMPv6RouterSolicitation
+ case ICMPv6TypeRouterAdvertisement:
+ return LayerTypeICMPv6RouterAdvertisement
+ case ICMPv6TypeNeighborSolicitation:
+ return LayerTypeICMPv6NeighborSolicitation
+ case ICMPv6TypeNeighborAdvertisement:
+ return LayerTypeICMPv6NeighborAdvertisement
+ case ICMPv6TypeRedirect:
+ return LayerTypeICMPv6Redirect
+	case ICMPv6TypeMLDv1MulticastListenerQueryMessage: // MLDv1 and MLDv2 Queries share the same ICMPv6 type value
+		// Only the payload size distinguishes the two query versions.
+		if len(i.Payload) > 20 {
+			return LayerTypeMLDv2MulticastListenerQuery
+		}
+		return LayerTypeMLDv1MulticastListenerQuery
+ case ICMPv6TypeMLDv1MulticastListenerDoneMessage:
+ return LayerTypeMLDv1MulticastListenerDone
+ case ICMPv6TypeMLDv1MulticastListenerReportMessage:
+ return LayerTypeMLDv1MulticastListenerReport
+ case ICMPv6TypeMLDv2MulticastListenerReportMessageV2:
+ return LayerTypeMLDv2MulticastListenerReport
+ }
+
+ return gopacket.LayerTypePayload
+}
+
+func decodeICMPv6(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6{}
+ return decodingLayerDecoder(i, data, p)
+}
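+
+// A hedged end-to-end sketch of consuming this layer through the public
+// gopacket API (names as exported by this package):
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeEthernet, gopacket.Default)
+//	if l := packet.Layer(LayerTypeICMPv6); l != nil {
+//		icmp := l.(*ICMPv6)
+//		fmt.Println(icmp.TypeCode) // e.g. "EchoRequest"
+//	}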
diff --git a/vendor/github.com/google/gopacket/layers/icmp6msg.go b/vendor/github.com/google/gopacket/layers/icmp6msg.go
new file mode 100644
index 0000000..d9268db
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6msg.go
@@ -0,0 +1,578 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
+// Based on RFC 4861
+
+// ICMPv6Opt indicates how to decode the data associated with each ICMPv6Option.
+type ICMPv6Opt uint8
+
+const (
+ _ ICMPv6Opt = iota
+
+ // ICMPv6OptSourceAddress contains the link-layer address of the sender of
+ // the packet. It is used in the Neighbor Solicitation, Router
+ // Solicitation, and Router Advertisement packets. Must be ignored for other
+	// Neighbor Discovery messages.
+ ICMPv6OptSourceAddress
+
+ // ICMPv6OptTargetAddress contains the link-layer address of the target. It
+ // is used in Neighbor Advertisement and Redirect packets. Must be ignored
+	// for other Neighbor Discovery messages.
+ ICMPv6OptTargetAddress
+
+ // ICMPv6OptPrefixInfo provides hosts with on-link prefixes and prefixes
+ // for Address Autoconfiguration. The Prefix Information option appears in
+ // Router Advertisement packets and MUST be silently ignored for other
+ // messages.
+ ICMPv6OptPrefixInfo
+
+ // ICMPv6OptRedirectedHeader is used in Redirect messages and contains all
+ // or part of the packet that is being redirected.
+ ICMPv6OptRedirectedHeader
+
+ // ICMPv6OptMTU is used in Router Advertisement messages to ensure that all
+ // nodes on a link use the same MTU value in those cases where the link MTU
+ // is not well known. This option MUST be silently ignored for other
+ // Neighbor Discovery messages.
+ ICMPv6OptMTU
+)
+
+// ICMPv6Echo represents the structure of a ping.
+type ICMPv6Echo struct {
+ BaseLayer
+ Identifier uint16
+ SeqNumber uint16
+}
+
+// ICMPv6RouterSolicitation is sent by hosts to find routers.
+type ICMPv6RouterSolicitation struct {
+ BaseLayer
+ Options ICMPv6Options
+}
+
+// ICMPv6RouterAdvertisement is sent by routers, both periodically and in
+// response to a Router Solicitation.
+type ICMPv6RouterAdvertisement struct {
+ BaseLayer
+ HopLimit uint8
+ Flags uint8
+ RouterLifetime uint16
+ ReachableTime uint32
+ RetransTimer uint32
+ Options ICMPv6Options
+}
+
+// ICMPv6NeighborSolicitation is sent to request the link-layer address of a
+// target node.
+type ICMPv6NeighborSolicitation struct {
+ BaseLayer
+ TargetAddress net.IP
+ Options ICMPv6Options
+}
+
+// ICMPv6NeighborAdvertisement is sent by nodes in response to a Neighbor
+// Solicitation.
+type ICMPv6NeighborAdvertisement struct {
+ BaseLayer
+ Flags uint8
+ TargetAddress net.IP
+ Options ICMPv6Options
+}
+
+// ICMPv6Redirect is sent by routers to inform hosts of a better first-hop node
+// on the path to a destination.
+type ICMPv6Redirect struct {
+ BaseLayer
+ TargetAddress net.IP
+ DestinationAddress net.IP
+ Options ICMPv6Options
+}
+
+// ICMPv6Option contains the type and data for a single option.
+type ICMPv6Option struct {
+ Type ICMPv6Opt
+ Data []byte
+}
+
+// ICMPv6Options is a slice of ICMPv6Option.
+type ICMPv6Options []ICMPv6Option
+
+func (i ICMPv6Opt) String() string {
+ switch i {
+ case ICMPv6OptSourceAddress:
+ return "SourceAddress"
+ case ICMPv6OptTargetAddress:
+ return "TargetAddress"
+ case ICMPv6OptPrefixInfo:
+ return "PrefixInfo"
+ case ICMPv6OptRedirectedHeader:
+ return "RedirectedHeader"
+ case ICMPv6OptMTU:
+ return "MTU"
+ default:
+ return fmt.Sprintf("Unknown(%d)", i)
+ }
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6Echo) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6Echo
+}
+
+// LayerType returns LayerTypeICMPv6Echo.
+func (i *ICMPv6Echo) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6Echo
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6Echo) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Echo) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 Echo")
+ }
+ i.Identifier = binary.BigEndian.Uint16(data[0:2])
+ i.SeqNumber = binary.BigEndian.Uint16(data[2:4])
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Echo) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+
+ binary.BigEndian.PutUint16(buf, i.Identifier)
+ binary.BigEndian.PutUint16(buf[2:], i.SeqNumber)
+ return nil
+}
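+
+// A minimal serialization sketch; checksumming lives in the parent ICMPv6
+// layer, so a full ping would also prepend an ICMPv6 layer with its network
+// layer set for checksum computation:
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	echo := &ICMPv6Echo{Identifier: 0x1234, SeqNumber: 1}
+//	_ = gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, echo)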
+
+// LayerType returns LayerTypeICMPv6RouterSolicitation.
+func (i *ICMPv6RouterSolicitation) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6RouterSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterSolicitation) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ // first 4 bytes are reserved followed by options
+ if len(data) < 4 {
+ df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 router solicitation")
+ }
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[4:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+
+ copy(buf, lotsOfZeros[:4])
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterSolicitation) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6RouterSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6RouterAdvertisement.
+func (i *ICMPv6RouterAdvertisement) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6RouterAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterAdvertisement) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 12 {
+ df.SetTruncated()
+		return errors.New("ICMP layer less than 12 bytes for ICMPv6 router advertisement")
+ }
+
+ i.HopLimit = uint8(data[0])
+ // M, O bit followed by 6 reserved bits
+ i.Flags = uint8(data[1])
+ i.RouterLifetime = binary.BigEndian.Uint16(data[2:4])
+ i.ReachableTime = binary.BigEndian.Uint32(data[4:8])
+ i.RetransTimer = binary.BigEndian.Uint32(data[8:12])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[12:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(12)
+ if err != nil {
+ return err
+ }
+
+ buf[0] = byte(i.HopLimit)
+ buf[1] = byte(i.Flags)
+ binary.BigEndian.PutUint16(buf[2:], i.RouterLifetime)
+ binary.BigEndian.PutUint32(buf[4:], i.ReachableTime)
+ binary.BigEndian.PutUint32(buf[8:], i.RetransTimer)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterAdvertisement) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6RouterAdvertisement
+}
+
+// ManagedAddressConfig is true when addresses are available via DHCPv6. If
+// set, the OtherConfig flag is redundant.
+func (i *ICMPv6RouterAdvertisement) ManagedAddressConfig() bool {
+ return i.Flags&0x80 != 0
+}
+
+// OtherConfig is true when there is other configuration information available
+// via DHCPv6. For example, DNS-related information.
+func (i *ICMPv6RouterAdvertisement) OtherConfig() bool {
+ return i.Flags&0x40 != 0
+}
+
+// LayerType returns LayerTypeICMPv6NeighborSolicitation.
+func (i *ICMPv6NeighborSolicitation) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6NeighborSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborSolicitation) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for ICMPv6 neighbor solicitation")
+ }
+
+ i.TargetAddress = net.IP(data[4:20])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(20)
+ if err != nil {
+ return err
+ }
+
+ copy(buf, lotsOfZeros[:4])
+ copy(buf[4:], i.TargetAddress)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborSolicitation) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6NeighborSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6NeighborAdvertisement.
+func (i *ICMPv6NeighborAdvertisement) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborAdvertisement) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for ICMPv6 neighbor advertisement")
+ }
+
+ i.Flags = uint8(data[0])
+ i.TargetAddress = net.IP(data[4:20])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(20)
+ if err != nil {
+ return err
+ }
+
+ buf[0] = byte(i.Flags)
+ copy(buf[1:], lotsOfZeros[:3])
+ copy(buf[4:], i.TargetAddress)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborAdvertisement) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// Router indicates whether the sender is a router or not.
+func (i *ICMPv6NeighborAdvertisement) Router() bool {
+ return i.Flags&0x80 != 0
+}
+
+// Solicited indicates whether the advertisement was solicited or not.
+func (i *ICMPv6NeighborAdvertisement) Solicited() bool {
+ return i.Flags&0x40 != 0
+}
+
+// Override indicates whether the advertisement should override an existing
+// cache entry.
+func (i *ICMPv6NeighborAdvertisement) Override() bool {
+ return i.Flags&0x20 != 0
+}
+
+// LayerType returns LayerTypeICMPv6Redirect.
+func (i *ICMPv6Redirect) LayerType() gopacket.LayerType {
+ return LayerTypeICMPv6Redirect
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6Redirect) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Redirect) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 36 {
+ df.SetTruncated()
+		return errors.New("ICMP layer less than 36 bytes for ICMPv6 redirect")
+ }
+
+ i.TargetAddress = net.IP(data[4:20])
+ i.DestinationAddress = net.IP(data[20:36])
+ i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+ // truncate old options
+ i.Options = i.Options[:0]
+
+ return i.Options.DecodeFromBytes(data[36:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Redirect) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := i.Options.SerializeTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(36)
+ if err != nil {
+ return err
+ }
+
+ copy(buf, lotsOfZeros[:4])
+ copy(buf[4:], i.TargetAddress)
+ copy(buf[20:], i.DestinationAddress)
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6Redirect) CanDecode() gopacket.LayerClass {
+ return LayerTypeICMPv6Redirect
+}
+
+func (i ICMPv6Option) String() string {
+ hd := hex.EncodeToString(i.Data)
+ if len(hd) > 0 {
+ hd = " 0x" + hd
+ }
+
+ switch i.Type {
+ case ICMPv6OptSourceAddress, ICMPv6OptTargetAddress:
+ return fmt.Sprintf("ICMPv6Option(%s:%v)",
+ i.Type,
+ net.HardwareAddr(i.Data))
+ case ICMPv6OptPrefixInfo:
+ if len(i.Data) == 30 {
+ prefixLen := uint8(i.Data[0])
+ onLink := (i.Data[1]&0x80 != 0)
+ autonomous := (i.Data[1]&0x40 != 0)
+ validLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[2:6])) * time.Second
+ preferredLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[6:10])) * time.Second
+
+ prefix := net.IP(i.Data[14:])
+
+ return fmt.Sprintf("ICMPv6Option(%s:%v/%v:%t:%t:%v:%v)",
+ i.Type,
+ prefix, prefixLen,
+ onLink, autonomous,
+ validLifetime, preferredLifetime)
+ }
+ case ICMPv6OptRedirectedHeader:
+ // could invoke IP decoder on data... probably best not to
+ break
+ case ICMPv6OptMTU:
+ if len(i.Data) == 6 {
+ return fmt.Sprintf("ICMPv6Option(%s:%v)",
+ i.Type,
+ binary.BigEndian.Uint32(i.Data[2:]))
+ }
+
+ }
+ return fmt.Sprintf("ICMPv6Option(%s:%s)", i.Type, hd)
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Options) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ for len(data) > 0 {
+ if len(data) < 2 {
+ df.SetTruncated()
+			return errors.New("ICMP layer less than 2 bytes for ICMPv6 message option")
+ }
+
+ // unit is 8 octets, convert to bytes
+ length := int(data[1]) * 8
+
+ if length == 0 {
+ df.SetTruncated()
+ return errors.New("ICMPv6 message option with length 0")
+ }
+
+ if len(data) < length {
+ df.SetTruncated()
+ return fmt.Errorf("ICMP layer only %v bytes for ICMPv6 message option with length %v", len(data), length)
+ }
+
+ o := ICMPv6Option{
+ Type: ICMPv6Opt(data[0]),
+ Data: data[2:length],
+ }
+
+ // chop off option we just consumed
+ data = data[length:]
+
+ *i = append(*i, o)
+ }
+
+ return nil
+}
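+
+// A worked wire-format sketch: each option is {type, length, data...} with
+// length counted in 8-octet units that include the two header bytes, so a
+// source link-layer address option carrying a 6-byte MAC is one 8-byte unit:
+//
+//	raw := []byte{0x01, 0x01, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}
+//	var opts ICMPv6Options
+//	_ = opts.DecodeFromBytes(raw, gopacket.NilDecodeFeedback)
+//	// opts[0].Type == ICMPv6OptSourceAddress; opts[0].Data holds the MAC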
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Options) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	for _, opt := range *i {
+ length := len(opt.Data) + 2
+ buf, err := b.PrependBytes(length)
+ if err != nil {
+ return err
+ }
+
+ buf[0] = byte(opt.Type)
+ buf[1] = byte(length / 8)
+ copy(buf[2:], opt.Data)
+ }
+
+ return nil
+}
+
+func decodeICMPv6Echo(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6Echo{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterSolicitation(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6RouterSolicitation{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6RouterAdvertisement{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborSolicitation(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6NeighborSolicitation{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6NeighborAdvertisement{}
+ return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6Redirect(data []byte, p gopacket.PacketBuilder) error {
+ i := &ICMPv6Redirect{}
+ return decodingLayerDecoder(i, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/igmp.go b/vendor/github.com/google/gopacket/layers/igmp.go
new file mode 100644
index 0000000..d008415
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/igmp.go
@@ -0,0 +1,355 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
+// IGMPType is the IGMP message type, carried in the first byte of the IGMP
+// header.
+type IGMPType uint8
+
+const (
+ IGMPMembershipQuery IGMPType = 0x11 // General or group specific query
+ IGMPMembershipReportV1 IGMPType = 0x12 // Version 1 Membership Report
+ IGMPMembershipReportV2 IGMPType = 0x16 // Version 2 Membership Report
+ IGMPLeaveGroup IGMPType = 0x17 // Leave Group
+ IGMPMembershipReportV3 IGMPType = 0x22 // Version 3 Membership Report
+)
+
+// String conversions for IGMP message types
+func (i IGMPType) String() string {
+ switch i {
+ case IGMPMembershipQuery:
+ return "IGMP Membership Query"
+ case IGMPMembershipReportV1:
+ return "IGMPv1 Membership Report"
+ case IGMPMembershipReportV2:
+ return "IGMPv2 Membership Report"
+ case IGMPMembershipReportV3:
+ return "IGMPv3 Membership Report"
+ case IGMPLeaveGroup:
+ return "Leave Group"
+ default:
+ return ""
+ }
+}
+
+type IGMPv3GroupRecordType uint8
+
+const (
+ IGMPIsIn IGMPv3GroupRecordType = 0x01 // Type MODE_IS_INCLUDE, source addresses x
+ IGMPIsEx IGMPv3GroupRecordType = 0x02 // Type MODE_IS_EXCLUDE, source addresses x
+ IGMPToIn IGMPv3GroupRecordType = 0x03 // Type CHANGE_TO_INCLUDE_MODE, source addresses x
+ IGMPToEx IGMPv3GroupRecordType = 0x04 // Type CHANGE_TO_EXCLUDE_MODE, source addresses x
+ IGMPAllow IGMPv3GroupRecordType = 0x05 // Type ALLOW_NEW_SOURCES, source addresses x
+ IGMPBlock IGMPv3GroupRecordType = 0x06 // Type BLOCK_OLD_SOURCES, source addresses x
+)
+
+func (i IGMPv3GroupRecordType) String() string {
+ switch i {
+ case IGMPIsIn:
+ return "MODE_IS_INCLUDE"
+ case IGMPIsEx:
+ return "MODE_IS_EXCLUDE"
+ case IGMPToIn:
+ return "CHANGE_TO_INCLUDE_MODE"
+ case IGMPToEx:
+ return "CHANGE_TO_EXCLUDE_MODE"
+ case IGMPAllow:
+ return "ALLOW_NEW_SOURCES"
+ case IGMPBlock:
+ return "BLOCK_OLD_SOURCES"
+ default:
+ return ""
+ }
+}
+
+// IGMP represents an IGMPv3 message.
+type IGMP struct {
+ BaseLayer
+ Type IGMPType
+ MaxResponseTime time.Duration
+ Checksum uint16
+ GroupAddress net.IP
+ SupressRouterProcessing bool
+ RobustnessValue uint8
+ IntervalTime time.Duration
+ SourceAddresses []net.IP
+ NumberOfGroupRecords uint16
+ NumberOfSources uint16
+ GroupRecords []IGMPv3GroupRecord
+ Version uint8 // IGMP protocol version
+}
+
+// IGMPv1or2 stores header details for an IGMPv1 or IGMPv2 packet.
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Type | Max Resp Time | Checksum |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Group Address |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type IGMPv1or2 struct {
+ BaseLayer
+ Type IGMPType // IGMP message type
+ MaxResponseTime time.Duration // meaningful only in Membership Query messages
+ Checksum uint16 // 16-bit checksum of entire ip payload
+ GroupAddress net.IP // either 0 or an IP multicast address
+ Version uint8
+}
+
+// decodeResponse dissects an IGMPv1 or IGMPv2 packet.
+func (i *IGMPv1or2) decodeResponse(data []byte) error {
+ if len(data) < 8 {
+ return errors.New("IGMP packet too small")
+ }
+
+ i.MaxResponseTime = igmpTimeDecode(data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.GroupAddress = net.IP(data[4:8])
+
+ return nil
+}
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Type = 0x22 | Reserved | Checksum |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Reserved | Number of Group Records (M) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Group Record [1] .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Group Record [2] .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Group Record [M] .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Record Type | Aux Data Len | Number of Sources (N) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Multicast Address |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Source Address [1] |
+// +- -+
+// | Source Address [2] |
+// +- -+
+// | Source Address [N] |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . Auxiliary Data .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// IGMPv3GroupRecord stores individual group records for a V3 Membership Report message.
+type IGMPv3GroupRecord struct {
+ Type IGMPv3GroupRecordType
+ AuxDataLen uint8 // this should always be 0 as per IGMPv3 spec.
+ NumberOfSources uint16
+ MulticastAddress net.IP
+ SourceAddresses []net.IP
+ AuxData uint32 // NOT USED
+}
+
+func (i *IGMP) decodeIGMPv3MembershipReport(data []byte) error {
+ if len(data) < 8 {
+ return errors.New("IGMPv3 Membership Report too small #1")
+ }
+
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.NumberOfGroupRecords = binary.BigEndian.Uint16(data[6:8])
+
+ recordOffset := 8
+ for j := 0; j < int(i.NumberOfGroupRecords); j++ {
+ if len(data) < recordOffset+8 {
+ return errors.New("IGMPv3 Membership Report too small #2")
+ }
+
+ var gr IGMPv3GroupRecord
+ gr.Type = IGMPv3GroupRecordType(data[recordOffset])
+ gr.AuxDataLen = data[recordOffset+1]
+ gr.NumberOfSources = binary.BigEndian.Uint16(data[recordOffset+2 : recordOffset+4])
+ gr.MulticastAddress = net.IP(data[recordOffset+4 : recordOffset+8])
+
+ if len(data) < recordOffset+8+int(gr.NumberOfSources)*4 {
+ return errors.New("IGMPv3 Membership Report too small #3")
+ }
+
+ // append source address records.
+		for k := 0; k < int(gr.NumberOfSources); k++ {
+			sourceAddr := net.IP(data[recordOffset+8+k*4 : recordOffset+12+k*4])
+			gr.SourceAddresses = append(gr.SourceAddresses, sourceAddr)
+ }
+
+ i.GroupRecords = append(i.GroupRecords, gr)
+ recordOffset += 8 + 4*int(gr.NumberOfSources)
+ }
+ return nil
+}
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Type = 0x11 | Max Resp Code | Checksum |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Group Address |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Resv |S| QRV | QQIC | Number of Sources (N) |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Source Address [1] |
+// +- -+
+// | Source Address [2] |
+// +- . -+
+// | Source Address [N] |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// decodeIGMPv3MembershipQuery parses the IGMPv3 message of type 0x11
+func (i *IGMP) decodeIGMPv3MembershipQuery(data []byte) error {
+ if len(data) < 12 {
+ return errors.New("IGMPv3 Membership Query too small #1")
+ }
+
+ i.MaxResponseTime = igmpTimeDecode(data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.SupressRouterProcessing = data[8]&0x8 != 0
+ i.GroupAddress = net.IP(data[4:8])
+ i.RobustnessValue = data[8] & 0x7
+ i.IntervalTime = igmpTimeDecode(data[9])
+ i.NumberOfSources = binary.BigEndian.Uint16(data[10:12])
+
+ if len(data) < 12+int(i.NumberOfSources)*4 {
+ return errors.New("IGMPv3 Membership Query too small #2")
+ }
+
+ for j := 0; j < int(i.NumberOfSources); j++ {
+ i.SourceAddresses = append(i.SourceAddresses, net.IP(data[12+j*4:16+j*4]))
+ }
+
+ return nil
+}
+
+// igmpTimeDecode decodes the duration created by the given byte, using the
+// algorithm in http://www.rfc-base.org/txt/rfc-3376.txt section 4.1.1.
+func igmpTimeDecode(t uint8) time.Duration {
+ if t&0x80 == 0 {
+ return time.Millisecond * 100 * time.Duration(t)
+ }
+	mant := (t & 0x70) >> 4
+	exp := t & 0x0F
+	// Widen to time.Duration before shifting so that (mant|0x10) does not
+	// overflow uint8 for exponents greater than zero.
+	return time.Millisecond * 100 * (time.Duration(mant|0x10) << (exp + 3))
+}
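+
+// A worked example of the RFC 3376 "floating point" encoding: for t = 0x8F,
+// mant = 0 and exp = 15, so the duration is (0x10|0) << (15+3) = 4194304
+// units of 100 ms, roughly 4.9 days; a small code such as t = 100 is taken
+// literally as 100 * 100 ms = 10 s.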
+
+// LayerType returns LayerTypeIGMP for the V1,2,3 message protocol formats.
+func (i *IGMP) LayerType() gopacket.LayerType { return LayerTypeIGMP }
+func (i *IGMPv1or2) LayerType() gopacket.LayerType { return LayerTypeIGMP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *IGMPv1or2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ return errors.New("IGMP Packet too small")
+ }
+
+ i.Type = IGMPType(data[0])
+ i.MaxResponseTime = igmpTimeDecode(data[1])
+ i.Checksum = binary.BigEndian.Uint16(data[2:4])
+ i.GroupAddress = net.IP(data[4:8])
+
+ return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *IGMPv1or2) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *IGMPv1or2) CanDecode() gopacket.LayerClass {
+ return LayerTypeIGMP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *IGMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 1 {
+ return errors.New("IGMP packet is too small")
+ }
+
+	// Common IGMP header values shared by versions 1-3 of the IGMP specification.
+ i.Type = IGMPType(data[0])
+
+	// Propagate decode errors from the version-specific decoders.
+	switch i.Type {
+	case IGMPMembershipQuery:
+		return i.decodeIGMPv3MembershipQuery(data)
+	case IGMPMembershipReportV3:
+		return i.decodeIGMPv3MembershipReport(data)
+	default:
+		return errors.New("unsupported IGMP type")
+	}
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *IGMP) CanDecode() gopacket.LayerClass {
+ return LayerTypeIGMP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *IGMP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// decodeIGMP will parse IGMP v1, v2 or v3 packets. The IGMP type is checked
+// against byte[0]; the logic then initializes the appropriate struct (IGMP
+// or IGMPv1or2) and passes it to decodingLayerDecoder.
+func decodeIGMP(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 1 {
+ return errors.New("IGMP packet is too small")
+ }
+
+ // byte 0 contains IGMP message type.
+ switch IGMPType(data[0]) {
+ case IGMPMembershipQuery:
+ // IGMPv3 Membership Query payload is >= 12
+ if len(data) >= 12 {
+ i := &IGMP{Version: 3}
+ return decodingLayerDecoder(i, data, p)
+ } else if len(data) == 8 {
+ i := &IGMPv1or2{}
+ if data[1] == 0x00 {
+ i.Version = 1 // IGMPv1 has a query length of 8 and MaxResp = 0
+ } else {
+ i.Version = 2 // IGMPv2 has a query length of 8 and MaxResp != 0
+ }
+
+ return decodingLayerDecoder(i, data, p)
+ }
+ case IGMPMembershipReportV3:
+ i := &IGMP{Version: 3}
+ return decodingLayerDecoder(i, data, p)
+ case IGMPMembershipReportV1:
+ i := &IGMPv1or2{Version: 1}
+ return decodingLayerDecoder(i, data, p)
+ case IGMPLeaveGroup, IGMPMembershipReportV2:
+		// Leave Group and v2 Membership Report are used in IGMPv2 only.
+ i := &IGMPv1or2{Version: 2}
+ return decodingLayerDecoder(i, data, p)
+	}
+
+	return errors.New("unable to determine IGMP type")
+}
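+
+// A hedged disambiguation sketch for queries: an 8-byte {0x11, 0x00, ...}
+// packet decodes as an IGMPv1or2 with Version 1 (Max Resp Time is always 0
+// in IGMPv1), the same 8-byte query with a nonzero second byte decodes with
+// Version 2, and a query of 12 bytes or more decodes as an IGMP with
+// Version 3.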
diff --git a/vendor/github.com/google/gopacket/layers/ip4.go b/vendor/github.com/google/gopacket/layers/ip4.go
new file mode 100644
index 0000000..2b3c0c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip4.go
@@ -0,0 +1,325 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
+// IPv4Flag holds the flag bits of the IPv4 header (evil, don't-fragment,
+// more-fragments).
+type IPv4Flag uint8
+
+const (
+ IPv4EvilBit IPv4Flag = 1 << 2 // http://tools.ietf.org/html/rfc3514 ;)
+ IPv4DontFragment IPv4Flag = 1 << 1
+ IPv4MoreFragments IPv4Flag = 1 << 0
+)
+
+func (f IPv4Flag) String() string {
+ var s []string
+ if f&IPv4EvilBit != 0 {
+ s = append(s, "Evil")
+ }
+ if f&IPv4DontFragment != 0 {
+ s = append(s, "DF")
+ }
+ if f&IPv4MoreFragments != 0 {
+ s = append(s, "MF")
+ }
+ return strings.Join(s, "|")
+}
+
+// IPv4 is the header of an IP packet.
+type IPv4 struct {
+ BaseLayer
+ Version uint8
+ IHL uint8
+ TOS uint8
+ Length uint16
+ Id uint16
+ Flags IPv4Flag
+ FragOffset uint16
+ TTL uint8
+ Protocol IPProtocol
+ Checksum uint16
+ SrcIP net.IP
+ DstIP net.IP
+ Options []IPv4Option
+ Padding []byte
+}
+
+// LayerType returns LayerTypeIPv4
+func (i *IPv4) LayerType() gopacket.LayerType { return LayerTypeIPv4 }
+
+// NetworkFlow returns a flow of (EndpointIPv4, SrcIP, DstIP).
+func (i *IPv4) NetworkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointIPv4, i.SrcIP, i.DstIP)
+}
+
+type IPv4Option struct {
+ OptionType uint8
+ OptionLength uint8
+ OptionData []byte
+}
+
+func (i IPv4Option) String() string {
+ return fmt.Sprintf("IPv4Option(%v:%v)", i.OptionType, i.OptionData)
+}
+
+// getIPv4OptionSize returns the number of bytes (including padding) that the
+// current IPv4 options occupy.
+func (ip *IPv4) getIPv4OptionSize() uint8 {
+ optionSize := uint8(0)
+ for _, opt := range ip.Options {
+ switch opt.OptionType {
+ case 0:
+ // this is the end of option lists
+ optionSize++
+ case 1:
+ // this is the padding
+ optionSize++
+ default:
+ optionSize += opt.OptionLength
+
+ }
+ }
+ // make sure the options are aligned to 32 bit boundary
+ if (optionSize % 4) != 0 {
+ optionSize += 4 - (optionSize % 4)
+ }
+ return optionSize
+}
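+
+// A worked example: a single options entry with OptionLength 3 contributes
+// 3 bytes, which the alignment step rounds up to 4; an entry with
+// OptionLength 7 contributes 7 bytes, rounded up to 8.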
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (ip *IPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ optionLength := ip.getIPv4OptionSize()
+ bytes, err := b.PrependBytes(20 + int(optionLength))
+ if err != nil {
+ return err
+ }
+ if opts.FixLengths {
+ ip.IHL = 5 + (optionLength / 4)
+ ip.Length = uint16(len(b.Bytes()))
+ }
+ bytes[0] = (ip.Version << 4) | ip.IHL
+ bytes[1] = ip.TOS
+ binary.BigEndian.PutUint16(bytes[2:], ip.Length)
+ binary.BigEndian.PutUint16(bytes[4:], ip.Id)
+ binary.BigEndian.PutUint16(bytes[6:], ip.flagsfrags())
+ bytes[8] = ip.TTL
+ bytes[9] = byte(ip.Protocol)
+ if err := ip.AddressTo4(); err != nil {
+ return err
+ }
+ copy(bytes[12:16], ip.SrcIP)
+ copy(bytes[16:20], ip.DstIP)
+
+ curLocation := 20
+ // Now, we will encode the options
+ for _, opt := range ip.Options {
+ switch opt.OptionType {
+ case 0:
+ // this is the end of option lists
+ bytes[curLocation] = 0
+ curLocation++
+ case 1:
+ // this is the padding
+ bytes[curLocation] = 1
+ curLocation++
+ default:
+ bytes[curLocation] = opt.OptionType
+ bytes[curLocation+1] = opt.OptionLength
+
+ // sanity checking to protect us from buffer overrun
+ if len(opt.OptionData) > int(opt.OptionLength-2) {
+ return errors.New("option length is smaller than length of option data")
+ }
+ copy(bytes[curLocation+2:curLocation+int(opt.OptionLength)], opt.OptionData)
+ curLocation += int(opt.OptionLength)
+ }
+ }
+
+ if opts.ComputeChecksums {
+ ip.Checksum = checksum(bytes)
+ }
+ binary.BigEndian.PutUint16(bytes[10:], ip.Checksum)
+ return nil
+}
+
+func checksum(bytes []byte) uint16 {
+ // Clear checksum bytes
+ bytes[10] = 0
+ bytes[11] = 0
+
+ // Compute checksum
+ var csum uint32
+ for i := 0; i < len(bytes); i += 2 {
+ csum += uint32(bytes[i]) << 8
+ csum += uint32(bytes[i+1])
+ }
+ for {
+		// Break when the sum is less than or equal to 0xFFFF
+ if csum <= 65535 {
+ break
+ }
+ // Add carry to the sum
+ csum = (csum >> 16) + uint32(uint16(csum))
+ }
+ // Flip all the bits
+ return ^uint16(csum)
+}
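+
+// A worked example of the RFC 1071 fold: summing the 16-bit words 0xFFFF
+// and 0x0002 gives 0x10001; adding the carry back in yields 0x0001 + 0x0001
+// = 0x0002, and the final one's complement is ^0x0002 = 0xFFFD.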
+
+func (ip *IPv4) flagsfrags() (ff uint16) {
+ ff |= uint16(ip.Flags) << 13
+ ff |= ip.FragOffset
+ return
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (ip *IPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid ip4 header. Length %d less than 20", len(data))
+ }
+ flagsfrags := binary.BigEndian.Uint16(data[6:8])
+
+ ip.Version = uint8(data[0]) >> 4
+ ip.IHL = uint8(data[0]) & 0x0F
+ ip.TOS = data[1]
+ ip.Length = binary.BigEndian.Uint16(data[2:4])
+ ip.Id = binary.BigEndian.Uint16(data[4:6])
+ ip.Flags = IPv4Flag(flagsfrags >> 13)
+ ip.FragOffset = flagsfrags & 0x1FFF
+ ip.TTL = data[8]
+ ip.Protocol = IPProtocol(data[9])
+ ip.Checksum = binary.BigEndian.Uint16(data[10:12])
+ ip.SrcIP = data[12:16]
+ ip.DstIP = data[16:20]
+ ip.Options = ip.Options[:0]
+ ip.Padding = nil
+ // Set up an initial guess for contents/payload... we'll reset these soon.
+ ip.BaseLayer = BaseLayer{Contents: data}
+
+	// This code is added for the following environment:
+ // * Windows 10 with TSO option activated. ( tested on Hyper-V, RealTek ethernet driver )
+ if ip.Length == 0 {
+ // If using TSO(TCP Segmentation Offload), length is zero.
+ // The actual packet length is the length of data.
+ ip.Length = uint16(len(data))
+ }
+
+ if ip.Length < 20 {
+ return fmt.Errorf("Invalid (too small) IP length (%d < 20)", ip.Length)
+ } else if ip.IHL < 5 {
+ return fmt.Errorf("Invalid (too small) IP header length (%d < 5)", ip.IHL)
+ } else if int(ip.IHL*4) > int(ip.Length) {
+ return fmt.Errorf("Invalid IP header length > IP length (%d > %d)", ip.IHL, ip.Length)
+ }
+ if cmp := len(data) - int(ip.Length); cmp > 0 {
+ data = data[:ip.Length]
+ } else if cmp < 0 {
+ df.SetTruncated()
+ if int(ip.IHL)*4 > len(data) {
+ return errors.New("Not all IP header bytes available")
+ }
+ }
+ ip.Contents = data[:ip.IHL*4]
+ ip.Payload = data[ip.IHL*4:]
+ // From here on, data contains the header options.
+ data = data[20 : ip.IHL*4]
+ // Pull out IP options
+ for len(data) > 0 {
+ if ip.Options == nil {
+ // Pre-allocate to avoid growing the slice too much.
+ ip.Options = make([]IPv4Option, 0, 4)
+ }
+ opt := IPv4Option{OptionType: data[0]}
+ switch opt.OptionType {
+ case 0: // End of options
+ opt.OptionLength = 1
+ ip.Options = append(ip.Options, opt)
+ ip.Padding = data[1:]
+ return nil
+ case 1: // 1 byte padding
+ opt.OptionLength = 1
+ data = data[1:]
+ ip.Options = append(ip.Options, opt)
+ default:
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid ip4 option length. Length %d less than 2", len(data))
+ }
+ opt.OptionLength = data[1]
+ if len(data) < int(opt.OptionLength) {
+ df.SetTruncated()
+ return fmt.Errorf("IP option length exceeds remaining IP header size, option type %v length %v", opt.OptionType, opt.OptionLength)
+ }
+ if opt.OptionLength <= 2 {
+ return fmt.Errorf("Invalid IP option type %v length %d. Must be greater than 2", opt.OptionType, opt.OptionLength)
+ }
+ opt.OptionData = data[2:opt.OptionLength]
+ data = data[opt.OptionLength:]
+ ip.Options = append(ip.Options, opt)
+ }
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *IPv4) CanDecode() gopacket.LayerClass {
+ return LayerTypeIPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *IPv4) NextLayerType() gopacket.LayerType {
+ if i.Flags&IPv4MoreFragments != 0 || i.FragOffset != 0 {
+ return gopacket.LayerTypeFragment
+ }
+ return i.Protocol.LayerType()
+}
+
+func decodeIPv4(data []byte, p gopacket.PacketBuilder) error {
+ ip := &IPv4{}
+ err := ip.DecodeFromBytes(data, p)
+ p.AddLayer(ip)
+ p.SetNetworkLayer(ip)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(ip.NextLayerType())
+}
+
+func checkIPv4Address(addr net.IP) (net.IP, error) {
+ if c := addr.To4(); c != nil {
+ return c, nil
+ }
+ if len(addr) == net.IPv6len {
+ return nil, errors.New("address is IPv6")
+ }
+ return nil, fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv4len)
+}
+
+// AddressTo4 converts the source and destination addresses to their 4-byte
+// form, returning an error if either is not a valid IPv4 address.
+func (ip *IPv4) AddressTo4() error {
+ var src, dst net.IP
+
+ if addr, err := checkIPv4Address(ip.SrcIP); err != nil {
+ return fmt.Errorf("Invalid source IPv4 address (%s)", err)
+ } else {
+ src = addr
+ }
+ if addr, err := checkIPv4Address(ip.DstIP); err != nil {
+ return fmt.Errorf("Invalid destination IPv4 address (%s)", err)
+ } else {
+ dst = addr
+ }
+ ip.SrcIP = src
+ ip.DstIP = dst
+ return nil
+}
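+
+// A minimal serialization sketch, assuming IPProtocolUDP is defined in this
+// package; with FixLengths and ComputeChecksums set, IHL, Length and
+// Checksum are filled in by SerializeTo:
+//
+//	ip := &IPv4{Version: 4, TTL: 64, Protocol: IPProtocolUDP,
+//		SrcIP: net.ParseIP("10.0.0.1"), DstIP: net.ParseIP("10.0.0.2")}
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
+//	_ = ip.SerializeTo(buf, opts)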
diff --git a/vendor/github.com/google/gopacket/layers/ip6.go b/vendor/github.com/google/gopacket/layers/ip6.go
new file mode 100644
index 0000000..70e9c8d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip6.go
@@ -0,0 +1,707 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+const (
+ // IPv6HopByHopOptionJumbogram code as defined in RFC 2675
+ IPv6HopByHopOptionJumbogram = 0xC2
+)
+
+const (
+ ipv6MaxPayloadLength = 65535
+)
+
+// IPv6 is the layer for the IPv6 header.
+type IPv6 struct {
+ // http://www.networksorcery.com/enp/protocol/ipv6.htm
+ BaseLayer
+ Version uint8
+ TrafficClass uint8
+ FlowLabel uint32
+ Length uint16
+ NextHeader IPProtocol
+ HopLimit uint8
+ SrcIP net.IP
+ DstIP net.IP
+ HopByHop *IPv6HopByHop
+ // hbh will be pointed to by HopByHop if that layer exists.
+ hbh IPv6HopByHop
+}
+
+// LayerType returns LayerTypeIPv6
+func (ipv6 *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
+
+// NetworkFlow returns a flow of (EndpointIPv6, SrcIP, DstIP).
+func (ipv6 *IPv6) NetworkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointIPv6, ipv6.SrcIP, ipv6.DstIP)
+}
+
+// getIPv6HopByHopJumboLength searches for a Jumbo Payload TLV in the given
+// IPv6HopByHop header and returns (length, true) if one is found.
+func getIPv6HopByHopJumboLength(hopopts *IPv6HopByHop) (uint32, bool, error) {
+ var tlv *IPv6HopByHopOption
+
+ for _, t := range hopopts.Options {
+ if t.OptionType == IPv6HopByHopOptionJumbogram {
+ tlv = t
+ break
+ }
+ }
+ if tlv == nil {
+ // Not found
+ return 0, false, nil
+ }
+ if len(tlv.OptionData) != 4 {
+ return 0, false, errors.New("Jumbo length TLV data must have length 4")
+ }
+ l := binary.BigEndian.Uint32(tlv.OptionData)
+ if l <= ipv6MaxPayloadLength {
+ return 0, false, fmt.Errorf("Jumbo length cannot be less than %d", ipv6MaxPayloadLength+1)
+ }
+ // Found
+ return l, true, nil
+}
+
+// addIPv6JumboOption adds a zero-valued Jumbo TLV to the IPv6 header if one
+// does not exist, creating the hop-by-hop header if necessary.
+func addIPv6JumboOption(ip6 *IPv6) {
+ var tlv *IPv6HopByHopOption
+
+ if ip6.HopByHop == nil {
+ // Add IPv6 HopByHop
+ ip6.HopByHop = &IPv6HopByHop{}
+ ip6.HopByHop.NextHeader = ip6.NextHeader
+ ip6.HopByHop.HeaderLength = 0
+ ip6.NextHeader = IPProtocolIPv6HopByHop
+ }
+ for _, t := range ip6.HopByHop.Options {
+ if t.OptionType == IPv6HopByHopOptionJumbogram {
+ tlv = t
+ break
+ }
+ }
+ if tlv == nil {
+ // Add Jumbo TLV
+ tlv = &IPv6HopByHopOption{}
+ ip6.HopByHop.Options = append(ip6.HopByHop.Options, tlv)
+ }
+ tlv.SetJumboLength(0)
+}
+
+// setIPv6PayloadJumboLength sets the jumbo length in a serialized IPv6
+// payload (which starts with the HopByHop header).
+func setIPv6PayloadJumboLength(hbh []byte) error {
+ pLen := len(hbh)
+ if pLen < 8 {
+ //HopByHop is minimum 8 bytes
+ return fmt.Errorf("Invalid IPv6 payload (length %d)", pLen)
+ }
+ hbhLen := int((hbh[1] + 1) * 8)
+ if hbhLen > pLen {
+		return fmt.Errorf("Invalid hop-by-hop length (length: %d, payload: %d)", hbhLen, pLen)
+ }
+ offset := 2 //start with options
+ for offset < hbhLen {
+ opt := hbh[offset]
+ if opt == 0 {
+ //Pad1
+ offset++
+ continue
+ }
+ optLen := int(hbh[offset+1])
+ if opt == IPv6HopByHopOptionJumbogram {
+ if optLen == 4 {
+ binary.BigEndian.PutUint32(hbh[offset+2:], uint32(pLen))
+ return nil
+ }
+ return fmt.Errorf("Jumbo TLV too short (%d bytes)", optLen)
+ }
+ offset += 2 + optLen
+ }
+ return errors.New("Jumbo TLV not found")
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (ipv6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var jumbo bool
+ var err error
+
+ payload := b.Bytes()
+ pLen := len(payload)
+ if pLen > ipv6MaxPayloadLength {
+ jumbo = true
+ if opts.FixLengths {
+ // We need to set the length later because the hop-by-hop header may
+ // not exist or else need padding, so pLen may yet change
+ addIPv6JumboOption(ipv6)
+ } else if ipv6.HopByHop == nil {
+ return fmt.Errorf("Cannot fit payload length of %d into IPv6 packet", pLen)
+ } else {
+ _, ok, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return errors.New("Missing jumbo length hop-by-hop option")
+ }
+ }
+ }
+
+ hbhAlreadySerialized := false
+ if ipv6.HopByHop != nil {
+ for _, l := range b.Layers() {
+ if l == LayerTypeIPv6HopByHop {
+ hbhAlreadySerialized = true
+ break
+ }
+ }
+ }
+ if ipv6.HopByHop != nil && !hbhAlreadySerialized {
+ if ipv6.NextHeader != IPProtocolIPv6HopByHop {
+ // Just fix it instead of throwing an error
+ ipv6.NextHeader = IPProtocolIPv6HopByHop
+ }
+ err = ipv6.HopByHop.SerializeTo(b, opts)
+ if err != nil {
+ return err
+ }
+ payload = b.Bytes()
+ pLen = len(payload)
+ if opts.FixLengths && jumbo {
+ err := setIPv6PayloadJumboLength(payload)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if !jumbo && pLen > ipv6MaxPayloadLength {
+ return errors.New("Cannot fit payload into IPv6 header")
+ }
+ bytes, err := b.PrependBytes(40)
+ if err != nil {
+ return err
+ }
+ bytes[0] = (ipv6.Version << 4) | (ipv6.TrafficClass >> 4)
+ bytes[1] = (ipv6.TrafficClass << 4) | uint8(ipv6.FlowLabel>>16)
+ binary.BigEndian.PutUint16(bytes[2:], uint16(ipv6.FlowLabel))
+ if opts.FixLengths {
+ if jumbo {
+ ipv6.Length = 0
+ } else {
+ ipv6.Length = uint16(pLen)
+ }
+ }
+ binary.BigEndian.PutUint16(bytes[4:], ipv6.Length)
+ bytes[6] = byte(ipv6.NextHeader)
+ bytes[7] = byte(ipv6.HopLimit)
+ if err := ipv6.AddressTo16(); err != nil {
+ return err
+ }
+ copy(bytes[8:], ipv6.SrcIP)
+ copy(bytes[24:], ipv6.DstIP)
+ return nil
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 40 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid ip6 header. Length %d less than 40", len(data))
+ }
+ ipv6.Version = uint8(data[0]) >> 4
+ ipv6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
+ ipv6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
+ ipv6.Length = binary.BigEndian.Uint16(data[4:6])
+ ipv6.NextHeader = IPProtocol(data[6])
+ ipv6.HopLimit = data[7]
+ ipv6.SrcIP = data[8:24]
+ ipv6.DstIP = data[24:40]
+ ipv6.HopByHop = nil
+ ipv6.BaseLayer = BaseLayer{data[:40], data[40:]}
+
+ // We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
+ // options are crucial for understanding what's actually happening per packet.
+ if ipv6.NextHeader == IPProtocolIPv6HopByHop {
+ err := ipv6.hbh.DecodeFromBytes(ipv6.Payload, df)
+ if err != nil {
+ return err
+ }
+ ipv6.HopByHop = &ipv6.hbh
+ pEnd, jumbo, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
+ if err != nil {
+ return err
+ }
+ if jumbo && ipv6.Length == 0 {
+ pEnd := int(pEnd)
+ if pEnd > len(ipv6.Payload) {
+ df.SetTruncated()
+ pEnd = len(ipv6.Payload)
+ }
+ ipv6.Payload = ipv6.Payload[:pEnd]
+ return nil
+ } else if jumbo && ipv6.Length != 0 {
+ return errors.New("IPv6 has jumbo length and IPv6 length is not 0")
+ } else if !jumbo && ipv6.Length == 0 {
+ return errors.New("IPv6 length 0, but HopByHop header does not have jumbogram option")
+ } else {
+ ipv6.Payload = ipv6.Payload[ipv6.hbh.ActualLength:]
+ }
+ }
+
+ if ipv6.Length == 0 {
+ return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ipv6.NextHeader)
+ }
+
+ pEnd := int(ipv6.Length)
+ if pEnd > len(ipv6.Payload) {
+ df.SetTruncated()
+ pEnd = len(ipv6.Payload)
+ }
+ ipv6.Payload = ipv6.Payload[:pEnd]
+
+ return nil
+}
+
+// CanDecode implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) CanDecode() gopacket.LayerClass {
+ return LayerTypeIPv6
+}
+
+// NextLayerType implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) NextLayerType() gopacket.LayerType {
+ if ipv6.HopByHop != nil {
+ return ipv6.HopByHop.NextHeader.LayerType()
+ }
+ return ipv6.NextHeader.LayerType()
+}
+
+func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
+ ip6 := &IPv6{}
+ err := ip6.DecodeFromBytes(data, p)
+ p.AddLayer(ip6)
+ p.SetNetworkLayer(ip6)
+ if ip6.HopByHop != nil {
+ p.AddLayer(ip6.HopByHop)
+ }
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(ip6.NextLayerType())
+}
+
+type ipv6HeaderTLVOption struct {
+ OptionType, OptionLength uint8
+ ActualLength int
+ OptionData []byte
+ OptionAlignment [2]uint8 // Xn+Y = [2]uint8{X, Y}
+}
+
+func (h *ipv6HeaderTLVOption) serializeTo(data []byte, fixLengths bool, dryrun bool) int {
+ if fixLengths {
+ h.OptionLength = uint8(len(h.OptionData))
+ }
+ length := int(h.OptionLength) + 2
+ if !dryrun {
+ data[0] = h.OptionType
+ data[1] = h.OptionLength
+ copy(data[2:], h.OptionData)
+ }
+ return length
+}
+
+func decodeIPv6HeaderTLVOption(data []byte) (h *ipv6HeaderTLVOption) {
+ h = &ipv6HeaderTLVOption{}
+ if data[0] == 0 {
+ h.ActualLength = 1
+ return
+ }
+ h.OptionType = data[0]
+ h.OptionLength = data[1]
+ h.ActualLength = int(h.OptionLength) + 2
+ h.OptionData = data[2:h.ActualLength]
+ return
+}
+
+func serializeTLVOptionPadding(data []byte, padLength int) {
+ if padLength <= 0 {
+ return
+ }
+ if padLength == 1 {
+ data[0] = 0x0
+ return
+ }
+ tlvLength := uint8(padLength) - 2
+ data[0] = 0x1
+ data[1] = tlvLength
+ if tlvLength != 0 {
+ for k := range data[2:] {
+ data[k+2] = 0x0
+ }
+ }
+}
+
+// If buf is 'nil' do a serialize dry run
+func serializeIPv6HeaderTLVOptions(buf []byte, options []*ipv6HeaderTLVOption, fixLengths bool) int {
+ var l int
+
+ dryrun := buf == nil
+ length := 2
+ for _, opt := range options {
+ if fixLengths {
+ x := int(opt.OptionAlignment[0])
+ y := int(opt.OptionAlignment[1])
+ if x != 0 {
+ n := length / x
+ offset := x*n + y
+ if offset < length {
+ offset += x
+ }
+ if length != offset {
+ pad := offset - length
+ if !dryrun {
+ serializeTLVOptionPadding(buf[length-2:], pad)
+ }
+ length += pad
+ }
+ }
+ }
+ if dryrun {
+ l = opt.serializeTo(nil, fixLengths, true)
+ } else {
+ l = opt.serializeTo(buf[length-2:], fixLengths, false)
+ }
+ length += l
+ }
+	if fixLengths {
+		if rem := length % 8; rem != 0 {
+			// Pad the extension header out to the next multiple of 8
+			// octets, as required for IPv6 extension headers.
+			pad := 8 - rem
+			if !dryrun {
+				serializeTLVOptionPadding(buf[length-2:], pad)
+			}
+			length += pad
+		}
+	}
+ return length - 2
+}
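+
+// A worked alignment example: the jumbo payload option declares alignment
+// 4n+2, so as the first option the two bytes of NextHeader/HeaderLength
+// already satisfy it and no pad is emitted; its 2+4 option bytes then bring
+// the header to exactly one 8-octet unit.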
+
+type ipv6ExtensionBase struct {
+ BaseLayer
+ NextHeader IPProtocol
+ HeaderLength uint8
+ ActualLength int
+}
+
+func decodeIPv6ExtensionBase(data []byte, df gopacket.DecodeFeedback) (i ipv6ExtensionBase, returnedErr error) {
+ if len(data) < 2 {
+ df.SetTruncated()
+ return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than 2", len(data))
+ }
+ i.NextHeader = IPProtocol(data[0])
+ i.HeaderLength = data[1]
+ i.ActualLength = int(i.HeaderLength)*8 + 8
+ if len(data) < i.ActualLength {
+ return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than specified length %d", len(data), i.ActualLength)
+ }
+ i.Contents = data[:i.ActualLength]
+ i.Payload = data[i.ActualLength:]
+ return
+}
+
+// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
+// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks
+// which may or may not have extensions.
+type IPv6ExtensionSkipper struct {
+ NextHeader IPProtocol
+ BaseLayer
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ extension, err := decodeIPv6ExtensionBase(data, df)
+ if err != nil {
+ return err
+ }
+ i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
+ i.NextHeader = extension.NextHeader
+ return nil
+}
+
+// CanDecode implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
+ return LayerClassIPv6Extension
+}
+
+// NextLayerType implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
+ return i.NextHeader.LayerType()
+}
+
+// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
+type IPv6HopByHopOption ipv6HeaderTLVOption
+
+// IPv6HopByHop is the IPv6 hop-by-hop extension.
+type IPv6HopByHop struct {
+ ipv6ExtensionBase
+ Options []*IPv6HopByHopOption
+}
+
+// LayerType returns LayerTypeIPv6HopByHop.
+func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
+
+// SerializeTo implementation according to gopacket.SerializableLayer
+func (i *IPv6HopByHop) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var bytes []byte
+ var err error
+
+ o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
+ for _, v := range i.Options {
+ o = append(o, (*ipv6HeaderTLVOption)(v))
+ }
+
+ l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
+ bytes, err = b.PrependBytes(l)
+ if err != nil {
+ return err
+ }
+ serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)
+
+ length := len(bytes) + 2
+ if length%8 != 0 {
+ return errors.New("IPv6HopByHop actual length must be multiple of 8")
+ }
+ bytes, err = b.PrependBytes(2)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(i.NextHeader)
+ if opts.FixLengths {
+ i.HeaderLength = uint8((length / 8) - 1)
+ }
+ bytes[1] = uint8(i.HeaderLength)
+ return nil
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ var err error
+ i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
+ if err != nil {
+ return err
+ }
+ offset := 2
+ for offset < i.ActualLength {
+ opt := decodeIPv6HeaderTLVOption(data[offset:])
+ i.Options = append(i.Options, (*IPv6HopByHopOption)(opt))
+ offset += opt.ActualLength
+ }
+ return nil
+}
+
+func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
+ i := &IPv6HopByHop{}
+ err := i.DecodeFromBytes(data, p)
+ p.AddLayer(i)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(i.NextHeader)
+}
+
+// SetJumboLength adds the IPv6HopByHopOptionJumbogram with the given length
+func (o *IPv6HopByHopOption) SetJumboLength(len uint32) {
+ o.OptionType = IPv6HopByHopOptionJumbogram
+ o.OptionLength = 4
+ o.ActualLength = 6
+ if o.OptionData == nil {
+ o.OptionData = make([]byte, 4)
+ }
+ binary.BigEndian.PutUint32(o.OptionData, len)
+ o.OptionAlignment = [2]uint8{4, 2}
+}
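+
+// A minimal sketch of the jumbogram flow: addIPv6JumboOption installs a
+// zero-valued placeholder via this setter, and setIPv6PayloadJumboLength
+// patches in the real payload length once the hop-by-hop header has been
+// serialized:
+//
+//	var opt IPv6HopByHopOption
+//	opt.SetJumboLength(0) // type 0xC2, 4 data bytes, alignment 4n+2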
+
+// IPv6Routing is the IPv6 routing extension.
+type IPv6Routing struct {
+ ipv6ExtensionBase
+ RoutingType uint8
+ SegmentsLeft uint8
+	// Reserved is supposed to be zero according to RFC 2460; it is the
+	// second set of 4 bytes in the extension.
+ Reserved []byte
+ // SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
+ // set only if RoutingType == 0.
+ SourceRoutingIPs []net.IP
+}
+
+// LayerType returns LayerTypeIPv6Routing.
+func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
+
+func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
+ base, err := decodeIPv6ExtensionBase(data, p)
+ if err != nil {
+ return err
+ }
+ i := &IPv6Routing{
+ ipv6ExtensionBase: base,
+ RoutingType: data[2],
+ SegmentsLeft: data[3],
+ Reserved: data[4:8],
+ }
+ switch i.RoutingType {
+ case 0: // Source routing
+ if (i.ActualLength-8)%16 != 0 {
+ return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", i.ActualLength)
+ }
+ for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
+ i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
+ }
+ default:
+ return fmt.Errorf("Unknown IPv6 routing header type %d", i.RoutingType)
+ }
+ p.AddLayer(i)
+ return p.NextDecoder(i.NextHeader)
+}
+
+// IPv6Fragment is the IPv6 fragment header, used for packet
+// fragmentation/defragmentation.
+type IPv6Fragment struct {
+ BaseLayer
+ NextHeader IPProtocol
+ // Reserved1 is bits [8-16), from least to most significant, 0-indexed
+ Reserved1 uint8
+ FragmentOffset uint16
+ // Reserved2 is bits [29-31), from least to most significant, 0-indexed
+ Reserved2 uint8
+ MoreFragments bool
+ Identification uint32
+}
+
+// LayerType returns LayerTypeIPv6Fragment.
+func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
+
+func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 8 {
+ p.SetTruncated()
+ return fmt.Errorf("Invalid ip6-fragment header. Length %d less than 8", len(data))
+ }
+ i := &IPv6Fragment{
+ BaseLayer: BaseLayer{data[:8], data[8:]},
+ NextHeader: IPProtocol(data[0]),
+ Reserved1: data[1],
+ FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
+ Reserved2: data[3] & 0x6 >> 1,
+ MoreFragments: data[3]&0x1 != 0,
+ Identification: binary.BigEndian.Uint32(data[4:8]),
+ }
+ p.AddLayer(i)
+ return p.NextDecoder(gopacket.DecodeFragment)
+}
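+
+// Note (informative): the decoded FragmentOffset above is expressed in
+// 8-octet units, as RFC 2460 defines the 13-bit field, so the byte offset of
+// this fragment's payload within the original packet is FragmentOffset * 8.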
+
+// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
+type IPv6DestinationOption ipv6HeaderTLVOption
+
+// IPv6Destination is the IPv6 destination options header.
+type IPv6Destination struct {
+ ipv6ExtensionBase
+ Options []*IPv6DestinationOption
+}
+
+// LayerType returns LayerTypeIPv6Destination.
+func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6Destination) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ var err error
+ i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
+ if err != nil {
+ return err
+ }
+ offset := 2
+ for offset < i.ActualLength {
+ opt := decodeIPv6HeaderTLVOption(data[offset:])
+ i.Options = append(i.Options, (*IPv6DestinationOption)(opt))
+ offset += opt.ActualLength
+ }
+ return nil
+}
+
+func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
+ i := &IPv6Destination{}
+ err := i.DecodeFromBytes(data, p)
+ p.AddLayer(i)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(i.NextHeader)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var bytes []byte
+ var err error
+
+ o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
+ for _, v := range i.Options {
+ o = append(o, (*ipv6HeaderTLVOption)(v))
+ }
+
+ l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
+ bytes, err = b.PrependBytes(l)
+ if err != nil {
+ return err
+ }
+ serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)
+
+ length := len(bytes) + 2
+ if length%8 != 0 {
+ return errors.New("IPv6Destination actual length must be multiple of 8")
+ }
+ bytes, err = b.PrependBytes(2)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(i.NextHeader)
+ if opts.FixLengths {
+ i.HeaderLength = uint8((length / 8) - 1)
+ }
+ bytes[1] = uint8(i.HeaderLength)
+ return nil
+}
+
+func checkIPv6Address(addr net.IP) error {
+ if len(addr) == net.IPv6len {
+ return nil
+ }
+ if len(addr) == net.IPv4len {
+ return errors.New("address is IPv4")
+ }
+ return fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv6len)
+}
+
+// AddressTo16 ensures IPv6.SrcIP and IPv6.DstIP are actually IPv6 addresses (i.e. 16 byte addresses)
+func (ipv6 *IPv6) AddressTo16() error {
+ if err := checkIPv6Address(ipv6.SrcIP); err != nil {
+ return fmt.Errorf("Invalid source IPv6 address (%s)", err)
+ }
+ if err := checkIPv6Address(ipv6.DstIP); err != nil {
+ return fmt.Errorf("Invalid destination IPv6 address (%s)", err)
+ }
+ return nil
+}
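+
+// exampleAddressTo16 is an illustrative sketch, not part of the upstream
+// library: net.ParseIP returns 16-byte slices for IPv6 literals, so
+// addresses built that way pass AddressTo16's checks.
+func exampleAddressTo16() error {
+ ip := &IPv6{
+  SrcIP: net.ParseIP("2001:db8::1"),
+  DstIP: net.ParseIP("2001:db8::2"),
+ }
+ return ip.AddressTo16() // nil: both addresses are 16 bytes long
+}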
diff --git a/vendor/github.com/google/gopacket/layers/ipsec.go b/vendor/github.com/google/gopacket/layers/ipsec.go
new file mode 100644
index 0000000..19163fa
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ipsec.go
@@ -0,0 +1,68 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+)
+
+// IPSecAH is the authentication header for IPv4/6 defined in
+// http://tools.ietf.org/html/rfc2402
+type IPSecAH struct {
+ // While the auth header can be used for both IPv4 and v6, its format is that of
+ // an IPv6 extension (NextHeader, PayloadLength, etc.), so we use ipv6ExtensionBase
+ // to build it.
+ ipv6ExtensionBase
+ Reserved uint16
+ SPI, Seq uint32
+ AuthenticationData []byte
+}
+
+// LayerType returns LayerTypeIPSecAH.
+func (i *IPSecAH) LayerType() gopacket.LayerType { return LayerTypeIPSecAH }
+
+func decodeIPSecAH(data []byte, p gopacket.PacketBuilder) error {
+ i := &IPSecAH{
+ ipv6ExtensionBase: ipv6ExtensionBase{
+ NextHeader: IPProtocol(data[0]),
+ HeaderLength: data[1],
+ },
+ Reserved: binary.BigEndian.Uint16(data[2:4]),
+ SPI: binary.BigEndian.Uint32(data[4:8]),
+ Seq: binary.BigEndian.Uint32(data[8:12]),
+ }
+ i.ActualLength = (int(i.HeaderLength) + 2) * 4
+ i.AuthenticationData = data[12:i.ActualLength]
+ i.Contents = data[:i.ActualLength]
+ i.Payload = data[i.ActualLength:]
+ p.AddLayer(i)
+ return p.NextDecoder(i.NextHeader)
+}
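+
+// Worked example (informative): with HMAC-SHA1-96 the ICV is 12 bytes and
+// the fixed AH fields are another 12, so the header totals 24 bytes; the
+// wire Payload Length field is the length in 32-bit words minus 2, i.e.
+// 24/4 - 2 = 4, and decodeIPSecAH recovers ActualLength = (4 + 2) * 4 = 24.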
+
+// IPSecESP is the encapsulating security payload defined in
+// http://tools.ietf.org/html/rfc2406
+type IPSecESP struct {
+ BaseLayer
+ SPI, Seq uint32
+ // Encrypted contains the encrypted set of bytes sent in an ESP
+ Encrypted []byte
+}
+
+// LayerType returns LayerTypeIPSecESP.
+func (i *IPSecESP) LayerType() gopacket.LayerType { return LayerTypeIPSecESP }
+
+func decodeIPSecESP(data []byte, p gopacket.PacketBuilder) error {
+ i := &IPSecESP{
+ BaseLayer: BaseLayer{data, nil},
+ SPI: binary.BigEndian.Uint32(data[:4]),
+ Seq: binary.BigEndian.Uint32(data[4:8]),
+ Encrypted: data[8:],
+ }
+ p.AddLayer(i)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/layertypes.go b/vendor/github.com/google/gopacket/layers/layertypes.go
new file mode 100644
index 0000000..f66fd9b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/layertypes.go
@@ -0,0 +1,221 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
+var (
+ LayerTypeARP = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: gopacket.DecodeFunc(decodeARP)})
+ LayerTypeCiscoDiscovery = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: gopacket.DecodeFunc(decodeCiscoDiscovery)})
+ LayerTypeEthernetCTP = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: gopacket.DecodeFunc(decodeEthernetCTP)})
+ LayerTypeEthernetCTPForwardData = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil})
+ LayerTypeEthernetCTPReply = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil})
+ LayerTypeDot1Q = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: gopacket.DecodeFunc(decodeDot1Q)})
+ LayerTypeEtherIP = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: gopacket.DecodeFunc(decodeEtherIP)})
+ LayerTypeEthernet = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: gopacket.DecodeFunc(decodeEthernet)})
+ LayerTypeGRE = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: gopacket.DecodeFunc(decodeGRE)})
+ LayerTypeICMPv4 = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: gopacket.DecodeFunc(decodeICMPv4)})
+ LayerTypeIPv4 = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: gopacket.DecodeFunc(decodeIPv4)})
+ LayerTypeIPv6 = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: gopacket.DecodeFunc(decodeIPv6)})
+ LayerTypeLLC = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: gopacket.DecodeFunc(decodeLLC)})
+ LayerTypeSNAP = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: gopacket.DecodeFunc(decodeSNAP)})
+ LayerTypeMPLS = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: gopacket.DecodeFunc(decodeMPLS)})
+ LayerTypePPP = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: gopacket.DecodeFunc(decodePPP)})
+ LayerTypePPPoE = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: gopacket.DecodeFunc(decodePPPoE)})
+ LayerTypeRUDP = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: gopacket.DecodeFunc(decodeRUDP)})
+ LayerTypeSCTP = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: gopacket.DecodeFunc(decodeSCTP)})
+ LayerTypeSCTPUnknownChunkType = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil})
+ LayerTypeSCTPData = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil})
+ LayerTypeSCTPInit = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil})
+ LayerTypeSCTPSack = gopacket.RegisterLayerType(32, gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil})
+ LayerTypeSCTPHeartbeat = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil})
+ LayerTypeSCTPError = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil})
+ LayerTypeSCTPShutdown = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil})
+ LayerTypeSCTPShutdownAck = gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil})
+ LayerTypeSCTPCookieEcho = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil})
+ LayerTypeSCTPEmptyLayer = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil})
+ LayerTypeSCTPInitAck = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil})
+ LayerTypeSCTPHeartbeatAck = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil})
+ LayerTypeSCTPAbort = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil})
+ LayerTypeSCTPShutdownComplete = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil})
+ LayerTypeSCTPCookieAck = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil})
+ LayerTypeTCP = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: gopacket.DecodeFunc(decodeTCP)})
+ LayerTypeUDP = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: gopacket.DecodeFunc(decodeUDP)})
+ LayerTypeIPv6HopByHop = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: gopacket.DecodeFunc(decodeIPv6HopByHop)})
+ LayerTypeIPv6Routing = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: gopacket.DecodeFunc(decodeIPv6Routing)})
+ LayerTypeIPv6Fragment = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: gopacket.DecodeFunc(decodeIPv6Fragment)})
+ LayerTypeIPv6Destination = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: gopacket.DecodeFunc(decodeIPv6Destination)})
+ LayerTypeIPSecAH = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: gopacket.DecodeFunc(decodeIPSecAH)})
+ LayerTypeIPSecESP = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: gopacket.DecodeFunc(decodeIPSecESP)})
+ LayerTypeUDPLite = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", Decoder: gopacket.DecodeFunc(decodeUDPLite)})
+ LayerTypeFDDI = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: gopacket.DecodeFunc(decodeFDDI)})
+ LayerTypeLoopback = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: gopacket.DecodeFunc(decodeLoopback)})
+ LayerTypeEAP = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: gopacket.DecodeFunc(decodeEAP)})
+ LayerTypeEAPOL = gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: gopacket.DecodeFunc(decodeEAPOL)})
+ LayerTypeICMPv6 = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: gopacket.DecodeFunc(decodeICMPv6)})
+ LayerTypeLinkLayerDiscovery = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: gopacket.DecodeFunc(decodeLinkLayerDiscovery)})
+ LayerTypeCiscoDiscoveryInfo = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)})
+ LayerTypeLinkLayerDiscoveryInfo = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil})
+ LayerTypeNortelDiscovery = gopacket.RegisterLayerType(61, gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: gopacket.DecodeFunc(decodeNortelDiscovery)})
+ LayerTypeIGMP = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: gopacket.DecodeFunc(decodeIGMP)})
+ LayerTypePFLog = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: gopacket.DecodeFunc(decodePFLog)})
+ LayerTypeRadioTap = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: gopacket.DecodeFunc(decodeRadioTap)})
+ LayerTypeDot11 = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: gopacket.DecodeFunc(decodeDot11)})
+ LayerTypeDot11Ctrl = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: gopacket.DecodeFunc(decodeDot11Ctrl)})
+ LayerTypeDot11Data = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: gopacket.DecodeFunc(decodeDot11Data)})
+ LayerTypeDot11DataCFAck = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+ LayerTypeDot11DataCFPoll = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+ LayerTypeDot11DataCFAckPoll = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+ LayerTypeDot11DataNull = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: gopacket.DecodeFunc(decodeDot11DataNull)})
+ LayerTypeDot11DataCFAckNoData = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+ LayerTypeDot11DataCFPollNoData = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+ LayerTypeDot11DataCFAckPollNoData = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+ LayerTypeDot11DataQOSData = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSData", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSData)})
+ LayerTypeDot11DataQOSDataCFAck = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)})
+ LayerTypeDot11DataQOSDataCFPoll = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)})
+ LayerTypeDot11DataQOSDataCFAckPoll = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)})
+ LayerTypeDot11DataQOSNull = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSNull)})
+ LayerTypeDot11DataQOSCFPollNoData = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)})
+ LayerTypeDot11DataQOSCFAckPollNoData = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)})
+ LayerTypeDot11InformationElement = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", Decoder: gopacket.DecodeFunc(decodeDot11InformationElement)})
+ LayerTypeDot11CtrlCTS = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCTS)})
+ LayerTypeDot11CtrlRTS = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlRTS)})
+ LayerTypeDot11CtrlBlockAckReq = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)})
+ LayerTypeDot11CtrlBlockAck = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAck)})
+ LayerTypeDot11CtrlPowersavePoll = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)})
+ LayerTypeDot11CtrlAck = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlAck)})
+ LayerTypeDot11CtrlCFEnd = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEnd)})
+ LayerTypeDot11CtrlCFEndAck = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)})
+ LayerTypeDot11MgmtAssociationReq = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)})
+ LayerTypeDot11MgmtAssociationResp = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)})
+ LayerTypeDot11MgmtReassociationReq = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)})
+ LayerTypeDot11MgmtReassociationResp = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)})
+ LayerTypeDot11MgmtProbeReq = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeReq)})
+ LayerTypeDot11MgmtProbeResp = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeResp)})
+ LayerTypeDot11MgmtMeasurementPilot = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)})
+ LayerTypeDot11MgmtBeacon = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: gopacket.DecodeFunc(decodeDot11MgmtBeacon)})
+ LayerTypeDot11MgmtATIM = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: gopacket.DecodeFunc(decodeDot11MgmtATIM)})
+ LayerTypeDot11MgmtDisassociation = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDisassociation)})
+ LayerTypeDot11MgmtAuthentication = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAuthentication)})
+ LayerTypeDot11MgmtDeauthentication = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDeauthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)})
+ LayerTypeDot11MgmtAction = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAction)})
+ LayerTypeDot11MgmtActionNoAck = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck)})
+ LayerTypeDot11MgmtArubaWLAN = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)})
+ LayerTypeDot11WEP = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: gopacket.DecodeFunc(decodeDot11WEP)})
+ LayerTypeDNS = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: gopacket.DecodeFunc(decodeDNS)})
+ LayerTypeUSB = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: gopacket.DecodeFunc(decodeUSB)})
+ LayerTypeUSBRequestBlockSetup = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: gopacket.DecodeFunc(decodeUSBRequestBlockSetup)})
+ LayerTypeUSBControl = gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: gopacket.DecodeFunc(decodeUSBControl)})
+ LayerTypeUSBInterrupt = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: gopacket.DecodeFunc(decodeUSBInterrupt)})
+ LayerTypeUSBBulk = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: gopacket.DecodeFunc(decodeUSBBulk)})
+ LayerTypeLinuxSLL = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: gopacket.DecodeFunc(decodeLinuxSLL)})
+ LayerTypeSFlow = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: gopacket.DecodeFunc(decodeSFlow)})
+ LayerTypePrismHeader = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: gopacket.DecodeFunc(decodePrismHeader)})
+ LayerTypeVXLAN = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: gopacket.DecodeFunc(decodeVXLAN)})
+ LayerTypeNTP = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: gopacket.DecodeFunc(decodeNTP)})
+ LayerTypeDHCPv4 = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: gopacket.DecodeFunc(decodeDHCPv4)})
+ LayerTypeVRRP = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: gopacket.DecodeFunc(decodeVRRP)})
+ LayerTypeGeneve = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: gopacket.DecodeFunc(decodeGeneve)})
+ LayerTypeSTP = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: gopacket.DecodeFunc(decodeSTP)})
+ LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: gopacket.DecodeFunc(decodeBFD)})
+ LayerTypeOSPF = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: gopacket.DecodeFunc(decodeOSPF)})
+ LayerTypeICMPv6RouterSolicitation = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)})
+ LayerTypeICMPv6RouterAdvertisement = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)})
+ LayerTypeICMPv6NeighborSolicitation = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)})
+ LayerTypeICMPv6NeighborAdvertisement = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)})
+ LayerTypeICMPv6Redirect = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: gopacket.DecodeFunc(decodeICMPv6Redirect)})
+ LayerTypeGTPv1U = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: gopacket.DecodeFunc(decodeGTPv1u)})
+ LayerTypeEAPOLKey = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: gopacket.DecodeFunc(decodeEAPOLKey)})
+ LayerTypeLCM = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: gopacket.DecodeFunc(decodeLCM)})
+ LayerTypeICMPv6Echo = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: gopacket.DecodeFunc(decodeICMPv6Echo)})
+ LayerTypeSIP = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: gopacket.DecodeFunc(decodeSIP)})
+ LayerTypeDHCPv6 = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: gopacket.DecodeFunc(decodeDHCPv6)})
+ LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)})
+ LayerTypeMLDv1MulticastListenerDone = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)})
+ LayerTypeMLDv1MulticastListenerQuery = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)})
+ LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)})
+ LayerTypeMLDv2MulticastListenerQuery = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)})
+ LayerTypeTLS = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: gopacket.DecodeFunc(decodeTLS)})
+ LayerTypeModbusTCP = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: gopacket.DecodeFunc(decodeModbusTCP)})
+ LayerTypeRMCP = gopacket.RegisterLayerType(142, gopacket.LayerTypeMetadata{Name: "RMCP", Decoder: gopacket.DecodeFunc(decodeRMCP)})
+ LayerTypeASF = gopacket.RegisterLayerType(143, gopacket.LayerTypeMetadata{Name: "ASF", Decoder: gopacket.DecodeFunc(decodeASF)})
+ LayerTypeASFPresencePong = gopacket.RegisterLayerType(144, gopacket.LayerTypeMetadata{Name: "ASFPresencePong", Decoder: gopacket.DecodeFunc(decodeASFPresencePong)})
+)
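+
+// exampleLayerType is an illustrative sketch, not part of the upstream
+// library: registering a new layer type follows the same pattern as the
+// list above. The number must not collide with an already-registered type
+// (this sketch assumes 2000 is free), and a nil Decoder is valid for layers
+// that are only ever constructed by other decoders.
+var exampleLayerType = gopacket.RegisterLayerType(2000, gopacket.LayerTypeMetadata{Name: "Example", Decoder: nil})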
+
+var (
+ // LayerClassIPNetwork contains TCP/IP network layer types.
+ LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeIPv4,
+ LayerTypeIPv6,
+ })
+ // LayerClassIPTransport contains TCP/IP transport layer types.
+ LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeTCP,
+ LayerTypeUDP,
+ LayerTypeSCTP,
+ })
+ // LayerClassIPControl contains TCP/IP control protocols.
+ LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeICMPv4,
+ LayerTypeICMPv6,
+ })
+ // LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP
+ // layer).
+ LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeSCTPUnknownChunkType,
+ LayerTypeSCTPData,
+ LayerTypeSCTPInit,
+ LayerTypeSCTPSack,
+ LayerTypeSCTPHeartbeat,
+ LayerTypeSCTPError,
+ LayerTypeSCTPShutdown,
+ LayerTypeSCTPShutdownAck,
+ LayerTypeSCTPCookieEcho,
+ LayerTypeSCTPEmptyLayer,
+ LayerTypeSCTPInitAck,
+ LayerTypeSCTPHeartbeatAck,
+ LayerTypeSCTPAbort,
+ LayerTypeSCTPShutdownComplete,
+ LayerTypeSCTPCookieAck,
+ })
+ // LayerClassIPv6Extension contains IPv6 extension headers.
+ LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeIPv6HopByHop,
+ LayerTypeIPv6Routing,
+ LayerTypeIPv6Fragment,
+ LayerTypeIPv6Destination,
+ })
+ // LayerClassIPSec contains IPSec-related layer types.
+ LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeIPSecAH,
+ LayerTypeIPSecESP,
+ })
+ // LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol
+ // messages.
+ LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeICMPv6RouterSolicitation,
+ LayerTypeICMPv6RouterAdvertisement,
+ LayerTypeICMPv6NeighborSolicitation,
+ LayerTypeICMPv6NeighborAdvertisement,
+ LayerTypeICMPv6Redirect,
+ })
+ // LayerClassMLDv1 contains MLDv1 multicast listener discovery protocol
+ // messages.
+ LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeMLDv1MulticastListenerQuery,
+ LayerTypeMLDv1MulticastListenerReport,
+ LayerTypeMLDv1MulticastListenerDone,
+ })
+ // LayerClassMLDv2 contains MLDv2 (and the related MLDv1) multicast
+ // listener discovery protocol messages.
+ LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{
+ LayerTypeMLDv1MulticastListenerReport,
+ LayerTypeMLDv1MulticastListenerDone,
+ LayerTypeMLDv2MulticastListenerReport,
+ LayerTypeMLDv1MulticastListenerQuery,
+ LayerTypeMLDv2MulticastListenerQuery,
+ })
+)
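+
+// exampleIsExtension is an illustrative sketch, not part of the upstream
+// library: a LayerClass matches any of a family of layer types via its
+// Contains method, e.g. to test whether a decoded layer is an IPv6
+// extension header.
+func exampleIsExtension(l gopacket.Layer) bool {
+ return LayerClassIPv6Extension.Contains(l.LayerType())
+}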
diff --git a/vendor/github.com/google/gopacket/layers/lcm.go b/vendor/github.com/google/gopacket/layers/lcm.go
new file mode 100644
index 0000000..5fe9fa5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lcm.go
@@ -0,0 +1,213 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+const (
+ // LCMShortHeaderMagic is the LCM small message header magic number
+ LCMShortHeaderMagic uint32 = 0x4c433032
+ // LCMFragmentedHeaderMagic is the LCM fragmented message header magic number
+ LCMFragmentedHeaderMagic uint32 = 0x4c433033
+)
+
+// LCM (Lightweight Communications and Marshalling) is a set of libraries and
+// tools for message passing and data marshalling, targeted at real-time systems
+// where high bandwidth and low latency are critical. It provides a
+// publish/subscribe message passing model and automatic
+// marshalling/unmarshalling code generation with bindings for applications in a
+// variety of programming languages.
+//
+// References
+// https://lcm-proj.github.io/
+// https://github.com/lcm-proj/lcm
+type LCM struct {
+ // Common (short & fragmented header) fields
+ Magic uint32
+ SequenceNumber uint32
+ // Fragmented header only fields
+ PayloadSize uint32
+ FragmentOffset uint32
+ FragmentNumber uint16
+ TotalFragments uint16
+ // Common field
+ ChannelName string
+ // Gopacket helper fields
+ Fragmented bool
+ fingerprint LCMFingerprint
+ contents []byte
+ payload []byte
+}
+
+// LCMFingerprint is the type of an LCM fingerprint.
+type LCMFingerprint uint64
+
+var (
+ // lcmLayerTypes maps each supported LCM fingerprint to its registered
+ // LayerType
+ lcmLayerTypes = map[LCMFingerprint]gopacket.LayerType{}
+ layerTypeIndex = 1001
+)
+
+// RegisterLCMLayerType allows users to register decoders for the underlying
+// LCM payload. Registration is keyed on the fingerprint that every LCM
+// message carries, which identifies its type uniquely. If num is non-zero it
+// is passed through to gopacket.RegisterLayerType; otherwise an incrementing
+// value starting at 1001 is used.
+func RegisterLCMLayerType(num int, name string, fingerprint LCMFingerprint,
+ decoder gopacket.Decoder) gopacket.LayerType {
+ metadata := gopacket.LayerTypeMetadata{Name: name, Decoder: decoder}
+
+ if num == 0 {
+ num = layerTypeIndex
+ layerTypeIndex++
+ }
+
+ lcmLayerTypes[fingerprint] = gopacket.RegisterLayerType(num, metadata)
+
+ return lcmLayerTypes[fingerprint]
+}
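+
+// exampleRegisterLCM is an illustrative sketch with a hypothetical
+// fingerprint and a no-op decoder, not part of the upstream library:
+// passing num == 0 lets RegisterLCMLayerType pick the next free number
+// starting from 1001.
+func exampleRegisterLCM() gopacket.LayerType {
+ const fp LCMFingerprint = 0x1234567890abcdef // hypothetical fingerprint
+ return RegisterLCMLayerType(0, "ExampleLCMMessage", fp,
+  gopacket.DecodeFunc(func(data []byte, p gopacket.PacketBuilder) error {
+   return nil // a real decoder would unmarshal the message here
+  }))
+}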
+
+// SupportedLCMFingerprints returns a slice of all LCM fingerprints that have
+// been registered so far.
+func SupportedLCMFingerprints() []LCMFingerprint {
+ fingerprints := make([]LCMFingerprint, 0, len(lcmLayerTypes))
+ for fp := range lcmLayerTypes {
+ fingerprints = append(fingerprints, fp)
+ }
+ return fingerprints
+}
+
+// GetLCMLayerType returns the underlying LCM message's LayerType.
+// This LayerType has to be registered by using RegisterLCMLayerType.
+func GetLCMLayerType(fingerprint LCMFingerprint) gopacket.LayerType {
+ layerType, ok := lcmLayerTypes[fingerprint]
+ if !ok {
+ return gopacket.LayerTypePayload
+ }
+
+ return layerType
+}
+
+func decodeLCM(data []byte, p gopacket.PacketBuilder) error {
+ lcm := &LCM{}
+
+ err := lcm.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+
+ p.AddLayer(lcm)
+ p.SetApplicationLayer(lcm)
+
+ return p.NextDecoder(lcm.NextLayerType())
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (lcm *LCM) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ offset := 0
+
+ lcm.Magic = binary.BigEndian.Uint32(data[offset:4])
+ offset += 4
+
+ if lcm.Magic != LCMShortHeaderMagic && lcm.Magic != LCMFragmentedHeaderMagic {
+ return fmt.Errorf("Received LCM header magic %v does not match know "+
+ "LCM magic numbers. Dropping packet.", lcm.Magic)
+ }
+
+ lcm.SequenceNumber = binary.BigEndian.Uint32(data[offset:8])
+ offset += 4
+
+ if lcm.Magic == LCMFragmentedHeaderMagic {
+ lcm.Fragmented = true
+
+ lcm.PayloadSize = binary.BigEndian.Uint32(data[offset : offset+4])
+ offset += 4
+
+ lcm.FragmentOffset = binary.BigEndian.Uint32(data[offset : offset+4])
+ offset += 4
+
+ lcm.FragmentNumber = binary.BigEndian.Uint16(data[offset : offset+2])
+ offset += 2
+
+ lcm.TotalFragments = binary.BigEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ } else {
+ lcm.Fragmented = false
+ }
+
+ if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+ buffer := make([]byte, 0)
+ for _, b := range data[offset:] {
+ offset++
+
+ if b == 0 {
+ break
+ }
+
+ buffer = append(buffer, b)
+ }
+
+ lcm.ChannelName = string(buffer)
+ }
+
+ lcm.fingerprint = LCMFingerprint(
+ binary.BigEndian.Uint64(data[offset : offset+8]))
+
+ lcm.contents = data[:offset]
+ lcm.payload = data[offset:]
+
+ return nil
+}
+
+// CanDecode returns a set of layers that LCM objects can decode.
+// As LCM objects can only decode the LCM layer, we just return that layer.
+func (lcm LCM) CanDecode() gopacket.LayerClass {
+ return LayerTypeLCM
+}
+
+// NextLayerType specifies the LCM payload layer type following this header.
+// As LCM packets are serialized structs with a unique fingerprint for each
+// unique combination of data types, lookup of the correct layer type is
+// based on that fingerprint.
+func (lcm LCM) NextLayerType() gopacket.LayerType {
+ if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+ return GetLCMLayerType(lcm.fingerprint)
+ }
+
+ return gopacket.LayerTypeFragment
+}
+
+// LayerType returns LayerTypeLCM
+func (lcm LCM) LayerType() gopacket.LayerType {
+ return LayerTypeLCM
+}
+
+// LayerContents returns the contents of the LCM header.
+func (lcm LCM) LayerContents() []byte {
+ return lcm.contents
+}
+
+// LayerPayload returns the payload following this LCM header.
+func (lcm LCM) LayerPayload() []byte {
+ return lcm.payload
+}
+
+// Payload returns the payload following this LCM header.
+func (lcm LCM) Payload() []byte {
+ return lcm.LayerPayload()
+}
+
+// Fingerprint returns the LCM fingerprint of the underlying message.
+func (lcm LCM) Fingerprint() LCMFingerprint {
+ return lcm.fingerprint
+}
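+
+// exampleParseLCM is an illustrative sketch, not part of the upstream
+// library: decoding a raw LCM datagram (typically a UDP payload) starting
+// at the LCM header, then retrieving the decoded layer.
+func exampleParseLCM(udpPayload []byte) (*LCM, bool) {
+ pkt := gopacket.NewPacket(udpPayload, LayerTypeLCM, gopacket.Default)
+ lcm, ok := pkt.Layer(LayerTypeLCM).(*LCM)
+ return lcm, ok
+}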
diff --git a/vendor/github.com/google/gopacket/layers/linux_sll.go b/vendor/github.com/google/gopacket/layers/linux_sll.go
new file mode 100644
index 0000000..85a4f8b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/linux_sll.go
@@ -0,0 +1,98 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+type LinuxSLLPacketType uint16
+
+const (
+ LinuxSLLPacketTypeHost LinuxSLLPacketType = 0 // To us
+ LinuxSLLPacketTypeBroadcast LinuxSLLPacketType = 1 // To all
+ LinuxSLLPacketTypeMulticast LinuxSLLPacketType = 2 // To group
+ LinuxSLLPacketTypeOtherhost LinuxSLLPacketType = 3 // To someone else
+ LinuxSLLPacketTypeOutgoing LinuxSLLPacketType = 4 // Outgoing of any type
+ // These types are invisible to user-level programs
+ LinuxSLLPacketTypeLoopback LinuxSLLPacketType = 5 // MC/BRD frame looped back
+ LinuxSLLPacketTypeFastroute LinuxSLLPacketType = 6 // Fastrouted frame
+)
+
+func (l LinuxSLLPacketType) String() string {
+ switch l {
+ case LinuxSLLPacketTypeHost:
+ return "host"
+ case LinuxSLLPacketTypeBroadcast:
+ return "broadcast"
+ case LinuxSLLPacketTypeMulticast:
+ return "multicast"
+ case LinuxSLLPacketTypeOtherhost:
+ return "otherhost"
+ case LinuxSLLPacketTypeOutgoing:
+ return "outgoing"
+ case LinuxSLLPacketTypeLoopback:
+ return "loopback"
+ case LinuxSLLPacketTypeFastroute:
+ return "fastroute"
+ }
+ return fmt.Sprintf("Unknown(%d)", int(l))
+}
+
+type LinuxSLL struct {
+ BaseLayer
+ PacketType LinuxSLLPacketType
+ AddrLen uint16
+ Addr net.HardwareAddr
+ EthernetType EthernetType
+ AddrType uint16
+}
+
+// LayerType returns LayerTypeLinuxSLL.
+func (sll *LinuxSLL) LayerType() gopacket.LayerType { return LayerTypeLinuxSLL }
+
+func (sll *LinuxSLL) CanDecode() gopacket.LayerClass {
+ return LayerTypeLinuxSLL
+}
+
+func (sll *LinuxSLL) LinkFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointMAC, sll.Addr, nil)
+}
+
+func (sll *LinuxSLL) NextLayerType() gopacket.LayerType {
+ return sll.EthernetType.LayerType()
+}
+
+func (sll *LinuxSLL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 16 {
+ return errors.New("Linux SLL packet too small")
+ }
+ sll.PacketType = LinuxSLLPacketType(binary.BigEndian.Uint16(data[0:2]))
+ sll.AddrType = binary.BigEndian.Uint16(data[2:4])
+ sll.AddrLen = binary.BigEndian.Uint16(data[4:6])
+
+ sll.Addr = net.HardwareAddr(data[6 : sll.AddrLen+6])
+ sll.EthernetType = EthernetType(binary.BigEndian.Uint16(data[14:16]))
+ sll.BaseLayer = BaseLayer{data[:16], data[16:]}
+
+ return nil
+}
+
+func decodeLinuxSLL(data []byte, p gopacket.PacketBuilder) error {
+ sll := &LinuxSLL{}
+ if err := sll.DecodeFromBytes(data, p); err != nil {
+ return err
+ }
+ p.AddLayer(sll)
+ p.SetLinkLayer(sll)
+ return p.NextDecoder(sll.EthernetType)
+}
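+
+// exampleDecodeSLL is an illustrative sketch, not part of the upstream
+// library: the DecodingLayer interface allows decoding a cooked-capture
+// header directly, without building a full Packet.
+func exampleDecodeSLL(data []byte) (*LinuxSLL, error) {
+ sll := &LinuxSLL{}
+ if err := sll.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
+  return nil, err
+ }
+ return sll, nil
+}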
diff --git a/vendor/github.com/google/gopacket/layers/llc.go b/vendor/github.com/google/gopacket/layers/llc.go
new file mode 100644
index 0000000..cad6803
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/llc.go
@@ -0,0 +1,193 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// LLC is the layer used for 802.2 Logical Link Control headers.
+// See http://standards.ieee.org/getieee802/download/802.2-1998.pdf
+type LLC struct {
+ BaseLayer
+ DSAP uint8
+ IG bool // true means group, false means individual
+ SSAP uint8
+ CR bool // true means response, false means command
+ Control uint16
+}
+
+// LayerType returns gopacket.LayerTypeLLC.
+func (l *LLC) LayerType() gopacket.LayerType { return LayerTypeLLC }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (l *LLC) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 3 {
+ return errors.New("LLC header too small")
+ }
+ l.DSAP = data[0] & 0xFE
+ l.IG = data[0]&0x1 != 0
+ l.SSAP = data[1] & 0xFE
+ l.CR = data[1]&0x1 != 0
+ l.Control = uint16(data[2])
+
+ if l.Control&0x1 == 0 || l.Control&0x3 == 0x1 {
+ if len(data) < 4 {
+ return errors.New("LLC header too small")
+ }
+ l.Control = l.Control<<8 | uint16(data[3])
+ l.Contents = data[:4]
+ l.Payload = data[4:]
+ } else {
+ l.Contents = data[:3]
+ l.Payload = data[3:]
+ }
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *LLC) CanDecode() gopacket.LayerClass {
+ return LayerTypeLLC
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *LLC) NextLayerType() gopacket.LayerType {
+ switch {
+ case l.DSAP == 0xAA && l.SSAP == 0xAA:
+ return LayerTypeSNAP
+ case l.DSAP == 0x42 && l.SSAP == 0x42:
+ return LayerTypeSTP
+ }
+ return gopacket.LayerTypeZero // Not implemented
+}
+
+// SNAP is used inside LLC. See
+// http://standards.ieee.org/getieee802/download/802-2001.pdf.
+// From http://en.wikipedia.org/wiki/Subnetwork_Access_Protocol:
+// "[T]he Subnetwork Access Protocol (SNAP) is a mechanism for multiplexing,
+// on networks using IEEE 802.2 LLC, more protocols than can be distinguished
+// by the 8-bit 802.2 Service Access Point (SAP) fields."
+type SNAP struct {
+ BaseLayer
+ OrganizationalCode []byte
+ Type EthernetType
+}
+
+// LayerType returns gopacket.LayerTypeSNAP.
+func (s *SNAP) LayerType() gopacket.LayerType { return LayerTypeSNAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (s *SNAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 5 {
+ return errors.New("SNAP header too small")
+ }
+ s.OrganizationalCode = data[:3]
+ s.Type = EthernetType(binary.BigEndian.Uint16(data[3:5]))
+ s.BaseLayer = BaseLayer{data[:5], data[5:]}
+ return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (s *SNAP) CanDecode() gopacket.LayerClass {
+ return LayerTypeSNAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (s *SNAP) NextLayerType() gopacket.LayerType {
+ // See BUG(gconnel) in decodeSNAP
+ return s.Type.LayerType()
+}
+
+func decodeLLC(data []byte, p gopacket.PacketBuilder) error {
+ l := &LLC{}
+ err := l.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(l)
+ return p.NextDecoder(l.NextLayerType())
+}
+
+func decodeSNAP(data []byte, p gopacket.PacketBuilder) error {
+ s := &SNAP{}
+ err := s.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(s)
+ // BUG(gconnell): When decoding SNAP, we treat the SNAP type as an Ethernet
+ // type. This may not actually be an ethernet type in all cases,
+ // depending on the organizational code. Right now, we don't check.
+ return p.NextDecoder(s.Type)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (l *LLC) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var igFlag, crFlag byte
+ var length int
+
+ if l.Control&0xFF00 != 0 {
+ length = 4
+ } else {
+ length = 3
+ }
+
+ if l.DSAP&0x1 != 0 {
+ return errors.New("DSAP value invalid, should not include IG flag bit")
+ }
+
+ if l.SSAP&0x1 != 0 {
+ return errors.New("SSAP value invalid, should not include CR flag bit")
+ }
+
+ buf, err := b.PrependBytes(length)
+ if err != nil {
+  return err
+ }
+
+ igFlag = 0
+ if l.IG {
+  igFlag = 0x1
+ }
+
+ crFlag = 0
+ if l.CR {
+  crFlag = 0x1
+ }
+
+ buf[0] = l.DSAP + igFlag
+ buf[1] = l.SSAP + crFlag
+
+ if length == 4 {
+  buf[2] = uint8(l.Control >> 8)
+  buf[3] = uint8(l.Control)
+ } else {
+  buf[2] = uint8(l.Control)
+ }
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (s *SNAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(5)
+ if err != nil {
+  return err
+ }
+ buf[0] = s.OrganizationalCode[0]
+ buf[1] = s.OrganizationalCode[1]
+ buf[2] = s.OrganizationalCode[2]
+ binary.BigEndian.PutUint16(buf[3:5], uint16(s.Type))
+
+ return nil
+}
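+
+// exampleSerializeLLCSNAP is an illustrative sketch, not part of the
+// upstream library: an LLC header with DSAP/SSAP 0xAA and UI control 0x03
+// introduces a SNAP header, so the two layers serialize back-to-back.
+func exampleSerializeLLCSNAP() ([]byte, error) {
+ llc := &LLC{DSAP: 0xAA, SSAP: 0xAA, Control: 0x3}
+ snap := &SNAP{OrganizationalCode: []byte{0, 0, 0}, Type: EthernetTypeIPv4}
+ buf := gopacket.NewSerializeBuffer()
+ if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, llc, snap); err != nil {
+  return nil, err
+ }
+ return buf.Bytes(), nil
+}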
diff --git a/vendor/github.com/google/gopacket/layers/lldp.go b/vendor/github.com/google/gopacket/layers/lldp.go
new file mode 100644
index 0000000..e128260
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lldp.go
@@ -0,0 +1,1585 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// LLDPTLVType is the type of each TLV value in a LinkLayerDiscovery packet.
+type LLDPTLVType byte
+
+const (
+ LLDPTLVEnd LLDPTLVType = 0
+ LLDPTLVChassisID LLDPTLVType = 1
+ LLDPTLVPortID LLDPTLVType = 2
+ LLDPTLVTTL LLDPTLVType = 3
+ LLDPTLVPortDescription LLDPTLVType = 4
+ LLDPTLVSysName LLDPTLVType = 5
+ LLDPTLVSysDescription LLDPTLVType = 6
+ LLDPTLVSysCapabilities LLDPTLVType = 7
+ LLDPTLVMgmtAddress LLDPTLVType = 8
+ LLDPTLVOrgSpecific LLDPTLVType = 127
+)
+
+// LinkLayerDiscoveryValue is a TLV value inside a LinkLayerDiscovery packet layer.
+type LinkLayerDiscoveryValue struct {
+ Type LLDPTLVType
+ Length uint16
+ Value []byte
+}
+
+func (c *LinkLayerDiscoveryValue) len() int {
+ return 0
+}
+
+// LLDPChassisIDSubType specifies the value type for a single LLDPChassisID.ID
+type LLDPChassisIDSubType byte
+
+// LLDP Chassis ID Subtypes
+const (
+ LLDPChassisIDSubTypeReserved LLDPChassisIDSubType = 0
+ LLDPChassisIDSubTypeChassisComp LLDPChassisIDSubType = 1
+ LLDPChassisIDSubtypeIfaceAlias LLDPChassisIDSubType = 2
+ LLDPChassisIDSubTypePortComp LLDPChassisIDSubType = 3
+ LLDPChassisIDSubTypeMACAddr LLDPChassisIDSubType = 4
+ LLDPChassisIDSubTypeNetworkAddr LLDPChassisIDSubType = 5
+ LLDPChassisIDSubtypeIfaceName LLDPChassisIDSubType = 6
+ LLDPChassisIDSubTypeLocal LLDPChassisIDSubType = 7
+)
+
+type LLDPChassisID struct {
+ Subtype LLDPChassisIDSubType
+ ID []byte
+}
+
+func (c *LLDPChassisID) serialize() []byte {
+
+ var buf = make([]byte, c.serializedLen())
+ idLen := uint16(LLDPTLVChassisID)<<9 | uint16(len(c.ID)+1) // the TLV type takes the top 7 bits, the length the low 9 bits; +1 for the subtype byte
+ binary.BigEndian.PutUint16(buf[0:2], idLen)
+ buf[2] = byte(c.Subtype)
+ copy(buf[3:], c.ID)
+ return buf
+}
+
+func (c *LLDPChassisID) serializedLen() int {
+ return len(c.ID) + 3 // +2 for id and length, +1 for subtype
+}
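+
+// Worked example (informative): a chassis ID carrying a 6-byte MAC address
+// gives len(c.ID)+1 = 7, so the packed TLV header above is
+// uint16(LLDPTLVChassisID)<<9 | 7 = 1<<9 | 7 = 0x0207.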
+
+// LLDPPortIDSubType specifies the value type for a single LLDPPortID.ID
+type LLDPPortIDSubType byte
+
+// LLDP Port ID Subtypes
+const (
+ LLDPPortIDSubtypeReserved LLDPPortIDSubType = 0
+ LLDPPortIDSubtypeIfaceAlias LLDPPortIDSubType = 1
+ LLDPPortIDSubtypePortComp LLDPPortIDSubType = 2
+ LLDPPortIDSubtypeMACAddr LLDPPortIDSubType = 3
+ LLDPPortIDSubtypeNetworkAddr LLDPPortIDSubType = 4
+ LLDPPortIDSubtypeIfaceName LLDPPortIDSubType = 5
+ LLDPPortIDSubtypeAgentCircuitID LLDPPortIDSubType = 6
+ LLDPPortIDSubtypeLocal LLDPPortIDSubType = 7
+)
+
+type LLDPPortID struct {
+ Subtype LLDPPortIDSubType
+ ID []byte
+}
+
+func (c *LLDPPortID) serialize() []byte {
+
+ var buf = make([]byte, c.serializedLen())
+ idLen := uint16(LLDPTLVPortID)<<9 | uint16(len(c.ID)+1) // the TLV type takes the top 7 bits, the length the low 9 bits; +1 for the subtype byte
+ binary.BigEndian.PutUint16(buf[0:2], idLen)
+ buf[2] = byte(c.Subtype)
+ copy(buf[3:], c.ID)
+ return buf
+}
+
+func (c *LLDPPortID) serializedLen() int {
+ return len(c.ID) + 3 // +2 for id and length, +1 for subtype
+}
+
+// LinkLayerDiscovery is a packet layer containing the LinkLayer Discovery Protocol.
+// See http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf
+// ChassisID, PortID and TTL are mandatory TLVs. Other values can be decoded
+// with DecodeValues().
+type LinkLayerDiscovery struct {
+ BaseLayer
+ ChassisID LLDPChassisID
+ PortID LLDPPortID
+ TTL uint16
+ Values []LinkLayerDiscoveryValue
+}
+
+type IEEEOUI uint32
+
+// http://standards.ieee.org/develop/regauth/oui/oui.txt
+const (
+ IEEEOUI8021 IEEEOUI = 0x0080c2
+ IEEEOUI8023 IEEEOUI = 0x00120f
+ IEEEOUI80211 IEEEOUI = 0x000fac
+ IEEEOUI8021Qbg IEEEOUI = 0x0013BF
+ IEEEOUICisco2 IEEEOUI = 0x000142
+ IEEEOUIMedia IEEEOUI = 0x0012bb // TR-41
+ IEEEOUIProfinet IEEEOUI = 0x000ecf
+ IEEEOUIDCBX IEEEOUI = 0x001b21
+)
+
+// LLDPOrgSpecificTLV is an Organisation-specific TLV
+type LLDPOrgSpecificTLV struct {
+ OUI IEEEOUI
+ SubType uint8
+ Info []byte
+}
+
+// LLDPCapabilities Types
+const (
+ LLDPCapsOther uint16 = 1 << 0
+ LLDPCapsRepeater uint16 = 1 << 1
+ LLDPCapsBridge uint16 = 1 << 2
+ LLDPCapsWLANAP uint16 = 1 << 3
+ LLDPCapsRouter uint16 = 1 << 4
+ LLDPCapsPhone uint16 = 1 << 5
+ LLDPCapsDocSis uint16 = 1 << 6
+ LLDPCapsStationOnly uint16 = 1 << 7
+ LLDPCapsCVLAN uint16 = 1 << 8
+ LLDPCapsSVLAN uint16 = 1 << 9
+ LLDPCapsTmpr uint16 = 1 << 10
+)
+
+// LLDPCapabilities represents the capabilities of a device
+type LLDPCapabilities struct {
+ Other bool
+ Repeater bool
+ Bridge bool
+ WLANAP bool
+ Router bool
+ Phone bool
+ DocSis bool
+ StationOnly bool
+ CVLAN bool
+ SVLAN bool
+ TMPR bool
+}
+
+type LLDPSysCapabilities struct {
+ SystemCap LLDPCapabilities
+ EnabledCap LLDPCapabilities
+}
+
+type IANAAddressFamily byte
+
+// LLDP Management Address Subtypes
+// http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml
+const (
+ IANAAddressFamilyReserved IANAAddressFamily = 0
+ IANAAddressFamilyIPV4 IANAAddressFamily = 1
+ IANAAddressFamilyIPV6 IANAAddressFamily = 2
+ IANAAddressFamilyNSAP IANAAddressFamily = 3
+ IANAAddressFamilyHDLC IANAAddressFamily = 4
+ IANAAddressFamilyBBN1822 IANAAddressFamily = 5
+ IANAAddressFamily802 IANAAddressFamily = 6
+ IANAAddressFamilyE163 IANAAddressFamily = 7
+ IANAAddressFamilyE164 IANAAddressFamily = 8
+ IANAAddressFamilyF69 IANAAddressFamily = 9
+ IANAAddressFamilyX121 IANAAddressFamily = 10
+ IANAAddressFamilyIPX IANAAddressFamily = 11
+ IANAAddressFamilyAtalk IANAAddressFamily = 12
+ IANAAddressFamilyDecnet IANAAddressFamily = 13
+ IANAAddressFamilyBanyan IANAAddressFamily = 14
+ IANAAddressFamilyE164NSAP IANAAddressFamily = 15
+ IANAAddressFamilyDNS IANAAddressFamily = 16
+ IANAAddressFamilyDistname IANAAddressFamily = 17
+ IANAAddressFamilyASNumber IANAAddressFamily = 18
+ IANAAddressFamilyXTPIPV4 IANAAddressFamily = 19
+ IANAAddressFamilyXTPIPV6 IANAAddressFamily = 20
+ IANAAddressFamilyXTP IANAAddressFamily = 21
+ IANAAddressFamilyFcWWPN IANAAddressFamily = 22
+ IANAAddressFamilyFcWWNN IANAAddressFamily = 23
+ IANAAddressFamilyGWID IANAAddressFamily = 24
+ IANAAddressFamilyL2VPN IANAAddressFamily = 25
+)
+
+type LLDPInterfaceSubtype byte
+
+// LLDP Interface Subtypes
+const (
+ LLDPInterfaceSubtypeUnknown LLDPInterfaceSubtype = 1
+ LLDPInterfaceSubtypeifIndex LLDPInterfaceSubtype = 2
+ LLDPInterfaceSubtypeSysPort LLDPInterfaceSubtype = 3
+)
+
+type LLDPMgmtAddress struct {
+ Subtype IANAAddressFamily
+ Address []byte
+ InterfaceSubtype LLDPInterfaceSubtype
+ InterfaceNumber uint32
+ OID string
+}
+
+// LinkLayerDiscoveryInfo represents the decoded details for a set of LinkLayerDiscoveryValues.
+// Organisation-specific TLVs can be decoded using the various Decode() methods.
+type LinkLayerDiscoveryInfo struct {
+ BaseLayer
+ PortDescription string
+ SysName string
+ SysDescription string
+ SysCapabilities LLDPSysCapabilities
+ MgmtAddress LLDPMgmtAddress
+ OrgTLVs []LLDPOrgSpecificTLV // Private TLVs
+ Unknown []LinkLayerDiscoveryValue // undecoded TLVs
+}
+
+// IEEE 802.1 TLV Subtypes
+const (
+ LLDP8021SubtypePortVLANID uint8 = 1
+ LLDP8021SubtypeProtocolVLANID uint8 = 2
+ LLDP8021SubtypeVLANName uint8 = 3
+ LLDP8021SubtypeProtocolIdentity uint8 = 4
+ LLDP8021SubtypeVDIUsageDigest uint8 = 5
+ LLDP8021SubtypeManagementVID uint8 = 6
+ LLDP8021SubtypeLinkAggregation uint8 = 7
+)
+
+// VLAN Port Protocol ID options
+const (
+ LLDPProtocolVLANIDCapability byte = 1 << 1
+ LLDPProtocolVLANIDStatus byte = 1 << 2
+)
+
+type PortProtocolVLANID struct {
+ Supported bool
+ Enabled bool
+ ID uint16
+}
+
+type VLANName struct {
+ ID uint16
+ Name string
+}
+
+type ProtocolIdentity []byte
+
+// LACP options
+const (
+ LLDPAggregationCapability byte = 1 << 0
+ LLDPAggregationStatus byte = 1 << 1
+)
+
+// IEEE 802 Link Aggregation parameters
+type LLDPLinkAggregation struct {
+ Supported bool
+ Enabled bool
+ PortID uint32
+}
+
+// LLDPInfo8021 represents the information carried in 802.1 Org-specific TLVs
+type LLDPInfo8021 struct {
+ PVID uint16
+ PPVIDs []PortProtocolVLANID
+ VLANNames []VLANName
+ ProtocolIdentities []ProtocolIdentity
+ VIDUsageDigest uint32
+ ManagementVID uint16
+ LinkAggregation LLDPLinkAggregation
+}
+
+// IEEE 802.3 TLV Subtypes
+const (
+ LLDP8023SubtypeMACPHY uint8 = 1
+ LLDP8023SubtypeMDIPower uint8 = 2
+ LLDP8023SubtypeLinkAggregation uint8 = 3
+ LLDP8023SubtypeMTU uint8 = 4
+)
+
+// MACPHY options
+const (
+ LLDPMACPHYCapability byte = 1 << 0
+ LLDPMACPHYStatus byte = 1 << 1
+)
+
+// From IANA-MAU-MIB (introduced by RFC 4836) - dot3MauType
+const (
+ LLDPMAUTypeUnknown uint16 = 0
+ LLDPMAUTypeAUI uint16 = 1
+ LLDPMAUType10Base5 uint16 = 2
+ LLDPMAUTypeFOIRL uint16 = 3
+ LLDPMAUType10Base2 uint16 = 4
+ LLDPMAUType10BaseT uint16 = 5
+ LLDPMAUType10BaseFP uint16 = 6
+ LLDPMAUType10BaseFB uint16 = 7
+ LLDPMAUType10BaseFL uint16 = 8
+ LLDPMAUType10BROAD36 uint16 = 9
+ LLDPMAUType10BaseT_HD uint16 = 10
+ LLDPMAUType10BaseT_FD uint16 = 11
+ LLDPMAUType10BaseFL_HD uint16 = 12
+ LLDPMAUType10BaseFL_FD uint16 = 13
+ LLDPMAUType100BaseT4 uint16 = 14
+ LLDPMAUType100BaseTX_HD uint16 = 15
+ LLDPMAUType100BaseTX_FD uint16 = 16
+ LLDPMAUType100BaseFX_HD uint16 = 17
+ LLDPMAUType100BaseFX_FD uint16 = 18
+ LLDPMAUType100BaseT2_HD uint16 = 19
+ LLDPMAUType100BaseT2_FD uint16 = 20
+ LLDPMAUType1000BaseX_HD uint16 = 21
+ LLDPMAUType1000BaseX_FD uint16 = 22
+ LLDPMAUType1000BaseLX_HD uint16 = 23
+ LLDPMAUType1000BaseLX_FD uint16 = 24
+ LLDPMAUType1000BaseSX_HD uint16 = 25
+ LLDPMAUType1000BaseSX_FD uint16 = 26
+ LLDPMAUType1000BaseCX_HD uint16 = 27
+ LLDPMAUType1000BaseCX_FD uint16 = 28
+ LLDPMAUType1000BaseT_HD uint16 = 29
+ LLDPMAUType1000BaseT_FD uint16 = 30
+ LLDPMAUType10GBaseX uint16 = 31
+ LLDPMAUType10GBaseLX4 uint16 = 32
+ LLDPMAUType10GBaseR uint16 = 33
+ LLDPMAUType10GBaseER uint16 = 34
+ LLDPMAUType10GBaseLR uint16 = 35
+ LLDPMAUType10GBaseSR uint16 = 36
+ LLDPMAUType10GBaseW uint16 = 37
+ LLDPMAUType10GBaseEW uint16 = 38
+ LLDPMAUType10GBaseLW uint16 = 39
+ LLDPMAUType10GBaseSW uint16 = 40
+ LLDPMAUType10GBaseCX4 uint16 = 41
+ LLDPMAUType2BaseTL uint16 = 42
+ LLDPMAUType10PASS_TS uint16 = 43
+ LLDPMAUType100BaseBX10D uint16 = 44
+ LLDPMAUType100BaseBX10U uint16 = 45
+ LLDPMAUType100BaseLX10 uint16 = 46
+ LLDPMAUType1000BaseBX10D uint16 = 47
+ LLDPMAUType1000BaseBX10U uint16 = 48
+ LLDPMAUType1000BaseLX10 uint16 = 49
+ LLDPMAUType1000BasePX10D uint16 = 50
+ LLDPMAUType1000BasePX10U uint16 = 51
+ LLDPMAUType1000BasePX20D uint16 = 52
+ LLDPMAUType1000BasePX20U uint16 = 53
+ LLDPMAUType10GBaseT uint16 = 54
+ LLDPMAUType10GBaseLRM uint16 = 55
+ LLDPMAUType1000BaseKX uint16 = 56
+ LLDPMAUType10GBaseKX4 uint16 = 57
+ LLDPMAUType10GBaseKR uint16 = 58
+ LLDPMAUType10_1GBasePRX_D1 uint16 = 59
+ LLDPMAUType10_1GBasePRX_D2 uint16 = 60
+ LLDPMAUType10_1GBasePRX_D3 uint16 = 61
+ LLDPMAUType10_1GBasePRX_U1 uint16 = 62
+ LLDPMAUType10_1GBasePRX_U2 uint16 = 63
+ LLDPMAUType10_1GBasePRX_U3 uint16 = 64
+ LLDPMAUType10GBasePR_D1 uint16 = 65
+ LLDPMAUType10GBasePR_D2 uint16 = 66
+ LLDPMAUType10GBasePR_D3 uint16 = 67
+ LLDPMAUType10GBasePR_U1 uint16 = 68
+ LLDPMAUType10GBasePR_U3 uint16 = 69
+)
+
+// From RFC 3636 - ifMauAutoNegCapAdvertisedBits
+const (
+ LLDPMAUPMDOther uint16 = 1 << 15
+ LLDPMAUPMD10BaseT uint16 = 1 << 14
+ LLDPMAUPMD10BaseT_FD uint16 = 1 << 13
+ LLDPMAUPMD100BaseT4 uint16 = 1 << 12
+ LLDPMAUPMD100BaseTX uint16 = 1 << 11
+ LLDPMAUPMD100BaseTX_FD uint16 = 1 << 10
+ LLDPMAUPMD100BaseT2 uint16 = 1 << 9
+ LLDPMAUPMD100BaseT2_FD uint16 = 1 << 8
+ LLDPMAUPMDFDXPAUSE uint16 = 1 << 7
+ LLDPMAUPMDFDXAPAUSE uint16 = 1 << 6
+ LLDPMAUPMDFDXSPAUSE uint16 = 1 << 5
+ LLDPMAUPMDFDXBPAUSE uint16 = 1 << 4
+ LLDPMAUPMD1000BaseX uint16 = 1 << 3
+ LLDPMAUPMD1000BaseX_FD uint16 = 1 << 2
+ LLDPMAUPMD1000BaseT uint16 = 1 << 1
+ LLDPMAUPMD1000BaseT_FD uint16 = 1 << 0
+)
+
+// Inverted ifMauAutoNegCapAdvertisedBits if required
+// (Some manufacturers misinterpreted the spec -
+// see https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=1455)
+const (
+ LLDPMAUPMDOtherInv uint16 = 1 << 0
+ LLDPMAUPMD10BaseTInv uint16 = 1 << 1
+ LLDPMAUPMD10BaseT_FDInv uint16 = 1 << 2
+ LLDPMAUPMD100BaseT4Inv uint16 = 1 << 3
+ LLDPMAUPMD100BaseTXInv uint16 = 1 << 4
+ LLDPMAUPMD100BaseTX_FDInv uint16 = 1 << 5
+ LLDPMAUPMD100BaseT2Inv uint16 = 1 << 6
+ LLDPMAUPMD100BaseT2_FDInv uint16 = 1 << 7
+ LLDPMAUPMDFDXPAUSEInv uint16 = 1 << 8
+ LLDPMAUPMDFDXAPAUSEInv uint16 = 1 << 9
+ LLDPMAUPMDFDXSPAUSEInv uint16 = 1 << 10
+ LLDPMAUPMDFDXBPAUSEInv uint16 = 1 << 11
+ LLDPMAUPMD1000BaseXInv uint16 = 1 << 12
+ LLDPMAUPMD1000BaseX_FDInv uint16 = 1 << 13
+ LLDPMAUPMD1000BaseTInv uint16 = 1 << 14
+ LLDPMAUPMD1000BaseT_FDInv uint16 = 1 << 15
+)
+
+// LLDPMACPHYConfigStatus holds the fields of an 802.3 MAC/PHY
+// Configuration/Status TLV.
+type LLDPMACPHYConfigStatus struct {
+ AutoNegSupported bool
+ AutoNegEnabled bool
+ AutoNegCapability uint16
+ MAUType uint16
+}
+
+// MDI Power options
+const (
+ LLDPMDIPowerPortClass byte = 1 << 0
+ LLDPMDIPowerCapability byte = 1 << 1
+ LLDPMDIPowerStatus byte = 1 << 2
+ LLDPMDIPowerPairsAbility byte = 1 << 3
+)
+
+// LLDPPowerType denotes whether the advertising device is a PSE or a PD,
+// and of which type; see its String method for the mapping.
+type LLDPPowerType byte
+
+// LLDPPowerSource denotes the device's power source; the meaning of its
+// values depends on whether the device is a PSE or a PD.
+type LLDPPowerSource byte
+
+// LLDPPowerPriority denotes the power priority of a port.
+type LLDPPowerPriority byte
+
+const (
+ LLDPPowerPriorityUnknown LLDPPowerPriority = 0
+ LLDPPowerPriorityMedium LLDPPowerPriority = 1
+ LLDPPowerPriorityHigh LLDPPowerPriority = 2
+ LLDPPowerPriorityLow LLDPPowerPriority = 3
+)
+
+// LLDPPowerViaMDI8023 holds the fields of an 802.3 Power Via MDI TLV.
+type LLDPPowerViaMDI8023 struct {
+ PortClassPSE bool // false = PD
+ PSESupported bool
+ PSEEnabled bool
+ PSEPairsAbility bool
+ PSEPowerPair uint8
+ PSEClass uint8
+ Type LLDPPowerType
+ Source LLDPPowerSource
+ Priority LLDPPowerPriority
+ Requested uint16 // 1-510 Watts
+ Allocated uint16 // 1-510 Watts
+}
+
+// LLDPInfo8023 represents the information carried in 802.3 Org-specific TLVs
+type LLDPInfo8023 struct {
+ MACPHYConfigStatus LLDPMACPHYConfigStatus
+ PowerViaMDI LLDPPowerViaMDI8023
+ LinkAggregation LLDPLinkAggregation
+ MTU uint16
+}
+
+// IEEE 802.1Qbg TLV Subtypes
+const (
+ LLDP8021QbgEVB uint8 = 0
+ LLDP8021QbgCDCP uint8 = 1
+ LLDP8021QbgVDP uint8 = 2
+ LLDP8021QbgEVB22 uint8 = 13
+)
+
+// LLDPEVBCapabilities Types
+const (
+ LLDPEVBCapsSTD uint16 = 1 << 7
+ LLDPEVBCapsRR uint16 = 1 << 6
+ LLDPEVBCapsRTE uint16 = 1 << 2
+ LLDPEVBCapsECP uint16 = 1 << 1
+ LLDPEVBCapsVDP uint16 = 1 << 0
+)
+
+// LLDPEVBCapabilities represents the EVB capabilities of a device
+type LLDPEVBCapabilities struct {
+ StandardBridging bool
+ ReflectiveRelay bool
+ RetransmissionTimerExponent bool
+ EdgeControlProtocol bool
+ VSIDiscoveryProtocol bool
+}
+
+// LLDPEVBSettings holds the EVB settings carried in an 802.1Qbg EVB TLV.
+type LLDPEVBSettings struct {
+ Supported LLDPEVBCapabilities
+ Enabled LLDPEVBCapabilities
+ SupportedVSIs uint16
+ ConfiguredVSIs uint16
+ RTEExponent uint8
+}
+
+// LLDPInfo8021Qbg represents the information carried in 802.1Qbg Org-specific TLVs
+type LLDPInfo8021Qbg struct {
+ EVBSettings LLDPEVBSettings
+}
+
+type LLDPMediaSubtype uint8
+
+// Media TLV Subtypes
+const (
+ LLDPMediaTypeCapabilities LLDPMediaSubtype = 1
+ LLDPMediaTypeNetwork LLDPMediaSubtype = 2
+ LLDPMediaTypeLocation LLDPMediaSubtype = 3
+ LLDPMediaTypePower LLDPMediaSubtype = 4
+ LLDPMediaTypeHardware LLDPMediaSubtype = 5
+ LLDPMediaTypeFirmware LLDPMediaSubtype = 6
+ LLDPMediaTypeSoftware LLDPMediaSubtype = 7
+ LLDPMediaTypeSerial LLDPMediaSubtype = 8
+ LLDPMediaTypeManufacturer LLDPMediaSubtype = 9
+ LLDPMediaTypeModel LLDPMediaSubtype = 10
+ LLDPMediaTypeAssetID LLDPMediaSubtype = 11
+)
+
+type LLDPMediaClass uint8
+
+// Media Class Values
+const (
+ LLDPMediaClassUndefined LLDPMediaClass = 0
+ LLDPMediaClassEndpointI LLDPMediaClass = 1
+ LLDPMediaClassEndpointII LLDPMediaClass = 2
+ LLDPMediaClassEndpointIII LLDPMediaClass = 3
+ LLDPMediaClassNetwork LLDPMediaClass = 4
+)
+
+// LLDPMediaCapabilities Types
+const (
+ LLDPMediaCapsLLDP uint16 = 1 << 0
+ LLDPMediaCapsNetwork uint16 = 1 << 1
+ LLDPMediaCapsLocation uint16 = 1 << 2
+ LLDPMediaCapsPowerPSE uint16 = 1 << 3
+ LLDPMediaCapsPowerPD uint16 = 1 << 4
+ LLDPMediaCapsInventory uint16 = 1 << 5
+)
+
+// LLDPMediaCapabilities represents the LLDP Media capabilities of a device
+type LLDPMediaCapabilities struct {
+ Capabilities bool
+ NetworkPolicy bool
+ Location bool
+ PowerPSE bool
+ PowerPD bool
+ Inventory bool
+ Class LLDPMediaClass
+}
+
+type LLDPApplicationType uint8
+
+const (
+ LLDPAppTypeReserved LLDPApplicationType = 0
+ LLDPAppTypeVoice LLDPApplicationType = 1
+ LLDPappTypeVoiceSignaling LLDPApplicationType = 2
+ LLDPappTypeGuestVoice LLDPApplicationType = 3
+ LLDPappTypeGuestVoiceSignaling LLDPApplicationType = 4
+ LLDPappTypeSoftphoneVoice LLDPApplicationType = 5
+ LLDPappTypeVideoConferencing LLDPApplicationType = 6
+ LLDPappTypeStreamingVideo LLDPApplicationType = 7
+ LLDPappTypeVideoSignaling LLDPApplicationType = 8
+)
+
+// LLDPNetworkPolicy holds the fields of an LLDP-MED Network Policy TLV.
+type LLDPNetworkPolicy struct {
+ ApplicationType LLDPApplicationType
+ Defined bool
+ Tagged bool
+ VLANId uint16
+ L2Priority uint16
+ DSCPValue uint8
+}
+
+type LLDPLocationFormat uint8
+
+const (
+ LLDPLocationFormatInvalid LLDPLocationFormat = 0
+ LLDPLocationFormatCoordinate LLDPLocationFormat = 1
+ LLDPLocationFormatAddress LLDPLocationFormat = 2
+ LLDPLocationFormatECS LLDPLocationFormat = 3
+)
+
+type LLDPLocationAddressWhat uint8
+
+const (
+ LLDPLocationAddressWhatDHCP LLDPLocationAddressWhat = 0
+ LLDPLocationAddressWhatNetwork LLDPLocationAddressWhat = 1
+ LLDPLocationAddressWhatClient LLDPLocationAddressWhat = 2
+)
+
+type LLDPLocationAddressType uint8
+
+const (
+ LLDPLocationAddressTypeLanguage LLDPLocationAddressType = 0
+ LLDPLocationAddressTypeNational LLDPLocationAddressType = 1
+ LLDPLocationAddressTypeCounty LLDPLocationAddressType = 2
+ LLDPLocationAddressTypeCity LLDPLocationAddressType = 3
+ LLDPLocationAddressTypeCityDivision LLDPLocationAddressType = 4
+ LLDPLocationAddressTypeNeighborhood LLDPLocationAddressType = 5
+ LLDPLocationAddressTypeStreet LLDPLocationAddressType = 6
+ LLDPLocationAddressTypeLeadingStreet LLDPLocationAddressType = 16
+ LLDPLocationAddressTypeTrailingStreet LLDPLocationAddressType = 17
+ LLDPLocationAddressTypeStreetSuffix LLDPLocationAddressType = 18
+ LLDPLocationAddressTypeHouseNum LLDPLocationAddressType = 19
+ LLDPLocationAddressTypeHouseSuffix LLDPLocationAddressType = 20
+ LLDPLocationAddressTypeLandmark LLDPLocationAddressType = 21
+ LLDPLocationAddressTypeAdditional LLDPLocationAddressType = 22
+ LLDPLocationAddressTypeName LLDPLocationAddressType = 23
+ LLDPLocationAddressTypePostal LLDPLocationAddressType = 24
+ LLDPLocationAddressTypeBuilding LLDPLocationAddressType = 25
+ LLDPLocationAddressTypeUnit LLDPLocationAddressType = 26
+ LLDPLocationAddressTypeFloor LLDPLocationAddressType = 27
+ LLDPLocationAddressTypeRoom LLDPLocationAddressType = 28
+ LLDPLocationAddressTypePlace LLDPLocationAddressType = 29
+ LLDPLocationAddressTypeScript LLDPLocationAddressType = 128
+)
+
+type LLDPLocationCoordinate struct {
+ LatitudeResolution uint8
+ Latitude uint64
+ LongitudeResolution uint8
+ Longitude uint64
+ AltitudeType uint8
+ AltitudeResolution uint16
+ Altitude uint32
+ Datum uint8
+}
+
+type LLDPLocationAddressLine struct {
+ Type LLDPLocationAddressType
+ Value string
+}
+
+type LLDPLocationAddress struct {
+ What LLDPLocationAddressWhat
+ CountryCode string
+ AddressLines []LLDPLocationAddressLine
+}
+
+type LLDPLocationECS struct {
+ ELIN string
+}
+
+// LLDPLocation represents a physical location.
+// Only one of the embedded types will contain values, depending on Format.
+type LLDPLocation struct {
+ Format LLDPLocationFormat
+ Coordinate LLDPLocationCoordinate
+ Address LLDPLocationAddress
+ ECS LLDPLocationECS
+}
+
+// LLDPPowerViaMDI holds the fields of an LLDP-MED Extended Power-via-MDI TLV.
+type LLDPPowerViaMDI struct {
+ Type LLDPPowerType
+ Source LLDPPowerSource
+ Priority LLDPPowerPriority
+ Value uint16
+}
+
+// LLDPInfoMedia represents the information carried in TR-41 Org-specific TLVs
+type LLDPInfoMedia struct {
+ MediaCapabilities LLDPMediaCapabilities
+ NetworkPolicy LLDPNetworkPolicy
+ Location LLDPLocation
+ PowerViaMDI LLDPPowerViaMDI
+ HardwareRevision string
+ FirmwareRevision string
+ SoftwareRevision string
+ SerialNumber string
+ Manufacturer string
+ Model string
+ AssetID string
+}
+
+type LLDPCisco2Subtype uint8
+
+// Cisco2 TLV Subtypes
+const (
+ LLDPCisco2PowerViaMDI LLDPCisco2Subtype = 1
+)
+
+const (
+ LLDPCiscoPSESupport uint8 = 1 << 0
+ LLDPCiscoArchShared uint8 = 1 << 1
+ LLDPCiscoPDSparePair uint8 = 1 << 2
+ LLDPCiscoPSESparePair uint8 = 1 << 3
+)
+
+// LLDPInfoCisco2 represents the information carried in Cisco Org-specific TLVs
+type LLDPInfoCisco2 struct {
+ PSEFourWirePoESupported bool
+ PDSparePairArchitectureShared bool
+ PDRequestSparePairPoEOn bool
+ PSESparePairPoEOn bool
+}
+
+// Profinet Subtypes
+type LLDPProfinetSubtype uint8
+
+const (
+ LLDPProfinetPNIODelay LLDPProfinetSubtype = 1
+ LLDPProfinetPNIOPortStatus LLDPProfinetSubtype = 2
+ LLDPProfinetPNIOMRPPortStatus LLDPProfinetSubtype = 4
+ LLDPProfinetPNIOChassisMAC LLDPProfinetSubtype = 5
+ LLDPProfinetPNIOPTCPStatus LLDPProfinetSubtype = 6
+)
+
+type LLDPPNIODelay struct {
+ RXLocal uint32
+ RXRemote uint32
+ TXLocal uint32
+ TXRemote uint32
+ CableLocal uint32
+}
+
+type LLDPPNIOPortStatus struct {
+ Class2 uint16
+ Class3 uint16
+}
+
+type LLDPPNIOMRPPortStatus struct {
+ UUID []byte
+ Status uint16
+}
+
+type LLDPPNIOPTCPStatus struct {
+ MasterAddress []byte
+ SubdomainUUID []byte
+ IRDataUUID []byte
+ PeriodValid bool
+ PeriodLength uint32
+ RedPeriodValid bool
+ RedPeriodBegin uint32
+ OrangePeriodValid bool
+ OrangePeriodBegin uint32
+ GreenPeriodValid bool
+ GreenPeriodBegin uint32
+}
+
+// LLDPInfoProfinet represents the information carried in Profinet Org-specific TLVs
+type LLDPInfoProfinet struct {
+ PNIODelay LLDPPNIODelay
+ PNIOPortStatus LLDPPNIOPortStatus
+ PNIOMRPPortStatus LLDPPNIOMRPPortStatus
+ ChassisMAC []byte
+ PNIOPTCPStatus LLDPPNIOPTCPStatus
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscovery.
+func (c *LinkLayerDiscovery) LayerType() gopacket.LayerType {
+ return LayerTypeLinkLayerDiscovery
+}
+
+// SerializeTo serializes LLDP packet to bytes and writes on SerializeBuffer.
+func (c *LinkLayerDiscovery) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ chassIDLen := c.ChassisID.serializedLen()
+ portIDLen := c.PortID.serializedLen()
+ vb, err := b.AppendBytes(chassIDLen + portIDLen + 4) // +4 for TTL
+ if err != nil {
+ return err
+ }
+ copy(vb[:chassIDLen], c.ChassisID.serialize())
+ copy(vb[chassIDLen:], c.PortID.serialize())
+ ttlIDLen := uint16(LLDPTLVTTL)<<9 | uint16(2)
+ binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen:], ttlIDLen)
+ binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen+2:], c.TTL)
+
+ vb, err = b.AppendBytes(2) // End Tlv, 2 bytes
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(vb[len(vb)-2:], uint16(0)) // End TLV, 2 bytes, all zero
+ return nil
+}
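+
+// A note on the serializer above: it writes only the mandatory TLVs
+// (Chassis ID, Port ID, TTL and the closing End TLV); optional Values are
+// not emitted. A TLV header is one big-endian uint16 whose upper 7 bits
+// carry the TLV type and whose lower 9 bits carry the value length, hence
+// uint16(type)<<9 | uint16(length); for the TTL TLV (type 3, fixed 2-byte
+// value) this yields 3<<9|2 = 0x0602.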
+
+func decodeLinkLayerDiscovery(data []byte, p gopacket.PacketBuilder) error {
+ var vals []LinkLayerDiscoveryValue
+ vData := data[0:]
+ for len(vData) >= 2 {
+ nbit := vData[0] & 0x01
+ t := LLDPTLVType(vData[0] >> 1)
+ val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit)<<8 + uint16(vData[1])}
+ // Validate the length before slicing so malformed input cannot panic.
+ if len(vData) < int(2+val.Length) {
+ return errors.New("Malformed LinkLayerDiscovery Header")
+ }
+ if val.Length > 0 {
+ val.Value = vData[2 : val.Length+2]
+ }
+ vals = append(vals, val)
+ if t == LLDPTLVEnd {
+ break
+ }
+ vData = vData[2+val.Length:]
+ }
+ if len(vals) < 4 {
+ return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+ }
+ c := &LinkLayerDiscovery{}
+ gotEnd := false
+ for _, v := range vals {
+ switch v.Type {
+ case LLDPTLVEnd:
+ gotEnd = true
+ case LLDPTLVChassisID:
+ if len(v.Value) < 2 {
+ return errors.New("Malformed LinkLayerDiscovery ChassisID TLV")
+ }
+ c.ChassisID.Subtype = LLDPChassisIDSubType(v.Value[0])
+ c.ChassisID.ID = v.Value[1:]
+ case LLDPTLVPortID:
+ if len(v.Value) < 2 {
+ return errors.New("Malformed LinkLayerDiscovery PortID TLV")
+ }
+ c.PortID.Subtype = LLDPPortIDSubType(v.Value[0])
+ c.PortID.ID = v.Value[1:]
+ case LLDPTLVTTL:
+ if len(v.Value) < 2 {
+ return errors.New("Malformed LinkLayerDiscovery TTL TLV")
+ }
+ c.TTL = binary.BigEndian.Uint16(v.Value[0:2])
+ default:
+ c.Values = append(c.Values, v)
+ }
+ }
+ if c.ChassisID.Subtype == 0 || c.PortID.Subtype == 0 || !gotEnd {
+ return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+ }
+ c.Contents = data
+ p.AddLayer(c)
+
+ info := &LinkLayerDiscoveryInfo{}
+ p.AddLayer(info)
+ for _, v := range c.Values {
+ switch v.Type {
+ case LLDPTLVPortDescription:
+ info.PortDescription = string(v.Value)
+ case LLDPTLVSysName:
+ info.SysName = string(v.Value)
+ case LLDPTLVSysDescription:
+ info.SysDescription = string(v.Value)
+ case LLDPTLVSysCapabilities:
+ if err := checkLLDPTLVLen(v, 4); err != nil {
+ return err
+ }
+ info.SysCapabilities.SystemCap = getCapabilities(binary.BigEndian.Uint16(v.Value[0:2]))
+ info.SysCapabilities.EnabledCap = getCapabilities(binary.BigEndian.Uint16(v.Value[2:4]))
+ case LLDPTLVMgmtAddress:
+ if err := checkLLDPTLVLen(v, 9); err != nil {
+ return err
+ }
+ mlen := int(v.Value[0])
+ if err := checkLLDPTLVLen(v, mlen+7); err != nil {
+ return err
+ }
+ info.MgmtAddress.Subtype = IANAAddressFamily(v.Value[1])
+ info.MgmtAddress.Address = v.Value[2 : mlen+1]
+ info.MgmtAddress.InterfaceSubtype = LLDPInterfaceSubtype(v.Value[mlen+1])
+ info.MgmtAddress.InterfaceNumber = binary.BigEndian.Uint32(v.Value[mlen+2 : mlen+6])
+ olen := int(v.Value[mlen+6])
+ // The OID string begins immediately after its length byte at mlen+7.
+ if err := checkLLDPTLVLen(v, mlen+7+olen); err != nil {
+ return err
+ }
+ info.MgmtAddress.OID = string(v.Value[mlen+7 : mlen+7+olen])
+ case LLDPTLVOrgSpecific:
+ if err := checkLLDPTLVLen(v, 4); err != nil {
+ return err
+ }
+ oui := IEEEOUI(binary.BigEndian.Uint32(append([]byte{0}, v.Value[0:3]...)))
+ info.OrgTLVs = append(info.OrgTLVs, LLDPOrgSpecificTLV{oui, v.Value[3], v.Value[4:]})
+ }
+ }
+ return nil
+}
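+
+// Per IEEE 802.1AB an LLDPDU must begin with Chassis ID, Port ID and TTL
+// TLVs and terminate with an End TLV, which is why the decoder above
+// requires at least four TLVs, non-zero chassis/port subtypes and a seen
+// End TLV.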
+
+func (l *LinkLayerDiscoveryInfo) Decode8021() (info LLDPInfo8021, err error) {
+ for _, o := range l.OrgTLVs {
+ if o.OUI != IEEEOUI8021 {
+ continue
+ }
+ switch o.SubType {
+ case LLDP8021SubtypePortVLANID:
+ if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+ return
+ }
+ info.PVID = binary.BigEndian.Uint16(o.Info[0:2])
+ case LLDP8021SubtypeProtocolVLANID:
+ if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+ return
+ }
+ sup := (o.Info[0]&LLDPProtocolVLANIDCapability > 0)
+ en := (o.Info[0]&LLDPProtocolVLANIDStatus > 0)
+ id := binary.BigEndian.Uint16(o.Info[1:3])
+ info.PPVIDs = append(info.PPVIDs, PortProtocolVLANID{sup, en, id})
+ case LLDP8021SubtypeVLANName:
+ // VLAN ID (2 bytes) plus the name-length byte: at least 3 bytes.
+ if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+ return
+ }
+ id := binary.BigEndian.Uint16(o.Info[0:2])
+ info.VLANNames = append(info.VLANNames, VLANName{id, string(o.Info[3:])})
+ case LLDP8021SubtypeProtocolIdentity:
+ if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+ return
+ }
+ l := int(o.Info[0])
+ if l > 0 && len(o.Info) >= l+1 {
+ info.ProtocolIdentities = append(info.ProtocolIdentities, o.Info[1:1+l])
+ }
+ case LLDP8021SubtypeVDIUsageDigest:
+ if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+ return
+ }
+ info.VIDUsageDigest = binary.BigEndian.Uint32(o.Info[0:4])
+ case LLDP8021SubtypeManagementVID:
+ if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+ return
+ }
+ info.ManagementVID = binary.BigEndian.Uint16(o.Info[0:2])
+ case LLDP8021SubtypeLinkAggregation:
+ if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+ return
+ }
+ sup := (o.Info[0]&LLDPAggregationCapability > 0)
+ en := (o.Info[0]&LLDPAggregationStatus > 0)
+ info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+ }
+ }
+ return
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8023() (info LLDPInfo8023, err error) {
+ for _, o := range l.OrgTLVs {
+ if o.OUI != IEEEOUI8023 {
+ continue
+ }
+ switch o.SubType {
+ case LLDP8023SubtypeMACPHY:
+ if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+ return
+ }
+ sup := (o.Info[0]&LLDPMACPHYCapability > 0)
+ en := (o.Info[0]&LLDPMACPHYStatus > 0)
+ ca := binary.BigEndian.Uint16(o.Info[1:3])
+ mau := binary.BigEndian.Uint16(o.Info[3:5])
+ info.MACPHYConfigStatus = LLDPMACPHYConfigStatus{sup, en, ca, mau}
+ case LLDP8023SubtypeMDIPower:
+ if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+ return
+ }
+ info.PowerViaMDI.PortClassPSE = (o.Info[0]&LLDPMDIPowerPortClass > 0)
+ info.PowerViaMDI.PSESupported = (o.Info[0]&LLDPMDIPowerCapability > 0)
+ info.PowerViaMDI.PSEEnabled = (o.Info[0]&LLDPMDIPowerStatus > 0)
+ info.PowerViaMDI.PSEPairsAbility = (o.Info[0]&LLDPMDIPowerPairsAbility > 0)
+ info.PowerViaMDI.PSEPowerPair = uint8(o.Info[1])
+ info.PowerViaMDI.PSEClass = uint8(o.Info[2])
+ if len(o.Info) >= 8 { // the DLL classification extension read below spans bytes 3-7
+ info.PowerViaMDI.Type = LLDPPowerType((o.Info[3] & 0xc0) >> 6)
+ info.PowerViaMDI.Source = LLDPPowerSource((o.Info[3] & 0x30) >> 4)
+ if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+ info.PowerViaMDI.Source += 128 // For Stringify purposes
+ }
+ info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[3] & 0x0f)
+ info.PowerViaMDI.Requested = binary.BigEndian.Uint16(o.Info[4:6])
+ info.PowerViaMDI.Allocated = binary.BigEndian.Uint16(o.Info[6:8])
+ }
+ case LLDP8023SubtypeLinkAggregation:
+ if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+ return
+ }
+ sup := (o.Info[0]&LLDPAggregationCapability > 0)
+ en := (o.Info[0]&LLDPAggregationStatus > 0)
+ info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+ case LLDP8023SubtypeMTU:
+ if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+ return
+ }
+ info.MTU = binary.BigEndian.Uint16(o.Info[0:2])
+ }
+ }
+ return
+}
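+
+// A minimal usage sketch for the decode helpers in this file (assumes a
+// gopacket.Packet decoded from a frame carrying LLDP):
+//
+//    if l := packet.Layer(LayerTypeLinkLayerDiscoveryInfo); l != nil {
+//        info := l.(*LinkLayerDiscoveryInfo)
+//        if info8023, err := info.Decode8023(); err == nil {
+//            fmt.Println("advertised MTU:", info8023.MTU)
+//        }
+//    }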
+
+func (l *LinkLayerDiscoveryInfo) Decode8021Qbg() (info LLDPInfo8021Qbg, err error) {
+ for _, o := range l.OrgTLVs {
+ if o.OUI != IEEEOUI8021Qbg {
+ continue
+ }
+ switch o.SubType {
+ case LLDP8021QbgEVB:
+ if err = checkLLDPOrgSpecificLen(o, 9); err != nil {
+ return
+ }
+ info.EVBSettings.Supported = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[0:2]))
+ info.EVBSettings.Enabled = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[2:4]))
+ info.EVBSettings.SupportedVSIs = binary.BigEndian.Uint16(o.Info[4:6])
+ info.EVBSettings.ConfiguredVSIs = binary.BigEndian.Uint16(o.Info[6:8])
+ info.EVBSettings.RTEExponent = uint8(o.Info[8])
+ }
+ }
+ return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeMedia() (info LLDPInfoMedia, err error) {
+ for _, o := range l.OrgTLVs {
+ if o.OUI != IEEEOUIMedia {
+ continue
+ }
+ switch LLDPMediaSubtype(o.SubType) {
+ case LLDPMediaTypeCapabilities:
+ if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+ return
+ }
+ b := binary.BigEndian.Uint16(o.Info[0:2])
+ info.MediaCapabilities.Capabilities = (b & LLDPMediaCapsLLDP) > 0
+ info.MediaCapabilities.NetworkPolicy = (b & LLDPMediaCapsNetwork) > 0
+ info.MediaCapabilities.Location = (b & LLDPMediaCapsLocation) > 0
+ info.MediaCapabilities.PowerPSE = (b & LLDPMediaCapsPowerPSE) > 0
+ info.MediaCapabilities.PowerPD = (b & LLDPMediaCapsPowerPD) > 0
+ info.MediaCapabilities.Inventory = (b & LLDPMediaCapsInventory) > 0
+ info.MediaCapabilities.Class = LLDPMediaClass(o.Info[2])
+ case LLDPMediaTypeNetwork:
+ if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+ return
+ }
+ info.NetworkPolicy.ApplicationType = LLDPApplicationType(o.Info[0])
+ b := binary.BigEndian.Uint16(o.Info[1:3])
+ info.NetworkPolicy.Defined = (b & 0x8000) == 0
+ info.NetworkPolicy.Tagged = (b & 0x4000) > 0
+ info.NetworkPolicy.VLANId = (b & 0x1ffe) >> 1
+ b = binary.BigEndian.Uint16(o.Info[2:4])
+ info.NetworkPolicy.L2Priority = (b & 0x01c0) >> 6
+ info.NetworkPolicy.DSCPValue = uint8(o.Info[3] & 0x3f)
+ case LLDPMediaTypeLocation:
+ if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+ return
+ }
+ info.Location.Format = LLDPLocationFormat(o.Info[0])
+ o.Info = o.Info[1:]
+ switch info.Location.Format {
+ case LLDPLocationFormatCoordinate:
+ if err = checkLLDPOrgSpecificLen(o, 16); err != nil {
+ return
+ }
+ info.Location.Coordinate.LatitudeResolution = uint8(o.Info[0]&0xfc) >> 2
+ b := binary.BigEndian.Uint64(o.Info[0:8])
+ info.Location.Coordinate.Latitude = (b & 0x03ffffffff000000) >> 24
+ info.Location.Coordinate.LongitudeResolution = uint8(o.Info[5]&0xfc) >> 2
+ b = binary.BigEndian.Uint64(o.Info[5:13])
+ info.Location.Coordinate.Longitude = (b & 0x03ffffffff000000) >> 24
+ info.Location.Coordinate.AltitudeType = uint8((o.Info[10] & 0xf0) >> 4)
+ b1 := binary.BigEndian.Uint16(o.Info[10:12])
+ info.Location.Coordinate.AltitudeResolution = (b1 & 0xfc0) >> 6
+ b2 := binary.BigEndian.Uint32(o.Info[11:15])
+ info.Location.Coordinate.Altitude = b2 & 0x3fffffff
+ info.Location.Coordinate.Datum = uint8(o.Info[15])
+ case LLDPLocationFormatAddress:
+ if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+ return
+ }
+ // o.Info[0] is the LCI length byte; the TLV length is used instead.
+ info.Location.Address.What = LLDPLocationAddressWhat(o.Info[1])
+ info.Location.Address.CountryCode = string(o.Info[2:4])
+ data := o.Info[4:]
+ for len(data) > 1 {
+ aType := LLDPLocationAddressType(data[0])
+ aLen := int(data[1])
+ if len(data) >= aLen+2 {
+ info.Location.Address.AddressLines = append(info.Location.Address.AddressLines, LLDPLocationAddressLine{aType, string(data[2 : aLen+2])})
+ data = data[aLen+2:]
+ } else {
+ break
+ }
+ }
+ case LLDPLocationFormatECS:
+ info.Location.ECS.ELIN = string(o.Info)
+ }
+ case LLDPMediaTypePower:
+ if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+ return
+ }
+ info.PowerViaMDI.Type = LLDPPowerType((o.Info[0] & 0xc0) >> 6)
+ info.PowerViaMDI.Source = LLDPPowerSource((o.Info[0] & 0x30) >> 4)
+ if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+ info.PowerViaMDI.Source += 128 // For Stringify purposes
+ }
+ info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[0] & 0x0f)
+ info.PowerViaMDI.Value = binary.BigEndian.Uint16(o.Info[1:3]) * 100 // raw units are 0.1W; stored as milliwatts (0 to 102.3W)
+ case LLDPMediaTypeHardware:
+ info.HardwareRevision = string(o.Info)
+ case LLDPMediaTypeFirmware:
+ info.FirmwareRevision = string(o.Info)
+ case LLDPMediaTypeSoftware:
+ info.SoftwareRevision = string(o.Info)
+ case LLDPMediaTypeSerial:
+ info.SerialNumber = string(o.Info)
+ case LLDPMediaTypeManufacturer:
+ info.Manufacturer = string(o.Info)
+ case LLDPMediaTypeModel:
+ info.Model = string(o.Info)
+ case LLDPMediaTypeAssetID:
+ info.AssetID = string(o.Info)
+ }
+ }
+ return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeCisco2() (info LLDPInfoCisco2, err error) {
+ for _, o := range l.OrgTLVs {
+ if o.OUI != IEEEOUICisco2 {
+ continue
+ }
+ switch LLDPCisco2Subtype(o.SubType) {
+ case LLDPCisco2PowerViaMDI:
+ if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+ return
+ }
+ info.PSEFourWirePoESupported = (o.Info[0] & LLDPCiscoPSESupport) > 0
+ info.PDSparePairArchitectureShared = (o.Info[0] & LLDPCiscoArchShared) > 0
+ info.PDRequestSparePairPoEOn = (o.Info[0] & LLDPCiscoPDSparePair) > 0
+ info.PSESparePairPoEOn = (o.Info[0] & LLDPCiscoPSESparePair) > 0
+ }
+ }
+ return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeProfinet() (info LLDPInfoProfinet, err error) {
+ for _, o := range l.OrgTLVs {
+ if o.OUI != IEEEOUIProfinet {
+ continue
+ }
+ switch LLDPProfinetSubtype(o.SubType) {
+ case LLDPProfinetPNIODelay:
+ if err = checkLLDPOrgSpecificLen(o, 20); err != nil {
+ return
+ }
+ info.PNIODelay.RXLocal = binary.BigEndian.Uint32(o.Info[0:4])
+ info.PNIODelay.RXRemote = binary.BigEndian.Uint32(o.Info[4:8])
+ info.PNIODelay.TXLocal = binary.BigEndian.Uint32(o.Info[8:12])
+ info.PNIODelay.TXRemote = binary.BigEndian.Uint32(o.Info[12:16])
+ info.PNIODelay.CableLocal = binary.BigEndian.Uint32(o.Info[16:20])
+ case LLDPProfinetPNIOPortStatus:
+ if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+ return
+ }
+ info.PNIOPortStatus.Class2 = binary.BigEndian.Uint16(o.Info[0:2])
+ info.PNIOPortStatus.Class3 = binary.BigEndian.Uint16(o.Info[2:4])
+ case LLDPProfinetPNIOMRPPortStatus:
+ if err = checkLLDPOrgSpecificLen(o, 18); err != nil {
+ return
+ }
+ info.PNIOMRPPortStatus.UUID = o.Info[0:16]
+ info.PNIOMRPPortStatus.Status = binary.BigEndian.Uint16(o.Info[16:18])
+ case LLDPProfinetPNIOChassisMAC:
+ if err = checkLLDPOrgSpecificLen(o, 6); err != nil {
+ return
+ }
+ info.ChassisMAC = o.Info[0:6]
+ case LLDPProfinetPNIOPTCPStatus:
+ if err = checkLLDPOrgSpecificLen(o, 54); err != nil {
+ return
+ }
+ info.PNIOPTCPStatus.MasterAddress = o.Info[0:6]
+ info.PNIOPTCPStatus.SubdomainUUID = o.Info[6:22]
+ info.PNIOPTCPStatus.IRDataUUID = o.Info[22:38]
+ b := binary.BigEndian.Uint32(o.Info[38:42])
+ info.PNIOPTCPStatus.PeriodValid = (b & 0x80000000) > 0
+ info.PNIOPTCPStatus.PeriodLength = b & 0x7fffffff
+ b = binary.BigEndian.Uint32(o.Info[42:46])
+ info.PNIOPTCPStatus.RedPeriodValid = (b & 0x80000000) > 0
+ info.PNIOPTCPStatus.RedPeriodBegin = b & 0x7fffffff
+ b = binary.BigEndian.Uint32(o.Info[46:50])
+ info.PNIOPTCPStatus.OrangePeriodValid = (b & 0x80000000) > 0
+ info.PNIOPTCPStatus.OrangePeriodBegin = b & 0x7fffffff
+ b = binary.BigEndian.Uint32(o.Info[50:54])
+ info.PNIOPTCPStatus.GreenPeriodValid = (b & 0x80000000) > 0
+ info.PNIOPTCPStatus.GreenPeriodBegin = b & 0x7fffffff
+ }
+ }
+ return
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscoveryInfo.
+func (c *LinkLayerDiscoveryInfo) LayerType() gopacket.LayerType {
+ return LayerTypeLinkLayerDiscoveryInfo
+}
+
+func getCapabilities(v uint16) (c LLDPCapabilities) {
+ c.Other = (v&LLDPCapsOther > 0)
+ c.Repeater = (v&LLDPCapsRepeater > 0)
+ c.Bridge = (v&LLDPCapsBridge > 0)
+ c.WLANAP = (v&LLDPCapsWLANAP > 0)
+ c.Router = (v&LLDPCapsRouter > 0)
+ c.Phone = (v&LLDPCapsPhone > 0)
+ c.DocSis = (v&LLDPCapsDocSis > 0)
+ c.StationOnly = (v&LLDPCapsStationOnly > 0)
+ c.CVLAN = (v&LLDPCapsCVLAN > 0)
+ c.SVLAN = (v&LLDPCapsSVLAN > 0)
+ c.TMPR = (v&LLDPCapsTmpr > 0)
+ return
+}
+
+func getEVBCapabilities(v uint16) (c LLDPEVBCapabilities) {
+ c.StandardBridging = (v & LLDPEVBCapsSTD) > 0
+ c.ReflectiveRelay = (v & LLDPEVBCapsRR) > 0
+ c.RetransmissionTimerExponent = (v & LLDPEVBCapsRTE) > 0
+ c.EdgeControlProtocol = (v & LLDPEVBCapsECP) > 0
+ c.VSIDiscoveryProtocol = (v & LLDPEVBCapsVDP) > 0
+ return
+}
+
+func (t LLDPTLVType) String() (s string) {
+ switch t {
+ case LLDPTLVEnd:
+ s = "TLV End"
+ case LLDPTLVChassisID:
+ s = "Chassis ID"
+ case LLDPTLVPortID:
+ s = "Port ID"
+ case LLDPTLVTTL:
+ s = "TTL"
+ case LLDPTLVPortDescription:
+ s = "Port Description"
+ case LLDPTLVSysName:
+ s = "System Name"
+ case LLDPTLVSysDescription:
+ s = "System Description"
+ case LLDPTLVSysCapabilities:
+ s = "System Capabilities"
+ case LLDPTLVMgmtAddress:
+ s = "Management Address"
+ case LLDPTLVOrgSpecific:
+ s = "Organisation Specific"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPChassisIDSubType) String() (s string) {
+ switch t {
+ case LLDPChassisIDSubTypeReserved:
+ s = "Reserved"
+ case LLDPChassisIDSubTypeChassisComp:
+ s = "Chassis Component"
+ case LLDPChassisIDSubtypeIfaceAlias:
+ s = "Interface Alias"
+ case LLDPChassisIDSubTypePortComp:
+ s = "Port Component"
+ case LLDPChassisIDSubTypeMACAddr:
+ s = "MAC Address"
+ case LLDPChassisIDSubTypeNetworkAddr:
+ s = "Network Address"
+ case LLDPChassisIDSubtypeIfaceName:
+ s = "Interface Name"
+ case LLDPChassisIDSubTypeLocal:
+ s = "Local"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPPortIDSubType) String() (s string) {
+ switch t {
+ case LLDPPortIDSubtypeReserved:
+ s = "Reserved"
+ case LLDPPortIDSubtypeIfaceAlias:
+ s = "Interface Alias"
+ case LLDPPortIDSubtypePortComp:
+ s = "Port Component"
+ case LLDPPortIDSubtypeMACAddr:
+ s = "MAC Address"
+ case LLDPPortIDSubtypeNetworkAddr:
+ s = "Network Address"
+ case LLDPPortIDSubtypeIfaceName:
+ s = "Interface Name"
+ case LLDPPortIDSubtypeAgentCircuitID:
+ s = "Agent Circuit ID"
+ case LLDPPortIDSubtypeLocal:
+ s = "Local"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t IANAAddressFamily) String() (s string) {
+ switch t {
+ case IANAAddressFamilyReserved:
+ s = "Reserved"
+ case IANAAddressFamilyIPV4:
+ s = "IPv4"
+ case IANAAddressFamilyIPV6:
+ s = "IPv6"
+ case IANAAddressFamilyNSAP:
+ s = "NSAP"
+ case IANAAddressFamilyHDLC:
+ s = "HDLC"
+ case IANAAddressFamilyBBN1822:
+ s = "BBN 1822"
+ case IANAAddressFamily802:
+ s = "802 media plus Ethernet 'canonical format'"
+ case IANAAddressFamilyE163:
+ s = "E.163"
+ case IANAAddressFamilyE164:
+ s = "E.164 (SMDS, Frame Relay, ATM)"
+ case IANAAddressFamilyF69:
+ s = "F.69 (Telex)"
+ case IANAAddressFamilyX121:
+ s = "X.121, X.25, Frame Relay"
+ case IANAAddressFamilyIPX:
+ s = "IPX"
+ case IANAAddressFamilyAtalk:
+ s = "Appletalk"
+ case IANAAddressFamilyDecnet:
+ s = "Decnet IV"
+ case IANAAddressFamilyBanyan:
+ s = "Banyan Vines"
+ case IANAAddressFamilyE164NSAP:
+ s = "E.164 with NSAP format subaddress"
+ case IANAAddressFamilyDNS:
+ s = "DNS"
+ case IANAAddressFamilyDistname:
+ s = "Distinguished Name"
+ case IANAAddressFamilyASNumber:
+ s = "AS Number"
+ case IANAAddressFamilyXTPIPV4:
+ s = "XTP over IP version 4"
+ case IANAAddressFamilyXTPIPV6:
+ s = "XTP over IP version 6"
+ case IANAAddressFamilyXTP:
+ s = "XTP native mode XTP"
+ case IANAAddressFamilyFcWWPN:
+ s = "Fibre Channel World-Wide Port Name"
+ case IANAAddressFamilyFcWWNN:
+ s = "Fibre Channel World-Wide Node Name"
+ case IANAAddressFamilyGWID:
+ s = "GWID"
+ case IANAAddressFamilyL2VPN:
+ s = "AFI for Layer 2 VPN"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPInterfaceSubtype) String() (s string) {
+ switch t {
+ case LLDPInterfaceSubtypeUnknown:
+ s = "Unknown"
+ case LLDPInterfaceSubtypeifIndex:
+ s = "IfIndex"
+ case LLDPInterfaceSubtypeSysPort:
+ s = "System Port Number"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPPowerType) String() (s string) {
+ switch t {
+ case 0:
+ s = "Type 2 PSE Device"
+ case 1:
+ s = "Type 2 PD Device"
+ case 2:
+ s = "Type 1 PSE Device"
+ case 3:
+ s = "Type 1 PD Device"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPPowerSource) String() (s string) {
+ switch t {
+ // PD Device
+ case 0:
+ s = "Unknown"
+ case 1:
+ s = "PSE"
+ case 2:
+ s = "Local"
+ case 3:
+ s = "PSE and Local"
+ // PSE Device (Actual value + 128)
+ case 128:
+ s = "Unknown"
+ case 129:
+ s = "Primary Power Source"
+ case 130:
+ s = "Backup Power Source"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPPowerPriority) String() (s string) {
+ switch t {
+ case 0:
+ s = "Unknown"
+ case 1:
+ s = "Critical"
+ case 2:
+ s = "High"
+ case 3:
+ s = "Low"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPMediaSubtype) String() (s string) {
+ switch t {
+ case LLDPMediaTypeCapabilities:
+ s = "Media Capabilities "
+ case LLDPMediaTypeNetwork:
+ s = "Network Policy"
+ case LLDPMediaTypeLocation:
+ s = "Location Identification"
+ case LLDPMediaTypePower:
+ s = "Extended Power-via-MDI"
+ case LLDPMediaTypeHardware:
+ s = "Hardware Revision"
+ case LLDPMediaTypeFirmware:
+ s = "Firmware Revision"
+ case LLDPMediaTypeSoftware:
+ s = "Software Revision"
+ case LLDPMediaTypeSerial:
+ s = "Serial Number"
+ case LLDPMediaTypeManufacturer:
+ s = "Manufacturer"
+ case LLDPMediaTypeModel:
+ s = "Model"
+ case LLDPMediaTypeAssetID:
+ s = "Asset ID"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPMediaClass) String() (s string) {
+ switch t {
+ case LLDPMediaClassUndefined:
+ s = "Undefined"
+ case LLDPMediaClassEndpointI:
+ s = "Endpoint Class I"
+ case LLDPMediaClassEndpointII:
+ s = "Endpoint Class II"
+ case LLDPMediaClassEndpointIII:
+ s = "Endpoint Class III"
+ case LLDPMediaClassNetwork:
+ s = "Network connectivity "
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPApplicationType) String() (s string) {
+ switch t {
+ case LLDPAppTypeReserved:
+ s = "Reserved"
+ case LLDPAppTypeVoice:
+ s = "Voice"
+ case LLDPappTypeVoiceSignaling:
+ s = "Voice Signaling"
+ case LLDPappTypeGuestVoice:
+ s = "Guest Voice"
+ case LLDPappTypeGuestVoiceSignaling:
+ s = "Guest Voice Signaling"
+ case LLDPappTypeSoftphoneVoice:
+ s = "Softphone Voice"
+ case LLDPappTypeVideoConferencing:
+ s = "Video Conferencing"
+ case LLDPappTypeStreamingVideo:
+ s = "Streaming Video"
+ case LLDPappTypeVideoSignaling:
+ s = "Video Signaling"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPLocationFormat) String() (s string) {
+ switch t {
+ case LLDPLocationFormatInvalid:
+ s = "Invalid"
+ case LLDPLocationFormatCoordinate:
+ s = "Coordinate-based LCI"
+ case LLDPLocationFormatAddress:
+ s = "Address-based LCO"
+ case LLDPLocationFormatECS:
+ s = "ECS ELIN"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t LLDPLocationAddressType) String() (s string) {
+ switch t {
+ case LLDPLocationAddressTypeLanguage:
+ s = "Language"
+ case LLDPLocationAddressTypeNational:
+ s = "National subdivisions (province, state, etc)"
+ case LLDPLocationAddressTypeCounty:
+ s = "County, parish, district"
+ case LLDPLocationAddressTypeCity:
+ s = "City, township"
+ case LLDPLocationAddressTypeCityDivision:
+ s = "City division, borough, ward"
+ case LLDPLocationAddressTypeNeighborhood:
+ s = "Neighborhood, block"
+ case LLDPLocationAddressTypeStreet:
+ s = "Street"
+ case LLDPLocationAddressTypeLeadingStreet:
+ s = "Leading street direction"
+ case LLDPLocationAddressTypeTrailingStreet:
+ s = "Trailing street suffix"
+ case LLDPLocationAddressTypeStreetSuffix:
+ s = "Street suffix"
+ case LLDPLocationAddressTypeHouseNum:
+ s = "House number"
+ case LLDPLocationAddressTypeHouseSuffix:
+ s = "House number suffix"
+ case LLDPLocationAddressTypeLandmark:
+ s = "Landmark or vanity address"
+ case LLDPLocationAddressTypeAdditional:
+ s = "Additional location information"
+ case LLDPLocationAddressTypeName:
+ s = "Name"
+ case LLDPLocationAddressTypePostal:
+ s = "Postal/ZIP code"
+ case LLDPLocationAddressTypeBuilding:
+ s = "Building"
+ case LLDPLocationAddressTypeUnit:
+ s = "Unit"
+ case LLDPLocationAddressTypeFloor:
+ s = "Floor"
+ case LLDPLocationAddressTypeRoom:
+ s = "Room number"
+ case LLDPLocationAddressTypePlace:
+ s = "Place type"
+ case LLDPLocationAddressTypeScript:
+ s = "Script"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func checkLLDPTLVLen(v LinkLayerDiscoveryValue, l int) (err error) {
+ if len(v.Value) < l {
+ err = fmt.Errorf("Invalid TLV %v length %d (wanted mimimum %v", v.Type, len(v.Value), l)
+ }
+ return
+}
+
+func checkLLDPOrgSpecificLen(o LLDPOrgSpecificTLV, l int) (err error) {
+ if len(o.Info) < l {
+ err = fmt.Errorf("Invalid Org Specific TLV %v length %d (wanted minimum %v)", o.SubType, len(o.Info), l)
+ }
+ return
+}
diff --git a/vendor/github.com/google/gopacket/layers/loopback.go b/vendor/github.com/google/gopacket/layers/loopback.go
new file mode 100644
index 0000000..839f760
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/loopback.go
@@ -0,0 +1,80 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// Loopback contains the header for loopback encapsulation. This header is
+// used by both BSD and OpenBSD style loopback decoding (pcap's DLT_NULL
+// and DLT_LOOP, respectively).
+type Loopback struct {
+ BaseLayer
+ Family ProtocolFamily
+}
+
+// LayerType returns LayerTypeLoopback.
+func (l *Loopback) LayerType() gopacket.LayerType { return LayerTypeLoopback }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (l *Loopback) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ return errors.New("Loopback packet too small")
+ }
+
+ // The protocol could be either big-endian or little-endian, we're
+ // not sure. But we're PRETTY sure that the value is less than
+ // 256, so we can check the first two bytes.
+ var prot uint32
+ if data[0] == 0 && data[1] == 0 {
+ prot = binary.BigEndian.Uint32(data[:4])
+ } else {
+ prot = binary.LittleEndian.Uint32(data[:4])
+ }
+ if prot > 0xFF {
+ return fmt.Errorf("Invalid loopback protocol %q", data[:4])
+ }
+
+ l.Family = ProtocolFamily(prot)
+ l.BaseLayer = BaseLayer{data[:4], data[4:]}
+ return nil
+}
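+
+// For example, a loopback header for AF_INET (2) is 00 00 00 02 when
+// written big-endian and 02 00 00 00 when written little-endian; the
+// heuristic above recovers protocol family 2 in both cases.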
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *Loopback) CanDecode() gopacket.LayerClass {
+ return LayerTypeLoopback
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *Loopback) NextLayerType() gopacket.LayerType {
+ return l.Family.LayerType()
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (l *Loopback) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ binary.LittleEndian.PutUint32(bytes, uint32(l.Family))
+ return nil
+}
+
+func decodeLoopback(data []byte, p gopacket.PacketBuilder) error {
+ l := Loopback{}
+ if err := l.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
+ return err
+ }
+ p.AddLayer(&l)
+ return p.NextDecoder(l.Family)
+}
diff --git a/vendor/github.com/google/gopacket/layers/mldv1.go b/vendor/github.com/google/gopacket/layers/mldv1.go
new file mode 100644
index 0000000..e1bb1dc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv1.go
@@ -0,0 +1,182 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
+// MLDv1Message represents the common structure of all MLDv1 messages
+type MLDv1Message struct {
+ BaseLayer
+ // 3.4. Maximum Response Delay
+ MaximumResponseDelay time.Duration
+ // 3.6. Multicast Address
+ // Zero in general query
+ // Specific IPv6 multicast address otherwise
+ MulticastAddress net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv1Message) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less than 20 bytes for Multicast Listener Query Message V1")
+ }
+
+ m.MaximumResponseDelay = time.Duration(binary.BigEndian.Uint16(data[0:2])) * time.Millisecond
+ // data[2:4] is reserved and not used in mldv1
+ m.MulticastAddress = data[4:20]
+
+ return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv1Message) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv1Message) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf, err := b.PrependBytes(20)
+ if err != nil {
+ return err
+ }
+
+ if m.MaximumResponseDelay < 0 {
+ return errors.New("maximum response delay must not be negative")
+ }
+ dms := m.MaximumResponseDelay / time.Millisecond
+ if dms > math.MaxUint16 {
+ return fmt.Errorf("maximum response delay %dms is more than the allowed 65535ms", dms)
+ }
+ binary.BigEndian.PutUint16(buf[0:2], uint16(dms))
+
+ copy(buf[2:4], []byte{0x0, 0x0})
+
+ ma16 := m.MulticastAddress.To16()
+ if ma16 == nil {
+ return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+ }
+ copy(buf[4:20], ma16)
+
+ return nil
+}
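+
+// A small usage sketch (the enclosing ICMPv6 layers are assumed to be
+// handled elsewhere): building a general query with a 10s response delay.
+//
+//    q := &MLDv1MulticastListenerQueryMessage{}
+//    q.MaximumResponseDelay = 10 * time.Second
+//    q.MulticastAddress = net.IPv6zero
+//    buf := gopacket.NewSerializeBuffer()
+//    err := q.SerializeTo(buf, gopacket.SerializeOptions{})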
+
+// String sums this layer up nicely formatted
+func (m *MLDv1Message) String() string {
+ return fmt.Sprintf(
+ "Maximum Response Delay: %dms, Multicast Address: %s",
+ m.MaximumResponseDelay/time.Millisecond,
+ m.MulticastAddress)
+}
+
+// MLDv1MulticastListenerQueryMessage are sent by the router to determine
+// whether there are multicast listeners on the link.
+// https://tools.ietf.org/html/rfc2710 Page 5
+type MLDv1MulticastListenerQueryMessage struct {
+ MLDv1Message
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv1MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ err := m.MLDv1Message.DecodeFromBytes(data, df)
+ if err != nil {
+ return err
+ }
+
+ if len(data) > 20 {
+ m.Payload = data[20:]
+ }
+
+ return nil
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerQuery.
+func (*MLDv1MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+ return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+ return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// IsGeneralQuery is true when this is a general query.
+// In a Query message, the Multicast Address field is set to zero when
+// sending a General Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsGeneralQuery() bool {
+ return net.IPv6zero.Equal(m.MulticastAddress)
+}
+
+// IsSpecificQuery is true when this is not a general query.
+// In a Query message, the Multicast Address field is set to a specific
+// IPv6 multicast address when sending a Multicast-Address-Specific Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsSpecificQuery() bool {
+ return !m.IsGeneralQuery()
+}
+
+// MLDv1MulticastListenerReportMessage is sent by a client listening on
+// a specific multicast address to indicate that it is (still) listening
+// on the specific multicast address.
+// https://tools.ietf.org/html/rfc2710 Page 6
+type MLDv1MulticastListenerReportMessage struct {
+ MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerReport.
+func (*MLDv1MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+ return LayerTypeMLDv1MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+ return LayerTypeMLDv1MulticastListenerReport
+}
+
+// MLDv1MulticastListenerDoneMessage should be sent by a client when it ceases
+// to listen to a multicast address on an interface.
+// https://tools.ietf.org/html/rfc2710 Page 7
+type MLDv1MulticastListenerDoneMessage struct {
+ MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerDone.
+func (*MLDv1MulticastListenerDoneMessage) LayerType() gopacket.LayerType {
+ return LayerTypeMLDv1MulticastListenerDone
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerDoneMessage) CanDecode() gopacket.LayerClass {
+ return LayerTypeMLDv1MulticastListenerDone
+}
+
+func decodeMLDv1MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+ m := &MLDv1MulticastListenerReportMessage{}
+ return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+ m := &MLDv1MulticastListenerQueryMessage{}
+ return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerDone(data []byte, p gopacket.PacketBuilder) error {
+ m := &MLDv1MulticastListenerDoneMessage{}
+ return decodingLayerDecoder(m, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/mldv2.go b/vendor/github.com/google/gopacket/layers/mldv2.go
new file mode 100644
index 0000000..248cf74
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv2.go
@@ -0,0 +1,619 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/google/gopacket"
+)
+
+const (
+ // S Flag bit is 1
+ mldv2STrue uint8 = 0x8
+
+ // S Flag value mask
+ // mldv2STrue & mldv2SMask == mldv2STrue // true
+ // 0x1 & mldv2SMask == mldv2STrue // true
+ // 0x0 & mldv2SMask == mldv2STrue // false
+ mldv2SMask uint8 = 0x8
+
+ // QRV value mask
+ mldv2QRVMask uint8 = 0x7
+)
+
+// MLDv2MulticastListenerQueryMessage are sent by multicast routers to query the
+// multicast listening state of neighboring interfaces.
+// https://tools.ietf.org/html/rfc3810#section-5.1
+//
+// Some information, like the Maximum Response Code and the Multicast Address,
+// is in the previous layer, LayerTypeMLDv1MulticastListenerQuery.
+type MLDv2MulticastListenerQueryMessage struct {
+ BaseLayer
+ // 5.1.3. Maximum Response Code
+ MaximumResponseCode uint16
+ // 5.1.5. Multicast Address
+ // Zero in general query
+ // Specific IPv6 multicast address otherwise
+ MulticastAddress net.IP
+ // 5.1.7. S Flag (Suppress Router-Side Processing)
+ SuppressRoutersideProcessing bool
+ // 5.1.8. QRV (Querier's Robustness Variable)
+ QueriersRobustnessVariable uint8
+ // 5.1.9. QQIC (Querier's Query Interval Code)
+ QueriersQueryIntervalCode uint8
+ // 5.1.10. Number of Sources (N)
+ NumberOfSources uint16
+ // 5.1.11 Source Address [i]
+ SourceAddresses []net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv2MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 24 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less than 24 bytes for Multicast Listener Query Message V2")
+ }
+
+ m.MaximumResponseCode = binary.BigEndian.Uint16(data[0:2])
+ // ignore data[2:4] as per https://tools.ietf.org/html/rfc3810#section-5.1.4
+ m.MulticastAddress = data[4:20]
+ m.SuppressRoutersideProcessing = (data[20] & mldv2SMask) == mldv2STrue
+ m.QueriersRobustnessVariable = data[20] & mldv2QRVMask
+ m.QueriersQueryIntervalCode = data[21]
+
+ m.NumberOfSources = binary.BigEndian.Uint16(data[22:24])
+
+ var end int
+ for i := uint16(0); i < m.NumberOfSources; i++ {
+ begin := 24 + (int(i) * 16)
+ end = begin + 16
+
+ if end > len(data) {
+ df.SetTruncated()
+ return fmt.Errorf("ICMP layer less than %d bytes for Multicast Listener Query Message V2", end)
+ }
+
+ m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+ }
+
+ return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv2MulticastListenerQueryMessage) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv2MulticastListenerQueryMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(24)
+ if err != nil {
+ return err
+ }
+
+ binary.BigEndian.PutUint16(buf[0:2], m.MaximumResponseCode)
+ copy(buf[2:4], []byte{0x00, 0x00}) // set reserved bytes to zero
+
+ ma16 := m.MulticastAddress.To16()
+ if ma16 == nil {
+ return fmt.Errorf("invalid MulticastAddress '%s'", m.MulticastAddress)
+ }
+ copy(buf[4:20], ma16)
+
+ byte20 := m.QueriersRobustnessVariable & mldv2QRVMask
+ if m.SuppressRoutersideProcessing {
+ byte20 |= mldv2STrue
+ } else {
+ byte20 &= ^mldv2STrue // the complement of mldv2STrue
+ }
+ byte20 &= 0x0F // set reserved bits to zero
+ buf[20] = byte20
+
+ binary.BigEndian.PutUint16(buf[22:24], m.NumberOfSources)
+ buf[21] = m.QueriersQueryIntervalCode
+
+ return nil
+}
+
+// writes each source address to the buffer preserving the order
+func (m *MLDv2MulticastListenerQueryMessage) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ numberOfSourceAddresses := len(m.SourceAddresses)
+ if numberOfSourceAddresses > math.MaxUint16 {
+ return fmt.Errorf(
+ "there are more than %d source addresses, but 65535 is the maximum number of supported addresses",
+ numberOfSourceAddresses)
+ }
+
+ if opts.FixLengths {
+ m.NumberOfSources = uint16(numberOfSourceAddresses)
+ }
+
+ lastSAIdx := numberOfSourceAddresses - 1
+ for k := range m.SourceAddresses {
+ i := lastSAIdx - k // reverse order
+
+ buf, err := b.PrependBytes(16)
+ if err != nil {
+ return err
+ }
+
+ sa16 := m.SourceAddresses[i].To16()
+ if sa16 == nil {
+ return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+ }
+ copy(buf[0:16], sa16)
+ }
+
+ return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerQueryMessage) String() string {
+ return fmt.Sprintf(
+ "Maximum Response Code: %#x (%dms), Multicast Address: %s, Suppress Routerside Processing: %t, QRV: %#x, QQIC: %#x (%ds), Number of Source Address: %d (actual: %d), Source Addresses: %s",
+ m.MaximumResponseCode,
+ m.MaximumResponseDelay(),
+ m.MulticastAddress,
+ m.SuppressRoutersideProcessing,
+ m.QueriersRobustnessVariable,
+ m.QueriersQueryIntervalCode,
+ m.QQI()/time.Second,
+ m.NumberOfSources,
+ len(m.SourceAddresses),
+ m.SourceAddresses)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerQuery.
+func (*MLDv2MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+ return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv2MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+ return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// QQI calculates the Querier's Query Interval based on the QQIC
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) QQI() time.Duration {
+ data := m.QueriersQueryIntervalCode
+ if data < 128 {
+ return time.Second * time.Duration(data)
+ }
+
+ exp := uint16(data) & 0x70 >> 4
+ mant := uint16(data) & 0x0F
+ // QQI = (mant | 0x10) << (exp + 3), per RFC 3810 section 5.1.9.
+ return time.Second * time.Duration((mant|0x10)<<(exp+3))
+}
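+
+// Worked example of the decoding above: QQIC 0x8F has exp = 0 and
+// mant = 0xF, so QQI = (0xF|0x10) << (0+3) = 248 seconds.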
+
+// SetQQI calculates and updates the Querier's Query Interval Code (QQIC)
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) SetQQI(d time.Duration) error {
+ if d < 0 {
+ m.QueriersQueryIntervalCode = 0
+ return errors.New("QQI duration is negative")
+ }
+
+ if d == 0 {
+ m.QueriersQueryIntervalCode = 0
+ return nil
+ }
+
+ ds := d / time.Second
+ if ds < 128 {
+ m.QueriersQueryIntervalCode = uint8(ds)
+ return nil
+ }
+
+ if ds > 31744 { // mant=0xF, exp=0x7
+ m.QueriersQueryIntervalCode = 0xFF
+ return fmt.Errorf("QQI duration %ds is too big, the maximum allowed is 31744s", ds)
+ }
+
+ value := uint16(ds) // ok, because 31744 < math.MaxUint16
+ exp := uint8(7)
+ for mask := uint16(0x4000); exp > 0; exp-- {
+ if mask&value != 0 {
+ break
+ }
+
+ mask >>= 1
+ }
+
+ mant := uint8(0x000F & (value >> (exp + 3)))
+ sig := uint8(0x80) // the high bit marks the exponent/mantissa form
+ m.QueriersQueryIntervalCode = sig | exp<<4 | mant
+
+ return nil
+}
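+
+// Round-trip example: SetQQI(248 * time.Second) finds exp = 0 and
+// mant = 0xF and stores 0x8F, which QQI() decodes back to 248s. Durations
+// whose low mantissa bits are dropped decode to slightly less.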
+
+// MaximumResponseDelay returns the Maximum Response Delay based on the
+// Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) MaximumResponseDelay() time.Duration {
+ if m.MaximumResponseCode < 0x8000 {
+ return time.Millisecond * time.Duration(m.MaximumResponseCode)
+ }
+
+ exp := m.MaximumResponseCode & 0x7000 >> 12
+ mant := m.MaximumResponseCode & 0x0FFF
+
+ // delay = (mant | 0x1000) << (exp + 3), per RFC 3810 section 5.1.3.
+ return time.Millisecond * time.Duration(uint32(mant|0x1000)<<(exp+3))
+}
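+
+// Worked example: code 0x8000 has exp = 0 and mant = 0, so the delay is
+// (0x000|0x1000) << (0+3) = 32768 milliseconds.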
+
+// SetMLDv2MaximumResponseDelay updates the Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) SetMLDv2MaximumResponseDelay(d time.Duration) error {
+ if d == 0 {
+ m.MaximumResponseCode = 0
+ return nil
+ }
+
+ if d < 0 {
+ return errors.New("maximum response delay must not be negative")
+ }
+
+ dms := d / time.Millisecond
+
+ if dms < 32768 {
+ m.MaximumResponseCode = uint16(dms)
+ return nil
+ }
+
+ if dms > 4193280 { // mant=0xFFE, exp=0x6
+ return fmt.Errorf("maximum response delay %dms is bigger than the maximum of 4193280ms", dms)
+ }
+
+ value := uint32(dms) // ok, because 4193280 < math.MaxUint32
+ exp := uint8(7)
+ // The leading bit of (mant|0x1000)<<(exp+3) is bit exp+15, so probe from
+ // bit 22 (0x400000) downwards to find the exponent.
+ for mask := uint32(0x400000); exp > 0; exp-- {
+ if mask&value != 0 {
+ break
+ }
+
+ mask >>= 1
+ }
+
+ mant := uint16(0x00000FFF & (value >> (exp + 3)))
+ sig := uint16(0x8000) // the high bit marks the exponent/mantissa form
+ m.MaximumResponseCode = sig | uint16(exp)<<12 | mant
+ return nil
+}
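+
+// Round-trip example: SetMLDv2MaximumResponseDelay(100 * time.Second)
+// stores 0x986A (exp = 1, mant = 0x86A), which MaximumResponseDelay()
+// decodes back to exactly 100000ms; durations whose low mantissa bits are
+// dropped decode to slightly less.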
+
+// MLDv2MulticastListenerReportMessage is sent by an IP node to report the
+// current multicast listening state, or changes therein.
+// https://tools.ietf.org/html/rfc3810#section-5.2
+type MLDv2MulticastListenerReportMessage struct {
+ BaseLayer
+ // 5.2.3. Nr of Mcast Address Records
+ NumberOfMulticastAddressRecords uint16
+ // 5.2.4. Multicast Address Record [i]
+ MulticastAddressRecords []MLDv2MulticastAddressRecord
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv2MulticastListenerReportMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+ return errors.New("ICMP layer less than 4 bytes for Multicast Listener Report Message V2")
+ }
+
+ // ignore data[0:2] as per RFC
+ // https://tools.ietf.org/html/rfc3810#section-5.2.1
+ m.NumberOfMulticastAddressRecords = binary.BigEndian.Uint16(data[2:4])
+
+ begin := 4
+ for i := uint16(0); i < m.NumberOfMulticastAddressRecords; i++ {
+ mar := MLDv2MulticastAddressRecord{}
+ read, err := mar.decode(data[begin:], df)
+ if err != nil {
+ return err
+ }
+
+ m.MulticastAddressRecords = append(m.MulticastAddressRecords, mar)
+
+ begin += read
+ }
+
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv2MulticastListenerReportMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ lastItemIdx := len(m.MulticastAddressRecords) - 1
+ for k := range m.MulticastAddressRecords {
+ i := lastItemIdx - k // reverse order
+
+ err := m.MulticastAddressRecords[i].serializeTo(b, opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ if opts.FixLengths {
+ numberOfMAR := len(m.MulticastAddressRecords)
+ if numberOfMAR > math.MaxUint16 {
+ return fmt.Errorf(
+ "%d multicast address records added, but the maximum is 65535",
+ numberOfMAR)
+ }
+
+ m.NumberOfMulticastAddressRecords = uint16(numberOfMAR)
+ }
+
+ buf, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+
+ copy(buf[0:2], []byte{0x0, 0x0})
+ binary.BigEndian.PutUint16(buf[2:4], m.NumberOfMulticastAddressRecords)
+ return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerReportMessage) String() string {
+ return fmt.Sprintf(
+ "Number of Mcast Addr Records: %d (actual %d), Multicast Address Records: %+v",
+ m.NumberOfMulticastAddressRecords,
+ len(m.MulticastAddressRecords),
+ m.MulticastAddressRecords)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerReport.
+func (*MLDv2MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+ return LayerTypeMLDv2MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv2MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+ return LayerTypeMLDv2MulticastListenerReport
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv2MulticastListenerReportMessage) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// MLDv2MulticastAddressRecordType holds the type of a
+// Multicast Address Record, according to
+// https://tools.ietf.org/html/rfc3810#section-5.2.5 and
+// https://tools.ietf.org/html/rfc3810#section-5.2.12
+type MLDv2MulticastAddressRecordType uint8
+
+const (
+ // MLDv2MulticastAddressRecordTypeModeIsIncluded stands for
+ // MODE_IS_INCLUDE - indicates that the interface has a filter
+ // mode of INCLUDE for the specified multicast address.
+ MLDv2MulticastAddressRecordTypeModeIsIncluded MLDv2MulticastAddressRecordType = 1
+ // MLDv2MulticastAddressRecordTypeModeIsExcluded stands for
+ // MODE_IS_EXCLUDE - indicates that the interface has a filter
+ // mode of EXCLUDE for the specified multicast address.
+ MLDv2MulticastAddressRecordTypeModeIsExcluded MLDv2MulticastAddressRecordType = 2
+ // MLDv2MulticastAddressRecordTypeChangeToIncludeMode stands for
+ // CHANGE_TO_INCLUDE_MODE - indicates that the interface has
+ // changed to INCLUDE filter mode for the specified multicast
+ // address.
+ MLDv2MulticastAddressRecordTypeChangeToIncludeMode MLDv2MulticastAddressRecordType = 3
+ // MLDv2MulticastAddressRecordTypeChangeToExcludeMode stands for
+ // CHANGE_TO_EXCLUDE_MODE - indicates that the interface has
+ // changed to EXCLUDE filter mode for the specified multicast
+ // address
+ MLDv2MulticastAddressRecordTypeChangeToExcludeMode MLDv2MulticastAddressRecordType = 4
+ // MLDv2MulticastAddressRecordTypeAllowNewSources stands for
+ // ALLOW_NEW_SOURCES - indicates that the Source Address [i]
+ // fields in this Multicast Address Record contain a list of
+ // the additional sources that the node wishes to listen to,
+ // for packets sent to the specified multicast address.
+ MLDv2MulticastAddressRecordTypeAllowNewSources MLDv2MulticastAddressRecordType = 5
+ // MLDv2MulticastAddressRecordTypeBlockOldSources stands for
+ // BLOCK_OLD_SOURCES - indicates that the Source Address [i]
+ // fields in this Multicast Address Record contain a list of
+ // the sources that the node no longer wishes to listen to,
+ // for packets sent to the specified multicast address.
+ MLDv2MulticastAddressRecordTypeBlockOldSources MLDv2MulticastAddressRecordType = 6
+)
+
+// String returns the human-readable record type name.
+// Naming follows https://tools.ietf.org/html/rfc3810#section-5.2.12
+func (m MLDv2MulticastAddressRecordType) String() string {
+ switch m {
+ case MLDv2MulticastAddressRecordTypeModeIsIncluded:
+ return "MODE_IS_INCLUDE"
+ case MLDv2MulticastAddressRecordTypeModeIsExcluded:
+ return "MODE_IS_EXCLUDE"
+ case MLDv2MulticastAddressRecordTypeChangeToIncludeMode:
+ return "CHANGE_TO_INCLUDE_MODE"
+ case MLDv2MulticastAddressRecordTypeChangeToExcludeMode:
+ return "CHANGE_TO_EXCLUDE_MODE"
+ case MLDv2MulticastAddressRecordTypeAllowNewSources:
+ return "ALLOW_NEW_SOURCES"
+ case MLDv2MulticastAddressRecordTypeBlockOldSources:
+ return "BLOCK_OLD_SOURCES"
+ default:
+ return fmt.Sprintf("UNKNOWN(%d)", m)
+ }
+}
+
+// MLDv2MulticastAddressRecord contains information on the sender listening to a
+// single multicast address on the interface from which the report is sent.
+// https://tools.ietf.org/html/rfc3810#section-5.2.4
+type MLDv2MulticastAddressRecord struct {
+ // 5.2.5. Record Type
+ RecordType MLDv2MulticastAddressRecordType
+ // 5.2.6. Auxiliary Data Length (number of 32-bit words)
+ AuxDataLen uint8
+ // 5.2.7. Number Of Sources (N)
+ N uint16
+ // 5.2.8. Multicast Address
+ MulticastAddress net.IP
+ // 5.2.9 Source Address [i]
+ SourceAddresses []net.IP
+ // 5.2.10 Auxiliary Data
+ AuxiliaryData []byte
+}
+
+// decodes a multicast address record from bytes
+func (m *MLDv2MulticastAddressRecord) decode(data []byte, df gopacket.DecodeFeedback) (int, error) {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return 0, errors.New(
+ "Multicast Listener Report Message V2 layer less than 20 bytes for Multicast Address Record")
+ }
+
+ m.RecordType = MLDv2MulticastAddressRecordType(data[0])
+ m.AuxDataLen = data[1]
+ m.N = binary.BigEndian.Uint16(data[2:4])
+ m.MulticastAddress = data[4:20]
+
+ for i := uint16(0); i < m.N; i++ {
+ begin := 20 + (int(i) * 16)
+ end := begin + 16
+
+ if len(data) < end {
+ df.SetTruncated()
+ return begin, fmt.Errorf(
+ "Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record", end)
+ }
+
+ m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+ }
+
+ expectedLengthWithoutAuxData := 20 + (int(m.N) * 16)
+ expectedTotalLength := (int(m.AuxDataLen) * 4) + expectedLengthWithoutAuxData // *4 because AuxDataLen counts 32-bit words
+ if len(data) < expectedTotalLength {
+ df.SetTruncated()
+ return expectedLengthWithoutAuxData, fmt.Errorf(
+ "Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record",
+ expectedTotalLength)
+ }
+
+ m.AuxiliaryData = data[expectedLengthWithoutAuxData:expectedTotalLength]
+
+ return expectedTotalLength, nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastAddressRecord) String() string {
+ return fmt.Sprintf(
+ "RecordType: %d (%s), AuxDataLen: %d [32-bit words], N: %d, Multicast Address: %s, SourceAddresses: %s, Auxiliary Data: %#x",
+ m.RecordType,
+ m.RecordType.String(),
+ m.AuxDataLen,
+ m.N,
+ m.MulticastAddress.To16(),
+ m.SourceAddresses,
+ m.AuxiliaryData)
+}
+
+// serializes a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if err := m.serializeAuxiliaryDataTo(b, opts); err != nil {
+ return err
+ }
+
+ if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+ return err
+ }
+
+ buf, err := b.PrependBytes(20)
+ if err != nil {
+ return err
+ }
+
+ buf[0] = uint8(m.RecordType)
+ buf[1] = m.AuxDataLen
+ binary.BigEndian.PutUint16(buf[2:4], m.N)
+
+ ma16 := m.MulticastAddress.To16()
+ if ma16 == nil {
+ return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+ }
+ copy(buf[4:20], ma16)
+
+ return nil
+}
+
+// serializes the auxiliary data of a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeAuxiliaryDataTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if remainder := len(m.AuxiliaryData) % 4; remainder != 0 {
+ // Pad the auxiliary data up to the next 32-bit word boundary.
+ zeroWord := []byte{0x0, 0x0, 0x0, 0x0}
+ m.AuxiliaryData = append(m.AuxiliaryData, zeroWord[:4-remainder]...)
+ }
+
+ if opts.FixLengths {
+ auxDataLen := len(m.AuxiliaryData) / 4
+
+ if auxDataLen > math.MaxUint8 {
+ return fmt.Errorf("auxilary data is %d 32-bit words, but the maximum is 255 32-bit words", auxDataLen)
+ }
+
+ m.AuxDataLen = uint8(auxDataLen)
+ }
+
+ buf, err := b.PrependBytes(len(m.AuxiliaryData))
+ if err != nil {
+ return err
+ }
+
+ copy(buf, m.AuxiliaryData)
+ return nil
+}
+
+// serializes the source addresses of a multicast address record preserving the order
+func (m *MLDv2MulticastAddressRecord) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if opts.FixLengths {
+ numberOfSourceAddresses := len(m.SourceAddresses)
+
+ if numberOfSourceAddresses > math.MaxUint16 {
+ return fmt.Errorf(
+ "%d source addresses added, but the maximum is 65535",
+ numberOfSourceAddresses)
+ }
+
+ m.N = uint16(numberOfSourceAddresses)
+ }
+
+ lastItemIdx := len(m.SourceAddresses) - 1
+ for k := range m.SourceAddresses {
+ i := lastItemIdx - k // reverse order
+
+ buf, err := b.PrependBytes(16)
+ if err != nil {
+ return err
+ }
+
+ sa16 := m.SourceAddresses[i].To16()
+ if sa16 == nil {
+ return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+ }
+ copy(buf, sa16)
+ }
+
+ return nil
+}
+
+func decodeMLDv2MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+ m := &MLDv2MulticastListenerReportMessage{}
+ return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv2MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+ m := &MLDv2MulticastListenerQueryMessage{}
+ return decodingLayerDecoder(m, data, p)
+}
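+
+// The following is an illustrative usage sketch only (it assumes nothing
+// beyond the exported names above and gopacket's standard serialization
+// helpers): build a report with one record and let FixLengths fill in
+// NumberOfMulticastAddressRecords and the per-record counts.
+//
+//	msg := &MLDv2MulticastListenerReportMessage{
+//	    MulticastAddressRecords: []MLDv2MulticastAddressRecord{{
+//	        RecordType:       MLDv2MulticastAddressRecordTypeChangeToExcludeMode,
+//	        MulticastAddress: net.ParseIP("ff02::1:3"),
+//	    }},
+//	}
+//	buf := gopacket.NewSerializeBuffer()
+//	if err := msg.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true}); err != nil {
+//	    // handle the error
+//	}
+//	_ = buf.Bytes() // 4-byte header followed by one 20-byte record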
diff --git a/vendor/github.com/google/gopacket/layers/modbustcp.go b/vendor/github.com/google/gopacket/layers/modbustcp.go
new file mode 100644
index 0000000..bafbd74
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/modbustcp.go
@@ -0,0 +1,150 @@
+// Copyright 2018, The GoPacket Authors, All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// ModbusTCP Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for ModbusTCP.
+//
+//******************************************************************************
+
+const mbapRecordSizeInBytes int = 7
+const modbusPDUMinimumRecordSizeInBytes int = 2
+const modbusPDUMaximumRecordSizeInBytes int = 253
+
+// ModbusProtocol type
+type ModbusProtocol uint16
+
+// ModbusProtocol known values.
+const (
+ ModbusProtocolModbus ModbusProtocol = 0
+)
+
+func (mp ModbusProtocol) String() string {
+ switch mp {
+ default:
+ return "Unknown"
+ case ModbusProtocolModbus:
+ return "Modbus"
+ }
+}
+
+//******************************************************************************
+
+// ModbusTCP Type
+// --------
+// Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object
+// represents, in structured form, the MODBUS Application Protocol (MBAP) header
+// present as the TCP payload in a ModbusTCP packet.
+//
+type ModbusTCP struct {
+ BaseLayer // Stores the packet bytes and payload (Modbus PDU) bytes.
+
+ TransactionIdentifier uint16 // Identification of a MODBUS Request/Response transaction
+ ProtocolIdentifier ModbusProtocol // It is used for intra-system multiplexing
+ Length uint16 // Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length)
+ UnitIdentifier uint8 // Identification of a remote slave connected on a serial line or on other buses
+}
+
+//******************************************************************************
+
+// LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP.
+func (d *ModbusTCP) LayerType() gopacket.LayerType {
+ return LayerTypeModbusTCP
+}
+
+//******************************************************************************
+
+// decodeModbusTCP analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns a non-nil error.
+//
+// This function is employed in layertypes.go to register the ModbusTCP layer.
+func decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error {
+
+ // Attempt to decode the byte slice.
+ d := &ModbusTCP{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ // If the decoding worked, add the layer to the packet and set it
+ // as the application layer too, if there isn't already one.
+ p.AddLayer(d)
+ p.SetApplicationLayer(d)
+
+ return p.NextDecoder(d.NextLayerType())
+
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// Upon success, it loads the ModbusTCP object with information about the packet
+// and returns nil.
+// Upon failure, it returns a non-nil error.
+func (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ // If the data block is too short to be an MBAP record, then return an error.
+ if len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes {
+ df.SetTruncated()
+ return errors.New("ModbusTCP packet too short")
+ }
+
+ if len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes {
+ df.SetTruncated()
+ return errors.New("ModbusTCP packet too long")
+ }
+
+ // ModbusTCP type embeds type BaseLayer which contains two fields:
+ // Contents is supposed to contain the bytes of the data at this level (MPBA).
+ // Payload is supposed to contain the payload of this level (PDU).
+ d.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:]}
+
+ // Extract the fields from the block of bytes.
+ // The fields can just be copied in big endian order.
+ d.TransactionIdentifier = binary.BigEndian.Uint16(data[:2])
+ d.ProtocolIdentifier = ModbusProtocol(binary.BigEndian.Uint16(data[2:4]))
+ d.Length = binary.BigEndian.Uint16(data[4:6])
+
+ // Length should have the size of the payload plus one byte (size of UnitIdentifier)
+ if d.Length != uint16(len(d.BaseLayer.Payload)+1) {
+ df.SetTruncated()
+ return errors.New("ModbusTCP packet with wrong field value (Length)")
+ }
+ d.UnitIdentifier = uint8(data[6])
+
+ return nil
+}
+
+//******************************************************************************
+
+// NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload.
+func (d *ModbusTCP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+//******************************************************************************
+
+// Payload returns the Modbus Protocol Data Unit (PDU), composed of the Function Code and Data, that is carried within ModbusTCP packets.
+func (d *ModbusTCP) Payload() []byte {
+ return d.BaseLayer.Payload
+}
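+
+// Illustrative sketch (the bytes below are hypothetical, not taken from any
+// test suite): decoding an MBAP header plus a 5-byte PDU directly via
+// DecodeFromBytes. Note the Length field (6) must equal the PDU length plus
+// one byte for the UnitIdentifier.
+//
+//	raw := []byte{
+//	    0x00, 0x01, // TransactionIdentifier = 1
+//	    0x00, 0x00, // ProtocolIdentifier = 0 (Modbus)
+//	    0x00, 0x06, // Length = 6
+//	    0x01,       // UnitIdentifier = 1
+//	    0x03, 0x00, 0x00, 0x00, 0x0a, // PDU: Read Holding Registers
+//	}
+//	var mb ModbusTCP
+//	if err := mb.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err == nil {
+//	    fmt.Printf("txn=%d unit=%d pdu=% x\n",
+//	        mb.TransactionIdentifier, mb.UnitIdentifier, mb.Payload())
+//	}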
diff --git a/vendor/github.com/google/gopacket/layers/mpls.go b/vendor/github.com/google/gopacket/layers/mpls.go
new file mode 100644
index 0000000..83079a0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mpls.go
@@ -0,0 +1,87 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "github.com/google/gopacket"
+)
+
+// MPLS is the MPLS packet header.
+type MPLS struct {
+ BaseLayer
+ Label uint32
+ TrafficClass uint8
+ StackBottom bool
+ TTL uint8
+}
+
+// LayerType returns gopacket.LayerTypeMPLS.
+func (m *MPLS) LayerType() gopacket.LayerType { return LayerTypeMPLS }
+
+// ProtocolGuessingDecoder attempts to guess the protocol of the bytes it's
+// given, then decode the packet accordingly. Its algorithm for guessing is:
+// If the packet starts with byte 0x45-0x4F: IPv4
+// If the packet starts with byte 0x60-0x6F: IPv6
+// Otherwise: Error
+// See draft-hsmit-isis-aal5mux-00.txt for more detail on this approach.
+type ProtocolGuessingDecoder struct{}
+
+func (ProtocolGuessingDecoder) Decode(data []byte, p gopacket.PacketBuilder) error {
+ switch data[0] {
+ // 0x40 | header_len, where header_len is at least 5.
+ case 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f:
+ return decodeIPv4(data, p)
+ // IPv6 can start with any byte whose first 4 bits are 0x6.
+ case 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f:
+ return decodeIPv6(data, p)
+ }
+ return errors.New("Unable to guess protocol of packet data")
+}
+
+// MPLSPayloadDecoder is the decoder used to decode data encapsulated by each
+// MPLS layer.
+// which decoder to use. This is initially set to ProtocolGuessingDecoder, our
+// simple attempt at guessing protocols based on the first few bytes of data
+// available to us. However, if you know that in your environment MPLS always
+// encapsulates a specific protocol, you may reset this.
+var MPLSPayloadDecoder gopacket.Decoder = ProtocolGuessingDecoder{}
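+
+// For example, in an environment where MPLS is known to always encapsulate
+// IPv4, the guessing step could be skipped like this (illustrative sketch;
+// any registered LayerType can serve as a gopacket.Decoder):
+//
+//	layers.MPLSPayloadDecoder = layers.LayerTypeIPv4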
+
+func decodeMPLS(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 4 {
+ p.SetTruncated()
+ return errors.New("MPLS header too short")
+ }
+ decoded := binary.BigEndian.Uint32(data[:4])
+ mpls := &MPLS{
+ Label: decoded >> 12,
+ TrafficClass: uint8(decoded>>9) & 0x7,
+ StackBottom: decoded&0x100 != 0,
+ TTL: uint8(decoded),
+ BaseLayer: BaseLayer{data[:4], data[4:]},
+ }
+ p.AddLayer(mpls)
+ if mpls.StackBottom {
+ return p.NextDecoder(MPLSPayloadDecoder)
+ }
+ return p.NextDecoder(gopacket.DecodeFunc(decodeMPLS))
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MPLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ encoded := m.Label << 12
+ encoded |= uint32(m.TrafficClass) << 9
+ encoded |= uint32(m.TTL)
+ if m.StackBottom {
+ encoded |= 0x100
+ }
+ binary.BigEndian.PutUint32(bytes, encoded)
+ return nil
+}
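+
+// Illustrative sketch of building a single bottom-of-stack label entry in
+// front of a hypothetical payload slice, using the generic gopacket
+// serialization helpers:
+//
+//	mpls := &MPLS{Label: 100, TrafficClass: 0, StackBottom: true, TTL: 64}
+//	buf := gopacket.NewSerializeBuffer()
+//	_ = gopacket.SerializeLayers(buf, gopacket.SerializeOptions{},
+//	    mpls, gopacket.Payload(payload))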
diff --git a/vendor/github.com/google/gopacket/layers/ndp.go b/vendor/github.com/google/gopacket/layers/ndp.go
new file mode 100644
index 0000000..f7ca1b2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ndp.go
@@ -0,0 +1,611 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+// http://anonsvn.wireshark.org/wireshark/trunk/epan/dissectors/packet-ndp.c
+
+package layers
+
+import (
+ "fmt"
+ "github.com/google/gopacket"
+ "net"
+)
+
+// NDPChassisType holds the Chassis type code carried in a Nortel Discovery
+// Protocol packet.
+type NDPChassisType uint8
+
+// Nortel Chassis Types
+const (
+ NDPChassisother NDPChassisType = 1
+ NDPChassis3000 NDPChassisType = 2
+ NDPChassis3030 NDPChassisType = 3
+ NDPChassis2310 NDPChassisType = 4
+ NDPChassis2810 NDPChassisType = 5
+ NDPChassis2912 NDPChassisType = 6
+ NDPChassis2914 NDPChassisType = 7
+ NDPChassis271x NDPChassisType = 8
+ NDPChassis2813 NDPChassisType = 9
+ NDPChassis2814 NDPChassisType = 10
+ NDPChassis2915 NDPChassisType = 11
+ NDPChassis5000 NDPChassisType = 12
+ NDPChassis2813SA NDPChassisType = 13
+ NDPChassis2814SA NDPChassisType = 14
+ NDPChassis810M NDPChassisType = 15
+ NDPChassisEthercell NDPChassisType = 16
+ NDPChassis5005 NDPChassisType = 17
+ NDPChassisAlcatelEWC NDPChassisType = 18
+ NDPChassis2715SA NDPChassisType = 20
+ NDPChassis2486 NDPChassisType = 21
+ NDPChassis28000series NDPChassisType = 22
+ NDPChassis23000series NDPChassisType = 23
+ NDPChassis5DN00xseries NDPChassisType = 24
+ NDPChassisBayStackEthernet NDPChassisType = 25
+ NDPChassis23100series NDPChassisType = 26
+ NDPChassis100BaseTHub NDPChassisType = 27
+ NDPChassis3000FastEthernet NDPChassisType = 28
+ NDPChassisOrionSwitch NDPChassisType = 29
+ NDPChassisDDS NDPChassisType = 31
+ NDPChassisCentillion6slot NDPChassisType = 32
+ NDPChassisCentillion12slot NDPChassisType = 33
+ NDPChassisCentillion1slot NDPChassisType = 34
+ NDPChassisBayStack301 NDPChassisType = 35
+ NDPChassisBayStackTokenRingHub NDPChassisType = 36
+ NDPChassisFVCMultimediaSwitch NDPChassisType = 37
+ NDPChassisSwitchNode NDPChassisType = 38
+ NDPChassisBayStack302Switch NDPChassisType = 39
+ NDPChassisBayStack350Switch NDPChassisType = 40
+ NDPChassisBayStack150EthernetHub NDPChassisType = 41
+ NDPChassisCentillion50NSwitch NDPChassisType = 42
+ NDPChassisCentillion50TSwitch NDPChassisType = 43
+ NDPChassisBayStack303304Switches NDPChassisType = 44
+ NDPChassisBayStack200EthernetHub NDPChassisType = 45
+ NDPChassisBayStack25010100EthernetHub NDPChassisType = 46
+ NDPChassisBayStack450101001000Switches NDPChassisType = 48
+ NDPChassisBayStack41010100Switches NDPChassisType = 49
+ NDPChassisPassport1200L3Switch NDPChassisType = 50
+ NDPChassisPassport1250L3Switch NDPChassisType = 51
+ NDPChassisPassport1100L3Switch NDPChassisType = 52
+ NDPChassisPassport1150L3Switch NDPChassisType = 53
+ NDPChassisPassport1050L3Switch NDPChassisType = 54
+ NDPChassisPassport1051L3Switch NDPChassisType = 55
+ NDPChassisPassport8610L3Switch NDPChassisType = 56
+ NDPChassisPassport8606L3Switch NDPChassisType = 57
+ NDPChassisPassport8010 NDPChassisType = 58
+ NDPChassisPassport8006 NDPChassisType = 59
+ NDPChassisBayStack670wirelessaccesspoint NDPChassisType = 60
+ NDPChassisPassport740 NDPChassisType = 61
+ NDPChassisPassport750 NDPChassisType = 62
+ NDPChassisPassport790 NDPChassisType = 63
+ NDPChassisBusinessPolicySwitch200010100Switches NDPChassisType = 64
+ NDPChassisPassport8110L2Switch NDPChassisType = 65
+ NDPChassisPassport8106L2Switch NDPChassisType = 66
+ NDPChassisBayStack3580GigSwitch NDPChassisType = 67
+ NDPChassisBayStack10PowerSupplyUnit NDPChassisType = 68
+ NDPChassisBayStack42010100Switch NDPChassisType = 69
+ NDPChassisOPTeraMetro1200EthernetServiceModule NDPChassisType = 70
+ NDPChassisOPTera8010co NDPChassisType = 71
+ NDPChassisOPTera8610coL3Switch NDPChassisType = 72
+ NDPChassisOPTera8110coL2Switch NDPChassisType = 73
+ NDPChassisOPTera8003 NDPChassisType = 74
+ NDPChassisOPTera8603L3Switch NDPChassisType = 75
+ NDPChassisOPTera8103L2Switch NDPChassisType = 76
+ NDPChassisBayStack380101001000Switch NDPChassisType = 77
+ NDPChassisEthernetSwitch47048T NDPChassisType = 78
+ NDPChassisOPTeraMetro1450EthernetServiceModule NDPChassisType = 79
+ NDPChassisOPTeraMetro1400EthernetServiceModule NDPChassisType = 80
+ NDPChassisAlteonSwitchFamily NDPChassisType = 81
+ NDPChassisEthernetSwitch46024TPWR NDPChassisType = 82
+ NDPChassisOPTeraMetro8010OPML2Switch NDPChassisType = 83
+ NDPChassisOPTeraMetro8010coOPML2Switch NDPChassisType = 84
+ NDPChassisOPTeraMetro8006OPML2Switch NDPChassisType = 85
+ NDPChassisOPTeraMetro8003OPML2Switch NDPChassisType = 86
+ NDPChassisAlteon180e NDPChassisType = 87
+ NDPChassisAlteonAD3 NDPChassisType = 88
+ NDPChassisAlteon184 NDPChassisType = 89
+ NDPChassisAlteonAD4 NDPChassisType = 90
+ NDPChassisPassport1424L3Switch NDPChassisType = 91
+ NDPChassisPassport1648L3Switch NDPChassisType = 92
+ NDPChassisPassport1612L3Switch NDPChassisType = 93
+ NDPChassisPassport1624L3Switch NDPChassisType = 94
+ NDPChassisBayStack38024FFiber1000Switch NDPChassisType = 95
+ NDPChassisEthernetRoutingSwitch551024T NDPChassisType = 96
+ NDPChassisEthernetRoutingSwitch551048T NDPChassisType = 97
+ NDPChassisEthernetSwitch47024T NDPChassisType = 98
+ NDPChassisNortelNetworksWirelessLANAccessPoint2220 NDPChassisType = 99
+ NDPChassisPassportRBS2402L3Switch NDPChassisType = 100
+ NDPChassisAlteonApplicationSwitch2424 NDPChassisType = 101
+ NDPChassisAlteonApplicationSwitch2224 NDPChassisType = 102
+ NDPChassisAlteonApplicationSwitch2208 NDPChassisType = 103
+ NDPChassisAlteonApplicationSwitch2216 NDPChassisType = 104
+ NDPChassisAlteonApplicationSwitch3408 NDPChassisType = 105
+ NDPChassisAlteonApplicationSwitch3416 NDPChassisType = 106
+ NDPChassisNortelNetworksWirelessLANSecuritySwitch2250 NDPChassisType = 107
+ NDPChassisEthernetSwitch42548T NDPChassisType = 108
+ NDPChassisEthernetSwitch42524T NDPChassisType = 109
+ NDPChassisNortelNetworksWirelessLANAccessPoint2221 NDPChassisType = 110
+ NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch NDPChassisType = 111
+ NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch NDPChassisType = 112
+ NDPChassisPassport830010slotchassis NDPChassisType = 113
+ NDPChassisPassport83006slotchassis NDPChassisType = 114
+ NDPChassisEthernetRoutingSwitch552024TPWR NDPChassisType = 115
+ NDPChassisEthernetRoutingSwitch552048TPWR NDPChassisType = 116
+ NDPChassisNortelNetworksVPNGateway3050 NDPChassisType = 117
+ NDPChassisAlteonSSL31010100 NDPChassisType = 118
+ NDPChassisAlteonSSL31010100Fiber NDPChassisType = 119
+ NDPChassisAlteonSSL31010100FIPS NDPChassisType = 120
+ NDPChassisAlteonSSL410101001000 NDPChassisType = 121
+ NDPChassisAlteonSSL410101001000Fiber NDPChassisType = 122
+ NDPChassisAlteonApplicationSwitch2424SSL NDPChassisType = 123
+ NDPChassisEthernetSwitch32524T NDPChassisType = 124
+ NDPChassisEthernetSwitch32524G NDPChassisType = 125
+ NDPChassisNortelNetworksWirelessLANAccessPoint2225 NDPChassisType = 126
+ NDPChassisNortelNetworksWirelessLANSecuritySwitch2270 NDPChassisType = 127
+ NDPChassis24portEthernetSwitch47024TPWR NDPChassisType = 128
+ NDPChassis48portEthernetSwitch47048TPWR NDPChassisType = 129
+ NDPChassisEthernetRoutingSwitch553024TFD NDPChassisType = 130
+ NDPChassisEthernetSwitch351024T NDPChassisType = 131
+ NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch NDPChassisType = 132
+ NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch NDPChassisType = 133
+ NDPChassisNortelSecureAccessSwitch NDPChassisType = 134
+ NDPChassisNortelNetworksVPNGateway3070 NDPChassisType = 135
+ NDPChassisOPTeraMetro3500 NDPChassisType = 136
+ NDPChassisSMBBES101024T NDPChassisType = 137
+ NDPChassisSMBBES101048T NDPChassisType = 138
+ NDPChassisSMBBES102024TPWR NDPChassisType = 139
+ NDPChassisSMBBES102048TPWR NDPChassisType = 140
+ NDPChassisSMBBES201024T NDPChassisType = 141
+ NDPChassisSMBBES201048T NDPChassisType = 142
+ NDPChassisSMBBES202024TPWR NDPChassisType = 143
+ NDPChassisSMBBES202048TPWR NDPChassisType = 144
+ NDPChassisSMBBES11024T NDPChassisType = 145
+ NDPChassisSMBBES11048T NDPChassisType = 146
+ NDPChassisSMBBES12024TPWR NDPChassisType = 147
+ NDPChassisSMBBES12048TPWR NDPChassisType = 148
+ NDPChassisSMBBES21024T NDPChassisType = 149
+ NDPChassisSMBBES21048T NDPChassisType = 150
+ NDPChassisSMBBES22024TPWR NDPChassisType = 151
+ NDPChassisSMBBES22048TPWR NDPChassisType = 152
+ NDPChassisOME6500 NDPChassisType = 153
+ NDPChassisEthernetRoutingSwitch4548GT NDPChassisType = 154
+ NDPChassisEthernetRoutingSwitch4548GTPWR NDPChassisType = 155
+ NDPChassisEthernetRoutingSwitch4550T NDPChassisType = 156
+ NDPChassisEthernetRoutingSwitch4550TPWR NDPChassisType = 157
+ NDPChassisEthernetRoutingSwitch4526FX NDPChassisType = 158
+ NDPChassisEthernetRoutingSwitch250026T NDPChassisType = 159
+ NDPChassisEthernetRoutingSwitch250026TPWR NDPChassisType = 160
+ NDPChassisEthernetRoutingSwitch250050T NDPChassisType = 161
+ NDPChassisEthernetRoutingSwitch250050TPWR NDPChassisType = 162
+)
+
+// NDPBackplaneType holds the Backplane type code carried in a Nortel
+// Discovery Protocol packet.
+type NDPBackplaneType uint8
+
+// Nortel Backplane Types
+const (
+ NDPBackplaneOther NDPBackplaneType = 1
+ NDPBackplaneEthernet NDPBackplaneType = 2
+ NDPBackplaneEthernetTokenring NDPBackplaneType = 3
+ NDPBackplaneEthernetFDDI NDPBackplaneType = 4
+ NDPBackplaneEthernetTokenringFDDI NDPBackplaneType = 5
+ NDPBackplaneEthernetTokenringRedundantPower NDPBackplaneType = 6
+ NDPBackplaneEthernetTokenringFDDIRedundantPower NDPBackplaneType = 7
+ NDPBackplaneTokenRing NDPBackplaneType = 8
+ NDPBackplaneEthernetTokenringFastEthernet NDPBackplaneType = 9
+ NDPBackplaneEthernetFastEthernet NDPBackplaneType = 10
+ NDPBackplaneEthernetTokenringFastEthernetRedundantPower NDPBackplaneType = 11
+ NDPBackplaneEthernetFastEthernetGigabitEthernet NDPBackplaneType = 12
+)
+
+// NDPState holds the device state code carried in a Nortel Discovery
+// Protocol packet.
+type NDPState uint8
+
+// Device State
+const (
+ NDPStateTopology NDPState = 1
+ NDPStateHeartbeat NDPState = 2
+ NDPStateNew NDPState = 3
+)
+
+// NortelDiscovery is a packet layer containing the Nortel Discovery Protocol.
+type NortelDiscovery struct {
+ BaseLayer
+ IPAddress net.IP
+ SegmentID []byte
+ Chassis NDPChassisType
+ Backplane NDPBackplaneType
+ State NDPState
+ NumLinks uint8
+}
+
+// LayerType returns gopacket.LayerTypeNortelDiscovery.
+func (c *NortelDiscovery) LayerType() gopacket.LayerType {
+ return LayerTypeNortelDiscovery
+}
+
+func decodeNortelDiscovery(data []byte, p gopacket.PacketBuilder) error {
+ c := &NortelDiscovery{}
+ if len(data) < 11 {
+ return fmt.Errorf("Invalid NortelDiscovery packet length %d", len(data))
+ }
+ c.IPAddress = data[0:4]
+ c.SegmentID = data[4:7]
+ c.Chassis = NDPChassisType(data[7])
+ c.Backplane = NDPBackplaneType(data[8])
+ c.State = NDPState(data[9])
+ c.NumLinks = uint8(data[10])
+ p.AddLayer(c)
+ return nil
+}
+
+func (t NDPChassisType) String() (s string) {
+ switch t {
+ case NDPChassisother:
+ s = "other"
+ case NDPChassis3000:
+ s = "3000"
+ case NDPChassis3030:
+ s = "3030"
+ case NDPChassis2310:
+ s = "2310"
+ case NDPChassis2810:
+ s = "2810"
+ case NDPChassis2912:
+ s = "2912"
+ case NDPChassis2914:
+ s = "2914"
+ case NDPChassis271x:
+ s = "271x"
+ case NDPChassis2813:
+ s = "2813"
+ case NDPChassis2814:
+ s = "2814"
+ case NDPChassis2915:
+ s = "2915"
+ case NDPChassis5000:
+ s = "5000"
+ case NDPChassis2813SA:
+ s = "2813SA"
+ case NDPChassis2814SA:
+ s = "2814SA"
+ case NDPChassis810M:
+ s = "810M"
+ case NDPChassisEthercell:
+ s = "Ethercell"
+ case NDPChassis5005:
+ s = "5005"
+ case NDPChassisAlcatelEWC:
+ s = "Alcatel Ethernet workgroup conc."
+ case NDPChassis2715SA:
+ s = "2715SA"
+ case NDPChassis2486:
+ s = "2486"
+ case NDPChassis28000series:
+ s = "28000 series"
+ case NDPChassis23000series:
+ s = "23000 series"
+ case NDPChassis5DN00xseries:
+ s = "5DN00x series"
+ case NDPChassisBayStackEthernet:
+ s = "BayStack Ethernet"
+ case NDPChassis23100series:
+ s = "23100 series"
+ case NDPChassis100BaseTHub:
+ s = "100Base-T Hub"
+ case NDPChassis3000FastEthernet:
+ s = "3000 Fast Ethernet"
+ case NDPChassisOrionSwitch:
+ s = "Orion switch"
+ case NDPChassisDDS:
+ s = "DDS"
+ case NDPChassisCentillion6slot:
+ s = "Centillion (6 slot)"
+ case NDPChassisCentillion12slot:
+ s = "Centillion (12 slot)"
+ case NDPChassisCentillion1slot:
+ s = "Centillion (1 slot)"
+ case NDPChassisBayStack301:
+ s = "BayStack 301"
+ case NDPChassisBayStackTokenRingHub:
+ s = "BayStack TokenRing Hub"
+ case NDPChassisFVCMultimediaSwitch:
+ s = "FVC Multimedia Switch"
+ case NDPChassisSwitchNode:
+ s = "Switch Node"
+ case NDPChassisBayStack302Switch:
+ s = "BayStack 302 Switch"
+ case NDPChassisBayStack350Switch:
+ s = "BayStack 350 Switch"
+ case NDPChassisBayStack150EthernetHub:
+ s = "BayStack 150 Ethernet Hub"
+ case NDPChassisCentillion50NSwitch:
+ s = "Centillion 50N switch"
+ case NDPChassisCentillion50TSwitch:
+ s = "Centillion 50T switch"
+ case NDPChassisBayStack303304Switches:
+ s = "BayStack 303 and 304 Switches"
+ case NDPChassisBayStack200EthernetHub:
+ s = "BayStack 200 Ethernet Hub"
+ case NDPChassisBayStack25010100EthernetHub:
+ s = "BayStack 250 10/100 Ethernet Hub"
+ case NDPChassisBayStack450101001000Switches:
+ s = "BayStack 450 10/100/1000 Switches"
+ case NDPChassisBayStack41010100Switches:
+ s = "BayStack 410 10/100 Switches"
+ case NDPChassisPassport1200L3Switch:
+ s = "Passport 1200 L3 Switch"
+ case NDPChassisPassport1250L3Switch:
+ s = "Passport 1250 L3 Switch"
+ case NDPChassisPassport1100L3Switch:
+ s = "Passport 1100 L3 Switch"
+ case NDPChassisPassport1150L3Switch:
+ s = "Passport 1150 L3 Switch"
+ case NDPChassisPassport1050L3Switch:
+ s = "Passport 1050 L3 Switch"
+ case NDPChassisPassport1051L3Switch:
+ s = "Passport 1051 L3 Switch"
+ case NDPChassisPassport8610L3Switch:
+ s = "Passport 8610 L3 Switch"
+ case NDPChassisPassport8606L3Switch:
+ s = "Passport 8606 L3 Switch"
+ case NDPChassisPassport8010:
+ s = "Passport 8010"
+ case NDPChassisPassport8006:
+ s = "Passport 8006"
+ case NDPChassisBayStack670wirelessaccesspoint:
+ s = "BayStack 670 wireless access point"
+ case NDPChassisPassport740:
+ s = "Passport 740"
+ case NDPChassisPassport750:
+ s = "Passport 750"
+ case NDPChassisPassport790:
+ s = "Passport 790"
+ case NDPChassisBusinessPolicySwitch200010100Switches:
+ s = "Business Policy Switch 2000 10/100 Switches"
+ case NDPChassisPassport8110L2Switch:
+ s = "Passport 8110 L2 Switch"
+ case NDPChassisPassport8106L2Switch:
+ s = "Passport 8106 L2 Switch"
+ case NDPChassisBayStack3580GigSwitch:
+ s = "BayStack 3580 Gig Switch"
+ case NDPChassisBayStack10PowerSupplyUnit:
+ s = "BayStack 10 Power Supply Unit"
+ case NDPChassisBayStack42010100Switch:
+ s = "BayStack 420 10/100 Switch"
+ case NDPChassisOPTeraMetro1200EthernetServiceModule:
+ s = "OPTera Metro 1200 Ethernet Service Module"
+ case NDPChassisOPTera8010co:
+ s = "OPTera 8010co"
+ case NDPChassisOPTera8610coL3Switch:
+ s = "OPTera 8610co L3 switch"
+ case NDPChassisOPTera8110coL2Switch:
+ s = "OPTera 8110co L2 switch"
+ case NDPChassisOPTera8003:
+ s = "OPTera 8003"
+ case NDPChassisOPTera8603L3Switch:
+ s = "OPTera 8603 L3 switch"
+ case NDPChassisOPTera8103L2Switch:
+ s = "OPTera 8103 L2 switch"
+ case NDPChassisBayStack380101001000Switch:
+ s = "BayStack 380 10/100/1000 Switch"
+ case NDPChassisEthernetSwitch47048T:
+ s = "Ethernet Switch 470-48T"
+ case NDPChassisOPTeraMetro1450EthernetServiceModule:
+ s = "OPTera Metro 1450 Ethernet Service Module"
+ case NDPChassisOPTeraMetro1400EthernetServiceModule:
+ s = "OPTera Metro 1400 Ethernet Service Module"
+ case NDPChassisAlteonSwitchFamily:
+ s = "Alteon Switch Family"
+ case NDPChassisEthernetSwitch46024TPWR:
+ s = "Ethernet Switch 460-24T-PWR"
+ case NDPChassisOPTeraMetro8010OPML2Switch:
+ s = "OPTera Metro 8010 OPM L2 Switch"
+ case NDPChassisOPTeraMetro8010coOPML2Switch:
+ s = "OPTera Metro 8010co OPM L2 Switch"
+ case NDPChassisOPTeraMetro8006OPML2Switch:
+ s = "OPTera Metro 8006 OPM L2 Switch"
+ case NDPChassisOPTeraMetro8003OPML2Switch:
+ s = "OPTera Metro 8003 OPM L2 Switch"
+ case NDPChassisAlteon180e:
+ s = "Alteon 180e"
+ case NDPChassisAlteonAD3:
+ s = "Alteon AD3"
+ case NDPChassisAlteon184:
+ s = "Alteon 184"
+ case NDPChassisAlteonAD4:
+ s = "Alteon AD4"
+ case NDPChassisPassport1424L3Switch:
+ s = "Passport 1424 L3 switch"
+ case NDPChassisPassport1648L3Switch:
+ s = "Passport 1648 L3 switch"
+ case NDPChassisPassport1612L3Switch:
+ s = "Passport 1612 L3 switch"
+ case NDPChassisPassport1624L3Switch:
+ s = "Passport 1624 L3 switch"
+ case NDPChassisBayStack38024FFiber1000Switch:
+ s = "BayStack 380-24F Fiber 1000 Switch"
+ case NDPChassisEthernetRoutingSwitch551024T:
+ s = "Ethernet Routing Switch 5510-24T"
+ case NDPChassisEthernetRoutingSwitch551048T:
+ s = "Ethernet Routing Switch 5510-48T"
+ case NDPChassisEthernetSwitch47024T:
+ s = "Ethernet Switch 470-24T"
+ case NDPChassisNortelNetworksWirelessLANAccessPoint2220:
+ s = "Nortel Networks Wireless LAN Access Point 2220"
+ case NDPChassisPassportRBS2402L3Switch:
+ s = "Passport RBS 2402 L3 switch"
+ case NDPChassisAlteonApplicationSwitch2424:
+ s = "Alteon Application Switch 2424"
+ case NDPChassisAlteonApplicationSwitch2224:
+ s = "Alteon Application Switch 2224"
+ case NDPChassisAlteonApplicationSwitch2208:
+ s = "Alteon Application Switch 2208"
+ case NDPChassisAlteonApplicationSwitch2216:
+ s = "Alteon Application Switch 2216"
+ case NDPChassisAlteonApplicationSwitch3408:
+ s = "Alteon Application Switch 3408"
+ case NDPChassisAlteonApplicationSwitch3416:
+ s = "Alteon Application Switch 3416"
+ case NDPChassisNortelNetworksWirelessLANSecuritySwitch2250:
+ s = "Nortel Networks Wireless LAN SecuritySwitch 2250"
+ case NDPChassisEthernetSwitch42548T:
+ s = "Ethernet Switch 425-48T"
+ case NDPChassisEthernetSwitch42524T:
+ s = "Ethernet Switch 425-24T"
+ case NDPChassisNortelNetworksWirelessLANAccessPoint2221:
+ s = "Nortel Networks Wireless LAN Access Point 2221"
+ case NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch:
+ s = "Nortel Metro Ethernet Service Unit 24-T SPF switch"
+ case NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch:
+ s = " Nortel Metro Ethernet Service Unit 24-T LX DC switch"
+ case NDPChassisPassport830010slotchassis:
+ s = "Passport 8300 10-slot chassis"
+ case NDPChassisPassport83006slotchassis:
+ s = "Passport 8300 6-slot chassis"
+ case NDPChassisEthernetRoutingSwitch552024TPWR:
+ s = "Ethernet Routing Switch 5520-24T-PWR"
+ case NDPChassisEthernetRoutingSwitch552048TPWR:
+ s = "Ethernet Routing Switch 5520-48T-PWR"
+ case NDPChassisNortelNetworksVPNGateway3050:
+ s = "Nortel Networks VPN Gateway 3050"
+ case NDPChassisAlteonSSL31010100:
+ s = "Alteon SSL 310 10/100"
+ case NDPChassisAlteonSSL31010100Fiber:
+ s = "Alteon SSL 310 10/100 Fiber"
+ case NDPChassisAlteonSSL31010100FIPS:
+ s = "Alteon SSL 310 10/100 FIPS"
+ case NDPChassisAlteonSSL410101001000:
+ s = "Alteon SSL 410 10/100/1000"
+ case NDPChassisAlteonSSL410101001000Fiber:
+ s = "Alteon SSL 410 10/100/1000 Fiber"
+ case NDPChassisAlteonApplicationSwitch2424SSL:
+ s = "Alteon Application Switch 2424-SSL"
+ case NDPChassisEthernetSwitch32524T:
+ s = "Ethernet Switch 325-24T"
+ case NDPChassisEthernetSwitch32524G:
+ s = "Ethernet Switch 325-24G"
+ case NDPChassisNortelNetworksWirelessLANAccessPoint2225:
+ s = "Nortel Networks Wireless LAN Access Point 2225"
+ case NDPChassisNortelNetworksWirelessLANSecuritySwitch2270:
+ s = "Nortel Networks Wireless LAN SecuritySwitch 2270"
+ case NDPChassis24portEthernetSwitch47024TPWR:
+ s = "24-port Ethernet Switch 470-24T-PWR"
+ case NDPChassis48portEthernetSwitch47048TPWR:
+ s = "48-port Ethernet Switch 470-48T-PWR"
+ case NDPChassisEthernetRoutingSwitch553024TFD:
+ s = "Ethernet Routing Switch 5530-24TFD"
+ case NDPChassisEthernetSwitch351024T:
+ s = "Ethernet Switch 3510-24T"
+ case NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch:
+ s = "Nortel Metro Ethernet Service Unit 12G AC L3 switch"
+ case NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch:
+ s = "Nortel Metro Ethernet Service Unit 12G DC L3 switch"
+ case NDPChassisNortelSecureAccessSwitch:
+ s = "Nortel Secure Access Switch"
+ case NDPChassisNortelNetworksVPNGateway3070:
+ s = "Nortel Networks VPN Gateway 3070"
+ case NDPChassisOPTeraMetro3500:
+ s = "OPTera Metro 3500"
+ case NDPChassisSMBBES101024T:
+ s = "SMB BES 1010 24T"
+ case NDPChassisSMBBES101048T:
+ s = "SMB BES 1010 48T"
+ case NDPChassisSMBBES102024TPWR:
+ s = "SMB BES 1020 24T PWR"
+ case NDPChassisSMBBES102048TPWR:
+ s = "SMB BES 1020 48T PWR"
+ case NDPChassisSMBBES201024T:
+ s = "SMB BES 2010 24T"
+ case NDPChassisSMBBES201048T:
+ s = "SMB BES 2010 48T"
+ case NDPChassisSMBBES202024TPWR:
+ s = "SMB BES 2020 24T PWR"
+ case NDPChassisSMBBES202048TPWR:
+ s = "SMB BES 2020 48T PWR"
+ case NDPChassisSMBBES11024T:
+ s = "SMB BES 110 24T"
+ case NDPChassisSMBBES11048T:
+ s = "SMB BES 110 48T"
+ case NDPChassisSMBBES12024TPWR:
+ s = "SMB BES 120 24T PWR"
+ case NDPChassisSMBBES12048TPWR:
+ s = "SMB BES 120 48T PWR"
+ case NDPChassisSMBBES21024T:
+ s = "SMB BES 210 24T"
+ case NDPChassisSMBBES21048T:
+ s = "SMB BES 210 48T"
+ case NDPChassisSMBBES22024TPWR:
+ s = "SMB BES 220 24T PWR"
+ case NDPChassisSMBBES22048TPWR:
+ s = "SMB BES 220 48T PWR"
+ case NDPChassisOME6500:
+ s = "OME 6500"
+ case NDPChassisEthernetRoutingSwitch4548GT:
+ s = "Ethernet Routing Switch 4548GT"
+ case NDPChassisEthernetRoutingSwitch4548GTPWR:
+ s = "Ethernet Routing Switch 4548GT-PWR"
+ case NDPChassisEthernetRoutingSwitch4550T:
+ s = "Ethernet Routing Switch 4550T"
+ case NDPChassisEthernetRoutingSwitch4550TPWR:
+ s = "Ethernet Routing Switch 4550T-PWR"
+ case NDPChassisEthernetRoutingSwitch4526FX:
+ s = "Ethernet Routing Switch 4526FX"
+ case NDPChassisEthernetRoutingSwitch250026T:
+ s = "Ethernet Routing Switch 2500-26T"
+ case NDPChassisEthernetRoutingSwitch250026TPWR:
+ s = "Ethernet Routing Switch 2500-26T-PWR"
+ case NDPChassisEthernetRoutingSwitch250050T:
+ s = "Ethernet Routing Switch 2500-50T"
+ case NDPChassisEthernetRoutingSwitch250050TPWR:
+ s = "Ethernet Routing Switch 2500-50T-PWR"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t NDPBackplaneType) String() (s string) {
+ switch t {
+ case NDPBackplaneOther:
+ s = "Other"
+ case NDPBackplaneEthernet:
+ s = "Ethernet"
+ case NDPBackplaneEthernetTokenring:
+ s = "Ethernet and Tokenring"
+ case NDPBackplaneEthernetFDDI:
+ s = "Ethernet and FDDI"
+ case NDPBackplaneEthernetTokenringFDDI:
+ s = "Ethernet, Tokenring and FDDI"
+ case NDPBackplaneEthernetTokenringRedundantPower:
+ s = "Ethernet and Tokenring with redundant power"
+ case NDPBackplaneEthernetTokenringFDDIRedundantPower:
+ s = "Ethernet, Tokenring, FDDI with redundant power"
+ case NDPBackplaneTokenRing:
+ s = "Token Ring"
+ case NDPBackplaneEthernetTokenringFastEthernet:
+ s = "Ethernet, Tokenring and Fast Ethernet"
+ case NDPBackplaneEthernetFastEthernet:
+ s = "Ethernet and Fast Ethernet"
+ case NDPBackplaneEthernetTokenringFastEthernetRedundantPower:
+ s = "Ethernet, Tokenring, Fast Ethernet with redundant power"
+ case NDPBackplaneEthernetFastEthernetGigabitEthernet:
+ s = "Ethernet, Fast Ethernet and Gigabit Ethernet"
+ default:
+ s = "Unknown"
+ }
+ return
+}
+
+func (t NDPState) String() (s string) {
+ switch t {
+ case NDPStateTopology:
+ s = "Topology Change"
+ case NDPStateHeartbeat:
+ s = "Heartbeat"
+ case NDPStateNew:
+ s = "New"
+ default:
+ s = "Unknown"
+ }
+ return
+}
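+
+// Illustrative sketch (hypothetical bytes): the 11-byte NDP body decoded by
+// decodeNortelDiscovery above could be exercised directly through
+// gopacket.NewPacket:
+//
+//	raw := []byte{
+//	    10, 0, 0, 1, // IPAddress 10.0.0.1
+//	    0, 0, 0, // SegmentID
+//	    12,      // Chassis: NDPChassis5000
+//	    2,       // Backplane: NDPBackplaneEthernet
+//	    2,       // State: NDPStateHeartbeat
+//	    1,       // NumLinks
+//	}
+//	pkt := gopacket.NewPacket(raw, LayerTypeNortelDiscovery, gopacket.Default)
+//	if ndp, ok := pkt.Layer(LayerTypeNortelDiscovery).(*NortelDiscovery); ok {
+//	    fmt.Println(ndp.IPAddress, ndp.Chassis, ndp.Backplane, ndp.State)
+//	}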
diff --git a/vendor/github.com/google/gopacket/layers/ntp.go b/vendor/github.com/google/gopacket/layers/ntp.go
new file mode 100644
index 0000000..33c15b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ntp.go
@@ -0,0 +1,416 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// Network Time Protocol (NTP) Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for NTP.
+//
+//******************************************************************************
+//
+// About The Network Time Protocol (NTP)
+// -------------------------------------
+// NTP is a protocol that enables computers on the internet to set their
+// clocks to the correct time (or to a time that is acceptably close to the
+// correct time). NTP runs on top of UDP.
+//
+// There have been a series of versions of the NTP protocol. The latest
+// version is V4 and is specified in RFC 5905:
+// http://www.ietf.org/rfc/rfc5905.txt
+//
+//******************************************************************************
+//
+// References
+// ----------
+//
+// Wikipedia's NTP entry:
+// https://en.wikipedia.org/wiki/Network_Time_Protocol
+// This is the best place to get an overview of NTP.
+//
+// Network Time Protocol Home Website:
+// http://www.ntp.org/
+// This appears to be the official website of NTP.
+//
+// List of current NTP Protocol RFCs:
+// http://www.ntp.org/rfc.html
+//
+// RFC 958: "Network Time Protocol (NTP)" (1985)
+// https://tools.ietf.org/html/rfc958
+// This is the original NTP specification.
+//
+// RFC 1305: "Network Time Protocol (Version 3) Specification, Implementation and Analysis" (1992)
+// https://tools.ietf.org/html/rfc1305
+// The protocol was updated in 1992 yielding NTP V3.
+//
+// RFC 5905: "Network Time Protocol Version 4: Protocol and Algorithms Specification" (2010)
+// https://www.ietf.org/rfc/rfc5905.txt
+// The protocol was updated in 2010 yielding NTP V4.
+// V4 is backwards compatible with all previous versions of NTP.
+//
+// RFC 5906: "Network Time Protocol Version 4: Autokey Specification"
+// https://tools.ietf.org/html/rfc5906
+// This document addresses the security of the NTP protocol
+// and is probably not relevant to this package.
+//
+// RFC 5907: "Definitions of Managed Objects for Network Time Protocol Version 4 (NTPv4)"
+// https://tools.ietf.org/html/rfc5907
+// This document addresses the management of NTP servers and
+// is probably not relevant to this package.
+//
+// RFC 5908: "Network Time Protocol (NTP) Server Option for DHCPv6"
+// https://tools.ietf.org/html/rfc5908
+// This document addresses the use of NTP in DHCPv6 and is
+// probably not relevant to this package.
+//
+// "Let's make a NTP Client in C"
+// https://lettier.github.io/posts/2016-04-26-lets-make-a-ntp-client-in-c.html
+// This web page contains useful information about the details of NTP,
+// including an NTP record structure in C, and C code.
+//
+// "NTP Packet Header (NTP Reference Implementation) (Computer Network Time Synchronization)"
+// http://what-when-how.com/computer-network-time-synchronization/
+// ntp-packet-header-ntp-reference-implementation-computer-network-time-synchronization/
+// This web page contains useful information on the details of NTP.
+//
+// "Technical information - NTP Data Packet"
+// https://www.meinbergglobal.com/english/info/ntp-packet.htm
+// This page has a helpful diagram of an NTP V4 packet.
+//
+//******************************************************************************
+//
+// Obsolete References
+// -------------------
+//
+// RFC 1119: "RFC-1119 "Network Time Protocol (Version 2) Specification and Implementation" (1989)
+// https://tools.ietf.org/html/rfc1119
+// Version 2 was drafted in 1989.
+// It is unclear whether V2 was ever implemented or whether the
+// ideas ended up in V3 (which was implemented in 1992).
+//
+// RFC 1361: "Simple Network Time Protocol (SNTP)"
+// https://tools.ietf.org/html/rfc1361
+// This document is obsoleted by RFC 1769 and is included only for completeness.
+//
+// RFC 1769: "Simple Network Time Protocol (SNTP)"
+// https://tools.ietf.org/html/rfc1769
+// This document is obsoleted by RFC 2030 and RFC 4330 and is included only for completeness.
+//
+// RFC 2030: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+// https://tools.ietf.org/html/rfc2030
+// This document is obsoleted by RFC 4330 and is included only for completeness.
+//
+// RFC 4330: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+// https://tools.ietf.org/html/rfc4330
+// This document is obsoleted by RFC 5905 and is included only for completeness.
+//
+//******************************************************************************
+//
+// Endian And Bit Numbering Issues
+// -------------------------------
+//
+// Endian and bit numbering issues can be confusing. Here is some
+// clarification:
+//
+// ENDIAN: Values are sent big endian.
+// https://en.wikipedia.org/wiki/Endianness
+//
+// BIT NUMBERING: Bits are numbered 0 upwards from the most significant
+// bit to the least significant bit. This means that if there is a 32-bit
+// value, the most significant bit is called bit 0 and the least
+// significant bit is called bit 31.
+//
+// See RFC 791 Appendix B for more discussion.
+//
+//******************************************************************************
+//
+// NTP V3 and V4 Packet Format
+// ---------------------------
+// NTP packets are UDP packets whose payload contains an NTP record.
+//
+// The NTP RFC defines the format of the NTP record.
+//
+// There have been four versions of the protocol:
+//
+// V1 in 1985
+// V2 in 1989
+// V3 in 1992
+// V4 in 2010
+//
+// It is clear that V1 and V2 are obsolete, and there is no need to
+// cater for these formats.
+//
+// V3 and V4 essentially use the same format, with V4 adding some optional
+// fields on the end. So this package supports the V3 and V4 formats.
+//
+// The RFC for the current version of NTP (V4, RFC 5905) contains
+// the following diagram for the NTP record format:
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |LI | VN |Mode | Stratum | Poll | Precision |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Root Delay |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Root Dispersion |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Reference ID |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Reference Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Origin Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Receive Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// + Transmit Timestamp (64) +
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . .
+// . Extension Field 1 (variable) .
+// . .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// . .
+// . Extension Field 2 (variable) .
+// . .
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Key Identifier |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | |
+// | dgst (128) |
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// From http://www.ietf.org/rfc/rfc5905.txt
+//
+// The fields "Extension Field 1 (variable)" and later are optional fields,
+// and so we can set a minimum NTP record size of 48 bytes.
+//
+const ntpMinimumRecordSizeInBytes int = 48
+
+//******************************************************************************
+
+// NTP Type
+// --------
+// Type NTP implements the DecodingLayer interface. Each NTP object
+// represents in a structured form the NTP record present as the UDP
+// payload in an NTP UDP packet.
+//
+
+type NTPLeapIndicator uint8
+type NTPVersion uint8
+type NTPMode uint8
+type NTPStratum uint8
+type NTPLog2Seconds int8
+type NTPFixed16Seconds uint32
+type NTPReferenceID uint32
+type NTPTimestamp uint64
+
+type NTP struct {
+ BaseLayer // Stores the packet bytes and payload bytes.
+
+ LeapIndicator NTPLeapIndicator // [0,3]. Indicates whether leap second(s) is to be added.
+ Version NTPVersion // [0,7]. Version of the NTP protocol.
+ Mode NTPMode // [0,7]. Mode.
+ Stratum NTPStratum // [0,255]. Stratum of time server in the server tree.
+ Poll NTPLog2Seconds // [-128,127]. The maximum interval between successive messages, in log2 seconds.
+ Precision NTPLog2Seconds // [-128,127]. The precision of the system clock, in log2 seconds.
+ RootDelay NTPFixed16Seconds // [0,2^32-1]. Total round trip delay to the reference clock in seconds times 2^16.
+ RootDispersion NTPFixed16Seconds // [0,2^32-1]. Total dispersion to the reference clock, in seconds times 2^16.
+ ReferenceID NTPReferenceID // ID code of reference clock [0,2^32-1].
+ ReferenceTimestamp NTPTimestamp // Most recent timestamp from the reference clock.
+ OriginTimestamp NTPTimestamp // Local time when request was sent from local host.
+ ReceiveTimestamp NTPTimestamp // Local time (on server) that request arrived at server host.
+ TransmitTimestamp NTPTimestamp // Local time (on server) that request departed server host.
+
+ // FIX: This package should analyse the extension fields and represent the extension fields too.
+ ExtensionBytes []byte // Just put extensions in a byte slice.
+}
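+
+// Note (informative, not part of the RFC text above): NTP timestamps are
+// 64-bit fixed-point values, with the upper 32 bits counting seconds since
+// 1900-01-01 and the lower 32 bits holding the binary fraction of a second.
+// A sketch of converting an NTPTimestamp ts to a time.Time (assuming the
+// standard "time" package):
+//
+//	secs := int64(ts >> 32)
+//	frac := int64(ts & 0xFFFFFFFF)
+//	epoch := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
+//	t := epoch.Add(time.Duration(secs)*time.Second +
+//	    time.Duration((frac*1000000000)>>32)*time.Nanosecond)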
+
+//******************************************************************************
+
+// LayerType returns the layer type of the NTP object, which is LayerTypeNTP.
+func (d *NTP) LayerType() gopacket.LayerType {
+ return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// decodeNTP analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns a non-nil error.
+//
+// This function is employed in layertypes.go to register the NTP layer.
+func decodeNTP(data []byte, p gopacket.PacketBuilder) error {
+
+ // Attempt to decode the byte slice.
+ d := &NTP{}
+ err := d.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+
+ // If the decoding worked, add the layer to the packet and set it
+ // as the application layer too, if there isn't already one.
+ p.AddLayer(d)
+ p.SetApplicationLayer(d)
+
+ return nil
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// Upon success, it loads the NTP object with information about the packet
+// and returns nil.
+// Upon failure, it returns a non-nil error.
+func (d *NTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ // If the data block is too short to be an NTP record, then return an error.
+ if len(data) < ntpMinimumRecordSizeInBytes {
+ df.SetTruncated()
+ return errors.New("NTP packet too short")
+ }
+
+ // RFC 5905 does not appear to define a maximum NTP record length.
+ // The protocol allows "extension fields" to be included in the record,
+ // and states about these fields:"
+ //
+ // "While the minimum field length containing required fields is
+ // four words (16 octets), a maximum field length remains to be
+ // established."
+ //
+ // For this reason, the packet length is not checked here for being too long.
+
+ // NTP type embeds type BaseLayer which contains two fields:
+ // Contents is supposed to contain the bytes of the data at this level.
+ // Payload is supposed to contain the payload of this level.
+ // Here we set the baselayer to be the bytes of the NTP record.
+ d.BaseLayer = BaseLayer{Contents: data}
+
+ // Extract the fields from the block of bytes.
+ // To make sense of this, refer to the packet diagram
+ // above and the section on endian conventions.
+
+ // The first few fields are all packed into the first 32 bits. Unpack them.
+ f := data[0]
+ d.LeapIndicator = NTPLeapIndicator((f & 0xC0) >> 6)
+ d.Version = NTPVersion((f & 0x38) >> 3)
+ d.Mode = NTPMode(f & 0x07)
+ d.Stratum = NTPStratum(data[1])
+ d.Poll = NTPLog2Seconds(data[2])
+ d.Precision = NTPLog2Seconds(data[3])
+
+ // The remaining fields can just be copied in big endian order.
+ d.RootDelay = NTPFixed16Seconds(binary.BigEndian.Uint32(data[4:8]))
+ d.RootDispersion = NTPFixed16Seconds(binary.BigEndian.Uint32(data[8:12]))
+ d.ReferenceID = NTPReferenceID(binary.BigEndian.Uint32(data[12:16]))
+ d.ReferenceTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[16:24]))
+ d.OriginTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[24:32]))
+ d.ReceiveTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[32:40]))
+ d.TransmitTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[40:48]))
+
+ // This layer does not attempt to analyse the extension bytes.
+ // But if there are any, we'd like the user to know. So we just
+ // place them all in an ExtensionBytes field.
+ d.ExtensionBytes = data[48:]
+
+ // Return no error.
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *NTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ data, err := b.PrependBytes(ntpMinimumRecordSizeInBytes)
+ if err != nil {
+ return err
+ }
+
+ // Pack the first few fields into the first 32 bits.
+ h := uint8(0)
+ h |= (uint8(d.LeapIndicator) << 6) & 0xC0
+ h |= (uint8(d.Version) << 3) & 0x38
+ h |= (uint8(d.Mode)) & 0x07
+ data[0] = byte(h)
+ data[1] = byte(d.Stratum)
+ data[2] = byte(d.Poll)
+ data[3] = byte(d.Precision)
+
+ // The remaining fields can just be copied in big endian order.
+ binary.BigEndian.PutUint32(data[4:8], uint32(d.RootDelay))
+ binary.BigEndian.PutUint32(data[8:12], uint32(d.RootDispersion))
+ binary.BigEndian.PutUint32(data[12:16], uint32(d.ReferenceID))
+ binary.BigEndian.PutUint64(data[16:24], uint64(d.ReferenceTimestamp))
+ binary.BigEndian.PutUint64(data[24:32], uint64(d.OriginTimestamp))
+ binary.BigEndian.PutUint64(data[32:40], uint64(d.ReceiveTimestamp))
+ binary.BigEndian.PutUint64(data[40:48], uint64(d.TransmitTimestamp))
+
+ ex, err := b.AppendBytes(len(d.ExtensionBytes))
+ if err != nil {
+ return err
+ }
+ copy(ex, d.ExtensionBytes)
+
+ return nil
+}
+
+//******************************************************************************
+
+// CanDecode returns the set of layers that NTP objects can decode.
+// As NTP objects can only decode the NTP layer, we can return just that layer.
+// A single LayerType value implements the LayerClass interface, so we can return it directly.
+func (d *NTP) CanDecode() gopacket.LayerClass {
+ return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (NTP) layer. As NTP packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *NTP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+//******************************************************************************
+
+// Payload returns nil, as NTP packets do not carry any data payload.
+// In Go, a nil slice is functionally identical to an empty slice, so we
+// return nil to avoid a heap allocation.
+func (d *NTP) Payload() []byte {
+ return nil
+}
+
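+// Illustrative usage sketch (udpPayload stands for a hypothetical byte slice
+// holding the UDP payload of a captured NTP packet):
+//
+//	var ntp NTP
+//	if err := ntp.DecodeFromBytes(udpPayload, gopacket.NilDecodeFeedback); err == nil {
+//	    fmt.Printf("v%d mode=%d stratum=%d\n", ntp.Version, ntp.Mode, ntp.Stratum)
+//	}
+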
+//******************************************************************************
+//* End Of NTP File *
+//******************************************************************************
diff --git a/vendor/github.com/google/gopacket/layers/ospf.go b/vendor/github.com/google/gopacket/layers/ospf.go
new file mode 100644
index 0000000..04a95c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ospf.go
@@ -0,0 +1,699 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// OSPFType denotes what kind of OSPF type it is
+type OSPFType uint8
+
+// Potential values for OSPF.Type.
+const (
+ OSPFHello OSPFType = 1
+ OSPFDatabaseDescription OSPFType = 2
+ OSPFLinkStateRequest OSPFType = 3
+ OSPFLinkStateUpdate OSPFType = 4
+ OSPFLinkStateAcknowledgment OSPFType = 5
+)
+
+// LSA Function Codes for LSAheader.LSType
+const (
+ RouterLSAtypeV2 = 0x1
+ RouterLSAtype = 0x2001
+ NetworkLSAtypeV2 = 0x2
+ NetworkLSAtype = 0x2002
+ SummaryLSANetworktypeV2 = 0x3
+ InterAreaPrefixLSAtype = 0x2003
+ SummaryLSAASBRtypeV2 = 0x4
+ InterAreaRouterLSAtype = 0x2004
+ ASExternalLSAtypeV2 = 0x5
+ ASExternalLSAtype = 0x4005
+ NSSALSAtype = 0x2007
+ NSSALSAtypeV2 = 0x7
+ LinkLSAtype = 0x0008
+ IntraAreaPrefixLSAtype = 0x2009
+)
+
+// String conversions for OSPFType
+func (i OSPFType) String() string {
+ switch i {
+ case OSPFHello:
+ return "Hello"
+ case OSPFDatabaseDescription:
+ return "Database Description"
+ case OSPFLinkStateRequest:
+ return "Link State Request"
+ case OSPFLinkStateUpdate:
+ return "Link State Update"
+ case OSPFLinkStateAcknowledgment:
+ return "Link State Acknowledgment"
+ default:
+ return ""
+ }
+}
+
+// Prefix extends IntraAreaPrefixLSA
+type Prefix struct {
+ PrefixLength uint8
+ PrefixOptions uint8
+ Metric uint16
+ AddressPrefix []byte
+}
+
+// IntraAreaPrefixLSA is the struct from RFC 5340 A.4.10.
+type IntraAreaPrefixLSA struct {
+ NumOfPrefixes uint16
+ RefLSType uint16
+ RefLinkStateID uint32
+ RefAdvRouter uint32
+ Prefixes []Prefix
+}
+
+// LinkLSA is the struct from RFC 5340 A.4.9.
+type LinkLSA struct {
+ RtrPriority uint8
+ Options uint32
+ LinkLocalAddress []byte
+ NumOfPrefixes uint32
+ Prefixes []Prefix
+}
+
+// ASExternalLSAV2 is the struct from RFC 2328 A.4.5.
+type ASExternalLSAV2 struct {
+ NetworkMask uint32
+ ExternalBit uint8
+ Metric uint32
+ ForwardingAddress uint32
+ ExternalRouteTag uint32
+}
+
+// ASExternalLSA is the struct from RFC 5340 A.4.7.
+type ASExternalLSA struct {
+ Flags uint8
+ Metric uint32
+ PrefixLength uint8
+ PrefixOptions uint8
+ RefLSType uint16
+ AddressPrefix []byte
+ ForwardingAddress []byte
+ ExternalRouteTag uint32
+ RefLinkStateID uint32
+}
+
+// InterAreaRouterLSA is the struct from RFC 5340 A.4.6.
+type InterAreaRouterLSA struct {
+ Options uint32
+ Metric uint32
+ DestinationRouterID uint32
+}
+
+// InterAreaPrefixLSA is the struct from RFC 5340 A.4.5.
+type InterAreaPrefixLSA struct {
+ Metric uint32
+ PrefixLength uint8
+ PrefixOptions uint8
+ AddressPrefix []byte
+}
+
+// NetworkLSA is the struct from RFC 5340 A.4.4.
+type NetworkLSA struct {
+ Options uint32
+ AttachedRouter []uint32
+}
+
+// NetworkLSAV2 is the struct from RFC 2328 A.4.3.
+type NetworkLSAV2 struct {
+ NetworkMask uint32
+ AttachedRouter []uint32
+}
+
+// RouterV2 extends RouterLSAV2
+type RouterV2 struct {
+ Type uint8
+ LinkID uint32
+ LinkData uint32
+ Metric uint16
+}
+
+// RouterLSAV2 is the struct from RFC 2328 A.4.2.
+type RouterLSAV2 struct {
+ Flags uint8
+ Links uint16
+ Routers []RouterV2
+}
+
+// Router extends RouterLSA
+type Router struct {
+ Type uint8
+ Metric uint16
+ InterfaceID uint32
+ NeighborInterfaceID uint32
+ NeighborRouterID uint32
+}
+
+// RouterLSA is the struct from RFC 5340 A.4.3.
+type RouterLSA struct {
+ Flags uint8
+ Options uint32
+ Routers []Router
+}
+
+// LSAheader is the struct from RFC 5340 A.4.2 and RFC 2328 A.4.1.
+type LSAheader struct {
+ LSAge uint16
+ LSType uint16
+ LinkStateID uint32
+ AdvRouter uint32
+ LSSeqNumber uint32
+ LSChecksum uint16
+ Length uint16
+ LSOptions uint8
+}
+
+// LSA links LSAheader with the structs from RFC 5340 A.4.
+type LSA struct {
+ LSAheader
+ Content interface{}
+}
+
+// LSUpdate is the struct from RFC 5340 A.3.5.
+type LSUpdate struct {
+ NumOfLSAs uint32
+ LSAs []LSA
+}
+
+// LSReq is the struct from RFC 5340 A.3.4.
+type LSReq struct {
+ LSType uint16
+ LSID uint32
+ AdvRouter uint32
+}
+
+// DbDescPkg is the struct from RFC 5340 A.3.3.
+type DbDescPkg struct {
+ Options uint32
+ InterfaceMTU uint16
+ Flags uint16
+ DDSeqNumber uint32
+ LSAinfo []LSAheader
+}
+
+// HelloPkg is the struct from RFC 5340 A.3.2.
+type HelloPkg struct {
+ InterfaceID uint32
+ RtrPriority uint8
+ Options uint32
+ HelloInterval uint16
+ RouterDeadInterval uint32
+ DesignatedRouterID uint32
+ BackupDesignatedRouterID uint32
+ NeighborID []uint32
+}
+
+// HelloPkgV2 extends the HelloPkg struct with OSPFv2 information
+type HelloPkgV2 struct {
+ HelloPkg
+ NetworkMask uint32
+}
+
+// OSPF is a basic OSPF packet header with the fields common to Version 2 and Version 3.
+type OSPF struct {
+ Version uint8
+ Type OSPFType
+ PacketLength uint16
+ RouterID uint32
+ AreaID uint32
+ Checksum uint16
+ Content interface{}
+}
+
+// OSPFv2 extends the OSPF header with version 2 specific fields
+type OSPFv2 struct {
+ BaseLayer
+ OSPF
+ AuType uint16
+ Authentication uint64
+}
+
+// OSPFv3 extends the OSPF header with version 3 specific fields
+type OSPFv3 struct {
+ BaseLayer
+ OSPF
+ Instance uint8
+ Reserved uint8
+}
+
+// getLSAsv2 parses the LSA information from the packet for OSPFv2
+func getLSAsv2(num uint32, data []byte) ([]LSA, error) {
+ var lsas []LSA
+ var i uint32 = 0
+ var offset uint32 = 0
+ for ; i < num; i++ {
+ lstype := uint16(data[offset+3])
+ lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+ content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+ if err != nil {
+			return nil, fmt.Errorf("Could not extract Link State type: %v", err)
+ }
+ lsa := LSA{
+ LSAheader: LSAheader{
+ LSAge: binary.BigEndian.Uint16(data[offset : offset+2]),
+ LSOptions: data[offset+2],
+ LSType: lstype,
+ LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+ LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+ LSChecksum: binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+ Length: lsalength,
+ },
+ Content: content,
+ }
+ lsas = append(lsas, lsa)
+ offset += uint32(lsalength)
+ }
+ return lsas, nil
+}
+
+// extractLSAInformation parses the type-specific body of an LSA, given its
+// type and total length.
+func extractLSAInformation(lstype, lsalength uint16, data []byte) (interface{}, error) {
+ if lsalength < 20 {
+ return nil, fmt.Errorf("Link State header length %v too short, %v required", lsalength, 20)
+ }
+ if len(data) < int(lsalength) {
+		return nil, fmt.Errorf("Link State data truncated: %v bytes available, %v required", len(data), lsalength)
+ }
+ var content interface{}
+ switch lstype {
+ case RouterLSAtypeV2:
+ var routers []RouterV2
+ links := binary.BigEndian.Uint16(data[22:24])
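+		// Note: the individual link entries that follow are not parsed here;
+		// Routers is left empty and only the link count is recorded.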
+ content = RouterLSAV2{
+ Flags: data[20],
+ Links: links,
+ Routers: routers,
+ }
+ case NSSALSAtypeV2:
+ fallthrough
+ case ASExternalLSAtypeV2:
+ content = ASExternalLSAV2{
+ NetworkMask: binary.BigEndian.Uint32(data[20:24]),
+ ExternalBit: data[24] & 0x80,
+ Metric: binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+ ForwardingAddress: binary.BigEndian.Uint32(data[28:32]),
+ ExternalRouteTag: binary.BigEndian.Uint32(data[32:36]),
+ }
+ case NetworkLSAtypeV2:
+ var routers []uint32
+ var j uint32
+ for j = 24; j < uint32(lsalength); j += 4 {
+ routers = append(routers, binary.BigEndian.Uint32(data[j:j+4]))
+ }
+ content = NetworkLSAV2{
+ NetworkMask: binary.BigEndian.Uint32(data[20:24]),
+ AttachedRouter: routers,
+ }
+ case RouterLSAtype:
+ var routers []Router
+ var j uint32
+ for j = 24; j < uint32(lsalength); j += 16 {
+ router := Router{
+ Type: uint8(data[j]),
+ Metric: binary.BigEndian.Uint16(data[j+2 : j+4]),
+ InterfaceID: binary.BigEndian.Uint32(data[j+4 : j+8]),
+ NeighborInterfaceID: binary.BigEndian.Uint32(data[j+8 : j+12]),
+ NeighborRouterID: binary.BigEndian.Uint32(data[j+12 : j+16]),
+ }
+ routers = append(routers, router)
+ }
+ content = RouterLSA{
+ Flags: uint8(data[20]),
+ Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+ Routers: routers,
+ }
+ case NetworkLSAtype:
+ var routers []uint32
+ var j uint32
+ for j = 24; j < uint32(lsalength); j += 4 {
+ routers = append(routers, binary.BigEndian.Uint32(data[j:j+4]))
+ }
+ content = NetworkLSA{
+ Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+ AttachedRouter: routers,
+ }
+ case InterAreaPrefixLSAtype:
+ content = InterAreaPrefixLSA{
+ Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+ PrefixLength: uint8(data[24]),
+ PrefixOptions: uint8(data[25]),
+ AddressPrefix: data[28:uint32(lsalength)],
+ }
+ case InterAreaRouterLSAtype:
+ content = InterAreaRouterLSA{
+ Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+ Metric: binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+ DestinationRouterID: binary.BigEndian.Uint32(data[28:32]),
+ }
+ case ASExternalLSAtype:
+ fallthrough
+ case NSSALSAtype:
+
+ flags := uint8(data[20])
+ prefixLen := uint8(data[24]) / 8
+ var forwardingAddress []byte
+ if (flags & 0x02) == 0x02 {
+ forwardingAddress = data[28+uint32(prefixLen) : 28+uint32(prefixLen)+16]
+ }
+ content = ASExternalLSA{
+ Flags: flags,
+ Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+ PrefixLength: prefixLen,
+ PrefixOptions: uint8(data[25]),
+ RefLSType: binary.BigEndian.Uint16(data[26:28]),
+ AddressPrefix: data[28 : 28+uint32(prefixLen)],
+ ForwardingAddress: forwardingAddress,
+ }
+ case LinkLSAtype:
+ var prefixes []Prefix
+ var prefixOffset uint32 = 44
+ var j uint32
+ numOfPrefixes := binary.BigEndian.Uint32(data[40:44])
+ for j = 0; j < numOfPrefixes; j++ {
+ prefixLen := uint8(data[prefixOffset])
+ prefix := Prefix{
+ PrefixLength: prefixLen,
+ PrefixOptions: uint8(data[prefixOffset+1]),
+ AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+ }
+ prefixes = append(prefixes, prefix)
+ prefixOffset = prefixOffset + 4 + uint32(prefixLen)/8
+ }
+ content = LinkLSA{
+ RtrPriority: uint8(data[20]),
+ Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+ LinkLocalAddress: data[24:40],
+ NumOfPrefixes: numOfPrefixes,
+ Prefixes: prefixes,
+ }
+ case IntraAreaPrefixLSAtype:
+ var prefixes []Prefix
+ var prefixOffset uint32 = 32
+ var j uint16
+ numOfPrefixes := binary.BigEndian.Uint16(data[20:22])
+ for j = 0; j < numOfPrefixes; j++ {
+ prefixLen := uint8(data[prefixOffset])
+ prefix := Prefix{
+ PrefixLength: prefixLen,
+ PrefixOptions: uint8(data[prefixOffset+1]),
+ Metric: binary.BigEndian.Uint16(data[prefixOffset+2 : prefixOffset+4]),
+ AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+ }
+ prefixes = append(prefixes, prefix)
+ prefixOffset = prefixOffset + 4 + uint32(prefixLen)
+ }
+ content = IntraAreaPrefixLSA{
+ NumOfPrefixes: numOfPrefixes,
+ RefLSType: binary.BigEndian.Uint16(data[22:24]),
+ RefLinkStateID: binary.BigEndian.Uint32(data[24:28]),
+ RefAdvRouter: binary.BigEndian.Uint32(data[28:32]),
+ Prefixes: prefixes,
+ }
+ default:
+ return nil, fmt.Errorf("Unknown Link State type.")
+ }
+ return content, nil
+}
+
+// getLSAs parses the LSA information from the packet for OSPFv3
+func getLSAs(num uint32, data []byte) ([]LSA, error) {
+ var lsas []LSA
+ var i uint32 = 0
+ var offset uint32 = 0
+ for ; i < num; i++ {
+ var content interface{}
+ lstype := binary.BigEndian.Uint16(data[offset+2 : offset+4])
+ lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+
+ content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+ if err != nil {
+			return nil, fmt.Errorf("Could not extract Link State type: %v", err)
+ }
+ lsa := LSA{
+ LSAheader: LSAheader{
+ LSAge: binary.BigEndian.Uint16(data[offset : offset+2]),
+ LSType: lstype,
+ LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+ LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+ LSChecksum: binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+ Length: lsalength,
+ },
+ Content: content,
+ }
+ lsas = append(lsas, lsa)
+ offset += uint32(lsalength)
+ }
+ return lsas, nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPF layer.
+func (ospf *OSPFv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 24 {
+		return fmt.Errorf("Packet too small for OSPF Version 2")
+ }
+
+ ospf.Version = uint8(data[0])
+ ospf.Type = OSPFType(data[1])
+ ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+ ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+ ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+ ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+ ospf.AuType = binary.BigEndian.Uint16(data[14:16])
+ ospf.Authentication = binary.BigEndian.Uint64(data[16:24])
+
+ switch ospf.Type {
+ case OSPFHello:
+ var neighbors []uint32
+ for i := 44; uint16(i+4) <= ospf.PacketLength; i += 4 {
+ neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+ }
+ ospf.Content = HelloPkgV2{
+ NetworkMask: binary.BigEndian.Uint32(data[24:28]),
+ HelloPkg: HelloPkg{
+ HelloInterval: binary.BigEndian.Uint16(data[28:30]),
+ Options: uint32(data[30]),
+ RtrPriority: uint8(data[31]),
+ RouterDeadInterval: binary.BigEndian.Uint32(data[32:36]),
+ DesignatedRouterID: binary.BigEndian.Uint32(data[36:40]),
+ BackupDesignatedRouterID: binary.BigEndian.Uint32(data[40:44]),
+ NeighborID: neighbors,
+ },
+ }
+ case OSPFDatabaseDescription:
+ var lsas []LSAheader
+ for i := 32; uint16(i+20) <= ospf.PacketLength; i += 20 {
+ lsa := LSAheader{
+ LSAge: binary.BigEndian.Uint16(data[i : i+2]),
+ LSType: binary.BigEndian.Uint16(data[i+2 : i+4]),
+ LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+ LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+ LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]),
+ Length: binary.BigEndian.Uint16(data[i+18 : i+20]),
+ }
+ lsas = append(lsas, lsa)
+ }
+ ospf.Content = DbDescPkg{
+ InterfaceMTU: binary.BigEndian.Uint16(data[24:26]),
+ Options: uint32(data[26]),
+ Flags: uint16(data[27]),
+ DDSeqNumber: binary.BigEndian.Uint32(data[28:32]),
+ LSAinfo: lsas,
+ }
+ case OSPFLinkStateRequest:
+ var lsrs []LSReq
+ for i := 24; uint16(i+12) <= ospf.PacketLength; i += 12 {
+ lsr := LSReq{
+ LSType: binary.BigEndian.Uint16(data[i+2 : i+4]),
+ LSID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+ }
+ lsrs = append(lsrs, lsr)
+ }
+ ospf.Content = lsrs
+ case OSPFLinkStateUpdate:
+ num := binary.BigEndian.Uint32(data[24:28])
+
+ lsas, err := getLSAsv2(num, data[28:])
+ if err != nil {
+ return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+ }
+ ospf.Content = LSUpdate{
+ NumOfLSAs: num,
+ LSAs: lsas,
+ }
+ case OSPFLinkStateAcknowledgment:
+ var lsas []LSAheader
+ for i := 24; uint16(i+20) <= ospf.PacketLength; i += 20 {
+ lsa := LSAheader{
+ LSAge: binary.BigEndian.Uint16(data[i : i+2]),
+ LSOptions: data[i+2],
+ LSType: uint16(data[i+3]),
+ LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+ LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+ LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]),
+ Length: binary.BigEndian.Uint16(data[i+18 : i+20]),
+ }
+ lsas = append(lsas, lsa)
+ }
+ ospf.Content = lsas
+ }
+ return nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPF layer.
+func (ospf *OSPFv3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ if len(data) < 16 {
+		return fmt.Errorf("Packet too small for OSPF Version 3")
+ }
+
+ ospf.Version = uint8(data[0])
+ ospf.Type = OSPFType(data[1])
+ ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+ ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+ ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+ ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+ ospf.Instance = uint8(data[14])
+ ospf.Reserved = uint8(data[15])
+
+ switch ospf.Type {
+ case OSPFHello:
+ var neighbors []uint32
+ for i := 36; uint16(i+4) <= ospf.PacketLength; i += 4 {
+ neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+ }
+ ospf.Content = HelloPkg{
+ InterfaceID: binary.BigEndian.Uint32(data[16:20]),
+ RtrPriority: uint8(data[20]),
+ Options: binary.BigEndian.Uint32(data[21:25]) >> 8,
+ HelloInterval: binary.BigEndian.Uint16(data[24:26]),
+ RouterDeadInterval: uint32(binary.BigEndian.Uint16(data[26:28])),
+ DesignatedRouterID: binary.BigEndian.Uint32(data[28:32]),
+ BackupDesignatedRouterID: binary.BigEndian.Uint32(data[32:36]),
+ NeighborID: neighbors,
+ }
+ case OSPFDatabaseDescription:
+ var lsas []LSAheader
+ for i := 28; uint16(i+20) <= ospf.PacketLength; i += 20 {
+ lsa := LSAheader{
+ LSAge: binary.BigEndian.Uint16(data[i : i+2]),
+ LSType: binary.BigEndian.Uint16(data[i+2 : i+4]),
+ LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+ LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+ LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]),
+ Length: binary.BigEndian.Uint16(data[i+18 : i+20]),
+ }
+ lsas = append(lsas, lsa)
+ }
+ ospf.Content = DbDescPkg{
+ Options: binary.BigEndian.Uint32(data[16:20]) & 0x00FFFFFF,
+ InterfaceMTU: binary.BigEndian.Uint16(data[20:22]),
+ Flags: binary.BigEndian.Uint16(data[22:24]),
+ DDSeqNumber: binary.BigEndian.Uint32(data[24:28]),
+ LSAinfo: lsas,
+ }
+ case OSPFLinkStateRequest:
+ var lsrs []LSReq
+ for i := 16; uint16(i+12) <= ospf.PacketLength; i += 12 {
+ lsr := LSReq{
+ LSType: binary.BigEndian.Uint16(data[i+2 : i+4]),
+ LSID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+ }
+ lsrs = append(lsrs, lsr)
+ }
+ ospf.Content = lsrs
+ case OSPFLinkStateUpdate:
+ num := binary.BigEndian.Uint32(data[16:20])
+ lsas, err := getLSAs(num, data[20:])
+ if err != nil {
+ return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+ }
+ ospf.Content = LSUpdate{
+ NumOfLSAs: num,
+ LSAs: lsas,
+ }
+
+ case OSPFLinkStateAcknowledgment:
+ var lsas []LSAheader
+ for i := 16; uint16(i+20) <= ospf.PacketLength; i += 20 {
+ lsa := LSAheader{
+ LSAge: binary.BigEndian.Uint16(data[i : i+2]),
+ LSType: binary.BigEndian.Uint16(data[i+2 : i+4]),
+ LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+ AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+ LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+ LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]),
+ Length: binary.BigEndian.Uint16(data[i+18 : i+20]),
+ }
+ lsas = append(lsas, lsa)
+ }
+ ospf.Content = lsas
+ default:
+ }
+
+ return nil
+}
+
+// LayerType returns LayerTypeOSPF
+func (ospf *OSPFv2) LayerType() gopacket.LayerType {
+ return LayerTypeOSPF
+}
+func (ospf *OSPFv3) LayerType() gopacket.LayerType {
+ return LayerTypeOSPF
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (ospf *OSPFv2) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+func (ospf *OSPFv3) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ospf *OSPFv2) CanDecode() gopacket.LayerClass {
+ return LayerTypeOSPF
+}
+func (ospf *OSPFv3) CanDecode() gopacket.LayerClass {
+ return LayerTypeOSPF
+}
+
+func decodeOSPF(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 14 {
+		return fmt.Errorf("Packet too small for OSPF")
+ }
+
+ switch uint8(data[0]) {
+ case 2:
+ ospf := &OSPFv2{}
+ return decodingLayerDecoder(ospf, data, p)
+ case 3:
+ ospf := &OSPFv3{}
+ return decodingLayerDecoder(ospf, data, p)
+ default:
+ }
+
+ return fmt.Errorf("Unable to determine OSPF type.")
+}
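+
+// A minimal usage sketch (raw is a hypothetical captured frame): OSPF rides
+// directly on IP protocol 89, so a fully decoded packet exposes this layer
+// via LayerTypeOSPF and a type switch selects the version:
+//
+//	packet := gopacket.NewPacket(raw, LayerTypeEthernet, gopacket.Default)
+//	if l := packet.Layer(LayerTypeOSPF); l != nil {
+//		switch ospf := l.(type) {
+//		case *OSPFv2:
+//			fmt.Println("OSPFv2", ospf.Type, ospf.RouterID)
+//		case *OSPFv3:
+//			fmt.Println("OSPFv3", ospf.Type, ospf.RouterID)
+//		}
+//	}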
diff --git a/vendor/github.com/google/gopacket/layers/pflog.go b/vendor/github.com/google/gopacket/layers/pflog.go
new file mode 100644
index 0000000..853882f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pflog.go
@@ -0,0 +1,76 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
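+// PFDirection is the direction of a logged packet relative to the interface,
+// as recorded in the pflog header.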
+type PFDirection uint8
+
+const (
+ PFDirectionInOut PFDirection = 0
+ PFDirectionIn PFDirection = 1
+ PFDirectionOut PFDirection = 2
+)
+
+// PFLog provides the layer for 'pf' packet-filter logging, as described at
+// http://www.freebsd.org/cgi/man.cgi?query=pflog&sektion=4
+type PFLog struct {
+ BaseLayer
+ Length uint8
+ Family ProtocolFamily
+ Action, Reason uint8
+ IFName, Ruleset []byte
+ RuleNum, SubruleNum uint32
+ UID uint32
+ PID int32
+ RuleUID uint32
+ RulePID int32
+ Direction PFDirection
+ // The remainder is padding
+}
+
+func (pf *PFLog) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ pf.Length = data[0]
+ pf.Family = ProtocolFamily(data[1])
+ pf.Action = data[2]
+ pf.Reason = data[3]
+ pf.IFName = data[4:20]
+ pf.Ruleset = data[20:36]
+ pf.RuleNum = binary.BigEndian.Uint32(data[36:40])
+ pf.SubruleNum = binary.BigEndian.Uint32(data[40:44])
+ pf.UID = binary.BigEndian.Uint32(data[44:48])
+ pf.PID = int32(binary.BigEndian.Uint32(data[48:52]))
+ pf.RuleUID = binary.BigEndian.Uint32(data[52:56])
+ pf.RulePID = int32(binary.BigEndian.Uint32(data[56:60]))
+ pf.Direction = PFDirection(data[60])
+ if pf.Length%4 != 1 {
+ return errors.New("PFLog header length should be 3 less than multiple of 4")
+ }
+ actualLength := int(pf.Length) + 3
+ pf.Contents = data[:actualLength]
+ pf.Payload = data[actualLength:]
+ return nil
+}
+
+// LayerType returns layers.LayerTypePFLog
+func (pf *PFLog) LayerType() gopacket.LayerType { return LayerTypePFLog }
+
+func (pf *PFLog) CanDecode() gopacket.LayerClass { return LayerTypePFLog }
+
+func (pf *PFLog) NextLayerType() gopacket.LayerType {
+ return pf.Family.LayerType()
+}
+
+func decodePFLog(data []byte, p gopacket.PacketBuilder) error {
+ pf := &PFLog{}
+ return decodingLayerDecoder(pf, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/ports.go b/vendor/github.com/google/gopacket/layers/ports.go
new file mode 100644
index 0000000..7ea5ada
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ports.go
@@ -0,0 +1,155 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/google/gopacket"
+)
+
+// TCPPort is a port in a TCP layer.
+type TCPPort uint16
+
+// UDPPort is a port in a UDP layer.
+type UDPPort uint16
+
+// RUDPPort is a port in a RUDP layer.
+type RUDPPort uint8
+
+// SCTPPort is a port in a SCTP layer.
+type SCTPPort uint16
+
+// UDPLitePort is a port in a UDPLite layer.
+type UDPLitePort uint16
+
+// RUDPPortNames contains the string names for all RUDP ports.
+var RUDPPortNames = map[RUDPPort]string{}
+
+// UDPLitePortNames contains the string names for all UDPLite ports.
+var UDPLitePortNames = map[UDPLitePort]string{}
+
+// {TCP,UDP,SCTP}PortNames can be found in iana_ports.go
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// TCPPortNames.
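+// For example, TCPPort(443).String() typically renders as "443(https)".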
+func (a TCPPort) String() string {
+ if name, ok := TCPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a TCPPort) LayerType() gopacket.LayerType {
+ lt := tcpPortLayerType[uint16(a)]
+ if lt != 0 {
+ return lt
+ }
+ return gopacket.LayerTypePayload
+}
+
+var tcpPortLayerType = [65536]gopacket.LayerType{
+ 53: LayerTypeDNS,
+ 443: LayerTypeTLS, // https
+ 502: LayerTypeModbusTCP, // modbustcp
+ 636: LayerTypeTLS, // ldaps
+ 989: LayerTypeTLS, // ftps-data
+ 990: LayerTypeTLS, // ftps
+ 992: LayerTypeTLS, // telnets
+ 993: LayerTypeTLS, // imaps
+ 994: LayerTypeTLS, // ircs
+ 995: LayerTypeTLS, // pop3s
+	5061: LayerTypeTLS, // sips (SIP over TLS)
+}
+
+// RegisterTCPPortLayerType creates a new mapping between a TCPPort
+// and an underlying LayerType.
+func RegisterTCPPortLayerType(port TCPPort, layerType gopacket.LayerType) {
+ tcpPortLayerType[port] = layerType
+}
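+
+// A hedged usage sketch (LayerTypeMyProtocol is hypothetical): a custom
+// application-layer decoder can be attached to a port, typically from an
+// init function:
+//
+//	func init() {
+//		RegisterTCPPortLayerType(TCPPort(9999), LayerTypeMyProtocol)
+//	}
+//
+// where LayerTypeMyProtocol was created via gopacket.RegisterLayerType.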
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// UDPPortNames.
+func (a UDPPort) String() string {
+ if name, ok := UDPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a UDPPort) LayerType() gopacket.LayerType {
+ lt := udpPortLayerType[uint16(a)]
+ if lt != 0 {
+ return lt
+ }
+ return gopacket.LayerTypePayload
+}
+
+var udpPortLayerType = [65536]gopacket.LayerType{
+ 53: LayerTypeDNS,
+ 123: LayerTypeNTP,
+ 4789: LayerTypeVXLAN,
+ 67: LayerTypeDHCPv4,
+ 68: LayerTypeDHCPv4,
+ 546: LayerTypeDHCPv6,
+ 547: LayerTypeDHCPv6,
+ 5060: LayerTypeSIP,
+ 6343: LayerTypeSFlow,
+ 6081: LayerTypeGeneve,
+ 3784: LayerTypeBFD,
+ 2152: LayerTypeGTPv1U,
+ 623: LayerTypeRMCP,
+}
+
+// RegisterUDPPortLayerType creates a new mapping between a UDPPort
+// and an underlying LayerType.
+func RegisterUDPPortLayerType(port UDPPort, layerType gopacket.LayerType) {
+ udpPortLayerType[port] = layerType
+}
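+
+// The same pattern applies to UDP (again with a hypothetical layer type):
+//
+//	RegisterUDPPortLayerType(UDPPort(9999), LayerTypeMyProtocol)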
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// RUDPPortNames.
+func (a RUDPPort) String() string {
+ if name, ok := RUDPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// SCTPPortNames.
+func (a SCTPPort) String() string {
+ if name, ok := SCTPPortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// UDPLitePortNames.
+func (a UDPLitePort) String() string {
+ if name, ok := UDPLitePortNames[a]; ok {
+ return fmt.Sprintf("%d(%s)", a, name)
+ }
+ return strconv.Itoa(int(a))
+}
diff --git a/vendor/github.com/google/gopacket/layers/ppp.go b/vendor/github.com/google/gopacket/layers/ppp.go
new file mode 100644
index 0000000..e534d69
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ppp.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "github.com/google/gopacket"
+)
+
+// PPP is the layer for PPP encapsulation headers.
+type PPP struct {
+ BaseLayer
+ PPPType PPPType
+ HasPPTPHeader bool
+}
+
+// PPPEndpoint is a singleton endpoint for PPP. Since there is no actual
+// addressing for the two ends of a PPP connection, we use a singleton value
+// named 'point' for each endpoint.
+var PPPEndpoint = gopacket.NewEndpoint(EndpointPPP, nil)
+
+// PPPFlow is a singleton flow for PPP. Since there is no actual addressing for
+// the two ends of a PPP connection, we use a singleton value to represent the
+// flow for all PPP connections.
+var PPPFlow = gopacket.NewFlow(EndpointPPP, nil, nil)
+
+// LayerType returns LayerTypePPP
+func (p *PPP) LayerType() gopacket.LayerType { return LayerTypePPP }
+
+// LinkFlow returns PPPFlow.
+func (p *PPP) LinkFlow() gopacket.Flow { return PPPFlow }
+
+func decodePPP(data []byte, p gopacket.PacketBuilder) error {
+ ppp := &PPP{}
+ offset := 0
+ if data[0] == 0xff && data[1] == 0x03 {
+ offset = 2
+ ppp.HasPPTPHeader = true
+ }
+ if data[offset]&0x1 == 0 {
+ if data[offset+1]&0x1 == 0 {
+ return errors.New("PPP has invalid type")
+ }
+ ppp.PPPType = PPPType(binary.BigEndian.Uint16(data[offset : offset+2]))
+ ppp.Contents = data[offset : offset+2]
+ ppp.Payload = data[offset+2:]
+ } else {
+ ppp.PPPType = PPPType(data[offset])
+ ppp.Contents = data[offset : offset+1]
+ ppp.Payload = data[offset+1:]
+ }
+ p.AddLayer(ppp)
+ p.SetLinkLayer(ppp)
+ return p.NextDecoder(ppp.PPPType)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ if p.PPPType&0x100 == 0 {
+ bytes, err := b.PrependBytes(2)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(p.PPPType))
+ } else {
+ bytes, err := b.PrependBytes(1)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(p.PPPType)
+ }
+ if p.HasPPTPHeader {
+ bytes, err := b.PrependBytes(2)
+ if err != nil {
+ return err
+ }
+ bytes[0] = 0xff
+ bytes[1] = 0x03
+ }
+ return nil
+}
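+
+// A minimal usage sketch: prepending a PPP header to an existing payload
+// (the payload bytes are illustrative only):
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	ppp := &PPP{PPPType: PPPTypeIPv4, HasPPTPHeader: true}
+//	_ = gopacket.SerializeLayers(buf, gopacket.SerializeOptions{},
+//		ppp, gopacket.Payload([]byte{0x45, 0x00}))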
diff --git a/vendor/github.com/google/gopacket/layers/pppoe.go b/vendor/github.com/google/gopacket/layers/pppoe.go
new file mode 100644
index 0000000..14cd63a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pppoe.go
@@ -0,0 +1,60 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "github.com/google/gopacket"
+)
+
+// PPPoE is the layer for PPPoE encapsulation headers.
+type PPPoE struct {
+ BaseLayer
+ Version uint8
+ Type uint8
+ Code PPPoECode
+ SessionId uint16
+ Length uint16
+}
+
+// LayerType returns gopacket.LayerTypePPPoE.
+func (p *PPPoE) LayerType() gopacket.LayerType {
+ return LayerTypePPPoE
+}
+
+// decodePPPoE decodes the PPPoE header (see http://tools.ietf.org/html/rfc2516).
+func decodePPPoE(data []byte, p gopacket.PacketBuilder) error {
+ pppoe := &PPPoE{
+ Version: data[0] >> 4,
+ Type: data[0] & 0x0F,
+ Code: PPPoECode(data[1]),
+ SessionId: binary.BigEndian.Uint16(data[2:4]),
+ Length: binary.BigEndian.Uint16(data[4:6]),
+ }
+ pppoe.BaseLayer = BaseLayer{data[:6], data[6 : 6+pppoe.Length]}
+ p.AddLayer(pppoe)
+ return p.NextDecoder(pppoe.Code)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPPoE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ payload := b.Bytes()
+ bytes, err := b.PrependBytes(6)
+ if err != nil {
+ return err
+ }
+ bytes[0] = (p.Version << 4) | p.Type
+ bytes[1] = byte(p.Code)
+ binary.BigEndian.PutUint16(bytes[2:], p.SessionId)
+ if opts.FixLengths {
+ p.Length = uint16(len(payload))
+ }
+ binary.BigEndian.PutUint16(bytes[4:], p.Length)
+ return nil
+}
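+
+// A short usage note: with opts.FixLengths set, Length is recomputed from
+// whatever is already in the buffer, so PPPoE should be serialized after its
+// payload (pppBytes is a hypothetical, already-built PPP frame):
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	_ = gopacket.SerializeLayers(buf,
+//		gopacket.SerializeOptions{FixLengths: true},
+//		&PPPoE{Version: 1, Type: 1, SessionId: 0x1234},
+//		gopacket.Payload(pppBytes))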
diff --git a/vendor/github.com/google/gopacket/layers/prism.go b/vendor/github.com/google/gopacket/layers/prism.go
new file mode 100644
index 0000000..e1711e7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/prism.go
@@ -0,0 +1,146 @@
+// Copyright 2015 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// http://www.tcpdump.org/linktypes/LINKTYPE_IEEE802_11_PRISM.html
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+func decodePrismValue(data []byte, pv *PrismValue) {
+ pv.DID = PrismDID(binary.LittleEndian.Uint32(data[0:4]))
+ pv.Status = binary.LittleEndian.Uint16(data[4:6])
+ pv.Length = binary.LittleEndian.Uint16(data[6:8])
+ pv.Data = data[8 : 8+pv.Length]
+}
+
+type PrismDID uint32
+
+const (
+ PrismDIDType1HostTime PrismDID = 0x10044
+ PrismDIDType2HostTime PrismDID = 0x01041
+ PrismDIDType1MACTime PrismDID = 0x20044
+ PrismDIDType2MACTime PrismDID = 0x02041
+ PrismDIDType1Channel PrismDID = 0x30044
+ PrismDIDType2Channel PrismDID = 0x03041
+ PrismDIDType1RSSI PrismDID = 0x40044
+ PrismDIDType2RSSI PrismDID = 0x04041
+ PrismDIDType1SignalQuality PrismDID = 0x50044
+ PrismDIDType2SignalQuality PrismDID = 0x05041
+ PrismDIDType1Signal PrismDID = 0x60044
+ PrismDIDType2Signal PrismDID = 0x06041
+ PrismDIDType1Noise PrismDID = 0x70044
+ PrismDIDType2Noise PrismDID = 0x07041
+ PrismDIDType1Rate PrismDID = 0x80044
+ PrismDIDType2Rate PrismDID = 0x08041
+ PrismDIDType1TransmittedFrameIndicator PrismDID = 0x90044
+ PrismDIDType2TransmittedFrameIndicator PrismDID = 0x09041
+ PrismDIDType1FrameLength PrismDID = 0xA0044
+ PrismDIDType2FrameLength PrismDID = 0x0A041
+)
+
+const (
+ PrismType1MessageCode uint16 = 0x00000044
+ PrismType2MessageCode uint16 = 0x00000041
+)
+
+func (p PrismDID) String() string {
+ dids := map[PrismDID]string{
+ PrismDIDType1HostTime: "Host Time",
+ PrismDIDType2HostTime: "Host Time",
+ PrismDIDType1MACTime: "MAC Time",
+ PrismDIDType2MACTime: "MAC Time",
+ PrismDIDType1Channel: "Channel",
+ PrismDIDType2Channel: "Channel",
+ PrismDIDType1RSSI: "RSSI",
+ PrismDIDType2RSSI: "RSSI",
+ PrismDIDType1SignalQuality: "Signal Quality",
+ PrismDIDType2SignalQuality: "Signal Quality",
+ PrismDIDType1Signal: "Signal",
+ PrismDIDType2Signal: "Signal",
+ PrismDIDType1Noise: "Noise",
+ PrismDIDType2Noise: "Noise",
+ PrismDIDType1Rate: "Rate",
+ PrismDIDType2Rate: "Rate",
+ PrismDIDType1TransmittedFrameIndicator: "Transmitted Frame Indicator",
+ PrismDIDType2TransmittedFrameIndicator: "Transmitted Frame Indicator",
+ PrismDIDType1FrameLength: "Frame Length",
+ PrismDIDType2FrameLength: "Frame Length",
+ }
+
+ if str, ok := dids[p]; ok {
+ return str
+ }
+
+ return "Unknown DID"
+}
+
+type PrismValue struct {
+ DID PrismDID
+ Status uint16
+ Length uint16
+ Data []byte
+}
+
+func (pv *PrismValue) IsSupplied() bool {
+ return pv.Status == 1
+}
+
+var ErrPrismExpectedMoreData = errors.New("Expected more data.")
+var ErrPrismInvalidCode = errors.New("Invalid header code.")
+
+func decodePrismHeader(data []byte, p gopacket.PacketBuilder) error {
+ d := &PrismHeader{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type PrismHeader struct {
+ BaseLayer
+ Code uint16
+ Length uint16
+ DeviceName string
+ Values []PrismValue
+}
+
+func (m *PrismHeader) LayerType() gopacket.LayerType { return LayerTypePrismHeader }
+
+func (m *PrismHeader) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Code = binary.LittleEndian.Uint16(data[0:2])
+	m.Length = binary.LittleEndian.Uint16(data[4:6])
+	m.DeviceName = string(data[8:24])
+	m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: data[m.Length:]}
+
+ switch m.Code {
+ case PrismType1MessageCode:
+ fallthrough
+ case PrismType2MessageCode:
+ // valid message code
+ default:
+ return ErrPrismInvalidCode
+ }
+
+ offset := uint16(24)
+
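+	// Values are packed in fixed 12-byte slots: a 4-byte DID, a 2-byte
+	// status, a 2-byte length and a 4-byte data area.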
+ m.Values = make([]PrismValue, (m.Length-offset)/12)
+ for i := 0; i < len(m.Values); i++ {
+ decodePrismValue(data[offset:offset+12], &m.Values[i])
+ offset += 12
+ }
+
+ if offset != m.Length {
+ return ErrPrismExpectedMoreData
+ }
+
+ return nil
+}
+
+func (m *PrismHeader) CanDecode() gopacket.LayerClass { return LayerTypePrismHeader }
+func (m *PrismHeader) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
diff --git a/vendor/github.com/google/gopacket/layers/radiotap.go b/vendor/github.com/google/gopacket/layers/radiotap.go
new file mode 100644
index 0000000..17c6133
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/radiotap.go
@@ -0,0 +1,1069 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "hash/crc32"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
+// align returns the number of padding bytes that must be skipped so that
+// offset becomes a multiple of width.
+func align(offset uint16, width uint16) uint16 {
+ return ((((offset) + ((width) - 1)) & (^((width) - 1))) - offset)
+}
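+
+// For example, align(6, 8) == 2 (skip two bytes so the next field starts at
+// offset 8), while align(8, 8) == 0.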
+
+type RadioTapPresent uint32
+
+const (
+ RadioTapPresentTSFT RadioTapPresent = 1 << iota
+ RadioTapPresentFlags
+ RadioTapPresentRate
+ RadioTapPresentChannel
+ RadioTapPresentFHSS
+ RadioTapPresentDBMAntennaSignal
+ RadioTapPresentDBMAntennaNoise
+ RadioTapPresentLockQuality
+ RadioTapPresentTxAttenuation
+ RadioTapPresentDBTxAttenuation
+ RadioTapPresentDBMTxPower
+ RadioTapPresentAntenna
+ RadioTapPresentDBAntennaSignal
+ RadioTapPresentDBAntennaNoise
+ RadioTapPresentRxFlags
+ RadioTapPresentTxFlags
+ RadioTapPresentRtsRetries
+ RadioTapPresentDataRetries
+ _
+ RadioTapPresentMCS
+ RadioTapPresentAMPDUStatus
+ RadioTapPresentVHT
+ RadioTapPresentEXT RadioTapPresent = 1 << 31
+)
+
+func (r RadioTapPresent) TSFT() bool {
+ return r&RadioTapPresentTSFT != 0
+}
+func (r RadioTapPresent) Flags() bool {
+ return r&RadioTapPresentFlags != 0
+}
+func (r RadioTapPresent) Rate() bool {
+ return r&RadioTapPresentRate != 0
+}
+func (r RadioTapPresent) Channel() bool {
+ return r&RadioTapPresentChannel != 0
+}
+func (r RadioTapPresent) FHSS() bool {
+ return r&RadioTapPresentFHSS != 0
+}
+func (r RadioTapPresent) DBMAntennaSignal() bool {
+ return r&RadioTapPresentDBMAntennaSignal != 0
+}
+func (r RadioTapPresent) DBMAntennaNoise() bool {
+ return r&RadioTapPresentDBMAntennaNoise != 0
+}
+func (r RadioTapPresent) LockQuality() bool {
+ return r&RadioTapPresentLockQuality != 0
+}
+func (r RadioTapPresent) TxAttenuation() bool {
+ return r&RadioTapPresentTxAttenuation != 0
+}
+func (r RadioTapPresent) DBTxAttenuation() bool {
+ return r&RadioTapPresentDBTxAttenuation != 0
+}
+func (r RadioTapPresent) DBMTxPower() bool {
+ return r&RadioTapPresentDBMTxPower != 0
+}
+func (r RadioTapPresent) Antenna() bool {
+ return r&RadioTapPresentAntenna != 0
+}
+func (r RadioTapPresent) DBAntennaSignal() bool {
+ return r&RadioTapPresentDBAntennaSignal != 0
+}
+func (r RadioTapPresent) DBAntennaNoise() bool {
+ return r&RadioTapPresentDBAntennaNoise != 0
+}
+func (r RadioTapPresent) RxFlags() bool {
+ return r&RadioTapPresentRxFlags != 0
+}
+func (r RadioTapPresent) TxFlags() bool {
+ return r&RadioTapPresentTxFlags != 0
+}
+func (r RadioTapPresent) RtsRetries() bool {
+ return r&RadioTapPresentRtsRetries != 0
+}
+func (r RadioTapPresent) DataRetries() bool {
+ return r&RadioTapPresentDataRetries != 0
+}
+func (r RadioTapPresent) MCS() bool {
+ return r&RadioTapPresentMCS != 0
+}
+func (r RadioTapPresent) AMPDUStatus() bool {
+ return r&RadioTapPresentAMPDUStatus != 0
+}
+func (r RadioTapPresent) VHT() bool {
+ return r&RadioTapPresentVHT != 0
+}
+func (r RadioTapPresent) EXT() bool {
+ return r&RadioTapPresentEXT != 0
+}
+
+type RadioTapChannelFlags uint16
+
+const (
+ RadioTapChannelFlagsTurbo RadioTapChannelFlags = 0x0010 // Turbo channel
+ RadioTapChannelFlagsCCK RadioTapChannelFlags = 0x0020 // CCK channel
+ RadioTapChannelFlagsOFDM RadioTapChannelFlags = 0x0040 // OFDM channel
+ RadioTapChannelFlagsGhz2 RadioTapChannelFlags = 0x0080 // 2 GHz spectrum channel.
+ RadioTapChannelFlagsGhz5 RadioTapChannelFlags = 0x0100 // 5 GHz spectrum channel
+ RadioTapChannelFlagsPassive RadioTapChannelFlags = 0x0200 // Only passive scan allowed
+ RadioTapChannelFlagsDynamic RadioTapChannelFlags = 0x0400 // Dynamic CCK-OFDM channel
+ RadioTapChannelFlagsGFSK RadioTapChannelFlags = 0x0800 // GFSK channel (FHSS PHY)
+)
+
+func (r RadioTapChannelFlags) Turbo() bool {
+ return r&RadioTapChannelFlagsTurbo != 0
+}
+func (r RadioTapChannelFlags) CCK() bool {
+ return r&RadioTapChannelFlagsCCK != 0
+}
+func (r RadioTapChannelFlags) OFDM() bool {
+ return r&RadioTapChannelFlagsOFDM != 0
+}
+func (r RadioTapChannelFlags) Ghz2() bool {
+ return r&RadioTapChannelFlagsGhz2 != 0
+}
+func (r RadioTapChannelFlags) Ghz5() bool {
+ return r&RadioTapChannelFlagsGhz5 != 0
+}
+func (r RadioTapChannelFlags) Passive() bool {
+ return r&RadioTapChannelFlagsPassive != 0
+}
+func (r RadioTapChannelFlags) Dynamic() bool {
+ return r&RadioTapChannelFlagsDynamic != 0
+}
+func (r RadioTapChannelFlags) GFSK() bool {
+ return r&RadioTapChannelFlagsGFSK != 0
+}
+
+// String provides a human readable string for RadioTapChannelFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapChannelFlags value, not its string.
+func (a RadioTapChannelFlags) String() string {
+ var out bytes.Buffer
+ if a.Turbo() {
+ out.WriteString("Turbo,")
+ }
+ if a.CCK() {
+ out.WriteString("CCK,")
+ }
+ if a.OFDM() {
+ out.WriteString("OFDM,")
+ }
+ if a.Ghz2() {
+ out.WriteString("Ghz2,")
+ }
+ if a.Ghz5() {
+ out.WriteString("Ghz5,")
+ }
+ if a.Passive() {
+ out.WriteString("Passive,")
+ }
+ if a.Dynamic() {
+ out.WriteString("Dynamic,")
+ }
+ if a.GFSK() {
+ out.WriteString("GFSK,")
+ }
+
+ if length := out.Len(); length > 0 {
+ return string(out.Bytes()[:length-1]) // strip final comma
+ }
+ return ""
+}
+
+type RadioTapFlags uint8
+
+const (
+ RadioTapFlagsCFP RadioTapFlags = 1 << iota // sent/received during CFP
+	RadioTapFlagsShortPreamble                 // sent/received with short preamble
+	RadioTapFlagsWEP                           // sent/received with WEP encryption
+	RadioTapFlagsFrag                          // sent/received with fragmentation
+	RadioTapFlagsFCS                           // frame includes FCS
+	RadioTapFlagsDatapad                       // frame has padding between 802.11 header and payload (to 32-bit boundary)
+ RadioTapFlagsBadFCS // does not pass FCS check
+ RadioTapFlagsShortGI // HT short GI
+)
+
+func (r RadioTapFlags) CFP() bool {
+ return r&RadioTapFlagsCFP != 0
+}
+func (r RadioTapFlags) ShortPreamble() bool {
+ return r&RadioTapFlagsShortPreamble != 0
+}
+func (r RadioTapFlags) WEP() bool {
+ return r&RadioTapFlagsWEP != 0
+}
+func (r RadioTapFlags) Frag() bool {
+ return r&RadioTapFlagsFrag != 0
+}
+func (r RadioTapFlags) FCS() bool {
+ return r&RadioTapFlagsFCS != 0
+}
+func (r RadioTapFlags) Datapad() bool {
+ return r&RadioTapFlagsDatapad != 0
+}
+func (r RadioTapFlags) BadFCS() bool {
+ return r&RadioTapFlagsBadFCS != 0
+}
+func (r RadioTapFlags) ShortGI() bool {
+ return r&RadioTapFlagsShortGI != 0
+}
+
+// String provides a human readable string for RadioTapFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapFlags value, not its string.
+func (a RadioTapFlags) String() string {
+ var out bytes.Buffer
+ if a.CFP() {
+ out.WriteString("CFP,")
+ }
+ if a.ShortPreamble() {
+ out.WriteString("SHORT-PREAMBLE,")
+ }
+ if a.WEP() {
+ out.WriteString("WEP,")
+ }
+ if a.Frag() {
+ out.WriteString("FRAG,")
+ }
+ if a.FCS() {
+ out.WriteString("FCS,")
+ }
+ if a.Datapad() {
+ out.WriteString("DATAPAD,")
+ }
+ if a.ShortGI() {
+ out.WriteString("SHORT-GI,")
+ }
+
+ if length := out.Len(); length > 0 {
+ return string(out.Bytes()[:length-1]) // strip final comma
+ }
+ return ""
+}
+
+type RadioTapRate uint8
+
+func (a RadioTapRate) String() string {
+ return fmt.Sprintf("%v Mb/s", 0.5*float32(a))
+}
+
+type RadioTapChannelFrequency uint16
+
+func (a RadioTapChannelFrequency) String() string {
+ return fmt.Sprintf("%d MHz", a)
+}
+
+type RadioTapRxFlags uint16
+
+const (
+ RadioTapRxFlagsBadPlcp RadioTapRxFlags = 0x0002
+)
+
+func (self RadioTapRxFlags) BadPlcp() bool {
+ return self&RadioTapRxFlagsBadPlcp != 0
+}
+
+func (self RadioTapRxFlags) String() string {
+ if self.BadPlcp() {
+ return "BADPLCP"
+ }
+ return ""
+}
+
+type RadioTapTxFlags uint16
+
+const (
+ RadioTapTxFlagsFail RadioTapTxFlags = 1 << iota
+ RadioTapTxFlagsCTS
+ RadioTapTxFlagsRTS
+ RadioTapTxFlagsNoACK
+)
+
+func (self RadioTapTxFlags) Fail() bool { return self&RadioTapTxFlagsFail != 0 }
+func (self RadioTapTxFlags) CTS() bool { return self&RadioTapTxFlagsCTS != 0 }
+func (self RadioTapTxFlags) RTS() bool { return self&RadioTapTxFlagsRTS != 0 }
+func (self RadioTapTxFlags) NoACK() bool { return self&RadioTapTxFlagsNoACK != 0 }
+
+func (self RadioTapTxFlags) String() string {
+ var tokens []string
+ if self.Fail() {
+ tokens = append(tokens, "Fail")
+ }
+ if self.CTS() {
+ tokens = append(tokens, "CTS")
+ }
+ if self.RTS() {
+ tokens = append(tokens, "RTS")
+ }
+ if self.NoACK() {
+ tokens = append(tokens, "NoACK")
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapMCS struct {
+ Known RadioTapMCSKnown
+ Flags RadioTapMCSFlags
+ MCS uint8
+}
+
+func (self RadioTapMCS) String() string {
+ var tokens []string
+ if self.Known.Bandwidth() {
+ token := "?"
+ switch self.Flags.Bandwidth() {
+ case 0:
+ token = "20"
+ case 1:
+ token = "40"
+ case 2:
+ token = "40(20L)"
+ case 3:
+ token = "40(20U)"
+ }
+ tokens = append(tokens, token)
+ }
+ if self.Known.MCSIndex() {
+ tokens = append(tokens, fmt.Sprintf("MCSIndex#%d", self.MCS))
+ }
+	if self.Known.GuardInterval() {
+		if self.Flags.ShortGI() {
+			tokens = append(tokens, "shortGI")
+		} else {
+			tokens = append(tokens, "longGI")
+		}
+	}
+	if self.Known.HTFormat() {
+		if self.Flags.Greenfield() {
+			tokens = append(tokens, "HT-greenfield")
+		} else {
+			tokens = append(tokens, "HT-mixed")
+		}
+	}
+	if self.Known.FECType() {
+		if self.Flags.FECLDPC() {
+			tokens = append(tokens, "LDPC")
+		} else {
+			tokens = append(tokens, "BCC")
+		}
+	}
+ if self.Known.STBC() {
+ tokens = append(tokens, fmt.Sprintf("STBC#%d", self.Flags.STBC()))
+ }
+ if self.Known.NESS() {
+ num := 0
+ if self.Known.NESS1() {
+ num |= 0x02
+ }
+ if self.Flags.NESS0() {
+ num |= 0x01
+ }
+ tokens = append(tokens, fmt.Sprintf("num-of-ESS#%d", num))
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapMCSKnown uint8
+
+const (
+ RadioTapMCSKnownBandwidth RadioTapMCSKnown = 1 << iota
+ RadioTapMCSKnownMCSIndex
+ RadioTapMCSKnownGuardInterval
+ RadioTapMCSKnownHTFormat
+ RadioTapMCSKnownFECType
+ RadioTapMCSKnownSTBC
+ RadioTapMCSKnownNESS
+ RadioTapMCSKnownNESS1
+)
+
+func (self RadioTapMCSKnown) Bandwidth() bool { return self&RadioTapMCSKnownBandwidth != 0 }
+func (self RadioTapMCSKnown) MCSIndex() bool { return self&RadioTapMCSKnownMCSIndex != 0 }
+func (self RadioTapMCSKnown) GuardInterval() bool { return self&RadioTapMCSKnownGuardInterval != 0 }
+func (self RadioTapMCSKnown) HTFormat() bool { return self&RadioTapMCSKnownHTFormat != 0 }
+func (self RadioTapMCSKnown) FECType() bool { return self&RadioTapMCSKnownFECType != 0 }
+func (self RadioTapMCSKnown) STBC() bool { return self&RadioTapMCSKnownSTBC != 0 }
+func (self RadioTapMCSKnown) NESS() bool { return self&RadioTapMCSKnownNESS != 0 }
+func (self RadioTapMCSKnown) NESS1() bool { return self&RadioTapMCSKnownNESS1 != 0 }
+
+type RadioTapMCSFlags uint8
+
+const (
+ RadioTapMCSFlagsBandwidthMask RadioTapMCSFlags = 0x03
+ RadioTapMCSFlagsShortGI = 0x04
+ RadioTapMCSFlagsGreenfield = 0x08
+ RadioTapMCSFlagsFECLDPC = 0x10
+ RadioTapMCSFlagsSTBCMask = 0x60
+ RadioTapMCSFlagsNESS0 = 0x80
+)
+
+func (self RadioTapMCSFlags) Bandwidth() int {
+ return int(self & RadioTapMCSFlagsBandwidthMask)
+}
+func (self RadioTapMCSFlags) ShortGI() bool { return self&RadioTapMCSFlagsShortGI != 0 }
+func (self RadioTapMCSFlags) Greenfield() bool { return self&RadioTapMCSFlagsGreenfield != 0 }
+func (self RadioTapMCSFlags) FECLDPC() bool { return self&RadioTapMCSFlagsFECLDPC != 0 }
+func (self RadioTapMCSFlags) STBC() int {
+ return int(self&RadioTapMCSFlagsSTBCMask) >> 5
+}
+func (self RadioTapMCSFlags) NESS0() bool { return self&RadioTapMCSFlagsNESS0 != 0 }
+
+type RadioTapAMPDUStatus struct {
+ Reference uint32
+ Flags RadioTapAMPDUStatusFlags
+ CRC uint8
+}
+
+func (self RadioTapAMPDUStatus) String() string {
+ tokens := []string{
+ fmt.Sprintf("ref#%x", self.Reference),
+ }
+ if self.Flags.ReportZerolen() && self.Flags.IsZerolen() {
+		tokens = append(tokens, "zero-length")
+ }
+ if self.Flags.LastKnown() && self.Flags.IsLast() {
+ tokens = append(tokens, "last")
+ }
+ if self.Flags.DelimCRCErr() {
+ tokens = append(tokens, "delimiter CRC error")
+ }
+ if self.Flags.DelimCRCKnown() {
+ tokens = append(tokens, fmt.Sprintf("delimiter-CRC=%02x", self.CRC))
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapAMPDUStatusFlags uint16
+
+const (
+ RadioTapAMPDUStatusFlagsReportZerolen RadioTapAMPDUStatusFlags = 1 << iota
+ RadioTapAMPDUIsZerolen
+ RadioTapAMPDULastKnown
+ RadioTapAMPDUIsLast
+ RadioTapAMPDUDelimCRCErr
+ RadioTapAMPDUDelimCRCKnown
+)
+
+func (self RadioTapAMPDUStatusFlags) ReportZerolen() bool {
+ return self&RadioTapAMPDUStatusFlagsReportZerolen != 0
+}
+func (self RadioTapAMPDUStatusFlags) IsZerolen() bool { return self&RadioTapAMPDUIsZerolen != 0 }
+func (self RadioTapAMPDUStatusFlags) LastKnown() bool { return self&RadioTapAMPDULastKnown != 0 }
+func (self RadioTapAMPDUStatusFlags) IsLast() bool { return self&RadioTapAMPDUIsLast != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCErr() bool { return self&RadioTapAMPDUDelimCRCErr != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCKnown() bool { return self&RadioTapAMPDUDelimCRCKnown != 0 }
+
+type RadioTapVHT struct {
+ Known RadioTapVHTKnown
+ Flags RadioTapVHTFlags
+ Bandwidth uint8
+ MCSNSS [4]RadioTapVHTMCSNSS
+ Coding uint8
+ GroupId uint8
+ PartialAID uint16
+}
+
+func (self RadioTapVHT) String() string {
+ var tokens []string
+ if self.Known.STBC() {
+ if self.Flags.STBC() {
+ tokens = append(tokens, "STBC")
+ } else {
+ tokens = append(tokens, "no STBC")
+ }
+ }
+ if self.Known.TXOPPSNotAllowed() {
+ if self.Flags.TXOPPSNotAllowed() {
+ tokens = append(tokens, "TXOP doze not allowed")
+ } else {
+ tokens = append(tokens, "TXOP doze allowed")
+ }
+ }
+ if self.Known.GI() {
+ if self.Flags.SGI() {
+ tokens = append(tokens, "short GI")
+ } else {
+ tokens = append(tokens, "long GI")
+ }
+ }
+ if self.Known.SGINSYMDisambiguation() {
+ if self.Flags.SGINSYMMod() {
+ tokens = append(tokens, "NSYM mod 10=9")
+ } else {
+ tokens = append(tokens, "NSYM mod 10!=9 or no short GI")
+ }
+ }
+ if self.Known.LDPCExtraOFDMSymbol() {
+ if self.Flags.LDPCExtraOFDMSymbol() {
+ tokens = append(tokens, "LDPC extra OFDM symbols")
+ } else {
+ tokens = append(tokens, "no LDPC extra OFDM symbols")
+ }
+ }
+ if self.Known.Beamformed() {
+ if self.Flags.Beamformed() {
+ tokens = append(tokens, "beamformed")
+ } else {
+ tokens = append(tokens, "no beamformed")
+ }
+ }
+ if self.Known.Bandwidth() {
+ token := "?"
+ switch self.Bandwidth & 0x1f {
+ case 0:
+ token = "20"
+ case 1:
+ token = "40"
+ case 2:
+ token = "40(20L)"
+ case 3:
+ token = "40(20U)"
+ case 4:
+ token = "80"
+ case 5:
+ token = "80(40L)"
+ case 6:
+ token = "80(40U)"
+ case 7:
+ token = "80(20LL)"
+ case 8:
+ token = "80(20LU)"
+ case 9:
+ token = "80(20UL)"
+ case 10:
+ token = "80(20UU)"
+ case 11:
+ token = "160"
+ case 12:
+ token = "160(80L)"
+ case 13:
+ token = "160(80U)"
+ case 14:
+ token = "160(40LL)"
+ case 15:
+ token = "160(40LU)"
+ case 16:
+ token = "160(40UL)"
+ case 17:
+ token = "160(40UU)"
+ case 18:
+ token = "160(20LLL)"
+ case 19:
+ token = "160(20LLU)"
+ case 20:
+ token = "160(20LUL)"
+ case 21:
+ token = "160(20LUU)"
+ case 22:
+ token = "160(20ULL)"
+ case 23:
+ token = "160(20ULU)"
+ case 24:
+ token = "160(20UUL)"
+ case 25:
+ token = "160(20UUU)"
+ }
+ tokens = append(tokens, token)
+ }
+ for i, MCSNSS := range self.MCSNSS {
+ if MCSNSS.Present() {
+ fec := "?"
+ switch self.Coding & (1 << uint8(i)) {
+ case 0:
+ fec = "BCC"
+ case 1:
+ fec = "LDPC"
+ }
+ tokens = append(tokens, fmt.Sprintf("user%d(%s,%s)", i, MCSNSS.String(), fec))
+ }
+ }
+ if self.Known.GroupId() {
+ tokens = append(tokens,
+ fmt.Sprintf("group=%d", self.GroupId))
+ }
+ if self.Known.PartialAID() {
+ tokens = append(tokens,
+ fmt.Sprintf("partial-AID=%d", self.PartialAID))
+ }
+ return strings.Join(tokens, ",")
+}
+
+type RadioTapVHTKnown uint16
+
+const (
+ RadioTapVHTKnownSTBC RadioTapVHTKnown = 1 << iota
+ RadioTapVHTKnownTXOPPSNotAllowed
+ RadioTapVHTKnownGI
+ RadioTapVHTKnownSGINSYMDisambiguation
+ RadioTapVHTKnownLDPCExtraOFDMSymbol
+ RadioTapVHTKnownBeamformed
+ RadioTapVHTKnownBandwidth
+ RadioTapVHTKnownGroupId
+ RadioTapVHTKnownPartialAID
+)
+
+func (self RadioTapVHTKnown) STBC() bool { return self&RadioTapVHTKnownSTBC != 0 }
+func (self RadioTapVHTKnown) TXOPPSNotAllowed() bool {
+ return self&RadioTapVHTKnownTXOPPSNotAllowed != 0
+}
+func (self RadioTapVHTKnown) GI() bool { return self&RadioTapVHTKnownGI != 0 }
+func (self RadioTapVHTKnown) SGINSYMDisambiguation() bool {
+ return self&RadioTapVHTKnownSGINSYMDisambiguation != 0
+}
+func (self RadioTapVHTKnown) LDPCExtraOFDMSymbol() bool {
+ return self&RadioTapVHTKnownLDPCExtraOFDMSymbol != 0
+}
+func (self RadioTapVHTKnown) Beamformed() bool { return self&RadioTapVHTKnownBeamformed != 0 }
+func (self RadioTapVHTKnown) Bandwidth() bool { return self&RadioTapVHTKnownBandwidth != 0 }
+func (self RadioTapVHTKnown) GroupId() bool { return self&RadioTapVHTKnownGroupId != 0 }
+func (self RadioTapVHTKnown) PartialAID() bool { return self&RadioTapVHTKnownPartialAID != 0 }
+
+type RadioTapVHTFlags uint8
+
+const (
+ RadioTapVHTFlagsSTBC RadioTapVHTFlags = 1 << iota
+ RadioTapVHTFlagsTXOPPSNotAllowed
+ RadioTapVHTFlagsSGI
+ RadioTapVHTFlagsSGINSYMMod
+ RadioTapVHTFlagsLDPCExtraOFDMSymbol
+ RadioTapVHTFlagsBeamformed
+)
+
+func (self RadioTapVHTFlags) STBC() bool { return self&RadioTapVHTFlagsSTBC != 0 }
+func (self RadioTapVHTFlags) TXOPPSNotAllowed() bool {
+ return self&RadioTapVHTFlagsTXOPPSNotAllowed != 0
+}
+func (self RadioTapVHTFlags) SGI() bool { return self&RadioTapVHTFlagsSGI != 0 }
+func (self RadioTapVHTFlags) SGINSYMMod() bool { return self&RadioTapVHTFlagsSGINSYMMod != 0 }
+func (self RadioTapVHTFlags) LDPCExtraOFDMSymbol() bool {
+ return self&RadioTapVHTFlagsLDPCExtraOFDMSymbol != 0
+}
+func (self RadioTapVHTFlags) Beamformed() bool { return self&RadioTapVHTFlagsBeamformed != 0 }
+
+type RadioTapVHTMCSNSS uint8
+
+func (self RadioTapVHTMCSNSS) Present() bool {
+ return self&0x0F != 0
+}
+
+func (self RadioTapVHTMCSNSS) String() string {
+ return fmt.Sprintf("NSS#%dMCS#%d", uint32(self&0xf), uint32(self>>4))
+}
+
+func decodeRadioTap(data []byte, p gopacket.PacketBuilder) error {
+ d := &RadioTap{}
+ // TODO: Should we set LinkLayer here? And implement LinkFlow
+ return decodingLayerDecoder(d, data, p)
+}
+
+type RadioTap struct {
+ BaseLayer
+
+	// Version 0. Only increases for drastic changes; the introduction of compatible new fields does not count.
+ Version uint8
+ // Length of the whole header in bytes, including it_version, it_pad, it_len, and data fields.
+ Length uint16
+	// Present is a bitmap telling which fields are present. Bit 31 (0x80000000) extends the bitmap by another 32-bit word; each additional word sets bit 31 again to chain further extensions.
+ Present RadioTapPresent
+	// TSFT: value in microseconds of the MAC's 64-bit 802.11 Time Synchronization Function timer when the first bit of the MPDU arrived at the MAC. For received frames only.
+ TSFT uint64
+ Flags RadioTapFlags
+ // Rate Tx/Rx data rate
+ Rate RadioTapRate
+ // ChannelFrequency Tx/Rx frequency in MHz, followed by flags
+ ChannelFrequency RadioTapChannelFrequency
+ ChannelFlags RadioTapChannelFlags
+ // FHSS For frequency-hopping radios, the hop set (first byte) and pattern (second byte).
+ FHSS uint16
+ // DBMAntennaSignal RF signal power at the antenna, decibel difference from one milliwatt.
+ DBMAntennaSignal int8
+ // DBMAntennaNoise RF noise power at the antenna, decibel difference from one milliwatt.
+ DBMAntennaNoise int8
+ // LockQuality Quality of Barker code lock. Unitless. Monotonically nondecreasing with "better" lock strength. Called "Signal Quality" in datasheets.
+ LockQuality uint16
+ // TxAttenuation Transmit power expressed as unitless distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels.
+ TxAttenuation uint16
+ // DBTxAttenuation Transmit power expressed as decibel distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels.
+ DBTxAttenuation uint16
+ // DBMTxPower Transmit power expressed as dBm (decibels from a 1 milliwatt reference). This is the absolute power level measured at the antenna port.
+ DBMTxPower int8
+ // Antenna Unitless indication of the Rx/Tx antenna for this packet. The first antenna is antenna 0.
+ Antenna uint8
+ // DBAntennaSignal RF signal power at the antenna, decibel difference from an arbitrary, fixed reference.
+ DBAntennaSignal uint8
+ // DBAntennaNoise RF noise power at the antenna, decibel difference from an arbitrary, fixed reference point.
+ DBAntennaNoise uint8
+ //
+ RxFlags RadioTapRxFlags
+ TxFlags RadioTapTxFlags
+ RtsRetries uint8
+ DataRetries uint8
+ MCS RadioTapMCS
+ AMPDUStatus RadioTapAMPDUStatus
+ VHT RadioTapVHT
+}
+
+func (m *RadioTap) LayerType() gopacket.LayerType { return LayerTypeRadioTap }
+
+func (m *RadioTap) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Version = uint8(data[0])
+ m.Length = binary.LittleEndian.Uint16(data[2:4])
+ m.Present = RadioTapPresent(binary.LittleEndian.Uint32(data[4:8]))
+
+ offset := uint16(4)
+
+ for (binary.LittleEndian.Uint32(data[offset:offset+4]) & 0x80000000) != 0 {
+ // This parser handles only the standard radiotap namespace and
+ // expects all fields to be packed into the first it_present word.
+ // Extended bitmaps are skipped and ignored.
+ offset += 4
+ }
+ offset += 4 // skip the bitmap
+
+ if m.Present.TSFT() {
+ offset += align(offset, 8)
+ m.TSFT = binary.LittleEndian.Uint64(data[offset : offset+8])
+ offset += 8
+ }
+ if m.Present.Flags() {
+ m.Flags = RadioTapFlags(data[offset])
+ offset++
+ }
+ if m.Present.Rate() {
+ m.Rate = RadioTapRate(data[offset])
+ offset++
+ }
+ if m.Present.Channel() {
+ offset += align(offset, 2)
+ m.ChannelFrequency = RadioTapChannelFrequency(binary.LittleEndian.Uint16(data[offset : offset+2]))
+ offset += 2
+ m.ChannelFlags = RadioTapChannelFlags(binary.LittleEndian.Uint16(data[offset : offset+2]))
+ offset += 2
+ }
+ if m.Present.FHSS() {
+ m.FHSS = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.DBMAntennaSignal() {
+ m.DBMAntennaSignal = int8(data[offset])
+ offset++
+ }
+ if m.Present.DBMAntennaNoise() {
+ m.DBMAntennaNoise = int8(data[offset])
+ offset++
+ }
+ if m.Present.LockQuality() {
+ offset += align(offset, 2)
+ m.LockQuality = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.TxAttenuation() {
+ offset += align(offset, 2)
+ m.TxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.DBTxAttenuation() {
+ offset += align(offset, 2)
+ m.DBTxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+ offset += 2
+ }
+ if m.Present.DBMTxPower() {
+ m.DBMTxPower = int8(data[offset])
+ offset++
+ }
+ if m.Present.Antenna() {
+ m.Antenna = uint8(data[offset])
+ offset++
+ }
+ if m.Present.DBAntennaSignal() {
+ m.DBAntennaSignal = uint8(data[offset])
+ offset++
+ }
+ if m.Present.DBAntennaNoise() {
+ m.DBAntennaNoise = uint8(data[offset])
+ offset++
+ }
+ if m.Present.RxFlags() {
+ offset += align(offset, 2)
+ m.RxFlags = RadioTapRxFlags(binary.LittleEndian.Uint16(data[offset:]))
+ offset += 2
+ }
+ if m.Present.TxFlags() {
+ offset += align(offset, 2)
+ m.TxFlags = RadioTapTxFlags(binary.LittleEndian.Uint16(data[offset:]))
+ offset += 2
+ }
+ if m.Present.RtsRetries() {
+ m.RtsRetries = uint8(data[offset])
+ offset++
+ }
+ if m.Present.DataRetries() {
+ m.DataRetries = uint8(data[offset])
+ offset++
+ }
+ if m.Present.MCS() {
+ m.MCS = RadioTapMCS{
+ RadioTapMCSKnown(data[offset]),
+ RadioTapMCSFlags(data[offset+1]),
+ uint8(data[offset+2]),
+ }
+ offset += 3
+ }
+ if m.Present.AMPDUStatus() {
+ offset += align(offset, 4)
+ m.AMPDUStatus = RadioTapAMPDUStatus{
+ Reference: binary.LittleEndian.Uint32(data[offset:]),
+ Flags: RadioTapAMPDUStatusFlags(binary.LittleEndian.Uint16(data[offset+4:])),
+ CRC: uint8(data[offset+6]),
+ }
+ offset += 8
+ }
+ if m.Present.VHT() {
+ offset += align(offset, 2)
+ m.VHT = RadioTapVHT{
+ Known: RadioTapVHTKnown(binary.LittleEndian.Uint16(data[offset:])),
+ Flags: RadioTapVHTFlags(data[offset+2]),
+ Bandwidth: uint8(data[offset+3]),
+ MCSNSS: [4]RadioTapVHTMCSNSS{
+ RadioTapVHTMCSNSS(data[offset+4]),
+ RadioTapVHTMCSNSS(data[offset+5]),
+ RadioTapVHTMCSNSS(data[offset+6]),
+ RadioTapVHTMCSNSS(data[offset+7]),
+ },
+ Coding: uint8(data[offset+8]),
+ GroupId: uint8(data[offset+9]),
+ PartialAID: binary.LittleEndian.Uint16(data[offset+10:]),
+ }
+ offset += 12
+ }
+
+ payload := data[m.Length:]
+
+ // Remove non-standard padding used by some Wi-Fi drivers
+ if m.Flags.Datapad() &&
+ payload[0]&0xC == 0x8 { //&& // Data frame
+ headlen := 24
+ if payload[0]&0x8C == 0x88 { // QoS
+ headlen += 2
+ }
+ if payload[1]&0x3 == 0x3 { // 4 addresses
+ headlen += 2
+ }
+ if headlen%4 == 2 {
+ payload = append(payload[:headlen], payload[headlen+2:len(payload)]...)
+ }
+ }
+
+ if !m.Flags.FCS() {
+ // Dot11.DecodeFromBytes() expects the FCS to be present and performs a hard chop on the checksum.
+ // If a user is handing in subslices or packets from a buffered stream, the capacity of the slice
+ // may extend beyond its length. Rather than expecting callers to enforce cap==len on every packet,
+ // we take the hit in this one case and do a reallocation. If the user DOES enforce cap==len,
+ // then the reallocation happens anyway on the append. This is required because append writes to
+ // the memory directly after the payload when there is sufficient capacity, which callers may not
+ // expect.
+ reallocPayload := make([]byte, len(payload)+4)
+ copy(reallocPayload[0:len(payload)], payload)
+ h := crc32.NewIEEE()
+ h.Write(payload)
+ binary.LittleEndian.PutUint32(reallocPayload[len(payload):], h.Sum32())
+ payload = reallocPayload
+ }
+ m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: payload}
+
+ return nil
+}
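+
+// Radiotap fields are naturally aligned: a field of width w that would start
+// at offset o is preceded by (w - o%w) % w padding bytes. An illustrative
+// sketch of that computation (the align helper used above lives elsewhere in
+// this package and is assumed to behave the same way for power-of-two widths):
+//
+//    func alignPad(offset, width uint16) uint16 {
+//        return (width - offset%width) % width
+//    }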
+
+func (m RadioTap) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf := make([]byte, 1024)
+
+ buf[0] = m.Version
+ buf[1] = 0
+
+ binary.LittleEndian.PutUint32(buf[4:8], uint32(m.Present))
+
+ offset := uint16(4)
+
+ for (binary.LittleEndian.Uint32(buf[offset:offset+4]) & 0x80000000) != 0 {
+ offset += 4
+ }
+
+ offset += 4
+
+ if m.Present.TSFT() {
+ offset += align(offset, 8)
+ binary.LittleEndian.PutUint64(buf[offset:offset+8], m.TSFT)
+ offset += 8
+ }
+
+ if m.Present.Flags() {
+ buf[offset] = uint8(m.Flags)
+ offset++
+ }
+
+ if m.Present.Rate() {
+ buf[offset] = uint8(m.Rate)
+ offset++
+ }
+
+ if m.Present.Channel() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFrequency))
+ offset += 2
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFlags))
+ offset += 2
+ }
+
+ if m.Present.FHSS() {
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.FHSS)
+ offset += 2
+ }
+
+ if m.Present.DBMAntennaSignal() {
+ buf[offset] = byte(m.DBMAntennaSignal)
+ offset++
+ }
+
+ if m.Present.DBMAntennaNoise() {
+ buf[offset] = byte(m.DBMAntennaNoise)
+ offset++
+ }
+
+ if m.Present.LockQuality() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.LockQuality)
+ offset += 2
+ }
+
+ if m.Present.TxAttenuation() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.TxAttenuation)
+ offset += 2
+ }
+
+ if m.Present.DBTxAttenuation() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.DBTxAttenuation)
+ offset += 2
+ }
+
+ if m.Present.DBMTxPower() {
+ buf[offset] = byte(m.DBMTxPower)
+ offset++
+ }
+
+ if m.Present.Antenna() {
+ buf[offset] = uint8(m.Antenna)
+ offset++
+ }
+
+ if m.Present.DBAntennaSignal() {
+ buf[offset] = uint8(m.DBAntennaSignal)
+ offset++
+ }
+
+ if m.Present.DBAntennaNoise() {
+ buf[offset] = uint8(m.DBAntennaNoise)
+ offset++
+ }
+
+ if m.Present.RxFlags() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.RxFlags))
+ offset += 2
+ }
+
+ if m.Present.TxFlags() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.TxFlags))
+ offset += 2
+ }
+
+ if m.Present.RtsRetries() {
+ buf[offset] = m.RtsRetries
+ offset++
+ }
+
+ if m.Present.DataRetries() {
+ buf[offset] = m.DataRetries
+ offset++
+ }
+
+ if m.Present.MCS() {
+ buf[offset] = uint8(m.MCS.Known)
+ buf[offset+1] = uint8(m.MCS.Flags)
+ buf[offset+2] = uint8(m.MCS.MCS)
+
+ offset += 3
+ }
+
+ if m.Present.AMPDUStatus() {
+ offset += align(offset, 4)
+
+ binary.LittleEndian.PutUint32(buf[offset:offset+4], m.AMPDUStatus.Reference)
+ binary.LittleEndian.PutUint16(buf[offset+4:offset+6], uint16(m.AMPDUStatus.Flags))
+
+ buf[offset+6] = m.AMPDUStatus.CRC
+
+ offset += 8
+ }
+
+ if m.Present.VHT() {
+ offset += align(offset, 2)
+
+ binary.LittleEndian.PutUint16(buf[offset:], uint16(m.VHT.Known))
+
+ buf[offset+2] = uint8(m.VHT.Flags)
+ buf[offset+3] = uint8(m.VHT.Bandwidth)
+ buf[offset+4] = uint8(m.VHT.MCSNSS[0])
+ buf[offset+5] = uint8(m.VHT.MCSNSS[1])
+ buf[offset+6] = uint8(m.VHT.MCSNSS[2])
+ buf[offset+7] = uint8(m.VHT.MCSNSS[3])
+ buf[offset+8] = uint8(m.VHT.Coding)
+ buf[offset+9] = uint8(m.VHT.GroupId)
+
+ binary.LittleEndian.PutUint16(buf[offset+10:offset+12], m.VHT.PartialAID)
+
+ offset += 12
+ }
+
+ packetBuf, err := b.PrependBytes(int(offset))
+
+ if err != nil {
+ return err
+ }
+
+ if opts.FixLengths {
+ m.Length = offset
+ }
+
+ binary.LittleEndian.PutUint16(buf[2:4], m.Length)
+
+ copy(packetBuf, buf)
+
+ return nil
+}
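+
+// A serialization sketch (FixLengths recomputes the it_len field from the
+// fields selected by the Present bitmap; rt is assumed to be a populated
+// RadioTap value):
+//
+//    buf := gopacket.NewSerializeBuffer()
+//    opts := gopacket.SerializeOptions{FixLengths: true}
+//    if err := rt.SerializeTo(buf, opts); err == nil {
+//        fmt.Println(len(buf.Bytes())) // length of the serialized radiotap header
+//    }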
+
+func (m *RadioTap) CanDecode() gopacket.LayerClass { return LayerTypeRadioTap }
+func (m *RadioTap) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
diff --git a/vendor/github.com/google/gopacket/layers/rmcp.go b/vendor/github.com/google/gopacket/layers/rmcp.go
new file mode 100644
index 0000000..5474fee
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/rmcp.go
@@ -0,0 +1,170 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+// This file implements the ASF-RMCP header specified in section 3.2.2.2 of
+// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf
+
+import (
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// RMCPClass is the class of an RMCP layer's payload, e.g. ASF or IPMI. This is
+// a 4-bit unsigned int on the wire; all values but 6 (ASF), 7 (IPMI) and 8
+// (OEM-defined) are currently reserved.
+type RMCPClass uint8
+
+// LayerType returns the payload layer type corresponding to a RMCP class.
+func (c RMCPClass) LayerType() gopacket.LayerType {
+ if lt := rmcpClassLayerTypes[uint8(c)]; lt != 0 {
+ return lt
+ }
+ return gopacket.LayerTypePayload
+}
+
+func (c RMCPClass) String() string {
+ return fmt.Sprintf("%v(%v)", uint8(c), c.LayerType())
+}
+
+const (
+ // RMCPVersion1 identifies RMCP v1.0 in the Version header field. Lower
+ // values are considered legacy, while higher values are reserved by the
+ // specification.
+ RMCPVersion1 uint8 = 0x06
+
+ // RMCPNormal indicates a "normal" message, i.e. not an acknowledgement.
+ RMCPNormal uint8 = 0
+
+ // RMCPAck indicates a message is acknowledging a received normal message.
+ RMCPAck uint8 = 1 << 7
+
+ // RMCPClassASF identifies an RMCP message as containing an ASF-RMCP
+ // payload.
+ RMCPClassASF RMCPClass = 0x06
+
+ // RMCPClassIPMI identifies an RMCP message as containing an IPMI payload.
+ RMCPClassIPMI RMCPClass = 0x07
+
+ // RMCPClassOEM identifies an RMCP message as containing an OEM-defined
+ // payload.
+ RMCPClassOEM RMCPClass = 0x08
+)
+
+var (
+ rmcpClassLayerTypes = [16]gopacket.LayerType{
+ RMCPClassASF: LayerTypeASF,
+ // RMCPClassIPMI is yet to be implemented; RMCPClassOEM is deliberately
+ // not implemented, so we return LayerTypePayload for both
+ }
+)
+
+// RegisterRMCPLayerType allows specifying that the payload of an RMCP packet of
+// a certain class should be processed by the provided layer type. This overrides
+// any existing registrations, including defaults.
+func RegisterRMCPLayerType(c RMCPClass, l gopacket.LayerType) {
+ rmcpClassLayerTypes[c] = l
+}
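+
+// For example, a consumer with its own OEM payload decoder (the layer type,
+// decoder and number below are hypothetical) might register it like so:
+//
+//    var LayerTypeMyOEM = gopacket.RegisterLayerType(2500, gopacket.LayerTypeMetadata{
+//        Name:    "MyOEM",
+//        Decoder: gopacket.DecodeFunc(decodeMyOEM),
+//    })
+//
+//    func init() {
+//        layers.RegisterRMCPLayerType(layers.RMCPClassOEM, LayerTypeMyOEM)
+//    }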
+
+// RMCP describes the format of an RMCP header, which forms a UDP payload. See
+// section 3.2.2.2.
+type RMCP struct {
+ BaseLayer
+
+ // Version identifies the version of the RMCP header. 0x06 indicates RMCP
+ // v1.0; lower values are legacy, higher values are reserved.
+ Version uint8
+
+ // Sequence is the sequence number associated with the message. Note that
+ // this rolls over to 0 after 254, not 255. A sequence number of 255
+ // indicates that the receiver must not send an ACK.
+ Sequence uint8
+
+ // Ack indicates whether this packet is an acknowledgement. If it is, the
+ // payload will be empty.
+ Ack bool
+
+ // Class indicates the structure of the payload. There are only 2^4 valid
+ // values, however there is no uint4 data type. N.B. the Ack bit has been
+ // split off into another field. The most significant 4 bits of this field
+ // will always be 0.
+ Class RMCPClass
+}
+
+// LayerType returns LayerTypeRMCP. It partially satisfies Layer and
+// SerializableLayer.
+func (*RMCP) LayerType() gopacket.LayerType {
+ return LayerTypeRMCP
+}
+
+// CanDecode returns LayerTypeRMCP. It partially satisfies DecodingLayer.
+func (r *RMCP) CanDecode() gopacket.LayerClass {
+ return r.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (r *RMCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+ return fmt.Errorf("invalid RMCP header, length %v less than 4",
+ len(data))
+ }
+
+ r.BaseLayer.Contents = data[:4]
+ r.BaseLayer.Payload = data[4:]
+
+ r.Version = uint8(data[0])
+ // 1 byte reserved
+ r.Sequence = uint8(data[2])
+ r.Ack = data[3]&RMCPAck != 0
+ r.Class = RMCPClass(data[3] & 0xF)
+ return nil
+}
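+
+// Byte 3 packs the ACK bit and the class together; for example, 0x86 decodes
+// as an acknowledgement of an ASF-class message:
+//
+//    b := byte(0x86)
+//    ack := b&RMCPAck != 0       // 0x80 bit set -> true
+//    class := RMCPClass(b & 0xF) // 0x06 -> RMCPClassASF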
+
+// NextLayerType returns the data layer of this RMCP layer. This partially
+// satisfies DecodingLayer.
+func (r *RMCP) NextLayerType() gopacket.LayerType {
+ return r.Class.LayerType()
+}
+
+// Payload returns the data layer. It partially satisfies ApplicationLayer.
+func (r *RMCP) Payload() []byte {
+ return r.BaseLayer.Payload
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (r *RMCP) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error {
+ // The IPMI v1.5 spec contains a pad byte for frame sizes of certain lengths
+ // to work around issues in LAN chips. This is no longer necessary as of
+ // IPMI v2.0 (renamed to "legacy pad") so we do not attempt to add it. The
+ // same approach is taken by FreeIPMI:
+ // http://git.savannah.gnu.org/cgit/freeipmi.git/tree/libfreeipmi/interface/ipmi-lan-interface.c?id=b5ffcd38317daf42074458879f4c55ba6804a595#n836
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ bytes[0] = r.Version
+ bytes[1] = 0x00
+ bytes[2] = r.Sequence
+ bytes[3] = bool2uint8(r.Ack)<<7 | uint8(r.Class) // thanks, BFD layer
+ return nil
+}
+
+// decodeRMCP decodes the byte slice into an RMCP type, and sets the application
+// layer to it.
+func decodeRMCP(data []byte, p gopacket.PacketBuilder) error {
+ rmcp := &RMCP{}
+ err := rmcp.DecodeFromBytes(data, p)
+ p.AddLayer(rmcp)
+ p.SetApplicationLayer(rmcp)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(rmcp.NextLayerType())
+}
diff --git a/vendor/github.com/google/gopacket/layers/rudp.go b/vendor/github.com/google/gopacket/layers/rudp.go
new file mode 100644
index 0000000..8435129
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/rudp.go
@@ -0,0 +1,93 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+ "github.com/google/gopacket"
+)
+
+type RUDP struct {
+ BaseLayer
+ SYN, ACK, EACK, RST, NUL bool
+ Version uint8
+ HeaderLength uint8
+ SrcPort, DstPort RUDPPort
+ DataLength uint16
+ Seq, Ack, Checksum uint32
+ VariableHeaderArea []byte
+ // RUDPHeaderSyn contains SYN information for the RUDP packet,
+ // if the SYN flag is set
+ *RUDPHeaderSYN
+ // RUDPHeaderEack contains EACK information for the RUDP packet,
+ // if the EACK flag is set.
+ *RUDPHeaderEACK
+}
+
+type RUDPHeaderSYN struct {
+ MaxOutstandingSegments, MaxSegmentSize, OptionFlags uint16
+}
+
+type RUDPHeaderEACK struct {
+ SeqsReceivedOK []uint32
+}
+
+// LayerType returns gopacket.LayerTypeRUDP.
+func (r *RUDP) LayerType() gopacket.LayerType { return LayerTypeRUDP }
+
+func decodeRUDP(data []byte, p gopacket.PacketBuilder) error {
+ r := &RUDP{
+ SYN: data[0]&0x80 != 0,
+ ACK: data[0]&0x40 != 0,
+ EACK: data[0]&0x20 != 0,
+ RST: data[0]&0x10 != 0,
+ NUL: data[0]&0x08 != 0,
+ Version: data[0] & 0x3,
+ HeaderLength: data[1],
+ SrcPort: RUDPPort(data[2]),
+ DstPort: RUDPPort(data[3]),
+ DataLength: binary.BigEndian.Uint16(data[4:6]),
+ Seq: binary.BigEndian.Uint32(data[6:10]),
+ Ack: binary.BigEndian.Uint32(data[10:14]),
+ Checksum: binary.BigEndian.Uint32(data[14:18]),
+ }
+ if r.HeaderLength < 9 {
+ return fmt.Errorf("RUDP packet with too-short header length %d", r.HeaderLength)
+ }
+ hlen := int(r.HeaderLength) * 2
+ r.Contents = data[:hlen]
+ r.Payload = data[hlen : hlen+int(r.DataLength)]
+ r.VariableHeaderArea = data[18:hlen]
+ headerData := r.VariableHeaderArea
+ switch {
+ case r.SYN:
+ if len(headerData) != 6 {
+ return fmt.Errorf("RUDP packet invalid SYN header length: %d", len(headerData))
+ }
+ r.RUDPHeaderSYN = &RUDPHeaderSYN{
+ MaxOutstandingSegments: binary.BigEndian.Uint16(headerData[:2]),
+ MaxSegmentSize: binary.BigEndian.Uint16(headerData[2:4]),
+ OptionFlags: binary.BigEndian.Uint16(headerData[4:6]),
+ }
+ case r.EACK:
+ if len(headerData)%4 != 0 {
+ return fmt.Errorf("RUDP packet invalid EACK header length: %d", len(headerData))
+ }
+ r.RUDPHeaderEACK = &RUDPHeaderEACK{make([]uint32, len(headerData)/4)}
+ for i := 0; i < len(headerData); i += 4 {
+ r.SeqsReceivedOK[i/4] = binary.BigEndian.Uint32(headerData[i : i+4])
+ }
+ }
+ p.AddLayer(r)
+ p.SetTransportLayer(r)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+func (r *RUDP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointRUDPPort, []byte{byte(r.SrcPort)}, []byte{byte(r.DstPort)})
+}
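+
+// A decoding sketch (assuming data begins with an RUDP header; the SYN
+// sub-header fields are promoted from the embedded RUDPHeaderSYN when the
+// SYN flag is set):
+//
+//    pkt := gopacket.NewPacket(data, LayerTypeRUDP, gopacket.Default)
+//    if r, ok := pkt.Layer(LayerTypeRUDP).(*RUDP); ok && r.SYN {
+//        fmt.Println("SYN, max segment size:", r.MaxSegmentSize)
+//    }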
diff --git a/vendor/github.com/google/gopacket/layers/sctp.go b/vendor/github.com/google/gopacket/layers/sctp.go
new file mode 100644
index 0000000..511176e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sctp.go
@@ -0,0 +1,746 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/crc32"
+
+ "github.com/google/gopacket"
+)
+
+// SCTP contains information on the top level of an SCTP packet.
+type SCTP struct {
+ BaseLayer
+ SrcPort, DstPort SCTPPort
+ VerificationTag uint32
+ Checksum uint32
+ sPort, dPort []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTP
+func (s *SCTP) LayerType() gopacket.LayerType { return LayerTypeSCTP }
+
+func decodeSCTP(data []byte, p gopacket.PacketBuilder) error {
+ sctp := &SCTP{}
+ err := sctp.DecodeFromBytes(data, p)
+ p.AddLayer(sctp)
+ p.SetTransportLayer(sctp)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(sctpChunkTypePrefixDecoder)
+}
+
+var sctpChunkTypePrefixDecoder = gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)
+
+// TransportFlow returns a flow based on the source and destination SCTP port.
+func (s *SCTP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointSCTPPort, s.sPort, s.dPort)
+}
+
+func decodeWithSCTPChunkTypePrefix(data []byte, p gopacket.PacketBuilder) error {
+ chunkType := SCTPChunkType(data[0])
+ return chunkType.Decode(data, p)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (s SCTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(12)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes[0:2], uint16(s.SrcPort))
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(s.DstPort))
+ binary.BigEndian.PutUint32(bytes[4:8], s.VerificationTag)
+ if opts.ComputeChecksums {
+ // Note: MakeTable(Castagnoli) actually only creates the table once, then
+ // passes back a singleton on every other call, so this shouldn't cause
+ // excessive memory allocation.
+ binary.LittleEndian.PutUint32(bytes[8:12], crc32.Checksum(b.Bytes(), crc32.MakeTable(crc32.Castagnoli)))
+ }
+ return nil
+}
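+
+// Checksum sketch: SCTP uses CRC32c (Castagnoli) computed over the entire
+// packet with the checksum field zeroed, which is exactly what the
+// ComputeChecksums branch above does. The core computation is:
+//
+//    crc := crc32.Checksum(packetBytes, crc32.MakeTable(crc32.Castagnoli))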
+
+func (sctp *SCTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 12 {
+ return errors.New("Invalid SCTP common header length")
+ }
+ sctp.SrcPort = SCTPPort(binary.BigEndian.Uint16(data[:2]))
+ sctp.sPort = data[:2]
+ sctp.DstPort = SCTPPort(binary.BigEndian.Uint16(data[2:4]))
+ sctp.dPort = data[2:4]
+ sctp.VerificationTag = binary.BigEndian.Uint32(data[4:8])
+ sctp.Checksum = binary.BigEndian.Uint32(data[8:12])
+ sctp.BaseLayer = BaseLayer{data[:12], data[12:]}
+
+ return nil
+}
+
+func (t *SCTP) CanDecode() gopacket.LayerClass {
+ return LayerTypeSCTP
+}
+
+func (t *SCTP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// SCTPChunk contains the common fields in all SCTP chunks.
+type SCTPChunk struct {
+ BaseLayer
+ Type SCTPChunkType
+ Flags uint8
+ Length uint16
+ // ActualLength is the total length of an SCTP chunk, including padding.
+ // SCTP chunks start and end on 4-byte boundaries. So if a chunk has a length
+ // of 18, it means that it has data up to and including byte 18, then padding
+ // up to the next 4-byte boundary, 20. In this case, Length would be 18, and
+ // ActualLength would be 20.
+ ActualLength int
+}
+
+func roundUpToNearest4(i int) int {
+ if i%4 == 0 {
+ return i
+ }
+ return i + 4 - (i % 4)
+}
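+
+// For example, a chunk with Length 18 occupies 20 bytes on the wire, while a
+// length already on a 4-byte boundary needs no padding:
+//
+//    roundUpToNearest4(18) // == 20
+//    roundUpToNearest4(16) // == 16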
+
+func decodeSCTPChunk(data []byte) (SCTPChunk, error) {
+ length := binary.BigEndian.Uint16(data[2:4])
+ if length < 4 {
+ return SCTPChunk{}, errors.New("invalid SCTP chunk length")
+ }
+ actual := roundUpToNearest4(int(length))
+ ct := SCTPChunkType(data[0])
+
+ // For SCTP Data, use a separate layer for the payload
+ delta := 0
+ if ct == SCTPChunkTypeData {
+ delta = int(actual) - int(length)
+ actual = 16
+ }
+
+ return SCTPChunk{
+ Type: ct,
+ Flags: data[1],
+ Length: length,
+ ActualLength: actual,
+ BaseLayer: BaseLayer{data[:actual], data[actual : len(data)-delta]},
+ }, nil
+}
+
+// SCTPParameter is a TLV parameter inside a SCTPChunk.
+type SCTPParameter struct {
+ Type uint16
+ Length uint16
+ ActualLength int
+ Value []byte
+}
+
+func decodeSCTPParameter(data []byte) SCTPParameter {
+ length := binary.BigEndian.Uint16(data[2:4])
+ return SCTPParameter{
+ Type: binary.BigEndian.Uint16(data[0:2]),
+ Length: length,
+ Value: data[4:length],
+ ActualLength: roundUpToNearest4(int(length)),
+ }
+}
+
+func (p SCTPParameter) Bytes() []byte {
+ length := 4 + len(p.Value)
+ data := make([]byte, roundUpToNearest4(length))
+ binary.BigEndian.PutUint16(data[0:2], p.Type)
+ binary.BigEndian.PutUint16(data[2:4], uint16(length))
+ copy(data[4:], p.Value)
+ return data
+}
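+
+// A parameter with a 6-byte value therefore serializes to 12 bytes: 4 bytes
+// of TLV header, 6 bytes of value, 2 bytes of padding, with Length recording
+// the unpadded 10. A quick sketch:
+//
+//    p := SCTPParameter{Type: 7, Value: []byte{1, 2, 3, 4, 5, 6}}
+//    fmt.Println(len(p.Bytes())) // 12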
+
+// SCTPUnknownChunkType is the layer type returned when we don't recognize the
+// chunk type. Since there's a length in a known location, we can skip over
+// it even if we don't know what it is, and continue parsing the rest of the
+// chunks. This chunk is stored as an ErrorLayer in the packet.
+type SCTPUnknownChunkType struct {
+ SCTPChunk
+ bytes []byte
+}
+
+func decodeSCTPChunkTypeUnknown(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPUnknownChunkType{SCTPChunk: chunk}
+ sc.bytes = data[:sc.ActualLength]
+ p.AddLayer(sc)
+ p.SetErrorLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (s SCTPUnknownChunkType) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(s.ActualLength)
+ if err != nil {
+ return err
+ }
+ copy(bytes, s.bytes)
+ return nil
+}
+
+// LayerType returns gopacket.LayerTypeSCTPUnknownChunkType.
+func (s *SCTPUnknownChunkType) LayerType() gopacket.LayerType { return LayerTypeSCTPUnknownChunkType }
+
+// Payload returns all bytes in this header, including the decoded Type, Length,
+// and Flags.
+func (s *SCTPUnknownChunkType) Payload() []byte { return s.bytes }
+
+// Error implements ErrorLayer.
+func (s *SCTPUnknownChunkType) Error() error {
+ return fmt.Errorf("No decode method available for SCTP chunk type %s", s.Type)
+}
+
+// SCTPData is the SCTP Data chunk layer.
+type SCTPData struct {
+ SCTPChunk
+ Unordered, BeginFragment, EndFragment bool
+ TSN uint32
+ StreamId uint16
+ StreamSequence uint16
+ PayloadProtocol SCTPPayloadProtocol
+}
+
+// LayerType returns gopacket.LayerTypeSCTPData.
+func (s *SCTPData) LayerType() gopacket.LayerType { return LayerTypeSCTPData }
+
+// SCTPPayloadProtocol represents a payload protocol
+type SCTPPayloadProtocol uint32
+
+// SCTPPayloadProtocol constants from http://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml
+const (
+ SCTPProtocolReserved SCTPPayloadProtocol = 0
+ SCTPPayloadUIA = 1
+ SCTPPayloadM2UA = 2
+ SCTPPayloadM3UA = 3
+ SCTPPayloadSUA = 4
+ SCTPPayloadM2PA = 5
+ SCTPPayloadV5UA = 6
+ SCTPPayloadH248 = 7
+ SCTPPayloadBICC = 8
+ SCTPPayloadTALI = 9
+ SCTPPayloadDUA = 10
+ SCTPPayloadASAP = 11
+ SCTPPayloadENRP = 12
+ SCTPPayloadH323 = 13
+ SCTPPayloadQIPC = 14
+ SCTPPayloadSIMCO = 15
+ SCTPPayloadDDPSegment = 16
+ SCTPPayloadDDPStream = 17
+ SCTPPayloadS1AP = 18
+)
+
+func (p SCTPPayloadProtocol) String() string {
+ switch p {
+ case SCTPProtocolReserved:
+ return "Reserved"
+ case SCTPPayloadUIA:
+ return "UIA"
+ case SCTPPayloadM2UA:
+ return "M2UA"
+ case SCTPPayloadM3UA:
+ return "M3UA"
+ case SCTPPayloadSUA:
+ return "SUA"
+ case SCTPPayloadM2PA:
+ return "M2PA"
+ case SCTPPayloadV5UA:
+ return "V5UA"
+ case SCTPPayloadH248:
+ return "H.248"
+ case SCTPPayloadBICC:
+ return "BICC"
+ case SCTPPayloadTALI:
+ return "TALI"
+ case SCTPPayloadDUA:
+ return "DUA"
+ case SCTPPayloadASAP:
+ return "ASAP"
+ case SCTPPayloadENRP:
+ return "ENRP"
+ case SCTPPayloadH323:
+ return "H.323"
+ case SCTPPayloadQIPC:
+ return "QIPC"
+ case SCTPPayloadSIMCO:
+ return "SIMCO"
+ case SCTPPayloadDDPSegment:
+ return "DDPSegment"
+ case SCTPPayloadDDPStream:
+ return "DDPStream"
+ case SCTPPayloadS1AP:
+ return "S1AP"
+ }
+ return fmt.Sprintf("Unknown(%d)", p)
+}
+
+func decodeSCTPData(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPData{
+ SCTPChunk: chunk,
+ Unordered: data[1]&0x4 != 0,
+ BeginFragment: data[1]&0x2 != 0,
+ EndFragment: data[1]&0x1 != 0,
+ TSN: binary.BigEndian.Uint32(data[4:8]),
+ StreamId: binary.BigEndian.Uint16(data[8:10]),
+ StreamSequence: binary.BigEndian.Uint16(data[10:12]),
+ PayloadProtocol: SCTPPayloadProtocol(binary.BigEndian.Uint32(data[12:16])),
+ }
+ // Length is the length in bytes of the data, INCLUDING the 16-byte header.
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPData) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ payload := b.Bytes()
+ // Pad the payload to a 32 bit boundary
+ if rem := len(payload) % 4; rem != 0 {
+ b.AppendBytes(4 - rem)
+ }
+ length := 16
+ bytes, err := b.PrependBytes(length)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ flags := uint8(0)
+ if sc.Unordered {
+ flags |= 0x4
+ }
+ if sc.BeginFragment {
+ flags |= 0x2
+ }
+ if sc.EndFragment {
+ flags |= 0x1
+ }
+ bytes[1] = flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length+len(payload)))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.TSN)
+ binary.BigEndian.PutUint16(bytes[8:10], sc.StreamId)
+ binary.BigEndian.PutUint16(bytes[10:12], sc.StreamSequence)
+ binary.BigEndian.PutUint32(bytes[12:16], uint32(sc.PayloadProtocol))
+ return nil
+}
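+
+// A serialization sketch: a DATA chunk carrying a 3-byte payload is padded to
+// a 4-byte boundary on the wire, while its length field records the unpadded
+// 16+3 = 19:
+//
+//    buf := gopacket.NewSerializeBuffer()
+//    _ = gopacket.SerializeLayers(buf, gopacket.SerializeOptions{},
+//        &SCTPData{PayloadProtocol: SCTPPayloadS1AP},
+//        gopacket.Payload([]byte{1, 2, 3}))
+//    // len(buf.Bytes()) == 20; buf.Bytes()[2:4] holds 19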
+
+// SCTPInitParameter is a parameter for an SCTP Init or InitAck packet.
+type SCTPInitParameter SCTPParameter
+
+// SCTPInit is used as the return value for both SCTPInit and SCTPInitAck
+// messages.
+type SCTPInit struct {
+ SCTPChunk
+ InitiateTag uint32
+ AdvertisedReceiverWindowCredit uint32
+ OutboundStreams, InboundStreams uint16
+ InitialTSN uint32
+ Parameters []SCTPInitParameter
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPInit or gopacket.LayerTypeSCTPInitAck.
+func (sc *SCTPInit) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeInitAck {
+ return LayerTypeSCTPInitAck
+ }
+ // sc.Type == SCTPChunkTypeInit
+ return LayerTypeSCTPInit
+}
+
+func decodeSCTPInit(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPInit{
+ SCTPChunk: chunk,
+ InitiateTag: binary.BigEndian.Uint32(data[4:8]),
+ AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+ OutboundStreams: binary.BigEndian.Uint16(data[12:14]),
+ InboundStreams: binary.BigEndian.Uint16(data[14:16]),
+ InitialTSN: binary.BigEndian.Uint32(data[16:20]),
+ }
+ paramData := data[20:sc.ActualLength]
+ for len(paramData) > 0 {
+ p := SCTPInitParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPInit) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var payload []byte
+ for _, param := range sc.Parameters {
+ payload = append(payload, SCTPParameter(param).Bytes()...)
+ }
+ length := 20 + len(payload)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.InitiateTag)
+ binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+ binary.BigEndian.PutUint16(bytes[12:14], sc.OutboundStreams)
+ binary.BigEndian.PutUint16(bytes[14:16], sc.InboundStreams)
+ binary.BigEndian.PutUint32(bytes[16:20], sc.InitialTSN)
+ copy(bytes[20:], payload)
+ return nil
+}
+
+// SCTPSack is the SCTP Selective ACK chunk layer.
+type SCTPSack struct {
+ SCTPChunk
+ CumulativeTSNAck uint32
+ AdvertisedReceiverWindowCredit uint32
+ NumGapACKs, NumDuplicateTSNs uint16
+ GapACKs []uint16
+ DuplicateTSNs []uint32
+}
+
+// LayerType returns LayerTypeSCTPSack.
+func (sc *SCTPSack) LayerType() gopacket.LayerType {
+ return LayerTypeSCTPSack
+}
+
+func decodeSCTPSack(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPSack{
+ SCTPChunk: chunk,
+ CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
+ AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+ NumGapACKs: binary.BigEndian.Uint16(data[12:14]),
+ NumDuplicateTSNs: binary.BigEndian.Uint16(data[14:16]),
+ }
+ // We cap gapAcks and dupTSNs here so we're not allocating tons
+ // of memory based on a user-controllable field. Our maximums are not exact,
+ // but should give us sane defaults... we'll still hit slice boundaries and
+ // fail if the user-supplied values are too high (in the for loops below), but
+ // the amount of memory we'll have allocated because of that should be small
+ // (< sc.ActualLength)
+ gapAcks := sc.SCTPChunk.ActualLength / 2
+ dupTSNs := (sc.SCTPChunk.ActualLength - gapAcks*2) / 4
+ if gapAcks > int(sc.NumGapACKs) {
+ gapAcks = int(sc.NumGapACKs)
+ }
+ if dupTSNs > int(sc.NumDuplicateTSNs) {
+ dupTSNs = int(sc.NumDuplicateTSNs)
+ }
+ sc.GapACKs = make([]uint16, 0, gapAcks)
+ sc.DuplicateTSNs = make([]uint32, 0, dupTSNs)
+ bytesRemaining := data[16:]
+ for i := 0; i < int(sc.NumGapACKs); i++ {
+ sc.GapACKs = append(sc.GapACKs, binary.BigEndian.Uint16(bytesRemaining[:2]))
+ bytesRemaining = bytesRemaining[2:]
+ }
+ for i := 0; i < int(sc.NumDuplicateTSNs); i++ {
+ sc.DuplicateTSNs = append(sc.DuplicateTSNs, binary.BigEndian.Uint32(bytesRemaining[:4]))
+ bytesRemaining = bytesRemaining[4:]
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPSack) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ length := 16 + 2*len(sc.GapACKs) + 4*len(sc.DuplicateTSNs)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+ binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+ binary.BigEndian.PutUint16(bytes[12:14], uint16(len(sc.GapACKs)))
+ binary.BigEndian.PutUint16(bytes[14:16], uint16(len(sc.DuplicateTSNs)))
+ for i, v := range sc.GapACKs {
+ binary.BigEndian.PutUint16(bytes[16+i*2:], v)
+ }
+ offset := 16 + 2*len(sc.GapACKs)
+ for i, v := range sc.DuplicateTSNs {
+ binary.BigEndian.PutUint32(bytes[offset+i*4:], v)
+ }
+ return nil
+}
+
+// SCTPHeartbeatParameter is the parameter type used by SCTP heartbeat and
+// heartbeat ack layers.
+type SCTPHeartbeatParameter SCTPParameter
+
+// SCTPHeartbeat is the SCTP heartbeat layer, also used for heartbeat acks.
+type SCTPHeartbeat struct {
+ SCTPChunk
+ Parameters []SCTPHeartbeatParameter
+}
+
+// LayerType returns gopacket.LayerTypeSCTPHeartbeat or
+// gopacket.LayerTypeSCTPHeartbeatAck, depending on the chunk type.
+func (sc *SCTPHeartbeat) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeHeartbeatAck {
+ return LayerTypeSCTPHeartbeatAck
+ }
+ // sc.Type == SCTPChunkTypeHeartbeat
+ return LayerTypeSCTPHeartbeat
+}
+
+func decodeSCTPHeartbeat(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPHeartbeat{
+ SCTPChunk: chunk,
+ }
+ paramData := data[4:sc.Length]
+ for len(paramData) > 0 {
+ p := SCTPHeartbeatParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPHeartbeat) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var payload []byte
+ for _, param := range sc.Parameters {
+ payload = append(payload, SCTPParameter(param).Bytes()...)
+ }
+ length := 4 + len(payload)
+
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ copy(bytes[4:], payload)
+ return nil
+}
+
+// SCTPErrorParameter is the parameter type used by SCTP Abort and Error layers.
+type SCTPErrorParameter SCTPParameter
+
+// SCTPError is the SCTP error layer, also used for SCTP aborts.
+type SCTPError struct {
+ SCTPChunk
+ Parameters []SCTPErrorParameter
+}
+
+// LayerType returns LayerTypeSCTPAbort or LayerTypeSCTPError.
+func (sc *SCTPError) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeAbort {
+ return LayerTypeSCTPAbort
+ }
+ // sc.Type == SCTPChunkTypeError
+ return LayerTypeSCTPError
+}
+
+func decodeSCTPError(data []byte, p gopacket.PacketBuilder) error {
+ // remarkably similar to decodeSCTPHeartbeat ;)
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPError{
+ SCTPChunk: chunk,
+ }
+ paramData := data[4:sc.Length]
+ for len(paramData) > 0 {
+ p := SCTPErrorParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPError) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var payload []byte
+ for _, param := range sc.Parameters {
+ payload = append(payload, SCTPParameter(param).Bytes()...)
+ }
+ length := 4 + len(payload)
+
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ copy(bytes[4:], payload)
+ return nil
+}
+
+// SCTPShutdown is the SCTP shutdown layer.
+type SCTPShutdown struct {
+ SCTPChunk
+ CumulativeTSNAck uint32
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdown.
+func (sc *SCTPShutdown) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdown }
+
+func decodeSCTPShutdown(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPShutdown{
+ SCTPChunk: chunk,
+ CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdown) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 8)
+ binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+ return nil
+}
+
+// SCTPShutdownAck is the SCTP shutdown ack layer.
+type SCTPShutdownAck struct {
+ SCTPChunk
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdownAck.
+func (sc *SCTPShutdownAck) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdownAck }
+
+func decodeSCTPShutdownAck(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPShutdownAck{
+ SCTPChunk: chunk,
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdownAck) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 4)
+ return nil
+}
+
+// SCTPCookieEcho is the SCTP Cookie Echo layer.
+type SCTPCookieEcho struct {
+ SCTPChunk
+ Cookie []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTPCookieEcho.
+func (sc *SCTPCookieEcho) LayerType() gopacket.LayerType { return LayerTypeSCTPCookieEcho }
+
+func decodeSCTPCookieEcho(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPCookieEcho{
+ SCTPChunk: chunk,
+ }
+ sc.Cookie = data[4:sc.Length]
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPCookieEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ length := 4 + len(sc.Cookie)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ copy(bytes[4:], sc.Cookie)
+ return nil
+}
+
+// SCTPEmptyLayer is used by all empty SCTP chunks (currently CookieAck and
+// ShutdownComplete).
+type SCTPEmptyLayer struct {
+ SCTPChunk
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPShutdownComplete or
+// LayerTypeSCTPCookieAck.
+func (sc *SCTPEmptyLayer) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeShutdownComplete {
+ return LayerTypeSCTPShutdownComplete
+ }
+ // sc.Type == SCTPChunkTypeCookieAck
+ return LayerTypeSCTPCookieAck
+}
+
+func decodeSCTPEmptyLayer(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPEmptyLayer{
+ SCTPChunk: chunk,
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPEmptyLayer) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 4)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/sflow.go b/vendor/github.com/google/gopacket/layers/sflow.go
new file mode 100644
index 0000000..c56fe89
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sflow.go
@@ -0,0 +1,2480 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+This layer decodes SFlow version 5 datagrams.
+
+The specification can be found here: http://sflow.org/sflow_version_5.txt
+
+Additional developer information about sflow can be found at:
+http://sflow.org/developers/specifications.php
+
+And SFlow in general:
+http://sflow.org/index.php
+
+Two forms of sample data are defined: compact and expanded. The
+specification has this to say:
+
+ Compact and expanded forms of counter and flow samples are defined.
+ An agent must not mix compact/expanded encodings. If an agent
+ will never use ifIndex numbers >= 2^24 then it must use compact
+ encodings for all interfaces. Otherwise the expanded formats must
+ be used for all interfaces.
+
+This decoder only supports the compact form, because that is the only
+one for which data was available.
+
+The datagram is composed of one or more samples of type flow or counter,
+and each sample is composed of one or more records describing the sample.
+A sample is a single instance of sampled information, and each record in
+the sample gives additional / supplementary information about the sample.
+
+The following sample record types are supported:
+
+ Raw Packet Header
+ opaque = flow_data; enterprise = 0; format = 1
+
+ Extended Switch Data
+ opaque = flow_data; enterprise = 0; format = 1001
+
+ Extended Router Data
+ opaque = flow_data; enterprise = 0; format = 1002
+
+ Extended Gateway Data
+ opaque = flow_data; enterprise = 0; format = 1003
+
+ Extended User Data
+ opaque = flow_data; enterprise = 0; format = 1004
+
+ Extended URL Data
+ opaque = flow_data; enterprise = 0; format = 1005
+
+The following types of counter records are supported:
+
+ Generic Interface Counters - see RFC 2233
+ opaque = counter_data; enterprise = 0; format = 1
+
+ Ethernet Interface Counters - see RFC 2358
+ opaque = counter_data; enterprise = 0; format = 2
+
+SFlow is encoded using XDR (RFC4506). There are a few places
+where the standard 4-byte fields are partitioned into two
+bitfields of different lengths. I'm not sure why the designers
+chose to pack together two values like this in some places, and
+in others they use the entire 4-byte value to store a number that
+will never be more than a few bits. In any case, there are a couple
+of types defined to handle the decoding of these bitfields, and
+that's why they're there. */
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// SFlowRecord holds both flow sample records and counter sample records.
+// A Record is the structure that actually holds the sampled data
+// and / or counters.
+type SFlowRecord interface {
+}
+
+// SFlowDataSource encodes a 2-bit SFlowSourceFormat in its most significant
+// 2 bits, and an SFlowSourceValue in its least significant 30 bits.
+// These types and values define the meaning of the interface information
+// presented in the sample metadata.
+type SFlowDataSource int32
+
+func (sdc SFlowDataSource) decode() (SFlowSourceFormat, SFlowSourceValue) {
+ leftField := sdc >> 30
+ rightField := uint32(0x3FFFFFFF) & uint32(sdc)
+ return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
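+
+// For example, the raw value 0x40000005 carries format 1 (packet discarded)
+// in its top two bits and source value 5 in the remaining 30:
+//
+//    f, v := SFlowDataSource(0x40000005).decode()
+//    // f == SFlowTypePacketDiscarded, v == 5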
+
+type SFlowDataSourceExpanded struct {
+ SourceIDClass SFlowSourceFormat
+ SourceIDIndex SFlowSourceValue
+}
+
+func (sdce SFlowDataSourceExpanded) decode() (SFlowSourceFormat, SFlowSourceValue) {
+ leftField := sdce.SourceIDClass >> 30
+ rightField := uint32(0x3FFFFFFF) & uint32(sdce.SourceIDIndex)
+ return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
+
+type SFlowSourceFormat uint32
+
+type SFlowSourceValue uint32
+
+const (
+ SFlowTypeSingleInterface SFlowSourceFormat = 0
+ SFlowTypePacketDiscarded SFlowSourceFormat = 1
+ SFlowTypeMultipleDestinations SFlowSourceFormat = 2
+)
+
+func (sdf SFlowSourceFormat) String() string {
+ switch sdf {
+ case SFlowTypeSingleInterface:
+ return "Single Interface"
+ case SFlowTypePacketDiscarded:
+ return "Packet Discarded"
+ case SFlowTypeMultipleDestinations:
+ return "Multiple Destinations"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+func decodeSFlow(data []byte, p gopacket.PacketBuilder) error {
+ s := &SFlowDatagram{}
+ err := s.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(s)
+ p.SetApplicationLayer(s)
+ return nil
+}
+
+// SFlowDatagram is the outermost container which holds some basic information
+// about the reporting agent, and holds at least one sample record
+type SFlowDatagram struct {
+ BaseLayer
+
+ DatagramVersion uint32
+ AgentAddress net.IP
+ SubAgentID uint32
+ SequenceNumber uint32
+ AgentUptime uint32
+ SampleCount uint32
+ FlowSamples []SFlowFlowSample
+ CounterSamples []SFlowCounterSample
+}
+
+// An SFlow datagram's outer container has the following
+// structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sFlow version (2|4|5) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int IP version of the Agent (1=v4|2=v6) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Agent IP address (v4=4byte|v6=16byte) /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sub agent id |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int datagram sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int switch uptime in ms |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int n samples in datagram |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / n samples /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowDataFormat encodes the EnterpriseID in the most
+// significant 20 bits, and the SampleType in the least significant
+// 12 bits.
+type SFlowDataFormat uint32
+
+func (sdf SFlowDataFormat) decode() (SFlowEnterpriseID, SFlowSampleType) {
+ leftField := sdf >> 12
+ rightField := uint32(0xFFF) & uint32(sdf)
+ return SFlowEnterpriseID(leftField), SFlowSampleType(rightField)
+}
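+
+// For example, the raw value 1 has enterprise 0 (the SFlow standard) in its
+// upper 20 bits and sample type 1 (flow sample) in its lower 12:
+//
+//    e, t := SFlowDataFormat(1).decode()
+//    // e == SFlowStandard, t == SFlowTypeFlowSample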
+
+// SFlowEnterpriseID is used to differentiate between the
+// official SFlow standard, and other, vendor-specific
+// types of flow data. (Similar to SNMP's enterprise MIB
+// OIDs.) Only the official SFlow Enterprise ID is decoded
+// here.
+type SFlowEnterpriseID uint32
+
+const (
+ SFlowStandard SFlowEnterpriseID = 0
+)
+
+func (eid SFlowEnterpriseID) String() string {
+ switch eid {
+ case SFlowStandard:
+ return "Standard SFlow"
+ default:
+ return ""
+ }
+}
+
+func (eid SFlowEnterpriseID) GetType() SFlowEnterpriseID {
+ return SFlowStandard
+}
+
+// SFlowSampleType specifies the type of sample. Flow samples and counter
+// samples, in both compact and expanded forms, are supported.
+type SFlowSampleType uint32
+
+const (
+ SFlowTypeFlowSample SFlowSampleType = 1
+ SFlowTypeCounterSample SFlowSampleType = 2
+ SFlowTypeExpandedFlowSample SFlowSampleType = 3
+ SFlowTypeExpandedCounterSample SFlowSampleType = 4
+)
+
+func (st SFlowSampleType) GetType() SFlowSampleType {
+ switch st {
+ case SFlowTypeFlowSample:
+ return SFlowTypeFlowSample
+ case SFlowTypeCounterSample:
+ return SFlowTypeCounterSample
+ case SFlowTypeExpandedFlowSample:
+ return SFlowTypeExpandedFlowSample
+ case SFlowTypeExpandedCounterSample:
+ return SFlowTypeExpandedCounterSample
+ default:
+ panic("Invalid Sample Type")
+ }
+}
+
+func (st SFlowSampleType) String() string {
+ switch st {
+ case SFlowTypeFlowSample:
+ return "Flow Sample"
+ case SFlowTypeCounterSample:
+ return "Counter Sample"
+ case SFlowTypeExpandedFlowSample:
+ return "Expanded Flow Sample"
+ case SFlowTypeExpandedCounterSample:
+ return "Expanded Counter Sample"
+ default:
+ return ""
+ }
+}
+
+func (s *SFlowDatagram) LayerType() gopacket.LayerType { return LayerTypeSFlow }
+
+func (d *SFlowDatagram) Payload() []byte { return nil }
+
+func (d *SFlowDatagram) CanDecode() gopacket.LayerClass { return LayerTypeSFlow }
+
+func (d *SFlowDatagram) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+// SFlowIPType determines what form the IP address being decoded will
+// take. This is an XDR union type allowing for both IPv4 and IPv6
+type SFlowIPType uint32
+
+const (
+ SFlowIPv4 SFlowIPType = 1
+ SFlowIPv6 SFlowIPType = 2
+)
+
+func (s SFlowIPType) String() string {
+ switch s {
+ case SFlowIPv4:
+ return "IPv4"
+ case SFlowIPv6:
+ return "IPv6"
+ default:
+ return ""
+ }
+}
+
+func (s SFlowIPType) Length() int {
+ switch s {
+ case SFlowIPv4:
+ return 4
+ case SFlowIPv6:
+ return 16
+ default:
+ return 0
+ }
+}
+
+func (s *SFlowDatagram) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ var agentAddressType SFlowIPType
+
+ data, s.DatagramVersion = data[4:], binary.BigEndian.Uint32(data[:4])
+ data, agentAddressType = data[4:], SFlowIPType(binary.BigEndian.Uint32(data[:4]))
+ data, s.AgentAddress = data[agentAddressType.Length():], data[:agentAddressType.Length()]
+ data, s.SubAgentID = data[4:], binary.BigEndian.Uint32(data[:4])
+ data, s.SequenceNumber = data[4:], binary.BigEndian.Uint32(data[:4])
+ data, s.AgentUptime = data[4:], binary.BigEndian.Uint32(data[:4])
+ data, s.SampleCount = data[4:], binary.BigEndian.Uint32(data[:4])
+
+ if s.SampleCount < 1 {
+ return fmt.Errorf("SFlow Datagram has invalid sample length: %d", s.SampleCount)
+ }
+ for i := uint32(0); i < s.SampleCount; i++ {
+ sdf := SFlowDataFormat(binary.BigEndian.Uint32(data[:4]))
+ _, sampleType := sdf.decode()
+ switch sampleType {
+ case SFlowTypeFlowSample:
+ if flowSample, err := decodeFlowSample(&data, false); err == nil {
+ s.FlowSamples = append(s.FlowSamples, flowSample)
+ } else {
+ return err
+ }
+ case SFlowTypeCounterSample:
+ if counterSample, err := decodeCounterSample(&data, false); err == nil {
+ s.CounterSamples = append(s.CounterSamples, counterSample)
+ } else {
+ return err
+ }
+ case SFlowTypeExpandedFlowSample:
+ if flowSample, err := decodeFlowSample(&data, true); err == nil {
+ s.FlowSamples = append(s.FlowSamples, flowSample)
+ } else {
+ return err
+ }
+ case SFlowTypeExpandedCounterSample:
+ if counterSample, err := decodeCounterSample(&data, true); err == nil {
+ s.CounterSamples = append(s.CounterSamples, counterSample)
+ } else {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("Unsupported SFlow sample type %d", sampleType)
+ }
+ }
+ return nil
+}
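+
+// A decoding sketch (sFlow is conventionally carried over UDP port 6343;
+// udp is assumed to be an already-decoded *UDP layer):
+//
+//    sf := &SFlowDatagram{}
+//    if err := sf.DecodeFromBytes(udp.Payload, gopacket.NilDecodeFeedback); err == nil {
+//        fmt.Printf("%d samples from agent %v\n", sf.SampleCount, sf.AgentAddress)
+//    }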
+
+// SFlowFlowSample represents a sampled packet and contains
+// one or more records describing the packet
+type SFlowFlowSample struct {
+ EnterpriseID SFlowEnterpriseID
+ Format SFlowSampleType
+ SampleLength uint32
+ SequenceNumber uint32
+ SourceIDClass SFlowSourceFormat
+ SourceIDIndex SFlowSourceValue
+ SamplingRate uint32
+ SamplePool uint32
+ Dropped uint32
+ InputInterfaceFormat uint32
+ InputInterface uint32
+ OutputInterfaceFormat uint32
+ OutputInterface uint32
+ RecordCount uint32
+ Records []SFlowRecord
+}
+
+// Compact flow samples (flow record format type 1) have the
+// following structure. Note the bit fields encoding the
+// Enterprise ID and the record format:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | sample length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |id type | src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sampling rate |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample pool |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int drops |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input ifIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output ifIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / flow records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Expanded flow samples (flow record format type 3) have the
+// following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | sample length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int src id type |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sampling rate |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample pool |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int drops |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input interface format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input interface value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output interface format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output interface value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / flow records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowFlowDataFormat uint32
+
+func (fdf SFlowFlowDataFormat) decode() (SFlowEnterpriseID, SFlowFlowRecordType) {
+ leftField := fdf >> 12
+ rightField := uint32(0xFFF) & uint32(fdf)
+ return SFlowEnterpriseID(leftField), SFlowFlowRecordType(rightField)
+}
+
+func (fs SFlowFlowSample) GetRecords() []SFlowRecord {
+ return fs.Records
+}
+
+func (fs SFlowFlowSample) GetType() SFlowSampleType {
+ return SFlowTypeFlowSample
+}
+
+// skipRecord advances *data past a single record: 8 bytes of record header
+// plus the record contents padded to a 4-byte XDR boundary.
+func skipRecord(data *[]byte) {
+ recordLength := int(binary.BigEndian.Uint32((*data)[4:]))
+ *data = (*data)[(recordLength+((4-recordLength%4)%4))+8:]
+}
+
+func decodeFlowSample(data *[]byte, expanded bool) (SFlowFlowSample, error) {
+ s := SFlowFlowSample{}
+ var sdf SFlowDataFormat
+ *data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ var sdc SFlowDataSource
+
+ s.EnterpriseID, s.Format = sdf.decode()
+ *data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ if expanded {
+ *data, s.SourceIDClass = (*data)[4:], SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4]))
+ *data, s.SourceIDIndex = (*data)[4:], SFlowSourceValue(binary.BigEndian.Uint32((*data)[:4]))
+ } else {
+ *data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
+ s.SourceIDClass, s.SourceIDIndex = sdc.decode()
+ }
+ *data, s.SamplingRate = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.SamplePool = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.Dropped = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ if expanded {
+ *data, s.InputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.OutputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ } else {
+ *data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ }
+ *data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ for i := uint32(0); i < s.RecordCount; i++ {
+ rdf := SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ enterpriseID, flowRecordType := rdf.decode()
+
+ // Try to decode only when the EnterpriseID is 0, signaling that
+ // default sflow structs are used according to the specification.
+ // Unexpected behavior has been observed otherwise, e.g. with pmacct.
+ if enterpriseID == 0 {
+ switch flowRecordType {
+ case SFlowTypeRawPacketFlow:
+ if record, err := decodeRawPacketFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedUserFlow:
+ if record, err := decodeExtendedUserFlow(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedUrlFlow:
+ if record, err := decodeExtendedURLRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedSwitchFlow:
+ if record, err := decodeExtendedSwitchFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedRouterFlow:
+ if record, err := decodeExtendedRouterFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedGatewayFlow:
+ if record, err := decodeExtendedGatewayFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeEthernetFrameFlow:
+ if record, err := decodeEthernetFrameFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeIpv4Flow:
+ if record, err := decodeSFlowIpv4Record(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeIpv6Flow:
+ if record, err := decodeSFlowIpv6Record(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedMlpsFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsFlow")
+ case SFlowTypeExtendedNatFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedNatFlow")
+ case SFlowTypeExtendedMlpsTunnelFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsTunnelFlow")
+ case SFlowTypeExtendedMlpsVcFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsVcFlow")
+ case SFlowTypeExtendedMlpsFecFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsFecFlow")
+ case SFlowTypeExtendedMlpsLvpFecFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsLvpFecFlow")
+ case SFlowTypeExtendedVlanFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedVlanFlow")
+ case SFlowTypeExtendedIpv4TunnelEgressFlow:
+ if record, err := decodeExtendedIpv4TunnelEgress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedIpv4TunnelIngressFlow:
+ if record, err := decodeExtendedIpv4TunnelIngress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedIpv6TunnelEgressFlow:
+ if record, err := decodeExtendedIpv6TunnelEgress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedIpv6TunnelIngressFlow:
+ if record, err := decodeExtendedIpv6TunnelIngress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedDecapsulateEgressFlow:
+ if record, err := decodeExtendedDecapsulateEgress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedDecapsulateIngressFlow:
+ if record, err := decodeExtendedDecapsulateIngress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedVniEgressFlow:
+ if record, err := decodeExtendedVniEgress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedVniIngressFlow:
+ if record, err := decodeExtendedVniIngress(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ default:
+				return s, fmt.Errorf("unsupported flow record type: %d", flowRecordType)
+ }
+ } else {
+ skipRecord(data)
+ }
+ }
+ return s, nil
+}
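+
+// decodeFlowSample, like the other decoders in this file, consumes its
+// input through a pointer to a slice: each assignment re-slices *data
+// past the bytes just read. Because Go evaluates the right-hand side of
+// a tuple assignment before assigning, the read still sees the original
+// slice. A minimal sketch of the idiom (illustrative only):
+//
+//	var seq uint32
+//	*data, seq = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+//	// *data now starts at the field that follows the sequence number.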
+
+// Counter samples report information about various counter
+// objects. Typically these are items like IfInOctets, or
+// CPU / Memory stats, etc. SFlow will report these at regular
+// intervals as configured on the agent. If one were sufficiently
+// industrious, this could be used to replace the typical
+// SNMP polling used for such things.
+type SFlowCounterSample struct {
+ EnterpriseID SFlowEnterpriseID
+ Format SFlowSampleType
+ SampleLength uint32
+ SequenceNumber uint32
+ SourceIDClass SFlowSourceFormat
+ SourceIDIndex SFlowSourceValue
+ RecordCount uint32
+ Records []SFlowRecord
+}
+
+// Counter samples have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |id type | src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / counter records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowCounterDataFormat uint32
+
+func (cdf SFlowCounterDataFormat) decode() (SFlowEnterpriseID, SFlowCounterRecordType) {
+ leftField := cdf >> 12
+ rightField := uint32(0xFFF) & uint32(cdf)
+ return SFlowEnterpriseID(leftField), SFlowCounterRecordType(rightField)
+}
+
+// GetRecords will return a slice of interface types
+// representing records. A type switch can be used to
+// get at the underlying SFlowCounterRecordType.
+func (cs SFlowCounterSample) GetRecords() []SFlowRecord {
+ return cs.Records
+}
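+
+// For example, a caller might recover concrete counter record types
+// with a type switch (an illustrative sketch, not part of the API):
+//
+//	for _, rec := range cs.GetRecords() {
+//		switch r := rec.(type) {
+//		case SFlowGenericInterfaceCounters:
+//			fmt.Println("ifIndex:", r.IfIndex, "in octets:", r.IfInOctets)
+//		case SFlowEthernetCounters:
+//			fmt.Println("FCS errors:", r.FCSErrors)
+//		}
+//	}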
+
+// GetType will report the type of sample. Only the
+// compact form of counter samples is supported.
+func (cs SFlowCounterSample) GetType() SFlowSampleType {
+ return SFlowTypeCounterSample
+}
+
+type SFlowCounterRecordType uint32
+
+const (
+ SFlowTypeGenericInterfaceCounters SFlowCounterRecordType = 1
+ SFlowTypeEthernetInterfaceCounters SFlowCounterRecordType = 2
+ SFlowTypeTokenRingInterfaceCounters SFlowCounterRecordType = 3
+ SFlowType100BaseVGInterfaceCounters SFlowCounterRecordType = 4
+ SFlowTypeVLANCounters SFlowCounterRecordType = 5
+ SFlowTypeLACPCounters SFlowCounterRecordType = 7
+ SFlowTypeProcessorCounters SFlowCounterRecordType = 1001
+ SFlowTypeOpenflowPortCounters SFlowCounterRecordType = 1004
+ SFlowTypePORTNAMECounters SFlowCounterRecordType = 1005
+ SFLowTypeAPPRESOURCESCounters SFlowCounterRecordType = 2203
+ SFlowTypeOVSDPCounters SFlowCounterRecordType = 2207
+)
+
+func (cr SFlowCounterRecordType) String() string {
+ switch cr {
+ case SFlowTypeGenericInterfaceCounters:
+ return "Generic Interface Counters"
+ case SFlowTypeEthernetInterfaceCounters:
+ return "Ethernet Interface Counters"
+ case SFlowTypeTokenRingInterfaceCounters:
+ return "Token Ring Interface Counters"
+ case SFlowType100BaseVGInterfaceCounters:
+ return "100BaseVG Interface Counters"
+ case SFlowTypeVLANCounters:
+ return "VLAN Counters"
+ case SFlowTypeLACPCounters:
+ return "LACP Counters"
+ case SFlowTypeProcessorCounters:
+ return "Processor Counters"
+ case SFlowTypeOpenflowPortCounters:
+ return "Openflow Port Counters"
+ case SFlowTypePORTNAMECounters:
+ return "PORT NAME Counters"
+ case SFLowTypeAPPRESOURCESCounters:
+ return "App Resources Counters"
+ case SFlowTypeOVSDPCounters:
+ return "OVSDP Counters"
+	default:
+		return ""
+	}
+}
+
+func decodeCounterSample(data *[]byte, expanded bool) (SFlowCounterSample, error) {
+ s := SFlowCounterSample{}
+ var sdc SFlowDataSource
+ var sdce SFlowDataSourceExpanded
+ var sdf SFlowDataFormat
+
+ *data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ s.EnterpriseID, s.Format = sdf.decode()
+ *data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ if expanded {
+ *data, sdce = (*data)[8:], SFlowDataSourceExpanded{SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4])), SFlowSourceValue(binary.BigEndian.Uint32((*data)[4:8]))}
+ s.SourceIDClass, s.SourceIDIndex = sdce.decode()
+ } else {
+ *data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
+ s.SourceIDClass, s.SourceIDIndex = sdc.decode()
+ }
+ *data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ for i := uint32(0); i < s.RecordCount; i++ {
+ cdf := SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ _, counterRecordType := cdf.decode()
+ switch counterRecordType {
+ case SFlowTypeGenericInterfaceCounters:
+ if record, err := decodeGenericInterfaceCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeEthernetInterfaceCounters:
+ if record, err := decodeEthernetCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeTokenRingInterfaceCounters:
+ skipRecord(data)
+ return s, errors.New("skipping TypeTokenRingInterfaceCounters")
+ case SFlowType100BaseVGInterfaceCounters:
+ skipRecord(data)
+ return s, errors.New("skipping Type100BaseVGInterfaceCounters")
+ case SFlowTypeVLANCounters:
+ if record, err := decodeVLANCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeLACPCounters:
+ if record, err := decodeLACPCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeProcessorCounters:
+ if record, err := decodeProcessorCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeOpenflowPortCounters:
+ if record, err := decodeOpenflowportCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypePORTNAMECounters:
+ if record, err := decodePortnameCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFLowTypeAPPRESOURCESCounters:
+ if record, err := decodeAppresourcesCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeOVSDPCounters:
+ if record, err := decodeOVSDPCounters(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ default:
+			return s, fmt.Errorf("invalid counter record type: %d", counterRecordType)
+ }
+ }
+ return s, nil
+}
+
+// SFlowBaseFlowRecord holds the fields common to all records
+// of type SFlowFlowRecordType
+type SFlowBaseFlowRecord struct {
+ EnterpriseID SFlowEnterpriseID
+ Format SFlowFlowRecordType
+ FlowDataLength uint32
+}
+
+func (bfr SFlowBaseFlowRecord) GetType() SFlowFlowRecordType {
+ return bfr.Format
+}
+
+// SFlowFlowRecordType denotes what kind of Flow Record is
+// represented. See RFC 3176
+type SFlowFlowRecordType uint32
+
+const (
+ SFlowTypeRawPacketFlow SFlowFlowRecordType = 1
+ SFlowTypeEthernetFrameFlow SFlowFlowRecordType = 2
+ SFlowTypeIpv4Flow SFlowFlowRecordType = 3
+ SFlowTypeIpv6Flow SFlowFlowRecordType = 4
+ SFlowTypeExtendedSwitchFlow SFlowFlowRecordType = 1001
+ SFlowTypeExtendedRouterFlow SFlowFlowRecordType = 1002
+ SFlowTypeExtendedGatewayFlow SFlowFlowRecordType = 1003
+ SFlowTypeExtendedUserFlow SFlowFlowRecordType = 1004
+ SFlowTypeExtendedUrlFlow SFlowFlowRecordType = 1005
+ SFlowTypeExtendedMlpsFlow SFlowFlowRecordType = 1006
+ SFlowTypeExtendedNatFlow SFlowFlowRecordType = 1007
+ SFlowTypeExtendedMlpsTunnelFlow SFlowFlowRecordType = 1008
+ SFlowTypeExtendedMlpsVcFlow SFlowFlowRecordType = 1009
+ SFlowTypeExtendedMlpsFecFlow SFlowFlowRecordType = 1010
+ SFlowTypeExtendedMlpsLvpFecFlow SFlowFlowRecordType = 1011
+ SFlowTypeExtendedVlanFlow SFlowFlowRecordType = 1012
+ SFlowTypeExtendedIpv4TunnelEgressFlow SFlowFlowRecordType = 1023
+ SFlowTypeExtendedIpv4TunnelIngressFlow SFlowFlowRecordType = 1024
+ SFlowTypeExtendedIpv6TunnelEgressFlow SFlowFlowRecordType = 1025
+ SFlowTypeExtendedIpv6TunnelIngressFlow SFlowFlowRecordType = 1026
+ SFlowTypeExtendedDecapsulateEgressFlow SFlowFlowRecordType = 1027
+ SFlowTypeExtendedDecapsulateIngressFlow SFlowFlowRecordType = 1028
+ SFlowTypeExtendedVniEgressFlow SFlowFlowRecordType = 1029
+ SFlowTypeExtendedVniIngressFlow SFlowFlowRecordType = 1030
+)
+
+func (rt SFlowFlowRecordType) String() string {
+ switch rt {
+ case SFlowTypeRawPacketFlow:
+ return "Raw Packet Flow Record"
+ case SFlowTypeEthernetFrameFlow:
+ return "Ethernet Frame Flow Record"
+ case SFlowTypeIpv4Flow:
+ return "IPv4 Flow Record"
+ case SFlowTypeIpv6Flow:
+ return "IPv6 Flow Record"
+ case SFlowTypeExtendedSwitchFlow:
+ return "Extended Switch Flow Record"
+ case SFlowTypeExtendedRouterFlow:
+ return "Extended Router Flow Record"
+ case SFlowTypeExtendedGatewayFlow:
+ return "Extended Gateway Flow Record"
+ case SFlowTypeExtendedUserFlow:
+ return "Extended User Flow Record"
+ case SFlowTypeExtendedUrlFlow:
+ return "Extended URL Flow Record"
+ case SFlowTypeExtendedMlpsFlow:
+ return "Extended MPLS Flow Record"
+ case SFlowTypeExtendedNatFlow:
+ return "Extended NAT Flow Record"
+ case SFlowTypeExtendedMlpsTunnelFlow:
+ return "Extended MPLS Tunnel Flow Record"
+ case SFlowTypeExtendedMlpsVcFlow:
+ return "Extended MPLS VC Flow Record"
+ case SFlowTypeExtendedMlpsFecFlow:
+ return "Extended MPLS FEC Flow Record"
+ case SFlowTypeExtendedMlpsLvpFecFlow:
+ return "Extended MPLS LVP FEC Flow Record"
+ case SFlowTypeExtendedVlanFlow:
+ return "Extended VLAN Flow Record"
+ case SFlowTypeExtendedIpv4TunnelEgressFlow:
+ return "Extended IPv4 Tunnel Egress Record"
+ case SFlowTypeExtendedIpv4TunnelIngressFlow:
+ return "Extended IPv4 Tunnel Ingress Record"
+ case SFlowTypeExtendedIpv6TunnelEgressFlow:
+ return "Extended IPv6 Tunnel Egress Record"
+ case SFlowTypeExtendedIpv6TunnelIngressFlow:
+ return "Extended IPv6 Tunnel Ingress Record"
+ case SFlowTypeExtendedDecapsulateEgressFlow:
+ return "Extended Decapsulate Egress Record"
+ case SFlowTypeExtendedDecapsulateIngressFlow:
+ return "Extended Decapsulate Ingress Record"
+ case SFlowTypeExtendedVniEgressFlow:
+		return "Extended VNI Egress Record"
+ case SFlowTypeExtendedVniIngressFlow:
+ return "Extended VNI Ingress Record"
+ default:
+ return ""
+ }
+}
+
+// SFlowRawPacketFlowRecord holds information about a sampled
+// packet grabbed as it transited the agent. This is
+// perhaps the most useful and interesting record type,
+// as it holds the headers of the sampled packet and
+// can be used to build up a complete picture of the
+// traffic patterns on a network.
+//
+// The raw packet header is fed back into gopacket for
+// decoding, and the resulting gopacket.Packet is stored
+// in the Header member.
+type SFlowRawPacketFlowRecord struct {
+ SFlowBaseFlowRecord
+ HeaderProtocol SFlowRawHeaderProtocol
+ FrameLength uint32
+ PayloadRemoved uint32
+ HeaderLength uint32
+ Header gopacket.Packet
+}
+
+// Raw packet record types have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Header Protocol |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Frame Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Payload Removed |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Header Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// \ Header \
+// \ \
+// \ \
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowRawHeaderProtocol uint32
+
+const (
+ SFlowProtoEthernet SFlowRawHeaderProtocol = 1
+ SFlowProtoISO88024 SFlowRawHeaderProtocol = 2
+ SFlowProtoISO88025 SFlowRawHeaderProtocol = 3
+ SFlowProtoFDDI SFlowRawHeaderProtocol = 4
+ SFlowProtoFrameRelay SFlowRawHeaderProtocol = 5
+ SFlowProtoX25 SFlowRawHeaderProtocol = 6
+ SFlowProtoPPP SFlowRawHeaderProtocol = 7
+ SFlowProtoSMDS SFlowRawHeaderProtocol = 8
+ SFlowProtoAAL5 SFlowRawHeaderProtocol = 9
+ SFlowProtoAAL5_IP SFlowRawHeaderProtocol = 10 /* e.g. Cisco AAL5 mux */
+ SFlowProtoIPv4 SFlowRawHeaderProtocol = 11
+ SFlowProtoIPv6 SFlowRawHeaderProtocol = 12
+ SFlowProtoMPLS SFlowRawHeaderProtocol = 13
+ SFlowProtoPOS SFlowRawHeaderProtocol = 14 /* RFC 1662, 2615 */
+)
+
+func (sfhp SFlowRawHeaderProtocol) String() string {
+ switch sfhp {
+ case SFlowProtoEthernet:
+ return "ETHERNET-ISO88023"
+ case SFlowProtoISO88024:
+ return "ISO88024-TOKENBUS"
+ case SFlowProtoISO88025:
+ return "ISO88025-TOKENRING"
+ case SFlowProtoFDDI:
+ return "FDDI"
+ case SFlowProtoFrameRelay:
+ return "FRAME-RELAY"
+ case SFlowProtoX25:
+ return "X25"
+ case SFlowProtoPPP:
+ return "PPP"
+ case SFlowProtoSMDS:
+ return "SMDS"
+ case SFlowProtoAAL5:
+ return "AAL5"
+ case SFlowProtoAAL5_IP:
+ return "AAL5-IP"
+ case SFlowProtoIPv4:
+ return "IPv4"
+ case SFlowProtoIPv6:
+ return "IPv6"
+ case SFlowProtoMPLS:
+ return "MPLS"
+ case SFlowProtoPOS:
+ return "POS"
+ }
+ return "UNKNOWN"
+}
+
+func decodeRawPacketFlowRecord(data *[]byte) (SFlowRawPacketFlowRecord, error) {
+ rec := SFlowRawPacketFlowRecord{}
+ header := []byte{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.HeaderProtocol = (*data)[4:], SFlowRawHeaderProtocol(binary.BigEndian.Uint32((*data)[:4]))
+ *data, rec.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.PayloadRemoved = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.HeaderLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ headerLenWithPadding := int(rec.HeaderLength + ((4 - rec.HeaderLength) % 4))
+ *data, header = (*data)[headerLenWithPadding:], (*data)[:headerLenWithPadding]
+ rec.Header = gopacket.NewPacket(header, LayerTypeEthernet, gopacket.Default)
+ return rec, nil
+}
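+
+// Since Header is a fully decoded gopacket.Packet, the sampled headers
+// can be inspected like any other capture. A usage sketch (illustrative
+// only):
+//
+//	if l := rec.Header.Layer(LayerTypeIPv4); l != nil {
+//		ip := l.(*IPv4)
+//		fmt.Println("sampled flow:", ip.SrcIP, "->", ip.DstIP)
+//	}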
+
+// SFlowExtendedSwitchFlowRecord gives additional information
+// about the sampled packet if it's available. It's mainly
+// useful for getting at the incoming and outgoing VLANs.
+// An agent may or may not provide this information.
+type SFlowExtendedSwitchFlowRecord struct {
+ SFlowBaseFlowRecord
+ IncomingVLAN uint32
+ IncomingVLANPriority uint32
+ OutgoingVLAN uint32
+ OutgoingVLANPriority uint32
+}
+
+// Extended switch records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Incoming VLAN |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Incoming VLAN Priority |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Outgoing VLAN |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Outgoing VLAN Priority |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeExtendedSwitchFlowRecord(data *[]byte) (SFlowExtendedSwitchFlowRecord, error) {
+ es := SFlowExtendedSwitchFlowRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ es.EnterpriseID, es.Format = fdf.decode()
+ *data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.IncomingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.IncomingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.OutgoingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, es.OutgoingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return es, nil
+}
+
+// SFlowExtendedRouterFlowRecord gives additional information
+// about the layer 3 routing information used to forward
+// the packet
+type SFlowExtendedRouterFlowRecord struct {
+ SFlowBaseFlowRecord
+ NextHop net.IP
+ NextHopSourceMask uint32
+ NextHopDestinationMask uint32
+}
+
+// Extended router records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IP version of next hop router (1=v4|2=v6) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Next Hop address (v4=4byte|v6=16byte) /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Next Hop Source Mask |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Next Hop Destination Mask |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeExtendedRouterFlowRecord(data *[]byte) (SFlowExtendedRouterFlowRecord, error) {
+ er := SFlowExtendedRouterFlowRecord{}
+ var fdf SFlowFlowDataFormat
+ var extendedRouterAddressType SFlowIPType
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ er.EnterpriseID, er.Format = fdf.decode()
+ *data, er.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, extendedRouterAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+ *data, er.NextHop = (*data)[extendedRouterAddressType.Length():], (*data)[:extendedRouterAddressType.Length()]
+ *data, er.NextHopSourceMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, er.NextHopDestinationMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return er, nil
+}
+
+// SFlowExtendedGatewayFlowRecord describes information treasured by
+// network engineers everywhere: AS path information listing which
+// BGP peer sent the packet, and various other BGP-related info.
+// This information is vital because it gives a picture of how much
+// traffic is being sent from / received by various BGP peers.
+
+// Extended gateway records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IP version of next hop router (1=v4|2=v6) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Next Hop address (v4=4byte|v6=16byte) /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | AS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source AS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Peer AS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | AS Path Count |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / AS Path / Sequence /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Communities /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Local Pref |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// AS Path / Sequence:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      AS Source Type (Set=1 / Sequence=2)      |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Path / Sequence length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Path / Sequence Members /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Communities:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |               community length                |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// /               community Members               /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowExtendedGatewayFlowRecord struct {
+ SFlowBaseFlowRecord
+ NextHop net.IP
+ AS uint32
+ SourceAS uint32
+ PeerAS uint32
+ ASPathCount uint32
+ ASPath []SFlowASDestination
+ Communities []uint32
+ LocalPref uint32
+}
+
+type SFlowASPathType uint32
+
+const (
+ SFlowASSet SFlowASPathType = 1
+ SFlowASSequence SFlowASPathType = 2
+)
+
+func (apt SFlowASPathType) String() string {
+ switch apt {
+ case SFlowASSet:
+ return "AS Set"
+ case SFlowASSequence:
+ return "AS Sequence"
+ default:
+ return ""
+ }
+}
+
+type SFlowASDestination struct {
+ Type SFlowASPathType
+ Count uint32
+ Members []uint32
+}
+
+func (asd SFlowASDestination) String() string {
+ switch asd.Type {
+ case SFlowASSet:
+ return fmt.Sprint("AS Set:", asd.Members)
+ case SFlowASSequence:
+ return fmt.Sprint("AS Sequence:", asd.Members)
+ default:
+ return ""
+ }
+}
+
+func (ad *SFlowASDestination) decodePath(data *[]byte) {
+ *data, ad.Type = (*data)[4:], SFlowASPathType(binary.BigEndian.Uint32((*data)[:4]))
+ *data, ad.Count = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ ad.Members = make([]uint32, ad.Count)
+ for i := uint32(0); i < ad.Count; i++ {
+ var member uint32
+ *data, member = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ ad.Members[i] = member
+ }
+}
+
+func decodeExtendedGatewayFlowRecord(data *[]byte) (SFlowExtendedGatewayFlowRecord, error) {
+ eg := SFlowExtendedGatewayFlowRecord{}
+ var fdf SFlowFlowDataFormat
+ var extendedGatewayAddressType SFlowIPType
+ var communitiesLength uint32
+ var community uint32
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ eg.EnterpriseID, eg.Format = fdf.decode()
+ *data, eg.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, extendedGatewayAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+ *data, eg.NextHop = (*data)[extendedGatewayAddressType.Length():], (*data)[:extendedGatewayAddressType.Length()]
+ *data, eg.AS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eg.SourceAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eg.PeerAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eg.ASPathCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ for i := uint32(0); i < eg.ASPathCount; i++ {
+ asPath := SFlowASDestination{}
+ asPath.decodePath(data)
+ eg.ASPath = append(eg.ASPath, asPath)
+ }
+ *data, communitiesLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ eg.Communities = make([]uint32, communitiesLength)
+ for j := uint32(0); j < communitiesLength; j++ {
+ *data, community = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ eg.Communities[j] = community
+ }
+ *data, eg.LocalPref = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return eg, nil
+}
+
+// **************************************************
+// Extended URL Flow Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | direction |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | URL |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Host |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowURLDirection uint32
+
+const (
+ SFlowURLsrc SFlowURLDirection = 1
+ SFlowURLdst SFlowURLDirection = 2
+)
+
+func (urld SFlowURLDirection) String() string {
+ switch urld {
+ case SFlowURLsrc:
+ return "Source address is the server"
+ case SFlowURLdst:
+ return "Destination address is the server"
+ default:
+ return ""
+ }
+}
+
+type SFlowExtendedURLRecord struct {
+ SFlowBaseFlowRecord
+ Direction SFlowURLDirection
+ URL string
+ Host string
+}
+
+func decodeExtendedURLRecord(data *[]byte) (SFlowExtendedURLRecord, error) {
+ eur := SFlowExtendedURLRecord{}
+ var fdf SFlowFlowDataFormat
+ var urlLen uint32
+ var urlLenWithPad int
+ var hostLen uint32
+ var hostLenWithPad int
+ var urlBytes []byte
+ var hostBytes []byte
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ eur.EnterpriseID, eur.Format = fdf.decode()
+ *data, eur.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eur.Direction = (*data)[4:], SFlowURLDirection(binary.BigEndian.Uint32((*data)[:4]))
+ *data, urlLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ urlLenWithPad = int(urlLen + ((4 - urlLen) % 4))
+ *data, urlBytes = (*data)[urlLenWithPad:], (*data)[:urlLenWithPad]
+ eur.URL = string(urlBytes[:urlLen])
+ *data, hostLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ hostLenWithPad = int(hostLen + ((4 - hostLen) % 4))
+ *data, hostBytes = (*data)[hostLenWithPad:], (*data)[:hostLenWithPad]
+ eur.Host = string(hostBytes[:hostLen])
+ return eur, nil
+}
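+
+// The (4 - length) % 4 padding arithmetic above is safe only because
+// urlLen and hostLen are unsigned: for a 6-byte string, 4 - 6 wraps to
+// 2^32 - 2, and (2^32 - 2) % 4 == 2, the correct pad to the next
+// 4-byte boundary. The same idiom appears in decodeRawPacketFlowRecord
+// and decodeExtendedUserFlow.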
+
+// **************************************************
+// Extended User Flow Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Character Set |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |                Source User ID                 |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination Character Set |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination User ID |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowExtendedUserFlow struct {
+ SFlowBaseFlowRecord
+ SourceCharSet SFlowCharSet
+ SourceUserID string
+ DestinationCharSet SFlowCharSet
+ DestinationUserID string
+}
+
+type SFlowCharSet uint32
+
+const (
+ SFlowCSunknown SFlowCharSet = 2
+ SFlowCSASCII SFlowCharSet = 3
+ SFlowCSISOLatin1 SFlowCharSet = 4
+ SFlowCSISOLatin2 SFlowCharSet = 5
+ SFlowCSISOLatin3 SFlowCharSet = 6
+ SFlowCSISOLatin4 SFlowCharSet = 7
+ SFlowCSISOLatinCyrillic SFlowCharSet = 8
+ SFlowCSISOLatinArabic SFlowCharSet = 9
+ SFlowCSISOLatinGreek SFlowCharSet = 10
+ SFlowCSISOLatinHebrew SFlowCharSet = 11
+ SFlowCSISOLatin5 SFlowCharSet = 12
+ SFlowCSISOLatin6 SFlowCharSet = 13
+ SFlowCSISOTextComm SFlowCharSet = 14
+ SFlowCSHalfWidthKatakana SFlowCharSet = 15
+ SFlowCSJISEncoding SFlowCharSet = 16
+ SFlowCSShiftJIS SFlowCharSet = 17
+ SFlowCSEUCPkdFmtJapanese SFlowCharSet = 18
+ SFlowCSEUCFixWidJapanese SFlowCharSet = 19
+ SFlowCSISO4UnitedKingdom SFlowCharSet = 20
+ SFlowCSISO11SwedishForNames SFlowCharSet = 21
+ SFlowCSISO15Italian SFlowCharSet = 22
+ SFlowCSISO17Spanish SFlowCharSet = 23
+ SFlowCSISO21German SFlowCharSet = 24
+ SFlowCSISO60DanishNorwegian SFlowCharSet = 25
+ SFlowCSISO69French SFlowCharSet = 26
+ SFlowCSISO10646UTF1 SFlowCharSet = 27
+ SFlowCSISO646basic1983 SFlowCharSet = 28
+ SFlowCSINVARIANT SFlowCharSet = 29
+ SFlowCSISO2IntlRefVersion SFlowCharSet = 30
+ SFlowCSNATSSEFI SFlowCharSet = 31
+ SFlowCSNATSSEFIADD SFlowCharSet = 32
+ SFlowCSNATSDANO SFlowCharSet = 33
+ SFlowCSNATSDANOADD SFlowCharSet = 34
+ SFlowCSISO10Swedish SFlowCharSet = 35
+ SFlowCSKSC56011987 SFlowCharSet = 36
+ SFlowCSISO2022KR SFlowCharSet = 37
+ SFlowCSEUCKR SFlowCharSet = 38
+ SFlowCSISO2022JP SFlowCharSet = 39
+ SFlowCSISO2022JP2 SFlowCharSet = 40
+ SFlowCSISO13JISC6220jp SFlowCharSet = 41
+ SFlowCSISO14JISC6220ro SFlowCharSet = 42
+ SFlowCSISO16Portuguese SFlowCharSet = 43
+ SFlowCSISO18Greek7Old SFlowCharSet = 44
+ SFlowCSISO19LatinGreek SFlowCharSet = 45
+ SFlowCSISO25French SFlowCharSet = 46
+ SFlowCSISO27LatinGreek1 SFlowCharSet = 47
+ SFlowCSISO5427Cyrillic SFlowCharSet = 48
+ SFlowCSISO42JISC62261978 SFlowCharSet = 49
+ SFlowCSISO47BSViewdata SFlowCharSet = 50
+ SFlowCSISO49INIS SFlowCharSet = 51
+ SFlowCSISO50INIS8 SFlowCharSet = 52
+ SFlowCSISO51INISCyrillic SFlowCharSet = 53
+ SFlowCSISO54271981 SFlowCharSet = 54
+ SFlowCSISO5428Greek SFlowCharSet = 55
+ SFlowCSISO57GB1988 SFlowCharSet = 56
+ SFlowCSISO58GB231280 SFlowCharSet = 57
+ SFlowCSISO61Norwegian2 SFlowCharSet = 58
+ SFlowCSISO70VideotexSupp1 SFlowCharSet = 59
+ SFlowCSISO84Portuguese2 SFlowCharSet = 60
+ SFlowCSISO85Spanish2 SFlowCharSet = 61
+ SFlowCSISO86Hungarian SFlowCharSet = 62
+ SFlowCSISO87JISX0208 SFlowCharSet = 63
+ SFlowCSISO88Greek7 SFlowCharSet = 64
+ SFlowCSISO89ASMO449 SFlowCharSet = 65
+ SFlowCSISO90 SFlowCharSet = 66
+ SFlowCSISO91JISC62291984a SFlowCharSet = 67
+ SFlowCSISO92JISC62991984b SFlowCharSet = 68
+ SFlowCSISO93JIS62291984badd SFlowCharSet = 69
+ SFlowCSISO94JIS62291984hand SFlowCharSet = 70
+ SFlowCSISO95JIS62291984handadd SFlowCharSet = 71
+ SFlowCSISO96JISC62291984kana SFlowCharSet = 72
+ SFlowCSISO2033 SFlowCharSet = 73
+ SFlowCSISO99NAPLPS SFlowCharSet = 74
+ SFlowCSISO102T617bit SFlowCharSet = 75
+ SFlowCSISO103T618bit SFlowCharSet = 76
+ SFlowCSISO111ECMACyrillic SFlowCharSet = 77
+ SFlowCSa71 SFlowCharSet = 78
+ SFlowCSa72 SFlowCharSet = 79
+ SFlowCSISO123CSAZ24341985gr SFlowCharSet = 80
+ SFlowCSISO88596E SFlowCharSet = 81
+ SFlowCSISO88596I SFlowCharSet = 82
+ SFlowCSISO128T101G2 SFlowCharSet = 83
+ SFlowCSISO88598E SFlowCharSet = 84
+ SFlowCSISO88598I SFlowCharSet = 85
+ SFlowCSISO139CSN369103 SFlowCharSet = 86
+ SFlowCSISO141JUSIB1002 SFlowCharSet = 87
+ SFlowCSISO143IECP271 SFlowCharSet = 88
+ SFlowCSISO146Serbian SFlowCharSet = 89
+ SFlowCSISO147Macedonian SFlowCharSet = 90
+ SFlowCSISO150 SFlowCharSet = 91
+ SFlowCSISO151Cuba SFlowCharSet = 92
+ SFlowCSISO6937Add SFlowCharSet = 93
+ SFlowCSISO153GOST1976874 SFlowCharSet = 94
+ SFlowCSISO8859Supp SFlowCharSet = 95
+ SFlowCSISO10367Box SFlowCharSet = 96
+ SFlowCSISO158Lap SFlowCharSet = 97
+ SFlowCSISO159JISX02121990 SFlowCharSet = 98
+ SFlowCSISO646Danish SFlowCharSet = 99
+ SFlowCSUSDK SFlowCharSet = 100
+ SFlowCSDKUS SFlowCharSet = 101
+ SFlowCSKSC5636 SFlowCharSet = 102
+ SFlowCSUnicode11UTF7 SFlowCharSet = 103
+ SFlowCSISO2022CN SFlowCharSet = 104
+ SFlowCSISO2022CNEXT SFlowCharSet = 105
+ SFlowCSUTF8 SFlowCharSet = 106
+ SFlowCSISO885913 SFlowCharSet = 109
+ SFlowCSISO885914 SFlowCharSet = 110
+ SFlowCSISO885915 SFlowCharSet = 111
+ SFlowCSISO885916 SFlowCharSet = 112
+ SFlowCSGBK SFlowCharSet = 113
+ SFlowCSGB18030 SFlowCharSet = 114
+ SFlowCSOSDEBCDICDF0415 SFlowCharSet = 115
+ SFlowCSOSDEBCDICDF03IRV SFlowCharSet = 116
+ SFlowCSOSDEBCDICDF041 SFlowCharSet = 117
+ SFlowCSISO115481 SFlowCharSet = 118
+ SFlowCSKZ1048 SFlowCharSet = 119
+ SFlowCSUnicode SFlowCharSet = 1000
+ SFlowCSUCS4 SFlowCharSet = 1001
+ SFlowCSUnicodeASCII SFlowCharSet = 1002
+ SFlowCSUnicodeLatin1 SFlowCharSet = 1003
+ SFlowCSUnicodeJapanese SFlowCharSet = 1004
+ SFlowCSUnicodeIBM1261 SFlowCharSet = 1005
+ SFlowCSUnicodeIBM1268 SFlowCharSet = 1006
+ SFlowCSUnicodeIBM1276 SFlowCharSet = 1007
+ SFlowCSUnicodeIBM1264 SFlowCharSet = 1008
+ SFlowCSUnicodeIBM1265 SFlowCharSet = 1009
+ SFlowCSUnicode11 SFlowCharSet = 1010
+ SFlowCSSCSU SFlowCharSet = 1011
+ SFlowCSUTF7 SFlowCharSet = 1012
+ SFlowCSUTF16BE SFlowCharSet = 1013
+ SFlowCSUTF16LE SFlowCharSet = 1014
+ SFlowCSUTF16 SFlowCharSet = 1015
+ SFlowCSCESU8 SFlowCharSet = 1016
+ SFlowCSUTF32 SFlowCharSet = 1017
+ SFlowCSUTF32BE SFlowCharSet = 1018
+ SFlowCSUTF32LE SFlowCharSet = 1019
+ SFlowCSBOCU1 SFlowCharSet = 1020
+ SFlowCSWindows30Latin1 SFlowCharSet = 2000
+ SFlowCSWindows31Latin1 SFlowCharSet = 2001
+ SFlowCSWindows31Latin2 SFlowCharSet = 2002
+ SFlowCSWindows31Latin5 SFlowCharSet = 2003
+ SFlowCSHPRoman8 SFlowCharSet = 2004
+ SFlowCSAdobeStandardEncoding SFlowCharSet = 2005
+ SFlowCSVenturaUS SFlowCharSet = 2006
+ SFlowCSVenturaInternational SFlowCharSet = 2007
+ SFlowCSDECMCS SFlowCharSet = 2008
+ SFlowCSPC850Multilingual SFlowCharSet = 2009
+ SFlowCSPCp852 SFlowCharSet = 2010
+ SFlowCSPC8CodePage437 SFlowCharSet = 2011
+ SFlowCSPC8DanishNorwegian SFlowCharSet = 2012
+ SFlowCSPC862LatinHebrew SFlowCharSet = 2013
+ SFlowCSPC8Turkish SFlowCharSet = 2014
+ SFlowCSIBMSymbols SFlowCharSet = 2015
+ SFlowCSIBMThai SFlowCharSet = 2016
+ SFlowCSHPLegal SFlowCharSet = 2017
+ SFlowCSHPPiFont SFlowCharSet = 2018
+ SFlowCSHPMath8 SFlowCharSet = 2019
+ SFlowCSHPPSMath SFlowCharSet = 2020
+ SFlowCSHPDesktop SFlowCharSet = 2021
+ SFlowCSVenturaMath SFlowCharSet = 2022
+ SFlowCSMicrosoftPublishing SFlowCharSet = 2023
+ SFlowCSWindows31J SFlowCharSet = 2024
+ SFlowCSGB2312 SFlowCharSet = 2025
+ SFlowCSBig5 SFlowCharSet = 2026
+ SFlowCSMacintosh SFlowCharSet = 2027
+ SFlowCSIBM037 SFlowCharSet = 2028
+ SFlowCSIBM038 SFlowCharSet = 2029
+ SFlowCSIBM273 SFlowCharSet = 2030
+ SFlowCSIBM274 SFlowCharSet = 2031
+ SFlowCSIBM275 SFlowCharSet = 2032
+ SFlowCSIBM277 SFlowCharSet = 2033
+ SFlowCSIBM278 SFlowCharSet = 2034
+ SFlowCSIBM280 SFlowCharSet = 2035
+ SFlowCSIBM281 SFlowCharSet = 2036
+ SFlowCSIBM284 SFlowCharSet = 2037
+ SFlowCSIBM285 SFlowCharSet = 2038
+ SFlowCSIBM290 SFlowCharSet = 2039
+ SFlowCSIBM297 SFlowCharSet = 2040
+ SFlowCSIBM420 SFlowCharSet = 2041
+ SFlowCSIBM423 SFlowCharSet = 2042
+ SFlowCSIBM424 SFlowCharSet = 2043
+ SFlowCSIBM500 SFlowCharSet = 2044
+ SFlowCSIBM851 SFlowCharSet = 2045
+ SFlowCSIBM855 SFlowCharSet = 2046
+ SFlowCSIBM857 SFlowCharSet = 2047
+ SFlowCSIBM860 SFlowCharSet = 2048
+ SFlowCSIBM861 SFlowCharSet = 2049
+ SFlowCSIBM863 SFlowCharSet = 2050
+ SFlowCSIBM864 SFlowCharSet = 2051
+ SFlowCSIBM865 SFlowCharSet = 2052
+ SFlowCSIBM868 SFlowCharSet = 2053
+ SFlowCSIBM869 SFlowCharSet = 2054
+ SFlowCSIBM870 SFlowCharSet = 2055
+ SFlowCSIBM871 SFlowCharSet = 2056
+ SFlowCSIBM880 SFlowCharSet = 2057
+ SFlowCSIBM891 SFlowCharSet = 2058
+ SFlowCSIBM903 SFlowCharSet = 2059
+ SFlowCSIBBM904 SFlowCharSet = 2060
+ SFlowCSIBM905 SFlowCharSet = 2061
+ SFlowCSIBM918 SFlowCharSet = 2062
+ SFlowCSIBM1026 SFlowCharSet = 2063
+ SFlowCSIBMEBCDICATDE SFlowCharSet = 2064
+ SFlowCSEBCDICATDEA SFlowCharSet = 2065
+ SFlowCSEBCDICCAFR SFlowCharSet = 2066
+ SFlowCSEBCDICDKNO SFlowCharSet = 2067
+ SFlowCSEBCDICDKNOA SFlowCharSet = 2068
+ SFlowCSEBCDICFISE SFlowCharSet = 2069
+ SFlowCSEBCDICFISEA SFlowCharSet = 2070
+ SFlowCSEBCDICFR SFlowCharSet = 2071
+ SFlowCSEBCDICIT SFlowCharSet = 2072
+ SFlowCSEBCDICPT SFlowCharSet = 2073
+ SFlowCSEBCDICES SFlowCharSet = 2074
+ SFlowCSEBCDICESA SFlowCharSet = 2075
+ SFlowCSEBCDICESS SFlowCharSet = 2076
+ SFlowCSEBCDICUK SFlowCharSet = 2077
+ SFlowCSEBCDICUS SFlowCharSet = 2078
+ SFlowCSUnknown8BiT SFlowCharSet = 2079
+ SFlowCSMnemonic SFlowCharSet = 2080
+ SFlowCSMnem SFlowCharSet = 2081
+ SFlowCSVISCII SFlowCharSet = 2082
+ SFlowCSVIQR SFlowCharSet = 2083
+ SFlowCSKOI8R SFlowCharSet = 2084
+ SFlowCSHZGB2312 SFlowCharSet = 2085
+ SFlowCSIBM866 SFlowCharSet = 2086
+ SFlowCSPC775Baltic SFlowCharSet = 2087
+ SFlowCSKOI8U SFlowCharSet = 2088
+ SFlowCSIBM00858 SFlowCharSet = 2089
+ SFlowCSIBM00924 SFlowCharSet = 2090
+ SFlowCSIBM01140 SFlowCharSet = 2091
+ SFlowCSIBM01141 SFlowCharSet = 2092
+ SFlowCSIBM01142 SFlowCharSet = 2093
+ SFlowCSIBM01143 SFlowCharSet = 2094
+ SFlowCSIBM01144 SFlowCharSet = 2095
+ SFlowCSIBM01145 SFlowCharSet = 2096
+ SFlowCSIBM01146 SFlowCharSet = 2097
+ SFlowCSIBM01147 SFlowCharSet = 2098
+ SFlowCSIBM01148 SFlowCharSet = 2099
+ SFlowCSIBM01149 SFlowCharSet = 2100
+ SFlowCSBig5HKSCS SFlowCharSet = 2101
+ SFlowCSIBM1047 SFlowCharSet = 2102
+ SFlowCSPTCP154 SFlowCharSet = 2103
+ SFlowCSAmiga1251 SFlowCharSet = 2104
+ SFlowCSKOI7switched SFlowCharSet = 2105
+ SFlowCSBRF SFlowCharSet = 2106
+ SFlowCSTSCII SFlowCharSet = 2107
+ SFlowCSCP51932 SFlowCharSet = 2108
+ SFlowCSWindows874 SFlowCharSet = 2109
+ SFlowCSWindows1250 SFlowCharSet = 2250
+ SFlowCSWindows1251 SFlowCharSet = 2251
+ SFlowCSWindows1252 SFlowCharSet = 2252
+ SFlowCSWindows1253 SFlowCharSet = 2253
+ SFlowCSWindows1254 SFlowCharSet = 2254
+ SFlowCSWindows1255 SFlowCharSet = 2255
+ SFlowCSWindows1256 SFlowCharSet = 2256
+ SFlowCSWindows1257 SFlowCharSet = 2257
+ SFlowCSWindows1258 SFlowCharSet = 2258
+ SFlowCSTIS620 SFlowCharSet = 2259
+ SFlowCS50220 SFlowCharSet = 2260
+ SFlowCSreserved SFlowCharSet = 3000
+)
+
+func decodeExtendedUserFlow(data *[]byte) (SFlowExtendedUserFlow, error) {
+ eu := SFlowExtendedUserFlow{}
+ var fdf SFlowFlowDataFormat
+ var srcUserLen uint32
+ var srcUserLenWithPad int
+ var srcUserBytes []byte
+ var dstUserLen uint32
+ var dstUserLenWithPad int
+ var dstUserBytes []byte
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ eu.EnterpriseID, eu.Format = fdf.decode()
+ *data, eu.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, eu.SourceCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+ *data, srcUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ srcUserLenWithPad = int(srcUserLen + ((4 - srcUserLen) % 4))
+ *data, srcUserBytes = (*data)[srcUserLenWithPad:], (*data)[:srcUserLenWithPad]
+ eu.SourceUserID = string(srcUserBytes[:srcUserLen])
+ *data, eu.DestinationCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+ *data, dstUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ dstUserLenWithPad = int(dstUserLen + ((4 - dstUserLen) % 4))
+ *data, dstUserBytes = (*data)[dstUserLenWithPad:], (*data)[:dstUserLenWithPad]
+ eu.DestinationUserID = string(dstUserBytes[:dstUserLen])
+ return eu, nil
+}
+
+// **************************************************
+// Packet IP version 4 Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Protocol |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source IPv4 |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination IPv4 |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Port |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |               Destination Port                |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TCP Flags |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TOS |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv4Record struct {
+	// The length of the IP packet excluding lower layer encapsulations
+ Length uint32
+ // IP Protocol type (for example, TCP = 6, UDP = 17)
+ Protocol uint32
+ // Source IP Address
+ IPSrc net.IP
+ // Destination IP Address
+ IPDst net.IP
+ // TCP/UDP source port number or equivalent
+ PortSrc uint32
+ // TCP/UDP destination port number or equivalent
+ PortDst uint32
+ // TCP flags
+ TCPFlags uint32
+ // IP type of service
+ TOS uint32
+}
+
+func decodeSFlowIpv4Record(data *[]byte) (SFlowIpv4Record, error) {
+ si := SFlowIpv4Record{}
+
+ *data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.IPSrc = (*data)[4:], net.IP((*data)[:4])
+ *data, si.IPDst = (*data)[4:], net.IP((*data)[:4])
+ *data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.TOS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return si, nil
+}
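+
+// Note that IPSrc and IPDst alias the packet buffer rather than copying
+// it: net.IP is a []byte, so the conversions above merely re-slice the
+// input. If a record must outlive the buffer, copy explicitly, e.g.
+// (illustrative only):
+//
+//	src := make(net.IP, len(si.IPSrc))
+//	copy(src, si.IPSrc)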
+
+// **************************************************
+// Packet IP version 6 Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Protocol |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |                  Source IPv6                  |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |               Destination IPv6                |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Port |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |               Destination Port                |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TCP Flags |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Priority |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv6Record struct {
+	// The length of the IP packet excluding lower layer encapsulations
+ Length uint32
+ // IP Protocol type (for example, TCP = 6, UDP = 17)
+ Protocol uint32
+ // Source IP Address
+ IPSrc net.IP
+ // Destination IP Address
+ IPDst net.IP
+ // TCP/UDP source port number or equivalent
+ PortSrc uint32
+ // TCP/UDP destination port number or equivalent
+ PortDst uint32
+ // TCP flags
+ TCPFlags uint32
+ // IP priority
+ Priority uint32
+}
+
+func decodeSFlowIpv6Record(data *[]byte) (SFlowIpv6Record, error) {
+ si := SFlowIpv6Record{}
+
+ *data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.IPSrc = (*data)[16:], net.IP((*data)[:16])
+ *data, si.IPDst = (*data)[16:], net.IP((*data)[:16])
+ *data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, si.Priority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return si, nil
+}
+
+// **************************************************
+// Extended IPv4 Tunnel Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 4 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelEgressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelEgress(data *[]byte) (SFlowExtendedIpv4TunnelEgressRecord, error) {
+ rec := SFlowExtendedIpv4TunnelEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended IPv4 Tunnel Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 4 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelIngressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelIngress(data *[]byte) (SFlowExtendedIpv4TunnelIngressRecord, error) {
+ rec := SFlowExtendedIpv4TunnelIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended IPv6 Tunnel Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 6 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelEgressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelEgress(data *[]byte) (SFlowExtendedIpv6TunnelEgressRecord, error) {
+ rec := SFlowExtendedIpv6TunnelEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended IPv6 Tunnel Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Packet IP version 6 Record /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelIngressRecord struct {
+ SFlowBaseFlowRecord
+ SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelIngress(data *[]byte) (SFlowExtendedIpv6TunnelIngressRecord, error) {
+ rec := SFlowExtendedIpv6TunnelIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended Decapsulate Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Inner Header Offset |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateEgressRecord struct {
+ SFlowBaseFlowRecord
+ InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateEgress(data *[]byte) (SFlowExtendedDecapsulateEgressRecord, error) {
+ rec := SFlowExtendedDecapsulateEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended Decapsulate Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Inner Header Offset |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateIngressRecord struct {
+ SFlowBaseFlowRecord
+ InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateIngress(data *[]byte) (SFlowExtendedDecapsulateIngressRecord, error) {
+ rec := SFlowExtendedDecapsulateIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended VNI Egress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | VNI |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniEgressRecord struct {
+ SFlowBaseFlowRecord
+ VNI uint32
+}
+
+func decodeExtendedVniEgress(data *[]byte) (SFlowExtendedVniEgressRecord, error) {
+ rec := SFlowExtendedVniEgressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Extended VNI Ingress
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | VNI |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniIngressRecord struct {
+ SFlowBaseFlowRecord
+ VNI uint32
+}
+
+func decodeExtendedVniIngress(data *[]byte) (SFlowExtendedVniIngressRecord, error) {
+ rec := SFlowExtendedVniIngressRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ rec.EnterpriseID, rec.Format = fdf.decode()
+ *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return rec, nil
+}
+
+// **************************************************
+// Counter Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / counter data /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowBaseCounterRecord struct {
+ EnterpriseID SFlowEnterpriseID
+ Format SFlowCounterRecordType
+ FlowDataLength uint32
+}
+
+func (bcr SFlowBaseCounterRecord) GetType() SFlowCounterRecordType {
+ switch bcr.Format {
+ case SFlowTypeGenericInterfaceCounters:
+ return SFlowTypeGenericInterfaceCounters
+ case SFlowTypeEthernetInterfaceCounters:
+ return SFlowTypeEthernetInterfaceCounters
+ case SFlowTypeTokenRingInterfaceCounters:
+ return SFlowTypeTokenRingInterfaceCounters
+ case SFlowType100BaseVGInterfaceCounters:
+ return SFlowType100BaseVGInterfaceCounters
+ case SFlowTypeVLANCounters:
+ return SFlowTypeVLANCounters
+ case SFlowTypeLACPCounters:
+ return SFlowTypeLACPCounters
+ case SFlowTypeProcessorCounters:
+ return SFlowTypeProcessorCounters
+ case SFlowTypeOpenflowPortCounters:
+ return SFlowTypeOpenflowPortCounters
+ case SFlowTypePORTNAMECounters:
+ return SFlowTypePORTNAMECounters
+ case SFLowTypeAPPRESOURCESCounters:
+ return SFLowTypeAPPRESOURCESCounters
+ case SFlowTypeOVSDPCounters:
+ return SFlowTypeOVSDPCounters
+ }
+ unrecognized := fmt.Sprint("Unrecognized counter record type:", bcr.Format)
+ panic(unrecognized)
+}
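+
+// Note that GetType panics on an unrecognized format instead of
+// returning an error. A caller decoding untrusted input may want a
+// recover guard; a sketch, assuming a named error return err in the
+// enclosing function (illustrative only):
+//
+//	defer func() {
+//		if r := recover(); r != nil {
+//			err = fmt.Errorf("bad counter record: %v", r)
+//		}
+//	}()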
+
+// **************************************************
+// Generic Interface Counter Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfType |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfSpeed |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfDirection |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfStatus |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |                  IfInOctets                   |
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInUcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInMulticastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInBroadcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInDiscards |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |                  IfInErrors                   |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfInUnknownProtos |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutOctets |
+// | |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutUcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutMulticastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutBroadcastPkts |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | IfOutDiscards |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |                  IfOutErrors                  |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |               IfPromiscuousMode               |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowGenericInterfaceCounters struct {
+ SFlowBaseCounterRecord
+ IfIndex uint32
+ IfType uint32
+ IfSpeed uint64
+ IfDirection uint32
+ IfStatus uint32
+ IfInOctets uint64
+ IfInUcastPkts uint32
+ IfInMulticastPkts uint32
+ IfInBroadcastPkts uint32
+ IfInDiscards uint32
+ IfInErrors uint32
+ IfInUnknownProtos uint32
+ IfOutOctets uint64
+ IfOutUcastPkts uint32
+ IfOutMulticastPkts uint32
+ IfOutBroadcastPkts uint32
+ IfOutDiscards uint32
+ IfOutErrors uint32
+ IfPromiscuousMode uint32
+}
+
+func decodeGenericInterfaceCounters(data *[]byte) (SFlowGenericInterfaceCounters, error) {
+ gic := SFlowGenericInterfaceCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ gic.EnterpriseID, gic.Format = cdf.decode()
+ *data, gic.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfIndex = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfType = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfSpeed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, gic.IfDirection = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfStatus = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, gic.IfInUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfInUnknownProtos = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, gic.IfOutUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfOutErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, gic.IfPromiscuousMode = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return gic, nil
+}
+
+// **************************************************
+// Counter Record
+// **************************************************
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / counter data /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowEthernetCounters struct {
+ SFlowBaseCounterRecord
+ AlignmentErrors uint32
+ FCSErrors uint32
+ SingleCollisionFrames uint32
+ MultipleCollisionFrames uint32
+ SQETestErrors uint32
+ DeferredTransmissions uint32
+ LateCollisions uint32
+ ExcessiveCollisions uint32
+ InternalMacTransmitErrors uint32
+ CarrierSenseErrors uint32
+ FrameTooLongs uint32
+ InternalMacReceiveErrors uint32
+ SymbolErrors uint32
+}
+
+func decodeEthernetCounters(data *[]byte) (SFlowEthernetCounters, error) {
+ ec := SFlowEthernetCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ ec.EnterpriseID, ec.Format = cdf.decode()
+ *data, ec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.AlignmentErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.FCSErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.SingleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.MultipleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.SQETestErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.DeferredTransmissions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.LateCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.ExcessiveCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.InternalMacTransmitErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.CarrierSenseErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.FrameTooLongs = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.InternalMacReceiveErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ec.SymbolErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return ec, nil
+}
+
+// VLAN Counter
+
+type SFlowVLANCounters struct {
+ SFlowBaseCounterRecord
+ VlanID uint32
+ Octets uint64
+ UcastPkts uint32
+ MulticastPkts uint32
+ BroadcastPkts uint32
+ Discards uint32
+}
+
+func decodeVLANCounters(data *[]byte) (SFlowVLANCounters, error) {
+ vc := SFlowVLANCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ vc.EnterpriseID, vc.Format = cdf.decode()
+ *data, vc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.VlanID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.Octets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, vc.UcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.MulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.BroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, vc.Discards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return vc, nil
+}
+
+// SFLLACPPortState : SFlow LACP Port State (All(4) - 32 bit)
+type SFLLACPPortState struct {
+ PortStateAll uint32
+}
+
+// SFlowLACPCounters : LACP SFlow Counters (64 bytes)
+type SFlowLACPCounters struct {
+ SFlowBaseCounterRecord
+ ActorSystemID net.HardwareAddr
+ PartnerSystemID net.HardwareAddr
+ AttachedAggID uint32
+ LacpPortState SFLLACPPortState
+ LACPDUsRx uint32
+ MarkerPDUsRx uint32
+ MarkerResponsePDUsRx uint32
+ UnknownRx uint32
+ IllegalRx uint32
+ LACPDUsTx uint32
+ MarkerPDUsTx uint32
+ MarkerResponsePDUsTx uint32
+}
+
+func decodeLACPCounters(data *[]byte) (SFlowLACPCounters, error) {
+ la := SFlowLACPCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ la.EnterpriseID, la.Format = cdf.decode()
+ *data, la.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.ActorSystemID = (*data)[6:], (*data)[:6]
+ *data = (*data)[2:] // remove padding
+ *data, la.PartnerSystemID = (*data)[6:], (*data)[:6]
+	*data = (*data)[2:] // remove padding
+ *data, la.AttachedAggID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.LacpPortState.PortStateAll = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.LACPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerResponsePDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.UnknownRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.IllegalRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.LACPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, la.MarkerResponsePDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return la, nil
+}
+
+// **************************************************
+// Processor Counter Record
+// **************************************************
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | counter length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | FiveSecCpu |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | OneMinCpu |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |                  FiveMinCpu                   |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | TotalMemory |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | FreeMemory |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowProcessorCounters struct {
+ SFlowBaseCounterRecord
+ FiveSecCpu uint32 // 5 second average CPU utilization
+ OneMinCpu uint32 // 1 minute average CPU utilization
+ FiveMinCpu uint32 // 5 minute average CPU utilization
+ TotalMemory uint64 // total memory (in bytes)
+ FreeMemory uint64 // free memory (in bytes)
+}
+
+func decodeProcessorCounters(data *[]byte) (SFlowProcessorCounters, error) {
+ pc := SFlowProcessorCounters{}
+ var cdf SFlowCounterDataFormat
+ var high32, low32 uint32
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ pc.EnterpriseID, pc.Format = cdf.decode()
+ *data, pc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ *data, pc.FiveSecCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, pc.OneMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, pc.FiveMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
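+	// TotalMemory and FreeMemory are 64-bit quantities carried as two 32-bit
+	// big-endian halves; reassemble each one as (high << 32) + low.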
+ *data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ pc.TotalMemory = (uint64(high32) << 32) + uint64(low32)
+ *data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pc.FreeMemory = (uint64(high32) << 32) + uint64(low32)
+
+ return pc, nil
+}
+
+// SFlowEthernetFrameFlowRecord gives additional information
+// about the sampled packet if it's available.
+// An agent may or may not provide this information.
+type SFlowEthernetFrameFlowRecord struct {
+ SFlowBaseFlowRecord
+ FrameLength uint32
+ SrcMac net.HardwareAddr
+ DstMac net.HardwareAddr
+ Type uint32
+}
+
+// Ethernet frame flow records have the following structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |      20 bit Enterprise (0)     |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | record length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Source Mac Address |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Destination Mac Address |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | Ethernet Packet Type |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeEthernetFrameFlowRecord(data *[]byte) (SFlowEthernetFrameFlowRecord, error) {
+ es := SFlowEthernetFrameFlowRecord{}
+ var fdf SFlowFlowDataFormat
+
+ *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ es.EnterpriseID, es.Format = fdf.decode()
+ *data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ *data, es.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
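+	// Each MAC address is 6 bytes of data padded out to 8 bytes on the wire.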
+ *data, es.SrcMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+ *data, es.DstMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+ *data, es.Type = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ return es, nil
+}
+
+// SFlowOpenflowPortCounters : OVS-Sflow OpenFlow Port Counter (20 bytes)
+type SFlowOpenflowPortCounters struct {
+ SFlowBaseCounterRecord
+ DatapathID uint64
+ PortNo uint32
+}
+
+func decodeOpenflowportCounters(data *[]byte) (SFlowOpenflowPortCounters, error) {
+ ofp := SFlowOpenflowPortCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ ofp.EnterpriseID, ofp.Format = cdf.decode()
+ *data, ofp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, ofp.DatapathID = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, ofp.PortNo = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return ofp, nil
+}
+
+// SFlowAppresourcesCounters : OVS-Sflow App Resources Counter (48 bytes)
+type SFlowAppresourcesCounters struct {
+ SFlowBaseCounterRecord
+ UserTime uint32
+ SystemTime uint32
+ MemUsed uint64
+ MemMax uint64
+ FdOpen uint32
+ FdMax uint32
+ ConnOpen uint32
+ ConnMax uint32
+}
+
+func decodeAppresourcesCounters(data *[]byte) (SFlowAppresourcesCounters, error) {
+ app := SFlowAppresourcesCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ app.EnterpriseID, app.Format = cdf.decode()
+ *data, app.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.UserTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.SystemTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.MemUsed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, app.MemMax = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+ *data, app.FdOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.FdMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.ConnOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, app.ConnMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return app, nil
+}
+
+// SFlowOVSDPCounters : OVS-Sflow DataPath Counter (32 bytes)
+type SFlowOVSDPCounters struct {
+ SFlowBaseCounterRecord
+ NHit uint32
+ NMissed uint32
+ NLost uint32
+ NMaskHit uint32
+ NFlows uint32
+ NMasks uint32
+}
+
+func decodeOVSDPCounters(data *[]byte) (SFlowOVSDPCounters, error) {
+ dp := SFlowOVSDPCounters{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ dp.EnterpriseID, dp.Format = cdf.decode()
+ *data, dp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NMissed = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NLost = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NMaskHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NFlows = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, dp.NMasks = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ return dp, nil
+}
+
+// SFlowPORTNAME : OVS-Sflow PORTNAME Counter Sample type (20 bytes)
+type SFlowPORTNAME struct {
+ SFlowBaseCounterRecord
+ Len uint32
+ Str string
+}
+
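+// decodeString reads an XDR-style string: a 4-byte big-endian length followed
+// by the string bytes, padded out to the next 4-byte boundary. It advances
+// *data past the padding as well.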
+func decodeString(data *[]byte) (length uint32, str string) {
+	*data, length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	str = string((*data)[:length])
+	if length%4 != 0 {
+		length += 4 - length%4
+	}
+	*data = (*data)[length:]
+	return
+}
+
+func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) {
+ pn := SFlowPORTNAME{}
+ var cdf SFlowCounterDataFormat
+
+ *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ pn.EnterpriseID, pn.Format = cdf.decode()
+ *data, pn.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ pn.Len, pn.Str = decodeString(data)
+
+ return pn, nil
+}
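+
+// Example (editor's sketch, not part of the upstream API): driving one of the
+// counter decoders by hand. The hypothetical raw slice is assumed to start at
+// a generic interface counter record header; the decoder consumes it in place.
+//
+//	raw := append([]byte(nil), capturedCounterRecord...)
+//	gic, err := decodeGenericInterfaceCounters(&raw)
+//	if err == nil {
+//		fmt.Println(gic.IfIndex, gic.IfInOctets, gic.IfOutOctets)
+//	}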
diff --git a/vendor/github.com/google/gopacket/layers/sip.go b/vendor/github.com/google/gopacket/layers/sip.go
new file mode 100644
index 0000000..5403688
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sip.go
@@ -0,0 +1,546 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/google/gopacket"
+)
+
+// SIPVersion defines the different versions of the SIP Protocol
+type SIPVersion uint8
+
+// Represents all the versions of SIP protocol
+const (
+ SIPVersion1 SIPVersion = 1
+ SIPVersion2 SIPVersion = 2
+)
+
+func (sv SIPVersion) String() string {
+ switch sv {
+ default:
+ // Defaulting to SIP/2.0
+ return "SIP/2.0"
+ case SIPVersion1:
+ return "SIP/1.0"
+ case SIPVersion2:
+ return "SIP/2.0"
+ }
+}
+
+// GetSIPVersion is used to get the SIP version constant
+func GetSIPVersion(version string) (SIPVersion, error) {
+ switch strings.ToUpper(version) {
+ case "SIP/1.0":
+ return SIPVersion1, nil
+ case "SIP/2.0":
+ return SIPVersion2, nil
+ default:
+ return 0, fmt.Errorf("Unknown SIP version: '%s'", version)
+
+ }
+}
+
+// SIPMethod defines the different methods of the SIP Protocol
+// defined in the different RFC's
+type SIPMethod uint16
+
+// Here are all the SIP methods
+const (
+ SIPMethodInvite SIPMethod = 1 // INVITE [RFC3261]
+ SIPMethodAck SIPMethod = 2 // ACK [RFC3261]
+ SIPMethodBye SIPMethod = 3 // BYE [RFC3261]
+ SIPMethodCancel SIPMethod = 4 // CANCEL [RFC3261]
+ SIPMethodOptions SIPMethod = 5 // OPTIONS [RFC3261]
+ SIPMethodRegister SIPMethod = 6 // REGISTER [RFC3261]
+ SIPMethodPrack SIPMethod = 7 // PRACK [RFC3262]
+ SIPMethodSubscribe SIPMethod = 8 // SUBSCRIBE [RFC6665]
+ SIPMethodNotify SIPMethod = 9 // NOTIFY [RFC6665]
+ SIPMethodPublish SIPMethod = 10 // PUBLISH [RFC3903]
+ SIPMethodInfo SIPMethod = 11 // INFO [RFC6086]
+ SIPMethodRefer SIPMethod = 12 // REFER [RFC3515]
+ SIPMethodMessage SIPMethod = 13 // MESSAGE [RFC3428]
+ SIPMethodUpdate SIPMethod = 14 // UPDATE [RFC3311]
+ SIPMethodPing SIPMethod = 15 // PING [https://tools.ietf.org/html/draft-fwmiller-ping-03]
+)
+
+func (sm SIPMethod) String() string {
+ switch sm {
+ default:
+ return "Unknown method"
+ case SIPMethodInvite:
+ return "INVITE"
+ case SIPMethodAck:
+ return "ACK"
+ case SIPMethodBye:
+ return "BYE"
+ case SIPMethodCancel:
+ return "CANCEL"
+ case SIPMethodOptions:
+ return "OPTIONS"
+ case SIPMethodRegister:
+ return "REGISTER"
+ case SIPMethodPrack:
+ return "PRACK"
+ case SIPMethodSubscribe:
+ return "SUBSCRIBE"
+ case SIPMethodNotify:
+ return "NOTIFY"
+ case SIPMethodPublish:
+ return "PUBLISH"
+ case SIPMethodInfo:
+ return "INFO"
+ case SIPMethodRefer:
+ return "REFER"
+ case SIPMethodMessage:
+ return "MESSAGE"
+ case SIPMethodUpdate:
+ return "UPDATE"
+ case SIPMethodPing:
+ return "PING"
+ }
+}
+
+// GetSIPMethod returns the constant of a SIP method
+// from its string
+func GetSIPMethod(method string) (SIPMethod, error) {
+ switch strings.ToUpper(method) {
+ case "INVITE":
+ return SIPMethodInvite, nil
+ case "ACK":
+ return SIPMethodAck, nil
+ case "BYE":
+ return SIPMethodBye, nil
+ case "CANCEL":
+ return SIPMethodCancel, nil
+ case "OPTIONS":
+ return SIPMethodOptions, nil
+ case "REGISTER":
+ return SIPMethodRegister, nil
+ case "PRACK":
+ return SIPMethodPrack, nil
+ case "SUBSCRIBE":
+ return SIPMethodSubscribe, nil
+ case "NOTIFY":
+ return SIPMethodNotify, nil
+ case "PUBLISH":
+ return SIPMethodPublish, nil
+ case "INFO":
+ return SIPMethodInfo, nil
+ case "REFER":
+ return SIPMethodRefer, nil
+ case "MESSAGE":
+ return SIPMethodMessage, nil
+ case "UPDATE":
+ return SIPMethodUpdate, nil
+ case "PING":
+ return SIPMethodPing, nil
+ default:
+ return 0, fmt.Errorf("Unknown SIP method: '%s'", method)
+ }
+}
+
+// Here is a correspondence between long header names and their compact
+// forms, as defined in RFC 3261, section 20
+var compactSipHeadersCorrespondance = map[string]string{
+ "accept-contact": "a",
+ "allow-events": "u",
+ "call-id": "i",
+ "contact": "m",
+ "content-encoding": "e",
+ "content-length": "l",
+ "content-type": "c",
+ "event": "o",
+ "from": "f",
+ "identity": "y",
+ "refer-to": "r",
+ "referred-by": "b",
+ "reject-contact": "j",
+ "request-disposition": "d",
+ "session-expires": "x",
+ "subject": "s",
+ "supported": "k",
+ "to": "t",
+ "via": "v",
+}
+
+// SIP contains information about a decoded SIP packet:
+// -> The SIP Version
+// -> The SIP Headers (in a map[string][]string, since multiple headers can share a name)
+// -> The SIP Method
+// -> The SIP Response code (if it's a response)
+// -> The SIP Status line (if it's a response)
+// You can easily tell the type of the packet with the IsResponse boolean.
+//
+type SIP struct {
+ BaseLayer
+
+ // Base information
+ Version SIPVersion
+ Method SIPMethod
+ Headers map[string][]string
+
+ // Request
+ RequestURI string
+
+ // Response
+ IsResponse bool
+ ResponseCode int
+ ResponseStatus string
+
+ // Private fields
+ cseq int64
+ contentLength int64
+ lastHeaderParsed string
+}
+
+// decodeSIP decodes the byte slice into a SIP type. It also
+// sets up the application layer in PacketBuilder.
+func decodeSIP(data []byte, p gopacket.PacketBuilder) error {
+ s := NewSIP()
+ err := s.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(s)
+ p.SetApplicationLayer(s)
+ return nil
+}
+
+// NewSIP instantiates a new empty SIP object
+func NewSIP() *SIP {
+ s := new(SIP)
+ s.Headers = make(map[string][]string)
+ return s
+}
+
+// LayerType returns gopacket.LayerTypeSIP.
+func (s *SIP) LayerType() gopacket.LayerType {
+ return LayerTypeSIP
+}
+
+// Payload returns the base layer payload
+func (s *SIP) Payload() []byte {
+ return s.BaseLayer.Payload
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode
+func (s *SIP) CanDecode() gopacket.LayerClass {
+ return LayerTypeSIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer
+func (s *SIP) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the slice into the SIP struct.
+func (s *SIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+ // Init some vars for parsing follow-up
+ var countLines int
+ var line []byte
+ var err error
+
+ // Clean leading new line
+ data = bytes.Trim(data, "\n")
+
+	// Iterate over all lines of the SIP headers and stop when we reach the
+	// body, e.g. the SDP (that is, when the next line is empty at index 0
+	// of the remaining packet)
+ buffer := bytes.NewBuffer(data)
+
+ for {
+
+ // Read next line
+ line, err = buffer.ReadBytes(byte('\n'))
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return err
+ }
+ }
+
+ // Trim the new line delimiters
+ line = bytes.Trim(line, "\r\n")
+
+		// Empty line: we hit the body.
+		// Put the packet remainder in Payload
+ if len(line) == 0 {
+ s.BaseLayer.Payload = buffer.Bytes()
+ break
+ }
+
+ // First line is the SIP request/response line
+ // Other lines are headers
+ if countLines == 0 {
+ err = s.ParseFirstLine(line)
+ if err != nil {
+ return err
+ }
+
+ } else {
+ err = s.ParseHeader(line)
+ if err != nil {
+ return err
+ }
+ }
+
+ countLines++
+ }
+
+ return nil
+}
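+
+// Example (editor's sketch): decoding a standalone SIP request outside of a
+// full packet, using gopacket.NilDecodeFeedback to ignore truncation feedback.
+//
+//	s := NewSIP()
+//	raw := []byte("INVITE sip:bob@example.com SIP/2.0\r\nCSeq: 1 INVITE\r\n\r\n")
+//	if err := s.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err == nil {
+//		fmt.Println(s.Method, s.RequestURI) // INVITE sip:bob@example.com
+//	}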
+
+// ParseFirstLine parses the first line of a SIP packet.
+// The first line tells us whether it's a request or a response.
+//
+// Examples of first lines of the SIP protocol:
+//
+// Request  : INVITE bob@example.com SIP/2.0
+// Response : SIP/2.0 200 OK
+// Response : SIP/2.0 501 Not Implemented
+//
+func (s *SIP) ParseFirstLine(firstLine []byte) error {
+
+ var err error
+
+ // Splits line by space
+ splits := strings.SplitN(string(firstLine), " ", 3)
+
+ // We must have at least 3 parts
+ if len(splits) < 3 {
+ return fmt.Errorf("invalid first SIP line: '%s'", string(firstLine))
+ }
+
+ // Determine the SIP packet type
+ if strings.HasPrefix(splits[0], "SIP") {
+
+ // --> Response
+ s.IsResponse = true
+
+ // Validate SIP Version
+ s.Version, err = GetSIPVersion(splits[0])
+ if err != nil {
+ return err
+ }
+
+ // Compute code
+ s.ResponseCode, err = strconv.Atoi(splits[1])
+ if err != nil {
+ return err
+ }
+
+ // Compute status line
+ s.ResponseStatus = splits[2]
+
+ } else {
+
+ // --> Request
+
+ // Validate method
+ s.Method, err = GetSIPMethod(splits[0])
+ if err != nil {
+ return err
+ }
+
+ s.RequestURI = splits[1]
+
+ // Validate SIP Version
+ s.Version, err = GetSIPVersion(splits[2])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ParseHeader will parse a SIP header.
+// SIP headers are quite simple: a colon-separated name and value.
+// Headers can be spread over multiple lines
+//
+// Examples of header :
+//
+// CSeq: 1 REGISTER
+// Via: SIP/2.0/UDP there.com:5060
+// Authorization:Digest username="UserB",
+// realm="MCI WorldCom SIP",
+// nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="",
+// uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824"
+//
+func (s *SIP) ParseHeader(header []byte) (err error) {
+
+ // Ignore empty headers
+ if len(header) == 0 {
+ return
+ }
+
+	// Check if this is a continuation of the last header.
+	// RFC 3261, section 7.3.1 (Header Field Format) specifies that continuation
+	// lines of multiline headers must begin with SP or TAB
+ if header[0] == '\t' || header[0] == ' ' {
+
+ header = bytes.TrimSpace(header)
+ s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header))
+ return
+ }
+
+ // Find the ':' to separate header name and value
+ index := bytes.Index(header, []byte(":"))
+ if index >= 0 {
+
+ headerName := strings.ToLower(string(bytes.Trim(header[:index], " ")))
+ headerValue := string(bytes.Trim(header[index+1:], " "))
+
+ // Add header to object
+ s.Headers[headerName] = append(s.Headers[headerName], headerValue)
+ s.lastHeaderParsed = headerName
+
+ // Compute specific headers
+ err = s.ParseSpecificHeaders(headerName, headerValue)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ParseSpecificHeaders will parse values from specific headers,
+// such as the CSeq and Content-Length integer values
+func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) {
+
+ switch headerName {
+ case "cseq":
+
+		// A CSeq header value is formatted like this:
+		// CSeq: 123 INVITE
+		// We split the value to parse the CSeq integer value and the method
+ splits := strings.Split(headerValue, " ")
+ if len(splits) > 1 {
+
+ // Parse Cseq
+ s.cseq, err = strconv.ParseInt(splits[0], 10, 64)
+ if err != nil {
+ return err
+ }
+
+ // Validate method
+ if s.IsResponse {
+ s.Method, err = GetSIPMethod(splits[1])
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ case "content-length":
+
+ // Parse Content-Length
+ s.contentLength, err = strconv.ParseInt(headerValue, 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetAllHeaders will return the full headers of the
+// current SIP packet in a map[string][]string
+func (s *SIP) GetAllHeaders() map[string][]string {
+ return s.Headers
+}
+
+// GetHeader will return all the headers with
+// the specified name.
+func (s *SIP) GetHeader(headerName string) []string {
+ headerName = strings.ToLower(headerName)
+ h := make([]string, 0)
+ if _, ok := s.Headers[headerName]; ok {
+ if len(s.Headers[headerName]) > 0 {
+ return s.Headers[headerName]
+ } else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+ return s.Headers[compactSipHeadersCorrespondance[headerName]]
+ }
+ }
+ return h
+}
+
+// GetFirstHeader will return the first header with
+// the specified name. If the current SIP packet has multiple
+// headers with the same name, it returns the first.
+func (s *SIP) GetFirstHeader(headerName string) string {
+ headerName = strings.ToLower(headerName)
+ if _, ok := s.Headers[headerName]; ok {
+ if len(s.Headers[headerName]) > 0 {
+ return s.Headers[headerName][0]
+ } else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+ return s.Headers[compactSipHeadersCorrespondance[headerName]][0]
+ }
+ }
+ return ""
+}
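+
+// Note that the compact-form fallback above only fires when the long-form key
+// exists but holds no values; a packet that used only compact names (e.g. "f"
+// for From) is still queried by its compact name. A sketch:
+//
+//	from := s.GetFirstHeader("from")
+//	if from == "" {
+//		from = s.GetFirstHeader("f") // RFC 3261 compact form
+//	}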
+
+//
+// Some handy getters for most used SIP headers
+//
+
+// GetAuthorization will return the Authorization
+// header of the current SIP packet
+func (s *SIP) GetAuthorization() string {
+ return s.GetFirstHeader("Authorization")
+}
+
+// GetFrom will return the From
+// header of the current SIP packet
+func (s *SIP) GetFrom() string {
+ return s.GetFirstHeader("From")
+}
+
+// GetTo will return the To
+// header of the current SIP packet
+func (s *SIP) GetTo() string {
+ return s.GetFirstHeader("To")
+}
+
+// GetContact will return the Contact
+// header of the current SIP packet
+func (s *SIP) GetContact() string {
+ return s.GetFirstHeader("Contact")
+}
+
+// GetCallID will return the Call-ID
+// header of the current SIP packet
+func (s *SIP) GetCallID() string {
+ return s.GetFirstHeader("Call-ID")
+}
+
+// GetUserAgent will return the User-Agent
+// header of the current SIP packet
+func (s *SIP) GetUserAgent() string {
+ return s.GetFirstHeader("User-Agent")
+}
+
+// GetContentLength will return the parsed integer
+// Content-Length header of the current SIP packet
+func (s *SIP) GetContentLength() int64 {
+ return s.contentLength
+}
+
+// GetCSeq will return the parsed integer CSeq
+// header of the current SIP packet
+func (s *SIP) GetCSeq() int64 {
+ return s.cseq
+}
diff --git a/vendor/github.com/google/gopacket/layers/stp.go b/vendor/github.com/google/gopacket/layers/stp.go
new file mode 100644
index 0000000..bde7d7c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/stp.go
@@ -0,0 +1,27 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
+// STP decodes Spanning Tree Protocol packets, which transport BPDU (bridge protocol data unit) messages.
+type STP struct {
+ BaseLayer
+}
+
+// LayerType returns gopacket.LayerTypeSTP.
+func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP }
+
+func decodeSTP(data []byte, p gopacket.PacketBuilder) error {
+ stp := &STP{}
+ stp.Contents = data[:]
+ // TODO: parse the STP protocol into actual subfields.
+ p.AddLayer(stp)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tcp.go b/vendor/github.com/google/gopacket/layers/tcp.go
new file mode 100644
index 0000000..6b37f56
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcp.go
@@ -0,0 +1,337 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// TCP is the layer for TCP headers.
+type TCP struct {
+ BaseLayer
+ SrcPort, DstPort TCPPort
+ Seq uint32
+ Ack uint32
+ DataOffset uint8
+ FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool
+ Window uint16
+ Checksum uint16
+ Urgent uint16
+ sPort, dPort []byte
+ Options []TCPOption
+ Padding []byte
+ opts [4]TCPOption
+ tcpipchecksum
+}
+
+// TCPOptionKind represents a TCP option code.
+type TCPOptionKind uint8
+
+const (
+ TCPOptionKindEndList = 0
+ TCPOptionKindNop = 1
+ TCPOptionKindMSS = 2 // len = 4
+ TCPOptionKindWindowScale = 3 // len = 3
+ TCPOptionKindSACKPermitted = 4 // len = 2
+ TCPOptionKindSACK = 5 // len = n
+ TCPOptionKindEcho = 6 // len = 6, obsolete
+ TCPOptionKindEchoReply = 7 // len = 6, obsolete
+ TCPOptionKindTimestamps = 8 // len = 10
+ TCPOptionKindPartialOrderConnectionPermitted = 9 // len = 2, obsolete
+ TCPOptionKindPartialOrderServiceProfile = 10 // len = 3, obsolete
+ TCPOptionKindCC = 11 // obsolete
+ TCPOptionKindCCNew = 12 // obsolete
+ TCPOptionKindCCEcho = 13 // obsolete
+ TCPOptionKindAltChecksum = 14 // len = 3, obsolete
+ TCPOptionKindAltChecksumData = 15 // len = n, obsolete
+)
+
+func (k TCPOptionKind) String() string {
+ switch k {
+ case TCPOptionKindEndList:
+ return "EndList"
+ case TCPOptionKindNop:
+ return "NOP"
+ case TCPOptionKindMSS:
+ return "MSS"
+ case TCPOptionKindWindowScale:
+ return "WindowScale"
+ case TCPOptionKindSACKPermitted:
+ return "SACKPermitted"
+ case TCPOptionKindSACK:
+ return "SACK"
+ case TCPOptionKindEcho:
+ return "Echo"
+ case TCPOptionKindEchoReply:
+ return "EchoReply"
+ case TCPOptionKindTimestamps:
+ return "Timestamps"
+ case TCPOptionKindPartialOrderConnectionPermitted:
+ return "PartialOrderConnectionPermitted"
+ case TCPOptionKindPartialOrderServiceProfile:
+ return "PartialOrderServiceProfile"
+ case TCPOptionKindCC:
+ return "CC"
+ case TCPOptionKindCCNew:
+ return "CCNew"
+ case TCPOptionKindCCEcho:
+ return "CCEcho"
+ case TCPOptionKindAltChecksum:
+ return "AltChecksum"
+ case TCPOptionKindAltChecksumData:
+ return "AltChecksumData"
+ default:
+ return fmt.Sprintf("Unknown(%d)", k)
+ }
+}
+
+type TCPOption struct {
+ OptionType TCPOptionKind
+ OptionLength uint8
+ OptionData []byte
+}
+
+func (t TCPOption) String() string {
+ hd := hex.EncodeToString(t.OptionData)
+ if len(hd) > 0 {
+ hd = " 0x" + hd
+ }
+ switch t.OptionType {
+ case TCPOptionKindMSS:
+ return fmt.Sprintf("TCPOption(%s:%v%s)",
+ t.OptionType,
+ binary.BigEndian.Uint16(t.OptionData),
+ hd)
+
+ case TCPOptionKindTimestamps:
+ if len(t.OptionData) == 8 {
+ return fmt.Sprintf("TCPOption(%s:%v/%v%s)",
+ t.OptionType,
+ binary.BigEndian.Uint32(t.OptionData[:4]),
+ binary.BigEndian.Uint32(t.OptionData[4:8]),
+ hd)
+ }
+ }
+ return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd)
+}
+
+// LayerType returns gopacket.LayerTypeTCP
+func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP }
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var optionLength int
+ for _, o := range t.Options {
+ switch o.OptionType {
+ case 0, 1:
+ optionLength += 1
+ default:
+ optionLength += 2 + len(o.OptionData)
+ }
+ }
+ if opts.FixLengths {
+ if rem := optionLength % 4; rem != 0 {
+ t.Padding = lotsOfZeros[:4-rem]
+ }
+ t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4)
+ }
+ bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding))
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort))
+ binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort))
+ binary.BigEndian.PutUint32(bytes[4:], t.Seq)
+ binary.BigEndian.PutUint32(bytes[8:], t.Ack)
+ binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset())
+ binary.BigEndian.PutUint16(bytes[14:], t.Window)
+ binary.BigEndian.PutUint16(bytes[18:], t.Urgent)
+ start := 20
+ for _, o := range t.Options {
+ bytes[start] = byte(o.OptionType)
+ switch o.OptionType {
+ case 0, 1:
+ start++
+ default:
+ if opts.FixLengths {
+ o.OptionLength = uint8(len(o.OptionData) + 2)
+ }
+ bytes[start+1] = o.OptionLength
+ copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData)
+ start += len(o.OptionData) + 2
+ }
+ }
+ copy(bytes[start:], t.Padding)
+ if opts.ComputeChecksums {
+ // zero out checksum bytes in current serialization.
+ bytes[16] = 0
+ bytes[17] = 0
+ csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP)
+ if err != nil {
+ return err
+ }
+ t.Checksum = csum
+ }
+ binary.BigEndian.PutUint16(bytes[16:], t.Checksum)
+ return nil
+}
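+
+// Example (editor's sketch): serializing a SYN segment with computed lengths
+// and checksum. The IPv4 layer here is a stand-in; SetNetworkLayerForChecksum
+// must be called before ComputeChecksums can succeed.
+//
+//	ip := &IPv4{SrcIP: net.IP{10, 0, 0, 1}, DstIP: net.IP{10, 0, 0, 2}, Protocol: IPProtocolTCP}
+//	tcp := &TCP{SrcPort: 49152, DstPort: 80, SYN: true, Window: 65535}
+//	_ = tcp.SetNetworkLayerForChecksum(ip)
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
+//	err := tcp.SerializeTo(buf, opts)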
+
+func (t *TCP) ComputeChecksum() (uint16, error) {
+ return t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP)
+}
+
+func (t *TCP) flagsAndOffset() uint16 {
+ f := uint16(t.DataOffset) << 12
+ if t.FIN {
+ f |= 0x0001
+ }
+ if t.SYN {
+ f |= 0x0002
+ }
+ if t.RST {
+ f |= 0x0004
+ }
+ if t.PSH {
+ f |= 0x0008
+ }
+ if t.ACK {
+ f |= 0x0010
+ }
+ if t.URG {
+ f |= 0x0020
+ }
+ if t.ECE {
+ f |= 0x0040
+ }
+ if t.CWR {
+ f |= 0x0080
+ }
+ if t.NS {
+ f |= 0x0100
+ }
+ return f
+}
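+
+// For example, a SYN/ACK segment with the minimum 5-word data offset packs to
+// 0x5000 | 0x0002 | 0x0010 = 0x5012.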
+
+func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 20 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data))
+ }
+ tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2]))
+ tcp.sPort = data[0:2]
+ tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4]))
+ tcp.dPort = data[2:4]
+ tcp.Seq = binary.BigEndian.Uint32(data[4:8])
+ tcp.Ack = binary.BigEndian.Uint32(data[8:12])
+ tcp.DataOffset = data[12] >> 4
+ tcp.FIN = data[13]&0x01 != 0
+ tcp.SYN = data[13]&0x02 != 0
+ tcp.RST = data[13]&0x04 != 0
+ tcp.PSH = data[13]&0x08 != 0
+ tcp.ACK = data[13]&0x10 != 0
+ tcp.URG = data[13]&0x20 != 0
+ tcp.ECE = data[13]&0x40 != 0
+ tcp.CWR = data[13]&0x80 != 0
+ tcp.NS = data[12]&0x01 != 0
+ tcp.Window = binary.BigEndian.Uint16(data[14:16])
+ tcp.Checksum = binary.BigEndian.Uint16(data[16:18])
+ tcp.Urgent = binary.BigEndian.Uint16(data[18:20])
+ if tcp.Options == nil {
+		// Reuse the pre-allocated opts array to avoid a heap allocation.
+ tcp.Options = tcp.opts[:0]
+ } else {
+ tcp.Options = tcp.Options[:0]
+ }
+ if tcp.DataOffset < 5 {
+ return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset)
+ }
+ dataStart := int(tcp.DataOffset) * 4
+ if dataStart > len(data) {
+ df.SetTruncated()
+ tcp.Payload = nil
+ tcp.Contents = data
+ return errors.New("TCP data offset greater than packet length")
+ }
+ tcp.Contents = data[:dataStart]
+ tcp.Payload = data[dataStart:]
+ // From here on, data points just to the header options.
+ data = data[20:dataStart]
+ for len(data) > 0 {
+ tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])})
+ opt := &tcp.Options[len(tcp.Options)-1]
+ switch opt.OptionType {
+		case TCPOptionKindEndList: // End of options
+			opt.OptionLength = 1
+			tcp.Padding = data[1:]
+			// A bare break here would only exit the switch; return instead so
+			// the zero padding bytes are not re-parsed as further options.
+			return nil
+ case TCPOptionKindNop: // 1 byte padding
+ opt.OptionLength = 1
+ default:
+ if len(data) < 2 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data))
+ }
+ opt.OptionLength = data[1]
+ if opt.OptionLength < 2 {
+ return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength)
+ } else if int(opt.OptionLength) > len(data) {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data))
+ }
+ opt.OptionData = data[2:opt.OptionLength]
+ }
+ data = data[opt.OptionLength:]
+ }
+ return nil
+}
+
+func (t *TCP) CanDecode() gopacket.LayerClass {
+ return LayerTypeTCP
+}
+
+func (t *TCP) NextLayerType() gopacket.LayerType {
+ lt := t.DstPort.LayerType()
+ if lt == gopacket.LayerTypePayload {
+ lt = t.SrcPort.LayerType()
+ }
+ return lt
+}
+
+func decodeTCP(data []byte, p gopacket.PacketBuilder) error {
+ tcp := &TCP{}
+ err := tcp.DecodeFromBytes(data, p)
+ p.AddLayer(tcp)
+ p.SetTransportLayer(tcp)
+ if err != nil {
+ return err
+ }
+ if p.DecodeOptions().DecodeStreamsAsDatagrams {
+ return p.NextDecoder(tcp.NextLayerType())
+ } else {
+ return p.NextDecoder(gopacket.LayerTypePayload)
+ }
+}
+
+func (t *TCP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort)
+}
+
+// For testing only
+func (t *TCP) SetInternalPortsForTesting() {
+ t.sPort = make([]byte, 2)
+ t.dPort = make([]byte, 2)
+ binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort))
+ binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort))
+}
diff --git a/vendor/github.com/google/gopacket/layers/tcpip.go b/vendor/github.com/google/gopacket/layers/tcpip.go
new file mode 100644
index 0000000..64ba51c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcpip.go
@@ -0,0 +1,104 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// Checksum computation for TCP/UDP.
+type tcpipchecksum struct {
+ pseudoheader tcpipPseudoHeader
+}
+
+type tcpipPseudoHeader interface {
+ pseudoheaderChecksum() (uint32, error)
+}
+
+func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
+ if err := ip.AddressTo4(); err != nil {
+ return 0, err
+ }
+ csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
+ csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
+ csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
+ csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
+ return csum, nil
+}
+
+func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
+ if err := ip.AddressTo16(); err != nil {
+ return 0, err
+ }
+ for i := 0; i < 16; i += 2 {
+ csum += uint32(ip.SrcIP[i]) << 8
+ csum += uint32(ip.SrcIP[i+1])
+ csum += uint32(ip.DstIP[i]) << 8
+ csum += uint32(ip.DstIP[i+1])
+ }
+ return csum, nil
+}
+
+// Calculate the TCP/IP checksum defined in RFC 1071. The passed-in csum is any
+// initial checksum data that's already been computed.
+func tcpipChecksum(data []byte, csum uint32) uint16 {
+ // to handle odd lengths, we loop to length - 1, incrementing by 2, then
+ // handle the last byte specifically by checking against the original
+ // length.
+ length := len(data) - 1
+ for i := 0; i < length; i += 2 {
+ // For our test packet, doing this manually is about 25% faster
+ // (740 ns vs. 1000ns) than doing it by calling binary.BigEndian.Uint16.
+ csum += uint32(data[i]) << 8
+ csum += uint32(data[i+1])
+ }
+ if len(data)%2 == 1 {
+ csum += uint32(data[length]) << 8
+ }
+ for csum > 0xffff {
+ csum = (csum >> 16) + (csum & 0xffff)
+ }
+ return ^uint16(csum)
+}
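+
+// As a worked example, checksumming the four bytes {0x45, 0x00, 0x00, 0x3c}
+// sums the 16-bit words 0x4500 + 0x003c = 0x453c, which needs no folding, and
+// returns the one's complement ^0x453c = 0xbac3.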
+
+// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the
+// serialized TCP or UDP header plus its payload, with the checksum zero'd
+// out. headerProtocol is the IP protocol number of the upper-layer header.
+func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) {
+ if c.pseudoheader == nil {
+ return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
+ }
+ length := uint32(len(headerAndPayload))
+ csum, err := c.pseudoheader.pseudoheaderChecksum()
+ if err != nil {
+ return 0, err
+ }
+ csum += uint32(headerProtocol)
+ csum += length & 0xffff
+ csum += length >> 16
+ return tcpipChecksum(headerAndPayload, csum), nil
+}
+
+// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
+// This is needed for computing the checksum when serializing, since TCP/IP transport
+// layer checksums depend on fields in the IPv4 or IPv6 layer that contains it.
+// The passed in layer must be an *IPv4 or *IPv6.
+func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
+ switch v := l.(type) {
+ case *IPv4:
+ i.pseudoheader = v
+ case *IPv6:
+ i.pseudoheader = v
+ default:
+ return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/test_creator.py b/vendor/github.com/google/gopacket/layers/test_creator.py
new file mode 100755
index 0000000..c92d276
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/test_creator.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2012 Google, Inc. All rights reserved.
+
+"""TestCreator creates test templates from pcap files."""
+
+import argparse
+import base64
+import glob
+import re
+import string
+import subprocess
+import sys
+
+
+class Packet(object):
+ """Helper class encapsulating packet from a pcap file."""
+
+ def __init__(self, packet_lines):
+ self.packet_lines = packet_lines
+ self.data = self._DecodeText(packet_lines)
+
+ @classmethod
+ def _DecodeText(cls, packet_lines):
+ packet_bytes = []
+ # First line is timestamp and stuff, skip it.
+ # Format: 0x0010: 0000 0020 3aff 3ffe 0000 0000 0000 0000 ....:.?.........
+
+ for line in packet_lines[1:]:
+ m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE)
+ if m is None: continue
+ for hexpart in m.group(1).split():
+ packet_bytes.append(base64.b16decode(hexpart.upper()))
+ return ''.join(packet_bytes)
+
+ def Test(self, name, link_type):
+ """Yields a test using this packet, as a set of lines."""
+ yield '// testPacket%s is the packet:' % name
+ for line in self.packet_lines:
+ yield '// ' + line
+ yield 'var testPacket%s = []byte{' % name
+ data = list(self.data)
+ while data:
+ linebytes, data = data[:16], data[16:]
+ yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes])
+ yield '}'
+ yield 'func TestPacket%s(t *testing.T) {' % name
+ yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type)
+ yield '\tif p.ErrorLayer() != nil {'
+ yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())'
+ yield '\t}'
+ yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type
+ yield '}'
+ yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name
+ yield '\tfor i := 0; i < b.N; i++ {'
+ yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type)
+ yield '\t}'
+ yield '}'
+
+
+
+def GetTcpdumpOutput(filename):
+ """Runs tcpdump on the given file, returning output as string."""
+ return subprocess.check_output(
+ ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename])
+
+
+def TcpdumpOutputToPackets(output):
+ """Reads a pcap file with TCPDump, yielding Packet objects."""
+ pdata = []
+ for line in output.splitlines():
+ if line[0] not in string.whitespace and pdata:
+ yield Packet(pdata)
+ pdata = []
+ pdata.append(line)
+ if pdata:
+ yield Packet(pdata)
+
+
+def main():
+ class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
+ def _format_usage(self, usage, actions, groups, prefix=None):
+ header =('TestCreator creates gopacket tests using a pcap file.\n\n'
+                 'Tests are written to standard out... they can then be\n'
+                 'copied into the file of your choice and modified as\n'
+                 'you see fit.\n\n')
+ return header + argparse.ArgumentDefaultsHelpFormatter._format_usage(
+ self, usage, actions, groups, prefix)
+
+ parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter)
+ parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)')
+  parser.add_argument('--name', default='Packet%d', help='the test name, must have "%%d" inside it')
+ parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process')
+
+ args = parser.parse_args()
+
+ for arg in args.files:
+ for path in glob.glob(arg):
+ for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))):
+ print '\n'.join(packet.Test(
+ args.name % i, args.link_type))
+
+if __name__ == '__main__':
+ main()
diff --git a/vendor/github.com/google/gopacket/layers/tls.go b/vendor/github.com/google/gopacket/layers/tls.go
new file mode 100644
index 0000000..ddb6ff9
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls.go
@@ -0,0 +1,208 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// TLSType defines the type of data after the TLS Record
+type TLSType uint8
+
+// TLSType known values.
+const (
+ TLSChangeCipherSpec TLSType = 20
+ TLSAlert TLSType = 21
+ TLSHandshake TLSType = 22
+ TLSApplicationData TLSType = 23
+ TLSUnknown TLSType = 255
+)
+
+// String shows the record type nicely formatted
+func (tt TLSType) String() string {
+ switch tt {
+ default:
+ return "Unknown"
+ case TLSChangeCipherSpec:
+ return "Change Cipher Spec"
+ case TLSAlert:
+ return "Alert"
+ case TLSHandshake:
+ return "Handshake"
+ case TLSApplicationData:
+ return "Application Data"
+ }
+}
+
+// TLSVersion represents the TLS version in numeric format
+type TLSVersion uint16
+
+// String shows the TLS version nicely formatted
+func (tv TLSVersion) String() string {
+ switch tv {
+ default:
+ return "Unknown"
+ case 0x0200:
+ return "SSL 2.0"
+ case 0x0300:
+ return "SSL 3.0"
+ case 0x0301:
+ return "TLS 1.0"
+ case 0x0302:
+ return "TLS 1.1"
+ case 0x0303:
+ return "TLS 1.2"
+ case 0x0304:
+ return "TLS 1.3"
+ }
+}
+
+// TLS is specified in RFC 5246
+//
+// TLS Record Protocol
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Content Type |
+// +--+--+--+--+--+--+--+--+
+// | Version (major) |
+// +--+--+--+--+--+--+--+--+
+// | Version (minor) |
+// +--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+
+// | Length |
+// +--+--+--+--+--+--+--+--+
+
+// TLS is actually a slice of TLS record structures
+type TLS struct {
+ BaseLayer
+
+ // TLS Records
+ ChangeCipherSpec []TLSChangeCipherSpecRecord
+ Handshake []TLSHandshakeRecord
+ AppData []TLSAppDataRecord
+ Alert []TLSAlertRecord
+}
+
+// TLSRecordHeader contains all the information that each TLS record type should have
+type TLSRecordHeader struct {
+ ContentType TLSType
+ Version TLSVersion
+ Length uint16
+}
+
+// LayerType returns gopacket.LayerTypeTLS.
+func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS }
+
+// decodeTLS decodes the byte slice into a TLS type. It also
+// sets up the application layer in PacketBuilder.
+func decodeTLS(data []byte, p gopacket.PacketBuilder) error {
+ t := &TLS{}
+ err := t.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(t)
+ p.SetApplicationLayer(t)
+ return nil
+}
+
+// DecodeFromBytes decodes the slice into the TLS struct.
+func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ t.BaseLayer.Contents = data
+ t.BaseLayer.Payload = nil
+
+ t.ChangeCipherSpec = t.ChangeCipherSpec[:0]
+ t.Handshake = t.Handshake[:0]
+ t.AppData = t.AppData[:0]
+ t.Alert = t.Alert[:0]
+
+ return t.decodeTLSRecords(data, df)
+}
+
+func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 5 {
+ df.SetTruncated()
+ return errors.New("TLS record too short")
+ }
+
+	// since there are no further layers, the baselayer's content is
+	// pointing to this layer
+	t.BaseLayer = BaseLayer{Contents: data}
+
+ var h TLSRecordHeader
+ h.ContentType = TLSType(data[0])
+ h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3]))
+ h.Length = binary.BigEndian.Uint16(data[3:5])
+
+ if h.ContentType.String() == "Unknown" {
+ return errors.New("Unknown TLS record type")
+ }
+
+ hl := 5 // header length
+ tl := hl + int(h.Length)
+ if len(data) < tl {
+ df.SetTruncated()
+ return errors.New("TLS packet length mismatch")
+ }
+
+ switch h.ContentType {
+ default:
+ return errors.New("Unknown TLS record type")
+ case TLSChangeCipherSpec:
+ var r TLSChangeCipherSpecRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.ChangeCipherSpec = append(t.ChangeCipherSpec, r)
+ case TLSAlert:
+ var r TLSAlertRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.Alert = append(t.Alert, r)
+ case TLSHandshake:
+ var r TLSHandshakeRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.Handshake = append(t.Handshake, r)
+ case TLSApplicationData:
+ var r TLSAppDataRecord
+ e := r.decodeFromBytes(h, data[hl:tl], df)
+ if e != nil {
+ return e
+ }
+ t.AppData = append(t.AppData, r)
+ }
+
+ if len(data) == tl {
+ return nil
+ }
+	return t.decodeTLSRecords(data[tl:], df)
+}
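+
+// Example (editor's sketch): decoding the TLS records in a captured TCP
+// payload.
+//
+//	var t TLS
+//	if err := t.DecodeFromBytes(tcpPayload, gopacket.NilDecodeFeedback); err == nil {
+//		for _, r := range t.Handshake {
+//			fmt.Println(r.ContentType, r.Version, r.Length)
+//		}
+//	}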
+
+// CanDecode implements gopacket.DecodingLayer.
+func (t *TLS) CanDecode() gopacket.LayerClass {
+ return LayerTypeTLS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (t *TLS) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord
+func (t *TLS) Payload() []byte {
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_alert.go b/vendor/github.com/google/gopacket/layers/tls_alert.go
new file mode 100644
index 0000000..0c5aee0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_alert.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// TLSAlertLevel defines the alert level data type
+type TLSAlertLevel uint8
+
+// TLSAlertDescr defines the alert description data type
+type TLSAlertDescr uint8
+
+const (
+ TLSAlertWarning TLSAlertLevel = 1
+ TLSAlertFatal TLSAlertLevel = 2
+ TLSAlertUnknownLevel TLSAlertLevel = 255
+
+ TLSAlertCloseNotify TLSAlertDescr = 0
+ TLSAlertUnexpectedMessage TLSAlertDescr = 10
+ TLSAlertBadRecordMac TLSAlertDescr = 20
+ TLSAlertDecryptionFailedRESERVED TLSAlertDescr = 21
+ TLSAlertRecordOverflow TLSAlertDescr = 22
+ TLSAlertDecompressionFailure TLSAlertDescr = 30
+ TLSAlertHandshakeFailure TLSAlertDescr = 40
+ TLSAlertNoCertificateRESERVED TLSAlertDescr = 41
+ TLSAlertBadCertificate TLSAlertDescr = 42
+ TLSAlertUnsupportedCertificate TLSAlertDescr = 43
+ TLSAlertCertificateRevoked TLSAlertDescr = 44
+ TLSAlertCertificateExpired TLSAlertDescr = 45
+ TLSAlertCertificateUnknown TLSAlertDescr = 46
+ TLSAlertIllegalParameter TLSAlertDescr = 47
+ TLSAlertUnknownCa TLSAlertDescr = 48
+ TLSAlertAccessDenied TLSAlertDescr = 49
+ TLSAlertDecodeError TLSAlertDescr = 50
+ TLSAlertDecryptError TLSAlertDescr = 51
+ TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60
+ TLSAlertProtocolVersion TLSAlertDescr = 70
+ TLSAlertInsufficientSecurity TLSAlertDescr = 71
+ TLSAlertInternalError TLSAlertDescr = 80
+ TLSAlertUserCanceled TLSAlertDescr = 90
+ TLSAlertNoRenegotiation TLSAlertDescr = 100
+ TLSAlertUnsupportedExtension TLSAlertDescr = 110
+ TLSAlertUnknownDescription TLSAlertDescr = 255
+)
+
+// TLS Alert
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Level |
+// +--+--+--+--+--+--+--+--+
+// | Description |
+// +--+--+--+--+--+--+--+--+
+
+// TLSAlertRecord contains all the information that each Alert Record type should have
+type TLSAlertRecord struct {
+ TLSRecordHeader
+
+ Level TLSAlertLevel
+ Description TLSAlertDescr
+
+ EncryptedMsg []byte
+}
+
+// decodeFromBytes decodes the slice into the TLSAlertRecord struct.
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) < 2 {
+ df.SetTruncated()
+ return errors.New("TLS Alert packet too short")
+ }
+
+ if t.Length == 2 {
+ t.Level = TLSAlertLevel(data[0])
+ t.Description = TLSAlertDescr(data[1])
+ } else {
+ t.Level = TLSAlertUnknownLevel
+ t.Description = TLSAlertUnknownDescription
+ t.EncryptedMsg = data
+ }
+
+ return nil
+}
+
+// String shows the TLS alert level nicely formatted
+func (al TLSAlertLevel) String() string {
+ switch al {
+ default:
+ return fmt.Sprintf("Unknown(%d)", al)
+ case TLSAlertWarning:
+ return "Warning"
+ case TLSAlertFatal:
+ return "Fatal"
+ }
+}
+
+// String shows the TLS alert description nicely formatted
+func (ad TLSAlertDescr) String() string {
+ switch ad {
+ default:
+ return "Unknown"
+ case TLSAlertCloseNotify:
+ return "close_notify"
+ case TLSAlertUnexpectedMessage:
+ return "unexpected_message"
+ case TLSAlertBadRecordMac:
+ return "bad_record_mac"
+ case TLSAlertDecryptionFailedRESERVED:
+ return "decryption_failed_RESERVED"
+ case TLSAlertRecordOverflow:
+ return "record_overflow"
+ case TLSAlertDecompressionFailure:
+ return "decompression_failure"
+ case TLSAlertHandshakeFailure:
+ return "handshake_failure"
+ case TLSAlertNoCertificateRESERVED:
+ return "no_certificate_RESERVED"
+ case TLSAlertBadCertificate:
+ return "bad_certificate"
+ case TLSAlertUnsupportedCertificate:
+ return "unsupported_certificate"
+ case TLSAlertCertificateRevoked:
+ return "certificate_revoked"
+ case TLSAlertCertificateExpired:
+ return "certificate_expired"
+ case TLSAlertCertificateUnknown:
+ return "certificate_unknown"
+ case TLSAlertIllegalParameter:
+ return "illegal_parameter"
+ case TLSAlertUnknownCa:
+ return "unknown_ca"
+ case TLSAlertAccessDenied:
+ return "access_denied"
+ case TLSAlertDecodeError:
+ return "decode_error"
+ case TLSAlertDecryptError:
+ return "decrypt_error"
+ case TLSAlertExportRestrictionRESERVED:
+ return "export_restriction_RESERVED"
+ case TLSAlertProtocolVersion:
+ return "protocol_version"
+ case TLSAlertInsufficientSecurity:
+ return "insufficient_security"
+ case TLSAlertInternalError:
+ return "internal_error"
+ case TLSAlertUserCanceled:
+ return "user_canceled"
+ case TLSAlertNoRenegotiation:
+ return "no_renegotiation"
+ case TLSAlertUnsupportedExtension:
+ return "unsupported_extension"
+ }
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_appdata.go b/vendor/github.com/google/gopacket/layers/tls_appdata.go
new file mode 100644
index 0000000..dedd1d5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_appdata.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// TLSAppDataRecord contains all the information that each AppData record type should have
+type TLSAppDataRecord struct {
+ TLSRecordHeader
+ Payload []byte
+}
+
+// decodeFromBytes decodes the slice into the TLSAppDataRecord struct.
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) != int(t.Length) {
+ return errors.New("TLS Application Data length mismatch")
+ }
+
+ t.Payload = data
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_cipherspec.go b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
new file mode 100644
index 0000000..8f3dc62
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "errors"
+
+ "github.com/google/gopacket"
+)
+
+// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record
+type TLSchangeCipherSpec uint8
+
+const (
+ TLSChangecipherspecMessage TLSchangeCipherSpec = 1
+ TLSChangecipherspecUnknown TLSchangeCipherSpec = 255
+)
+
+// TLS Change Cipher Spec
+// 0 1 2 3 4 5 6 7 8
+// +--+--+--+--+--+--+--+--+
+// | Message |
+// +--+--+--+--+--+--+--+--+
+
+// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record
+type TLSChangeCipherSpecRecord struct {
+ TLSRecordHeader
+
+ Message TLSchangeCipherSpec
+}
+
+// decodeFromBytes decodes the slice into the TLS struct.
+func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ if len(data) != 1 {
+ df.SetTruncated()
+ return errors.New("TLS Change Cipher Spec record incorrect length")
+ }
+
+ t.Message = TLSchangeCipherSpec(data[0])
+ if t.Message != TLSChangecipherspecMessage {
+ t.Message = TLSChangecipherspecUnknown
+ }
+
+ return nil
+}
+
+// String shows the message value nicely formatted
+func (ccs TLSchangeCipherSpec) String() string {
+ switch ccs {
+ default:
+ return "Unknown"
+ case TLSChangecipherspecMessage:
+ return "Change Cipher Spec Message"
+ }
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_handshake.go b/vendor/github.com/google/gopacket/layers/tls_handshake.go
new file mode 100644
index 0000000..e45e2c7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_handshake.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "github.com/google/gopacket"
+)
+
+// TLSHandshakeRecord defines the structure of a Handshake Record
+type TLSHandshakeRecord struct {
+ TLSRecordHeader
+}
+
+// decodeFromBytes decodes the slice into the TLS struct.
+func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+ // TLS Record Header
+ t.ContentType = h.ContentType
+ t.Version = h.Version
+ t.Length = h.Length
+
+ // TODO
+
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/udp.go b/vendor/github.com/google/gopacket/layers/udp.go
new file mode 100644
index 0000000..97e81c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udp.go
@@ -0,0 +1,133 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+)
+
+// UDP is the layer for UDP headers.
+type UDP struct {
+ BaseLayer
+ SrcPort, DstPort UDPPort
+ Length uint16
+ Checksum uint16
+ sPort, dPort []byte
+ tcpipchecksum
+}
+
+// LayerType returns gopacket.LayerTypeUDP
+func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP }
+
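+// DecodeFromBytes decodes the UDP header from the given slice into this layer.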
+func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 8 {
+ df.SetTruncated()
+ return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data))
+ }
+ udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2]))
+ udp.sPort = data[0:2]
+ udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4]))
+ udp.dPort = data[2:4]
+ udp.Length = binary.BigEndian.Uint16(data[4:6])
+ udp.Checksum = binary.BigEndian.Uint16(data[6:8])
+ udp.BaseLayer = BaseLayer{Contents: data[:8]}
+ switch {
+ case udp.Length >= 8:
+ hlen := int(udp.Length)
+ if hlen > len(data) {
+ df.SetTruncated()
+ hlen = len(data)
+ }
+ udp.Payload = data[8:hlen]
+ case udp.Length == 0: // Jumbogram, use entire rest of data
+ udp.Payload = data[8:]
+ default:
+ return fmt.Errorf("UDP packet too small: %d bytes", udp.Length)
+ }
+ return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var jumbo bool
+
+ payload := b.Bytes()
+ if _, ok := u.pseudoheader.(*IPv6); ok {
+ if len(payload)+8 > 65535 {
+ jumbo = true
+ }
+ }
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort))
+ binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort))
+ if opts.FixLengths {
+ if jumbo {
+ u.Length = 0
+ } else {
+ u.Length = uint16(len(payload)) + 8
+ }
+ }
+ binary.BigEndian.PutUint16(bytes[4:], u.Length)
+ if opts.ComputeChecksums {
+ // zero out checksum bytes
+ bytes[6] = 0
+ bytes[7] = 0
+ csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP)
+ if err != nil {
+ return err
+ }
+ u.Checksum = csum
+ }
+ binary.BigEndian.PutUint16(bytes[6:], u.Checksum)
+ return nil
+}
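+
+// The following is an illustrative sketch, not part of the original source:
+// it shows how SerializeTo is typically driven via gopacket.SerializeLayers.
+// The addresses and ports are hypothetical; SetNetworkLayerForChecksum comes
+// from the embedded tcpipchecksum helper.
+//
+//   ip := &IPv4{SrcIP: net.IP{10, 0, 0, 1}, DstIP: net.IP{10, 0, 0, 2}, Protocol: IPProtocolUDP}
+//   udp := &UDP{SrcPort: 5000, DstPort: 53}
+//   _ = udp.SetNetworkLayerForChecksum(ip)
+//   buf := gopacket.NewSerializeBuffer()
+//   opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
+//   err := gopacket.SerializeLayers(buf, opts, ip, udp, gopacket.Payload([]byte("data")))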
+
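+// CanDecode returns the set of layer types that this DecodingLayer can decode.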
+func (u *UDP) CanDecode() gopacket.LayerClass {
+ return LayerTypeUDP
+}
+
+// NextLayerType uses the destination port to select the
+// next decoder. It tries the destination port first, then
+// the source port.
+func (u *UDP) NextLayerType() gopacket.LayerType {
+ if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload {
+ return lt
+ }
+ return u.SrcPort.LayerType()
+}
+
+func decodeUDP(data []byte, p gopacket.PacketBuilder) error {
+ udp := &UDP{}
+ err := udp.DecodeFromBytes(data, p)
+ p.AddLayer(udp)
+ p.SetTransportLayer(udp)
+ if err != nil {
+ return err
+ }
+ return p.NextDecoder(udp.NextLayerType())
+}
+
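+// TransportFlow returns a gopacket.Flow based on the UDP source and destination ports.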
+func (u *UDP) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort)
+}
+
+// For testing only
+func (u *UDP) SetInternalPortsForTesting() {
+ u.sPort = make([]byte, 2)
+ u.dPort = make([]byte, 2)
+ binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort))
+ binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort))
+}
diff --git a/vendor/github.com/google/gopacket/layers/udplite.go b/vendor/github.com/google/gopacket/layers/udplite.go
new file mode 100644
index 0000000..7d84c51
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udplite.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// UDPLite is the layer for UDP-Lite headers (rfc 3828).
+type UDPLite struct {
+ BaseLayer
+ SrcPort, DstPort UDPLitePort
+ ChecksumCoverage uint16
+ Checksum uint16
+ sPort, dPort []byte
+}
+
+// LayerType returns gopacket.LayerTypeUDPLite
+func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite }
+
+func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 8 {
+		p.SetTruncated()
+		return errors.New("UDP-Lite packet too small")
+	}
+	udp := &UDPLite{
+ SrcPort: UDPLitePort(binary.BigEndian.Uint16(data[0:2])),
+ sPort: data[0:2],
+ DstPort: UDPLitePort(binary.BigEndian.Uint16(data[2:4])),
+ dPort: data[2:4],
+ ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]),
+ Checksum: binary.BigEndian.Uint16(data[6:8]),
+ BaseLayer: BaseLayer{data[:8], data[8:]},
+ }
+ p.AddLayer(udp)
+ p.SetTransportLayer(udp)
+ return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+func (u *UDPLite) TransportFlow() gopacket.Flow {
+ return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort)
+}
diff --git a/vendor/github.com/google/gopacket/layers/usb.go b/vendor/github.com/google/gopacket/layers/usb.go
new file mode 100644
index 0000000..0b4d4af
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/usb.go
@@ -0,0 +1,287 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+type USBEventType uint8
+
+const (
+ USBEventTypeSubmit USBEventType = 'S'
+ USBEventTypeComplete USBEventType = 'C'
+ USBEventTypeError USBEventType = 'E'
+)
+
+func (a USBEventType) String() string {
+ switch a {
+ case USBEventTypeSubmit:
+ return "SUBMIT"
+ case USBEventTypeComplete:
+ return "COMPLETE"
+ case USBEventTypeError:
+ return "ERROR"
+ default:
+ return "Unknown event type"
+ }
+}
+
+type USBRequestBlockSetupRequest uint8
+
+const (
+ USBRequestBlockSetupRequestGetStatus USBRequestBlockSetupRequest = 0x00
+ USBRequestBlockSetupRequestClearFeature USBRequestBlockSetupRequest = 0x01
+ USBRequestBlockSetupRequestSetFeature USBRequestBlockSetupRequest = 0x03
+ USBRequestBlockSetupRequestSetAddress USBRequestBlockSetupRequest = 0x05
+ USBRequestBlockSetupRequestGetDescriptor USBRequestBlockSetupRequest = 0x06
+ USBRequestBlockSetupRequestSetDescriptor USBRequestBlockSetupRequest = 0x07
+ USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08
+ USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09
+ USBRequestBlockSetupRequestSetIdle USBRequestBlockSetupRequest = 0x0a
+)
+
+func (a USBRequestBlockSetupRequest) String() string {
+ switch a {
+ case USBRequestBlockSetupRequestGetStatus:
+ return "GET_STATUS"
+ case USBRequestBlockSetupRequestClearFeature:
+ return "CLEAR_FEATURE"
+ case USBRequestBlockSetupRequestSetFeature:
+ return "SET_FEATURE"
+ case USBRequestBlockSetupRequestSetAddress:
+ return "SET_ADDRESS"
+ case USBRequestBlockSetupRequestGetDescriptor:
+ return "GET_DESCRIPTOR"
+ case USBRequestBlockSetupRequestSetDescriptor:
+ return "SET_DESCRIPTOR"
+ case USBRequestBlockSetupRequestGetConfiguration:
+ return "GET_CONFIGURATION"
+ case USBRequestBlockSetupRequestSetConfiguration:
+ return "SET_CONFIGURATION"
+ case USBRequestBlockSetupRequestSetIdle:
+ return "SET_IDLE"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+type USBTransportType uint8
+
+const (
+ USBTransportTypeTransferIn USBTransportType = 0x80 // Indicates send or receive
+ USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur continuously and periodically. They typically contain time sensitive information, such as an audio or video stream.
+ USBTransportTypeInterrupt USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards.
+ USBTransportTypeControl USBTransportType = 0x02 // Control transfers are typically used for command and status operations.
+ USBTransportTypeBulk USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers.
+)
+
+type USBDirectionType uint8
+
+const (
+ USBDirectionTypeUnknown USBDirectionType = iota
+ USBDirectionTypeIn
+ USBDirectionTypeOut
+)
+
+func (a USBDirectionType) String() string {
+ switch a {
+ case USBDirectionTypeIn:
+ return "In"
+ case USBDirectionTypeOut:
+ return "Out"
+ default:
+ return "Unknown direction type"
+ }
+}
+
+// USB is the layer for USB event capture headers. The reference at
+// http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information
+// about the protocol.
+type USB struct {
+ BaseLayer
+ ID uint64
+ EventType USBEventType
+ TransferType USBTransportType
+ Direction USBDirectionType
+ EndpointNumber uint8
+ DeviceAddress uint8
+ BusID uint16
+ TimestampSec int64
+ TimestampUsec int32
+ Setup bool
+ Data bool
+ Status int32
+ UrbLength uint32
+ UrbDataLength uint32
+
+ UrbInterval uint32
+ UrbStartFrame uint32
+ UrbCopyOfTransferFlags uint32
+ IsoNumDesc uint32
+}
+
+func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB }
+
+func (m *USB) NextLayerType() gopacket.LayerType {
+	if m.Setup {
+		return LayerTypeUSBRequestBlockSetup
+	}
+	return m.TransferType.LayerType()
+}
+
+func decodeUSB(data []byte, p gopacket.PacketBuilder) error {
+ d := &USB{}
+
+ return decodingLayerDecoder(d, data, p)
+}
+
+func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 40 {
+		df.SetTruncated()
+		return errors.New("USB header too small")
+	}
+	m.ID = binary.LittleEndian.Uint64(data[0:8])
+ m.EventType = USBEventType(data[8])
+ m.TransferType = USBTransportType(data[9])
+
+ m.EndpointNumber = data[10] & 0x7f
+ if data[10]&uint8(USBTransportTypeTransferIn) > 0 {
+ m.Direction = USBDirectionTypeIn
+ } else {
+ m.Direction = USBDirectionTypeOut
+ }
+
+ m.DeviceAddress = data[11]
+ m.BusID = binary.LittleEndian.Uint16(data[12:14])
+
+	if data[14] == 0 {
+		m.Setup = true
+	}
+
+	if data[15] == 0 {
+		m.Data = true
+	}
+
+ m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24]))
+ m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28]))
+ m.Status = int32(binary.LittleEndian.Uint32(data[28:32]))
+ m.UrbLength = binary.LittleEndian.Uint32(data[32:36])
+ m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40])
+
+ m.Contents = data[:40]
+ m.Payload = data[40:]
+
+	if m.Setup {
+		m.Payload = data[40:]
+	} else if m.Data {
+		if m.UrbDataLength > uint32(len(data)) {
+			df.SetTruncated()
+			return errors.New("USB URB data length exceeds packet length")
+		}
+		m.Payload = data[uint32(len(data))-m.UrbDataLength:]
+	}
+
+ // if 64 bit, dissect_linux_usb_pseudo_header_ext
+ if false {
+ m.UrbInterval = binary.LittleEndian.Uint32(data[40:44])
+ m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48])
+ m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52])
+ m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56])
+ m.Contents = data[:56]
+ m.Payload = data[56:]
+ }
+
+ // crc5 or crc16
+ // eop (end of packet)
+
+ return nil
+}
+
+type USBRequestBlockSetup struct {
+ BaseLayer
+ RequestType uint8
+ Request USBRequestBlockSetupRequest
+ Value uint16
+ Index uint16
+ Length uint16
+}
+
+func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup }
+
+func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return errors.New("USB request block setup header too small")
+	}
+	m.RequestType = data[0]
+ m.Request = USBRequestBlockSetupRequest(data[1])
+ m.Value = binary.LittleEndian.Uint16(data[2:4])
+ m.Index = binary.LittleEndian.Uint16(data[4:6])
+ m.Length = binary.LittleEndian.Uint16(data[6:8])
+ m.Contents = data[:8]
+ m.Payload = data[8:]
+ return nil
+}
+
+func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBRequestBlockSetup{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBControl struct {
+ BaseLayer
+}
+
+func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl }
+
+func (m *USBControl) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBControl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBControl(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBControl{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBInterrupt struct {
+ BaseLayer
+}
+
+func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt }
+
+func (m *USBInterrupt) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBInterrupt{}
+ return decodingLayerDecoder(d, data, p)
+}
+
+type USBBulk struct {
+ BaseLayer
+}
+
+func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk }
+
+func (m *USBBulk) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypePayload
+}
+
+func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ m.Contents = data
+ return nil
+}
+
+func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error {
+ d := &USBBulk{}
+ return decodingLayerDecoder(d, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/vrrp.go b/vendor/github.com/google/gopacket/layers/vrrp.go
new file mode 100644
index 0000000..ffaafe6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vrrp.go
@@ -0,0 +1,156 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+/*
+ This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2.
+ https://tools.ietf.org/html/rfc3768#section-5
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |Version| Type | Virtual Rtr ID| Priority | Count IP Addrs|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Auth Type | Adver Int | Checksum |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IP Address (1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | . |
+ | . |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IP Address (n) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Authentication Data (1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Authentication Data (2) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+type VRRPv2Type uint8
+type VRRPv2AuthType uint8
+
+const (
+ VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement
+)
+
+// String returns the string representation of the VRRP message type
+func (v VRRPv2Type) String() string {
+ switch v {
+ case VRRPv2Advertisement:
+ return "VRRPv2 Advertisement"
+ default:
+ return ""
+ }
+}
+
+const (
+ VRRPv2AuthNoAuth VRRPv2AuthType = 0x00 // No Authentication
+ VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1
+ VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2
+)
+
+func (v VRRPv2AuthType) String() string {
+ switch v {
+ case VRRPv2AuthNoAuth:
+ return "No Authentication"
+ case VRRPv2AuthReserved1:
+ return "Reserved"
+ case VRRPv2AuthReserved2:
+ return "Reserved"
+ default:
+ return ""
+ }
+}
+
+// VRRPv2 represents an VRRP v2 message.
+type VRRPv2 struct {
+ BaseLayer
+ Version uint8 // The version field specifies the VRRP protocol version of this packet (v2)
+ Type VRRPv2Type // The type field specifies the type of this VRRP packet. The only type defined in v2 is ADVERTISEMENT
+ VirtualRtrID uint8 // identifies the virtual router this packet is reporting status for
+ Priority uint8 // specifies the sending VRRP router's priority for the virtual router (100 = default)
+ CountIPAddr uint8 // The number of IP addresses contained in this VRRP advertisement.
+ AuthType VRRPv2AuthType // identifies the authentication method being utilized
+ AdverInt uint8 // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS. The default is 1 second
+ Checksum uint16 // used to detect data corruption in the VRRP message.
+ IPAddress []net.IP // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field.
+}
+
+// LayerType returns LayerTypeVRRP for VRRP v2 message.
+func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP }
+
+func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	v.BaseLayer = BaseLayer{Contents: data}
+	v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2
+
+ v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement)
+	if v.Type != VRRPv2Advertisement {
+		// rfc3768: A packet with unknown type MUST be discarded.
+		return errors.New("unrecognized VRRPv2 type field")
+	}
+
+ v.VirtualRtrID = data[1]
+ v.Priority = data[2]
+
+ v.CountIPAddr = data[3]
+ if v.CountIPAddr < 1 {
+		return errors.New("VRRPv2 number of IP addresses is not valid")
+ }
+
+ v.AuthType = VRRPv2AuthType(data[4])
+	v.AdverInt = data[5]
+ v.Checksum = binary.BigEndian.Uint16(data[6:8])
+
+ // populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field
+ // offset references the starting byte containing the list of ip addresses
+	offset := 8
+	for i := uint8(0); i < v.CountIPAddr; i++ {
+		if len(data) < offset+4 {
+			df.SetTruncated()
+			return errors.New("VRRPv2 IP address list is truncated")
+		}
+		v.IPAddress = append(v.IPAddress, data[offset:offset+4])
+		offset += 4
+	}
+
+ // any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC
+ //
+ // 5.3.10. Authentication Data
+ //
+ // The authentication string is currently only used to maintain
+ // backwards compatibility with RFC 2338. It SHOULD be set to zero on
+ // transmission and ignored on reception.
+ return nil
+}
+
+// CanDecode specifies the layer type in which we are attempting to unwrap.
+func (v *VRRPv2) CanDecode() gopacket.LayerClass {
+ return LayerTypeVRRP
+}
+
+// NextLayerType specifies the next layer that should be decoded. VRRP does not contain any further payload, so we set to 0
+func (v *VRRPv2) NextLayerType() gopacket.LayerType {
+ return gopacket.LayerTypeZero
+}
+
+// The VRRP packet does not include payload data. Setting byte slice to nil
+func (v *VRRPv2) Payload() []byte {
+ return nil
+}
+
+// decodeVRRP will parse VRRP v2
+func decodeVRRP(data []byte, p gopacket.PacketBuilder) error {
+ if len(data) < 8 {
+		return errors.New("not a valid VRRP packet: packet length is too small")
+ }
+ v := &VRRPv2{}
+ return decodingLayerDecoder(v, data, p)
+}
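+
+// The following is an illustrative sketch, not part of the original source:
+// decoding a minimal hand-built VRRPv2 advertisement (version 2, type 1,
+// router ID 1, priority 100, one IP address). The bytes are hypothetical and
+// the checksum is left zero for brevity.
+//
+//   raw := []byte{0x21, 1, 100, 1, 0, 1, 0, 0, 10, 0, 0, 1}
+//   v := &VRRPv2{}
+//   if err := v.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err == nil {
+//       fmt.Println(v.VirtualRtrID, v.Priority, v.IPAddress[0]) // 1 100 10.0.0.1
+//   }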
diff --git a/vendor/github.com/google/gopacket/layers/vxlan.go b/vendor/github.com/google/gopacket/layers/vxlan.go
new file mode 100644
index 0000000..4f79ea4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vxlan.go
@@ -0,0 +1,98 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// VXLAN is specified in RFC 7348 https://tools.ietf.org/html/rfc7348
+// G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// 0 8 16 24 32
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | 24 bit VXLAN Network Identifier | Reserved |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// VXLAN is a VXLAN packet header
+type VXLAN struct {
+ BaseLayer
+ ValidIDFlag bool // 'I' bit per RFC 7348
+ VNI uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348
+ GBPExtension bool // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+ GBPDontLearn bool // 'D' bit per Group Policy
+ GBPApplied bool // 'A' bit per Group Policy
+ GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy
+}
+
+// LayerType returns LayerTypeVXLAN
+func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN }
+
+func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 8 {
+		p.SetTruncated()
+		return errors.New("vxlan packet too small")
+	}
+	vx := &VXLAN{}
+
+	// VNI is a 24-bit number; Uint32 requires 32 bits
+ var buf [4]byte
+ copy(buf[1:], data[4:7])
+
+ // RFC 7348 https://tools.ietf.org/html/rfc7348
+ vx.ValidIDFlag = data[0]&0x08 > 0 // 'I' bit per RFC7348
+ vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348
+
+ // Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+ vx.GBPExtension = data[0]&0x80 > 0 // 'G' bit per the group policy draft
+ vx.GBPDontLearn = data[1]&0x40 > 0 // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame.
+ vx.GBPApplied = data[1]&0x80 > 0 // 'A' bit - indicates that the group policy has already been applied to this packet.
+ vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft
+
+ // Layer information
+ const vxlanLength = 8
+ vx.Contents = data[:vxlanLength]
+ vx.Payload = data[vxlanLength:]
+
+ p.AddLayer(vx)
+ return p.NextDecoder(LinkTypeEthernet)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+
+ // PrependBytes does not guarantee that bytes are zeroed. Setting flags via OR requires that they start off at zero
+ bytes[0] = 0
+ bytes[1] = 0
+
+ if vx.ValidIDFlag {
+ bytes[0] |= 0x08
+ }
+ if vx.GBPExtension {
+ bytes[0] |= 0x80
+ }
+ if vx.GBPDontLearn {
+ bytes[1] |= 0x40
+ }
+ if vx.GBPApplied {
+ bytes[1] |= 0x80
+ }
+
+ binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID)
+ if vx.VNI >= 1<<24 {
+ return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI)
+ }
+ binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8)
+ return nil
+}
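+
+// The following is an illustrative sketch, not part of the original source:
+// serializing a VXLAN header with a hypothetical VNI of 42.
+//
+//   vx := &VXLAN{ValidIDFlag: true, VNI: 42}
+//   buf := gopacket.NewSerializeBuffer()
+//   err := vx.SerializeTo(buf, gopacket.SerializeOptions{})
+//   // buf.Bytes() now holds the 8-byte header 08 00 00 00 00 00 2a 00.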
diff --git a/vendor/github.com/google/gopacket/layers_decoder.go b/vendor/github.com/google/gopacket/layers_decoder.go
new file mode 100644
index 0000000..8c1f108
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers_decoder.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+
+package gopacket
+
+// Created by gen.go, don't edit manually
+// Generated at 2019-06-18 11:37:31.308731293 +0600 +06 m=+0.000842599
+
+// LayersDecoder returns DecodingLayerFunc for specified
+// DecodingLayerContainer, LayerType value to start decoding with and
+// some DecodeFeedback.
+func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ firstDec, ok := dl.Decoder(first)
+ if !ok {
+ return func([]byte, *[]LayerType) (LayerType, error) {
+ return first, nil
+ }
+ }
+ if dlc, ok := dl.(DecodingLayerSparse); ok {
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncate decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+ }
+ if dlc, ok := dl.(DecodingLayerArray); ok {
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncate decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+ }
+ if dlc, ok := dl.(DecodingLayerMap); ok {
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncate decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+ }
+ dlc := dl
+ return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+		*decoded = (*decoded)[:0] // Truncate decoded layers.
+ typ := first
+ decoder := firstDec
+ for {
+ if err := decoder.DecodeFromBytes(data, df); err != nil {
+ return LayerTypeZero, err
+ }
+ *decoded = append(*decoded, typ)
+ typ = decoder.NextLayerType()
+ if data = decoder.LayerPayload(); len(data) == 0 {
+ break
+ }
+ if decoder, ok = dlc.Decoder(typ); !ok {
+ return typ, nil
+ }
+ }
+ return LayerTypeZero, nil
+ }
+}
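+
+// The following is an illustrative sketch, not part of the original source:
+// eth, ip4 and tcp are hypothetical pre-allocated DecodingLayers from
+// gopacket/layers, and packetData is a raw Ethernet frame.
+//
+//   dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerArray(nil))
+//   dlc = dlc.Put(&eth).Put(&ip4).Put(&tcp)
+//   decodeFn := gopacket.LayersDecoder(dlc, layers.LayerTypeEthernet, gopacket.NilDecodeFeedback)
+//   decoded := make([]gopacket.LayerType, 0, 10)
+//   lastType, err := decodeFn(packetData, &decoded)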
diff --git a/vendor/github.com/google/gopacket/layertype.go b/vendor/github.com/google/gopacket/layertype.go
new file mode 100644
index 0000000..3abfee1
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layertype.go
@@ -0,0 +1,111 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// LayerType is a unique identifier for each type of layer. This enumeration
+// does not match with any externally available numbering scheme... it's solely
+// usable/useful within this library as a means for requesting layer types
+// (see Packet.Layer) and determining which types of layers have been decoded.
+//
+// New LayerTypes may be created by calling gopacket.RegisterLayerType.
+type LayerType int64
+
+// LayerTypeMetadata contains metadata associated with each LayerType.
+type LayerTypeMetadata struct {
+ // Name is the string returned by each layer type's String method.
+ Name string
+ // Decoder is the decoder to use when the layer type is passed in as a
+ // Decoder.
+ Decoder Decoder
+}
+
+type layerTypeMetadata struct {
+ inUse bool
+ LayerTypeMetadata
+}
+
+// DecodersByLayerName maps layer names to decoders for those layers.
+// This allows users to specify decoders by name to a program and have that
+// program pick the correct decoder accordingly.
+var DecodersByLayerName = map[string]Decoder{}
+
+const maxLayerType = 2000
+
+var ltMeta [maxLayerType]layerTypeMetadata
+var ltMetaMap = map[LayerType]layerTypeMetadata{}
+
+// RegisterLayerType creates a new layer type and registers it globally.
+// The number passed in must be unique, or a runtime panic will occur. Numbers
+// 0-999 are reserved for the gopacket library. Numbers 1000-1999 should be
+// used for common application-specific types, and are very fast. Any other
+// number (negative or >= 2000) may be used for uncommon application-specific
+// types, and is somewhat slower (it requires a map lookup instead of an
+// array index).
+func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType {
+ if 0 <= num && num < maxLayerType {
+ if ltMeta[num].inUse {
+ panic("Layer type already exists")
+ }
+ } else {
+ if ltMetaMap[LayerType(num)].inUse {
+ panic("Layer type already exists")
+ }
+ }
+ return OverrideLayerType(num, meta)
+}
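+
+// The following is an illustrative sketch, not part of the original source:
+// registering a hypothetical application-specific layer type in the fast
+// 1000-1999 range. decodeMyProto is a placeholder with the signature
+// func([]byte, gopacket.PacketBuilder) error.
+//
+//   var LayerTypeMyProto = gopacket.RegisterLayerType(1001, gopacket.LayerTypeMetadata{
+//       Name:    "MyProto",
+//       Decoder: gopacket.DecodeFunc(decodeMyProto),
+//   })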
+
+// OverrideLayerType acts like RegisterLayerType, except that if the layer type
+// has already been registered, it overrides the metadata with the passed-in
+// metadata instead of panicking.
+func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType {
+ if 0 <= num && num < maxLayerType {
+ ltMeta[num] = layerTypeMetadata{
+ inUse: true,
+ LayerTypeMetadata: meta,
+ }
+ } else {
+ ltMetaMap[LayerType(num)] = layerTypeMetadata{
+ inUse: true,
+ LayerTypeMetadata: meta,
+ }
+ }
+ DecodersByLayerName[meta.Name] = meta.Decoder
+ return LayerType(num)
+}
+
+// Decode decodes the given data using the decoder registered with the layer
+// type.
+func (t LayerType) Decode(data []byte, c PacketBuilder) error {
+ var d Decoder
+ if 0 <= int(t) && int(t) < maxLayerType {
+ d = ltMeta[int(t)].Decoder
+ } else {
+ d = ltMetaMap[t].Decoder
+ }
+ if d != nil {
+ return d.Decode(data, c)
+ }
+ return fmt.Errorf("Layer type %v has no associated decoder", t)
+}
+
+// String returns the string associated with this layer type.
+func (t LayerType) String() (s string) {
+ if 0 <= int(t) && int(t) < maxLayerType {
+ s = ltMeta[int(t)].Name
+ } else {
+ s = ltMetaMap[t].Name
+ }
+ if s == "" {
+ s = strconv.Itoa(int(t))
+ }
+ return
+}
diff --git a/vendor/github.com/google/gopacket/packet.go b/vendor/github.com/google/gopacket/packet.go
new file mode 100644
index 0000000..3a7c4b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/packet.go
@@ -0,0 +1,864 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "syscall"
+ "time"
+)
+
+// CaptureInfo provides standardized information about a packet captured off
+// the wire or read from a file.
+type CaptureInfo struct {
+ // Timestamp is the time the packet was captured, if that is known.
+ Timestamp time.Time
+ // CaptureLength is the total number of bytes read off of the wire.
+ CaptureLength int
+ // Length is the size of the original packet. Should always be >=
+ // CaptureLength.
+ Length int
+	// InterfaceIndex is the index of the interface on which the packet was captured.
+ InterfaceIndex int
+ // The packet source can place ancillary data of various types here.
+ // For example, the afpacket source can report the VLAN of captured
+ // packets this way.
+ AncillaryData []interface{}
+}
+
+// PacketMetadata contains metadata for a packet.
+type PacketMetadata struct {
+ CaptureInfo
+ // Truncated is true if packet decoding logic detects that there are fewer
+ // bytes in the packet than are detailed in various headers (for example, if
+ // the number of bytes in the IPv4 contents/payload is less than IPv4.Length).
+ // This is also set automatically for packets captured off the wire if
+ // CaptureInfo.CaptureLength < CaptureInfo.Length.
+ Truncated bool
+}
+
+// Packet is the primary object used by gopacket. Packets are created by a
+// Decoder's Decode call. A packet is made up of a set of Data, which
+// is broken into a number of Layers as it is decoded.
+type Packet interface {
+ //// Functions for outputting the packet as a human-readable string:
+ //// ------------------------------------------------------------------
+ // String returns a human-readable string representation of the packet.
+ // It uses LayerString on each layer to output the layer.
+ String() string
+ // Dump returns a verbose human-readable string representation of the packet,
+ // including a hex dump of all layers. It uses LayerDump on each layer to
+ // output the layer.
+ Dump() string
+
+ //// Functions for accessing arbitrary packet layers:
+ //// ------------------------------------------------------------------
+ // Layers returns all layers in this packet, computing them as necessary
+ Layers() []Layer
+ // Layer returns the first layer in this packet of the given type, or nil
+ Layer(LayerType) Layer
+ // LayerClass returns the first layer in this packet of the given class,
+ // or nil.
+ LayerClass(LayerClass) Layer
+
+ //// Functions for accessing specific types of packet layers. These functions
+ //// return the first layer of each type found within the packet.
+ //// ------------------------------------------------------------------
+ // LinkLayer returns the first link layer in the packet
+ LinkLayer() LinkLayer
+ // NetworkLayer returns the first network layer in the packet
+ NetworkLayer() NetworkLayer
+ // TransportLayer returns the first transport layer in the packet
+ TransportLayer() TransportLayer
+ // ApplicationLayer returns the first application layer in the packet
+ ApplicationLayer() ApplicationLayer
+ // ErrorLayer is particularly useful, since it returns nil if the packet
+ // was fully decoded successfully, and non-nil if an error was encountered
+ // in decoding and the packet was only partially decoded. Thus, its output
+ // can be used to determine if the entire packet was able to be decoded.
+ ErrorLayer() ErrorLayer
+
+ //// Functions for accessing data specific to the packet:
+ //// ------------------------------------------------------------------
+ // Data returns the set of bytes that make up this entire packet.
+ Data() []byte
+ // Metadata returns packet metadata associated with this packet.
+ Metadata() *PacketMetadata
+}
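+
+// The following is an illustrative sketch, not part of the original source:
+// using ErrorLayer to detect a partially decoded packet (pkt is a
+// hypothetical Packet).
+//
+//   if errLayer := pkt.ErrorLayer(); errLayer != nil {
+//       fmt.Println("decode error:", errLayer.Error())
+//   }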
+
+// packet contains all the information we need to fulfill the Packet interface,
+// and its two "subclasses" (yes, no such thing in Go, bear with me),
+// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the
+// various functions needed to access this information.
+type packet struct {
+ // data contains the entire packet data for a packet
+ data []byte
+ // initialLayers is space for an initial set of layers already created inside
+ // the packet.
+ initialLayers [6]Layer
+ // layers contains each layer we've already decoded
+ layers []Layer
+ // last is the last layer added to the packet
+ last Layer
+ // metadata is the PacketMetadata for this packet
+ metadata PacketMetadata
+
+ decodeOptions DecodeOptions
+
+ // Pointers to the various important layers
+ link LinkLayer
+ network NetworkLayer
+ transport TransportLayer
+ application ApplicationLayer
+ failure ErrorLayer
+}
+
+func (p *packet) SetTruncated() {
+ p.metadata.Truncated = true
+}
+
+func (p *packet) SetLinkLayer(l LinkLayer) {
+ if p.link == nil {
+ p.link = l
+ }
+}
+
+func (p *packet) SetNetworkLayer(l NetworkLayer) {
+ if p.network == nil {
+ p.network = l
+ }
+}
+
+func (p *packet) SetTransportLayer(l TransportLayer) {
+ if p.transport == nil {
+ p.transport = l
+ }
+}
+
+func (p *packet) SetApplicationLayer(l ApplicationLayer) {
+ if p.application == nil {
+ p.application = l
+ }
+}
+
+func (p *packet) SetErrorLayer(l ErrorLayer) {
+ if p.failure == nil {
+ p.failure = l
+ }
+}
+
+func (p *packet) AddLayer(l Layer) {
+ p.layers = append(p.layers, l)
+ p.last = l
+}
+
+func (p *packet) DumpPacketData() {
+ fmt.Fprint(os.Stderr, p.packetDump())
+ os.Stderr.Sync()
+}
+
+func (p *packet) Metadata() *PacketMetadata {
+ return &p.metadata
+}
+
+func (p *packet) Data() []byte {
+ return p.data
+}
+
+func (p *packet) DecodeOptions() *DecodeOptions {
+ return &p.decodeOptions
+}
+
+func (p *packet) addFinalDecodeError(err error, stack []byte) {
+ fail := &DecodeFailure{err: err, stack: stack}
+ if p.last == nil {
+ fail.data = p.data
+ } else {
+ fail.data = p.last.LayerPayload()
+ }
+ p.AddLayer(fail)
+ p.SetErrorLayer(fail)
+}
+
+func (p *packet) recoverDecodeError() {
+ if !p.decodeOptions.SkipDecodeRecovery {
+ if r := recover(); r != nil {
+ p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack())
+ }
+ }
+}
+
+// LayerString outputs an individual layer as a string. The layer is output
+// in a single line, with no trailing newline. This function is specifically
+// designed to do the right thing for most layers... it follows the following
+// rules:
+// * If the Layer has a String function, just output that.
+// * Otherwise, output all exported fields in the layer, recursing into
+// exported slices and structs.
+// NOTE: This is NOT THE SAME AS fmt's "%#v". %#v will output both exported
+// and unexported fields... many times packet layers contain unexported stuff
+// that would just mess up the output of the layer, see for example the
+// Payload layer and its internal 'data' field, which contains a large byte
+// array that would really mess up formatting.
+func LayerString(l Layer) string {
+ return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false))
+}
+
+// Dumper dumps verbose information on a value. If a layer type implements
+// Dumper, then its LayerDump() string will include the results in its output.
+type Dumper interface {
+ Dump() string
+}
+
+// LayerDump outputs a very verbose string representation of a layer. Its
+// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()).
+// It contains newlines and ends with a newline.
+func LayerDump(l Layer) string {
+ var b bytes.Buffer
+ b.WriteString(LayerString(l))
+ b.WriteByte('\n')
+ if d, ok := l.(Dumper); ok {
+ dump := d.Dump()
+ if dump != "" {
+ b.WriteString(dump)
+ if dump[len(dump)-1] != '\n' {
+ b.WriteByte('\n')
+ }
+ }
+ }
+ b.WriteString(hex.Dump(l.LayerContents()))
+ return b.String()
+}
+
+// layerString outputs, recursively, a layer in a "smart" way. See docs for
+// LayerString for more details.
+//
+// Params:
+// i - value to write out
+// anonymous: if we're currently recursing an anonymous member of a struct
+// writeSpace: if we've already written a value in a struct, and need to
+// write a space before writing more. This happens when we write various
+// anonymous values, and need to keep writing more.
+func layerString(v reflect.Value, anonymous bool, writeSpace bool) string {
+ // Let String() functions take precedence.
+ if v.CanInterface() {
+ if s, ok := v.Interface().(fmt.Stringer); ok {
+ return s.String()
+ }
+ }
+ // Reflect, and spit out all the exported fields as key=value.
+ switch v.Type().Kind() {
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return "nil"
+ }
+ r := v.Elem()
+ return layerString(r, anonymous, writeSpace)
+ case reflect.Struct:
+ var b bytes.Buffer
+ typ := v.Type()
+ if !anonymous {
+ b.WriteByte('{')
+ }
+ for i := 0; i < v.NumField(); i++ {
+ // Check if this is upper-case.
+ ftype := typ.Field(i)
+ f := v.Field(i)
+ if ftype.Anonymous {
+ anonStr := layerString(f, true, writeSpace)
+ writeSpace = writeSpace || anonStr != ""
+ b.WriteString(anonStr)
+ } else if ftype.PkgPath == "" { // exported
+ if writeSpace {
+ b.WriteByte(' ')
+ }
+ writeSpace = true
+ fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace))
+ }
+ }
+ if !anonymous {
+ b.WriteByte('}')
+ }
+ return b.String()
+ case reflect.Slice:
+ var b bytes.Buffer
+ b.WriteByte('[')
+ if v.Len() > 4 {
+ fmt.Fprintf(&b, "..%d..", v.Len())
+ } else {
+ for j := 0; j < v.Len(); j++ {
+ if j != 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(layerString(v.Index(j), false, false))
+ }
+ }
+ b.WriteByte(']')
+ return b.String()
+ }
+ return fmt.Sprintf("%v", v.Interface())
+}
+
+const (
+ longBytesLength = 128
+)
+
+// LongBytesGoString returns a string representation of the byte slice shortened
+// using the format '{<leading bytes> ... (<total length> bytes)}' if it
+// exceeds a predetermined length. Can be used to avoid filling the display with
+// very long byte strings.
+func LongBytesGoString(buf []byte) string {
+ if len(buf) < longBytesLength {
+ return fmt.Sprintf("%#v", buf)
+ }
+ s := fmt.Sprintf("%#v", buf[:longBytesLength-1])
+ s = strings.TrimSuffix(s, "}")
+ return fmt.Sprintf("%s ... (%d bytes)}", s, len(buf))
+}
+
+func baseLayerString(value reflect.Value) string {
+ t := value.Type()
+ content := value.Field(0)
+ c := make([]byte, content.Len())
+ for i := range c {
+ c[i] = byte(content.Index(i).Uint())
+ }
+ payload := value.Field(1)
+ p := make([]byte, payload.Len())
+ for i := range p {
+ p[i] = byte(payload.Index(i).Uint())
+ }
+ return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(),
+ LongBytesGoString(c),
+ LongBytesGoString(p))
+}
+
+func layerGoString(i interface{}, b *bytes.Buffer) {
+ if s, ok := i.(fmt.GoStringer); ok {
+ b.WriteString(s.GoString())
+ return
+ }
+
+ var v reflect.Value
+ var ok bool
+ if v, ok = i.(reflect.Value); !ok {
+ v = reflect.ValueOf(i)
+ }
+ switch v.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ if v.Kind() == reflect.Ptr {
+ b.WriteByte('&')
+ }
+ layerGoString(v.Elem().Interface(), b)
+ case reflect.Struct:
+ t := v.Type()
+ b.WriteString(t.String())
+ b.WriteByte('{')
+ for i := 0; i < v.NumField(); i++ {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ if t.Field(i).Name == "BaseLayer" {
+ fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i)))
+ } else if v.Field(i).Kind() == reflect.Struct {
+ fmt.Fprintf(b, "%s:", t.Field(i).Name)
+ layerGoString(v.Field(i), b)
+ } else if v.Field(i).Kind() == reflect.Ptr {
+ b.WriteByte('&')
+ layerGoString(v.Field(i), b)
+ } else {
+ fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i))
+ }
+ }
+ b.WriteByte('}')
+ default:
+ fmt.Fprintf(b, "%#v", i)
+ }
+}
+
+// LayerGoString returns a representation of the layer in Go syntax,
+// taking care to shorten "very long" BaseLayer byte slices
+func LayerGoString(l Layer) string {
+ b := new(bytes.Buffer)
+ layerGoString(l, b)
+ return b.String()
+}
+
+func (p *packet) packetString() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data()))
+ if p.metadata.Truncated {
+ b.WriteString(", truncated")
+ }
+ if p.metadata.Length > 0 {
+ fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength)
+ }
+ if !p.metadata.Timestamp.IsZero() {
+ fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp)
+ }
+ b.WriteByte('\n')
+ for i, l := range p.layers {
+ fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, len(l.LayerContents()), LayerString(l))
+ }
+ return b.String()
+}
+
+func (p *packet) packetDump() string {
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data))
+ for i, l := range p.layers {
+ fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l))
+ }
+ return b.String()
+}
+
+// eagerPacket is a packet implementation that does eager decoding. Upon
+// initial construction, it decodes all the layers it can from packet data.
+// eagerPacket implements Packet and PacketBuilder.
+type eagerPacket struct {
+ packet
+}
+
+var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type")
+
+func (p *eagerPacket) NextDecoder(next Decoder) error {
+ if next == nil {
+ return errNilDecoder
+ }
+ if p.last == nil {
+ return errors.New("NextDecoder called, but no layers added yet")
+ }
+ d := p.last.LayerPayload()
+ if len(d) == 0 {
+ return nil
+ }
+ // Since we're eager, immediately call the next decoder.
+ return next.Decode(d, p)
+}
+func (p *eagerPacket) initialDecode(dec Decoder) {
+ defer p.recoverDecodeError()
+ err := dec.Decode(p.data, p)
+ if err != nil {
+ p.addFinalDecodeError(err, nil)
+ }
+}
+func (p *eagerPacket) LinkLayer() LinkLayer {
+ return p.link
+}
+func (p *eagerPacket) NetworkLayer() NetworkLayer {
+ return p.network
+}
+func (p *eagerPacket) TransportLayer() TransportLayer {
+ return p.transport
+}
+func (p *eagerPacket) ApplicationLayer() ApplicationLayer {
+ return p.application
+}
+func (p *eagerPacket) ErrorLayer() ErrorLayer {
+ return p.failure
+}
+func (p *eagerPacket) Layers() []Layer {
+ return p.layers
+}
+func (p *eagerPacket) Layer(t LayerType) Layer {
+ for _, l := range p.layers {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ return nil
+}
+func (p *eagerPacket) LayerClass(lc LayerClass) Layer {
+ for _, l := range p.layers {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ return nil
+}
+func (p *eagerPacket) String() string { return p.packetString() }
+func (p *eagerPacket) Dump() string { return p.packetDump() }
+
+// lazyPacket does lazy decoding on its packet data. On construction it does
+// no initial decoding. For each function call, it decodes only as many layers
+// as are necessary to compute the return value for that function.
+// lazyPacket implements Packet and PacketBuilder.
+type lazyPacket struct {
+ packet
+ next Decoder
+}
+
+func (p *lazyPacket) NextDecoder(next Decoder) error {
+ if next == nil {
+ return errNilDecoder
+ }
+ p.next = next
+ return nil
+}
+func (p *lazyPacket) decodeNextLayer() {
+ if p.next == nil {
+ return
+ }
+ d := p.data
+ if p.last != nil {
+ d = p.last.LayerPayload()
+ }
+ next := p.next
+ p.next = nil
+ // We've just set p.next to nil, so if we see we have no data, this should be
+ // the final call we get to decodeNextLayer if we return here.
+ if len(d) == 0 {
+ return
+ }
+ defer p.recoverDecodeError()
+ err := next.Decode(d, p)
+ if err != nil {
+ p.addFinalDecodeError(err, nil)
+ }
+}
+func (p *lazyPacket) LinkLayer() LinkLayer {
+ for p.link == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.link
+}
+func (p *lazyPacket) NetworkLayer() NetworkLayer {
+ for p.network == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.network
+}
+func (p *lazyPacket) TransportLayer() TransportLayer {
+ for p.transport == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.transport
+}
+func (p *lazyPacket) ApplicationLayer() ApplicationLayer {
+ for p.application == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.application
+}
+func (p *lazyPacket) ErrorLayer() ErrorLayer {
+ for p.failure == nil && p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.failure
+}
+func (p *lazyPacket) Layers() []Layer {
+ for p.next != nil {
+ p.decodeNextLayer()
+ }
+ return p.layers
+}
+func (p *lazyPacket) Layer(t LayerType) Layer {
+ for _, l := range p.layers {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ numLayers := len(p.layers)
+ for p.next != nil {
+ p.decodeNextLayer()
+ for _, l := range p.layers[numLayers:] {
+ if l.LayerType() == t {
+ return l
+ }
+ }
+ numLayers = len(p.layers)
+ }
+ return nil
+}
+func (p *lazyPacket) LayerClass(lc LayerClass) Layer {
+ for _, l := range p.layers {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ numLayers := len(p.layers)
+ for p.next != nil {
+ p.decodeNextLayer()
+ for _, l := range p.layers[numLayers:] {
+ if lc.Contains(l.LayerType()) {
+ return l
+ }
+ }
+ numLayers = len(p.layers)
+ }
+ return nil
+}
+func (p *lazyPacket) String() string { p.Layers(); return p.packetString() }
+func (p *lazyPacket) Dump() string { p.Layers(); return p.packetDump() }
+
+// DecodeOptions tells gopacket how to decode a packet.
+type DecodeOptions struct {
+ // Lazy decoding decodes the minimum number of layers needed to return data
+ // for a packet at each function call. Be careful using this with concurrent
+ // packet processors, as each call to packet.* could mutate the packet, and
+ // two concurrent function calls could interact poorly.
+ Lazy bool
+ // NoCopy decoding doesn't copy its input buffer into storage that's owned by
+ // the packet. If you can guarantee that the bytes underlying the slice
+ // passed into NewPacket aren't going to be modified, this can be faster. If
+ // there's any chance that those bytes WILL be changed, this will invalidate
+ // your packets.
+ NoCopy bool
+ // SkipDecodeRecovery skips over panic recovery during packet decoding.
+ // Normally, when packets decode, if a panic occurs, that panic is captured
+ // by a recover(), and a DecodeFailure layer is added to the packet detailing
+ // the issue. If this flag is set, panics are instead allowed to continue up
+ // the stack.
+ SkipDecodeRecovery bool
+ // DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP
+ // decoder. If true, we should try to decode layers after TCP in single packets.
+ // This is disabled by default because the reassembly package drives the decoding
+ // of TCP payload data after reassembly.
+ DecodeStreamsAsDatagrams bool
+}
+
+// Default decoding provides the safest (but slowest) method for decoding
+// packets. It eagerly processes all layers (so it's concurrency-safe) and it
+// copies its input buffer upon creation of the packet (so the packet remains
+// valid if the underlying slice is modified. Both of these take time,
+// though, so beware. If you can guarantee that the packet will only be used
+// by one goroutine at a time, set Lazy decoding. If you can guarantee that
+// the underlying slice won't change, set NoCopy decoding.
+var Default = DecodeOptions{}
+
+// Lazy is a DecodeOptions with just Lazy set.
+var Lazy = DecodeOptions{Lazy: true}
+
+// NoCopy is a DecodeOptions with just NoCopy set.
+var NoCopy = DecodeOptions{NoCopy: true}
+
+// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set.
+var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true}
+
+// NewPacket creates a new Packet object from a set of bytes. The
+// firstLayerDecoder tells it how to interpret the first layer from the bytes,
+// future layers will be generated from that first layer automatically.
+func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) Packet {
+ if !options.NoCopy {
+ dataCopy := make([]byte, len(data))
+ copy(dataCopy, data)
+ data = dataCopy
+ }
+ if options.Lazy {
+ p := &lazyPacket{
+ packet: packet{data: data, decodeOptions: options},
+ next: firstLayerDecoder,
+ }
+ p.layers = p.initialLayers[:0]
+ // Crazy craziness:
+	// If the following return statement is REMOVED, and Lazy is FALSE, then
+ // eager packet processing becomes 17% FASTER. No, there is no logical
+ // explanation for this. However, it's such a hacky micro-optimization that
+ // we really can't rely on it. It appears to have to do with the size the
+ // compiler guesses for this function's stack space, since one symptom is
+ // that with the return statement in place, we more than double calls to
+ // runtime.morestack/runtime.lessstack. We'll hope the compiler gets better
+ // over time and we get this optimization for free. Until then, we'll have
+ // to live with slower packet processing.
+ return p
+ }
+ p := &eagerPacket{
+ packet: packet{data: data, decodeOptions: options},
+ }
+ p.layers = p.initialLayers[:0]
+ p.initialDecode(firstLayerDecoder)
+ return p
+}
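+
+// The following is an illustrative sketch, not part of the original source:
+// rawBytes is a hypothetical Ethernet frame, and layers.LayerTypeEthernet
+// (from gopacket/layers) doubles as the first-layer Decoder.
+//
+//   pkt := gopacket.NewPacket(rawBytes, layers.LayerTypeEthernet, gopacket.Default)
+//   if tcpLayer := pkt.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+//       fmt.Println("TCP dst port:", tcpLayer.(*layers.TCP).DstPort)
+//   }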
+
+// PacketDataSource is an interface for some source of packet data. Users may
+// create their own implementations, or use the existing implementations in
+// gopacket/pcap (libpcap, allows reading from live interfaces or from
+// pcap files) or gopacket/pfring (PF_RING, allows reading from live
+// interfaces).
+type PacketDataSource interface {
+ // ReadPacketData returns the next packet available from this data source.
+ // It returns:
+ // data: The bytes of an individual packet.
+ // ci: Metadata about the capture
+ // err: An error encountered while reading packet data. If err != nil,
+ // then data/ci will be ignored.
+ ReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set
+// of internal PacketDataSources, each of which will stop with io.EOF after
+// reading a finite number of packets. The returned PacketDataSource will
+// return all packets from the first finite source, followed by all packets from
+// the second, etc. Once all finite sources have returned io.EOF, the returned
+// source will as well.
+func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource {
+ c := concat(pds)
+ return &c
+}
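+
+// The following is an illustrative sketch, not part of the original source:
+// src1 and src2 are hypothetical finite sources (e.g. pcap file handles)
+// that return io.EOF once exhausted. Non-EOF errors are ignored here for
+// brevity.
+//
+//   src := gopacket.ConcatFinitePacketDataSources(src1, src2)
+//   for {
+//       data, ci, err := src.ReadPacketData()
+//       if err == io.EOF {
+//           break
+//       }
+//       handlePacket(data, ci) // hypothetical handler
+//   }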
+
+type concat []PacketDataSource
+
+func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) {
+ for len(*c) > 0 {
+ data, ci, err = (*c)[0].ReadPacketData()
+ if err == io.EOF {
+ *c = (*c)[1:]
+ continue
+ }
+ return
+ }
+ return nil, CaptureInfo{}, io.EOF
+}
+
+// ZeroCopyPacketDataSource is an interface to pull packet data from sources
+// that allow data to be returned without copying to a user-controlled buffer.
+// It's very similar to PacketDataSource, except that the caller must be more
+// careful in how the returned buffer is handled.
+type ZeroCopyPacketDataSource interface {
+ // ZeroCopyReadPacketData returns the next packet available from this data source.
+ // It returns:
+ // data: The bytes of an individual packet. Unlike with
+ // PacketDataSource's ReadPacketData, the slice returned here points
+ // to a buffer owned by the data source. In particular, the bytes in
+ // this buffer may be changed by future calls to
+ // ZeroCopyReadPacketData. Do not use the returned buffer after
+ // subsequent ZeroCopyReadPacketData calls.
+ // ci: Metadata about the capture
+ // err: An error encountered while reading packet data. If err != nil,
+ // then data/ci will be ignored.
+ ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// PacketSource reads in packets from a PacketDataSource, decodes them, and
+// returns them.
+//
+// There are currently two different methods for reading packets in through
+// a PacketSource:
+//
+// Reading With Packets Function
+//
+// This method is the most convenient and easiest to code, but lacks
+// flexibility. Packets returns a 'chan Packet', then asynchronously writes
+// packets into that channel. Packets uses a blocking channel, and closes
+// it if an io.EOF is returned by the underlying PacketDataSource. All other
+// PacketDataSource errors are ignored and discarded.
+// for packet := range packetSource.Packets() {
+// ...
+// }
+//
+// Reading With NextPacket Function
+//
+// This method is the most flexible, and exposes errors that may be
+// encountered by the underlying PacketDataSource. It's also the fastest
+// in a tight loop, since it doesn't have the overhead of a channel
+// read/write. However, it requires the user to handle errors, most
+// importantly the io.EOF error in cases where packets are being read from
+// a file.
+// for {
+// packet, err := packetSource.NextPacket()
+// if err == io.EOF {
+// break
+// } else if err != nil {
+// log.Println("Error:", err)
+// continue
+// }
+// handlePacket(packet) // Do something with each packet.
+// }
+type PacketSource struct {
+ source PacketDataSource
+ decoder Decoder
+ // DecodeOptions is the set of options to use for decoding each piece
+ // of packet data. This can/should be changed by the user to reflect the
+ // way packets should be decoded.
+ DecodeOptions
+ c chan Packet
+}
+
+// NewPacketSource creates a PacketSource from the given PacketDataSource and Decoder.
+func NewPacketSource(source PacketDataSource, decoder Decoder) *PacketSource {
+ return &PacketSource{
+ source: source,
+ decoder: decoder,
+ }
+}
+
+// NextPacket returns the next decoded packet from the PacketSource. On error,
+// it returns a nil packet and a non-nil error.
+func (p *PacketSource) NextPacket() (Packet, error) {
+ data, ci, err := p.source.ReadPacketData()
+ if err != nil {
+ return nil, err
+ }
+ packet := NewPacket(data, p.decoder, p.DecodeOptions)
+ m := packet.Metadata()
+ m.CaptureInfo = ci
+ m.Truncated = m.Truncated || ci.CaptureLength < ci.Length
+ return packet, nil
+}
+
+// packetsToChannel reads in all packets from the packet source and sends them
+// to the given channel. This routine terminates when a non-temporary error
+// is returned by NextPacket().
+func (p *PacketSource) packetsToChannel() {
+ defer close(p.c)
+ for {
+ packet, err := p.NextPacket()
+ if err == nil {
+ p.c <- packet
+ continue
+ }
+
+ // Immediately retry for temporary network errors
+ if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
+ continue
+ }
+
+ // Immediately retry for EAGAIN
+ if err == syscall.EAGAIN {
+ continue
+ }
+
+ // Immediately break for known unrecoverable errors
+ if err == io.EOF || err == io.ErrUnexpectedEOF ||
+ err == io.ErrNoProgress || err == io.ErrClosedPipe || err == io.ErrShortBuffer ||
+ err == syscall.EBADF ||
+ strings.Contains(err.Error(), "use of closed file") {
+ break
+ }
+
+ // Sleep briefly and try again
+ time.Sleep(time.Millisecond * time.Duration(5))
+ }
+}
+
+// Packets returns a channel of packets, allowing easy iteration over
+// packets. Packets will be asynchronously read in from the underlying
+// PacketDataSource and written to the returned channel. If the underlying
+// PacketDataSource returns an io.EOF error, the channel will be closed.
+// If any other error is encountered, it is ignored.
+//
+// for packet := range packetSource.Packets() {
+// handlePacket(packet) // Do something with each packet.
+// }
+//
+// If called more than once, returns the same channel.
+func (p *PacketSource) Packets() chan Packet {
+ if p.c == nil {
+ p.c = make(chan Packet, 1000)
+ go p.packetsToChannel()
+ }
+ return p.c
+}
diff --git a/vendor/github.com/google/gopacket/parser.go b/vendor/github.com/google/gopacket/parser.go
new file mode 100644
index 0000000..4a4676f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/parser.go
@@ -0,0 +1,350 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// A container for a single LayerType->DecodingLayer mapping.
+type decodingLayerElem struct {
+ typ LayerType
+ dec DecodingLayer
+}
+
+// DecodingLayer is an interface for packet layers that can decode themselves.
+//
+// The important part of a DecodingLayer is that it decodes itself in-place.
+// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to
+// the new state defined by the data passed in. A returned error leaves the
+// DecodingLayer in an unknown intermediate state, thus its fields should not be
+// trusted.
+//
+// Because the DecodingLayer is resetting its own fields, a call to
+// DecodeFromBytes should normally not require any memory allocation.
+type DecodingLayer interface {
+ // DecodeFromBytes resets the internal state of this layer to the state
+ // defined by the passed-in bytes. Slices in the DecodingLayer may
+ // reference the passed-in data, so care should be taken to copy it
+ // first should later modification of data be required before the
+ // DecodingLayer is discarded.
+ DecodeFromBytes(data []byte, df DecodeFeedback) error
+ // CanDecode returns the set of LayerTypes this DecodingLayer can
+ // decode. For Layers that are also DecodingLayers, this will most
+ // often be that Layer's LayerType().
+ CanDecode() LayerClass
+ // NextLayerType returns the LayerType which should be used to decode
+ // the LayerPayload.
+ NextLayerType() LayerType
+ // LayerPayload is the set of bytes remaining to decode after a call to
+ // DecodeFromBytes.
+ LayerPayload() []byte
+}
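+
+// A minimal sketch of a conforming implementation (editor's illustration,
+// not part of this package): a made-up protocol whose wire format is a
+// one-byte type field followed by payload. LayerTypeMyProto is a
+// hypothetical LayerType obtained from gopacket.RegisterLayerType:
+//
+//  type MyProto struct {
+//    Type    byte
+//    payload []byte
+//  }
+//
+//  func (m *MyProto) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+//    if len(data) < 1 {
+//      df.SetTruncated()
+//      return errors.New("MyProto header too short")
+//    }
+//    m.Type = data[0]
+//    m.payload = data[1:] // references the passed-in data, per the contract above
+//    return nil
+//  }
+//  func (m *MyProto) CanDecode() gopacket.LayerClass    { return LayerTypeMyProto }
+//  func (m *MyProto) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+//  func (m *MyProto) LayerPayload() []byte              { return m.payload }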
+
+// DecodingLayerFunc decodes the given packet and stores the decoded
+// LayerType values into the specified slice. It returns either the
+// first unsupported LayerType value encountered or a decoding error.
+// On success, it returns (LayerTypeZero, nil).
+type DecodingLayerFunc func([]byte, *[]LayerType) (LayerType, error)
+
+// DecodingLayerContainer stores DecodingLayers and serves as a lookup
+// tool for DecodingLayerParser.
+type DecodingLayerContainer interface {
+ // Put adds a new DecodingLayer to the container. The updated
+ // container is returned, so Put may be implemented on a value
+ // receiver.
+ Put(DecodingLayer) DecodingLayerContainer
+ // Decoder returns the DecodingLayer registered for the given LayerType
+ // and true if it was found; otherwise it returns nil and false.
+ Decoder(LayerType) (DecodingLayer, bool)
+ // LayersDecoder returns a DecodingLayerFunc that decodes the given
+ // packet, starting with the specified LayerType and using the given
+ // DecodeFeedback.
+ LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc
+}
+
+// DecodingLayerSparse is a sparse array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is addressed in an
+// allocated slice by its LayerType value. Though this is the fastest
+// container, it may consume significant memory if used with large
+// LayerType values.
+type DecodingLayerSparse []DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Put(d DecodingLayer) DecodingLayerContainer {
+ maxLayerType := LayerType(len(dl) - 1)
+ for _, typ := range d.CanDecode().LayerTypes() {
+ if typ > maxLayerType {
+ maxLayerType = typ
+ }
+ }
+
+ if extra := maxLayerType - LayerType(len(dl)) + 1; extra > 0 {
+ dl = append(dl, make([]DecodingLayer, extra)...)
+ }
+
+ for _, typ := range d.CanDecode().LayerTypes() {
+ dl[typ] = d
+ }
+ return dl
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ return LayersDecoder(dl, first, df)
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Decoder(typ LayerType) (DecodingLayer, bool) {
+ if int64(typ) < int64(len(dl)) {
+ decoder := dl[typ]
+ return decoder, decoder != nil
+ }
+ return nil, false
+}
+
+// DecodingLayerArray is an array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is found by a linear
+// search over an allocated slice.
+type DecodingLayerArray []decodingLayerElem
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Put(d DecodingLayer) DecodingLayerContainer {
+TYPES:
+ for _, typ := range d.CanDecode().LayerTypes() {
+ for i := range dl {
+ if dl[i].typ == typ {
+ dl[i].dec = d
+ continue TYPES
+ }
+ }
+ dl = append(dl, decodingLayerElem{typ, d})
+ }
+ return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Decoder(typ LayerType) (DecodingLayer, bool) {
+ for i := range dl {
+ if dl[i].typ == typ {
+ return dl[i].dec, true
+ }
+ }
+ return nil, false
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ return LayersDecoder(dl, first, df)
+}
+
+// DecodingLayerMap is a map-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is looked up in a map
+// keyed by LayerType value.
+type DecodingLayerMap map[LayerType]DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Put(d DecodingLayer) DecodingLayerContainer {
+ for _, typ := range d.CanDecode().LayerTypes() {
+ if dl == nil {
+ dl = make(map[LayerType]DecodingLayer)
+ }
+ dl[typ] = d
+ }
+ return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Decoder(typ LayerType) (DecodingLayer, bool) {
+ d, ok := dl[typ]
+ return d, ok
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+ return LayersDecoder(dl, first, df)
+}
+
+// Static code check.
+var (
+ _ = []DecodingLayerContainer{
+ DecodingLayerSparse(nil),
+ DecodingLayerMap(nil),
+ DecodingLayerArray(nil),
+ }
+)
+
+// DecodingLayerParser parses a given set of layer types. See DecodeLayers for
+// more information on how DecodingLayerParser should be used.
+type DecodingLayerParser struct {
+ // DecodingLayerParserOptions is the set of options available to the
+ // user to define the parser's behavior.
+ DecodingLayerParserOptions
+ dlc DecodingLayerContainer
+ first LayerType
+ df DecodeFeedback
+
+ decodeFunc DecodingLayerFunc
+
+ // Truncated is set when a decode layer detects that the packet has been
+ // truncated.
+ Truncated bool
+}
+
+// AddDecodingLayer adds a decoding layer to the parser. This adds support for
+// the decoding layer's CanDecode layers to the parser... should they be
+// encountered, they'll be parsed.
+func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
+ l.SetDecodingLayerContainer(l.dlc.Put(d))
+}
+
+// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
+// DecodingLayerParser. Users should simply read Truncated after calling
+// DecodeLayers.
+func (l *DecodingLayerParser) SetTruncated() {
+ l.Truncated = true
+}
+
+// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
+// of the given DecodingLayers with AddDecodingLayer.
+//
+// Each call to DecodeLayers will attempt to decode the given bytes first by
+// treating them as a 'first'-type layer, then by using NextLayerType on
+// subsequently decoded layers to find the next relevant decoder. Should a
+// decoder not be available for the layer type returned by NextLayerType,
+// decoding will stop.
+//
+// NewDecodingLayerParser uses DecodingLayerMap container by
+// default.
+func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
+ dlp := &DecodingLayerParser{first: first}
+ dlp.df = dlp // Cast this once to the interface
+ // default container
+ dlc := DecodingLayerContainer(DecodingLayerMap(make(map[LayerType]DecodingLayer)))
+ for _, d := range decoders {
+ dlc = dlc.Put(d)
+ }
+
+ dlp.SetDecodingLayerContainer(dlc)
+ return dlp
+}
+
+// SetDecodingLayerContainer specifies container with decoders. This
+// call replaces all decoders already registered in given instance of
+// DecodingLayerParser.
+func (l *DecodingLayerParser) SetDecodingLayerContainer(dlc DecodingLayerContainer) {
+ l.dlc = dlc
+ l.decodeFunc = l.dlc.LayersDecoder(l.first, l.df)
+}
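+
+// A hedged sketch of swapping in a faster container (editor's example; eth,
+// ip4 and tcp are DecodingLayer variables as in the DecodeLayers example
+// below). SetDecodingLayerContainer replaces all registered decoders, so
+// layers must be Put into the new container first:
+//
+//  dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
+//  for _, l := range []gopacket.DecodingLayer{&eth, &ip4, &tcp} {
+//    dlc = dlc.Put(l)
+//  }
+//  parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet)
+//  parser.SetDecodingLayerContainer(dlc)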
+
+// DecodeLayers decodes as many layers as possible from the given data. It
+// initially treats the data as layer type 'typ', then uses NextLayerType on
+// each subsequent decoded layer until it gets to a layer type it doesn't know
+// how to parse.
+//
+// For each layer successfully decoded, DecodeLayers appends the layer type to
+// the decoded slice. DecodeLayers truncates the 'decoded' slice initially, so
+// there's no need to empty it yourself.
+//
+// This decoding method is about an order of magnitude faster than packet
+// decoding, because it only decodes known layers that have already been
+// allocated. This means it doesn't need to allocate each layer it returns...
+// instead it overwrites the layers that already exist.
+//
+// Example usage:
+// func main() {
+// var eth layers.Ethernet
+// var ip4 layers.IPv4
+// var ip6 layers.IPv6
+// var tcp layers.TCP
+// var udp layers.UDP
+// var payload gopacket.Payload
+// parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp, &udp, &payload)
+// var source gopacket.PacketDataSource = getMyDataSource()
+// decodedLayers := make([]gopacket.LayerType, 0, 10)
+// for {
+// data, _, err := source.ReadPacketData()
+// if err != nil {
+// fmt.Println("Error reading packet data: ", err)
+// continue
+// }
+// fmt.Println("Decoding packet")
+// err = parser.DecodeLayers(data, &decodedLayers)
+// for _, typ := range decodedLayers {
+// fmt.Println(" Successfully decoded layer type", typ)
+// switch typ {
+// case layers.LayerTypeEthernet:
+// fmt.Println(" Eth ", eth.SrcMAC, eth.DstMAC)
+// case layers.LayerTypeIPv4:
+// fmt.Println(" IP4 ", ip4.SrcIP, ip4.DstIP)
+// case layers.LayerTypeIPv6:
+// fmt.Println(" IP6 ", ip6.SrcIP, ip6.DstIP)
+// case layers.LayerTypeTCP:
+// fmt.Println(" TCP ", tcp.SrcPort, tcp.DstPort)
+// case layers.LayerTypeUDP:
+// fmt.Println(" UDP ", udp.SrcPort, udp.DstPort)
+// }
+// }
+// if parser.Truncated {
+// fmt.Println(" Packet has been truncated")
+// }
+// if err != nil {
+// fmt.Println(" Error encountered:", err)
+// }
+// }
+// }
+//
+// If DecodeLayers is unable to decode the next layer type, it will return the
+// error UnsupportedLayerType.
+func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
+ l.Truncated = false
+ if !l.IgnorePanic {
+ defer panicToError(&err)
+ }
+ typ, err := l.decodeFunc(data, decoded)
+ if typ != LayerTypeZero {
+ // no decoder
+ if l.IgnoreUnsupported {
+ return nil
+ }
+ return UnsupportedLayerType(typ)
+ }
+ return err
+}
+
+// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
+// encounters a layer type that the DecodingLayerParser has no decoder for.
+type UnsupportedLayerType LayerType
+
+// Error implements the error interface, returning a string to say that the
+// given layer type is unsupported.
+func (e UnsupportedLayerType) Error() string {
+ return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
+}
+
+func panicToError(e *error) {
+ if r := recover(); r != nil {
+ *e = fmt.Errorf("panic: %v", r)
+ }
+}
+
+// DecodingLayerParserOptions provides options to affect the behavior of a given
+// DecodingLayerParser.
+type DecodingLayerParserOptions struct {
+ // IgnorePanic determines whether a DecodingLayerParser should stop
+ // panics on its own (by returning them as an error from DecodeLayers)
+ // or should allow them to raise up the stack. Handling errors does add
+ // latency to the process of decoding layers, but is much safer for
+ // callers. IgnorePanic defaults to false, thus if the caller does
+ // nothing decode panics will be returned as errors.
+ IgnorePanic bool
+ // IgnoreUnsupported will stop parsing and return a nil error when it
+ // encounters a layer it doesn't have a parser for, instead of returning an
+ // UnsupportedLayerType error. If this is true, it's up to the caller to make
+ // sure that all expected layers have been parsed (by checking the decoded
+ // slice).
+ IgnoreUnsupported bool
+}
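+
+// Since both options are plain booleans embedded in DecodingLayerParser,
+// they can be toggled directly before DecodeLayers is called. A short
+// sketch (editor's example; eth, ip4 and tcp as in the example above,
+// and data holds raw packet bytes):
+//
+//  parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &tcp)
+//  parser.IgnoreUnsupported = true // unsupported layers end parsing without error
+//  decoded := make([]gopacket.LayerType, 0, 10)
+//  err := parser.DecodeLayers(data, &decoded)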
diff --git a/vendor/github.com/google/gopacket/pcap/defs_windows_386.go b/vendor/github.com/google/gopacket/pcap/defs_windows_386.go
new file mode 100644
index 0000000..774e907
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/defs_windows_386.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// This file contains necessary structs/constants generated from libpcap headers with cgo -godefs
+// generated with: generate_defs.exe
+// DO NOT MODIFY
+
+package pcap
+
+import "syscall"
+
+const errorBufferSize = 0x100
+
+const (
+ pcapErrorNotActivated = -0x3
+ pcapErrorActivated = -0x4
+ pcapWarningPromisc = 0x2
+ pcapErrorNoSuchDevice = -0x5
+ pcapErrorDenied = -0x8
+ pcapErrorNotUp = -0x9
+ pcapError = -0x1
+ pcapWarning = 0x1
+ pcapDIN = 0x1
+ pcapDOUT = 0x2
+ pcapDINOUT = 0x0
+ pcapNetmaskUnknown = 0xffffffff
+ pcapTstampPrecisionMicro = 0x0
+ pcapTstampPrecisionNano = 0x1
+)
+
+type timeval struct {
+ Sec int32
+ Usec int32
+}
+type pcapPkthdr struct {
+ Ts timeval
+ Caplen uint32
+ Len uint32
+}
+type pcapTPtr uintptr
+type pcapBpfInstruction struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+type pcapBpfProgram struct {
+ Len uint32
+ Insns *pcapBpfInstruction
+}
+type pcapStats struct {
+ Recv uint32
+ Drop uint32
+ Ifdrop uint32
+}
+type pcapCint int32
+type pcapIf struct {
+ Next *pcapIf
+ Name *int8
+ Description *int8
+ Addresses *pcapAddr
+ Flags uint32
+}
+
+type pcapAddr struct {
+ Next *pcapAddr
+ Addr *syscall.RawSockaddr
+ Netmask *syscall.RawSockaddr
+ Broadaddr *syscall.RawSockaddr
+ Dstaddr *syscall.RawSockaddr
+}
diff --git a/vendor/github.com/google/gopacket/pcap/defs_windows_amd64.go b/vendor/github.com/google/gopacket/pcap/defs_windows_amd64.go
new file mode 100644
index 0000000..9619215
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/defs_windows_amd64.go
@@ -0,0 +1,76 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// This file contains necessary structs/constants generated from libpcap headers with cgo -godefs
+// generated with: generate_defs.exe
+// DO NOT MODIFY
+
+package pcap
+
+import "syscall"
+
+const errorBufferSize = 0x100
+
+const (
+ pcapErrorNotActivated = -0x3
+ pcapErrorActivated = -0x4
+ pcapWarningPromisc = 0x2
+ pcapErrorNoSuchDevice = -0x5
+ pcapErrorDenied = -0x8
+ pcapErrorNotUp = -0x9
+ pcapError = -0x1
+ pcapWarning = 0x1
+ pcapDIN = 0x1
+ pcapDOUT = 0x2
+ pcapDINOUT = 0x0
+ pcapNetmaskUnknown = 0xffffffff
+ pcapTstampPrecisionMicro = 0x0
+ pcapTstampPrecisionNano = 0x1
+)
+
+type timeval struct {
+ Sec int32
+ Usec int32
+}
+type pcapPkthdr struct {
+ Ts timeval
+ Caplen uint32
+ Len uint32
+}
+type pcapTPtr uintptr
+type pcapBpfInstruction struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+type pcapBpfProgram struct {
+ Len uint32
+ Pad_cgo_0 [4]byte
+ Insns *pcapBpfInstruction
+}
+type pcapStats struct {
+ Recv uint32
+ Drop uint32
+ Ifdrop uint32
+}
+type pcapCint int32
+type pcapIf struct {
+ Next *pcapIf
+ Name *int8
+ Description *int8
+ Addresses *pcapAddr
+ Flags uint32
+ Pad_cgo_0 [4]byte
+}
+
+type pcapAddr struct {
+ Next *pcapAddr
+ Addr *syscall.RawSockaddr
+ Netmask *syscall.RawSockaddr
+ Broadaddr *syscall.RawSockaddr
+ Dstaddr *syscall.RawSockaddr
+}
diff --git a/vendor/github.com/google/gopacket/pcap/doc.go b/vendor/github.com/google/gopacket/pcap/doc.go
new file mode 100644
index 0000000..38b3141
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/doc.go
@@ -0,0 +1,112 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package pcap allows users of gopacket to read packets off the wire or from
+pcap files.
+
+This package is meant to be used with its parent,
+http://github.com/google/gopacket, although it can also be used independently
+if you just want to get packet data from the wire.
+
+Depending on the libpcap version, OS support, and file timestamp resolution,
+either microsecond or nanosecond resolution is used for the internal
+timestamps. Returned timestamps are always scaled to nanosecond resolution
+because time.Time is used. libpcap must be at least version 1.5 to support
+nanosecond timestamps. OpenLive supports only microsecond resolution.
+
+Reading PCAP Files
+
+The following code can be used to read in data from a pcap file.
+
+ if handle, err := pcap.OpenOffline("/path/to/my/file"); err != nil {
+ panic(err)
+ } else {
+ packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
+ for packet := range packetSource.Packets() {
+ handlePacket(packet) // Do something with a packet here.
+ }
+ }
+
+Reading Live Packets
+
+The following code can be used to read in data from a live device, in this case
+"eth0". Be aware, that OpenLive only supports microsecond resolution.
+
+ if handle, err := pcap.OpenLive("eth0", 1600, true, pcap.BlockForever); err != nil {
+ panic(err)
+ } else if err := handle.SetBPFFilter("tcp and port 80"); err != nil { // optional
+ panic(err)
+ } else {
+ packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
+ for packet := range packetSource.Packets() {
+ handlePacket(packet) // Do something with a packet here.
+ }
+ }
+
+Inactive Handles
+
+Newer PCAP functionality requires the concept of an 'inactive' PCAP handle.
+Instead of constantly adding new arguments to pcap_open_live, users now call
+pcap_create to create a handle, set it up with a bunch of optional function
+calls, then call pcap_activate to activate it. This library mirrors that
+mechanism, for those that want to expose/use these new features:
+
+ inactive, err := pcap.NewInactiveHandle(deviceName)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer inactive.CleanUp()
+
+ // Call various functions on inactive to set it up the way you'd like:
+ if err = inactive.SetTimeout(time.Minute); err != nil {
+ log.Fatal(err)
+ } else if err = inactive.SetTimestampSource("foo"); err != nil {
+ log.Fatal(err)
+ }
+
+ // Finally, create the actual handle by calling Activate:
+ handle, err := inactive.Activate() // after this, inactive is no longer valid
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer handle.Close()
+
+ // Now use your handle as you see fit.
+
+PCAP Timeouts
+
+pcap.OpenLive and pcap.SetTimeout both take timeouts.
+If you don't care about timeouts, just pass in BlockForever,
+which should do what you expect with minimal fuss.
+
+A timeout of 0 is not recommended. Some platforms, like Macs
+(http://www.manpages.info/macosx/pcap.3.html) say:
+ The read timeout is used to arrange that the read not necessarily return
+ immediately when a packet is seen, but that it wait for some amount of time
+ to allow more packets to arrive and to read multiple packets from the OS
+ kernel in one operation.
+This means that if you only capture one packet, the kernel might decide to wait
+'timeout' for more packets to batch with it before returning. A timeout of
+0, then, means 'wait forever for more packets', which is... not good.
+
+To get around this, we've introduced the following behavior: if a negative
+timeout is passed in, we set the positive timeout in the handle, then loop
+internally in ReadPacketData/ZeroCopyReadPacketData when we see timeout
+errors.
+
+PCAP File Writing
+
+This package does not implement PCAP file writing. However, gopacket/pcapgo
+does! Look there if you'd like to write PCAP files.
+
+Note For Windows Users
+
+gopacket can use winpcap or npcap. If both are installed at the same time,
+npcap is preferred. Make sure the right Windows service is loaded (npcap for npcap
+and npf for winpcap).
+*/
+package pcap
diff --git a/vendor/github.com/google/gopacket/pcap/generate_defs.go b/vendor/github.com/google/gopacket/pcap/generate_defs.go
new file mode 100644
index 0000000..bcbf161
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/generate_defs.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+package main
+
+// This file generates the godefs needed for the windows version.
+// Rebuild is only necessary if additional libpcap functionality is implemented, or a new arch is implemented in golang.
+// Call with go run generate_defs.go [-I includepath]
+// Needs npcap sdk, go tool cgo, and gofmt to work. Location of npcap includes can be specified with -I
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+)
+
+const header = `// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// This file contains necessary structs/constants generated from libpcap headers with cgo -godefs
+// generated with: %s
+// DO NOT MODIFY
+
+`
+
+const source = `
+package pcap
+
+//#include <pcap.h>
+import "C"
+
+import "syscall" // needed for RawSockaddr
+
+const errorBufferSize = C.PCAP_ERRBUF_SIZE
+
+const (
+ pcapErrorNotActivated = C.PCAP_ERROR_NOT_ACTIVATED
+ pcapErrorActivated = C.PCAP_ERROR_ACTIVATED
+ pcapWarningPromisc = C.PCAP_WARNING_PROMISC_NOTSUP
+ pcapErrorNoSuchDevice = C.PCAP_ERROR_NO_SUCH_DEVICE
+ pcapErrorDenied = C.PCAP_ERROR_PERM_DENIED
+ pcapErrorNotUp = C.PCAP_ERROR_IFACE_NOT_UP
+ pcapError = C.PCAP_ERROR
+ pcapWarning = C.PCAP_WARNING
+ pcapDIN = C.PCAP_D_IN
+ pcapDOUT = C.PCAP_D_OUT
+ pcapDINOUT = C.PCAP_D_INOUT
+ pcapNetmaskUnknown = C.PCAP_NETMASK_UNKNOWN
+ pcapTstampPrecisionMicro = C.PCAP_TSTAMP_PRECISION_MICRO
+ pcapTstampPrecisionNano = C.PCAP_TSTAMP_PRECISION_NANO
+)
+
+type timeval C.struct_timeval
+type pcapPkthdr C.struct_pcap_pkthdr
+type pcapTPtr uintptr
+type pcapBpfInstruction C.struct_bpf_insn
+type pcapBpfProgram C.struct_bpf_program
+type pcapStats C.struct_pcap_stat
+type pcapCint C.int
+type pcapIf C.struct_pcap_if
+// +godefs map struct_sockaddr syscall.RawSockaddr
+type pcapAddr C.struct_pcap_addr
+`
+
+var includes = flag.String("I", "C:\\npcap-sdk-1.01\\Include", "Include path containing libpcap headers")
+
+func main() {
+ flag.Parse()
+
+ infile, err := ioutil.TempFile(".", "defs.*.go")
+ if err != nil {
+ log.Fatal("Couldn't create temporary source file: ", err)
+ }
+ defer infile.Close()
+ defer os.Remove(infile.Name())
+
+ _, err = infile.WriteString(source)
+ if err != nil {
+ log.Fatalf("Couldn't write definitions to temporary file %s: %s", infile.Name(), err)
+ }
+ err = infile.Close()
+ if err != nil {
+ log.Fatalf("Couldn't close temporary source file %s: %s", infile.Name(), err)
+ }
+
+ archs := []string{"386", "amd64"}
+ for _, arch := range archs {
+ env := append(os.Environ(), "GOARCH="+arch)
+ cmd := exec.Command("go", "tool", "cgo", "-godefs", "--", "-I", *includes, infile.Name())
+ cmd.Env = env
+ cmd.Stderr = os.Stderr
+ var generated bytes.Buffer
+ cmd.Stdout = &generated
+ err := cmd.Run()
+ if err != nil {
+ log.Fatalf("Couldn't generated defs for %s: %s\n", arch, err)
+ }
+
+ cmd = exec.Command("gofmt")
+ cmd.Env = env
+ cmd.Stderr = os.Stderr
+ outName := fmt.Sprintf("defs_windows_%s.go", arch)
+ out, err := os.Create(outName)
+ if err != nil {
+ log.Fatalf("Couldn't open file %s: %s", outName, err)
+ }
+ cmd.Stdout = out
+ in, err := cmd.StdinPipe()
+ if err != nil {
+ log.Fatal("Couldn't create input pipe for gofmt: ", err)
+ }
+ err = cmd.Start()
+ if err != nil {
+ log.Fatal("Couldn't start gofmt: ", err)
+ }
+
+ _, err = fmt.Fprintf(in, header, strings.Join(append([]string{filepath.Base(os.Args[0])}, os.Args[1:]...), " "))
+ if err != nil {
+ log.Fatal("Couldn't write header to gofmt: ", err)
+ }
+
+ for {
+ line, err := generated.ReadBytes('\n')
+ if err != nil {
+ break
+ }
+ // remove godefs comments
+ if bytes.HasPrefix(line, []byte("//")) {
+ continue
+ }
+ _, err = in.Write(line)
+ if err != nil {
+ log.Fatal("Couldn't write line to gofmt: ", err)
+ }
+ }
+ in.Close()
+ err = cmd.Wait()
+ if err != nil {
+ log.Fatal("gofmt failed: ", err)
+ }
+ out.Close()
+ }
+}
diff --git a/vendor/github.com/google/gopacket/pcap/pcap.go b/vendor/github.com/google/gopacket/pcap/pcap.go
new file mode 100644
index 0000000..9fa22bf
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/pcap.go
@@ -0,0 +1,873 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package pcap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
+// ErrNotActive is returned if handle is not activated
+const ErrNotActive = pcapErrorNotActivated
+
+// MaxBpfInstructions is the maximum number of BPF instructions supported (BPF_MAXINSNS),
+// taken from Linux kernel: include/uapi/linux/bpf_common.h
+//
+// https://github.com/torvalds/linux/blob/master/include/uapi/linux/bpf_common.h
+const MaxBpfInstructions = 4096
+
+// 8 bytes per instruction, max 4096 instructions
+const bpfInstructionBufferSize = 8 * MaxBpfInstructions
+
+// Handle provides a connection to a pcap handle, allowing users to read packets
+// off the wire (Next), inject packets onto the wire (Inject), and
+// perform a number of other functions to affect and understand packet output.
+//
+// Handles are already pcap_activate'd
+type Handle struct {
+ // stop is set to a non-zero value by Handle.Close to signal to
+ // getNextBufPtrLocked to stop trying to read packets
+ // This must be the first entry to ensure alignment for sync.atomic
+ stop uint64
+ // cptr is the handle for the actual pcap C object.
+ cptr pcapTPtr
+ timeout time.Duration
+ device string
+ deviceIndex int
+ mu sync.Mutex
+ closeMu sync.Mutex
+ nanoSecsFactor int64
+
+ // Since pointers to these objects are passed into a C function, if
+ // they're declared locally then the Go compiler thinks they may have
+ // escaped into C-land, so it allocates them on the heap. This causes a
+ // huge memory hit, so to handle that we store them here instead.
+ pkthdr *pcapPkthdr
+ bufptr *uint8
+}
+
+// Stats contains statistics on how many packets were handled by a pcap handle,
+// and what was done with those packets.
+type Stats struct {
+ PacketsReceived int
+ PacketsDropped int
+ PacketsIfDropped int
+}
+
+// Interface describes a single network interface on a machine.
+type Interface struct {
+ Name string
+ Description string
+ Flags uint32
+ Addresses []InterfaceAddress
+}
+
+// Datalink describes the datalink
+type Datalink struct {
+ Name string
+ Description string
+}
+
+// InterfaceAddress describes an address associated with an Interface.
+// Currently, it's IPv4/6 specific.
+type InterfaceAddress struct {
+ IP net.IP
+ Netmask net.IPMask // Netmask may be nil if we were unable to retrieve it.
+ Broadaddr net.IP // Broadcast address for this IP may be nil
+ P2P net.IP // P2P destination address for this IP may be nil
+}
+
+// bpfFilter keeps C.struct_bpf_program separate from BPF.orig which might be a pointer to go memory.
+// This is a workaround for https://github.com/golang/go/issues/32970 which will be fixed in go1.14.
+// (type conversion is in pcap_unix.go pcapOfflineFilter)
+type bpfFilter struct {
+ bpf pcapBpfProgram // takes a finalizer, not overridden by outsiders
+}
+
+// BPF is a compiled filter program, useful for offline packet matching.
+type BPF struct {
+ orig string
+ bpf *bpfFilter
+ hdr pcapPkthdr // allocate on the heap to enable optimizations
+}
+
+// BPFInstruction is a byte encoded structure holding a BPF instruction
+type BPFInstruction struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+
+// BlockForever, when passed into SetTimeout or OpenLive, causes a handle to
+// block forever waiting for packets, while still returning incoming packets
+// to userland relatively quickly.
+const BlockForever = -time.Millisecond * 10
+
+func timeoutMillis(timeout time.Duration) int {
+ // Flip sign if necessary. See package docs on timeout for reasoning behind this.
+ if timeout < 0 {
+ timeout *= -1
+ }
+ // Round up
+ if timeout != 0 && timeout < time.Millisecond {
+ timeout = time.Millisecond
+ }
+ return int(timeout / time.Millisecond)
+}
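+
+// Worked examples of the conversion above (editor's note):
+//  BlockForever (-10ms)   -> 10 (sign flipped, then converted to whole ms)
+//  500 * time.Microsecond -> 1  (sub-millisecond values are rounded up)
+//  0                      -> 0  (passed through; see the package docs on
+//                                why a timeout of 0 is discouraged)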
+
+// OpenLive opens a device and returns a *Handle.
+// It takes as arguments the name of the device ("eth0"), the maximum size to
+// read for each packet (snaplen), whether to put the interface in promiscuous
+// mode, and a timeout. Warning: this function supports only microsecond timestamps.
+// For nanosecond resolution use an InactiveHandle.
+//
+// See the package documentation for important details regarding 'timeout'.
+func OpenLive(device string, snaplen int32, promisc bool, timeout time.Duration) (handle *Handle, _ error) {
+ var pro int
+ if promisc {
+ pro = 1
+ }
+
+ p, err := pcapOpenLive(device, int(snaplen), pro, timeoutMillis(timeout))
+ if err != nil {
+ return nil, err
+ }
+ p.timeout = timeout
+ p.device = device
+
+ ifc, err := net.InterfaceByName(device)
+ if err != nil {
+ // The device wasn't found in the OS, but could be "any"
+ // Set index to 0
+ p.deviceIndex = 0
+ } else {
+ p.deviceIndex = ifc.Index
+ }
+
+ p.nanoSecsFactor = 1000
+
+ // Only set the PCAP handle into non-blocking mode if we have a timeout
+ // greater than zero. If the user wants to block forever, we'll let libpcap
+ // handle that.
+ if p.timeout > 0 {
+ if err := p.setNonBlocking(); err != nil {
+ p.pcapClose()
+ return nil, err
+ }
+ }
+
+ return p, nil
+}
+
+// OpenOffline opens a file and returns its contents as a *Handle. Depending on libpcap support and
+// on the timestamp resolution used in the file, nanosecond or microsecond resolution is used
+// internally. All returned timestamps are scaled to nanosecond resolution. Resolution() can be used
+// to query the actual resolution used.
+func OpenOffline(file string) (handle *Handle, err error) {
+ handle, err = openOffline(file)
+ if err != nil {
+ return
+ }
+ if pcapGetTstampPrecision(handle.cptr) == pcapTstampPrecisionNano {
+ handle.nanoSecsFactor = 1
+ } else {
+ handle.nanoSecsFactor = 1000
+ }
+ return
+}
+
+// OpenOfflineFile returns contents of input file as a *Handle. Depending on libpcap support and
+// on the timestamp resolution used in the file, nanosecond or microsecond resolution is used
+// internally. All returned timestamps are scaled to nanosecond resolution. Resolution() can be used
+// to query the actual resolution used.
+func OpenOfflineFile(file *os.File) (handle *Handle, err error) {
+ handle, err = openOfflineFile(file)
+ if err != nil {
+ return
+ }
+ if pcapGetTstampPrecision(handle.cptr) == pcapTstampPrecisionNano {
+ handle.nanoSecsFactor = 1
+ } else {
+ handle.nanoSecsFactor = 1000
+ }
+ return
+}
+
+// NextError is the return code from a call to Next.
+type NextError int32
+
+// NextError implements the error interface.
+func (n NextError) Error() string {
+ switch n {
+ case NextErrorOk:
+ return "OK"
+ case NextErrorTimeoutExpired:
+ return "Timeout Expired"
+ case NextErrorReadError:
+ return "Read Error"
+ case NextErrorNoMorePackets:
+ return "No More Packets In File"
+ case NextErrorNotActivated:
+ return "Not Activated"
+ }
+ return strconv.Itoa(int(n))
+}
+
+// NextError values.
+const (
+ NextErrorOk NextError = 1
+ NextErrorTimeoutExpired NextError = 0
+ NextErrorReadError NextError = -1
+ // NextErrorNoMorePackets is returned when reading from a file (OpenOffline) and
+ // EOF is reached. When this happens, Next() returns io.EOF instead of this.
+ NextErrorNoMorePackets NextError = -2
+ NextErrorNotActivated NextError = -3
+)
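+
+// A hedged sketch of distinguishing these values in a direct read loop
+// (editor's example; handle is an activated *Handle and process is a
+// hypothetical callback):
+//
+//  for {
+//    data, ci, err := handle.ReadPacketData()
+//    switch err {
+//    case nil:
+//      process(data, ci)
+//    case pcap.NextErrorTimeoutExpired:
+//      continue // no packet arrived within the timeout; try again
+//    case io.EOF:
+//      return // end of a capture file opened with OpenOffline
+//    default:
+//      log.Fatal(err)
+//    }
+//  }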
+
+// ReadPacketData returns the next packet read from the pcap handle, along with an error
+// code associated with that packet. If the packet is read successfully, the
+// returned error is nil.
+func (p *Handle) ReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) {
+ p.mu.Lock()
+ err = p.getNextBufPtrLocked(&ci)
+ if err == nil {
+ data = make([]byte, ci.CaptureLength)
+ copy(data, (*(*[1 << 30]byte)(unsafe.Pointer(p.bufptr)))[:])
+ }
+ p.mu.Unlock()
+ if err == NextErrorTimeoutExpired {
+ runtime.Gosched()
+ }
+ return
+}
+
+type activateError int
+
+const (
+ aeNoError = activateError(0)
+ aeActivated = activateError(pcapErrorActivated)
+ aePromisc = activateError(pcapWarningPromisc)
+ aeNoSuchDevice = activateError(pcapErrorNoSuchDevice)
+ aeDenied = activateError(pcapErrorDenied)
+ aeNotUp = activateError(pcapErrorNotUp)
+ aeWarning = activateError(pcapWarning)
+)
+
+func (a activateError) Error() string {
+ switch a {
+ case aeNoError:
+ return "No Error"
+ case aeActivated:
+ return "Already Activated"
+ case aePromisc:
+ return "Cannot set as promisc"
+ case aeNoSuchDevice:
+ return "No Such Device"
+ case aeDenied:
+ return "Permission Denied"
+ case aeNotUp:
+ return "Interface Not Up"
+ case aeWarning:
+ return fmt.Sprintf("Warning: %v", activateErrMsg.Error())
+ default:
+ return fmt.Sprintf("unknown activated error: %d", a)
+ }
+}
+
+// getNextBufPtrLocked is shared code for ReadPacketData and
+// ZeroCopyReadPacketData.
+func (p *Handle) getNextBufPtrLocked(ci *gopacket.CaptureInfo) error {
+ if !p.isOpen() {
+ return io.EOF
+ }
+
+ // set after we have called waitForPacket for the first time
+ var waited bool
+
+ for atomic.LoadUint64(&p.stop) == 0 {
+ // try to read a packet if one is immediately available
+ result := p.pcapNextPacketEx()
+
+ switch result {
+ case NextErrorOk:
+ sec := p.pkthdr.getSec()
+ // convert micros to nanos
+ nanos := int64(p.pkthdr.getUsec()) * p.nanoSecsFactor
+
+ ci.Timestamp = time.Unix(sec, nanos)
+ ci.CaptureLength = p.pkthdr.getCaplen()
+ ci.Length = p.pkthdr.getLen()
+ ci.InterfaceIndex = p.deviceIndex
+
+ return nil
+ case NextErrorNoMorePackets:
+ // no more packets, return EOF rather than libpcap-specific error
+ return io.EOF
+ case NextErrorTimeoutExpired:
+ // we've already waited for a packet and we're supposed to time out
+ //
+ // we should never actually hit this if we were passed BlockForever
+ // since we should block on C.pcap_next_ex until there's a packet
+ // to read.
+ if waited && p.timeout > 0 {
+ return result
+ }
+
+ // wait for packet before trying again
+ p.waitForPacket()
+ waited = true
+ default:
+ return result
+ }
+ }
+
+ // stop must be set
+ return io.EOF
+}
+
+// ZeroCopyReadPacketData reads the next packet off the wire, and returns its data.
+// The slice returned by ZeroCopyReadPacketData points to bytes owned by the
+// Handle. Each call to ZeroCopyReadPacketData invalidates any data previously
+// returned by ZeroCopyReadPacketData. Care must be taken not to keep pointers
+// to old bytes when using ZeroCopyReadPacketData... if you need to keep data past
+// the next time you call ZeroCopyReadPacketData, use ReadPacketData, which copies
+// the bytes into a new buffer for you.
+// data1, _, _ := handle.ZeroCopyReadPacketData()
+// // do everything you want with data1 here, copying bytes out of it if you'd like to keep them around.
+// data2, _, _ := handle.ZeroCopyReadPacketData() // invalidates bytes in data1
+func (p *Handle) ZeroCopyReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) {
+ p.mu.Lock()
+ err = p.getNextBufPtrLocked(&ci)
+ if err == nil {
+ slice := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+ slice.Data = uintptr(unsafe.Pointer(p.bufptr))
+ slice.Len = ci.CaptureLength
+ slice.Cap = ci.CaptureLength
+ }
+ p.mu.Unlock()
+ if err == NextErrorTimeoutExpired {
+ runtime.Gosched()
+ }
+ return
+}
+
+// Close closes the underlying pcap handle.
+func (p *Handle) Close() {
+ p.closeMu.Lock()
+ defer p.closeMu.Unlock()
+
+ if !p.isOpen() {
+ return
+ }
+
+ atomic.StoreUint64(&p.stop, 1)
+
+ // wait for packet reader to stop
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ p.pcapClose()
+}
+
+// Error returns the current error associated with a pcap handle (pcap_geterr).
+func (p *Handle) Error() error {
+ return p.pcapGeterr()
+}
+
+// Stats returns statistics on the underlying pcap handle.
+func (p *Handle) Stats() (stat *Stats, err error) {
+ return p.pcapStats()
+}
+
+// ListDataLinks obtains a list of all possible data link types supported for an interface.
+func (p *Handle) ListDataLinks() (datalinks []Datalink, err error) {
+ return p.pcapListDatalinks()
+}
+
+// compileBPFFilter always returns an allocated C.struct_bpf_program.
+// It is the caller's responsibility to free the memory again, e.g.
+//
+// C.pcap_freecode(&bpf)
+//
+func (p *Handle) compileBPFFilter(expr string) (pcapBpfProgram, error) {
+ var maskp = uint32(pcapNetmaskUnknown)
+
+ // Only do the lookup on network interfaces.
+ // No device indicates we're handling a pcap file.
+ if len(p.device) > 0 {
+ var err error
+ _, maskp, err = pcapLookupnet(p.device)
+ if err != nil {
+ // We can't lookup the network, but that could be because the interface
+ // doesn't have an IPv4.
+ maskp = uint32(pcapNetmaskUnknown)
+ }
+ }
+
+ return p.pcapCompile(expr, maskp)
+}
+
+// CompileBPFFilter compiles and returns a BPF filter for the given link type and capture length.
+func CompileBPFFilter(linkType layers.LinkType, captureLength int, expr string) ([]BPFInstruction, error) {
+ h, err := pcapOpenDead(linkType, captureLength)
+ if err != nil {
+ return nil, err
+ }
+ defer h.Close()
+ return h.CompileBPFFilter(expr)
+}
+
+// CompileBPFFilter compiles and returns a BPF filter for the pcap handle.
+func (p *Handle) CompileBPFFilter(expr string) ([]BPFInstruction, error) {
+ bpf, err := p.compileBPFFilter(expr)
+ defer bpf.free()
+ if err != nil {
+ return nil, err
+ }
+
+ return bpf.toBPFInstruction(), nil
+}
+
+// SetBPFFilter compiles and sets a BPF filter for the pcap handle.
+func (p *Handle) SetBPFFilter(expr string) (err error) {
+ bpf, err := p.compileBPFFilter(expr)
+ defer bpf.free()
+ if err != nil {
+ return err
+ }
+
+ return p.pcapSetfilter(bpf)
+}
+
+// SetBPFInstructionFilter may be used to apply a filter in BPF asm byte code format.
+//
+// Simplest way to generate BPF asm byte code is with tcpdump:
+// tcpdump -dd 'udp'
+//
+// The output may be used directly to add a filter, e.g.:
+// bpfInstructions := []pcap.BPFInstruction{
+// {0x28, 0, 0, 0x0000000c},
+// {0x15, 0, 9, 0x00000800},
+// {0x30, 0, 0, 0x00000017},
+// {0x15, 0, 7, 0x00000006},
+// {0x28, 0, 0, 0x00000014},
+// {0x45, 5, 0, 0x00001fff},
+// {0xb1, 0, 0, 0x0000000e},
+// {0x50, 0, 0, 0x0000001b},
+// {0x54, 0, 0, 0x00000012},
+// {0x15, 0, 1, 0x00000012},
+// {0x6, 0, 0, 0x0000ffff},
+// {0x6, 0, 0, 0x00000000},
+// }
+//
+// Another possibility is to write the BPF code in BPF asm.
+// Documentation: https://www.kernel.org/doc/Documentation/networking/filter.txt
+//
+// To compile the code use bpf_asm from
+// https://github.com/torvalds/linux/tree/master/tools/net
+//
+// The following command may be used to convert bpf_asm output to a c/go struct, usable for SetBPFInstructionFilter:
+// bpf_asm -c tcp.bpf
+func (p *Handle) SetBPFInstructionFilter(bpfInstructions []BPFInstruction) (err error) {
+ bpf, err := bpfInstructionFilter(bpfInstructions)
+ if err != nil {
+ return err
+ }
+ defer bpf.free()
+
+ return p.pcapSetfilter(bpf)
+}
+
+func bpfInstructionFilter(bpfInstructions []BPFInstruction) (bpf pcapBpfProgram, err error) {
+ if len(bpfInstructions) < 1 {
+ return bpf, errors.New("bpfInstructions must not be empty")
+ }
+
+ if len(bpfInstructions) > MaxBpfInstructions {
+ return bpf, fmt.Errorf("bpfInstructions must not be larger than %d", MaxBpfInstructions)
+ }
+
+ return pcapBpfProgramFromInstructions(bpfInstructions), nil
+}
+
+// NewBPF compiles the given string into a new filter program.
+//
+// BPF filters need to be created from activated handles, because they need to
+// know the underlying link type to correctly compile their offsets.
+func (p *Handle) NewBPF(expr string) (*BPF, error) {
+ bpf := &BPF{orig: expr, bpf: new(bpfFilter)}
+
+ var err error
+ bpf.bpf.bpf, err = p.pcapCompile(expr, pcapNetmaskUnknown)
+ if err != nil {
+ return nil, err
+ }
+
+ runtime.SetFinalizer(bpf, destroyBPF)
+ return bpf, nil
+}
+
+// NewBPF allows creating a BPF filter without requiring an existing handle,
+// so that packets obtained from a non-gopacket capture source can be
+// matched.
+//
+// buf := make([]byte, MaxFrameSize)
+// bpfi, _ := pcap.NewBPF(layers.LinkTypeEthernet, MaxFrameSize, "icmp")
+// n, _ := someIO.Read(buf)
+// ci := gopacket.CaptureInfo{CaptureLength: n, Length: n}
+// if bpfi.Matches(ci, buf) {
+// doSomething()
+// }
+func NewBPF(linkType layers.LinkType, captureLength int, expr string) (*BPF, error) {
+ h, err := pcapOpenDead(linkType, captureLength)
+ if err != nil {
+ return nil, err
+ }
+ defer h.Close()
+ return h.NewBPF(expr)
+}
+
+// NewBPFInstructionFilter sets the given BPFInstructions as new filter program.
+//
+// For more details, see SetBPFInstructionFilter.
+//
+// BPF filters need to be created from activated handles, because they need to
+// know the underlying link type to correctly compile their offsets.
+func (p *Handle) NewBPFInstructionFilter(bpfInstructions []BPFInstruction) (*BPF, error) {
+ var err error
+ bpf := &BPF{orig: "BPF Instruction Filter", bpf: new(bpfFilter)}
+
+ bpf.bpf.bpf, err = bpfInstructionFilter(bpfInstructions)
+ if err != nil {
+ return nil, err
+ }
+
+ runtime.SetFinalizer(bpf, destroyBPF)
+ return bpf, nil
+}
+func destroyBPF(bpf *BPF) {
+ bpf.bpf.bpf.free()
+}
+
+// String returns the original string this BPF filter was compiled from.
+func (b *BPF) String() string {
+ return b.orig
+}
+
+// Matches returns true if the given packet data matches this filter.
+func (b *BPF) Matches(ci gopacket.CaptureInfo, data []byte) bool {
+ return b.pcapOfflineFilter(ci, data)
+}
+
+// Version returns pcap_lib_version.
+func Version() string {
+ return pcapLibVersion()
+}
+
+// LinkType returns pcap_datalink, as a layers.LinkType.
+func (p *Handle) LinkType() layers.LinkType {
+ return p.pcapDatalink()
+}
+
+// SetLinkType calls pcap_set_datalink on the pcap handle.
+func (p *Handle) SetLinkType(dlt layers.LinkType) error {
+ return p.pcapSetDatalink(dlt)
+}
+
+// DatalinkValToName returns pcap_datalink_val_to_name as string
+func DatalinkValToName(dlt int) string {
+ return pcapDatalinkValToName(dlt)
+}
+
+// DatalinkValToDescription returns pcap_datalink_val_to_description as string
+func DatalinkValToDescription(dlt int) string {
+ return pcapDatalinkValToDescription(dlt)
+}
+
+// DatalinkNameToVal returns pcap_datalink_name_to_val as int
+func DatalinkNameToVal(name string) int {
+ return pcapDatalinkNameToVal(name)
+}
+
+// FindAllDevs attempts to enumerate all interfaces on the current machine.
+func FindAllDevs() (ifs []Interface, err error) {
+ alldevsp, err := pcapFindAllDevs()
+ if err != nil {
+ return nil, err
+ }
+ defer alldevsp.free()
+
+ for alldevsp.next() {
+ var iface Interface
+ iface.Name = alldevsp.name()
+ iface.Description = alldevsp.description()
+ iface.Addresses = findalladdresses(alldevsp.addresses())
+ iface.Flags = alldevsp.flags()
+ ifs = append(ifs, iface)
+ }
+ return
+}
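+
+// An illustrative usage sketch (editor's example): enumerating devices and
+// printing their addresses:
+//
+//  ifs, err := pcap.FindAllDevs()
+//  if err != nil {
+//    log.Fatal(err)
+//  }
+//  for _, iface := range ifs {
+//    fmt.Printf("%s (%s)\n", iface.Name, iface.Description)
+//    for _, addr := range iface.Addresses {
+//      fmt.Printf("  %v/%v\n", addr.IP, addr.Netmask)
+//    }
+//  }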
+
+func findalladdresses(addresses pcapAddresses) (retval []InterfaceAddress) {
+ // TODO - make it support more than IPv4 and IPv6?
+ retval = make([]InterfaceAddress, 0, 1)
+ for addresses.next() {
+ // Strangely, it appears that in some cases, we get a pcap address back from
+ // pcap_findalldevs with a nil .addr. It appears that we can skip over
+ // these.
+ if addresses.addr() == nil {
+ continue
+ }
+ var a InterfaceAddress
+ var err error
+ if a.IP, err = sockaddrToIP(addresses.addr()); err != nil {
+ continue
+ }
+ // To be safe, we'll also check for netmask.
+ if addresses.netmask() == nil {
+ continue
+ }
+ if a.Netmask, err = sockaddrToIP(addresses.netmask()); err != nil {
+ // If we got an IP address but we can't get a netmask, just return the IP
+ // address.
+ a.Netmask = nil
+ }
+ if a.Broadaddr, err = sockaddrToIP(addresses.broadaddr()); err != nil {
+ a.Broadaddr = nil
+ }
+ if a.P2P, err = sockaddrToIP(addresses.dstaddr()); err != nil {
+ a.P2P = nil
+ }
+ retval = append(retval, a)
+ }
+ return
+}
+
+func sockaddrToIP(rsa *syscall.RawSockaddr) (IP []byte, err error) {
+ if rsa == nil {
+ err = errors.New("Value not set")
+ return
+ }
+ switch rsa.Family {
+ case syscall.AF_INET:
+ pp := (*syscall.RawSockaddrInet4)(unsafe.Pointer(rsa))
+ IP = make([]byte, 4)
+ for i := 0; i < len(IP); i++ {
+ IP[i] = pp.Addr[i]
+ }
+ return
+ case syscall.AF_INET6:
+ pp := (*syscall.RawSockaddrInet6)(unsafe.Pointer(rsa))
+ IP = make([]byte, 16)
+ for i := 0; i < len(IP); i++ {
+ IP[i] = pp.Addr[i]
+ }
+ return
+ }
+ err = errors.New("Unsupported address type")
+ return
+}
+
+// WritePacketData calls pcap_sendpacket, injecting the given data into the pcap handle.
+func (p *Handle) WritePacketData(data []byte) (err error) {
+ return p.pcapSendpacket(data)
+}
+
+// Direction is used by Handle.SetDirection.
+type Direction uint8
+
+// Direction values for Handle.SetDirection.
+const (
+ DirectionIn = Direction(pcapDIN)
+ DirectionOut = Direction(pcapDOUT)
+ DirectionInOut = Direction(pcapDINOUT)
+)
+
+// SetDirection sets the direction for which packets will be captured.
+func (p *Handle) SetDirection(direction Direction) error {
+ if direction != DirectionIn && direction != DirectionOut && direction != DirectionInOut {
+ return fmt.Errorf("Invalid direction: %v", direction)
+ }
+ return p.pcapSetdirection(direction)
+}
+
+// SnapLen returns the snapshot length
+func (p *Handle) SnapLen() int {
+ return p.pcapSnapshot()
+}
+
+// Resolution returns the timestamp resolution of acquired timestamps before scaling to NanosecondTimestampResolution.
+func (p *Handle) Resolution() gopacket.TimestampResolution {
+ if p.nanoSecsFactor == 1 {
+ return gopacket.TimestampResolutionMicrosecond
+ }
+ return gopacket.TimestampResolutionNanosecond
+}
+
+// TimestampSource tells PCAP which type of timestamp to use for packets.
+type TimestampSource int
+
+// String returns the timestamp type as a human-readable string.
+func (t TimestampSource) String() string {
+ return t.pcapTstampTypeValToName()
+}
+
+// TimestampSourceFromString translates a string into a timestamp type, case
+// insensitive.
+func TimestampSourceFromString(s string) (TimestampSource, error) {
+ return pcapTstampTypeNameToVal(s)
+}
+
+// InactiveHandle allows you to call pre-pcap_activate functions on your pcap
+// handle to set it up just the way you'd like.
+type InactiveHandle struct {
+ // cptr is the handle for the actual pcap C object.
+ cptr pcapTPtr
+ device string
+ deviceIndex int
+ timeout time.Duration
+}
+
+// holds the error message in case activation returned a warning
+var activateErrMsg error
+
+// Error returns the current error associated with a pcap handle (pcap_geterr).
+func (p *InactiveHandle) Error() error {
+ return p.pcapGeterr()
+}
+
+// Activate activates the handle. The current InactiveHandle becomes invalid
+// and all future function calls on it will fail.
+func (p *InactiveHandle) Activate() (*Handle, error) {
+ // ignore error with set_tstamp_precision, since the actual precision is queried later anyway
+ pcapSetTstampPrecision(p.cptr, pcapTstampPrecisionNano)
+ handle, err := p.pcapActivate()
+ if err != aeNoError {
+ if err == aeWarning {
+ activateErrMsg = p.Error()
+ }
+ return nil, err
+ }
+ handle.timeout = p.timeout
+ if p.timeout > 0 {
+ if err := handle.setNonBlocking(); err != nil {
+ handle.pcapClose()
+ return nil, err
+ }
+ }
+ handle.device = p.device
+ handle.deviceIndex = p.deviceIndex
+ if pcapGetTstampPrecision(handle.cptr) == pcapTstampPrecisionNano {
+ handle.nanoSecsFactor = 1
+ } else {
+ handle.nanoSecsFactor = 1000
+ }
+ return handle, nil
+}
+
+// CleanUp cleans up any stuff left over from a successful or failed building
+// of a handle.
+func (p *InactiveHandle) CleanUp() {
+ p.pcapClose()
+}
+
+// NewInactiveHandle creates a new InactiveHandle, which wraps an un-activated PCAP handle.
+// Callers of NewInactiveHandle should immediately defer 'CleanUp', as in:
+// inactive, err := NewInactiveHandle("eth0")
+// if err != nil { ... }
+// defer inactive.CleanUp()
+func NewInactiveHandle(device string) (*InactiveHandle, error) {
+ // Try to get the interface index, but it could be something like "any"
+ // in which case use 0, which doesn't exist in nature
+ deviceIndex := 0
+ ifc, err := net.InterfaceByName(device)
+ if err == nil {
+ deviceIndex = ifc.Index
+ }
+
+ // This copies a bunch of the pcap_open_live implementation from pcap.c:
+ handle, err := pcapCreate(device)
+ if err != nil {
+ return nil, err
+ }
+ handle.device = device
+ handle.deviceIndex = deviceIndex
+ return handle, nil
+}
+
+// SetSnapLen sets the snap length (max bytes per packet to capture).
+func (p *InactiveHandle) SetSnapLen(snaplen int) error {
+ return p.pcapSetSnaplen(snaplen)
+}
+
+// SetPromisc sets the handle to either be promiscuous (capture packets
+// unrelated to this host) or not.
+func (p *InactiveHandle) SetPromisc(promisc bool) error {
+ return p.pcapSetPromisc(promisc)
+}
+
+// SetTimeout sets the read timeout for the handle.
+//
+// See the package documentation for important details regarding 'timeout'.
+func (p *InactiveHandle) SetTimeout(timeout time.Duration) error {
+ err := p.pcapSetTimeout(timeout)
+ if err != nil {
+ return err
+ }
+ p.timeout = timeout
+ return nil
+}
+
+// SupportedTimestamps returns a list of supported timestamp types for this
+// handle.
+func (p *InactiveHandle) SupportedTimestamps() (out []TimestampSource) {
+ return p.pcapListTstampTypes()
+}
+
+// SetTimestampSource sets the type of timestamp generator PCAP uses when
+// attaching timestamps to packets.
+func (p *InactiveHandle) SetTimestampSource(t TimestampSource) error {
+ return p.pcapSetTstampType(t)
+}
+
+// CannotSetRFMon is returned by SetRFMon if the handle does not allow
+// setting RFMon because pcap_can_set_rfmon returns 0.
+var CannotSetRFMon = errors.New("Cannot set rfmon for this handle")
+
+// SetRFMon turns on radio monitoring mode, similar to promiscuous mode but for
+// wireless networks. If this mode is enabled, the interface will not need to
+// associate with an access point before it can receive traffic.
+func (p *InactiveHandle) SetRFMon(monitor bool) error {
+ return p.pcapSetRfmon(monitor)
+}
+
+// SetBufferSize sets the buffer size (in bytes) of the handle.
+func (p *InactiveHandle) SetBufferSize(bufferSize int) error {
+ return p.pcapSetBufferSize(bufferSize)
+}
+
+// SetImmediateMode sets (or unsets) the immediate mode of the
+// handle. In immediate mode, packets are delivered to the application
+// as soon as they arrive. In other words, this overrides SetTimeout.
+func (p *InactiveHandle) SetImmediateMode(mode bool) error {
+ return p.pcapSetImmediateMode(mode)
+}
diff --git a/vendor/github.com/google/gopacket/pcap/pcap_tester.go b/vendor/github.com/google/gopacket/pcap/pcap_tester.go
new file mode 100644
index 0000000..7873a96
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/pcap_tester.go
@@ -0,0 +1,108 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+// This binary tests that PCAP packet capture is working correctly by issuing
+// HTTP requests, then making sure we actually capture data off the wire.
+package main
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/google/gopacket/pcap"
+)
+
+var mode = flag.String("mode", "basic", "One of: basic,filtered,timestamp")
+
+func generatePackets() {
+ if resp, err := http.Get("http://code.google.com"); err != nil {
+ log.Printf("Could not get HTTP: %v", err)
+ } else {
+ resp.Body.Close()
+ }
+}
+
+func main() {
+ flag.Parse()
+ ifaces, err := pcap.FindAllDevs()
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, iface := range ifaces {
+ log.Printf("Trying capture on %q", iface.Name)
+ if err := tryCapture(iface); err != nil {
+ log.Printf("Error capturing on %q: %v", iface.Name, err)
+ } else {
+ log.Printf("Successfully captured on %q", iface.Name)
+ return
+ }
+ }
+ os.Exit(1)
+}
+
+func tryCapture(iface pcap.Interface) error {
+ if iface.Name[:2] == "lo" {
+ return errors.New("skipping loopback")
+ }
+ var h *pcap.Handle
+ var err error
+ switch *mode {
+ case "basic":
+ h, err = pcap.OpenLive(iface.Name, 65536, false, time.Second*3)
+ if err != nil {
+ return fmt.Errorf("openlive: %v", err)
+ }
+ defer h.Close()
+ case "filtered":
+ h, err = pcap.OpenLive(iface.Name, 65536, false, time.Second*3)
+ if err != nil {
+ return fmt.Errorf("openlive: %v", err)
+ }
+ defer h.Close()
+ if err := h.SetBPFFilter("port 80 or port 443"); err != nil {
+ return fmt.Errorf("setbpf: %v", err)
+ }
+ case "timestamp":
+ u, err := pcap.NewInactiveHandle(iface.Name)
+ if err != nil {
+ return err
+ }
+ defer u.CleanUp()
+ if err = u.SetSnapLen(65536); err != nil {
+ return err
+ } else if err = u.SetPromisc(false); err != nil {
+ return err
+ } else if err = u.SetTimeout(time.Second * 3); err != nil {
+ return err
+ }
+ sources := u.SupportedTimestamps()
+ if len(sources) == 0 {
+ return errors.New("no supported timestamp sources")
+ } else if err := u.SetTimestampSource(sources[0]); err != nil {
+ return fmt.Errorf("settimestampsource(%v): %v", sources[0], err)
+ } else if h, err = u.Activate(); err != nil {
+ return fmt.Errorf("could not activate: %v", err)
+ }
+ defer h.Close()
+ default:
+ panic("Invalid --mode: " + *mode)
+ }
+ go generatePackets()
+ h.ReadPacketData() // Do one dummy read to clear any timeouts.
+ data, ci, err := h.ReadPacketData()
+ if err != nil {
+ return fmt.Errorf("readpacketdata: %v", err)
+ }
+ log.Printf("Read packet, %v bytes, CI: %+v", len(data), ci)
+ return nil
+}
diff --git a/vendor/github.com/google/gopacket/pcap/pcap_unix.go b/vendor/github.com/google/gopacket/pcap/pcap_unix.go
new file mode 100644
index 0000000..c58752e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/pcap_unix.go
@@ -0,0 +1,709 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+// +build !windows
+
+package pcap
+
+import (
+ "errors"
+ "os"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/google/gopacket"
+
+ "github.com/google/gopacket/layers"
+)
+
+/*
+#cgo solaris LDFLAGS: -L /opt/local/lib -lpcap
+#cgo linux LDFLAGS: -lpcap
+#cgo dragonfly LDFLAGS: -lpcap
+#cgo freebsd LDFLAGS: -lpcap
+#cgo openbsd LDFLAGS: -lpcap
+#cgo netbsd LDFLAGS: -lpcap
+#cgo darwin LDFLAGS: -lpcap
+#include <stdlib.h>
+#include <pcap.h>
+#include <stdint.h>
+
+// Some old versions of pcap don't define this constant.
+#ifndef PCAP_NETMASK_UNKNOWN
+#define PCAP_NETMASK_UNKNOWN 0xffffffff
+#endif
+
+// libpcap doesn't actually export its version in a #define-guardable way,
+// so we have to use other defined things to differentiate versions.
+// We assume at least libpcap v1.1 at the moment.
+// See http://upstream-tracker.org/versions/libpcap.html
+
+#ifndef PCAP_ERROR_TSTAMP_PRECISION_NOTSUP // < v1.5
+#define PCAP_ERROR_TSTAMP_PRECISION_NOTSUP -12
+
+int pcap_set_immediate_mode(pcap_t *p, int mode) {
+ return PCAP_ERROR;
+}
+
+// libpcap version < v1.5 doesn't have timestamp precision (everything is microsecond)
+//
+// This means *_tstamp_* functions and macros are missing. Therefore, we emulate these
+// functions here and pretend that setting the precision works. This is actually the way
+// pcap_open_offline_with_tstamp_precision works, because it doesn't return an error
+// if it was not possible to set the precision, which depends on support by the given file.
+// => The rest of the functions always pretend as if they could set nano precision and
+// verify the actual precision with pcap_get_tstamp_precision, which is emulated for
+// these older versions to always report microsecond precision.
+#ifdef __OpenBSD__
+#define gopacket_time_secs_t u_int32_t
+#define gopacket_time_usecs_t u_int32_t
+#else
+#define gopacket_time_secs_t time_t
+#define gopacket_time_usecs_t suseconds_t
+#endif
+#endif
+
+// The things we do to avoid pointers escaping to the heap...
+// According to https://github.com/the-tcpdump-group/libpcap/blob/1131a7c26c6f4d4772e4a2beeaf7212f4dea74ac/pcap.c#L398-L406 ,
+// the return value of pcap_next_ex could be greater than 1 for success.
+// Let's just make it 1 if it comes bigger than 1.
+int pcap_next_ex_escaping(pcap_t *p, uintptr_t pkt_hdr, uintptr_t pkt_data) {
+ int ex = pcap_next_ex(p, (struct pcap_pkthdr**)(pkt_hdr), (const u_char**)(pkt_data));
+ if (ex > 1) {
+ ex = 1;
+ }
+ return ex;
+}
+
+int pcap_offline_filter_escaping(struct bpf_program *fp, uintptr_t pkt_hdr, uintptr_t pkt) {
+ return pcap_offline_filter(fp, (struct pcap_pkthdr*)(pkt_hdr), (const u_char*)(pkt));
+}
+
+// pcap_wait returns when the next packet is available or the timeout expires.
+// Since it uses pcap_get_selectable_fd, it will not work in Windows.
+int pcap_wait(pcap_t *p, int usec) {
+ fd_set fds;
+ int fd;
+ struct timeval tv;
+
+ fd = pcap_get_selectable_fd(p);
+ if(fd < 0) {
+ return fd;
+ }
+
+ FD_ZERO(&fds);
+ FD_SET(fd, &fds);
+
+ tv.tv_sec = 0;
+ tv.tv_usec = usec;
+
+ if(usec != 0) {
+ return select(fd+1, &fds, NULL, NULL, &tv);
+ }
+
+ // block indefinitely if no timeout provided
+ return select(fd+1, &fds, NULL, NULL, NULL);
+}
+
+*/
+import "C"
+
+const errorBufferSize = C.PCAP_ERRBUF_SIZE
+
+const (
+ pcapErrorNotActivated = C.PCAP_ERROR_NOT_ACTIVATED
+ pcapErrorActivated = C.PCAP_ERROR_ACTIVATED
+ pcapWarningPromisc = C.PCAP_WARNING_PROMISC_NOTSUP
+ pcapErrorNoSuchDevice = C.PCAP_ERROR_NO_SUCH_DEVICE
+ pcapErrorDenied = C.PCAP_ERROR_PERM_DENIED
+ pcapErrorNotUp = C.PCAP_ERROR_IFACE_NOT_UP
+ pcapWarning = C.PCAP_WARNING
+ pcapDIN = C.PCAP_D_IN
+ pcapDOUT = C.PCAP_D_OUT
+ pcapDINOUT = C.PCAP_D_INOUT
+ pcapNetmaskUnknown = C.PCAP_NETMASK_UNKNOWN
+ pcapTstampPrecisionMicro = C.PCAP_TSTAMP_PRECISION_MICRO
+ pcapTstampPrecisionNano = C.PCAP_TSTAMP_PRECISION_NANO
+)
+
+type pcapPkthdr C.struct_pcap_pkthdr
+type pcapTPtr *C.struct_pcap
+type pcapBpfProgram C.struct_bpf_program
+
+func (h *pcapPkthdr) getSec() int64 {
+ return int64(h.ts.tv_sec)
+}
+
+func (h *pcapPkthdr) getUsec() int64 {
+ return int64(h.ts.tv_usec)
+}
+
+func (h *pcapPkthdr) getLen() int {
+ return int(h.len)
+}
+
+func (h *pcapPkthdr) getCaplen() int {
+ return int(h.caplen)
+}
+
+func pcapGetTstampPrecision(cptr pcapTPtr) int {
+ return int(C.pcap_get_tstamp_precision(cptr))
+}
+
+func pcapSetTstampPrecision(cptr pcapTPtr, precision int) error {
+ ret := C.pcap_set_tstamp_precision(cptr, C.int(precision))
+ if ret < 0 {
+ return errors.New(C.GoString(C.pcap_geterr(cptr)))
+ }
+ return nil
+}
+
+func statusError(status C.int) error {
+ return errors.New(C.GoString(C.pcap_statustostr(status)))
+}
+
+func pcapOpenLive(device string, snaplen int, pro int, timeout int) (*Handle, error) {
+ buf := (*C.char)(C.calloc(errorBufferSize, 1))
+ defer C.free(unsafe.Pointer(buf))
+
+ dev := C.CString(device)
+ defer C.free(unsafe.Pointer(dev))
+
+ cptr := C.pcap_open_live(dev, C.int(snaplen), C.int(pro), C.int(timeout), buf)
+ if cptr == nil {
+ return nil, errors.New(C.GoString(buf))
+ }
+ return &Handle{cptr: cptr}, nil
+}
+
+func openOffline(file string) (handle *Handle, err error) {
+ buf := (*C.char)(C.calloc(errorBufferSize, 1))
+ defer C.free(unsafe.Pointer(buf))
+ cf := C.CString(file)
+ defer C.free(unsafe.Pointer(cf))
+
+ cptr := C.pcap_open_offline_with_tstamp_precision(cf, C.PCAP_TSTAMP_PRECISION_NANO, buf)
+ if cptr == nil {
+ return nil, errors.New(C.GoString(buf))
+ }
+ return &Handle{cptr: cptr}, nil
+}
+
+func (p *Handle) pcapClose() {
+ if p.cptr != nil {
+ C.pcap_close(p.cptr)
+ }
+ p.cptr = nil
+}
+
+func (p *Handle) pcapGeterr() error {
+ return errors.New(C.GoString(C.pcap_geterr(p.cptr)))
+}
+
+func (p *Handle) pcapStats() (*Stats, error) {
+ var cstats C.struct_pcap_stat
+ if C.pcap_stats(p.cptr, &cstats) < 0 {
+ return nil, p.pcapGeterr()
+ }
+ return &Stats{
+ PacketsReceived: int(cstats.ps_recv),
+ PacketsDropped: int(cstats.ps_drop),
+ PacketsIfDropped: int(cstats.ps_ifdrop),
+ }, nil
+}
+
+// for libpcap < 1.8 pcap_compile is NOT thread-safe, so protect it.
+var pcapCompileMu sync.Mutex
+
+func (p *Handle) pcapCompile(expr string, maskp uint32) (pcapBpfProgram, error) {
+ var bpf pcapBpfProgram
+ cexpr := C.CString(expr)
+ defer C.free(unsafe.Pointer(cexpr))
+
+ pcapCompileMu.Lock()
+ defer pcapCompileMu.Unlock()
+ if C.pcap_compile(p.cptr, (*C.struct_bpf_program)(&bpf), cexpr, 1, C.bpf_u_int32(maskp)) < 0 {
+ return bpf, p.pcapGeterr()
+ }
+ return bpf, nil
+}
+
+func (p pcapBpfProgram) free() {
+ C.pcap_freecode((*C.struct_bpf_program)(&p))
+}
+
+func (p pcapBpfProgram) toBPFInstruction() []BPFInstruction {
+ bpfInsn := (*[bpfInstructionBufferSize]C.struct_bpf_insn)(unsafe.Pointer(p.bf_insns))[0:p.bf_len:p.bf_len]
+ bpfInstruction := make([]BPFInstruction, len(bpfInsn), len(bpfInsn))
+
+ for i, v := range bpfInsn {
+ bpfInstruction[i].Code = uint16(v.code)
+ bpfInstruction[i].Jt = uint8(v.jt)
+ bpfInstruction[i].Jf = uint8(v.jf)
+ bpfInstruction[i].K = uint32(v.k)
+ }
+ return bpfInstruction
+}
+
+func pcapBpfProgramFromInstructions(bpfInstructions []BPFInstruction) pcapBpfProgram {
+ var bpf pcapBpfProgram
+ bpf.bf_len = C.u_int(len(bpfInstructions))
+ cbpfInsns := C.calloc(C.size_t(len(bpfInstructions)), C.size_t(unsafe.Sizeof(bpfInstructions[0])))
+ gbpfInsns := (*[bpfInstructionBufferSize]C.struct_bpf_insn)(cbpfInsns)
+
+ for i, v := range bpfInstructions {
+ gbpfInsns[i].code = C.u_short(v.Code)
+ gbpfInsns[i].jt = C.u_char(v.Jt)
+ gbpfInsns[i].jf = C.u_char(v.Jf)
+ gbpfInsns[i].k = C.bpf_u_int32(v.K)
+ }
+
+ bpf.bf_insns = (*C.struct_bpf_insn)(cbpfInsns)
+ return bpf
+}
+
+func pcapLookupnet(device string) (netp, maskp uint32, err error) {
+ errorBuf := (*C.char)(C.calloc(errorBufferSize, 1))
+ defer C.free(unsafe.Pointer(errorBuf))
+ dev := C.CString(device)
+ defer C.free(unsafe.Pointer(dev))
+ if C.pcap_lookupnet(
+ dev,
+ (*C.bpf_u_int32)(unsafe.Pointer(&netp)),
+ (*C.bpf_u_int32)(unsafe.Pointer(&maskp)),
+ errorBuf,
+ ) < 0 {
+ return 0, 0, errors.New(C.GoString(errorBuf))
+ // We can't lookup the network, but that could be because the interface
+ // doesn't have an IPv4.
+ }
+ return
+}
+
+func (b *BPF) pcapOfflineFilter(ci gopacket.CaptureInfo, data []byte) bool {
+ hdr := (*C.struct_pcap_pkthdr)(&b.hdr)
+ hdr.ts.tv_sec = C.gopacket_time_secs_t(ci.Timestamp.Unix())
+ hdr.ts.tv_usec = C.gopacket_time_usecs_t(ci.Timestamp.Nanosecond() / 1000)
+ hdr.caplen = C.bpf_u_int32(len(data)) // Trust actual length over ci.Length.
+ hdr.len = C.bpf_u_int32(ci.Length)
+ dataptr := (*C.u_char)(unsafe.Pointer(&data[0]))
+ return C.pcap_offline_filter_escaping((*C.struct_bpf_program)(&b.bpf.bpf),
+ C.uintptr_t(uintptr(unsafe.Pointer(hdr))),
+ C.uintptr_t(uintptr(unsafe.Pointer(dataptr)))) != 0
+}
+
+func (p *Handle) pcapSetfilter(bpf pcapBpfProgram) error {
+ if C.pcap_setfilter(p.cptr, (*C.struct_bpf_program)(&bpf)) < 0 {
+ return p.pcapGeterr()
+ }
+ return nil
+}
+
+func (p *Handle) pcapListDatalinks() (datalinks []Datalink, err error) {
+ var dltbuf *C.int
+
+ n := int(C.pcap_list_datalinks(p.cptr, &dltbuf))
+ if n < 0 {
+ return nil, p.pcapGeterr()
+ }
+
+ defer C.pcap_free_datalinks(dltbuf)
+
+ datalinks = make([]Datalink, n)
+
+ dltArray := (*[1 << 28]C.int)(unsafe.Pointer(dltbuf))
+
+ for i := 0; i < n; i++ {
+ datalinks[i].Name = pcapDatalinkValToName(int((*dltArray)[i]))
+ datalinks[i].Description = pcapDatalinkValToDescription(int((*dltArray)[i]))
+ }
+
+ return datalinks, nil
+}
+
+func pcapOpenDead(linkType layers.LinkType, captureLength int) (*Handle, error) {
+ cptr := C.pcap_open_dead(C.int(linkType), C.int(captureLength))
+ if cptr == nil {
+ return nil, errors.New("error opening dead capture")
+ }
+
+ return &Handle{cptr: cptr}, nil
+}
+
+func (p *Handle) pcapNextPacketEx() NextError {
+ // This horrible magic allows us to pass a ptr-to-ptr to pcap_next_ex
+ // without causing that ptr-to-ptr to itself be allocated on the heap.
+ // Since Handle itself survives through the duration of the pcap_next_ex
+ // call, this should be perfectly safe for GC stuff, etc.
+
+ return NextError(C.pcap_next_ex_escaping(p.cptr, C.uintptr_t(uintptr(unsafe.Pointer(&p.pkthdr))), C.uintptr_t(uintptr(unsafe.Pointer(&p.bufptr)))))
+}
+
+func (p *Handle) pcapDatalink() layers.LinkType {
+ return layers.LinkType(C.pcap_datalink(p.cptr))
+}
+
+func (p *Handle) pcapSetDatalink(dlt layers.LinkType) error {
+ if C.pcap_set_datalink(p.cptr, C.int(dlt)) < 0 {
+ return p.pcapGeterr()
+ }
+ return nil
+}
+
+func pcapDatalinkValToName(dlt int) string {
+ return C.GoString(C.pcap_datalink_val_to_name(C.int(dlt)))
+}
+
+func pcapDatalinkValToDescription(dlt int) string {
+ return C.GoString(C.pcap_datalink_val_to_description(C.int(dlt)))
+}
+
+func pcapDatalinkNameToVal(name string) int {
+ cptr := C.CString(name)
+ defer C.free(unsafe.Pointer(cptr))
+ return int(C.pcap_datalink_name_to_val(cptr))
+}
+
+func pcapLibVersion() string {
+ return C.GoString(C.pcap_lib_version())
+}
+
+func (p *Handle) isOpen() bool {
+ return p.cptr != nil
+}
+
+type pcapDevices struct {
+ all, cur *C.pcap_if_t
+}
+
+func (p pcapDevices) free() {
+ C.pcap_freealldevs((*C.pcap_if_t)(p.all))
+}
+
+func (p *pcapDevices) next() bool {
+ if p.cur == nil {
+ p.cur = p.all
+ if p.cur == nil {
+ return false
+ }
+ return true
+ }
+ if p.cur.next == nil {
+ return false
+ }
+ p.cur = p.cur.next
+ return true
+}
+
+func (p pcapDevices) name() string {
+ return C.GoString(p.cur.name)
+}
+
+func (p pcapDevices) description() string {
+ return C.GoString(p.cur.description)
+}
+
+func (p pcapDevices) flags() uint32 {
+ return uint32(p.cur.flags)
+}
+
+type pcapAddresses struct {
+ all, cur *C.pcap_addr_t
+}
+
+func (p *pcapAddresses) next() bool {
+ if p.cur == nil {
+ p.cur = p.all
+ if p.cur == nil {
+ return false
+ }
+ return true
+ }
+ if p.cur.next == nil {
+ return false
+ }
+ p.cur = p.cur.next
+ return true
+}
+
+func (p pcapAddresses) addr() *syscall.RawSockaddr {
+ return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.addr))
+}
+
+func (p pcapAddresses) netmask() *syscall.RawSockaddr {
+ return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.netmask))
+}
+
+func (p pcapAddresses) broadaddr() *syscall.RawSockaddr {
+ return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.broadaddr))
+}
+
+func (p pcapAddresses) dstaddr() *syscall.RawSockaddr {
+ return (*syscall.RawSockaddr)(unsafe.Pointer(p.cur.dstaddr))
+}
+
+func (p pcapDevices) addresses() pcapAddresses {
+ return pcapAddresses{all: p.cur.addresses}
+}
+
+func pcapFindAllDevs() (pcapDevices, error) {
+ var buf *C.char
+ buf = (*C.char)(C.calloc(errorBufferSize, 1))
+ defer C.free(unsafe.Pointer(buf))
+ var alldevsp pcapDevices
+
+ if C.pcap_findalldevs((**C.pcap_if_t)(&alldevsp.all), buf) < 0 {
+ return pcapDevices{}, errors.New(C.GoString(buf))
+ }
+ return alldevsp, nil
+}
+
+func (p *Handle) pcapSendpacket(data []byte) error {
+ if C.pcap_sendpacket(p.cptr, (*C.u_char)(&data[0]), (C.int)(len(data))) < 0 {
+ return p.pcapGeterr()
+ }
+ return nil
+}
+
+func (p *Handle) pcapSetdirection(direction Direction) error {
+ if status := C.pcap_setdirection(p.cptr, (C.pcap_direction_t)(direction)); status < 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *Handle) pcapSnapshot() int {
+ return int(C.pcap_snapshot(p.cptr))
+}
+
+func (t TimestampSource) pcapTstampTypeValToName() string {
+ return C.GoString(C.pcap_tstamp_type_val_to_name(C.int(t)))
+}
+
+func pcapTstampTypeNameToVal(s string) (TimestampSource, error) {
+ cs := C.CString(s)
+ defer C.free(unsafe.Pointer(cs))
+ t := C.pcap_tstamp_type_name_to_val(cs)
+ if t < 0 {
+ return 0, statusError(t)
+ }
+ return TimestampSource(t), nil
+}
+
+func (p *InactiveHandle) pcapGeterr() error {
+ return errors.New(C.GoString(C.pcap_geterr(p.cptr)))
+}
+
+func (p *InactiveHandle) pcapActivate() (*Handle, activateError) {
+ ret := activateError(C.pcap_activate(p.cptr))
+ if ret != aeNoError {
+ return nil, ret
+ }
+ h := &Handle{
+ cptr: p.cptr,
+ }
+ p.cptr = nil
+ return h, ret
+}
+
+func (p *InactiveHandle) pcapClose() {
+ if p.cptr != nil {
+ C.pcap_close(p.cptr)
+ }
+}
+
+func pcapCreate(device string) (*InactiveHandle, error) {
+ buf := (*C.char)(C.calloc(errorBufferSize, 1))
+ defer C.free(unsafe.Pointer(buf))
+ dev := C.CString(device)
+ defer C.free(unsafe.Pointer(dev))
+
+ cptr := C.pcap_create(dev, buf)
+ if cptr == nil {
+ return nil, errors.New(C.GoString(buf))
+ }
+ return &InactiveHandle{cptr: cptr}, nil
+}
+
+func (p *InactiveHandle) pcapSetSnaplen(snaplen int) error {
+ if status := C.pcap_set_snaplen(p.cptr, C.int(snaplen)); status < 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetPromisc(promisc bool) error {
+ var pro C.int
+ if promisc {
+ pro = 1
+ }
+ if status := C.pcap_set_promisc(p.cptr, pro); status < 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetTimeout(timeout time.Duration) error {
+ if status := C.pcap_set_timeout(p.cptr, C.int(timeoutMillis(timeout))); status < 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapListTstampTypes() (out []TimestampSource) {
+ var types *C.int
+ n := int(C.pcap_list_tstamp_types(p.cptr, &types))
+ if n < 0 {
+ return // public interface doesn't have error :(
+ }
+ defer C.pcap_free_tstamp_types(types)
+ typesArray := (*[1 << 28]C.int)(unsafe.Pointer(types))
+ for i := 0; i < n; i++ {
+ out = append(out, TimestampSource((*typesArray)[i]))
+ }
+ return
+}
+
+func (p *InactiveHandle) pcapSetTstampType(t TimestampSource) error {
+ if status := C.pcap_set_tstamp_type(p.cptr, C.int(t)); status < 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetRfmon(monitor bool) error {
+ var mon C.int
+ if monitor {
+ mon = 1
+ }
+ switch canset := C.pcap_can_set_rfmon(p.cptr); canset {
+ case 0:
+ return CannotSetRFMon
+ case 1:
+ // success
+ default:
+ return statusError(canset)
+ }
+ if status := C.pcap_set_rfmon(p.cptr, mon); status != 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetBufferSize(bufferSize int) error {
+ if status := C.pcap_set_buffer_size(p.cptr, C.int(bufferSize)); status < 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetImmediateMode(mode bool) error {
+ var md C.int
+ if mode {
+ md = 1
+ }
+ if status := C.pcap_set_immediate_mode(p.cptr, md); status < 0 {
+ return statusError(status)
+ }
+ return nil
+}
+
+func (p *Handle) setNonBlocking() error {
+ buf := (*C.char)(C.calloc(errorBufferSize, 1))
+ defer C.free(unsafe.Pointer(buf))
+
+ // Change the device to non-blocking, we'll use pcap_wait to wait until the
+ // handle is ready to read.
+ // pcap_setnonblock returns -1 on failure and 0 on success.
+ if v := C.pcap_setnonblock(p.cptr, 1, buf); v < 0 {
+ return errors.New(C.GoString(buf))
+ }
+
+ return nil
+}
+
+// waitForPacket waits for a packet or for the timeout to expire.
+func (p *Handle) waitForPacket() {
+ // need to wait less than the read timeout according to pcap documentation.
+ // timeoutMillis rounds up to at least one millisecond so we can safely
+ // subtract up to a millisecond.
+ usec := timeoutMillis(p.timeout) * 1000
+ usec -= 100
+
+ C.pcap_wait(p.cptr, C.int(usec))
+}
+
+// openOfflineFile returns contents of input file as a *Handle.
+func openOfflineFile(file *os.File) (handle *Handle, err error) {
+ buf := (*C.char)(C.calloc(errorBufferSize, 1))
+ defer C.free(unsafe.Pointer(buf))
+ cmode := C.CString("rb")
+ defer C.free(unsafe.Pointer(cmode))
+ cf := C.fdopen(C.int(file.Fd()), cmode)
+
+ cptr := C.pcap_fopen_offline_with_tstamp_precision(cf, C.PCAP_TSTAMP_PRECISION_NANO, buf)
+ if cptr == nil {
+ return nil, errors.New(C.GoString(buf))
+ }
+ return &Handle{cptr: cptr}, nil
+}
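
openOffline above always asks libpcap for nanosecond timestamp precision, and pcapCompile/pcapSetfilter back the exported SetBPFFilter method. A minimal sketch of the resulting offline-read path, assuming a placeholder capture file name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/gopacket/pcap"
)

func main() {
	handle, err := pcap.OpenOffline("test_ethernet.pcap") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer handle.Close()

	// Compiled via pcap_compile under the pcapCompileMu lock shown above.
	if err := handle.SetBPFFilter("tcp or udp"); err != nil {
		log.Fatal(err)
	}

	for {
		data, ci, err := handle.ReadPacketData()
		if err != nil { // io.EOF once the file is exhausted
			break
		}
		fmt.Printf("%v: %d bytes captured\n", ci.Timestamp, len(data))
	}
}
```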
diff --git a/vendor/github.com/google/gopacket/pcap/pcap_windows.go b/vendor/github.com/google/gopacket/pcap/pcap_windows.go
new file mode 100644
index 0000000..c758272
--- /dev/null
+++ b/vendor/github.com/google/gopacket/pcap/pcap_windows.go
@@ -0,0 +1,885 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package pcap
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "sync"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
+var pcapLoaded = false
+
+const npcapPath = "\\Npcap"
+
+func initDllPath(kernel32 syscall.Handle) {
+ setDllDirectory, err := syscall.GetProcAddress(kernel32, "SetDllDirectoryA")
+ if err != nil {
+ // we can't do anything since SetDllDirectoryA is missing - fall back to use first wpcap.dll we encounter
+ return
+ }
+ getSystemDirectory, err := syscall.GetProcAddress(kernel32, "GetSystemDirectoryA")
+ if err != nil {
+ // we can't do anything since GetSystemDirectoryA is missing - fall back to use first wpcap.dll we encounter
+ return
+ }
+ buf := make([]byte, 4096)
+ r, _, _ := syscall.Syscall(getSystemDirectory, 2, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)), 0)
+ if r == 0 || r > 4096-uintptr(len(npcapPath))-1 {
+ // we can't do anything since the system directory lookup failed - fall back to use first wpcap.dll we encounter
+ return
+ }
+ copy(buf[r:], npcapPath)
+ _, _, _ = syscall.Syscall(setDllDirectory, 1, uintptr(unsafe.Pointer(&buf[0])), 0, 0)
+ // ignore errors here - we just fallback to load wpcap.dll from default locations
+}
+
+// loadedDllPath will hold the full pathname of the loaded wpcap.dll after init if possible
+var loadedDllPath = "wpcap.dll"
+
+func initLoadedDllPath(kernel32 syscall.Handle) {
+ getModuleFileName, err := syscall.GetProcAddress(kernel32, "GetModuleFileNameA")
+ if err != nil {
+ // we can't get the filename of the loaded module in this case - just leave default of wpcap.dll
+ return
+ }
+ buf := make([]byte, 4096)
+ r, _, _ := syscall.Syscall(getModuleFileName, 3, uintptr(wpcapHandle), uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+ if r == 0 {
+ // we can't get the filename of the loaded module in this case - just leave default of wpcap.dll
+ return
+ }
+ loadedDllPath = string(buf[:int(r)])
+}
+
+func mustLoad(fun string) uintptr {
+ addr, err := syscall.GetProcAddress(wpcapHandle, fun)
+ if err != nil {
+ panic(fmt.Sprintf("Couldn't load function %s from %s", fun, loadedDllPath))
+ }
+ return addr
+}
+
+func mightLoad(fun string) uintptr {
+ addr, err := syscall.GetProcAddress(wpcapHandle, fun)
+ if err != nil {
+ return 0
+ }
+ return addr
+}
+
+func byteSliceToString(bval []byte) string {
+ for i := range bval {
+ if bval[i] == 0 {
+ return string(bval[:i])
+ }
+ }
+ return string(bval[:])
+}
+
+// bytePtrToString returns a string copied from a pointer to a null-terminated byte array.
+// WARNING: ONLY SAFE IF r POINTS TO C MEMORY!
+// govet will complain about this function for the reason stated above
+func bytePtrToString(r uintptr) string {
+ if r == 0 {
+ return ""
+ }
+ bval := (*[1 << 30]byte)(unsafe.Pointer(r))
+ return byteSliceToString(bval[:])
+}
+
+var wpcapHandle syscall.Handle
+var msvcrtHandle syscall.Handle
+var (
+ callocPtr,
+ pcapStrerrorPtr,
+ pcapStatustostrPtr,
+ pcapOpenLivePtr,
+ pcapOpenOfflinePtr,
+ pcapClosePtr,
+ pcapGeterrPtr,
+ pcapStatsPtr,
+ pcapCompilePtr,
+ pcapFreecodePtr,
+ pcapLookupnetPtr,
+ pcapOfflineFilterPtr,
+ pcapSetfilterPtr,
+ pcapListDatalinksPtr,
+ pcapFreeDatalinksPtr,
+ pcapDatalinkValToNamePtr,
+ pcapDatalinkValToDescriptionPtr,
+ pcapOpenDeadPtr,
+ pcapNextExPtr,
+ pcapDatalinkPtr,
+ pcapSetDatalinkPtr,
+ pcapDatalinkNameToValPtr,
+ pcapLibVersionPtr,
+ pcapFreealldevsPtr,
+ pcapFindalldevsPtr,
+ pcapSendpacketPtr,
+ pcapSetdirectionPtr,
+ pcapSnapshotPtr,
+ pcapTstampTypeValToNamePtr,
+ pcapTstampTypeNameToValPtr,
+ pcapListTstampTypesPtr,
+ pcapFreeTstampTypesPtr,
+ pcapSetTstampTypePtr,
+ pcapGetTstampPrecisionPtr,
+ pcapSetTstampPrecisionPtr,
+ pcapOpenOfflineWithTstampPrecisionPtr,
+ pcapHOpenOfflineWithTstampPrecisionPtr,
+ pcapActivatePtr,
+ pcapCreatePtr,
+ pcapSetSnaplenPtr,
+ pcapSetPromiscPtr,
+ pcapSetTimeoutPtr,
+ pcapCanSetRfmonPtr,
+ pcapSetRfmonPtr,
+ pcapSetBufferSizePtr,
+ pcapSetImmediateModePtr,
+ pcapHopenOfflinePtr uintptr
+)
+
+func init() {
+ LoadWinPCAP()
+}
+
+// LoadWinPCAP attempts to dynamically load the wpcap DLL and resolve necessary functions
+func LoadWinPCAP() error {
+ if pcapLoaded {
+ return nil
+ }
+
+ kernel32, err := syscall.LoadLibrary("kernel32.dll")
+ if err != nil {
+ return fmt.Errorf("couldn't load kernel32.dll")
+ }
+ defer syscall.FreeLibrary(kernel32)
+
+ initDllPath(kernel32)
+
+ wpcapHandle, err = syscall.LoadLibrary("wpcap.dll")
+ if err != nil {
+ return fmt.Errorf("couldn't load wpcap.dll")
+ }
+ initLoadedDllPath(kernel32)
+ msvcrtHandle, err = syscall.LoadLibrary("msvcrt.dll")
+ if err != nil {
+ return fmt.Errorf("couldn't load msvcrt.dll")
+ }
+ callocPtr, err = syscall.GetProcAddress(msvcrtHandle, "calloc")
+ if err != nil {
+ return fmt.Errorf("couldn't get calloc function")
+ }
+
+ pcapStrerrorPtr = mustLoad("pcap_strerror")
+ pcapStatustostrPtr = mightLoad("pcap_statustostr") // not available on winpcap
+ pcapOpenLivePtr = mustLoad("pcap_open_live")
+ pcapOpenOfflinePtr = mustLoad("pcap_open_offline")
+ pcapClosePtr = mustLoad("pcap_close")
+ pcapGeterrPtr = mustLoad("pcap_geterr")
+ pcapStatsPtr = mustLoad("pcap_stats")
+ pcapCompilePtr = mustLoad("pcap_compile")
+ pcapFreecodePtr = mustLoad("pcap_freecode")
+ pcapLookupnetPtr = mustLoad("pcap_lookupnet")
+ pcapOfflineFilterPtr = mustLoad("pcap_offline_filter")
+ pcapSetfilterPtr = mustLoad("pcap_setfilter")
+ pcapListDatalinksPtr = mustLoad("pcap_list_datalinks")
+ pcapFreeDatalinksPtr = mustLoad("pcap_free_datalinks")
+ pcapDatalinkValToNamePtr = mustLoad("pcap_datalink_val_to_name")
+ pcapDatalinkValToDescriptionPtr = mustLoad("pcap_datalink_val_to_description")
+ pcapOpenDeadPtr = mustLoad("pcap_open_dead")
+ pcapNextExPtr = mustLoad("pcap_next_ex")
+ pcapDatalinkPtr = mustLoad("pcap_datalink")
+ pcapSetDatalinkPtr = mustLoad("pcap_set_datalink")
+ pcapDatalinkNameToValPtr = mustLoad("pcap_datalink_name_to_val")
+ pcapLibVersionPtr = mustLoad("pcap_lib_version")
+ pcapFreealldevsPtr = mustLoad("pcap_freealldevs")
+ pcapFindalldevsPtr = mustLoad("pcap_findalldevs")
+ pcapSendpacketPtr = mustLoad("pcap_sendpacket")
+ pcapSetdirectionPtr = mustLoad("pcap_setdirection")
+ pcapSnapshotPtr = mustLoad("pcap_snapshot")
+ //libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+ pcapTstampTypeValToNamePtr = mightLoad("pcap_tstamp_type_val_to_name")
+ pcapTstampTypeNameToValPtr = mightLoad("pcap_tstamp_type_name_to_val")
+ pcapListTstampTypesPtr = mightLoad("pcap_list_tstamp_types")
+ pcapFreeTstampTypesPtr = mightLoad("pcap_free_tstamp_types")
+ pcapSetTstampTypePtr = mightLoad("pcap_set_tstamp_type")
+ pcapGetTstampPrecisionPtr = mightLoad("pcap_get_tstamp_precision")
+ pcapSetTstampPrecisionPtr = mightLoad("pcap_set_tstamp_precision")
+ pcapOpenOfflineWithTstampPrecisionPtr = mightLoad("pcap_open_offline_with_tstamp_precision")
+ pcapHOpenOfflineWithTstampPrecisionPtr = mightLoad("pcap_hopen_offline_with_tstamp_precision")
+ pcapActivatePtr = mustLoad("pcap_activate")
+ pcapCreatePtr = mustLoad("pcap_create")
+ pcapSetSnaplenPtr = mustLoad("pcap_set_snaplen")
+ pcapSetPromiscPtr = mustLoad("pcap_set_promisc")
+ pcapSetTimeoutPtr = mustLoad("pcap_set_timeout")
+ //winpcap does not support rfmon
+ pcapCanSetRfmonPtr = mightLoad("pcap_can_set_rfmon")
+ pcapSetRfmonPtr = mightLoad("pcap_set_rfmon")
+ pcapSetBufferSizePtr = mustLoad("pcap_set_buffer_size")
+ //libpcap <1.5 does not have pcap_set_immediate_mode
+ pcapSetImmediateModePtr = mightLoad("pcap_set_immediate_mode")
+ pcapHopenOfflinePtr = mustLoad("pcap_hopen_offline")
+
+ pcapLoaded = true
+ return nil
+}
+
+func (h *pcapPkthdr) getSec() int64 {
+ return int64(h.Ts.Sec)
+}
+
+func (h *pcapPkthdr) getUsec() int64 {
+ return int64(h.Ts.Usec)
+}
+
+func (h *pcapPkthdr) getLen() int {
+ return int(h.Len)
+}
+
+func (h *pcapPkthdr) getCaplen() int {
+ return int(h.Caplen)
+}
+
+func statusError(status pcapCint) error {
+ var ret uintptr
+ if pcapStatustostrPtr == 0 {
+ ret, _, _ = syscall.Syscall(pcapStrerrorPtr, 1, uintptr(status), 0, 0)
+ } else {
+ ret, _, _ = syscall.Syscall(pcapStatustostrPtr, 1, uintptr(status), 0, 0)
+ }
+ return errors.New(bytePtrToString(ret))
+}
+
+func pcapGetTstampPrecision(cptr pcapTPtr) int {
+ if pcapGetTstampPrecisionPtr == 0 {
+ return pcapTstampPrecisionMicro
+ }
+ ret, _, _ := syscall.Syscall(pcapGetTstampPrecisionPtr, 1, uintptr(cptr), 0, 0)
+ return int(pcapCint(ret))
+}
+
+func pcapSetTstampPrecision(cptr pcapTPtr, precision int) error {
+ if pcapSetTstampPrecisionPtr == 0 {
+ return errors.New("Not supported")
+ }
+ ret, _, _ := syscall.Syscall(pcapSetTstampPrecisionPtr, 2, uintptr(cptr), uintptr(precision), 0)
+ if pcapCint(ret) < 0 {
+ return errors.New("Not supported")
+ }
+ return nil
+}
+
+func pcapOpenLive(device string, snaplen int, pro int, timeout int) (*Handle, error) {
+ err := LoadWinPCAP()
+ if err != nil {
+ return nil, err
+ }
+
+ buf := make([]byte, errorBufferSize)
+ dev, err := syscall.BytePtrFromString(device)
+ if err != nil {
+ return nil, err
+ }
+
+ cptr, _, _ := syscall.Syscall6(pcapOpenLivePtr, 5, uintptr(unsafe.Pointer(dev)), uintptr(snaplen), uintptr(pro), uintptr(timeout), uintptr(unsafe.Pointer(&buf[0])), 0)
+
+ if cptr == 0 {
+ return nil, errors.New(byteSliceToString(buf))
+ }
+ return &Handle{cptr: pcapTPtr(cptr)}, nil
+}
+
+func openOffline(file string) (handle *Handle, err error) {
+ err = LoadWinPCAP()
+ if err != nil {
+ return nil, err
+ }
+
+ buf := make([]byte, errorBufferSize)
+ f, err := syscall.BytePtrFromString(file)
+ if err != nil {
+ return nil, err
+ }
+
+ var cptr uintptr
+ if pcapOpenOfflineWithTstampPrecisionPtr == 0 {
+ cptr, _, _ = syscall.Syscall(pcapOpenOfflinePtr, 2, uintptr(unsafe.Pointer(f)), uintptr(unsafe.Pointer(&buf[0])), 0)
+ } else {
+ cptr, _, _ = syscall.Syscall(pcapOpenOfflineWithTstampPrecisionPtr, 3, uintptr(unsafe.Pointer(f)), uintptr(pcapTstampPrecisionNano), uintptr(unsafe.Pointer(&buf[0])))
+ }
+
+ if cptr == 0 {
+ return nil, errors.New(byteSliceToString(buf))
+ }
+
+ h := &Handle{cptr: pcapTPtr(cptr)}
+ return h, nil
+}
+
+func (p *Handle) pcapClose() {
+ if p.cptr != 0 {
+ _, _, _ = syscall.Syscall(pcapClosePtr, 1, uintptr(p.cptr), 0, 0)
+ }
+ p.cptr = 0
+}
+
+func (p *Handle) pcapGeterr() error {
+ ret, _, _ := syscall.Syscall(pcapGeterrPtr, 1, uintptr(p.cptr), 0, 0)
+ return errors.New(bytePtrToString(ret))
+}
+
+func (p *Handle) pcapStats() (*Stats, error) {
+ var cstats pcapStats
+ ret, _, _ := syscall.Syscall(pcapStatsPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&cstats)), 0)
+ if pcapCint(ret) < 0 {
+ return nil, p.pcapGeterr()
+ }
+ return &Stats{
+ PacketsReceived: int(cstats.Recv),
+ PacketsDropped: int(cstats.Drop),
+ PacketsIfDropped: int(cstats.Ifdrop),
+ }, nil
+}
+
+// for libpcap < 1.8 pcap_compile is NOT thread-safe, so protect it.
+var pcapCompileMu sync.Mutex
+
+func (p *Handle) pcapCompile(expr string, maskp uint32) (pcapBpfProgram, error) {
+ var bpf pcapBpfProgram
+ cexpr, err := syscall.BytePtrFromString(expr)
+ if err != nil {
+ return pcapBpfProgram{}, err
+ }
+ pcapCompileMu.Lock()
+ defer pcapCompileMu.Unlock()
+ res, _, _ := syscall.Syscall6(pcapCompilePtr, 5, uintptr(p.cptr), uintptr(unsafe.Pointer(&bpf)), uintptr(unsafe.Pointer(cexpr)), uintptr(1), uintptr(maskp), 0)
+ if pcapCint(res) < 0 {
+ return bpf, p.pcapGeterr()
+ }
+ return bpf, nil
+}
+
+func (p pcapBpfProgram) free() {
+ _, _, _ = syscall.Syscall(pcapFreecodePtr, 1, uintptr(unsafe.Pointer(&p)), 0, 0)
+}
+
+func (p pcapBpfProgram) toBPFInstruction() []BPFInstruction {
+ bpfInsn := (*[bpfInstructionBufferSize]pcapBpfInstruction)(unsafe.Pointer(p.Insns))[0:p.Len:p.Len]
+ bpfInstruction := make([]BPFInstruction, len(bpfInsn), len(bpfInsn))
+
+ for i, v := range bpfInsn {
+ bpfInstruction[i].Code = v.Code
+ bpfInstruction[i].Jt = v.Jt
+ bpfInstruction[i].Jf = v.Jf
+ bpfInstruction[i].K = v.K
+ }
+ return bpfInstruction
+}
+
+func pcapBpfProgramFromInstructions(bpfInstructions []BPFInstruction) pcapBpfProgram {
+ var bpf pcapBpfProgram
+ bpf.Len = uint32(len(bpfInstructions))
+ cbpfInsns, _, _ := syscall.Syscall(callocPtr, 2, uintptr(len(bpfInstructions)), uintptr(unsafe.Sizeof(bpfInstructions[0])), 0)
+ gbpfInsns := (*[bpfInstructionBufferSize]pcapBpfInstruction)(unsafe.Pointer(cbpfInsns))
+
+ for i, v := range bpfInstructions {
+ gbpfInsns[i].Code = v.Code
+ gbpfInsns[i].Jt = v.Jt
+ gbpfInsns[i].Jf = v.Jf
+ gbpfInsns[i].K = v.K
+ }
+
+ bpf.Insns = (*pcapBpfInstruction)(unsafe.Pointer(cbpfInsns))
+ return bpf
+}
+
+func pcapLookupnet(device string) (netp, maskp uint32, err error) {
+ err = LoadWinPCAP()
+ if err != nil {
+ return 0, 0, err
+ }
+
+ buf := make([]byte, errorBufferSize)
+ dev, err := syscall.BytePtrFromString(device)
+ if err != nil {
+ return 0, 0, err
+ }
+ e, _, _ := syscall.Syscall6(pcapLookupnetPtr, 4, uintptr(unsafe.Pointer(dev)), uintptr(unsafe.Pointer(&netp)), uintptr(unsafe.Pointer(&maskp)), uintptr(unsafe.Pointer(&buf[0])), 0, 0)
+ if pcapCint(e) < 0 {
+ return 0, 0, errors.New(byteSliceToString(buf))
+ }
+ return
+}
+
+func (b *BPF) pcapOfflineFilter(ci gopacket.CaptureInfo, data []byte) bool {
+ var hdr pcapPkthdr
+ hdr.Ts.Sec = int32(ci.Timestamp.Unix())
+ hdr.Ts.Usec = int32(ci.Timestamp.Nanosecond() / 1000)
+ hdr.Caplen = uint32(len(data)) // Trust actual length over ci.Length.
+ hdr.Len = uint32(ci.Length)
+ e, _, _ := syscall.Syscall(pcapOfflineFilterPtr, 3, uintptr(unsafe.Pointer(&b.bpf.bpf)), uintptr(unsafe.Pointer(&hdr)), uintptr(unsafe.Pointer(&data[0])))
+ return e != 0
+}
+
+func (p *Handle) pcapSetfilter(bpf pcapBpfProgram) error {
+ e, _, _ := syscall.Syscall(pcapSetfilterPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&bpf)), 0)
+ if pcapCint(e) < 0 {
+ return p.pcapGeterr()
+ }
+ return nil
+}
+
+func (p *Handle) pcapListDatalinks() (datalinks []Datalink, err error) {
+ var dltbuf *pcapCint
+ ret, _, _ := syscall.Syscall(pcapListDatalinksPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&dltbuf)), 0)
+
+ n := int(pcapCint(ret))
+
+ if n < 0 {
+ return nil, p.pcapGeterr()
+ }
+ defer syscall.Syscall(pcapFreeDatalinksPtr, 1, uintptr(unsafe.Pointer(dltbuf)), 0, 0)
+
+ datalinks = make([]Datalink, n)
+
+ dltArray := (*[1 << 28]pcapCint)(unsafe.Pointer(dltbuf))
+
+ for i := 0; i < n; i++ {
+ datalinks[i].Name = pcapDatalinkValToName(int((*dltArray)[i]))
+ datalinks[i].Description = pcapDatalinkValToDescription(int((*dltArray)[i]))
+ }
+
+ return datalinks, nil
+}
+
+func pcapOpenDead(linkType layers.LinkType, captureLength int) (*Handle, error) {
+ err := LoadWinPCAP()
+ if err != nil {
+ return nil, err
+ }
+
+ cptr, _, _ := syscall.Syscall(pcapOpenDeadPtr, 2, uintptr(linkType), uintptr(captureLength), 0)
+ if cptr == 0 {
+ return nil, errors.New("error opening dead capture")
+ }
+
+ return &Handle{cptr: pcapTPtr(cptr)}, nil
+}
+
+func (p *Handle) pcapNextPacketEx() NextError {
+ r, _, _ := syscall.Syscall(pcapNextExPtr, 3, uintptr(p.cptr), uintptr(unsafe.Pointer(&p.pkthdr)), uintptr(unsafe.Pointer(&p.bufptr)))
+ ret := pcapCint(r)
+ // According to https://github.com/the-tcpdump-group/libpcap/blob/1131a7c26c6f4d4772e4a2beeaf7212f4dea74ac/pcap.c#L398-L406 ,
+ // the return value of pcap_next_ex could be greater than 1 for success.
+ // Let's just make it 1 if it comes bigger than 1.
+ if ret > 1 {
+ ret = 1
+ }
+ return NextError(ret)
+}
+
+func (p *Handle) pcapDatalink() layers.LinkType {
+ ret, _, _ := syscall.Syscall(pcapDatalinkPtr, 1, uintptr(p.cptr), 0, 0)
+ return layers.LinkType(ret)
+}
+
+func (p *Handle) pcapSetDatalink(dlt layers.LinkType) error {
+ ret, _, _ := syscall.Syscall(pcapSetDatalinkPtr, 2, uintptr(p.cptr), uintptr(dlt), 0)
+ if pcapCint(ret) < 0 {
+ return p.pcapGeterr()
+ }
+ return nil
+}
+
+func pcapDatalinkValToName(dlt int) string {
+ err := LoadWinPCAP()
+ if err != nil {
+ panic(err)
+ }
+ ret, _, _ := syscall.Syscall(pcapDatalinkValToNamePtr, 1, uintptr(dlt), 0, 0)
+ return bytePtrToString(ret)
+}
+
+func pcapDatalinkValToDescription(dlt int) string {
+ err := LoadWinPCAP()
+ if err != nil {
+ panic(err)
+ }
+ ret, _, _ := syscall.Syscall(pcapDatalinkValToDescriptionPtr, 1, uintptr(dlt), 0, 0)
+ return bytePtrToString(ret)
+}
+
+func pcapDatalinkNameToVal(name string) int {
+ err := LoadWinPCAP()
+ if err != nil {
+ panic(err)
+ }
+ cptr, err := syscall.BytePtrFromString(name)
+ if err != nil {
+ return 0
+ }
+ ret, _, _ := syscall.Syscall(pcapDatalinkNameToValPtr, 1, uintptr(unsafe.Pointer(cptr)), 0, 0)
+ return int(pcapCint(ret))
+}
+
+func pcapLibVersion() string {
+ err := LoadWinPCAP()
+ if err != nil {
+ panic(err)
+ }
+ ret, _, _ := syscall.Syscall(pcapLibVersionPtr, 0, 0, 0, 0)
+ return bytePtrToString(ret)
+}
+
+func (p *Handle) isOpen() bool {
+ return p.cptr != 0
+}
+
+type pcapDevices struct {
+ all, cur *pcapIf
+}
+
+func (p pcapDevices) free() {
+ syscall.Syscall(pcapFreealldevsPtr, 1, uintptr(unsafe.Pointer(p.all)), 0, 0)
+}
+
+func (p *pcapDevices) next() bool {
+ if p.cur == nil {
+ p.cur = p.all
+ if p.cur == nil {
+ return false
+ }
+ return true
+ }
+ if p.cur.Next == nil {
+ return false
+ }
+ p.cur = p.cur.Next
+ return true
+}
+
+func (p pcapDevices) name() string {
+ return bytePtrToString(uintptr(unsafe.Pointer(p.cur.Name)))
+}
+
+func (p pcapDevices) description() string {
+ return bytePtrToString(uintptr(unsafe.Pointer(p.cur.Description)))
+}
+
+func (p pcapDevices) flags() uint32 {
+ return p.cur.Flags
+}
+
+type pcapAddresses struct {
+ all, cur *pcapAddr
+}
+
+func (p *pcapAddresses) next() bool {
+ if p.cur == nil {
+ p.cur = p.all
+ if p.cur == nil {
+ return false
+ }
+ return true
+ }
+ if p.cur.Next == nil {
+ return false
+ }
+ p.cur = p.cur.Next
+ return true
+}
+
+func (p pcapAddresses) addr() *syscall.RawSockaddr {
+ return p.cur.Addr
+}
+
+func (p pcapAddresses) netmask() *syscall.RawSockaddr {
+ return p.cur.Netmask
+}
+
+func (p pcapAddresses) broadaddr() *syscall.RawSockaddr {
+ return p.cur.Broadaddr
+}
+
+func (p pcapAddresses) dstaddr() *syscall.RawSockaddr {
+ return p.cur.Dstaddr
+}
+
+func (p pcapDevices) addresses() pcapAddresses {
+ return pcapAddresses{all: p.cur.Addresses}
+}
+
+func pcapFindAllDevs() (pcapDevices, error) {
+ var alldevsp pcapDevices
+ err := LoadWinPCAP()
+ if err != nil {
+ return alldevsp, err
+ }
+
+ buf := make([]byte, errorBufferSize)
+
+ ret, _, _ := syscall.Syscall(pcapFindalldevsPtr, 2, uintptr(unsafe.Pointer(&alldevsp.all)), uintptr(unsafe.Pointer(&buf[0])), 0)
+
+ if pcapCint(ret) < 0 {
+ return pcapDevices{}, errors.New(byteSliceToString(buf))
+ }
+ return alldevsp, nil
+}
+
+func (p *Handle) pcapSendpacket(data []byte) error {
+ ret, _, _ := syscall.Syscall(pcapSendpacketPtr, 3, uintptr(p.cptr), uintptr(unsafe.Pointer(&data[0])), uintptr(len(data)))
+ if pcapCint(ret) < 0 {
+ return p.pcapGeterr()
+ }
+ return nil
+}
+
+func (p *Handle) pcapSetdirection(direction Direction) error {
+ status, _, _ := syscall.Syscall(pcapSetdirectionPtr, 2, uintptr(p.cptr), uintptr(direction), 0)
+ if pcapCint(status) < 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *Handle) pcapSnapshot() int {
+ ret, _, _ := syscall.Syscall(pcapSnapshotPtr, 1, uintptr(p.cptr), 0, 0)
+ return int(pcapCint(ret))
+}
+
+func (t TimestampSource) pcapTstampTypeValToName() string {
+ err := LoadWinPCAP()
+ if err != nil {
+ return err.Error()
+ }
+
+ //libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+ if pcapTstampTypeValToNamePtr == 0 {
+ return "pcap timestamp types not supported"
+ }
+ ret, _, _ := syscall.Syscall(pcapTstampTypeValToNamePtr, 1, uintptr(t), 0, 0)
+ return bytePtrToString(ret)
+}
+
+func pcapTstampTypeNameToVal(s string) (TimestampSource, error) {
+ err := LoadWinPCAP()
+ if err != nil {
+ return 0, err
+ }
+
+ //libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+ if pcapTstampTypeNameToValPtr == 0 {
+ return 0, statusError(pcapCint(pcapError))
+ }
+ cs, err := syscall.BytePtrFromString(s)
+ if err != nil {
+ return 0, err
+ }
+ ret, _, _ := syscall.Syscall(pcapTstampTypeNameToValPtr, 1, uintptr(unsafe.Pointer(cs)), 0, 0)
+ t := pcapCint(ret)
+ if t < 0 {
+ return 0, statusError(pcapCint(t))
+ }
+ return TimestampSource(t), nil
+}
+
+func (p *InactiveHandle) pcapGeterr() error {
+ ret, _, _ := syscall.Syscall(pcapGeterrPtr, 1, uintptr(p.cptr), 0, 0)
+ return errors.New(bytePtrToString(ret))
+}
+
+func (p *InactiveHandle) pcapActivate() (*Handle, activateError) {
+ r, _, _ := syscall.Syscall(pcapActivatePtr, 1, uintptr(p.cptr), 0, 0)
+ ret := activateError(pcapCint(r))
+ if ret != aeNoError {
+ return nil, ret
+ }
+ h := &Handle{
+ cptr: p.cptr,
+ }
+ p.cptr = 0
+ return h, ret
+}
+
+func (p *InactiveHandle) pcapClose() {
+ if p.cptr != 0 {
+ _, _, _ = syscall.Syscall(pcapClosePtr, 1, uintptr(p.cptr), 0, 0)
+ }
+ p.cptr = 0
+}
+
+func pcapCreate(device string) (*InactiveHandle, error) {
+ err := LoadWinPCAP()
+ if err != nil {
+ return nil, err
+ }
+
+ buf := make([]byte, errorBufferSize)
+ dev, err := syscall.BytePtrFromString(device)
+ if err != nil {
+ return nil, err
+ }
+ cptr, _, _ := syscall.Syscall(pcapCreatePtr, 2, uintptr(unsafe.Pointer(dev)), uintptr(unsafe.Pointer(&buf[0])), 0)
+ if cptr == 0 {
+ return nil, errors.New(byteSliceToString(buf))
+ }
+ return &InactiveHandle{cptr: pcapTPtr(cptr)}, nil
+}
+
+func (p *InactiveHandle) pcapSetSnaplen(snaplen int) error {
+ status, _, _ := syscall.Syscall(pcapSetSnaplenPtr, 2, uintptr(p.cptr), uintptr(snaplen), 0)
+ if pcapCint(status) < 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetPromisc(promisc bool) error {
+ var pro uintptr
+ if promisc {
+ pro = 1
+ }
+ status, _, _ := syscall.Syscall(pcapSetPromiscPtr, 2, uintptr(p.cptr), pro, 0)
+ if pcapCint(status) < 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetTimeout(timeout time.Duration) error {
+ status, _, _ := syscall.Syscall(pcapSetTimeoutPtr, 2, uintptr(p.cptr), uintptr(timeoutMillis(timeout)), 0)
+
+ if pcapCint(status) < 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapListTstampTypes() (out []TimestampSource) {
+ //libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+ if pcapListTstampTypesPtr == 0 {
+ return
+ }
+ var types *pcapCint
+ ret, _, _ := syscall.Syscall(pcapListTstampTypesPtr, 2, uintptr(p.cptr), uintptr(unsafe.Pointer(&types)), 0)
+ n := int(pcapCint(ret))
+ if n < 0 {
+ return // public interface doesn't have error :(
+ }
+ defer syscall.Syscall(pcapFreeTstampTypesPtr, 1, uintptr(unsafe.Pointer(types)), 0, 0)
+ typesArray := (*[1 << 28]pcapCint)(unsafe.Pointer(types))
+ for i := 0; i < n; i++ {
+ out = append(out, TimestampSource((*typesArray)[i]))
+ }
+ return
+}
+
+func (p *InactiveHandle) pcapSetTstampType(t TimestampSource) error {
+ //libpcap <1.2 doesn't have pcap_*_tstamp_* functions
+ if pcapSetTstampTypePtr == 0 {
+ return statusError(pcapError)
+ }
+ status, _, _ := syscall.Syscall(pcapSetTstampTypePtr, 2, uintptr(p.cptr), uintptr(t), 0)
+ if pcapCint(status) < 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetRfmon(monitor bool) error {
+ //winpcap does not support rfmon
+ if pcapCanSetRfmonPtr == 0 {
+ return CannotSetRFMon
+ }
+ var mon uintptr
+ if monitor {
+ mon = 1
+ }
+ canset, _, _ := syscall.Syscall(pcapCanSetRfmonPtr, 1, uintptr(p.cptr), 0, 0)
+ switch canset {
+ case 0:
+ return CannotSetRFMon
+ case 1:
+ // success
+ default:
+ return statusError(pcapCint(canset))
+ }
+ status, _, _ := syscall.Syscall(pcapSetRfmonPtr, 2, uintptr(p.cptr), mon, 0)
+ if status != 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetBufferSize(bufferSize int) error {
+ status, _, _ := syscall.Syscall(pcapSetBufferSizePtr, 2, uintptr(p.cptr), uintptr(bufferSize), 0)
+ if pcapCint(status) < 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *InactiveHandle) pcapSetImmediateMode(mode bool) error {
+ //libpcap <1.5 does not have pcap_set_immediate_mode
+ if pcapSetImmediateModePtr == 0 {
+ return statusError(pcapError)
+ }
+ var md uintptr
+ if mode {
+ md = 1
+ }
+ status, _, _ := syscall.Syscall(pcapSetImmediateModePtr, 2, uintptr(p.cptr), md, 0)
+ if pcapCint(status) < 0 {
+ return statusError(pcapCint(status))
+ }
+ return nil
+}
+
+func (p *Handle) setNonBlocking() error {
+ // do nothing
+ return nil
+}
+
+// waitForPacket waits for a packet or for the timeout to expire.
+func (p *Handle) waitForPacket() {
+ // can't use select() so instead just switch goroutines
+ runtime.Gosched()
+}
+
+// openOfflineFile returns contents of input file as a *Handle.
+func openOfflineFile(file *os.File) (handle *Handle, err error) {
+ err = LoadWinPCAP()
+ if err != nil {
+ return nil, err
+ }
+
+ buf := make([]byte, errorBufferSize)
+ cf := file.Fd()
+
+ var cptr uintptr
+ if pcapOpenOfflineWithTstampPrecisionPtr == 0 {
+ cptr, _, _ = syscall.Syscall(pcapHopenOfflinePtr, 2, cf, uintptr(unsafe.Pointer(&buf[0])), 0)
+ } else {
+ cptr, _, _ = syscall.Syscall(pcapHOpenOfflineWithTstampPrecisionPtr, 3, cf, uintptr(pcapTstampPrecisionNano), uintptr(unsafe.Pointer(&buf[0])))
+ }
+
+ if cptr == 0 {
+ return nil, errors.New(byteSliceToString(buf))
+ }
+ return &Handle{cptr: pcapTPtr(cptr)}, nil
+}
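
On Windows every exported entry point funnels through LoadWinPCAP, so callers that want a clean error (rather than the panic some value-returning helpers fall back to) can invoke it themselves before touching the package. A minimal sketch, assuming Npcap/WinPcap may be absent on the target machine:

```go
// +build windows

package main

import (
	"log"

	"github.com/google/gopacket/pcap"
)

func main() {
	// init() already attempted the load; calling LoadWinPCAP again is cheap
	// and returns the load failure as an ordinary error.
	if err := pcap.LoadWinPCAP(); err != nil {
		log.Fatalf("wpcap.dll could not be loaded (is Npcap installed?): %v", err)
	}
	devs, err := pcap.FindAllDevs()
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range devs {
		log.Printf("%s: %s", d.Name, d.Description)
	}
}
```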
diff --git a/vendor/github.com/google/gopacket/pcap/test_dns.pcap b/vendor/github.com/google/gopacket/pcap/test_dns.pcap
new file mode 100644
index 0000000..3a79f92
Binary files /dev/null and b/vendor/github.com/google/gopacket/pcap/test_dns.pcap differ
diff --git a/vendor/github.com/google/gopacket/pcap/test_ethernet.pcap b/vendor/github.com/google/gopacket/pcap/test_ethernet.pcap
new file mode 100644
index 0000000..1d01bd9
Binary files /dev/null and b/vendor/github.com/google/gopacket/pcap/test_ethernet.pcap differ
diff --git a/vendor/github.com/google/gopacket/pcap/test_loopback.pcap b/vendor/github.com/google/gopacket/pcap/test_loopback.pcap
new file mode 100644
index 0000000..ddeb82c
Binary files /dev/null and b/vendor/github.com/google/gopacket/pcap/test_loopback.pcap differ
diff --git a/vendor/github.com/google/gopacket/time.go b/vendor/github.com/google/gopacket/time.go
new file mode 100644
index 0000000..6d116cd
--- /dev/null
+++ b/vendor/github.com/google/gopacket/time.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+ "math"
+ "time"
+)
+
+// TimestampResolution represents the resolution of timestamps in Base^Exponent.
+type TimestampResolution struct {
+ Base, Exponent int
+}
+
+func (t TimestampResolution) String() string {
+ return fmt.Sprintf("%d^%d", t.Base, t.Exponent)
+}
+
+// ToDuration returns the smallest representable time difference as a time.Duration
+func (t TimestampResolution) ToDuration() time.Duration {
+ if t.Base == 0 {
+ return 0
+ }
+ if t.Exponent == 0 {
+ return time.Second
+ }
+ switch t.Base {
+ case 10:
+ return time.Duration(math.Pow10(t.Exponent + 9))
+ case 2:
+ if t.Exponent < 0 {
+ return time.Second >> uint(-t.Exponent)
+ }
+ return time.Second << uint(t.Exponent)
+ default:
+ // this might lose precision
+ return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent)))
+ }
+}
+
+// TimestampResolutionInvalid represents an invalid timestamp resolution
+var TimestampResolutionInvalid = TimestampResolution{}
+
+// TimestampResolutionMillisecond is a resolution of 10^-3s
+var TimestampResolutionMillisecond = TimestampResolution{10, -3}
+
+// TimestampResolutionMicrosecond is a resolution of 10^-6s
+var TimestampResolutionMicrosecond = TimestampResolution{10, -6}
+
+// TimestampResolutionNanosecond is a resolution of 10^-9s
+var TimestampResolutionNanosecond = TimestampResolution{10, -9}
+
+// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds
+var TimestampResolutionNTP = TimestampResolution{2, -32}
+
+// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond
+var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond
+
+// PacketSourceResolution is an interface for packet data sources that
+// support reporting the timestamp resolution of the acquired timestamps.
+// Returned timestamps will always have TimestampResolutionNanosecond due
+// to the use of time.Time, but scaling might have occurred if acquired
+// timestamps have a different resolution.
+type PacketSourceResolution interface {
+ // Resolution returns the timestamp resolution of acquired timestamps before scaling to TimestampResolutionNanosecond.
+ Resolution() TimestampResolution
+}
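
Since time.Duration counts whole nanoseconds, ToDuration silently truncates resolutions finer than 10^-9 s; the NTP resolution of 2^-32 s (≈233 ps) comes out as zero. A small sketch exercising both the base-10 and base-2 paths:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
)

func main() {
	// Base-10 path: 10^(-6+9) = 1000ns = 1µs.
	fmt.Println(gopacket.TimestampResolutionMicrosecond.ToDuration()) // 1µs
	// Base-2 path: time.Second >> 32 underflows to 0 because a
	// time.Duration cannot represent ~233 picoseconds.
	fmt.Println(gopacket.TimestampResolutionNTP.ToDuration()) // 0s
}
```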
diff --git a/vendor/github.com/google/gopacket/writer.go b/vendor/github.com/google/gopacket/writer.go
new file mode 100644
index 0000000..5d303dc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/writer.go
@@ -0,0 +1,232 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+ "fmt"
+)
+
+// SerializableLayer allows its implementations to be written out as a set of bytes,
+// so those bytes may be sent on the wire or otherwise used by the caller.
+// SerializableLayer is implemented by certain Layer types, and can be encoded to
+// bytes using a SerializeBuffer.
+type SerializableLayer interface {
+ // SerializeTo writes this layer to a slice, growing that slice if necessary
+ // to make it fit the layer's data.
+ // Args:
+ // b: SerializeBuffer to write this layer on to. When called, b.Bytes()
+ // is the payload this layer should wrap, if any. Note that this
+ // layer can either prepend itself (common), append itself
+ // (uncommon), or both (sometimes padding or footers are required at
+ // the end of packet data). It's also possible (though probably very
+ // rarely needed) to overwrite any bytes in the current payload.
+ // After this call, b.Bytes() should return the byte encoding of
+ // this layer wrapping the original b.Bytes() payload.
+ // opts: options to use while writing out data.
+ // Returns:
+ // error if a problem was encountered during encoding. If an error is
+ // returned, the bytes in data should be considered invalidated, and
+ // not used.
+ //
+ // SerializeTo calls SHOULD entirely ignore LayerContents and
+ // LayerPayload. It just serializes based on struct fields, neither
+ // modifying nor using contents/payload.
+ SerializeTo(b SerializeBuffer, opts SerializeOptions) error
+ // LayerType returns the type of the layer that is being serialized to the buffer
+ LayerType() LayerType
+}
+
+// SerializeOptions provides options for behaviors that SerializableLayers may want to
+// implement.
+type SerializeOptions struct {
+ // FixLengths determines whether, during serialization, layers should fix
+ // the values for any length field that depends on the payload.
+ FixLengths bool
+ // ComputeChecksums determines whether, during serialization, layers
+ // should recompute checksums based on their payloads.
+ ComputeChecksums bool
+}
+
+// SerializeBuffer is a helper used by gopacket for writing out packet layers.
+// SerializeBuffer starts off as an empty []byte. Subsequent calls to PrependBytes
+// return byte slices before the current Bytes(), AppendBytes returns byte
+// slices after.
+//
+// Byte slices returned by PrependBytes/AppendBytes are NOT zero'd out, so if
+// you want to make sure they're all zeros, set them as such.
+//
+// SerializeBuffer is specifically designed to handle packet writing, where unlike
+// with normal writes it's easier to start writing at the inner-most layer and
+// work outward, meaning that we often need to prepend bytes. This runs counter to
+// typical writes to byte slices using append(), where we only write at the end
+// of the buffer.
+//
+// It can be reused via Clear. Note, however, that a Clear call will invalidate the
+// byte slices returned by any previous Bytes() call (the same buffer is
+// reused).
+//
+// 1) Reusing a write buffer is generally much faster than creating a new one,
+// and with the default implementation it avoids additional memory allocations.
+// 2) If a byte slice from a previous Bytes() call will continue to be used,
+// it's better to create a new SerializeBuffer.
+//
+// The Clear method is specifically designed to minimize memory allocations for
+// similar later workloads on the SerializeBuffer. IE: if you make a set of
+// Prepend/Append calls, then clear, then make the same calls with the same
+// sizes, the second round (and all future similar rounds) shouldn't allocate
+// any new memory.
+type SerializeBuffer interface {
+ // Bytes returns the contiguous set of bytes collected so far by Prepend/Append
+ // calls. The slice returned by Bytes will be modified by future Clear calls,
+ // so if you're planning on clearing this SerializeBuffer, you may want to copy
+ // Bytes somewhere safe first.
+ Bytes() []byte
+ // PrependBytes returns a set of bytes which prepends the current bytes in this
+ // buffer. These bytes start in an indeterminate state, so they should be
+ // overwritten by the caller. The caller must only call PrependBytes if they
+ // know they're going to immediately overwrite all bytes returned.
+ PrependBytes(num int) ([]byte, error)
+ // AppendBytes returns a set of bytes which appends the current bytes in this
+ // buffer. These bytes start in an indeterminate state, so they should be
+ // overwritten by the caller. The caller must only call AppendBytes if they
+ // know they're going to immediately overwrite all bytes returned.
+ AppendBytes(num int) ([]byte, error)
+ // Clear resets the SerializeBuffer to a new, empty buffer. After a call to clear,
+ // the byte slice returned by any previous call to Bytes() for this buffer
+ // should be considered invalidated.
+ Clear() error
+ // Layers returns all the Layers that have been successfully serialized into this buffer
+ // already.
+ Layers() []LayerType
+ // PushLayer adds the current Layer to the list of Layers that have been serialized
+ // into this buffer.
+ PushLayer(LayerType)
+}
+
+type serializeBuffer struct {
+ data []byte
+ start int
+ prepended, appended int
+ layers []LayerType
+}
+
+// NewSerializeBuffer creates a new instance of the default implementation of
+// the SerializeBuffer interface.
+func NewSerializeBuffer() SerializeBuffer {
+ return &serializeBuffer{}
+}
+
+// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an
+// expected number of bytes prepended/appended. This tends to decrease the
+// number of memory allocations made by the buffer during writes.
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer {
+ return &serializeBuffer{
+ data: make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength),
+ start: expectedPrependLength,
+ prepended: expectedPrependLength,
+ appended: expectedAppendLength,
+ }
+}
+
+func (w *serializeBuffer) Bytes() []byte {
+ return w.data[w.start:]
+}
+
+func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) {
+ if num < 0 {
+ panic("num < 0")
+ }
+ if w.start < num {
+ toPrepend := w.prepended
+ if toPrepend < num {
+ toPrepend = num
+ }
+ w.prepended += toPrepend
+ length := cap(w.data) + toPrepend
+ newData := make([]byte, length)
+ newStart := w.start + toPrepend
+ copy(newData[newStart:], w.data[w.start:])
+ w.start = newStart
+ w.data = newData[:toPrepend+len(w.data)]
+ }
+ w.start -= num
+ return w.data[w.start : w.start+num], nil
+}
+
+func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) {
+ if num < 0 {
+ panic("num < 0")
+ }
+ initialLength := len(w.data)
+ if cap(w.data)-initialLength < num {
+ toAppend := w.appended
+ if toAppend < num {
+ toAppend = num
+ }
+ w.appended += toAppend
+ newData := make([]byte, cap(w.data)+toAppend)
+ copy(newData[w.start:], w.data[w.start:])
+ w.data = newData[:initialLength]
+ }
+ // Grow the buffer. We know it'll be under capacity given above.
+ w.data = w.data[:initialLength+num]
+ return w.data[initialLength:], nil
+}
+
+func (w *serializeBuffer) Clear() error {
+ w.start = w.prepended
+ w.data = w.data[:w.start]
+ w.layers = w.layers[:0]
+ return nil
+}
+
+func (w *serializeBuffer) Layers() []LayerType {
+ return w.layers
+}
+
+func (w *serializeBuffer) PushLayer(l LayerType) {
+ w.layers = append(w.layers, l)
+}
+
+// SerializeLayers clears the given write buffer, then writes all layers into it so
+// they correctly wrap each other. Note that by clearing the buffer, it
+// invalidates all slices previously returned by w.Bytes()
+//
+// Example:
+// buf := gopacket.NewSerializeBuffer()
+// opts := gopacket.SerializeOptions{}
+// gopacket.SerializeLayers(buf, opts, a, b, c)
+// firstPayload := buf.Bytes() // contains byte representation of a(b(c))
+// gopacket.SerializeLayers(buf, opts, d, e, f)
+// secondPayload := buf.Bytes() // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf.
+func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error {
+ w.Clear()
+ for i := len(layers) - 1; i >= 0; i-- {
+ layer := layers[i]
+ err := layer.SerializeTo(w, opts)
+ if err != nil {
+ return err
+ }
+ w.PushLayer(layer.LayerType())
+ }
+ return nil
+}
+
+// SerializePacket is a convenience function that calls SerializeLayers
+// on packet's Layers().
+// It returns an error if one of the packet layers is not a SerializableLayer.
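+//
+// A minimal usage sketch (hedged; pkt is assumed to be a Packet obtained
+// elsewhere, e.g. from a PacketSource):
+//  buf := gopacket.NewSerializeBuffer()
+//  if err := gopacket.SerializePacket(buf, gopacket.SerializeOptions{}, pkt); err != nil {
+//      // a layer could not be serialized
+//  }
+//  raw := buf.Bytes() // valid until buf is cleared or reused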
+func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error {
+ sls := []SerializableLayer{}
+ for _, layer := range packet.Layers() {
+ sl, ok := layer.(SerializableLayer)
+ if !ok {
+ return fmt.Errorf("layer %s is not serializable", layer.LayerType().String())
+ }
+ sls = append(sls, sl)
+ }
+ return SerializeLayers(buf, opts, sls...)
+}
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
new file mode 100644
index 0000000..c2bb6e4
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.gitignore
@@ -0,0 +1,31 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
new file mode 100644
index 0000000..78be21c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+go:
+ - 1.x
+
+script:
+ - go test -v -cpu=2
+ - go test -v -cpu=2 -race
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
new file mode 100644
index 0000000..bd899d8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2015, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
new file mode 100644
index 0000000..dd3c9d4
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/README.md
@@ -0,0 +1,31 @@
+[godoc](https://godoc.org/github.com/pierrec/lz4)
+[Build Status](https://travis-ci.org/pierrec/lz4)
+
+# lz4
+LZ4 compression and decompression in pure Go
+
+## Usage
+
+```go
+import "github.com/pierrec/lz4"
+```
+
+## Description
+
+Package lz4 implements reading and writing lz4 compressed data (a frame),
+as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+using an io.Reader (decompression) and io.Writer (compression).
+It is designed to minimize memory usage while maximizing throughput by being able to
+[de]compress data concurrently.
+
+The Reader and the Writer support concurrent processing provided the supplied buffers are
+large enough (in multiples of BlockMaxSize) and there is no block dependency.
+Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
+The runtime.GOMAXPROCS() value is used to apply concurrency or not.
+
+Although the block level compression and decompression functions are exposed and are fully compatible
+with the lz4 block format definition, they are low level and should not be used directly.
+For a complete description of an lz4 compressed block, see:
+http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+
+See https://github.com/Cyan4973/lz4 for the reference C implementation.
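+
+## Example
+
+A minimal sketch of frame compression and decompression (hedged: error
+handling is elided, and `in`/`out` are assumed to be pre-existing
+`io.Reader`/`io.Writer` values):
+
+```go
+// compress in into out
+zw := lz4.NewWriter(out)
+io.Copy(zw, in) // Writer.ReadFrom drives the copy, concurrently when possible
+zw.Close()      // flush buffered data and write the frame checksum
+
+// decompress in into out
+zr := lz4.NewReader(in)
+io.Copy(out, zr) // Reader.WriteTo drives the copy
+```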
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
new file mode 100644
index 0000000..145eec2
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/block.go
@@ -0,0 +1,445 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "errors"
+)
+
+// block represents a frame data block.
+// Used when compressing or decompressing frame blocks concurrently.
+type block struct {
+ compressed bool
+ zdata []byte // compressed data
+ data []byte // decompressed data
+ offset int // offset within the data (with block dependency, the 64Kb window is prepended to it)
+ checksum uint32 // compressed data checksum
+ err error // error while [de]compressing
+}
+
+var (
+ // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted.
+ ErrInvalidSource = errors.New("lz4: invalid source")
+ // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when
+ // the supplied buffer for [de]compression is too small.
+ ErrShortBuffer = errors.New("lz4: short buffer")
+)
+
+// CompressBlockBound returns the maximum compressed size of a buffer of size n, when the data is not compressible.
+func CompressBlockBound(n int) int {
+ return n + n/255 + 16
+}
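+
+// For example, a 64Kb input (n = 65536) gives a bound of
+// 65536 + 65536/255 + 16 = 65536 + 257 + 16 = 65809 bytes.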
+
+// UncompressBlock decompresses the source buffer into the destination one,
+// starting at the di index and returning the decompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte, di int) (int, error) {
+ si, sn, di0 := 0, len(src), di
+ if sn == 0 {
+ return 0, nil
+ }
+
+ for {
+ // literals and match lengths (token)
+ lLen := int(src[si] >> 4)
+ mLen := int(src[si] & 0xF)
+ if si++; si == sn {
+ return di, ErrInvalidSource
+ }
+
+ // literals
+ if lLen > 0 {
+ if lLen == 0xF {
+ for src[si] == 0xFF {
+ lLen += 0xFF
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ lLen += int(src[si])
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ if len(dst)-di < lLen || si+lLen > sn {
+ return di - di0, ErrShortBuffer
+ }
+ di += copy(dst[di:], src[si:si+lLen])
+
+ if si += lLen; si >= sn {
+ return di - di0, nil
+ }
+ }
+
+ if si += 2; si >= sn {
+ return di, ErrInvalidSource
+ }
+ offset := int(src[si-2]) | int(src[si-1])<<8
+ if di-offset < 0 || offset == 0 {
+ return di - di0, ErrInvalidSource
+ }
+
+ // match
+ if mLen == 0xF {
+ for src[si] == 0xFF {
+ mLen += 0xFF
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ mLen += int(src[si])
+ if si++; si == sn {
+ return di - di0, ErrInvalidSource
+ }
+ }
+ // minimum match length is 4
+ mLen += 4
+ if len(dst)-di <= mLen {
+ return di - di0, ErrShortBuffer
+ }
+
+ // copy the match (NB. match is at least 4 bytes long)
+ // NB. past di, copy() would write old bytes instead of
+ // the ones we just copied, so split the work into the largest chunk.
+ for ; mLen >= offset; mLen -= offset {
+ di += copy(dst[di:], dst[di-offset:di])
+ }
+ di += copy(dst[di:], dst[di-offset:di-offset+mLen])
+ }
+}
+
+// CompressBlock compresses the source buffer starting at soffset into the destination one.
+// This is the fast version of LZ4 compression and also the default one.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlock(src, dst []byte, soffset int) (int, error) {
+ sn, dn := len(src)-mfLimit, len(dst)
+ if sn <= 0 || dn == 0 || soffset >= sn {
+ return 0, nil
+ }
+ var si, di int
+
+ // fast scan strategy:
+ // we only need a hash table to store the last sequences (4 bytes)
+ var hashTable [1 << hashLog]int
+ var hashShift = uint((minMatch * 8) - hashLog)
+
+ // Initialise the hash table with the first 64Kb of the input buffer
+ // (used when compressing dependent blocks)
+ for si < soffset {
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ si++
+ hashTable[h] = si
+ }
+
+ anchor := si
+ fma := 1 << skipStrength
+ for si < sn-minMatch {
+ // hash the next 4 bytes (sequence)...
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ // -1 to separate existing entries from new ones
+ ref := hashTable[h] - 1
+ // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
+ hashTable[h] = si + 1
+ // no need to check the last 3 bytes in the first literal 4 bytes as
+ // this guarantees that the next match, if any, is compressed with
+ // a lower size, since to have some compression we must have:
+ // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size)
+ // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap
+ // and by definition we do have:
+ // ll >= 1, ml >= 4
+ // => ll+ml >= 5
+ // => so overlap must be 0
+
+ // the sequence is new, out of bound (64kb) or not valid: try next sequence
+ if ref < 0 || fma&(1<<skipStrength-1) > 0 ||
+ (si-ref)>>winSizeLog > 0 ||
+ src[ref] != src[si] ||
+ src[ref+1] != src[si+1] ||
+ src[ref+2] != src[si+2] ||
+ src[ref+3] != src[si+3] {
+ // variable step: improves performance on non-compressible data
+ si += fma >> skipStrength
+ fma++
+ continue
+ }
+ // match found
+ fma = 1 << skipStrength
+ lLen := si - anchor
+ offset := si - ref
+
+ // encode match length part 1
+ si += minMatch
+ mLen := si // match length has minMatch already
+ for si <= sn && src[si] == src[si-offset] {
+ si++
+ }
+ mLen = si - mLen
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // encode literals length
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(l)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // literals
+ if di+lLen >= dn {
+ return di, ErrShortBuffer
+ }
+ di += copy(dst[di:], src[anchor:anchor+lLen])
+ anchor = si
+
+ // encode offset
+ if di += 2; di >= dn {
+ return di, ErrShortBuffer
+ }
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // encode match length part 2
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(mLen)
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ }
+
+ if anchor == 0 {
+ // incompressible
+ return 0, nil
+ }
+
+ // last literals
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(lLen)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // write literals
+ src = src[anchor:]
+ switch n := di + len(src); {
+ case n > dn:
+ return di, ErrShortBuffer
+ case n >= sn:
+ // incompressible
+ return 0, nil
+ }
+ di += copy(dst[di:], src)
+ return di, nil
+}
+
+// CompressBlockHC compresses the source buffer starting at soffset into the destination one.
+// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
+//
+// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
+//
+// An error is returned if the destination buffer is too small.
+func CompressBlockHC(src, dst []byte, soffset int) (int, error) {
+ sn, dn := len(src)-mfLimit, len(dst)
+ if sn <= 0 || dn == 0 || soffset >= sn {
+ return 0, nil
+ }
+ var si, di int
+
+ // Hash Chain strategy:
+ // we need a hash table and a chain table
+ // the chain table cannot contain more entries than the window size (64Kb entries)
+ var hashTable [1 << hashLog]int
+ var chainTable [winSize]int
+ var hashShift = uint((minMatch * 8) - hashLog)
+
+ // Initialise the hash table with the first 64Kb of the input buffer
+ // (used when compressing dependent blocks)
+ for si < soffset {
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ chainTable[si&winMask] = hashTable[h]
+ si++
+ hashTable[h] = si
+ }
+
+ anchor := si
+ for si < sn-minMatch {
+ // hash the next 4 bytes (sequence)...
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+
+ // follow the chain until out of window and give the longest match
+ mLen := 0
+ offset := 0
+ for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 {
+ // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length
+ if src[next+mLen] == src[si+mLen] {
+ for ml := 0; ; ml++ {
+ if src[next+ml] != src[si+ml] || si+ml > sn {
+ // found a longer match, keep its position and length
+ if mLen < ml && ml >= minMatch {
+ mLen = ml
+ offset = si - next
+ }
+ break
+ }
+ }
+ }
+ }
+ chainTable[si&winMask] = hashTable[h]
+ hashTable[h] = si + 1
+
+ // no match found
+ if mLen == 0 {
+ si++
+ continue
+ }
+
+ // match found
+ // update hash/chain tables with overlapping bytes:
+ // si already hashed, add everything from si+1 up to the match length
+ for si, ml := si+1, si+mLen; si < ml; {
+ h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
+ chainTable[si&winMask] = hashTable[h]
+ si++
+ hashTable[h] = si
+ }
+
+ lLen := si - anchor
+ si += mLen
+ mLen -= minMatch // match length does not include minMatch
+
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // encode literals length
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(l)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // literals
+ if di+lLen >= dn {
+ return di, ErrShortBuffer
+ }
+ di += copy(dst[di:], src[anchor:anchor+lLen])
+ anchor = si
+
+ // encode offset
+ if di += 2; di >= dn {
+ return di, ErrShortBuffer
+ }
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // encode match length part 2
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(mLen)
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ }
+
+ if anchor == 0 {
+ // incompressible
+ return 0, nil
+ }
+
+ // last literals
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+ }
+ dst[di] = byte(lLen)
+ }
+ if di++; di == dn {
+ return di, ErrShortBuffer
+ }
+
+ // write literals
+ src = src[anchor:]
+ switch n := di + len(src); {
+ case n > dn:
+ return di, ErrShortBuffer
+ case n >= sn:
+ // incompressible
+ return 0, nil
+ }
+ di += copy(dst[di:], src)
+ return di, nil
+}
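+
+// A hedged sketch of direct block usage (the package otherwise recommends the
+// frame-level Reader and Writer instead):
+//  dst := make([]byte, CompressBlockBound(len(src)))
+//  n, err := CompressBlock(src, dst, 0)
+//  if err == nil && n == 0 {
+//      // src is incompressible; store it as-is
+//  }
+//  out := make([]byte, len(src))
+//  m, err := UncompressBlock(dst[:n], out, 0)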
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 0000000..ddb82f6
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,105 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// using an io.Reader (decompression) and io.Writer (compression).
+// It is designed to minimize memory usage while maximizing throughput by being able to
+// [de]compress data concurrently.
+//
+// The Reader and the Writer support concurrent processing provided the supplied buffers are
+// large enough (in multiples of BlockMaxSize) and there is no block dependency.
+// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
+// The runtime.GOMAXPROCS() value is used to apply concurrency or not.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+package lz4
+
+import (
+ "hash"
+ "sync"
+
+ "github.com/pierrec/xxHash/xxHash32"
+)
+
+const (
+ // Extension is the LZ4 frame file name extension
+ Extension = ".lz4"
+ // Version is the LZ4 frame format version
+ Version = 1
+
+ frameMagic = uint32(0x184D2204)
+ frameSkipMagic = uint32(0x184D2A50)
+
+ // The following constants are used to setup the compression algorithm.
+ minMatch = 4 // the minimum size of the match sequence size (4 bytes)
+ winSizeLog = 16 // LZ4 64Kb window size limit
+ winSize = 1 << winSizeLog
+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+ // hashLog determines the size of the hash table used to quickly find a previous match position.
+ // Its value influences the compression speed and memory usage, the lower the faster,
+ // but at the expense of the compression ratio.
+ // 16 seems to be the best compromise.
+ hashLog = 16
+ hashTableSize = 1 << hashLog
+ hashShift = uint((minMatch * 8) - hashLog)
+
+ mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
+ skipStrength = 6 // variable step for fast scan
+
+ hasher = uint32(2654435761) // prime number used to hash minMatch
+)
+
+// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
+var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
+var bsMapValue = map[int]byte{}
+
+// Reversed.
+func init() {
+ for i, v := range bsMapID {
+ bsMapValue[v] = i
+ }
+}
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary (typically when using the Reader concurrency).
+type Header struct {
+ BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one)
+ BlockChecksum bool // compressed blocks are checksummed
+ NoChecksum bool // disable the frame checksum
+ BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+ Size uint64 // the frame total size. It is _not_ computed by the Writer.
+ HighCompression bool // use high compression (only for the Writer)
+ done bool // whether the descriptor was processed (Read or Write and checked)
+ // Removed as not supported
+ // Dict bool // a dictionary id is to be used
+ // DictID uint32 // the dictionary id read from the frame, if any.
+}
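+
+// A hedged configuration sketch (w is any io.Writer): Header fields may be
+// set on a Writer before its first Write, as documented on NewWriter:
+//  zw := lz4.NewWriter(w)
+//  zw.Header = lz4.Header{BlockMaxSize: 256 << 10, BlockChecksum: true}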
+
+// xxhPool wraps the standard pool for xxHash items.
+// Putting items back in the pool automatically resets them.
+type xxhPool struct {
+ sync.Pool
+}
+
+func (p *xxhPool) Get() hash.Hash32 {
+ return p.Pool.Get().(hash.Hash32)
+}
+
+func (p *xxhPool) Put(h hash.Hash32) {
+ h.Reset()
+ p.Pool.Put(h)
+}
+
+// hashPool is used by readers and writers and contains xxHash items.
+var hashPool = xxhPool{
+ Pool: sync.Pool{
+ New: func() interface{} { return xxHash32.New(0) },
+ },
+}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 0000000..9f7fd60
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,364 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+ "runtime"
+ "sync"
+ "sync/atomic"
+)
+
+// ErrInvalid is returned when the data being read is not an LZ4 archive
+// (LZ4 magic number detection failed).
+var ErrInvalid = errors.New("invalid lz4 data")
+
+// errEndOfBlock is returned by readBlock when it has reached the last block of the frame.
+// It is not an error.
+var errEndOfBlock = errors.New("end of block")
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+ Pos int64 // position within the source
+ Header
+ src io.Reader
+ checksum hash.Hash32 // frame hash
+ wg sync.WaitGroup // decompressing go routine wait group
+ data []byte // buffered decompressed data
+ window []byte // 64Kb decompressed data window
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+ return &Reader{
+ src: src,
+ checksum: hashPool.Get(),
+ }
+}
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame, although the LZ4
+// specification recommends that skippable frames not be used as first frames.
+func (z *Reader) readHeader(first bool) error {
+ defer z.checksum.Reset()
+
+ for {
+ var magic uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil {
+ if !first && err == io.ErrUnexpectedEOF {
+ return io.EOF
+ }
+ return err
+ }
+ z.Pos += 4
+ if magic>>8 == frameSkipMagic>>8 {
+ var skipSize uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil {
+ return err
+ }
+ z.Pos += 4
+ m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
+ z.Pos += m
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ if magic != frameMagic {
+ return ErrInvalid
+ }
+ break
+ }
+
+ // header
+ var buf [8]byte
+ if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
+ return err
+ }
+ z.Pos += 2
+
+ b := buf[0]
+ if b>>6 != Version {
+ return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version)
+ }
+ z.BlockDependency = b>>5&1 == 0
+ z.BlockChecksum = b>>4&1 > 0
+ frameSize := b>>3&1 > 0
+ z.NoChecksum = b>>2&1 == 0
+ // z.Dict = b&1 > 0
+
+ bmsID := buf[1] >> 4 & 0x7
+ bSize, ok := bsMapID[bmsID]
+ if !ok {
+ return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID)
+ }
+ z.BlockMaxSize = bSize
+
+ z.checksum.Write(buf[0:2])
+
+ if frameSize {
+ if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil {
+ return err
+ }
+ z.Pos += 8
+ binary.LittleEndian.PutUint64(buf[:], z.Size)
+ z.checksum.Write(buf[0:8])
+ }
+
+ // if z.Dict {
+ // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil {
+ // return err
+ // }
+ // z.Pos += 4
+ // binary.LittleEndian.PutUint32(buf[:], z.DictID)
+ // z.checksum.Write(buf[0:4])
+ // }
+
+ // header checksum
+ if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
+ return err
+ }
+ z.Pos++
+ if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
+ return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h)
+ }
+
+ z.Header.done = true
+
+ return nil
+}
+
+// Read decompresses data from the underlying source into the supplied buffer.
+//
+// Since there can be multiple streams concatenated, Header values may
+// change between calls to Read(). If that is the case, no data is actually read from
+// the underlying io.Reader, to allow for potential input buffer resizing.
+//
+// Data is buffered if the input buffer is too small, and exhausted upon successive calls.
+//
+// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is
+// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value.
+func (z *Reader) Read(buf []byte) (n int, err error) {
+ if !z.Header.done {
+ if err = z.readHeader(true); err != nil {
+ return
+ }
+ }
+
+ if len(buf) == 0 {
+ return
+ }
+
+ // exhaust remaining data from previous Read()
+ if len(z.data) > 0 {
+ n = copy(buf, z.data)
+ z.data = z.data[n:]
+ if len(z.data) == 0 {
+ z.data = nil
+ }
+ return
+ }
+
+ // Break up the input buffer into BlockMaxSize blocks with at least one block.
+ // Then decompress into each of them concurrently if possible (no dependency).
+ // In case of dependency, the first block will be missing the window (except on the
+ // very first call), the rest will have it already since it comes from the previous block.
+ wbuf := buf
+ zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize
+ zblocks := make([]block, zn)
+ for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ {
+ zb := &zblocks[zi]
+ // last block may be too small
+ if len(wbuf) < z.BlockMaxSize+len(z.window) {
+ wbuf = make([]byte, z.BlockMaxSize+len(z.window))
+ }
+ copy(wbuf, z.window)
+ if zb.err = z.readBlock(wbuf, zb); zb.err != nil {
+ break
+ }
+ wbuf = wbuf[z.BlockMaxSize:]
+ if !z.BlockDependency {
+ z.wg.Add(1)
+ go z.decompressBlock(zb, &abort)
+ continue
+ }
+ // cannot decompress concurrently when dealing with block dependency
+ z.decompressBlock(zb, nil)
+ // the last block may not contain enough data
+ if len(z.window) == 0 {
+ z.window = make([]byte, winSize)
+ }
+ if len(zb.data) >= winSize {
+ copy(z.window, zb.data[len(zb.data)-winSize:])
+ } else {
+ copy(z.window, z.window[len(zb.data):])
+ copy(z.window[len(zb.data)+1:], zb.data)
+ }
+ }
+ z.wg.Wait()
+
+ // since a block size may be less than BlockMaxSize, trim the decompressed buffers
+ for _, zb := range zblocks {
+ if zb.err != nil {
+ if zb.err == errEndOfBlock {
+ return n, z.close()
+ }
+ return n, zb.err
+ }
+ bLen := len(zb.data)
+ if !z.NoChecksum {
+ z.checksum.Write(zb.data)
+ }
+ m := copy(buf[n:], zb.data)
+ // buffer the remaining data (this is necessarily the last block)
+ if m < bLen {
+ z.data = zb.data[m:]
+ }
+ n += m
+ }
+
+ return
+}
+
+// readBlock reads an entire frame block from the frame.
+// The input buffer is the one that will receive the decompressed data.
+// If the end of the frame is detected, it returns the errEndOfBlock error.
+func (z *Reader) readBlock(buf []byte, b *block) error {
+ var bLen uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil {
+ return err
+ }
+ atomic.AddInt64(&z.Pos, 4)
+
+ switch {
+ case bLen == 0:
+ return errEndOfBlock
+ case bLen&(1<<31) == 0:
+ b.compressed = true
+ b.data = buf
+ b.zdata = make([]byte, bLen)
+ default:
+ bLen = bLen & (1<<31 - 1)
+ if int(bLen) > len(buf) {
+ return fmt.Errorf("lz4.Read: invalid block size: %d", bLen)
+ }
+ b.data = buf[:bLen]
+ b.zdata = buf[:bLen]
+ }
+ if _, err := io.ReadFull(z.src, b.zdata); err != nil {
+ return err
+ }
+
+ if z.BlockChecksum {
+ if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil {
+ return err
+ }
+ xxh := hashPool.Get()
+ defer hashPool.Put(xxh)
+ xxh.Write(b.zdata)
+ if h := xxh.Sum32(); h != b.checksum {
+ return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum)
+ }
+ }
+
+ return nil
+}
+
+// decompressBlock decompresses a frame block.
+// In case of an error, the block err is set with it and abort is set to 1.
+func (z *Reader) decompressBlock(b *block, abort *uint32) {
+ if abort != nil {
+ defer z.wg.Done()
+ }
+ if b.compressed {
+ n := len(z.window)
+ m, err := UncompressBlock(b.zdata, b.data, n)
+ if err != nil {
+ if abort != nil {
+ atomic.StoreUint32(abort, 1)
+ }
+ b.err = err
+ return
+ }
+ b.data = b.data[n : n+m]
+ }
+ atomic.AddInt64(&z.Pos, int64(len(b.data)))
+}
+
+// close validates the frame checksum (if any) and checks the next frame (if any).
+func (z *Reader) close() error {
+ if !z.NoChecksum {
+ var checksum uint32
+ if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil {
+ return err
+ }
+ if checksum != z.checksum.Sum32() {
+ return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum)
+ }
+ }
+
+ // get ready for the next concatenated frame, but do not change the position
+ pos := z.Pos
+ z.Reset(z.src)
+ z.Pos = pos
+
+ // since multiple frames can be concatenated, check for another one
+ return z.readHeader(false)
+}
+
+// Reset discards the Reader's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+ z.Header = Header{}
+ z.Pos = 0
+ z.src = r
+ z.checksum.Reset()
+ z.data = nil
+ z.window = nil
+}
+
+// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer.
+// Returns the number of bytes written.
+func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ cpus := runtime.GOMAXPROCS(0)
+ var buf []byte
+
+ // Since the initial buffer is nil, the first Read will only read the compressed frame options.
+ // The buffer can then be sized appropriately to support maximum concurrency decompression.
+ // If multiple frames are concatenated, Read() will return with no data decompressed but with
+ // potentially changed options. The buffer will be resized accordingly, always trying to
+ // maximize concurrency.
+ for {
+ nsize := 0
+ // the block max size can change if multiple streams are concatenated.
+ // Check it after every Read().
+ if z.BlockDependency {
+ // in case of dependency, we cannot decompress concurrently,
+ // so allocate the minimum buffer + window size
+ nsize = len(z.window) + z.BlockMaxSize
+ } else {
+ // if no dependency, allocate a buffer large enough for concurrent decompression
+ nsize = cpus * z.BlockMaxSize
+ }
+ if nsize != len(buf) {
+ buf = make([]byte, nsize)
+ }
+
+ m, er := z.Read(buf)
+ if er != nil && er != io.EOF {
+ return n, er
+ }
+ m, err = w.Write(buf[:m])
+ n += int64(m)
+ if err != nil || er == io.EOF {
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 0000000..b1b712f
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,377 @@
+package lz4
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "io"
+ "runtime"
+)
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+ Header
+ dst io.Writer
+ checksum hash.Hash32 // frame checksum
+ data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
+ window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer
+
+ zbCompressBuf []byte // buffer for compressing lz4 blocks
+ writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeBlock
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is ok to change it before the first Write but then not until a Reset() is performed.
+func NewWriter(dst io.Writer) *Writer {
+ return &Writer{
+ dst: dst,
+ checksum: hashPool.Get(),
+ Header: Header{
+ BlockMaxSize: 4 << 20,
+ },
+ writeSizeBuf: make([]byte, 4),
+ }
+}
+
+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
+func (z *Writer) writeHeader() error {
+ // Default to 4Mb if BlockMaxSize is not set
+ if z.Header.BlockMaxSize == 0 {
+ z.Header.BlockMaxSize = 4 << 20
+ }
+ // the only option that needs to be validated
+ bSize, ok := bsMapValue[z.Header.BlockMaxSize]
+ if !ok {
+ return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize)
+ }
+
+ // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+ // Size and DictID are optional
+ var buf [19]byte
+
+ // set the fixed size data: magic number, block max size and flags
+ binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+ flg := byte(Version << 6)
+ if !z.Header.BlockDependency {
+ flg |= 1 << 5
+ }
+ if z.Header.BlockChecksum {
+ flg |= 1 << 4
+ }
+ if z.Header.Size > 0 {
+ flg |= 1 << 3
+ }
+ if !z.Header.NoChecksum {
+ flg |= 1 << 2
+ }
+ // if z.Header.Dict {
+ // flg |= 1
+ // }
+ buf[4] = flg
+ buf[5] = bSize << 4
+
+ // current buffer size: magic(4) + flags(1) + block max size (1)
+ n := 6
+ // optional items
+ if z.Header.Size > 0 {
+ binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
+ n += 8
+ }
+ // if z.Header.Dict {
+ // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID)
+ // n += 4
+ // }
+
+ // header checksum includes the flags, block max size and optional Size and DictID
+ z.checksum.Write(buf[4:n])
+ buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF)
+ z.checksum.Reset()
+
+ // header ready, write it out
+ if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
+ return err
+ }
+ z.Header.done = true
+
+ // initialize buffers dependent on header info
+ z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize)
+
+ return nil
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+//
+// If the input buffer is large enough (typically in multiples of BlockMaxSize)
+// the data will be compressed concurrently.
+//
+// Write never buffers any data unless in BlockDependency mode, where it may
+// do so until it has accumulated 64Kb of data, after which it never buffers any.
+func (z *Writer) Write(buf []byte) (n int, err error) {
+ if !z.Header.done {
+ if err = z.writeHeader(); err != nil {
+ return
+ }
+ }
+
+ if len(buf) == 0 {
+ return
+ }
+
+ if !z.NoChecksum {
+ z.checksum.Write(buf)
+ }
+
+ // with block dependency, require at least 64Kb of data to work with
+ // not having 64Kb only matters initially, to set up the first window
+ bl := 0
+ if z.BlockDependency && len(z.window) == 0 {
+ bl = len(z.data)
+ z.data = append(z.data, buf...)
+ if len(z.data) < winSize {
+ return len(buf), nil
+ }
+ buf = z.data
+ z.data = nil
+ }
+
+ // Break up the input buffer into BlockMaxSize blocks, provisioning the leftover block.
+ // Then compress into each of them concurrently if possible (no dependency).
+ var (
+ zb block
+ wbuf = buf
+ zn = len(wbuf) / z.BlockMaxSize
+ zi = 0
+ leftover = len(buf) % z.BlockMaxSize
+ )
+
+loop:
+ for zi < zn {
+ if z.BlockDependency {
+ if zi == 0 {
+ // first block does not have the window
+ zb.data = append(z.window, wbuf[:z.BlockMaxSize]...)
+ zb.offset = len(z.window)
+ wbuf = wbuf[z.BlockMaxSize-winSize:]
+ } else {
+ // set the uncompressed data including the window from previous block
+ zb.data = wbuf[:z.BlockMaxSize+winSize]
+ zb.offset = winSize
+ wbuf = wbuf[z.BlockMaxSize:]
+ }
+ } else {
+ zb.data = wbuf[:z.BlockMaxSize]
+ wbuf = wbuf[z.BlockMaxSize:]
+ }
+
+ goto write
+ }
+
+ // left over
+ if leftover > 0 {
+ zb = block{data: wbuf}
+ if z.BlockDependency {
+ if zn == 0 {
+ zb.data = append(z.window, zb.data...)
+ zb.offset = len(z.window)
+ } else {
+ zb.offset = winSize
+ }
+ }
+
+ leftover = 0
+ goto write
+ }
+
+ if z.BlockDependency {
+ if len(z.window) == 0 {
+ z.window = make([]byte, winSize)
+ }
+ // last buffer may be shorter than the window
+ if len(buf) >= winSize {
+ copy(z.window, buf[len(buf)-winSize:])
+ } else {
+ copy(z.window, z.window[len(buf):])
+ copy(z.window[len(buf)+1:], buf)
+ }
+ }
+
+ return
+
+write:
+ zb = z.compressBlock(zb)
+ _, err = z.writeBlock(zb)
+
+ written := len(zb.data)
+ if bl > 0 {
+ if written >= bl {
+ written -= bl
+ bl = 0
+ } else {
+ bl -= written
+ written = 0
+ }
+ }
+
+ n += written
+ // remove the window in zb.data
+ if z.BlockDependency {
+ if zi == 0 {
+ n -= len(z.window)
+ } else {
+ n -= winSize
+ }
+ }
+ if err != nil {
+ return
+ }
+ zi++
+ goto loop
+}
+
+// compressBlock compresses a block.
+func (z *Writer) compressBlock(zb block) block {
+ // compressed block size cannot exceed the input's
+ var (
+ n int
+ err error
+ zbuf = z.zbCompressBuf
+ )
+ if z.HighCompression {
+ n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
+ } else {
+ n, err = CompressBlock(zb.data, zbuf, zb.offset)
+ }
+
+ // compressible and compressed size smaller than decompressed: ok!
+ if err == nil && n > 0 && len(zb.zdata) < len(zb.data) {
+ zb.compressed = true
+ zb.zdata = zbuf[:n]
+ } else {
+ zb.compressed = false
+ zb.zdata = zb.data[zb.offset:]
+ }
+
+ if z.BlockChecksum {
+ xxh := hashPool.Get()
+ xxh.Write(zb.zdata)
+ zb.checksum = xxh.Sum32()
+ hashPool.Put(xxh)
+ }
+
+ return zb
+}
+
+// writeBlock writes a frame block to the underlying io.Writer (size, data).
+func (z *Writer) writeBlock(zb block) (int, error) {
+ bLen := uint32(len(zb.zdata))
+ if !zb.compressed {
+ bLen |= 1 << 31
+ }
+
+ n := 0
+
+ binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen)
+ n, err := z.dst.Write(z.writeSizeBuf)
+ if err != nil {
+ return n, err
+ }
+
+ m, err := z.dst.Write(zb.zdata)
+ n += m
+ if err != nil {
+ return n, err
+ }
+
+ if z.BlockChecksum {
+ binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum)
+ m, err := z.dst.Write(z.writeSizeBuf)
+ n += m
+
+ if err != nil {
+ return n, err
+ }
+ }
+
+ return n, nil
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// Flush is only required when in BlockDependency mode and the total of
+// data written is less than 64Kb.
+func (z *Writer) Flush() error {
+ if len(z.data) == 0 {
+ return nil
+ }
+
+ zb := z.compressBlock(block{data: z.data})
+ if _, err := z.writeBlock(zb); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+ if !z.Header.done {
+ if err := z.writeHeader(); err != nil {
+ return err
+ }
+ }
+
+ // buffered data for the block dependency window
+ if z.BlockDependency && len(z.data) > 0 {
+ zb := block{data: z.data}
+ if _, err := z.writeBlock(z.compressBlock(zb)); err != nil {
+ return err
+ }
+ }
+
+ if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil {
+ return err
+ }
+ if !z.NoChecksum {
+ if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Reset clears the state of the Writer z such that it is equivalent to its
+// initial state from NewWriter, but instead writing to w.
+// No access to the underlying io.Writer is performed.
+func (z *Writer) Reset(w io.Writer) {
+ z.Header = Header{}
+ z.dst = w
+ z.checksum.Reset()
+ z.data = nil
+ z.window = nil
+}
+
+// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer.
+// Returns the number of bytes read.
+// It does not close the Writer.
+func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ cpus := runtime.GOMAXPROCS(0)
+ buf := make([]byte, cpus*z.BlockMaxSize)
+ for {
+ m, er := io.ReadFull(r, buf)
+ n += int64(m)
+ if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF {
+ if _, err = z.Write(buf[:m]); err != nil {
+ return
+ }
+ if er == nil {
+ continue
+ }
+ return
+ }
+ return n, er
+ }
+}
diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE
new file mode 100644
index 0000000..c1418f3
--- /dev/null
+++ b/vendor/github.com/pierrec/xxHash/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2014, Pierre Curto
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of xxHash nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
new file mode 100644
index 0000000..ff58256
--- /dev/null
+++ b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
@@ -0,0 +1,212 @@
+// Package xxHash32 implements the very fast xxHash hashing algorithm (32-bit version).
+// (https://github.com/Cyan4973/xxHash/)
+package xxHash32
+
+import "hash"
+
+const (
+ prime32_1 = 2654435761
+ prime32_2 = 2246822519
+ prime32_3 = 3266489917
+ prime32_4 = 668265263
+ prime32_5 = 374761393
+)
+
+type xxHash struct {
+ seed uint32
+ v1 uint32
+ v2 uint32
+ v3 uint32
+ v4 uint32
+ totalLen uint64
+ buf [16]byte
+ bufused int
+}
+
+// New returns a new Hash32 instance.
+func New(seed uint32) hash.Hash32 {
+ xxh := &xxHash{seed: seed}
+ xxh.Reset()
+ return xxh
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh xxHash) Sum(b []byte) []byte {
+ h32 := xxh.Sum32()
+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *xxHash) Reset() {
+ xxh.v1 = xxh.seed + prime32_1 + prime32_2
+ xxh.v2 = xxh.seed + prime32_2
+ xxh.v3 = xxh.seed
+ xxh.v4 = xxh.seed - prime32_1
+ xxh.totalLen = 0
+ xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *xxHash) Size() int {
+ return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *xxHash) BlockSize() int {
+ return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *xxHash) Write(input []byte) (int, error) {
+ n := len(input)
+ m := xxh.bufused
+
+ xxh.totalLen += uint64(n)
+
+ r := len(xxh.buf) - m
+ if n < r {
+ copy(xxh.buf[m:], input)
+ xxh.bufused += len(input)
+ return n, nil
+ }
+
+ p := 0
+ if m > 0 {
+ // some data left from previous update
+ copy(xxh.buf[xxh.bufused:], input[:r])
+ xxh.bufused += len(input) - r
+
+ // fast rotl(13)
+ xxh.v1 = rol13(xxh.v1+u32(xxh.buf[:])*prime32_2) * prime32_1
+ xxh.v2 = rol13(xxh.v2+u32(xxh.buf[4:])*prime32_2) * prime32_1
+ xxh.v3 = rol13(xxh.v3+u32(xxh.buf[8:])*prime32_2) * prime32_1
+ xxh.v4 = rol13(xxh.v4+u32(xxh.buf[12:])*prime32_2) * prime32_1
+ p = r
+ xxh.bufused = 0
+ }
+
+ // Causes compiler to work directly from registers instead of stack:
+ v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
+ for n := n - 16; p <= n; p += 16 {
+ sub := input[p:][:16] //BCE hint for compiler
+ v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1
+ v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1
+ v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1
+ v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1
+ }
+ xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
+
+ copy(xxh.buf[xxh.bufused:], input[p:])
+ xxh.bufused += len(input) - p
+
+ return n, nil
+}
+
+// Sum32 returns the 32-bit Hash value.
+func (xxh *xxHash) Sum32() uint32 {
+ h32 := uint32(xxh.totalLen)
+ if xxh.totalLen >= 16 {
+ h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
+ } else {
+ h32 += xxh.seed + prime32_5
+ }
+
+ p := 0
+ n := xxh.bufused
+ for n := n - 4; p <= n; p += 4 {
+ h32 += u32(xxh.buf[p:p+4]) * prime32_3
+ h32 = rol17(h32) * prime32_4
+ }
+ for ; p < n; p++ {
+ h32 += uint32(xxh.buf[p]) * prime32_5
+ h32 = rol11(h32) * prime32_1
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime32_2
+ h32 ^= h32 >> 13
+ h32 *= prime32_3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+// Checksum returns the 32-bit Hash value.
+func Checksum(input []byte, seed uint32) uint32 {
+ n := len(input)
+ h32 := uint32(n)
+
+ if n < 16 {
+ h32 += seed + prime32_5
+ } else {
+ v1 := seed + prime32_1 + prime32_2
+ v2 := seed + prime32_2
+ v3 := seed
+ v4 := seed - prime32_1
+ p := 0
+ for n := n - 16; p <= n; p += 16 {
+ sub := input[p:][:16] //BCE hint for compiler
+ v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1
+ v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1
+ v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1
+ v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1
+ }
+ input = input[p:]
+ n -= p
+ h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ }
+
+ p := 0
+ for n := n - 4; p <= n; p += 4 {
+ h32 += u32(input[p:p+4]) * prime32_3
+ h32 = rol17(h32) * prime32_4
+ }
+ for p < n {
+ h32 += uint32(input[p]) * prime32_5
+ h32 = rol11(h32) * prime32_1
+ p++
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime32_2
+ h32 ^= h32 >> 13
+ h32 *= prime32_3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+func u32(buf []byte) uint32 {
+ // go compiler recognizes this pattern and optimizes it on little endian platforms
+ return uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+}
+
+func rol1(u uint32) uint32 {
+ return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+ return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+ return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+ return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+ return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+ return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+ return u<<18 | u>>14
+}
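+
+// A hedged usage sketch, one-shot and streaming (data, chunk1, chunk2 are
+// assumed []byte values):
+//  sum := Checksum(data, 0) // one-shot with seed 0
+//  h := New(0)              // streaming hash.Hash32
+//  h.Write(chunk1)
+//  h.Write(chunk2)
+//  sum = h.Sum32()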
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
new file mode 100644
index 0000000..83c8f82
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
new file mode 100644
index 0000000..f8b3b2e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+
+script:
+ - ./validate.sh
+
+# this should give us faster builds according to
+# http://docs.travis-ci.com/user/migrating-from-legacy/
+sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 0000000..363fa9e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 0000000..bc2a45a
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,166 @@
+go-metrics
+==========
+
+Go port of Coda Hale's Metrics library: https://github.com/dropwizard/metrics.
+
+Documentation: https://godoc.org/github.com/rcrowley/go-metrics.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := metrics.NewRegistry()
+fg := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not threadsafe. For threadsafe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+**NOTE:** Be sure to unregister short-lived meters and timers otherwise they will
+leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** this has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+ 10e9,
+ "127.0.0.1:8086",
+ "database-name",
+ "username",
+ "password"
+)
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+ 10e9, // interval
+ "example@example.com", // account owner email address
+ "token", // Librato API token
+ "hostname", // source
+ []float64{0.95}, // percentiles to send
+ time.Millisecond, // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but exposed under `/debug/metrics`, which shows a JSON representation of all your usual expvars
+as well as all your go-metrics.
+
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* Librato - https://github.com/mihasya/go-metrics-librato
+* Graphite - https://github.com/cyberdelia/go-metrics-graphite
+* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
+* Ganglia - https://github.com/appscode/metlia
+* Prometheus - https://github.com/deathowl/go-metrics-prometheus
+* DataDog - https://github.com/syntaqx/go-metrics-datadog
+* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 0000000..bb7b039
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+ Clear()
+ Count() int64
+ Dec(int64)
+ Inc(int64)
+ Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewCounter).(Counter)
+}
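+
+// A hedged usage sketch (a nil Registry selects DefaultRegistry):
+//  c := GetOrRegisterCounter("requests.served", nil)
+//  c.Inc(1)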
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+ if UseNilMetrics {
+ return NilCounter{}
+ }
+ return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+ c := NewCounter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+ panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+ panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+ panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+ count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+ atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+ return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+ atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+ atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+ return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 0000000..043ccef
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+ "runtime/debug"
+ "time"
+)
+
+var (
+ debugMetrics struct {
+ GCStats struct {
+ LastGC Gauge
+ NumGC Gauge
+ Pause Histogram
+ //PauseQuantiles Histogram
+ PauseTotal Gauge
+ }
+ ReadGCStats Timer
+ }
+ gcStats debug.GCStats
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called as a goroutine.
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+ for _ = range time.Tick(d) {
+ CaptureDebugGCStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats. This is designed to be called in a background goroutine.
+// Calling this with a registry that has not been passed to
+// RegisterDebugGCStats will panic.
+//
+// Be careful (but much less so) with this because debug.ReadGCStats calls
+// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
+// operation, isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+ lastGC := gcStats.LastGC
+ t := time.Now()
+ debug.ReadGCStats(&gcStats)
+ debugMetrics.ReadGCStats.UpdateSince(t)
+
+ debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+ debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+ if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+ debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+ }
+ //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+ debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
+// i.e. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+ debugMetrics.GCStats.LastGC = NewGauge()
+ debugMetrics.GCStats.NumGC = NewGauge()
+ debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+ //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+ debugMetrics.GCStats.PauseTotal = NewGauge()
+ debugMetrics.ReadGCStats = NewTimer()
+
+ r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+ r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+ r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+ //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+ r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+ r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+}
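+
+// Typical wiring (an illustrative sketch): register first, then capture in
+// the background.
+//
+//	RegisterDebugGCStats(DefaultRegistry)
+//	go CaptureDebugGCStats(DefaultRegistry, 5*time.Second)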
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+ gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 0000000..694a1d0
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,118 @@
+package metrics
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+ Rate() float64
+ Snapshot() EWMA
+ Tick()
+ Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+ if UseNilMetrics {
+ return NilEWMA{}
+ }
+ return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+ return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
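+
+// The constructors above derive alpha from the standard EWMA decay formula
+// alpha = 1 - exp(-tickInterval/window). StandardEWMA.Tick assumes a
+// five-second tick, so tickInterval is 5.0/60.0 minutes and window is the
+// averaging period in minutes (1, 5 or 15).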
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+ panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+ panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick. It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+ uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+ alpha float64
+ rate float64
+ init bool
+ mutex sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ return a.rate * float64(1e9)
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+ return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average. It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+ count := atomic.LoadInt64(&a.uncounted)
+ atomic.AddInt64(&a.uncounted, -count)
+ instantRate := float64(count) / float64(5e9)
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ if a.init {
+ a.rate += a.alpha * (instantRate - a.rate)
+ } else {
+ a.init = true
+ a.rate = instantRate
+ }
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+ atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 0000000..cb57a93
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+ Snapshot() Gauge
+ Update(int64)
+ Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+ c := NewGauge()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+ if UseNilMetrics {
+ return NilGauge{}
+ }
+ return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+ c := NewFunctionalGauge(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
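+
+// A usage sketch (illustrative):
+//
+//	g := NewRegisteredFunctionalGauge("goroutines", nil, func() int64 {
+//		return int64(runtime.NumGoroutine())
+//	})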
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+ panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+ value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+ return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+ atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+ return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge is a Gauge whose value is computed on demand by the given function
+type FunctionalGauge struct {
+ value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+ panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 0000000..6f93920
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,127 @@
+package metrics
+
+import "sync"
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+ Snapshot() GaugeFloat64
+ Update(float64)
+ Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ if nil == r {
+ r = DefaultRegistry
+ }
+	return r.GetOrRegister(name, NewGaugeFloat64).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &StandardGaugeFloat64{
+ value: 0.0,
+ }
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+ c := NewGaugeFloat64()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+ if UseNilMetrics {
+ return NilGaugeFloat64{}
+ }
+ return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+ c := NewFunctionalGaugeFloat64(f)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+ panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// sync.Mutex to manage a single float64 value.
+type StandardGaugeFloat64 struct {
+ mutex sync.Mutex
+ value float64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+ return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ g.value = v
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+ g.mutex.Lock()
+ defer g.mutex.Unlock()
+ return g.value
+}
+
+// FunctionalGaugeFloat64 is a GaugeFloat64 whose value is computed on demand by the given function
+type FunctionalGaugeFloat64 struct {
+ value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+ return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+ panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 0000000..abd0a7d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+ Percentiles []float64 // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ GraphiteWithConfig(GraphiteConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+ })
+}
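+
+// Each flush writes Graphite plaintext-protocol lines of the form
+// "<prefix>.<name>.<stat> <value> <unix-timestamp>", for example
+// (values illustrative):
+//
+//	app.requests.count 42 1510000000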
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := graphite(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+ log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+ return graphite(&c)
+}
+
+func graphite(c *GraphiteConfig) error {
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+ case Gauge:
+ fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles(c.Percentiles)
+ fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+ fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+ fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+ fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+ for psIdx, psKey := range c.Percentiles {
+ key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+ fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+ }
+ fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+ fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+ fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+ fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+ }
+ w.Flush()
+ })
+ return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 0000000..445131c
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+ Check()
+ Error() error
+ Healthy()
+ Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+ if UseNilMetrics {
+ return NilHealthcheck{}
+ }
+ return &StandardHealthcheck{nil, f}
+}
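+
+// A usage sketch (pingDatabase is a hypothetical check function):
+//
+//	hc := NewHealthcheck(func(h Healthcheck) {
+//		if err := pingDatabase(); err != nil {
+//			h.Unhealthy(err)
+//		} else {
+//			h.Healthy()
+//		}
+//	})
+//	hc.Check()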
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+ err error
+ f func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+ h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+ return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+ h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy. The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+ h.err = err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 0000000..dbc837f
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Sample() Sample
+ Snapshot() Histogram
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+ if UseNilMetrics {
+ return NilHistogram{}
+ }
+ return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+ c := NewHistogram(s)
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
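+
+// A usage sketch (illustrative; the sample parameters follow the common
+// 1028/0.015 defaults used elsewhere in this package):
+//
+//	h := GetOrRegisterHistogram("latency", nil, NewExpDecaySample(1028, 0.015))
+//	h.Update(42)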
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+ sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+ panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+ panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+ sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+ return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+ return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+ return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 0000000..174b947
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,31 @@
+package metrics
+
+import (
+ "encoding/json"
+ "io"
+ "time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+ return json.Marshal(r.GetAll())
+}
+
+// WriteJSON writes metrics from the given registry periodically to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+ for _ = range time.Tick(d) {
+ WriteJSONOnce(r, w)
+ }
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+ json.NewEncoder(w).Encode(r)
+}
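+
+// A usage sketch (illustrative): dump the default registry to stdout once a
+// minute.
+//
+//	go WriteJSON(DefaultRegistry, time.Minute, os.Stdout)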
+
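+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the underlying Registry.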
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+ return json.Marshal(p.GetAll())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 0000000..f8074c0
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+ "time"
+)
+
+type Logger interface {
+ Printf(format string, v ...interface{})
+}
+
+func Log(r Registry, freq time.Duration, l Logger) {
+ LogScaled(r, freq, time.Nanosecond, l)
+}
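+
+// A usage sketch (illustrative):
+//
+//	go Log(DefaultRegistry, time.Minute,
+//		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))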
+
+// Output each metric in the given registry periodically using the given
+// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+ du := float64(scale)
+ duSuffix := scale.String()[1:]
+
+ for _ = range time.Tick(freq) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ l.Printf("counter %s\n", name)
+ l.Printf(" count: %9d\n", metric.Count())
+ case Gauge:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ l.Printf("gauge %s\n", name)
+ l.Printf(" value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ l.Printf("healthcheck %s\n", name)
+ l.Printf(" error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("histogram %s\n", name)
+ l.Printf(" count: %9d\n", h.Count())
+ l.Printf(" min: %9d\n", h.Min())
+ l.Printf(" max: %9d\n", h.Max())
+ l.Printf(" mean: %12.2f\n", h.Mean())
+ l.Printf(" stddev: %12.2f\n", h.StdDev())
+ l.Printf(" median: %12.2f\n", ps[0])
+ l.Printf(" 75%%: %12.2f\n", ps[1])
+ l.Printf(" 95%%: %12.2f\n", ps[2])
+ l.Printf(" 99%%: %12.2f\n", ps[3])
+ l.Printf(" 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ l.Printf("meter %s\n", name)
+ l.Printf(" count: %9d\n", m.Count())
+ l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
+ l.Printf(" mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ l.Printf("timer %s\n", name)
+ l.Printf(" count: %9d\n", t.Count())
+ l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
+ l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
+ l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
+ l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
+ l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
+ l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
+ l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
+ l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
+ l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
+ l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
+ l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
+ l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
+ l.Printf(" mean rate: %12.2f\n", t.RateMean())
+ }
+ })
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 0000000..47454f5
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+ time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak: 42604 kB
+VmSize: 42604 kB
+VmLck: 0 kB
+VmHWM: 1120 kB
+VmRSS: 1120 kB
+VmData: 35460 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 36 kB
+VmSwap: 0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+ "fmt"
+ "metrics"
+ "time"
+)
+
+func main() {
+ fmt.Sprintf("foo")
+ metrics.NewRegistry()
+ time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak: 44016 kB
+VmSize: 44016 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak: 55024 kB
+VmSize: 55024 kB
+VmLck: 0 kB
+VmHWM: 12440 kB
+VmRSS: 12440 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1024 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak: 44012 kB
+VmSize: 44012 kB
+VmLck: 0 kB
+VmHWM: 1928 kB
+VmRSS: 1928 kB
+VmData: 36868 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 40 kB
+VmSwap: 0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak: 55020 kB
+VmSize: 55020 kB
+VmLck: 0 kB
+VmHWM: 12432 kB
+VmRSS: 12432 kB
+VmData: 47876 kB
+VmStk: 136 kB
+VmExe: 1020 kB
+VmLib: 1848 kB
+VmPTE: 60 kB
+VmSwap: 0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak: 72272 kB
+VmSize: 72272 kB
+VmLck: 0 kB
+VmHWM: 16204 kB
+VmRSS: 16204 kB
+VmData: 65100 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 80 kB
+VmSwap: 0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 256912 kB
+VmSize: 256912 kB
+VmLck: 0 kB
+VmHWM: 146204 kB
+VmRSS: 146204 kB
+VmData: 249740 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 448 kB
+VmSwap: 0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak: 908112 kB
+VmSize: 908112 kB
+VmLck: 0 kB
+VmHWM: 645832 kB
+VmRSS: 645588 kB
+VmData: 900940 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1716 kB
+VmSwap: 1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak: 62480 kB
+VmSize: 62480 kB
+VmLck: 0 kB
+VmHWM: 11572 kB
+VmRSS: 11572 kB
+VmData: 55308 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 64 kB
+VmSwap: 0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 153296 kB
+VmSize: 153296 kB
+VmLck: 0 kB
+VmHWM: 101176 kB
+VmRSS: 101176 kB
+VmData: 146124 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 240 kB
+VmSwap: 0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak: 557264 kB
+VmSize: 557264 kB
+VmLck: 0 kB
+VmHWM: 501056 kB
+VmRSS: 501056 kB
+VmData: 550092 kB
+VmStk: 136 kB
+VmExe: 1048 kB
+VmLib: 1848 kB
+VmPTE: 1032 kB
+VmSwap: 0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak: 74504 kB
+VmSize: 74504 kB
+VmLck: 0 kB
+VmHWM: 24124 kB
+VmRSS: 24124 kB
+VmData: 67340 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 92 kB
+VmSwap: 0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak: 278920 kB
+VmSize: 278920 kB
+VmLck: 0 kB
+VmHWM: 227300 kB
+VmRSS: 227300 kB
+VmData: 271756 kB
+VmStk: 136 kB
+VmExe: 1040 kB
+VmLib: 1848 kB
+VmPTE: 488 kB
+VmSwap: 0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 0000000..53ff329
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,264 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+ Count() int64
+ Mark(int64)
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Meter
+ Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is no longer
+// needed, to allow for garbage collection.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is no longer needed, to allow for garbage collection.
+func NewMeter() Meter {
+ if UseNilMetrics {
+ return NilMeter{}
+ }
+ m := newStandardMeter()
+ arbiter.Lock()
+ defer arbiter.Unlock()
+ arbiter.meters[m] = struct{}{}
+ if !arbiter.started {
+ arbiter.started = true
+ go arbiter.tick()
+ }
+ return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and
+// launches a goroutine.
+// Be sure to unregister the meter from the registry once it is no longer
+// needed, to allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+ c := NewMeter()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
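+
+// A usage sketch (illustrative):
+//
+//	m := GetOrRegisterMeter("requests", nil)
+//	m.Mark(1)
+//	// Stop the meter when done so the arbiter drops its reference:
+//	m.Stop()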
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+ count int64
+ rate1, rate5, rate15, rateMean float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+ panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+ lock sync.RWMutex
+ snapshot *MeterSnapshot
+ a1, a5, a15 EWMA
+ startTime time.Time
+ stopped bool
+}
+
+func newStandardMeter() *StandardMeter {
+ return &StandardMeter{
+ snapshot: &MeterSnapshot{},
+ a1: NewEWMA1(),
+ a5: NewEWMA5(),
+ a15: NewEWMA15(),
+ startTime: time.Now(),
+ }
+}
+
+// Stop stops the meter; Mark() becomes a no-op once the meter has been stopped.
+func (m *StandardMeter) Stop() {
+ m.lock.Lock()
+ stopped := m.stopped
+ m.stopped = true
+ m.lock.Unlock()
+ if !stopped {
+ arbiter.Lock()
+ delete(arbiter.meters, m)
+ arbiter.Unlock()
+ }
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+ m.lock.RLock()
+ count := m.snapshot.count
+ m.lock.RUnlock()
+ return count
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ if m.stopped {
+ return
+ }
+ m.snapshot.count += n
+ m.a1.Update(n)
+ m.a5.Update(n)
+ m.a15.Update(n)
+ m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+ m.lock.RLock()
+ rate1 := m.snapshot.rate1
+ m.lock.RUnlock()
+ return rate1
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+ m.lock.RLock()
+ rate5 := m.snapshot.rate5
+ m.lock.RUnlock()
+ return rate5
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+ m.lock.RLock()
+ rate15 := m.snapshot.rate15
+ m.lock.RUnlock()
+ return rate15
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+ m.lock.RLock()
+ rateMean := m.snapshot.rateMean
+ m.lock.RUnlock()
+ return rateMean
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+ m.lock.RLock()
+ snapshot := *m.snapshot
+ m.lock.RUnlock()
+ return &snapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+ // should run with write lock held on m.lock
+ snapshot := m.snapshot
+ snapshot.rate1 = m.a1.Rate()
+ snapshot.rate5 = m.a5.Rate()
+ snapshot.rate15 = m.a15.Rate()
+ snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+}
+
+func (m *StandardMeter) tick() {
+ m.lock.Lock()
+ defer m.lock.Unlock()
+ m.a1.Tick()
+ m.a5.Tick()
+ m.a15.Tick()
+ m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set so they can be stopped later.
+type meterArbiter struct {
+ sync.RWMutex
+ started bool
+ meters map[*StandardMeter]struct{}
+ ticker *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+ for {
+ select {
+ case <-ma.ticker.C:
+ ma.tickMeters()
+ }
+ }
+}
+
+func (ma *meterArbiter) tickMeters() {
+ ma.RLock()
+ defer ma.RUnlock()
+ for meter := range ma.meters {
+ meter.tick()
+ }
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 0000000..b97a49e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
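+
+// A usage sketch (illustrative): flip the switch in an init function, before
+// any metrics are constructed.
+//
+//	func init() {
+//		UseNilMetrics = true
+//	}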
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 0000000..266b6c9
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "os"
+ "strings"
+ "time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+ Addr *net.TCPAddr // Network address to connect to
+ Registry Registry // Registry to be exported
+ FlushInterval time.Duration // Flush interval
+ DurationUnit time.Duration // Time conversion unit for durations
+ Prefix string // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+ OpenTSDBWithConfig(OpenTSDBConfig{
+ Addr: addr,
+ Registry: r,
+ FlushInterval: d,
+ DurationUnit: time.Nanosecond,
+ Prefix: prefix,
+ })
+}
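+
+// Each flush emits OpenTSDB telnet-style put lines of the form
+// "put <metric> <timestamp> <value> <tags>", for example
+// (values illustrative):
+//
+//	put app.requests.count 1510000000 42 host=web1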
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes a OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+ for _ = range time.Tick(c.FlushInterval) {
+ if err := openTSDB(&c); nil != err {
+ log.Println(err)
+ }
+ }
+}
+
+func getShortHostname() string {
+ if shortHostName == "" {
+ host, _ := os.Hostname()
+ if index := strings.Index(host, "."); index > 0 {
+ shortHostName = host[:index]
+ } else {
+ shortHostName = host
+ }
+ }
+ return shortHostName
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+ shortHostname := getShortHostname()
+ now := time.Now().Unix()
+ du := float64(c.DurationUnit)
+ conn, err := net.DialTCP("tcp", nil, c.Addr)
+ if nil != err {
+ return err
+ }
+ defer conn.Close()
+ w := bufio.NewWriter(conn)
+ c.Registry.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+ case Gauge:
+ fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case GaugeFloat64:
+ fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+ fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+ fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+ }
+ w.Flush()
+ })
+ return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 0000000..6c0007b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,354 @@
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists. If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+func (err DuplicateMetric) Error() string {
+ return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+ // Call the given function for each registered metric.
+ Each(func(string, interface{}))
+
+ // Get the metric by the given name or nil if none is registered.
+ Get(string) interface{}
+
+ // GetAll metrics in the Registry.
+ GetAll() map[string]map[string]interface{}
+
+ // Gets an existing metric or registers the given one.
+ // The interface can be the metric to register if not found in registry,
+ // or a function returning the metric for lazy instantiation.
+ GetOrRegister(string, interface{}) interface{}
+
+ // Register the given metric under the given name.
+ Register(string, interface{}) error
+
+ // Run all registered healthchecks.
+ RunHealthchecks()
+
+ // Unregister the metric with the given name.
+ Unregister(string)
+
+ // Unregister all metrics. (Mostly for testing.)
+ UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+ metrics map[string]interface{}
+ mutex sync.Mutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+ return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+ for name, i := range r.registered() {
+ f(name, i)
+ }
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if metric, ok := r.metrics[name]; ok {
+ return metric
+ }
+ if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+ i = v.Call(nil)[0].Interface()
+ }
+ r.register(name, i)
+ return i
+}
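+
+// A usage sketch (illustrative):
+//
+//	r := NewRegistry()
+//	c := r.GetOrRegister("hits", NewCounter).(Counter)
+//	c.Inc(1)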
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for _, i := range r.metrics {
+ if h, ok := i.(Healthcheck); ok {
+ h.Check()
+ }
+ }
+}
+
+// GetAll metrics in the Registry
+func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
+ data := make(map[string]map[string]interface{})
+ r.Each(func(name string, i interface{}) {
+ values := make(map[string]interface{})
+ switch metric := i.(type) {
+ case Counter:
+ values["count"] = metric.Count()
+ case Gauge:
+ values["value"] = metric.Value()
+ case GaugeFloat64:
+ values["value"] = metric.Value()
+ case Healthcheck:
+ values["error"] = nil
+ metric.Check()
+ if err := metric.Error(); nil != err {
+ values["error"] = metric.Error().Error()
+ }
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = h.Count()
+ values["min"] = h.Min()
+ values["max"] = h.Max()
+ values["mean"] = h.Mean()
+ values["stddev"] = h.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ case Meter:
+ m := metric.Snapshot()
+ values["count"] = m.Count()
+ values["1m.rate"] = m.Rate1()
+ values["5m.rate"] = m.Rate5()
+ values["15m.rate"] = m.Rate15()
+ values["mean.rate"] = m.RateMean()
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ values["count"] = t.Count()
+ values["min"] = t.Min()
+ values["max"] = t.Max()
+ values["mean"] = t.Mean()
+ values["stddev"] = t.StdDev()
+ values["median"] = ps[0]
+ values["75%"] = ps[1]
+ values["95%"] = ps[2]
+ values["99%"] = ps[3]
+ values["99.9%"] = ps[4]
+ values["1m.rate"] = t.Rate1()
+ values["5m.rate"] = t.Rate5()
+ values["15m.rate"] = t.Rate15()
+ values["mean.rate"] = t.RateMean()
+ }
+ data[name] = values
+ })
+ return data
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ r.stop(name)
+ delete(r.metrics, name)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+	for name := range r.metrics {
+ r.stop(name)
+ delete(r.metrics, name)
+ }
+}
+
+func (r *StandardRegistry) register(name string, i interface{}) error {
+ if _, ok := r.metrics[name]; ok {
+ return DuplicateMetric(name)
+ }
+ switch i.(type) {
+ case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+ r.metrics[name] = i
+ }
+ return nil
+}
+
+func (r *StandardRegistry) registered() map[string]interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ metrics := make(map[string]interface{}, len(r.metrics))
+ for name, i := range r.metrics {
+ metrics[name] = i
+ }
+ return metrics
+}
+
+func (r *StandardRegistry) stop(name string) {
+ if i, ok := r.metrics[name]; ok {
+ if s, ok := i.(Stoppable); ok {
+ s.Stop()
+ }
+ }
+}
+
+// Stoppable is implemented by metrics that have to be stopped.
+type Stoppable interface {
+ Stop()
+}
+
+type PrefixedRegistry struct {
+ underlying Registry
+ prefix string
+}
+
+func NewPrefixedRegistry(prefix string) Registry {
+ return &PrefixedRegistry{
+ underlying: NewRegistry(),
+ prefix: prefix,
+ }
+}
+
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+ return &PrefixedRegistry{
+ underlying: parent,
+ prefix: prefix,
+ }
+}
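+
+// A usage sketch (illustrative): metrics registered through the child land
+// in the parent under the prefixed name.
+//
+//	root := NewRegistry()
+//	db := NewPrefixedChildRegistry(root, "db.")
+//	GetOrRegisterCounter("queries", db) // stored as "db.queries" in root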
+
+// Call the given function for each registered metric.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+	wrappedFn := func(prefix string) func(string, interface{}) {
+		return func(name string, iface interface{}) {
+			if strings.HasPrefix(name, prefix) {
+				fn(name, iface)
+			}
+		}
+	}
+
+ baseRegistry, prefix := findPrefix(r, "")
+ baseRegistry.Each(wrappedFn(prefix))
+}
+
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+ switch r := registry.(type) {
+ case *PrefixedRegistry:
+ return findPrefix(r.underlying, r.prefix+prefix)
+ case *StandardRegistry:
+ return r, prefix
+ }
+ return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+ realName := r.prefix + name
+ return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+ realName := r.prefix + name
+ return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+ realName := r.prefix + name
+ return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+func (r *PrefixedRegistry) RunHealthchecks() {
+ r.underlying.RunHealthchecks()
+}
+
+// GetAll returns all metrics in the Registry.
+func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
+ return r.underlying.GetAll()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+ realName := r.prefix + name
+ r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics. (Mostly for testing.)
+func (r *PrefixedRegistry) UnregisterAll() {
+ r.underlying.UnregisterAll()
+}
+
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+ DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+ return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+ return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name. Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+ return DefaultRegistry.Register(name, i)
+}
+
+// Register the given metric under the given name. Panics if a metric by the
+// given name is already registered.
+func MustRegister(name string, i interface{}) {
+ if err := Register(name, i); err != nil {
+ panic(err)
+ }
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+ DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+ DefaultRegistry.Unregister(name)
+}
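To make the registry surface above concrete, here is a minimal usage sketch. It assumes the package's `Counter` type and `NewCounter` constructor, which live elsewhere in go-metrics and are not part of this hunk:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Lazily construct and register a counter; later calls with the
	// same name return the already-registered metric.
	c := metrics.GetOrRegister("requests", metrics.NewCounter).(metrics.Counter)
	c.Inc(1)

	// A prefixed child registry namespaces everything under "web." while
	// still storing the metrics in the parent registry.
	web := metrics.NewPrefixedChildRegistry(metrics.DefaultRegistry, "web.")
	web.GetOrRegister("hits", metrics.NewCounter).(metrics.Counter).Inc(3)

	// Walk every metric registered so far ("requests" and "web.hits").
	metrics.Each(func(name string, i interface{}) {
		fmt.Println("registered:", name)
	})
}
```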
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 0000000..11c6b78
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,212 @@
+package metrics
+
+import (
+ "runtime"
+ "runtime/pprof"
+ "time"
+)
+
+var (
+ memStats runtime.MemStats
+ runtimeMetrics struct {
+ MemStats struct {
+ Alloc Gauge
+ BuckHashSys Gauge
+ DebugGC Gauge
+ EnableGC Gauge
+ Frees Gauge
+ HeapAlloc Gauge
+ HeapIdle Gauge
+ HeapInuse Gauge
+ HeapObjects Gauge
+ HeapReleased Gauge
+ HeapSys Gauge
+ LastGC Gauge
+ Lookups Gauge
+ Mallocs Gauge
+ MCacheInuse Gauge
+ MCacheSys Gauge
+ MSpanInuse Gauge
+ MSpanSys Gauge
+ NextGC Gauge
+ NumGC Gauge
+ GCCPUFraction GaugeFloat64
+ PauseNs Histogram
+ PauseTotalNs Gauge
+ StackInuse Gauge
+ StackSys Gauge
+ Sys Gauge
+ TotalAlloc Gauge
+ }
+ NumCgoCall Gauge
+ NumGoroutine Gauge
+ NumThread Gauge
+ ReadMemStats Timer
+ }
+ frees uint64
+ lookups uint64
+ mallocs uint64
+ numGC uint32
+ numCgoCalls int64
+
+ threadCreateProfile = pprof.Lookup("threadcreate")
+)
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called as a goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+	for range time.Tick(d) {
+ CaptureRuntimeMemStatsOnce(r)
+ }
+}
+
+// Capture new values for the Go runtime statistics exported in
+// runtime.MemStats. This is designed to be called in a background
+// goroutine. Passing a registry that has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+ t := time.Now()
+ runtime.ReadMemStats(&memStats) // This takes 50-200us.
+ runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+ runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+ runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+ if memStats.DebugGC {
+ runtimeMetrics.MemStats.DebugGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.DebugGC.Update(0)
+ }
+ if memStats.EnableGC {
+ runtimeMetrics.MemStats.EnableGC.Update(1)
+ } else {
+ runtimeMetrics.MemStats.EnableGC.Update(0)
+ }
+
+ runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+ runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+ runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+ runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+ runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+ runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+ runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+ runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+ runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+ runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+ runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+ runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+ runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+ runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+ runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+ runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+ runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+	// memStats.PauseNs is a circular buffer of the most recent GC pause
+	// durations; feed the histogram only the entries recorded since the
+	// last capture.
+ i := numGC % uint32(len(memStats.PauseNs))
+ ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+ if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+ for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ } else {
+ if i > ii {
+ for ; i < uint32(len(memStats.PauseNs)); i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ i = 0
+ }
+ for ; i < ii; i++ {
+ runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+ }
+ }
+ frees = memStats.Frees
+ lookups = memStats.Lookups
+ mallocs = memStats.Mallocs
+ numGC = memStats.NumGC
+
+ runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+ runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+ runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+ runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+ runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+ currentNumCgoCalls := numCgoCall()
+ runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+ numCgoCalls = currentNumCgoCalls
+
+ runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+ runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// Register runtimeMetrics for the Go runtime statistics exported in runtime and
+// specifically runtime.MemStats. The runtimeMetrics are named by their
+// fully-qualified Go symbols, e.g. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+ runtimeMetrics.MemStats.Alloc = NewGauge()
+ runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+ runtimeMetrics.MemStats.DebugGC = NewGauge()
+ runtimeMetrics.MemStats.EnableGC = NewGauge()
+ runtimeMetrics.MemStats.Frees = NewGauge()
+ runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+ runtimeMetrics.MemStats.HeapIdle = NewGauge()
+ runtimeMetrics.MemStats.HeapInuse = NewGauge()
+ runtimeMetrics.MemStats.HeapObjects = NewGauge()
+ runtimeMetrics.MemStats.HeapReleased = NewGauge()
+ runtimeMetrics.MemStats.HeapSys = NewGauge()
+ runtimeMetrics.MemStats.LastGC = NewGauge()
+ runtimeMetrics.MemStats.Lookups = NewGauge()
+ runtimeMetrics.MemStats.Mallocs = NewGauge()
+ runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+ runtimeMetrics.MemStats.MCacheSys = NewGauge()
+ runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+ runtimeMetrics.MemStats.MSpanSys = NewGauge()
+ runtimeMetrics.MemStats.NextGC = NewGauge()
+ runtimeMetrics.MemStats.NumGC = NewGauge()
+ runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+ runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+ runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+ runtimeMetrics.MemStats.StackInuse = NewGauge()
+ runtimeMetrics.MemStats.StackSys = NewGauge()
+ runtimeMetrics.MemStats.Sys = NewGauge()
+ runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+ runtimeMetrics.NumCgoCall = NewGauge()
+ runtimeMetrics.NumGoroutine = NewGauge()
+ runtimeMetrics.NumThread = NewGauge()
+ runtimeMetrics.ReadMemStats = NewTimer()
+
+ r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+ r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+ r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+ r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+ r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+ r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+ r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+ r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+ r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+ r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+ r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+ r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+ r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+ r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+ r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+ r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+ r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+ r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+ r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+ r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+ r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+ r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+ r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+ r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+ r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+ r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+ r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+ r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+ r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+ r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+ r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+}
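A minimal sketch of wiring these runtime collectors together. The metric names match the `r.Register` calls above; `Gauge.Value()` is assumed from the package's gauge implementation, which is not shown in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	// Registration must happen first: capturing on a registry that was
	// never registered panics.
	metrics.RegisterRuntimeMemStats(r)

	// Poll the runtime in the background. Keep the interval coarse;
	// runtime.ReadMemStats stops the world on every call.
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(6 * time.Second)
	if g, ok := r.Get("runtime.MemStats.HeapAlloc").(metrics.Gauge); ok {
		fmt.Println("heap alloc bytes:", g.Value())
	}
}
```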
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 0000000..e3391f4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+ return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 0000000..ca12c05
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 0000000..616a3b4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+func numCgoCall() int64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 0000000..be96aa6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+ return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 0000000..fecee5e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,616 @@
+package metrics
+
+import (
+ "math"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+ Clear()
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Size() int
+ Snapshot() Sample
+ StdDev() float64
+ Sum() int64
+ Update(int64)
+ Values() []int64
+ Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+type ExpDecaySample struct {
+ alpha float64
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ t0, t1 time.Time
+ values *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ s := &ExpDecaySample{
+ alpha: alpha,
+ reservoirSize: reservoirSize,
+ t0: time.Now(),
+ values: newExpDecaySampleHeap(reservoirSize),
+ }
+ s.t1 = s.t0.Add(rescaleThreshold)
+ return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.t0 = time.Now()
+ s.t1 = s.t0.Add(rescaleThreshold)
+ s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+ return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+ return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+ return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+ return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+ return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+ return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+ s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ vals := s.values.Values()
+ values := make([]int64, len(vals))
+ for i, v := range vals {
+ values[i] = v.v
+ }
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+ return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp. This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ if s.values.Size() == s.reservoirSize {
+ s.values.Pop()
+ }
+ s.values.Push(expDecaySample{
+ k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+ v: v,
+ })
+ if t.After(s.t1) {
+ values := s.values.Values()
+ t0 := s.t0
+ s.values.Clear()
+ s.t0 = t
+ s.t1 = s.t0.Add(rescaleThreshold)
+ for _, v := range values {
+ v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+ s.values.Push(v)
+ }
+ }
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var max int64 = math.MinInt64
+ for _, v := range values {
+ if max < v {
+ max = v
+ }
+ }
+ return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+ if 0 == len(values) {
+ return 0
+ }
+ var min int64 = math.MaxInt64
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ }
+ return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+ return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+ scores := make([]float64, len(ps))
+ size := len(values)
+ if size > 0 {
+ sort.Sort(values)
+ for i, p := range ps {
+ pos := p * float64(size+1)
+ if pos < 1.0 {
+ scores[i] = float64(values[0])
+ } else if pos >= float64(size) {
+ scores[i] = float64(values[size-1])
+ } else {
+ lower := float64(values[int(pos)-1])
+ upper := float64(values[int(pos)])
+ scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+ }
+ }
+ }
+ return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+ count int64
+ values []int64
+}
+
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+ return &SampleSnapshot{
+ count: count,
+ values: values,
+ }
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+ panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+ panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+ return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+ var sum int64
+ for _, v := range values {
+ sum += v
+ }
+ return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+ if 0 == len(values) {
+ return 0.0
+ }
+ m := SampleMean(values)
+ var sum float64
+ for _, v := range values {
+ d := float64(v) - m
+ sum += d * d
+ }
+ return sum / float64(len(values))
+}
+
+// UniformSample is a uniform sample using Vitter's Algorithm R.
+type UniformSample struct {
+ count int64
+ mutex sync.Mutex
+ reservoirSize int
+ values []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+ if UseNilMetrics {
+ return NilSample{}
+ }
+ return &UniformSample{
+ reservoirSize: reservoirSize,
+ values: make([]int64, 0, reservoirSize),
+ }
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count = 0
+ s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return &SampleSnapshot{
+ count: s.count,
+ values: values,
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.count++
+ if len(s.values) < s.reservoirSize {
+ s.values = append(s.values, v)
+ } else {
+ r := rand.Int63n(s.count)
+ if r < int64(len(s.values)) {
+ s.values[int(r)] = v
+ }
+ }
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ values := make([]int64, len(s.values))
+ copy(values, s.values)
+ return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+ k float64
+ v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+ return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap
+type expDecaySampleHeap struct {
+ s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+ h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+ n := len(h.s)
+ h.s = h.s[0 : n+1]
+ h.s[n] = s
+ h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+ n := len(h.s) - 1
+ h.s[0], h.s[n] = h.s[n], h.s[0]
+ h.down(0, n)
+
+ n = len(h.s)
+ s := h.s[n-1]
+ h.s = h.s[0 : n-1]
+ return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+ return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+ return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+ for {
+ i := (j - 1) / 2 // parent
+ if i == j || !(h.s[j].k < h.s[i].k) {
+ break
+ }
+ h.s[i], h.s[j] = h.s[j], h.s[i]
+ j = i
+ }
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+ for {
+ j1 := 2*i + 1
+ if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+ break
+ }
+ j := j1 // left child
+ if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+ j = j2 // = 2*i + 2 // right child
+ }
+ if !(h.s[j].k < h.s[i].k) {
+ break
+ }
+ h.s[i], h.s[j] = h.s[j], h.s[i]
+ i = j
+ }
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
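A short sketch of using the sample types above directly; the reservoir size and alpha mirror the constants `NewTimer` uses further down in this diff:

```go
package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Exponentially-decaying reservoir, biased toward recent values.
	s := metrics.NewExpDecaySample(1028, 0.015)
	for i := int64(1); i <= 10000; i++ {
		s.Update(i)
	}

	// Count reflects every update; Size is capped at the reservoir size.
	fmt.Printf("count=%d size=%d\n", s.Count(), s.Size())

	ps := s.Percentiles([]float64{0.5, 0.95, 0.99})
	fmt.Printf("median=%.0f p95=%.0f p99=%.0f\n", ps[0], ps[1], ps[2])
}
```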
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 0000000..693f190
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+ "fmt"
+ "log/syslog"
+ "time"
+)
+
+// Output each metric in the given registry to syslog periodically using
+// the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+	for range time.Tick(d) {
+ r.Each(func(name string, i interface{}) {
+ switch metric := i.(type) {
+ case Counter:
+ w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+ case Gauge:
+ w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+ case GaugeFloat64:
+ w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+ case Healthcheck:
+ metric.Check()
+ w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+ name,
+ h.Count(),
+ h.Min(),
+ h.Max(),
+ h.Mean(),
+ h.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ ))
+ case Meter:
+ m := metric.Snapshot()
+ w.Info(fmt.Sprintf(
+ "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+ name,
+ m.Count(),
+ m.Rate1(),
+ m.Rate5(),
+ m.Rate15(),
+ m.RateMean(),
+ ))
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ w.Info(fmt.Sprintf(
+ "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+ name,
+ t.Count(),
+ t.Min(),
+ t.Max(),
+ t.Mean(),
+ t.StdDev(),
+ ps[0],
+ ps[1],
+ ps[2],
+ ps[3],
+ ps[4],
+ t.Rate1(),
+ t.Rate5(),
+ t.Rate15(),
+ t.RateMean(),
+ ))
+ }
+ })
+ }
+}
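Since `Syslog` ticks forever, callers normally run it in its own goroutine. A minimal sketch (non-Windows only, per the build tag above; `NewCounter` is assumed from elsewhere in the package):

```go
package main

import (
	"log/syslog"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_LOCAL0, "metrics-demo")
	if err != nil {
		panic(err)
	}

	// Passing nil for the registry falls back to the DefaultRegistry.
	c := metrics.GetOrRegister("events", metrics.NewCounter).(metrics.Counter)
	c.Inc(42)

	// Emits one syslog line per metric every interval; blocks forever,
	// so run it in its own goroutine.
	go metrics.Syslog(metrics.DefaultRegistry, 30*time.Second, w)
	time.Sleep(65 * time.Second)
}
```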
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 0000000..d6ec4c6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,329 @@
+package metrics
+
+import (
+ "sync"
+ "time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+ Count() int64
+ Max() int64
+ Mean() float64
+ Min() int64
+ Percentile(float64) float64
+ Percentiles([]float64) []float64
+ Rate1() float64
+ Rate5() float64
+ Rate15() float64
+ RateMean() float64
+ Snapshot() Timer
+ StdDev() float64
+ Stop()
+ Sum() int64
+ Time(func())
+ Update(time.Duration)
+ UpdateSince(time.Time)
+ Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+ if nil == r {
+ r = DefaultRegistry
+ }
+ return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: h,
+ meter: m,
+ }
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredTimer(name string, r Registry) Timer {
+ c := NewTimer()
+ if nil == r {
+ r = DefaultRegistry
+ }
+ r.Register(name, c)
+ return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewTimer() Timer {
+ if UseNilMetrics {
+ return NilTimer{}
+ }
+ return &StandardTimer{
+ histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+ meter: NewMeter(),
+ }
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+ h Histogram
+ m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+ return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Stop is a no-op.
+func (NilTimer) Stop() {}
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+ histogram Histogram
+ meter Meter
+ mutex sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+ return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+ return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+ return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+ return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+ return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+ return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+ return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+ return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ return &TimerSnapshot{
+ histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+ meter: t.meter.Snapshot().(*MeterSnapshot),
+ }
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+ return t.histogram.StdDev()
+}
+
+// Stop stops the meter.
+func (t *StandardTimer) Stop() {
+ t.meter.Stop()
+}
+
+// Sum returns the sum in the sample.
+func (t *StandardTimer) Sum() int64 {
+ return t.histogram.Sum()
+}
+
+// Record the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+ ts := time.Now()
+ f()
+ t.Update(time.Since(ts))
+}
+
+// Record the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(d))
+ t.meter.Mark(1)
+}
+
+// Record the duration of an event that started at a time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.histogram.Update(int64(time.Since(ts)))
+ t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+ return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+ histogram *HistogramSnapshot
+ meter *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+ return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+ return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Stop is a no-op.
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+ panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+ panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+ panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
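A minimal sketch of the timer lifecycle shown above: register, time some work, read the percentiles, and `Stop()` when done so the underlying meter can be garbage collected:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// Passing nil registers the timer on the DefaultRegistry.
	t := metrics.NewRegisteredTimer("db.query", nil)
	defer t.Stop() // release the meter's background ticker

	for i := 0; i < 5; i++ {
		t.Time(func() { time.Sleep(10 * time.Millisecond) })
	}

	// Durations are stored as nanoseconds.
	fmt.Printf("count=%d mean=%.2fms p99=%.2fms\n",
		t.Count(),
		t.Mean()/float64(time.Millisecond),
		t.Percentile(0.99)/float64(time.Millisecond))
}
```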
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100755
index 0000000..c4ae91e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 0000000..091e971
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to the
+// given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+ WriteOnce(r, w)
+ }
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+ var namedMetrics namedMetricSlice
+ r.Each(func(name string, i interface{}) {
+ namedMetrics = append(namedMetrics, namedMetric{name, i})
+ })
+
+ sort.Sort(namedMetrics)
+ for _, namedMetric := range namedMetrics {
+ switch metric := namedMetric.m.(type) {
+ case Counter:
+ fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", metric.Count())
+ case Gauge:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %9d\n", metric.Value())
+ case GaugeFloat64:
+ fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+ fmt.Fprintf(w, " value: %f\n", metric.Value())
+ case Healthcheck:
+ metric.Check()
+ fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+ fmt.Fprintf(w, " error: %v\n", metric.Error())
+ case Histogram:
+ h := metric.Snapshot()
+ ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", h.Count())
+ fmt.Fprintf(w, " min: %9d\n", h.Min())
+ fmt.Fprintf(w, " max: %9d\n", h.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ case Meter:
+ m := metric.Snapshot()
+ fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", m.Count())
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
+ case Timer:
+ t := metric.Snapshot()
+ ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+ fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+ fmt.Fprintf(w, " count: %9d\n", t.Count())
+ fmt.Fprintf(w, " min: %9d\n", t.Min())
+ fmt.Fprintf(w, " max: %9d\n", t.Max())
+ fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
+ fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
+ fmt.Fprintf(w, " median: %12.2f\n", ps[0])
+ fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
+ fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
+ fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
+ fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
+ fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
+ fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
+ fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
+ fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
+ }
+ }
+}
+
+type namedMetric struct {
+ name string
+ m interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+ return nms[i].name < nms[j].name
+}
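A minimal sketch that feeds `WriteOnce` a histogram; `NewHistogram` is assumed from the package's histogram implementation, which is not part of this hunk:

```go
package main

import (
	"os"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// GetOrRegister also accepts a constructor function for lazy
	// instantiation, as documented on the registry above.
	h := metrics.GetOrRegister("payload.bytes",
		func() metrics.Histogram {
			return metrics.NewHistogram(metrics.NewUniformSample(100))
		}).(metrics.Histogram)
	for _, v := range []int64{128, 512, 2048, 4096} {
		h.Update(v)
	}

	// One sorted, human-readable dump of every registered metric.
	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)
}
```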
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 0000000..66be63a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1 @@
+logrus
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 0000000..a23296a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go:
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - tip
+env:
+ - GOMAXPROCS=4 GORACE=halt_on_error=1
+install:
+ - go get github.com/stretchr/testify/assert
+ - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
+ - go get golang.org/x/sys/unix
+ - go get golang.org/x/sys/windows
+script:
+ - go test -race -v ./...
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..cc58f64
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,118 @@
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
new file mode 100644
index 0000000..08584b5
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -0,0 +1,509 @@
+# Logrus
+[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger.
+
+**Seeing weird case-sensitive problems?** In the past it was possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case was decided.
+Everything using `logrus` will need to use the lower-case:
+`github.com/sirupsen/logrus`. Any package that isn't should be changed.
+
+To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Case-sensitivity
+
+The organization's name was changed to lower-case, and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "os"
+ "github.com/sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+	// The API for setting attributes is a little different from the package-level
+	// exported logger. See Godoc.
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, you can send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD,
+or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+	"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+Note: the Syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch |
+| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) |
+| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe) |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/) |
+| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
+| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
+| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
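+
+Writing your own hook only requires implementing the two-method `Hook`
+interface (`Levels() []Level` and `Fire(*Entry) error`). Below is a minimal
+sketch that counts error-level entries; the `ErrorCounterHook` type is made up
+for illustration:
+
+```go
+// ErrorCounterHook counts entries logged at Error level or above.
+type ErrorCounterHook struct {
+  count int
+}
+
+func (h *ErrorCounterHook) Levels() []log.Level {
+  return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
+}
+
+func (h *ErrorCounterHook) Fire(entry *log.Entry) error {
+  h.count++ // not goroutine-safe; guard with a mutex or sync/atomic in real code
+  return nil
+}
+```
+
+Register it like any other hook: `log.AddHook(&ErrorCounterHook{})`.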
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application supports one.
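+
+One possible pattern (a sketch; the `LOG_LEVEL` variable name is just an
+example, and `os` must be imported) is to derive the level from the
+environment with `ParseLevel`:
+
+```go
+if lvl, err := log.ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
+  log.SetLevel(lvl)
+}
+```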
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+ log "github.com/sirupsen/logrus"
+)
+
+func init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(&log.JSONFormatter{})
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(&log.TextFormatter{})
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your own formatter by implementing the `Formatter` interface,
+which requires a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see the Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+func init() {
+  log.SetFormatter(new(MyJSONFormatter))
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the writing end of an `io.Pipe`, and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
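+
+If `info` is not the right severity, `WriterLevel` lets you pick another one;
+a short sketch:
+
+```go
+w := logger.WriterLevel(logrus.ErrorLevel)
+defer w.Close()
+
+srv := http.Server{
+  // stdlib log.Logger whose output logrus will log at error level
+  ErrorLog: log.New(w, "", 0),
+}
+```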
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here refers to the stdlib log package,
+// not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for managing Logrus loggers: you can initialize each logger's level, hook and formatter from a config file, so loggers are generated with different configurations for different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! It simplifies Logrus configuration by reusing some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output anything):
+
+```go
+import (
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/test"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSomething(t *testing.T) {
+ logger, hook := test.NewNullLogger()
+ logger.Error("Helloerror")
+
+ assert.Equal(t, 1, len(hook.Entries))
+ assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+ hook.Reset()
+ assert.Nil(t, hook.LastEntry())
+}
+```
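+
+To assert on messages emitted through the package-level logger instead,
+`test.NewGlobal` attaches the hook to the standard logger; a sketch using the
+same imports as above:
+
+```go
+func TestGlobalLogger(t *testing.T) {
+  hook := test.NewGlobal()
+
+  logrus.Warn("something odd")
+
+  assert.Equal(t, logrus.WarnLevel, hook.LastEntry().Level)
+  hook.Reset()
+}
+```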
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`-level
+message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
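+
+A more complete sketch (the handler body is illustrative):
+
+```go
+package main
+
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func main() {
+  log.RegisterExitHandler(func() {
+    // flush buffers, close connections, etc.
+  })
+
+  // The handler above runs before logrus calls os.Exit(1).
+  log.Fatal("unrecoverable error")
+}
+```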
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes; this mutex is also held while calling hooks and writing the log.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or calling your hooks is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
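+
+A sketch of the second situation (the file name is arbitrary, and `os` must be imported):
+
+```go
+logger := logrus.New()
+
+file, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+if err == nil {
+  logger.Out = file
+  // O_APPEND writes smaller than 4k are atomic on Linux,
+  // so the internal mutex can be skipped.
+  logger.SetNoLock()
+}
+```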
diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8af9063
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler; call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml
new file mode 100644
index 0000000..96c2ce1
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/appveyor.yml
@@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+ GOPATH: c:\gopath
+branches:
+ only:
+ - master
+install:
+ - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+ - go version
+build_script:
+ - go get -t
+ - go test
diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go
new file mode 100644
index 0000000..da67aba
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
new file mode 100644
index 0000000..1fad45e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -0,0 +1,279 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+
+	// When the formatter is called in entry.log(), a Buffer may be set to the entry
+ Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ if err != nil {
+ return "", err
+ }
+ str := string(serialized)
+ return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ entry.Logger.mu.Lock()
+ err := entry.Logger.Hooks.Fire(level, &entry)
+ entry.Logger.mu.Unlock()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+ serialized, err := entry.Logger.Formatter.Format(&entry)
+ entry.Buffer = nil
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ } else {
+ entry.Logger.mu.Lock()
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ entry.Logger.mu.Unlock()
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.level() >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.level() >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.level() >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.level() >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.level() >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.level() >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.level() >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.level() >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.level() >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.level() >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.level() >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.level() >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.level() >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.level() >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.level() >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.level() >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.level() >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.level() >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
new file mode 100644
index 0000000..013183e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+func StandardLogger() *Logger {
+ return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.SetLevel(level)
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.level()
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b183ff5
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const defaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code,
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
+ }
+
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
+ }
+
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers, you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..fb01c1b
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -0,0 +1,79 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
+
+// Default key names for the default fields
+const (
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+// JSONFormatter formats logs into parsable json
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // FieldMap allows users to customize the names of keys for default fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // },
+ // }
+ FieldMap FieldMap
+}
+
+// Format renders a single log entry
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+ prefixFieldClashes(data)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
new file mode 100644
index 0000000..fdaf8a6
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -0,0 +1,323 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it as the default, which is `os.Stderr`. You can also set
+	// this to something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks LevelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged.
+ Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+}
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(LevelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stderr,
+ Formatter: new(TextFormatter),
+ Hooks: make(LevelHooks),
+ Level: InfoLevel,
+ }
+}
+
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infof(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalf(format, args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicf(format, args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debug(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Error(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatal(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panic(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infoln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalln(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+// When a file is opened in append mode, it's safe to write to it
+// concurrently (for messages under 4k on Linux). In these cases the
+// user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+ return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+func (logger *Logger) SetLevel(level Level) {
+ atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+func (logger *Logger) AddHook(hook Hook) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Hooks.Add(hook)
+}
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..dd38999
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint32
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..d7b3893
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+type Termios unix.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..116bcb4
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,19 @@
+// +build !appengine
+
+package logrus
+
+import (
+ "io"
+ "os"
+
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ return terminal.IsTerminal(int(v.Fd()))
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..88d7298
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+type Termios unix.Termios
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..61b21ca
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -0,0 +1,178 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 36
+ gray = 37
+)
+
+var (
+ baseTimestamp time.Time
+)
+
+func init() {
+ baseTimestamp = time.Now()
+}
+
+// TextFormatter formats logs into text
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+
+ // Force disabling colors.
+ DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+ DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+ if entry.Logger != nil {
+ f.isTerminal = checkIfTerminal(entry.Logger.Out)
+ }
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+ var b *bytes.Buffer
+ keys := make([]string, 0, len(entry.Data))
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ prefixFieldClashes(entry.Data)
+
+ f.Do(func() { f.init(entry) })
+
+ isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = defaultTimestampFormat
+ }
+ if isColored {
+ f.printColored(b, entry, keys, timestampFormat)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+ var levelColor int
+ switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
+ }
+}
+
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+ if b.Len() > 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ stringVal, ok := value.(string)
+ if !ok {
+ stringVal = fmt.Sprint(value)
+ }
+
+ if !f.needsQuoting(stringVal) {
+ b.WriteString(stringVal)
+ } else {
+ b.WriteString(fmt.Sprintf("%q", stringVal))
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
new file mode 100644
index 0000000..7bdebed
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -0,0 +1,62 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+
+ switch level {
+ case DebugLevel:
+ printFunc = entry.Debug
+ case InfoLevel:
+ printFunc = entry.Info
+ case WarnLevel:
+ printFunc = entry.Warn
+ case ErrorLevel:
+ printFunc = entry.Error
+ case FatalLevel:
+ printFunc = entry.Fatal
+ case PanicLevel:
+ printFunc = entry.Panic
+ default:
+ printFunc = entry.Print
+ }
+
+ go entry.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ entry.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
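
`WriterLevel` above is the usual bridge for code that only accepts an `io.Writer`. A minimal sketch, assuming a standard-library `log.Logger` as the producer (not part of the vendored file):

```go
package main

import (
	"log"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Lines written to w are scanned by writerScanner and re-logged
	// at WarnLevel; a finalizer closes the pipe if w never is.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()

	// Route the standard library's logger through logrus.
	stdlog := log.New(w, "", 0)
	stdlog.Println("legacy message")
}
```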
diff --git a/vendor/github.com/zr-hebo/util-db/.gitignore b/vendor/github.com/zr-hebo/util-db/.gitignore
new file mode 100644
index 0000000..7ed9c7c
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+.idea/
+*.log
diff --git a/vendor/github.com/zr-hebo/util-db/LICENSE b/vendor/github.com/zr-hebo/util-db/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/zr-hebo/util-db/README.md b/vendor/github.com/zr-hebo/util-db/README.md
new file mode 100644
index 0000000..a863eb4
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/README.md
@@ -0,0 +1,2 @@
+# db_util
+A Go library of database operation utilities.
diff --git a/vendor/github.com/zr-hebo/util-db/conn_info.go b/vendor/github.com/zr-hebo/util-db/conn_info.go
new file mode 100644
index 0000000..d275f89
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/conn_info.go
@@ -0,0 +1,98 @@
+package db
+
+import (
+ "database/sql"
+ "fmt"
+ "reflect"
+ "regexp"
+ "runtime/debug"
+ "strings"
+)
+
+// GetMySQLConnInfoIgnoreErr returns a printable description of the MySQL connection, ignoring any error.
+func GetMySQLConnInfoIgnoreErr(conn *sql.DB) (connInfo string) {
+ connInfo, _ = GetMySQLConnInfo(conn)
+ return
+}
+
+// GetMySQLConnInfo returns a printable description of the MySQL connection ("local <==> remote"), obtained by reflecting on unexported fields of *sql.DB, so it recovers from any panic and reports it as an error.
+func GetMySQLConnInfo(conn *sql.DB) (connInfo string, err error) {
+ defer func() {
+ if panicRecover := recover(); panicRecover != nil {
+ err = fmt.Errorf(
+ "get mysql connection info failed for %v", panicRecover)
+ debug.PrintStack()
+ }
+ }()
+
+ cv := reflect.ValueOf(conn).Elem()
+ // fmt.Printf("%#v\n", cv)
+
+ dsnv := cv.FieldByName("dsn")
+ dsnStr := fmt.Sprint(dsnv)
+ dsnInfo, err := resolveDsn(dsnStr)
+ if err != nil {
+ return
+ }
+
+ fcsv := cv.FieldByName("dep")
+ // fmt.Printf("%#v\n", fcsv)
+
+ lports := make([]string, 0)
+ for _, key := range fcsv.MapKeys() {
+ // fcv := fcsv.Index(0).Elem()
+ depSet := fcsv.MapIndex(key).MapKeys()
+ if len(depSet) < 1 {
+ continue
+ }
+
+ fcv := depSet[0].Elem().Elem()
+ // fmt.Printf("%#v\n", fcv)
+ // fmt.Printf("%#v\n", fcv.Elem())
+
+ mc := fcv.FieldByName("ci").Elem().Elem()
+ // fmt.Printf("%#v", mc)
+
+ nc := mc.FieldByName("netConn").Elem().Elem()
+ // fmt.Printf("%#v", nc)
+
+ ic := nc.FieldByName("conn")
+ // fmt.Printf("%#v", conn)
+
+ fd := ic.FieldByName("fd").Elem()
+ // fmt.Printf("%#v", fd)
+
+ la := fd.FieldByName("laddr").Elem().Elem()
+ // fmt.Printf("%#v", la)
+
+ lport := la.FieldByName("Port")
+
+ lports = append(lports, fmt.Sprint(lport))
+ // fmt.Printf("%#v", lport)
+ }
+
+ connInfo = fmt.Sprintf(
+ "127.0.0.1:%s <==> %s:%s", strings.Join(lports, ", "),
+ dsnInfo["host"], dsnInfo["port"])
+ return
+}
+
+func resolveDsn(dsn string) (info map[string]string, err error) {
+ info = make(map[string]string)
+
+	pattern := regexp.MustCompile(`.*:.*@tcp\((?P<host>[\w.]+):(?P<port>\d+)\)/`)
+
+	match := pattern.FindStringSubmatch(dsn)
+	if match == nil {
+		err = fmt.Errorf("cannot resolve host and port from dsn %s", dsn)
+		return
+	}
+
+	for idx, name := range pattern.SubexpNames() {
+		if idx > 0 && idx < len(match) {
+			info[name] = match[idx]
+		}
+	}
+
+ return
+}
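
A usage sketch for the reflection-based connection description; the DSN and addresses below are illustrative assumptions, and the output pairs local ephemeral ports with the remote endpoint:

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
	db "github.com/zr-hebo/util-db"
)

func main() {
	// Hypothetical DSN; host and port are placeholders.
	conn, err := sql.Open("mysql", "user:passwd@tcp(10.0.0.5:3306)/test")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Force at least one live TCP connection into the pool.
	if err = conn.Ping(); err != nil {
		panic(err)
	}

	// Prints something like "127.0.0.1:54321 <==> 10.0.0.5:3306".
	fmt.Println(db.GetMySQLConnInfoIgnoreErr(conn))
}
```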
diff --git a/vendor/github.com/zr-hebo/util-db/conn_operation.go b/vendor/github.com/zr-hebo/util-db/conn_operation.go
new file mode 100644
index 0000000..1d1eb72
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/conn_operation.go
@@ -0,0 +1,16 @@
+package db
+
+import (
+ "database/sql"
+)
+
+// CloseConnection closes the database connection.
+func CloseConnection(conn *sql.DB) (err error) {
+ err = conn.Close()
+ if err != nil {
+ return err
+ }
+
+ _, err = GetMySQLConnInfo(conn)
+ return
+}
diff --git a/vendor/github.com/zr-hebo/util-db/query_db.go b/vendor/github.com/zr-hebo/util-db/query_db.go
new file mode 100644
index 0000000..574ae53
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/query_db.go
@@ -0,0 +1,358 @@
+package db
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+)
+
+const (
+ dbTypeMysql = "mysql"
+)
+
+// Host describes a database host.
+type Host struct {
+ IP string `json:"ip"`
+ Domain string `json:"domain"`
+ Port int `json:"port"`
+}
+
+// UnanimityHost is a host identified by host name and port.
+type UnanimityHost struct {
+ Host string `json:"host"`
+ Port int `json:"port"`
+}
+
+func (uh *UnanimityHost) String() string {
+ return fmt.Sprintf("%s:%d", uh.Host, uh.Port)
+}
+
+// UnanimityHostWithDomains is a UnanimityHost together with its IP and domain names.
+type UnanimityHostWithDomains struct {
+ UnanimityHost
+ IP string `json:"ip"`
+ Domains []string `json:"domains"`
+}
+
+// MysqlDB describes a MySQL instance.
+type MysqlDB struct {
+ Host
+ UserName string
+ Passwd string
+ DatabaseType string
+ DBName string
+ ConnectTimeout int
+}
+
+// NewMysqlDB creates a MysqlDB with the default database type.
+func NewMysqlDB() (md *MysqlDB) {
+ md = new(MysqlDB)
+ md.DatabaseType = dbTypeMysql
+ return
+}
+
+// NewMysqlDBWithAllParam creates a MysqlDB from the given connection parameters.
+func NewMysqlDBWithAllParam(
+ ip string, port int, userName, passwd, dbName string) (
+ pmd *MysqlDB) {
+ pmd = NewMysqlDB()
+ pmd.IP = ip
+ pmd.Port = port
+ pmd.UserName = userName
+ pmd.Passwd = passwd
+ pmd.DBName = dbName
+
+ return
+}
+
+// getConnection opens a connection pool and verifies it with a short ping.
+func (md *MysqlDB) getConnection() (*sql.DB, error) {
+ connStr := md.fillConnStr()
+
+ stmtDB, err := sql.Open(md.DatabaseType, connStr)
+ if err != nil {
+ if stmtDB != nil {
+ stmtDB.Close()
+ }
+ return nil, err
+ }
+
+	stmtDB.SetMaxOpenConns(0) // 0 means no cap on open connections
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
+ defer cancel()
+	if err := stmtDB.PingContext(ctx); err != nil {
+		stmtDB.Close()
+		return nil, err
+	}
+
+ return stmtDB, nil
+}
+
+// getRealConnection obtains a single dedicated *sql.Conn from a freshly opened pool.
+func (md *MysqlDB) getRealConnection(ctx context.Context) (*sql.Conn, error) {
+ connStr := md.fillConnStr()
+
+ stmtDB, err := sql.Open(md.DatabaseType, connStr)
+ if err != nil {
+ if stmtDB != nil {
+ stmtDB.Close()
+ }
+ return nil, err
+ }
+
+	conn, err := stmtDB.Conn(ctx)
+	if err != nil {
+		stmtDB.Close()
+		return nil, err
+	}
+
+ return conn, nil
+}
+
+type Field struct {
+ Name string
+ Type string
+}
+
+// FieldType returns the normalized column type: "string", "int64", "float64" or "bool"
+func (f *Field) FieldType() string {
+ return f.Type
+}
+
+type QueryRow struct {
+ Fields []Field
+ Record map[string]interface{}
+}
+
+type QueryRows struct {
+ Fields []Field
+ Records []map[string]interface{}
+}
+
+func newQueryRow() *QueryRow {
+ queryRow := new(QueryRow)
+ queryRow.Fields = make([]Field, 0)
+ queryRow.Record = make(map[string]interface{})
+ return queryRow
+}
+
+func newQueryRows() *QueryRows {
+ queryRows := new(QueryRows)
+ queryRows.Fields = make([]Field, 0)
+ queryRows.Records = make([]map[string]interface{}, 0)
+ return queryRows
+}
+
+// QueryRows runs a MySQL query and returns all matching rows.
+func (md *MysqlDB) QueryRows(stmt string) (queryRows *QueryRows, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("query rows on %s:%d failed <-- %s", md.IP, md.Port, err.Error())
+ }
+ }()
+
+ connStr := md.fillConnStr()
+
+ db, err := sql.Open(md.DatabaseType, connStr)
+ if db != nil {
+ defer db.Close()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ rawRows, err := db.Query(stmt)
+ if rawRows != nil {
+ defer rawRows.Close()
+ }
+ if err != nil {
+ return
+ }
+
+ colTypes, err := rawRows.ColumnTypes()
+ if err != nil {
+ return
+ }
+
+ fields := make([]Field, 0, len(colTypes))
+ for _, colType := range colTypes {
+ fields = append(fields, Field{Name: colType.Name(), Type: getDataType(colType.DatabaseTypeName())})
+ }
+
+ queryRows = newQueryRows()
+ queryRows.Fields = fields
+ for rawRows.Next() {
+ receiver := createReceiver(fields)
+ err = rawRows.Scan(receiver...)
+ if err != nil {
+ return
+ }
+
+ queryRows.Records = append(queryRows.Records, getRecordFromReceiver(receiver, fields))
+ }
+ return
+}
+
+func createReceiver(fields []Field) (receiver []interface{}) {
+ receiver = make([]interface{}, 0, len(fields))
+ for _, field := range fields {
+ switch field.Type {
+ case "string":
+ {
+ var val sql.NullString
+ receiver = append(receiver, &val)
+ }
+ case "int64":
+ {
+ var val sql.NullInt64
+ receiver = append(receiver, &val)
+ }
+ case "float64":
+ {
+ var val sql.NullFloat64
+ receiver = append(receiver, &val)
+ }
+ case "bool":
+ {
+ var val sql.NullBool
+ receiver = append(receiver, &val)
+ }
+ default:
+ var val sql.NullString
+ receiver = append(receiver, &val)
+ }
+ }
+
+ return
+}
+
+func getRecordFromReceiver(receiver []interface{}, fields []Field) (record map[string]interface{}) {
+ record = make(map[string]interface{})
+ for idx := 0; idx < len(fields); idx++ {
+ field := fields[idx]
+ value := receiver[idx]
+ switch field.Type {
+ case "string":
+ {
+ nullVal := value.(*sql.NullString)
+ record[field.Name] = nil
+ if nullVal.Valid {
+ record[field.Name] = nullVal.String
+ }
+ }
+ case "int64":
+ {
+ nullVal := value.(*sql.NullInt64)
+ record[field.Name] = nil
+ if nullVal.Valid {
+ record[field.Name] = nullVal.Int64
+ }
+ }
+ case "float64":
+ {
+ nullVal := value.(*sql.NullFloat64)
+ record[field.Name] = nil
+ if nullVal.Valid {
+ record[field.Name] = nullVal.Float64
+ }
+ }
+ case "bool":
+ {
+ nullVal := value.(*sql.NullBool)
+ record[field.Name] = nil
+ if nullVal.Valid {
+ record[field.Name] = nullVal.Bool
+ }
+ }
+ default:
+ nullVal := value.(*sql.NullString)
+ record[field.Name] = nil
+ if nullVal.Valid {
+ record[field.Name] = nullVal.String
+ }
+ }
+ }
+ return
+}
+
+func getDataType(dbColType string) (colType string) {
+ var columnTypeDict = map[string]string{
+ "VARCHAR": "string",
+ "TEXT": "string",
+ "NVARCHAR": "string",
+		"DATETIME": "string", // the driver returns DATETIME as text; scanning it as float64 would fail
+ "DECIMAL": "float64",
+ "BOOL": "bool",
+ "INT": "int64",
+ "BIGINT": "int64",
+ }
+
+ colType, ok := columnTypeDict[dbColType]
+ if ok {
+ return
+ }
+
+ colType = "string"
+ return
+}
+
+// QueryRow runs a MySQL query and returns the first row, or nil if there is none.
+func (md *MysqlDB) QueryRow(stmt string) (row *QueryRow, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("query row failed <-- %s", err.Error())
+ }
+ }()
+
+ queryRows, err := md.QueryRows(stmt)
+ if err != nil {
+ return
+ }
+
+ if len(queryRows.Records) < 1 {
+ return
+ }
+
+ row = newQueryRow()
+ row.Fields = queryRows.Fields
+ row.Record = queryRows.Records[0]
+
+ return
+}
+
+// ExecChange executes a MySQL DML statement with a five-second timeout.
+func (md *MysqlDB) ExecChange(stmt string, args ...interface{}) (
+ result sql.Result, err error) {
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("execute dml failed <-- %s", err.Error())
+ }
+ }()
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ conn, err := md.getRealConnection(ctx)
+ if conn != nil {
+ defer conn.Close()
+ }
+ if err != nil {
+ return
+ }
+
+ result, err = conn.ExecContext(ctx, stmt, args...)
+ return
+}
+
+func (md *MysqlDB) fillConnStr() string {
+ dbServerInfoStr := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s",
+ md.UserName, md.Passwd, md.IP, md.Port, md.DBName)
+ if md.ConnectTimeout > 0 {
+ dbServerInfoStr = fmt.Sprintf("%s?timeout=%ds&readTimeout=%ds&writeTimeout=%ds",
+ dbServerInfoStr, md.ConnectTimeout, md.ConnectTimeout, md.ConnectTimeout)
+ }
+
+ return dbServerInfoStr
+}
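
A usage sketch for the query API; the credentials, host, and table below are assumptions. `QueryRows` hands back records as `map[string]interface{}` holding `string`, `int64`, `float64`, `bool`, or nil for NULL:

```go
package main

import (
	"fmt"

	_ "github.com/go-sql-driver/mysql"
	db "github.com/zr-hebo/util-db"
)

func main() {
	// Hypothetical connection parameters.
	md := db.NewMysqlDBWithAllParam("127.0.0.1", 3306, "user", "passwd", "test")
	md.ConnectTimeout = 3 // seconds; appended to the DSN as timeout/readTimeout/writeTimeout

	rows, err := md.QueryRows("SELECT id, name FROM users LIMIT 10")
	if err != nil {
		panic(err)
	}

	for _, record := range rows.Records {
		fmt.Printf("id=%v name=%v\n", record["id"], record["name"])
	}
}
```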
diff --git a/vendor/github.com/zr-hebo/util-db/query_pooled_db.go b/vendor/github.com/zr-hebo/util-db/query_pooled_db.go
new file mode 100644
index 0000000..051c35d
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/query_pooled_db.go
@@ -0,0 +1,103 @@
+package db
+
+import (
+ "database/sql"
+ "sync"
+ "time"
+)
+
+// PooledMysqlDB is a MysqlDB that caches and reuses a single connection pool.
+type PooledMysqlDB struct {
+ MysqlDB
+ conn *sql.DB
+ lock *sync.Mutex
+}
+
+// NewPooledMysqlDBWithParam creates a PooledMysqlDB from host and credential parameters.
+func NewPooledMysqlDBWithParam(
+ ip string, port int, userName, passwd string) (
+ pmd *PooledMysqlDB) {
+ pmd = NewPooledMysqlDB()
+ pmd.IP = ip
+ pmd.Port = port
+ pmd.UserName = userName
+ pmd.Passwd = passwd
+ pmd.DatabaseType = dbTypeMysql
+
+ return
+}
+
+// NewPooledMysqlDBWithAllParam creates a PooledMysqlDB including the target database name.
+func NewPooledMysqlDBWithAllParam(
+ ip string, port int, userName, passwd, dbName string) (
+ pmd *PooledMysqlDB) {
+ pmd = NewPooledMysqlDB()
+ pmd.IP = ip
+ pmd.Port = port
+ pmd.UserName = userName
+ pmd.Passwd = passwd
+ pmd.DBName = dbName
+
+ return
+}
+
+// NewPooledMysqlDB creates an empty PooledMysqlDB with the default database type.
+func NewPooledMysqlDB() (pmd *PooledMysqlDB) {
+ pmd = new(PooledMysqlDB)
+ pmd.DatabaseType = dbTypeMysql
+ pmd.lock = new(sync.Mutex)
+ return
+}
+
+// CloseConnection closes the cached connection pool, if any.
+func (pmd *PooledMysqlDB) CloseConnection() (err error) {
+ if pmd.conn == nil {
+ return
+ }
+
+	err = pmd.conn.Close()
+	pmd.conn = nil
+	return
+}
+
+// GetConnection returns the cached connection pool, creating it on first use.
+func (pmd *PooledMysqlDB) GetConnection() (conn *sql.DB, err error) {
+	pmd.lock.Lock()
+	defer pmd.lock.Unlock()
+
+ if pmd.conn != nil {
+ conn = pmd.conn
+ return
+ }
+
+ conn, err = pmd.MysqlDB.getConnection()
+ if err != nil {
+ return
+ }
+
+ conn.SetConnMaxLifetime(time.Second * 60 * 30)
+ conn.SetMaxOpenConns(0)
+	if err := conn.Ping(); err != nil {
+		conn.Close()
+		return nil, err
+	}
+ pmd.conn = conn
+
+ return
+}
+
+// ExecChange executes a MySQL DML statement.
+func (pmd *PooledMysqlDB) ExecChange(stmt string, args ...interface{}) (
+ result sql.Result, err error) {
+ return pmd.MysqlDB.ExecChange(stmt, args...)
+}
+
+// QueryRows runs a MySQL query and returns all matching rows.
+func (pmd *PooledMysqlDB) QueryRows(stmt string) (queryRows *QueryRows, err error) {
+	return pmd.MysqlDB.QueryRows(stmt)
+}
+
+// QueryRow runs a MySQL query and returns the first row, or nil if there is none.
+func (pmd *PooledMysqlDB) QueryRow(stmt string) (row *QueryRow, err error) {
+ return pmd.MysqlDB.QueryRow(stmt)
+}
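
The pooled variant differs from `MysqlDB` only in caching the pool behind a mutex, so repeated `GetConnection` calls return the same `*sql.DB`. A minimal sketch under assumed parameters:

```go
package main

import (
	"fmt"

	_ "github.com/go-sql-driver/mysql"
	db "github.com/zr-hebo/util-db"
)

func main() {
	// Hypothetical host and credentials; GetConnection pings the
	// server, so this expects a reachable MySQL instance.
	pmd := db.NewPooledMysqlDBWithParam("127.0.0.1", 3306, "user", "passwd")
	defer pmd.CloseConnection()

	conn1, err := pmd.GetConnection()
	if err != nil {
		panic(err)
	}
	conn2, _ := pmd.GetConnection()
	fmt.Println(conn1 == conn2) // true: the cached pool is reused
}
```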
diff --git a/vendor/github.com/zr-hebo/util-db/scanner.go b/vendor/github.com/zr-hebo/util-db/scanner.go
new file mode 100644
index 0000000..936bec9
--- /dev/null
+++ b/vendor/github.com/zr-hebo/util-db/scanner.go
@@ -0,0 +1,54 @@
+package db
+
+import (
+ "database/sql"
+ "fmt"
+)
+
+// CheckPair describes a user/database pair to be checked against a host.
+type CheckPair struct {
+ Host
+ UserErp string `json:"erp"`
+ DBName string `json:"dbname"`
+}
+
+func (cp *CheckPair) String() string {
+ return fmt.Sprintf(
+ "check if %s can visit %s@%s:%d", cp.UserErp, cp.DBName, cp.IP, cp.Port)
+}
+
+// Scanner reads SQL rows into string receivers.
+type Scanner interface {
+ Scan(*sql.Rows, ...*string) error
+}
+
+type showNullScanner struct {
+}
+
+// NewShowNullScanner returns a Scanner that renders SQL NULL as the literal string "NULL"; receivers passed to Scan must be *string.
+func NewShowNullScanner() (s Scanner) {
+ return new(showNullScanner)
+}
+
+func (ns *showNullScanner) Scan(
+ rows *sql.Rows, receivers ...*string) (err error) {
+ nullReceivers := make([]interface{}, 0, len(receivers))
+ for range receivers {
+ nullReceivers = append(nullReceivers, &sql.NullString{})
+ }
+
+ if err = rows.Scan(nullReceivers...); err != nil {
+ return
+ }
+
+ for i, rv := range nullReceivers {
+ nullReceiver := rv.(*sql.NullString)
+ if nullReceiver.Valid {
+ *receivers[i] = nullReceiver.String
+ } else {
+ *receivers[i] = "NULL"
+ }
+ }
+
+ return
+}
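
A usage sketch for the NULL-tolerant scanner; the table and columns are assumptions. Without it, scanning a NULL column into a plain string errors out; here NULL surfaces as the literal string "NULL":

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
	db "github.com/zr-hebo/util-db"
)

func main() {
	conn, err := sql.Open("mysql", "user:passwd@tcp(127.0.0.1:3306)/test")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	rows, err := conn.Query("SELECT name, nickname FROM users")
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	scanner := db.NewShowNullScanner()
	for rows.Next() {
		var name, nickname string
		// A NULL nickname becomes the string "NULL" instead of
		// failing the Scan.
		if err := scanner.Scan(rows, &name, &nickname); err != nil {
			panic(err)
		}
		fmt.Println(name, nickname)
	}
}
```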