diff --git a/capture/config.go b/capture/config.go
index 328f9c0..f64bc13 100644
--- a/capture/config.go
+++ b/capture/config.go
@@ -1,10 +1,11 @@
 package capture
 
 import (
-	log "github.com/sirupsen/logrus"
-	sd "github.com/zr-hebo/sniffer-agent/session-dealer"
 	"math/rand"
 	"time"
+
+	log "github.com/golang/glog"
+	sd "github.com/zr-hebo/sniffer-agent/session-dealer"
 )
 
 var (
@@ -21,7 +22,10 @@ func init() {
 	}
 
 	localIPAddr = &ipAddr
-	log.Infof("parsed local ip address:%s", *localIPAddr)
 
 	rand.Seed(time.Now().UnixNano())
 }
+
+func ShowLocalIP() {
+	log.Infof("parsed local ip address:%s", *localIPAddr)
+}
diff --git a/capture/model.go b/capture/model.go
new file mode 100644
index 0000000..d6adc94
--- /dev/null
+++ b/capture/model.go
@@ -0,0 +1,11 @@
+package capture
+
+import (
+	"github.com/google/gopacket/layers"
+)
+
+type TCPIPPair struct {
+	srcIP  string
+	dstIP  string
+	tcpPkt *layers.TCP
+}
diff --git a/capture/network.go b/capture/network.go
index d855959..7cd01f6 100644
--- a/capture/network.go
+++ b/capture/network.go
@@ -4,14 +4,13 @@ import (
 	"bufio"
 	"bytes"
 	"flag"
-	"fmt"
 	"math/rand"
 	"time"
 
+	log "github.com/golang/glog"
 	"github.com/google/gopacket"
 	"github.com/google/gopacket/layers"
 	pp "github.com/pires/go-proxyproto"
-	log "github.com/sirupsen/logrus"
 	"github.com/zr-hebo/sniffer-agent/communicator"
 	"github.com/zr-hebo/sniffer-agent/model"
 	sd "github.com/zr-hebo/sniffer-agent/session-dealer"
@@ -57,14 +56,34 @@ func (nc *networkCard) Listen() (receiver chan model.QueryPiece) {
 // Listen get a connection.
 func (nc *networkCard) listenNormal() {
 	go func() {
+		dealTCPPacket := func(srcIP, dstIP string, tcpPkt *layers.TCP, capturePacketRate float64) {
+			// forward FIN packets so that incomplete sessions can still be released
+			// deal FIN packet
+			if tcpPkt.FIN {
+				nc.parseTCPPackage(srcIP, dstIP, tcpPkt, nil)
+				return
+			}
+
+			// deal auth packet
+			if sd.IsAuthPacket(tcpPkt.Payload) {
+				authHeader, _ := pp.Read(bufio.NewReader(bytes.NewReader(tcpPkt.Payload)))
+				nc.parseTCPPackage(srcIP, dstIP, tcpPkt, authHeader)
+				return
+			}
+
+			if 0 < capturePacketRate && capturePacketRate < 1.0 {
+				// capture rate is below 1.0: randomly drop a fraction of packets
+				rn := rand.Float64()
+				if rn > capturePacketRate {
+					return
+				}
+			}
+
+			nc.parseTCPPackage(srcIP, dstIP, tcpPkt, nil)
+		}
+
 		aliveCounter := 0
-		handler := initEthernetHandlerFromPacp()
-
-		for {
-			var data []byte
-			var ci gopacket.CaptureInfo
-			var err error
-
+		dealTCPIPPacket := func(tcpIPPkt *TCPIPPair) {
 			// capture packets according to a certain probability
 			capturePacketRate := communicator.GetTCPCapturePacketRate()
 			if capturePacketRate <= 0 {
@@ -74,53 +93,19 @@ func (nc *networkCard) listenNormal() {
 					aliveCounter = 0
 					nc.receiver <- model.NewBaseQueryPiece(localIPAddr, nc.listenPort, capturePacketRate)
 				}
-				continue
+
+			} else {
+				dealTCPPacket(tcpIPPkt.srcIP, tcpIPPkt.dstIP, tcpIPPkt.tcpPkt, capturePacketRate)
 			}
-
-			data, ci, err = handler.ZeroCopyReadPacketData()
-			if err != nil {
-				log.Error(err.Error())
-				time.Sleep(time.Second * 3)
-				continue
-			}
-
-			packet := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
-			// packet := gopacket.NewPacket(data, handler.LinkType(), gopacket.NoCopy)
-			m := packet.Metadata()
-			m.CaptureInfo = ci
-
-			tcpPkt := packet.TransportLayer().(*layers.TCP)
-			// send FIN tcp packet to avoid not complete session cannot be released
-			// deal FIN packet
-			if tcpPkt.FIN {
-				nc.parseTCPPackage(packet, nil)
-				continue
-			}
-
-			// deal auth packet
-			if sd.IsAuthPacket(tcpPkt.Payload) {
-				authHeader, _ := pp.Read(bufio.NewReader(bytes.NewReader(tcpPkt.Payload)))
-				nc.parseTCPPackage(packet, authHeader)
-				continue
-			}
-
-			if 0 < capturePacketRate && capturePacketRate < 1.0 {
-				// fall into throw range
-				rn := rand.Float64()
-				if rn > capturePacketRate {
-					continue
-				}
-			}
-
-			aliveCounter = 0
-			nc.parseTCPPackage(packet, nil)
 		}
+
+		dealEachTCPIPPacket(dealTCPIPPacket)
 	}()
 
 	return
 }
 
-func (nc *networkCard) parseTCPPackage(packet gopacket.Packet, authHeader *pp.Header) {
+func (nc *networkCard) parseTCPPackage(srcIP, dstIP string, tcpPkt *layers.TCP, authHeader *pp.Header) {
 	var err error
 	defer func() {
 		if err != nil {
@@ -128,25 +113,10 @@ func (nc *networkCard) parseTCPPackage(packet gopacket.Packet, authHeader *pp.He
 		}
 	}()
 
-	tcpPkt := packet.TransportLayer().(*layers.TCP)
 	if tcpPkt.SYN || tcpPkt.RST {
 		return
 	}
 
-	ipLayer := packet.Layer(layers.LayerTypeIPv4)
-	if ipLayer == nil {
-		err = fmt.Errorf("no ip layer found in package")
-		return
-	}
-
-	ipInfo, ok := ipLayer.(*layers.IPv4)
-	if !ok {
-		err = fmt.Errorf("parsed no ip address")
-		return
-	}
-
-	srcIP := ipInfo.SrcIP.String()
-	dstIP := ipInfo.DstIP.String()
 	srcPort := int(tcpPkt.SrcPort)
 	dstPort := int(tcpPkt.DstPort)
 
@@ -231,7 +201,7 @@ func readToServerPackage(
 			session.Close()
 			delete(sessionPool, *sessionKey)
 		}
-		log.Debugf("close connection from %s", *sessionKey)
+		log.Infof("close connection from %s", *sessionKey)
 		return
 	}
 
diff --git a/capture/pacp_linux.go b/capture/pacp_linux.go
index 8a4f128..526604a 100644
--- a/capture/pacp_linux.go
+++ b/capture/pacp_linux.go
@@ -1,17 +1,21 @@
+//go:build linux
 // +build linux
 
 package capture
 
 import (
 	"fmt"
+	"time"
 
+	log "github.com/golang/glog"
+	"github.com/google/gopacket"
 	"github.com/google/gopacket/layers"
 	"github.com/google/gopacket/pcap"
 	"github.com/google/gopacket/pcapgo"
 	"golang.org/x/net/bpf"
 )
 
-func initEthernetHandlerFromPacp() (handler PcapHandler) {
+func initEthernetHandlerFromPacp() (pcapgoHandler *pcapgo.EthernetHandle) {
 	pcapgoHandler, err := pcapgo.NewEthernetHandle(DeviceName)
 	if err != nil {
 		panic(fmt.Sprintf("cannot open network interface %s <-- %s", DeviceName, err.Error()))
@@ -40,6 +44,58 @@ func initEthernetHandlerFromPacp() (handler PcapHandler) {
 	}
 
 	_ = pcapgoHandler.SetCaptureLength(65536)
-	handler = pcapgoHandler
 	return
 }
+
+func dealEachTCPIPPacket(dealTCPIPPacket func(tcpIPPkt *TCPIPPair)) {
+	handler := initEthernetHandlerFromPacp()
+	defer func() {
+		handler.Close()
+	}()
+
+	for {
+		var ci gopacket.CaptureInfo
+		data, ci, err := handler.ZeroCopyReadPacketData()
+		if err != nil {
+			log.Error(err.Error())
+			time.Sleep(time.Second * 3)
+			continue
+		}
+
+		packet := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
+		m := packet.Metadata()
+		m.CaptureInfo = ci
+
+		tcpPkt, ok := packet.TransportLayer().(*layers.TCP)
+		if !ok {
+			continue
+		}
+
+		ipLayer := packet.NetworkLayer()
+		if ipLayer == nil {
+			log.Error("no ip layer found in package")
+			continue
+		}
+
+		var srcIP, dstIP string
+		switch realIPLayer := ipLayer.(type) {
+		case *layers.IPv6:
+			{
+				srcIP = realIPLayer.SrcIP.String()
+				dstIP = realIPLayer.DstIP.String()
+			}
+		case *layers.IPv4:
+			{
+				srcIP = realIPLayer.SrcIP.String()
+				dstIP = realIPLayer.DstIP.String()
+			}
+		}
+
+		tcpipPair := &TCPIPPair{
+			srcIP:  srcIP,
+			dstIP:  dstIP,
+			tcpPkt: tcpPkt,
+		}
+		dealTCPIPPacket(tcpipPair)
+	}
+}
diff --git a/capture/pacp_macos.go b/capture/pacp_macos.go
index d2703c4..a0de85b 100644
--- a/capture/pacp_macos.go
+++ b/capture/pacp_macos.go
@@ -1,15 +1,20 @@
+//go:build darwin
 // +build darwin
 
 package capture
 
 import (
 	"fmt"
+	"time"
 
+	log "github.com/golang/glog"
+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
 	"github.com/google/gopacket/pcap"
 )
 
 // in online use, we found a strange bug: pcap cost 100% core CPU and memory increase along
-func initEthernetHandlerFromPacp() (handler PcapHandler) {
+func initEthernetHandlerFromPacp() (pcapHandler *pcap.Handle) {
 	pcapHandler, err := pcap.OpenLive(DeviceName, 65536, false, pcap.BlockForever)
 	if err != nil {
 		panic(fmt.Sprintf("cannot open network interface %s <-- %s", DeviceName, err.Error()))
@@ -20,6 +25,58 @@ func initEthernetHandlerFromPacp() (handler PcapHandler) {
 		panic(err.Error())
 	}
 
-	handler = pcapHandler
 	return
 }
+
+func dealEachTCPIPPacket(dealTCPIPPacket func(tcpIPPkt *TCPIPPair)) {
+	handler := initEthernetHandlerFromPacp()
+	defer func() {
+		handler.Close()
+	}()
+
+	for {
+		var ci gopacket.CaptureInfo
+		data, ci, err := handler.ZeroCopyReadPacketData()
+		if err != nil {
+			log.Error(err.Error())
+			time.Sleep(time.Second * 3)
+			continue
+		}
+
+		packet := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
+		m := packet.Metadata()
+		m.CaptureInfo = ci
+
+		tcpPkt, ok := packet.TransportLayer().(*layers.TCP)
+		if !ok {
+			continue
+		}
+
+		ipLayer := packet.NetworkLayer()
+		if ipLayer == nil {
+			log.Error("no ip layer found in package")
+			continue
+		}
+
+		var srcIP, dstIP string
+		switch realIPLayer := ipLayer.(type) {
+		case *layers.IPv6:
+			{
+				srcIP = realIPLayer.SrcIP.String()
+				dstIP = realIPLayer.DstIP.String()
+			}
+		case *layers.IPv4:
+			{
+				srcIP = realIPLayer.SrcIP.String()
+				dstIP = realIPLayer.DstIP.String()
+			}
+		}
+
+		tcpipPair := &TCPIPPair{
+			srcIP:  srcIP,
+			dstIP:  dstIP,
+			tcpPkt: tcpPkt,
+		}
+		dealTCPIPPacket(tcpipPair)
+	}
+}
diff --git a/capture/pacp_windows.go b/capture/pacp_windows.go
new file mode 100644
index 0000000..03783ae
--- /dev/null
+++ b/capture/pacp_windows.go
@@ -0,0 +1,81 @@
+//go:build windows
+// +build windows
+
+package capture
+
+import (
+	"fmt"
+	"time"
+
+	log "github.com/golang/glog"
+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
+	"github.com/google/gopacket/pcap"
+)
+
+var (
+	handler *pcap.Handle
+)
+
+func initEthernetHandlerFromPacp() (pcapHandler *pcap.Handle) {
+	devices, err := pcap.FindAllDevs()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, device := range devices {
+		log.Infof("found Windows device:'%s', device info:%s", device.Name, device.Description)
+	}
+
+	pcapHandler, err = pcap.OpenLive(DeviceName, 1024, false, time.Hour*24)
+	if err != nil {
+		panic(fmt.Sprintf("cannot open network interface %s <-- %s", DeviceName, err.Error()))
+	}
+
+	return
+}
+
+func dealEachTCPIPPacket(dealTCPIPPacket func(tcpIPPkt *TCPIPPair)) {
+	handler = initEthernetHandlerFromPacp()
+	defer handler.Close()
+	packetSource := gopacket.NewPacketSource(handler, handler.LinkType())
+	for packet := range packetSource.Packets() {
+		if err := packet.ErrorLayer(); err != nil {
+			log.Error(err.Error())
+			continue
+		}
+
+		// Process packet here
+		tcpLayer := packet.Layer(layers.LayerTypeTCP)
+		if tcpLayer == nil {
+			continue
+		}
+		tcpPkt := tcpLayer.(*layers.TCP)
+		if int(tcpPkt.SrcPort) != snifferPort && int(tcpPkt.DstPort) != snifferPort {
+			continue
+		}
+
+		var srcIP, dstIP string
+		ipLayer := packet.NetworkLayer()
+		switch realIPLayer := ipLayer.(type) {
+		case *layers.IPv6:
+			{
+				srcIP = realIPLayer.SrcIP.String()
+				dstIP = realIPLayer.DstIP.String()
+			}
+		case *layers.IPv4:
+			{
+				srcIP = realIPLayer.SrcIP.String()
+				dstIP = realIPLayer.DstIP.String()
+			}
+		}
+
+		tcpipPair := &TCPIPPair{
+			srcIP:  srcIP,
+			dstIP:  dstIP,
+			tcpPkt: tcpPkt,
+		}
+		dealTCPIPPacket(tcpipPair)
+	}
+	return
+}
diff --git a/communicator/config.go b/communicator/config.go
index 0a1fe05..d844a3e 100644
--- a/communicator/config.go
+++ b/communicator/config.go
@@ -2,9 +2,10 @@ package communicator
 
 import (
 	"flag"
-	"github.com/gorilla/mux"
 	_ "net/http/pprof"
 	"sync"
+
+	"github.com/gorilla/mux"
 )
 
 const (
@@ -30,9 +31,6 @@ func init() {
 	flag.IntVar(&communicatePort, "communicate_port", 8088, "http server port. Default is 8088")
 	flag.Float64Var(&catpurePacketRateVal, CAPTURE_PACKET_RATE, 1.0, "capture packet rate. Default is 1.0")
 
-	if err := catpurePacketRate.setVal(catpurePacketRateVal); err != nil {
-		panic(err.Error())
-	}
 	configMap = make(map[string]configItem)
 	regsiterConfig()
 }
diff --git a/exporter/kafka.go b/exporter/kafka.go
index af82717..ceab564 100644
--- a/exporter/kafka.go
+++ b/exporter/kafka.go
@@ -6,7 +6,7 @@ import (
 	"strings"
 
 	"github.com/Shopify/sarama"
-	log "github.com/sirupsen/logrus"
+	log "github.com/golang/glog"
 	"github.com/zr-hebo/sniffer-agent/model"
 )
 
diff --git a/main.go b/main.go
index a05574d..79396fb 100644
--- a/main.go
+++ b/main.go
@@ -2,43 +2,28 @@ package main
 
 import (
 	"flag"
-	"fmt"
-	sd "github.com/zr-hebo/sniffer-agent/session-dealer"
-	"github.com/zr-hebo/sniffer-agent/session-dealer/mysql"
 	"os"
 
-	log "github.com/sirupsen/logrus"
+	log "github.com/golang/glog"
 	"github.com/zr-hebo/sniffer-agent/capture"
 	"github.com/zr-hebo/sniffer-agent/communicator"
 	"github.com/zr-hebo/sniffer-agent/exporter"
+	sd "github.com/zr-hebo/sniffer-agent/session-dealer"
+	"github.com/zr-hebo/sniffer-agent/session-dealer/mysql"
 )
 
 var (
 	logLevel string
 )
 
-func init()  {
+func init() {
 	flag.StringVar(&logLevel, "log_level", "warn", "log level. Default is info")
 }
 
-func initLog()  {
-	log.SetFormatter(&log.TextFormatter{})
-	log.SetOutput(os.Stdout)
-	switch logLevel {
-	case "debug":
-		log.SetLevel(log.DebugLevel)
-	case "info":
-		log.SetLevel(log.InfoLevel)
-	case "warn":
-		log.SetLevel(log.WarnLevel)
-	case "error":
-		log.SetLevel(log.ErrorLevel)
-	default:
-		panic(fmt.Sprintf("cannot set log level:%s, there have four types can set: debug, info, warn, error", logLevel))
-	}
+func initLog() {
 }
 
-func main()  {
+func main() {
 	flag.Parse()
 	prepareEnv()
 
@@ -46,7 +31,7 @@ func main()  {
 	mainServer()
 }
 
-func mainServer()  {
+func mainServer() {
 	ept := exporter.NewExporter()
 	networkCard := capture.NewNetworkCard()
 	log.Info("begin listen")
@@ -62,8 +47,9 @@ func mainServer()  {
 	os.Exit(1)
 }
 
-func prepareEnv()  {
+func prepareEnv() {
 	initLog()
 	sd.CheckParams()
 	mysql.PrepareEnv()
-}
\ No newline at end of file
+	capture.ShowLocalIP()
+}
diff --git a/session-dealer/mysql/session.go b/session-dealer/mysql/session.go
index b6cf37b..1141975 100644
--- a/session-dealer/mysql/session.go
+++ b/session-dealer/mysql/session.go
@@ -5,8 +5,8 @@ import (
 	"sync"
 	"time"
 
+	log "github.com/golang/glog"
 	"github.com/pingcap/tidb/util/hack"
-	log "github.com/sirupsen/logrus"
 	"github.com/zr-hebo/sniffer-agent/communicator"
 	"github.com/zr-hebo/sniffer-agent/model"
 )
@@ -171,7 +171,7 @@ func (ms *MysqlSession) readFromClient(seqID int64, bytes []byte) {
 		ms.endSeqID = seqID
 
 		if int64(ms.expectReceiveSize) < int64(len(contents)) {
-			log.Debug("receive invalid mysql packet")
+			log.Warning("receive invalid mysql packet")
 			return
 		}
 
@@ -186,13 +186,13 @@ func (ms *MysqlSession) readFromClient(seqID int64, bytes []byte) {
 		}
 
 		if ms.beginSeqID == -1 {
-			log.Debug("cover range is empty")
+			log.Info("cover range is empty")
 			return
 		}
 
 		if seqID < ms.beginSeqID {
 			// out date packet
-			log.Debugf("in session %s get outdate package with Seq:%d, beginSeq:%d",
+			log.Infof("in session %s get outdate package with Seq:%d, beginSeq:%d",
 				*ms.connectionID, seqID, ms.beginSeqID)
 			return
 		}
@@ -200,7 +200,7 @@ func (ms *MysqlSession) readFromClient(seqID int64, bytes []byte) {
 		seqOffset := seqID - ms.beginSeqID
 		if seqOffset+contentSize > int64(len(ms.cachedStmtBytes)) {
 			// not in a normal mysql packet
-			log.Debugf("receive an unexpect packet")
+			log.Info("receive an unexpect packet")
 			ms.clear()
 			return
 		}
@@ -225,12 +225,12 @@ func (ms *MysqlSession) GenerateQueryPiece() (qp model.QueryPiece) {
 	}
 
 	if !ms.checkFinish() {
-		log.Debugf("receive a not complete cover")
+		log.Warning("receive a not complete cover")
 		return
 	}
 
 	if len(ms.cachedStmtBytes) > maxSQLLen {
-		log.Warn("sql in cache is too long, ignore it")
+		log.Warning("sql in cache is too long, ignore it")
 		return
 	}
 
@@ -275,7 +275,7 @@ func (ms *MysqlSession) GenerateQueryPiece() (qp model.QueryPiece) {
 			querySQL := hack.String(querySQLInBytes)
 			mqp.QuerySQL = &querySQL
 			ms.cachedPrepareStmt[ms.prepareInfo.prepareStmtID] = querySQLInBytes
-			log.Debugf("prepare statement %s, get id:%d", querySQL, ms.prepareInfo.prepareStmtID)
+			log.Infof("prepare statement %s, get id:%d", querySQL, ms.prepareInfo.prepareStmtID)
 
 		case ComStmtExecute:
 			prepareStmtID := bytesToInt(ms.cachedStmtBytes[1:5])
@@ -293,7 +293,7 @@ func (ms *MysqlSession) GenerateQueryPiece() (qp model.QueryPiece) {
 		case ComStmtClose:
 			prepareStmtID := bytesToInt(ms.cachedStmtBytes[1:5])
 			delete(ms.cachedPrepareStmt, prepareStmtID)
-			log.Debugf("remove prepare statement:%d", prepareStmtID)
+			log.Infof("remove prepare statement:%d", prepareStmtID)
 
 		default:
 			return
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README
new file mode 100644
index 0000000..387b4eb
--- /dev/null
+++ b/vendor/github.com/golang/glog/README
@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+	https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+	Package glog implements logging analogous to the Google-internal
+	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
+	Error, Fatal, plus formatting variants such as Infof. It
+	also provides V-style logging controlled by the -v and
+	-vmodule=file=2 flags.
+	
+	Basic examples:
+	
+		glog.Info("Prepare to repel boarders")
+	
+		glog.Fatalf("Initialization failed: %s", err)
+	
+	See the documentation for the V function for an explanation
+	of these examples:
+	
+		if glog.V(2) {
+			glog.Info("Starting transaction...")
+		}
+	
+		glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
new file mode 100644
index 0000000..54bd7af
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog.go
@@ -0,0 +1,1180 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+//	glog.Info("Prepare to repel boarders")
+//
+//	glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+//	if glog.V(2) {
+//		glog.Info("Starting transaction...")
+//	}
+//
+//	glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+//	-logtostderr=false
+//		Logs are written to standard error instead of to files.
+//	-alsologtostderr=false
+//		Logs are written to standard error as well as to files.
+//	-stderrthreshold=ERROR
+//		Log events at or above this severity are logged to standard
+//		error as well as to files.
+//	-log_dir=""
+//		Log files will be written to this directory instead of the
+//		default temporary directory.
+//
+//	Other flags provide aids to debugging.
+//
+//	-log_backtrace_at=""
+//		When set to a file and line number holding a logging statement,
+//		such as
+//			-log_backtrace_at=gopherflakes.go:234
+//		a stack trace will be written to the Info log whenever execution
+//		hits that statement. (Unlike with -vmodule, the ".go" must be
+//		present.)
+//	-v=0
+//		Enable V-leveled logging at the specified level.
+//	-vmodule=""
+//		The syntax of the argument is a comma-separated list of pattern=N,
+//		where pattern is a literal file name (minus the ".go" suffix) or
+//		"glob" pattern and N is a V level. For instance,
+//			-vmodule=gopher*=3
+//		sets the V level to 3 in all Go files whose names begin "gopher".
+//
+package glog
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	stdLog "log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+	infoLog severity = iota
+	warningLog
+	errorLog
+	fatalLog
+	numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+	return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+	atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+	return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+	return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+	var threshold severity
+	// Is it a known name?
+	if v, ok := severityByName(value); ok {
+		threshold = v
+	} else {
+		v, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		threshold = severity(v)
+	}
+	logging.stderrThreshold.set(threshold)
+	return nil
+}
+
+func severityByName(s string) (severity, bool) {
+	s = strings.ToUpper(s)
+	for i, name := range severityName {
+		if name == s {
+			return severity(i), true
+		}
+	}
+	return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+	lines int64
+	bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+	return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+	return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+	Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+	infoLog:    &Stats.Info,
+	warningLog: &Stats.Warning,
+	errorLog:   &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+	return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+	atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+	return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+	return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+	v, err := strconv.Atoi(value)
+	if err != nil {
+		return err
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(Level(v), logging.vmodule.filter, false)
+	return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+	filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+	pattern string
+	literal bool // The pattern is a literal string
+	level   Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+	if m.literal {
+		return file == m.pattern
+	}
+	match, _ := filepath.Match(m.pattern, file)
+	return match
+}
+
+func (m *moduleSpec) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	var b bytes.Buffer
+	for i, f := range m.filter {
+		if i > 0 {
+			b.WriteRune(',')
+		}
+		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+	}
+	return b.String()
+}
+
+// Get is part of the (Go 1.2)  flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+	return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+	var filter []modulePat
+	for _, pat := range strings.Split(value, ",") {
+		if len(pat) == 0 {
+			// Empty strings such as from a trailing comma can be ignored.
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.Atoi(patLev[1])
+		if err != nil {
+			return errors.New("syntax error: expect comma-separated list of filename=N")
+		}
+		if v < 0 {
+			return errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(logging.verbosity, filter, true)
+	return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+	return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+	file string
+	line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+	return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported
+func (t *traceLocation) Get() interface{} {
+	return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		t.line = 0
+		t.file = ""
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+	Flush() error
+	Sync() error
+	io.Writer
+}
+
+func init() {
+	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+	flag.Var(&logging.verbosity, "v", "log level for V logs")
+	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+	// Default stderrThreshold is ERROR.
+	logging.stderrThreshold = errorLog
+
+	logging.setVState(0, nil, false)
+	go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+	logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+	// Boolean flags. Not handled atomically because the flag.Value interface
+	// does not let us avoid the =true, and that shorthand is necessary for
+	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
+	toStderr     bool // The -logtostderr flag.
+	alsoToStderr bool // The -alsologtostderr flag.
+
+	// Level flag. Handled atomically.
+	stderrThreshold severity // The -stderrthreshold flag.
+
+	// freeList is a list of byte buffers, maintained under freeListMu.
+	freeList *buffer
+	// freeListMu maintains the free list. It is separate from the main mutex
+	// so buffers can be grabbed and printed to without holding the main lock,
+	// for better parallelization.
+	freeListMu sync.Mutex
+
+	// mu protects the remaining elements of this structure and is
+	// used to synchronize logging.
+	mu sync.Mutex
+	// file holds writer for each of the log types.
+	file [numSeverity]flushSyncWriter
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using sync.LoadInt32, but is only modified under mu.
+	filterLength int32
+	// traceLocation is the state of the -log_backtrace_at flag.
+	traceLocation traceLocation
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity Level      // V logging level, the value of the -v flag/
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+	bytes.Buffer
+	tmp  [64]byte // temporary byte array for creating headers.
+	next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	logging.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&logging.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		logging.vmodule.filter = filter
+		logging.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+	logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+	l.freeListMu.Lock()
+	b := l.freeList
+	if b != nil {
+		l.freeList = b.next
+	}
+	l.freeListMu.Unlock()
+	if b == nil {
+		b = new(buffer)
+	} else {
+		b.next = nil
+		b.Reset()
+	}
+	return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+	if b.Len() >= 256 {
+		// Let big buffers die a natural death.
+		return
+	}
+	l.freeListMu.Lock()
+	b.next = l.freeList
+	l.freeList = b
+	l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above lives the source line to be identified in the log message.
+
+Log lines have this form:
+	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+	L                A single character, representing the log level (eg 'I' for INFO)
+	mm               The month (zero padded; ie May is '05')
+	dd               The day (zero padded)
+	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
+	threadid         The space-padded thread ID as returned by GetTID()
+	file             The file name
+	line             The line number
+	msg              The user-supplied message
+*/
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+	_, file, line, ok := runtime.Caller(3 + depth)
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+	now := timeNow()
+	if line < 0 {
+		line = 0 // not a real line number, but acceptable to someDigits
+	}
+	if s > fatalLog {
+		s = infoLog // for safety.
+	}
+	buf := l.getBuffer()
+
+	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+	// It's worth about 3X. Fprintf is hard.
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	buf.tmp[0] = severityChar[s]
+	buf.twoDigits(1, int(month))
+	buf.twoDigits(3, day)
+	buf.tmp[5] = ' '
+	buf.twoDigits(6, hour)
+	buf.tmp[8] = ':'
+	buf.twoDigits(9, minute)
+	buf.tmp[11] = ':'
+	buf.twoDigits(12, second)
+	buf.tmp[14] = '.'
+	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+	buf.tmp[21] = ' '
+	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+	buf.tmp[29] = ' '
+	buf.Write(buf.tmp[:30])
+	buf.WriteString(file)
+	buf.tmp[0] = ':'
+	n := buf.someDigits(1, line)
+	buf.tmp[n+1] = ']'
+	buf.tmp[n+2] = ' '
+	buf.Write(buf.tmp[:n+3])
+	return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+	buf.tmp[i+1] = digits[d%10]
+	d /= 10
+	buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+	j := n - 1
+	for ; j >= 0 && d > 0; j-- {
+		buf.tmp[i+j] = digits[d%10]
+		d /= 10
+	}
+	for ; j >= 0; j-- {
+		buf.tmp[i+j] = pad
+	}
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+	// Print into the top, then copy down. We know there's space for at least
+	// a 10-digit number.
+	j := len(buf.tmp)
+	for {
+		j--
+		buf.tmp[j] = digits[d%10]
+		d /= 10
+		if d == 0 {
+			break
+		}
+	}
+	return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintln(buf, args...)
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+	l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+	buf, file, line := l.header(s, depth)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintf(buf, format, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number.  If
+// alsoLogToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+	buf := l.formatHeader(s, file, line)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, alsoToStderr)
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+	l.mu.Lock()
+	if l.traceLocation.isSet() {
+		if l.traceLocation.match(file, line) {
+			buf.Write(stacks(false))
+		}
+	}
+	data := buf.Bytes()
+	if !flag.Parsed() {
+		os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
+		os.Stderr.Write(data)
+	} else if l.toStderr {
+		os.Stderr.Write(data)
+	} else {
+		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+			os.Stderr.Write(data)
+		}
+		if l.file[s] == nil {
+			if err := l.createFiles(s); err != nil {
+				os.Stderr.Write(data) // Make sure the message appears somewhere.
+				l.exit(err)
+			}
+		}
+		switch s {
+		case fatalLog:
+			l.file[fatalLog].Write(data)
+			fallthrough
+		case errorLog:
+			l.file[errorLog].Write(data)
+			fallthrough
+		case warningLog:
+			l.file[warningLog].Write(data)
+			fallthrough
+		case infoLog:
+			l.file[infoLog].Write(data)
+		}
+	}
+	if s == fatalLog {
+		// If we got here via Exit rather than Fatal, print no stacks.
+		if atomic.LoadUint32(&fatalNoStacks) > 0 {
+			l.mu.Unlock()
+			timeoutFlush(10 * time.Second)
+			os.Exit(1)
+		}
+		// Dump all goroutine stacks before exiting.
+		// First, make sure we see the trace for the current goroutine on standard error.
+		// If -logtostderr has been specified, the loop below will do that anyway
+		// as the first stack in the full dump.
+		if !l.toStderr {
+			os.Stderr.Write(stacks(false))
+		}
+		// Write the stack trace for all goroutines to the files.
+		trace := stacks(true)
+		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+		for log := fatalLog; log >= infoLog; log-- {
+			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+				f.Write(trace)
+			}
+		}
+		l.mu.Unlock()
+		timeoutFlush(10 * time.Second)
+		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+	}
+	l.putBuffer(buf)
+	l.mu.Unlock()
+	if stats := severityStats[s]; stats != nil {
+		atomic.AddInt64(&stats.lines, 1)
+		atomic.AddInt64(&stats.bytes, int64(len(data)))
+	}
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first.  This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+	done := make(chan bool, 1)
+	go func() {
+		Flush() // calls logging.lockAndFlushAll()
+		done <- true
+	}()
+	select {
+	case <-done:
+	case <-time.After(timeout):
+		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+	}
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+	n := 10000
+	if all {
+		n = 100000
+	}
+	var trace []byte
+	for i := 0; i < 5; i++ {
+		trace = make([]byte, n)
+		nbytes := runtime.Stack(trace, all)
+		if nbytes < len(trace) {
+			return trace[:nbytes]
+		}
+		n *= 2
+	}
+	return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Instead, exit could be a function rather than a method but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+	// If logExitFunc is set, we do that instead of exiting.
+	if logExitFunc != nil {
+		logExitFunc(err)
+		return
+	}
+	l.flushAll()
+	os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+	logger *loggingT
+	*bufio.Writer
+	file   *os.File
+	sev    severity
+	nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+	return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+	if sb.nbytes+uint64(len(p)) >= MaxSize {
+		if err := sb.rotateFile(time.Now()); err != nil {
+			sb.logger.exit(err)
+		}
+	}
+	n, err = sb.Writer.Write(p)
+	sb.nbytes += uint64(n)
+	if err != nil {
+		sb.logger.exit(err)
+	}
+	return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+	if sb.file != nil {
+		sb.Flush()
+		sb.file.Close()
+	}
+	var err error
+	sb.file, _, err = create(severityName[sb.sev], now)
+	sb.nbytes = 0
+	if err != nil {
+		return err
+	}
+
+	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+	// Write header.
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+	n, err := sb.file.Write(buf.Bytes())
+	sb.nbytes += uint64(n)
+	return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+	now := time.Now()
+	// Files are created in decreasing severity order, so as soon as we find one
+	// has already been created, we can stop.
+	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+		sb := &syncBuffer{
+			logger: l,
+			sev:    s,
+		}
+		if err := sb.rotateFile(now); err != nil {
+			return err
+		}
+		l.file[s] = sb
+	}
+	return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+	for _ = range time.NewTicker(flushInterval).C {
+		l.lockAndFlushAll()
+	}
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+	l.mu.Lock()
+	l.flushAll()
+	l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+	// Flush from fatal down, in case there's trouble flushing.
+	for s := fatalLog; s >= infoLog; s-- {
+		file := l.file[s]
+		if file != nil {
+			file.Flush() // ignore error
+			file.Sync()  // ignore error
+		}
+	}
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities.  Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+	sev, ok := severityByName(name)
+	if !ok {
+		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+	}
+	// Set a log format that captures the user's file and line:
+	//   d.go:23: message
+	stdLog.SetFlags(stdLog.Lshortfile)
+	stdLog.SetOutput(logBridge(sev))
+}
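+
+// For illustration (a hypothetical caller, outside this package):
+//
+//	glog.CopyStandardLogTo("INFO")
+//	log.Print("request handled") // via the standard "log" package; also appears in the INFO log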
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+	var (
+		file = "???"
+		line = 1
+		text string
+	)
+	// Split "d.go:23: message" into "d.go", "23", and "message".
+	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+		text = fmt.Sprintf("bad log format: %s", b)
+	} else {
+		file = string(parts[0])
+		text = string(parts[2][1:]) // skip leading space
+		line, err = strconv.Atoi(string(parts[1]))
+		if err != nil {
+			text = fmt.Sprintf("bad line number: %s", b)
+			line = 1
+		}
+	}
+	// printWithFileLine with alsoToStderr=true, so standard log messages
+	// always appear on standard error.
+	logging.printWithFileLine(severity(lb), file, line, true, text)
+	return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+	fn := runtime.FuncForPC(pc)
+	file, _ := fn.FileLine(pc)
+	// The file is something like /a/b/c/d.go. We want just the d.
+	if strings.HasSuffix(file, ".go") {
+		file = file[:len(file)-3]
+	}
+	if slash := strings.LastIndex(file, "/"); slash >= 0 {
+		file = file[slash+1:]
+	}
+	for _, filter := range l.vmodule.filter {
+		if filter.match(file) {
+			l.vmap[pc] = filter.level
+			return filter.level
+		}
+	}
+	l.vmap[pc] = 0
+	return 0
+}
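+
+// As a sketch with hypothetical file names: given -vmodule=net*=2, calls in
+// network.go and network_test.go both resolve to V level 2, because the
+// stripped basenames "network" and "network_test" match the pattern "net*"
+// under filepath.Match.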
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//	if glog.V(2) { glog.Info("log this") }
+// or
+//	glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+	// This function tries hard to be cheap unless there's work to do.
+	// The fast path is two atomic loads and compares.
+
+	// Here is a cheap but safe test to see if V logging is enabled globally.
+	if logging.verbosity.get() >= level {
+		return Verbose(true)
+	}
+
+	// It's off globally but vmodule may still be set.
+	// Here is another cheap but safe test to see if vmodule is enabled.
+	if atomic.LoadInt32(&logging.filterLength) > 0 {
+		// Now we need a proper lock to use the logging structure. The pcs field
+		// is shared so we must lock before accessing it. This is fairly expensive,
+		// but if V logging is enabled we're slow anyway.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		if runtime.Callers(2, logging.pcs[:]) == 0 {
+			return Verbose(false)
+		}
+		v, ok := logging.vmap[logging.pcs[0]]
+		if !ok {
+			v = logging.setV(logging.pcs[0])
+		}
+		return Verbose(v >= level)
+	}
+	return Verbose(false)
+}
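+
+// For illustration (hypothetical flag values): running a binary with
+//
+//	-v=1 -vmodule=network=3
+//
+// makes V(1) true everywhere, while V(2) and V(3) are additionally true for
+// calls made from network.go.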
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+	if v {
+		logging.print(infoLog, args...)
+	}
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+	if v {
+		logging.println(infoLog, args...)
+	}
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+	if v {
+		logging.printf(infoLog, format, args...)
+	}
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+	logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+	logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+	logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+	logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+	logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+	logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+	logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+	logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+	logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+	logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+	logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+	logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+	logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+	logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+	logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printf(fatalLog, format, args...)
+}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
new file mode 100644
index 0000000..65075d2
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file.go
@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
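+
+// Programs that want more frequent rotation may lower this before logging
+// starts, e.g. (illustrative value) MaxSize = 64 * 1024 * 1024 for 64 MiB files.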
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+	if *logDir != "" {
+		logDirs = append(logDirs, *logDir)
+	}
+	logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+	pid      = os.Getpid()
+	program  = filepath.Base(os.Args[0])
+	host     = "unknownhost"
+	userName = "unknownuser"
+)
+
+func init() {
+	h, err := os.Hostname()
+	if err == nil {
+		host = shortHostname(h)
+	}
+
+	current, err := user.Current()
+	if err == nil {
+		userName = current.Username
+	}
+
+	// Sanitize userName since it may contain filepath separators on Windows.
+	userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+	if i := strings.Index(hostname, "."); i >= 0 {
+		return hostname[:i]
+	}
+	return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+		program,
+		host,
+		userName,
+		tag,
+		t.Year(),
+		t.Month(),
+		t.Day(),
+		t.Hour(),
+		t.Minute(),
+		t.Second(),
+		pid)
+	return name, program + "." + tag
+}
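+
+// For illustration (hypothetical values): program "sniffer-agent" run by user
+// "root" on host "db01" with pid 1234 and tag "INFO" at 2021-04-29 15:22:48
+// yields
+//
+//	name: sniffer-agent.db01.root.log.INFO.20210429-152248.1234
+//	link: sniffer-agent.INFO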
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+	onceLogDirs.Do(createLogDirs)
+	if len(logDirs) == 0 {
+		return nil, "", errors.New("log: no log dirs")
+	}
+	name, link := logName(tag, t)
+	var lastErr error
+	for _, dir := range logDirs {
+		fname := filepath.Join(dir, name)
+		f, err := os.Create(fname)
+		if err == nil {
+			symlink := filepath.Join(dir, link)
+			os.Remove(symlink)        // ignore err
+			os.Symlink(name, symlink) // ignore err
+			return f, fname, nil
+		}
+		lastErr = err
+	}
+	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
diff --git a/vendor/github.com/golang/glog/glog_test.go b/vendor/github.com/golang/glog/glog_test.go
new file mode 100644
index 0000000..0fb376e
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_test.go
@@ -0,0 +1,415 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package glog
+
+import (
+	"bytes"
+	"fmt"
+	stdLog "log"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+)
+
+// Test that shortHostname works as advertised.
+func TestShortHostname(t *testing.T) {
+	for hostname, expect := range map[string]string{
+		"":                "",
+		"host":            "host",
+		"host.google.com": "host",
+	} {
+		if got := shortHostname(hostname); expect != got {
+			t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
+		}
+	}
+}
+
+// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
+type flushBuffer struct {
+	bytes.Buffer
+}
+
+func (f *flushBuffer) Flush() error {
+	return nil
+}
+
+func (f *flushBuffer) Sync() error {
+	return nil
+}
+
+// swap sets the log writers and returns the old array.
+func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	old = l.file
+	for i, w := range writers {
+		logging.file[i] = w
+	}
+	return
+}
+
+// newBuffers sets the log writers to all new byte buffers and returns the old array.
+func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
+	return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
+}
+
+// contents returns the specified log value as a string.
+func contents(s severity) string {
+	return logging.file[s].(*flushBuffer).String()
+}
+
+// contains reports whether the string is contained in the log.
+func contains(s severity, str string, t *testing.T) bool {
+	return strings.Contains(contents(s), str)
+}
+
+// setFlags configures the logging flags as the test expects them.
+func setFlags() {
+	logging.toStderr = false
+}
+
+// Test that Info works as advertised.
+func TestInfo(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	Info("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+func TestInfoDepth(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+
+	f := func() { InfoDepth(1, "depth-test1") }
+
+	// The next three lines must stay together
+	_, _, wantLine, _ := runtime.Caller(0)
+	InfoDepth(0, "depth-test0")
+	f()
+
+	msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
+	if len(msgs) != 2 {
+		t.Fatalf("Got %d lines, expected 2", len(msgs))
+	}
+
+	for i, m := range msgs {
+		if !strings.HasPrefix(m, "I") {
+			t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
+		}
+		w := fmt.Sprintf("depth-test%d", i)
+		if !strings.Contains(m, w) {
+			t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
+		}
+
+		// pull out the line number (between : and ])
+		msg := m[strings.LastIndex(m, ":")+1:]
+		x := strings.Index(msg, "]")
+		if x < 0 {
+			t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
+			continue
+		}
+		line, err := strconv.Atoi(msg[:x])
+		if err != nil {
+			t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
+			continue
+		}
+		wantLine++
+		if wantLine != line {
+			t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
+		}
+	}
+}
+
+func init() {
+	CopyStandardLogTo("INFO")
+}
+
+// Test that CopyStandardLogTo panics on bad input.
+func TestCopyStandardLogToPanic(t *testing.T) {
+	defer func() {
+		if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
+			t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
+		}
+	}()
+	CopyStandardLogTo("LOG")
+}
+
+// Test that using the standard log package logs to INFO.
+func TestStandardLog(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	stdLog.Print("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that the header has the correct format.
+func TestHeader(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	defer func(previous func() time.Time) { timeNow = previous }(timeNow)
+	timeNow = func() time.Time {
+		return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
+	}
+	pid = 1234
+	Info("test")
+	var line int
+	format := "I0102 15:04:05.067890    1234 glog_test.go:%d] test\n"
+	n, err := fmt.Sscanf(contents(infoLog), format, &line)
+	if n != 1 || err != nil {
+		t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
+	}
+	// Scanf treats multiple spaces as equivalent to a single space,
+	// so check for correct space-padding also.
+	want := fmt.Sprintf(format, line)
+	if contents(infoLog) != want {
+		t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
+	}
+}
+
+// Test that an Error log goes to Warning and Info.
+// Even in the Info log, the source character will be E, so the data should
+// all be identical.
+func TestError(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	Error("test")
+	if !contains(errorLog, "E", t) {
+		t.Errorf("Error has wrong character: %q", contents(errorLog))
+	}
+	if !contains(errorLog, "test", t) {
+		t.Error("Error failed")
+	}
+	str := contents(errorLog)
+	if !contains(warningLog, str, t) {
+		t.Error("Warning failed")
+	}
+	if !contains(infoLog, str, t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a Warning log goes to Info.
+// Even in the Info log, the source character will be W, so the data should
+// all be identical.
+func TestWarning(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	Warning("test")
+	if !contains(warningLog, "W", t) {
+		t.Errorf("Warning has wrong character: %q", contents(warningLog))
+	}
+	if !contains(warningLog, "test", t) {
+		t.Error("Warning failed")
+	}
+	str := contents(warningLog)
+	if !contains(infoLog, str, t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a V log goes to Info.
+func TestV(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.verbosity.Set("2")
+	defer logging.verbosity.Set("0")
+	V(2).Info("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a vmodule enables a log in this file.
+func TestVmoduleOn(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.vmodule.Set("glog_test=2")
+	defer logging.vmodule.Set("")
+	if !V(1) {
+		t.Error("V not enabled for 1")
+	}
+	if !V(2) {
+		t.Error("V not enabled for 2")
+	}
+	if V(3) {
+		t.Error("V enabled for 3")
+	}
+	V(2).Info("test")
+	if !contains(infoLog, "I", t) {
+		t.Errorf("Info has wrong character: %q", contents(infoLog))
+	}
+	if !contains(infoLog, "test", t) {
+		t.Error("Info failed")
+	}
+}
+
+// Test that a vmodule of another file does not enable a log in this file.
+func TestVmoduleOff(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	logging.vmodule.Set("notthisfile=2")
+	defer logging.vmodule.Set("")
+	for i := 1; i <= 3; i++ {
+		if V(Level(i)) {
+			t.Errorf("V enabled for %d", i)
+		}
+	}
+	V(2).Info("test")
+	if contents(infoLog) != "" {
+		t.Error("V logged incorrectly")
+	}
+}
+
+// vGlobs are patterns that match/don't match this file at V=2.
+var vGlobs = map[string]bool{
+	// Easy to test the numeric match here.
+	"glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail.
+	"glog_test=2": true,
+	"glog_test=3": true, // If -vmodule sets V to 3, V(2) will succeed.
+	// These all use 2 and check the patterns. All are true.
+	"*=2":           true,
+	"?l*=2":         true,
+	"????_*=2":      true,
+	"??[mno]?_*t=2": true,
+	// These all use 2 and check the patterns. All are false.
+	"*x=2":         false,
+	"m*=2":         false,
+	"??_*=2":       false,
+	"?[abc]?_*t=2": false,
+}
+
+// Test that vmodule globbing works as advertised.
+func testVmoduleGlob(pat string, match bool, t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	defer logging.vmodule.Set("")
+	logging.vmodule.Set(pat)
+	if V(2) != Verbose(match) {
+		t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
+	}
+}
+
+// Test that a vmodule globbing works as advertised.
+func TestVmoduleGlob(t *testing.T) {
+	for glob, match := range vGlobs {
+		testVmoduleGlob(glob, match, t)
+	}
+}
+
+func TestRollover(t *testing.T) {
+	setFlags()
+	var err error
+	defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
+	logExitFunc = func(e error) {
+		err = e
+	}
+	defer func(previous uint64) { MaxSize = previous }(MaxSize)
+	MaxSize = 512
+
+	Info("x") // Be sure we have a file.
+	info, ok := logging.file[infoLog].(*syncBuffer)
+	if !ok {
+		t.Fatal("info wasn't created")
+	}
+	if err != nil {
+		t.Fatalf("info has initial error: %v", err)
+	}
+	fname0 := info.file.Name()
+	Info(strings.Repeat("x", int(MaxSize))) // force a rollover
+	if err != nil {
+		t.Fatalf("info has error after big write: %v", err)
+	}
+
+	// Make sure the next log file gets a file name with a different
+	// time stamp.
+	//
+	// TODO: determine whether we need to support subsecond log
+	// rotation.  C++ does not appear to handle this case (nor does it
+	// handle Daylight Savings Time properly).
+	time.Sleep(1 * time.Second)
+
+	Info("x") // create a new file
+	if err != nil {
+		t.Fatalf("error after rotation: %v", err)
+	}
+	fname1 := info.file.Name()
+	if fname0 == fname1 {
+		t.Errorf("info.f.Name did not change: %v", fname0)
+	}
+	if info.nbytes >= MaxSize {
+		t.Errorf("file size was not reset: %d", info.nbytes)
+	}
+}
+
+func TestLogBacktraceAt(t *testing.T) {
+	setFlags()
+	defer logging.swap(logging.newBuffers())
+	// The peculiar style of this code simplifies line counting and maintenance of the
+	// tracing block below.
+	var infoLine string
+	setTraceLocation := func(file string, line int, ok bool, delta int) {
+		if !ok {
+			t.Fatal("could not get file:line")
+		}
+		_, file = filepath.Split(file)
+		infoLine = fmt.Sprintf("%s:%d", file, line+delta)
+		err := logging.traceLocation.Set(infoLine)
+		if err != nil {
+			t.Fatal("error setting log_backtrace_at: ", err)
+		}
+	}
+	{
+		// Start of tracing block. These lines know about each other's relative position.
+		_, file, line, ok := runtime.Caller(0)
+		setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
+		Info("we want a stack trace here")
+	}
+	numAppearances := strings.Count(contents(infoLog), infoLine)
+	if numAppearances < 2 {
+		// Need 2 appearances, one in the log header and one in the trace:
+		//   log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
+		//   ...
+		//   github.com/glog/glog_test.go:280 (0x41ba91)
+		//   ...
+		// We could be more precise but that would require knowing the details
+		// of the traceback format, which may not be dependable.
+		t.Fatal("got no trace back; log is ", contents(infoLog))
+	}
+}
+
+func BenchmarkHeader(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		buf, _, _ := logging.header(infoLog, 0)
+		logging.putBuffer(buf)
+	}
+}
diff --git a/vendor/github.com/google/gopacket/.gitignore b/vendor/github.com/google/gopacket/.gitignore
deleted file mode 100644
index 149266f..0000000
--- a/vendor/github.com/google/gopacket/.gitignore
+++ /dev/null
@@ -1,38 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-#*
-*~
-
-# examples binaries
-examples/synscan/synscan
-examples/pfdump/pfdump
-examples/pcapdump/pcapdump
-examples/httpassembly/httpassembly
-examples/statsassembly/statsassembly
-examples/arpscan/arpscan
-examples/bidirectional/bidirectional
-examples/bytediff/bytediff
-examples/reassemblydump/reassemblydump
-layers/gen
-macs/gen
-pcap/pcap_tester
diff --git a/vendor/github.com/google/gopacket/.travis.gofmt.sh b/vendor/github.com/google/gopacket/.travis.gofmt.sh
deleted file mode 100755
index e341a1c..0000000
--- a/vendor/github.com/google/gopacket/.travis.gofmt.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-cd "$(dirname $0)"
-if [ -n "$(go fmt ./...)" ]; then
-  echo "Go code is not formatted, run 'go fmt github.com/google/stenographer/...'" >&2
-  exit 1
-fi
diff --git a/vendor/github.com/google/gopacket/.travis.golint.sh b/vendor/github.com/google/gopacket/.travis.golint.sh
deleted file mode 100755
index 0e267f5..0000000
--- a/vendor/github.com/google/gopacket/.travis.golint.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-cd "$(dirname $0)"
-
-go get golang.org/x/lint/golint
-DIRS=". tcpassembly tcpassembly/tcpreader ip4defrag reassembly macs pcapgo pcap afpacket pfring routing defrag/lcmdefrag"
-# Add subdirectories here as we clean up golint on each.
-for subdir in $DIRS; do
-  pushd $subdir
-  if golint |
-      grep -v CannotSetRFMon |  # pcap exported error name
-      grep -v DataLost |        # tcpassembly/tcpreader exported error name
-      grep .; then
-    exit 1
-  fi
-  popd
-done
-
-pushd layers
-for file in *.go; do
-  if cat .lint_blacklist | grep -q $file; then
-    echo "Skipping lint of $file due to .lint_blacklist"
-  elif golint $file | grep .; then
-    echo "Lint error in file $file"
-    exit 1
-  fi
-done
-popd
diff --git a/vendor/github.com/google/gopacket/.travis.govet.sh b/vendor/github.com/google/gopacket/.travis.govet.sh
deleted file mode 100755
index a5c1354..0000000
--- a/vendor/github.com/google/gopacket/.travis.govet.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-cd "$(dirname $0)"
-DIRS=". layers pcap pcapgo tcpassembly tcpassembly/tcpreader routing ip4defrag bytediff macs defrag/lcmdefrag"
-set -e
-for subdir in $DIRS; do
-  pushd $subdir
-  go vet
-  popd
-done
diff --git a/vendor/github.com/google/gopacket/.travis.install.sh b/vendor/github.com/google/gopacket/.travis.install.sh
deleted file mode 100755
index 648c901..0000000
--- a/vendor/github.com/google/gopacket/.travis.install.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-set -ev
-
-go get github.com/google/gopacket
-go get github.com/google/gopacket/layers
-go get github.com/google/gopacket/tcpassembly
-go get github.com/google/gopacket/reassembly
-go get github.com/google/gopacket/pcapgo
diff --git a/vendor/github.com/google/gopacket/.travis.script.sh b/vendor/github.com/google/gopacket/.travis.script.sh
deleted file mode 100755
index a483f4f..0000000
--- a/vendor/github.com/google/gopacket/.travis.script.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -ev
-
-go test github.com/google/gopacket
-go test github.com/google/gopacket/layers
-go test github.com/google/gopacket/tcpassembly
-go test github.com/google/gopacket/reassembly
-go test github.com/google/gopacket/pcapgo 
-go test github.com/google/gopacket/pcap
diff --git a/vendor/github.com/google/gopacket/.travis.yml b/vendor/github.com/google/gopacket/.travis.yml
deleted file mode 100644
index 8ebb01d..0000000
--- a/vendor/github.com/google/gopacket/.travis.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-language: go
-go:
- - 1.11.x
- - 1.12.x
- - master
-
-addons:
-  apt:
-    packages:
-      libpcap-dev
-
-# use modules except for older versions (see below)
-install: true
-
-env:
-  - GO111MODULE=on
-
-script: ./.travis.script.sh
-
-matrix:
-  fast_finish: true
-  allow_failures:
-    - go: master
-
-jobs:
-  include:
-    - go: 1.5.x
-      install: ./.travis.install.sh
-    - go: 1.6.x
-      install: ./.travis.install.sh
-    - go: 1.7.x
-      install: ./.travis.install.sh
-    - go: 1.8.x
-      install: ./.travis.install.sh
-    - go: 1.9.x
-      install: ./.travis.install.sh
-    - go: 1.10.x
-      install: ./.travis.install.sh
-    - os: osx
-      go: 1.x
-    - os: windows
-      go: 1.x
-      # winpcap does not work on travis ci - so install nmap to get libpcap
-      before_install: choco install nmap
-    - stage: style
-      name: "fmt/vet/lint"
-      go: 1.x
-      script:
-        - ./.travis.gofmt.sh
-        - ./.travis.govet.sh
-        - ./.travis.golint.sh
-
-stages:
-  - style
-  - test
diff --git a/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/google/gopacket/AUTHORS
index e8a225f..24e834e 100644
--- a/vendor/github.com/google/gopacket/AUTHORS
+++ b/vendor/github.com/google/gopacket/AUTHORS
@@ -33,6 +33,7 @@ Jesse Ward <jesse@jesseward.com>
 Kane Mathers <kane@kanemathers.name>
 Jose Selvi <jselvi@pentester.es>
 Yerden Zhumabekov <yerden.zhumabekov@gmail.com>
+Jensen Hwa <jensenhwa@gmail.com>
 
 -----------------------------------------------
 FORKED FROM github.com/akrennmair/gopcap
diff --git a/vendor/github.com/google/gopacket/README.md b/vendor/github.com/google/gopacket/README.md
index a2f48a9..efe462e 100644
--- a/vendor/github.com/google/gopacket/README.md
+++ b/vendor/github.com/google/gopacket/README.md
@@ -6,7 +6,7 @@ See [godoc](https://godoc.org/github.com/google/gopacket) for more details.
 [![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket)
 [![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket)
 
-Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.9 due to x/sys/unix dependencies.
 
 Originally forked from the gopcap project written by Andreas
 Krennmair <ak@synflood.at> (http://github.com/akrennmair/gopcap).
diff --git a/vendor/github.com/google/gopacket/benchmark_test.go b/vendor/github.com/google/gopacket/benchmark_test.go
new file mode 100644
index 0000000..4bc2037
--- /dev/null
+++ b/vendor/github.com/google/gopacket/benchmark_test.go
@@ -0,0 +1,194 @@
+// Copyright 2012, Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"runtime"
+	"testing"
+)
+
+// A few benchmarks for figuring out exactly how fast some underlying Go
+// things are.
+
+type testError struct{}
+
+func (t *testError) Error() string { return "abc" }
+
+func BenchmarkTypeAssertion(b *testing.B) {
+	var e error = &testError{}
+	for i := 0; i < b.N; i++ {
+		_, _ = e.(*testError)
+	}
+}
+
+func BenchmarkMapLookup(b *testing.B) {
+	m := map[LayerType]bool{
+		LayerTypePayload: true,
+	}
+	for i := 0; i < b.N; i++ {
+		_ = m[LayerTypePayload]
+	}
+}
+
+func BenchmarkNilMapLookup(b *testing.B) {
+	var m map[LayerType]bool
+	for i := 0; i < b.N; i++ {
+		_ = m[LayerTypePayload]
+	}
+}
+
+func BenchmarkNilMapLookupWithNilCheck(b *testing.B) {
+	var m map[LayerType]bool
+	for i := 0; i < b.N; i++ {
+		if m != nil {
+			_ = m[LayerTypePayload]
+		}
+	}
+}
+
+func BenchmarkArrayLookup(b *testing.B) {
+	m := make([]bool, 100)
+	for i := 0; i < b.N; i++ {
+		_ = m[LayerTypePayload]
+	}
+}
+
+var testError1 = &testError{}
+var testError2 error = testError1
+
+func BenchmarkTypeToInterface1(b *testing.B) {
+	var e error
+	for i := 0; i < b.N; i++ {
+		e = testError1
+	}
+	// Have to do something with 'e' or the compiler complains about an unused
+	// variable.
+	testError2 = e
+}
+func BenchmarkTypeToInterface2(b *testing.B) {
+	var e error
+	for i := 0; i < b.N; i++ {
+		e = testError2
+	}
+	// Have to do something with 'e' or the compiler complains about an unused
+	// variable.
+	testError2 = e
+}
+
+var decodeOpts DecodeOptions
+
+func decodeOptsByValue(_ DecodeOptions)    {}
+func decodeOptsByPointer(_ *DecodeOptions) {}
+func BenchmarkPassDecodeOptionsByValue(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decodeOptsByValue(decodeOpts)
+	}
+}
+func BenchmarkPassDecodeOptionsByPointer(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		decodeOptsByPointer(&decodeOpts)
+	}
+}
+
+func BenchmarkLockOSThread(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		runtime.LockOSThread()
+	}
+}
+func BenchmarkUnlockOSThread(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		runtime.UnlockOSThread()
+	}
+}
+func lockUnlock() {
+	runtime.LockOSThread()
+	runtime.UnlockOSThread()
+}
+func lockDeferUnlock() {
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+}
+func BenchmarkLockUnlockOSThread(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		lockUnlock()
+	}
+}
+func BenchmarkLockDeferUnlockOSThread(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		lockDeferUnlock()
+	}
+}
+
+func BenchmarkUnbufferedChannel(b *testing.B) {
+	ca := make(chan bool)
+	cb := make(chan bool)
+	defer close(ca)
+	go func() {
+		defer close(cb)
+		for range ca {
+			cb <- true
+		}
+	}()
+	for i := 0; i < b.N; i++ {
+		ca <- true
+		<-cb
+	}
+}
+func BenchmarkSmallBufferedChannel(b *testing.B) {
+	ca := make(chan bool, 1)
+	cb := make(chan bool, 1)
+	defer close(ca)
+	go func() {
+		defer close(cb)
+		for range ca {
+			cb <- true
+		}
+	}()
+	for i := 0; i < b.N; i++ {
+		ca <- true
+		<-cb
+	}
+}
+func BenchmarkLargeBufferedChannel(b *testing.B) {
+	ca := make(chan bool, 1000)
+	cb := make(chan bool, 1000)
+	defer close(ca)
+	go func() {
+		defer close(cb)
+		for range ca {
+			cb <- true
+		}
+	}()
+	for i := 0; i < b.N; i++ {
+		ca <- true
+		<-cb
+	}
+}
+func BenchmarkEndpointFastHashShort(b *testing.B) {
+	e := Endpoint{typ: 1, len: 2}
+	for i := 0; i < b.N; i++ {
+		e.FastHash()
+	}
+}
+func BenchmarkEndpointFastHashLong(b *testing.B) {
+	e := Endpoint{typ: 1, len: 16}
+	for i := 0; i < b.N; i++ {
+		e.FastHash()
+	}
+}
+func BenchmarkFlowFastHashShort(b *testing.B) {
+	e := Flow{typ: 1, slen: 2, dlen: 2}
+	for i := 0; i < b.N; i++ {
+		e.FastHash()
+	}
+}
+func BenchmarkFlowFastHashLong(b *testing.B) {
+	e := Flow{typ: 1, slen: 16, dlen: 16}
+	for i := 0; i < b.N; i++ {
+		e.FastHash()
+	}
+}
diff --git a/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/google/gopacket/doc.go
index 5937fd1..b46e43d 100644
--- a/vendor/github.com/google/gopacket/doc.go
+++ b/vendor/github.com/google/gopacket/doc.go
@@ -208,7 +208,7 @@ based on endpoint criteria:
    }
  }
  // Find all packets coming from UDP port 1000 to UDP port 500
- interestingFlow := gopacket.NewFlow(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
+ interestingFlow := gopacket.FlowFromEndpoints(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
  if t := packet.NetworkLayer(); t != nil && t.TransportFlow() == interestingFlow {
    fmt.Println("Found that UDP flow I was looking for!")
  }
diff --git a/vendor/github.com/google/gopacket/gen.go b/vendor/github.com/google/gopacket/gen.go
deleted file mode 100644
index dc75f94..0000000
--- a/vendor/github.com/google/gopacket/gen.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2019 The GoPacket Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the LICENSE file in the root of the source
-// tree.
-
-// +build ignore
-
-// This file generates LayersDecoder function for DecodingLayerContainer
-// go run gen.go | gofmt > layers_decoder.go
-package main
-
-import (
-	"fmt"
-	"os"
-	"time"
-)
-
-const headerFmt = `// Copyright 2019 The GoPacket Authors. All rights reserved.
-
-package gopacket
-
-// Created by gen.go, don't edit manually
-// Generated at %s
-
-// LayersDecoder returns DecodingLayerFunc for specified
-// DecodingLayerContainer, LayerType value to start decoding with and
-// some DecodeFeedback.
-func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc {
-  firstDec, ok := dl.Decoder(first)
-  if !ok {
-    return func([]byte, *[]LayerType) (LayerType, error) {
-      return first, nil
-    }
-  }
-`
-
-var funcBody = `return func(data []byte, decoded *[]LayerType) (LayerType, error) {
-  *decoded = (*decoded)[:0] // Truncated decoded layers.
-  typ := first
-  decoder := firstDec
-  for {
-    if err := decoder.DecodeFromBytes(data, df); err != nil {
-      return LayerTypeZero, err
-    }
-    *decoded = append(*decoded, typ)
-    typ = decoder.NextLayerType()
-    if data = decoder.LayerPayload(); len(data) == 0 {
-      break
-    }
-    if decoder, ok = dlc.Decoder(typ); !ok {
-      return typ, nil
-    }
-  }
-  return LayerTypeZero, nil
-}`
-
-func main() {
-	fmt.Fprintf(os.Stderr, "Writing results to stdout\n")
-	types := []string{
-		"DecodingLayerSparse",
-		"DecodingLayerArray",
-		"DecodingLayerMap",
-	}
-
-	fmt.Printf(headerFmt, time.Now())
-	for _, t := range types {
-		fmt.Printf("if dlc, ok := dl.(%s); ok {", t)
-		fmt.Println(funcBody)
-		fmt.Println("}")
-	}
-	fmt.Println("dlc := dl")
-	fmt.Println(funcBody)
-	fmt.Println("}")
-}
diff --git a/vendor/github.com/google/gopacket/go.mod b/vendor/github.com/google/gopacket/go.mod
index 99e99f4..fcd84bc 100644
--- a/vendor/github.com/google/gopacket/go.mod
+++ b/vendor/github.com/google/gopacket/go.mod
@@ -3,6 +3,9 @@ module github.com/google/gopacket
 go 1.12
 
 require (
-	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
-	golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67
+	github.com/vishvananda/netlink v1.1.0
+	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
+	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
+	golang.org/x/net v0.0.0-20190620200207-3b0461eec859
+	golang.org/x/sys v0.0.0-20200217220822-9197077df867
 )
diff --git a/vendor/github.com/google/gopacket/go.sum b/vendor/github.com/google/gopacket/go.sum
index 2b28942..20806aa 100644
--- a/vendor/github.com/google/gopacket/go.sum
+++ b/vendor/github.com/google/gopacket/go.sum
@@ -1,7 +1,27 @@
+github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
 golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867 h1:JoRuNIf+rpHl+VhScRQQvzbHed86tKkqwPMV34T8myw=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/google/gopacket/packet_test.go b/vendor/github.com/google/gopacket/packet_test.go
new file mode 100644
index 0000000..bd05448
--- /dev/null
+++ b/vendor/github.com/google/gopacket/packet_test.go
@@ -0,0 +1,62 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"io"
+	"reflect"
+	"testing"
+)
+
+type embedded struct {
+	A, B int
+}
+
+type embedding struct {
+	embedded
+	C, D int
+}
+
+func TestDumpEmbedded(t *testing.T) {
+	e := embedding{embedded: embedded{A: 1, B: 2}, C: 3, D: 4}
+	if got, want := layerString(reflect.ValueOf(e), false, false), "{A=1 B=2 C=3 D=4}"; got != want {
+		t.Errorf("embedded dump mismatch:\n   got: %v\n  want: %v", got, want)
+	}
+}
+
+type singlePacketSource [1][]byte
+
+func (s *singlePacketSource) ReadPacketData() ([]byte, CaptureInfo, error) {
+	if (*s)[0] == nil {
+		return nil, CaptureInfo{}, io.EOF
+	}
+	out := (*s)[0]
+	(*s)[0] = nil
+	return out, CaptureInfo{}, nil
+}
+
+func TestConcatPacketSources(t *testing.T) {
+	sourceA := &singlePacketSource{[]byte{1}}
+	sourceB := &singlePacketSource{[]byte{2}}
+	sourceC := &singlePacketSource{[]byte{3}}
+	concat := ConcatFinitePacketDataSources(sourceA, sourceB, sourceC)
+	a, _, err := concat.ReadPacketData()
+	if err != nil || len(a) != 1 || a[0] != 1 {
+		t.Errorf("expected [1], got %v/%v", a, err)
+	}
+	b, _, err := concat.ReadPacketData()
+	if err != nil || len(b) != 1 || b[0] != 2 {
+		t.Errorf("expected [2], got %v/%v", b, err)
+	}
+	c, _, err := concat.ReadPacketData()
+	if err != nil || len(c) != 1 || c[0] != 3 {
+		t.Errorf("expected [3], got %v/%v", c, err)
+	}
+	if _, _, err := concat.ReadPacketData(); err != io.EOF {
+		t.Errorf("expected io.EOF, got %v", err)
+	}
+}
diff --git a/vendor/github.com/google/gopacket/time_test.go b/vendor/github.com/google/gopacket/time_test.go
new file mode 100644
index 0000000..c9b99e0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/time_test.go
@@ -0,0 +1,73 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"testing"
+	"time"
+)
+
+func TestToDuration(t *testing.T) {
+	for i, test := range []struct {
+		r TimestampResolution
+		d time.Duration
+	}{
+		{
+			TimestampResolutionMillisecond,
+			time.Millisecond,
+		},
+		{
+			TimestampResolutionMicrosecond,
+			time.Microsecond,
+		},
+		{
+			TimestampResolutionNanosecond,
+			time.Nanosecond,
+		},
+		{
+			TimestampResolutionNTP,
+			0, // this is not representable since it's ~0.233 nanoseconds
+		},
+		{
+			TimestampResolution{2, -16},
+			15258,
+		},
+		{
+			TimestampResolution{2, 1},
+			2 * time.Second,
+		},
+		{
+			TimestampResolution{10, 1},
+			10 * time.Second,
+		},
+		{
+			TimestampResolution{10, 0},
+			time.Second,
+		},
+		{
+			TimestampResolution{2, 0},
+			time.Second,
+		},
+		{
+			TimestampResolution{0, 0},
+			0,
+		},
+		{
+			TimestampResolution{3, 2},
+			9 * time.Second,
+		},
+		{
+			TimestampResolution{3, -2},
+			111111111,
+		},
+	} {
+		d := test.r.ToDuration()
+		if d != test.d {
+			t.Errorf("%d: resolution: %s want: %d got: %d", i, test.r, test.d, d)
+		}
+	}
+}
diff --git a/vendor/github.com/google/gopacket/writer_test.go b/vendor/github.com/google/gopacket/writer_test.go
new file mode 100644
index 0000000..e19069b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/writer_test.go
@@ -0,0 +1,94 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestExponentialSizeIncreasePrepend(t *testing.T) {
+	var b serializeBuffer
+	for i, test := range []struct {
+		prepend, size int
+	}{
+		{2, 2},
+		{2, 4},
+		{2, 8},
+		{2, 8},
+		{2, 16},
+		{2, 16},
+		{2, 16},
+		{2, 16},
+		{2, 32},
+	} {
+		b.PrependBytes(test.prepend)
+		if test.size != cap(b.data) {
+			t.Error(i, "size want", test.size, "got", cap(b.data))
+		}
+	}
+	b.Clear()
+	if b.start != 32 {
+		t.Error(b.start)
+	}
+}
+
+func TestExponentialSizeIncreaseAppend(t *testing.T) {
+	var b serializeBuffer
+	for i, test := range []struct {
+		appnd, size int
+	}{
+		{2, 2},
+		{2, 4},
+		{2, 8},
+		{2, 8},
+		{2, 16},
+		{2, 16},
+		{2, 16},
+		{2, 16},
+		{2, 32},
+	} {
+		b.AppendBytes(test.appnd)
+		if test.size != cap(b.data) {
+			t.Error(i, "size want", test.size, "got", cap(b.data))
+		}
+	}
+	b.Clear()
+	if b.start != 0 {
+		t.Error(b.start)
+	}
+}
+
+func ExampleSerializeBuffer() {
+	b := NewSerializeBuffer()
+	fmt.Println("1:", b.Bytes())
+	bytes, _ := b.PrependBytes(3)
+	copy(bytes, []byte{1, 2, 3})
+	fmt.Println("2:", b.Bytes())
+	bytes, _ = b.AppendBytes(2)
+	copy(bytes, []byte{4, 5})
+	fmt.Println("3:", b.Bytes())
+	bytes, _ = b.PrependBytes(1)
+	copy(bytes, []byte{0})
+	fmt.Println("4:", b.Bytes())
+	bytes, _ = b.AppendBytes(3)
+	copy(bytes, []byte{6, 7, 8})
+	fmt.Println("5:", b.Bytes())
+	b.Clear()
+	fmt.Println("6:", b.Bytes())
+	bytes, _ = b.PrependBytes(2)
+	copy(bytes, []byte{9, 9})
+	fmt.Println("7:", b.Bytes())
+	// Output:
+	// 1: []
+	// 2: [1 2 3]
+	// 3: [1 2 3 4 5]
+	// 4: [0 1 2 3 4 5]
+	// 5: [0 1 2 3 4 5 6 7 8]
+	// 6: []
+	// 7: [9 9]
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
new file mode 100644
index 0000000..4a72691
--- /dev/null
+++ b/vendor/vendor.json
@@ -0,0 +1,19 @@
+{
+	"comment": "",
+	"ignore": "",
+	"package": [
+		{
+			"checksumSHA1": "yUc84k7cfnRi9AlPFuRo77Y18Og=",
+			"path": "github.com/golang/glog",
+			"revision": "23def4e6c14b4da8ac2ed8007337bc5eb5007998",
+			"revisionTime": "2016-01-25T20:49:56Z"
+		},
+		{
+			"checksumSHA1": "X5ivYXvxs8wLkNqaPMBcuzNKEv4=",
+			"path": "github.com/google/gopacket",
+			"revision": "3eaba08943250fd212520e5cff00ed808b8fc60a",
+			"revisionTime": "2021-04-29T15:22:48Z"
+		}
+	],
+	"rootPath": "github.com/zr-hebo/sniffer-agent"
+}