This commit is contained in:
jtjing 2018-10-05 22:28:10 +08:00
parent 7dade7158a
commit a50ccb4caa
5 changed files with 438 additions and 0 deletions

View File

@ -9,6 +9,7 @@ import (
redis "github.com/40t/go-sniffer/plugSrc/redis/build"
hp "github.com/40t/go-sniffer/plugSrc/http/build"
mongodb "github.com/40t/go-sniffer/plugSrc/mongodb/build"
kafka "github.com/40t/go-sniffer/plugSrc/kafka/build"
"path/filepath"
"fmt"
"path"
@ -82,6 +83,9 @@ func (p *Plug) LoadInternalPlugList() {
//Mongodb
list["mongodb"] = mongodb.NewInstance()
//kafka
list["kafka"] = kafka.NewInstance()
//Redis
list["redis"] = redis.NewInstance()

View File

@ -0,0 +1,29 @@
package build
// Kafka wire-protocol api keys, carried in the request header to
// identify the request type. Values match the kafka protocol spec.
// Note: 4-7 are broker-internal control APIs and 17 (SaslHandshake)
// is deliberately absent.
const (
	ProduceRequest  = 0
	FetchRequest    = 1
	OffsetRequest   = 2
	MetadataRequest = 3
	//Non-user facing control APIs = 4-7
	OffsetCommitRequest     = 8
	OffsetFetchRequest      = 9
	GroupCoordinatorRequest = 10
	JoinGroupRequest        = 11
	HeartbeatRequest        = 12
	LeaveGroupRequest       = 13
	SyncGroupRequest        = 14
	DescribeGroupsRequest   = 15
	ListGroupsRequest       = 16
	APIVersionsReqKind      = 18
	CreateTopicsReqKind     = 19
)
// Kafka request api versions, carried in the request header next to
// the api key; request body layout varies with this value.
const (
	ApiV0 = 0
	ApiV1 = 1
	ApiV2 = 2
	ApiV3 = 3
	ApiV4 = 4
	ApiV5 = 5
)

View File

@ -0,0 +1,232 @@
package build
import (
"bytes"
"fmt"
"github.com/google/gopacket"
"io"
"strconv"
"sync"
)
const (
	Port    = 9092 // default kafka broker port to capture on
	Version = "0.1" // plugin version string reported via Version()
	CmdPort = "-p" // command-line flag that overrides the port (see SetFlag)
)
// Kafka is the sniffer plugin state: the TCP port to capture, the
// plugin version string, and one stream per observed connection.
type Kafka struct {
	port    int
	version string
	source  map[string]*stream // keyed by "netHash:transportHash" (see ResolveStream)
}

// stream buffers decoded packets for a single TCP connection and
// feeds them to its resolve goroutine.
type stream struct {
	packets chan *packet
}

// packet is one length-prefixed kafka message plus its decoded header.
// Depending on direction, exactly one of the embedded requestHeader /
// responseHeader is populated.
type packet struct {
	isClientFlow bool // true: client -> server (request); false: server -> client (response)
	messageSize  int32
	requestHeader
	responseHeader
	payload io.Reader
}

// requestHeader is the common kafka request header
// (api key, api version, correlation id, client id).
type requestHeader struct {
	apiKey        int16
	apiVersion    int16
	correlationId int32
	clientId      string
}

// responseHeader is the common kafka response header.
type responseHeader struct {
	correlationId int32
}

// messageSet is the fixed 12-byte header of a message-set entry.
type messageSet struct {
	offset      int64
	messageSize int32
}
// newMessageSet decodes the fixed message-set header (8-byte offset
// followed by 4-byte message size) from r.
func newMessageSet(r io.Reader) messageSet {
	var ms messageSet
	ms.offset = ReadInt64(r)
	ms.messageSize = ReadInt32(r)
	return ms
}
// message is the body of a single kafka message (v0/v1 record layout:
// crc, magic byte, attributes, key, value).
// NOTE(review): not referenced by the decoding code visible in this
// file — confirm whether it is used elsewhere or is dead.
type message struct {
	crc        int32
	magicByte  int8
	attributes int8
	key        []byte
	value      []byte
}
// kafkaInstance is the process-wide plugin singleton; once guards its
// one-time construction in NewInstance.
var kafkaInstance *Kafka
var once sync.Once
// NewInstance returns the singleton Kafka plugin, creating it on the
// first call with the default port and version.
func NewInstance() *Kafka {
	once.Do(func() {
		kafkaInstance = &Kafka{
			port:    Port,
			version: Version,
			source:  map[string]*stream{},
		}
	})
	return kafkaInstance
}
// SetFlag parses command-line style options for the kafka plugin.
// Only "-p <port>" is currently supported. Arguments must come as
// alternating key/value pairs; invalid input panics, matching the
// behaviour of the project's other plugins.
//
// Fixes over the previous version: the pair-count check accepted an
// odd count of 3 (indexing out of range) and rejected valid multi-pair
// input; the panic message said "Mongodb" (copy-paste); the port was
// stored before being range-checked; the global kafkaInstance was
// mutated instead of the receiver.
func (m *Kafka) SetFlag(flg []string) {
	c := len(flg)
	if c == 0 {
		return
	}
	// options must come in key/value pairs
	if c%2 != 0 {
		panic("Kafka参数数量不正确!")
	}
	for i := 0; i < c; i += 2 {
		key := flg[i]
		val := flg[i+1]
		switch key {
		case CmdPort:
			p, err := strconv.Atoi(val)
			if err != nil {
				panic("端口数不正确")
			}
			// validate before storing so an invalid port is never kept
			if p < 0 || p > 65535 {
				panic("参数不正确: 端口范围(0-65535)")
			}
			m.port = p
		default:
			panic("参数不正确")
		}
	}
}
// BPFFilter returns the pcap filter expression used to capture
// kafka traffic on the configured port.
func (m *Kafka) BPFFilter() string {
	port := strconv.Itoa(m.port)
	return "tcp and port " + port
}
// Version returns the plugin's version string.
func (m *Kafka) Version() string {
	return m.version
}
// ResolveStream is the entry point the sniffer calls for each captured
// TCP flow. It demultiplexes messages into a per-connection stream
// (keyed by hashes of the network and transport endpoints), starting a
// resolve goroutine for each new connection, then pumps packets from
// buf into that stream's channel until newPacket signals end-of-flow
// by returning nil.
//
// NOTE(review): m.source is read and written here without a lock; if
// the capture layer calls ResolveStream from multiple goroutines this
// is a data race — confirm against the caller.
func (m *Kafka) ResolveStream(net, transport gopacket.Flow, buf io.Reader) {
	// connection identity: same key for all packets of this flow
	uuid := fmt.Sprintf("%v:%v", net.FastHash(), transport.FastHash())
	// first packet of a new connection: create its stream and decoder
	if _, ok := m.source[uuid]; !ok {
		var newStream = stream{
			packets: make(chan *packet, 100),
		}
		m.source[uuid] = &newStream
		go newStream.resolve()
	}
	//read bi-directional packet
	//server -> client || client -> server
	for {
		newPacket := m.newPacket(net, transport, buf)
		if newPacket == nil {
			return
		}
		m.source[uuid].packets <- newPacket
	}
}
// newPacket reads one length-prefixed kafka message from r and decodes
// its header. Direction is inferred from the transport source port:
// traffic originating from m.port is server->client, everything else
// is client->server. Returns nil when the flow has ended or cannot be
// parsed, which tells ResolveStream to stop pumping this flow.
func (m *Kafka) newPacket(net, transport gopacket.Flow, r io.Reader) *packet {
	pk := packet{}

	// every kafka message starts with a 4-byte big-endian size
	pk.messageSize = ReadInt32(r)

	// ReadInt32 swallows read errors and yields 0. Without this guard a
	// closed or garbled flow would make the caller loop forever: CopyN
	// with a non-positive count returns (0, nil) and we'd emit empty
	// packets indefinitely.
	if pk.messageSize <= 0 {
		return nil
	}

	if transport.Src().String() == strconv.Itoa(m.port) {
		// server -> client: header is just the 4-byte correlation id
		pk.isClientFlow = false

		respHeader := responseHeader{}
		respHeader.correlationId = ReadInt32(r)
		pk.responseHeader = respHeader

		var buf bytes.Buffer
		// messageSize counts everything after the size field; the
		// 4-byte correlation id has already been consumed
		if _, err := io.CopyN(&buf, r, int64(pk.messageSize-4)); err != nil {
			if err == io.EOF {
				fmt.Println(net, transport, " 关闭")
				return nil
			}
			fmt.Println("流解析错误", net, transport, ":", err)
			return nil
		}
		pk.payload = &buf
	} else {
		// client -> server: header = apiKey(2) apiVersion(2)
		// correlationId(4) clientId(2-byte length prefix + bytes)
		pk.isClientFlow = true

		var clientIdLen = 0
		reqHeader := requestHeader{}
		reqHeader.apiKey = ReadInt16(r)
		reqHeader.apiVersion = ReadInt16(r)
		reqHeader.correlationId = ReadInt32(r)
		reqHeader.clientId, clientIdLen = ReadString(r)
		pk.requestHeader = reqHeader

		var buf bytes.Buffer
		// 10 = apiKey + apiVersion + correlationId + clientId length prefix
		if _, err := io.CopyN(&buf, r, int64(pk.messageSize-10)-int64(clientIdLen)); err != nil {
			if err == io.EOF {
				fmt.Println(net, transport, " 关闭")
				return nil
			}
			fmt.Println("流解析错误", net, transport, ":", err)
			return nil
		}
		pk.payload = &buf
	}

	return &pk
}
// resolve drains the stream's packet channel, dispatching each packet
// to the client- or server-side decoder. Runs as a goroutine started
// by ResolveStream.
//
// Uses range instead of the previous for+single-case select: receive
// semantics are identical while the channel stays open, but if the
// channel is ever closed this loop exits cleanly instead of receiving
// nil packets and panicking on the field access.
func (stm *stream) resolve() {
	for pk := range stm.packets {
		if pk.isClientFlow {
			stm.resolveClientPacket(pk)
		} else {
			stm.resolveServerPacket(pk)
		}
	}
}
// resolveServerPacket decodes a server->client (response) packet.
// Placeholder: response parsing is not implemented yet.
func (stm *stream) resolveServerPacket(pk *packet) {
	return
}
// resolveClientPacket decodes a client->server (request) packet and
// prints a human-readable summary. Only ProduceRequest is handled;
// other api keys are silently ignored for now.
func (stm *stream) resolveClientPacket(pk *packet) {
	var msg string
	payload := pk.payload
	switch int(pk.apiKey) {
	case ProduceRequest:
		msg = ReadProduceRequest(payload, pk.apiVersion)
	}
	// emit the parsed summary (the raw apiKey debug prints are gone);
	// unhandled request types produce no output
	if msg != "" {
		fmt.Println(msg)
	}
}

View File

@ -0,0 +1,79 @@
package build
import (
"fmt"
"io"
"time"
)
// Message is a single produced kafka message together with its
// position metadata (topic, partition, offset).
type Message struct {
	Key       []byte
	Value     []byte
	Offset    int64 // offset within the partition
	Crc       uint32
	Topic     string
	Partition int32
	TipOffset int64 // presumably the partition high-water mark — TODO confirm
}
/**
Produce request Protocol
v0, v1 (supported in 0.9.0 or later) and v2 (supported in 0.10.0 or later)
ProduceRequest => RequiredAcks Timeout [TopicName [Partition MessageSetSize MessageSet]]
RequiredAcks => int16
Timeout => int32
Partition => int32
MessageSetSize => int32
*/
// ProduceReq mirrors the kafka ProduceRequest body:
// [TransactionalID (v3+)] RequiredAcks Timeout [Topic [Partition MessageSet]].
type ProduceReq struct {
	TransactionalID string // only present on the wire for api version >= 3
	RequiredAcks    int16
	Timeout         time.Duration
	Topics          []ProduceReqTopic
}

// ProduceReqTopic is one topic entry in a ProduceRequest.
type ProduceReqTopic struct {
	Name       string
	Partitions []ProduceReqPartition
}

// ProduceReqPartition is one partition entry within a topic.
type ProduceReqPartition struct {
	ID       int32
	Messages []*Message
}
// ReadProduceRequest decodes the header portion of a kafka
// ProduceRequest from r and returns a printable summary of the topics
// it addresses.
//
// Fixes over the previous version: the decoded fields were split
// across two separate ProduceReq values (acks/timeout in one, topics
// in the other) and the function always returned ""; debug Printlns
// and a shadowed loop variable are removed.
//
// Only the header, topic names and partition counts are decoded; the
// per-partition message sets are left unread in r.
// TODO(review): consume the message sets so r ends positioned after
// the full request.
func ReadProduceRequest(r io.Reader, version int16) string {
	req := ProduceReq{}

	// v3+ prepends a transactional id to the request body
	if int(version) >= ApiV3 {
		req.TransactionalID, _ = ReadString(r)
	}
	req.RequiredAcks = ReadInt16(r)
	req.Timeout = time.Duration(ReadInt32(r)) * time.Millisecond

	topicCount := ReadInt32(r)
	req.Topics = make([]ProduceReqTopic, topicCount)

	var msg string
	for ti := range req.Topics {
		topic := &req.Topics[ti]
		topic.Name, _ = ReadString(r)
		partitionCount := ReadInt32(r)
		topic.Partitions = make([]ProduceReqPartition, partitionCount)
		msg += fmt.Sprintf("topic:%s partitions:%d ", topic.Name, partitionCount)
	}
	return msg
}

View File

@ -0,0 +1,94 @@
package build
import (
"encoding/binary"
"io"
"time"
)
// GetNowStr returns the current timestamp followed by a direction tag
// indicating which side of the connection sent the packet.
func GetNowStr(isClient bool) string {
	const layout = "01/02 15:04:05.000000"
	tag := "| ser -> cli |"
	if isClient {
		tag = "| cli -> ser |"
	}
	return time.Now().Format(layout) + tag
}
func IsEof(r io.Reader) bool {
buf := make([]byte, 1)
_, err := r.Read(buf)
if err != nil {
return true
}
return false
}
// ReadOnce is an empty placeholder.
// TODO(review): unused in this file — implement or remove.
func ReadOnce() {
}
func ReadInt16(r io.Reader) (n int16) {
binary.Read(r, binary.BigEndian, &n)
return
}
func ReadInt32(r io.Reader) (n int32) {
binary.Read(r, binary.BigEndian, &n)
return
}
func ReadInt64(r io.Reader) (n int64) {
binary.Read(r, binary.BigEndian, &n)
return
}
func ReadString(r io.Reader) (string, int) {
l := int(ReadInt16(r))
//-1 => null
if l == -1 {
return " ",1
}
str := make([]byte, l)
if _, err := io.ReadFull(r, str); err != nil {
panic(err)
}
return string(str), l
}
//
//func TryReadInt16(r io.Reader) (n int16, err error) {
//
// if err := binary.Read(r, binary.BigEndian, &n); err != nil {
// if n == -1 {
// return 1,nil
// }
// panic(err)
// }
//}
func ReadBytes(r io.Reader) []byte {
l := int(ReadInt32(r))
var result []byte
var b = make([]byte, l)
for i:=0;i<l;i++ {
_, err := r.Read(b)
if err != nil {
panic(err)
}
result = append(result, b[0])
}
return result
}