Merge pull request #14 from zr-hebo/buffer-pool

Shared-memory bug fix
河伯 2019-12-17 20:44:41 +08:00 committed by GitHub
commit e5d9649827
3 changed files with 24 additions and 25 deletions
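
Reading the diff: the shared-memory JSON buffer pool in the model package is retired, since its recycled byte slices could still be referenced by the Kafka exporter while being overwritten for the next query piece, and every query piece is now marshaled into its own freshly allocated slice. Independently, the kafka exporter gains a compress-type flag mapped onto sarama's compression codecs. The short Go sketches interleaved below are annotations, not part of the commit.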

View File

@@ -15,6 +15,8 @@ var (
 	kafkaGroupID string
 	asyncTopic   string
 	syncTopic    string
+	compress     string
+	compressType sarama.CompressionCodec
 )

 func init() {
@@ -27,8 +29,11 @@ func init() {
 		&asyncTopic,
 		"kafka-async-topic", "", "kafka async send topic. No default value")
 	flag.StringVar(
 		&syncTopic,
 		"kafka-sync-topic", "", "kafka sync send topic. No default value")
+	flag.StringVar(
+		&compress,
+		"compress-type", "", "kafka message compress type. Default value is no compress")
 }

 type kafkaExporter struct {
@@ -39,6 +44,20 @@ type kafkaExporter struct {
 }

 func checkParams() {
+	switch compress {
+	case "":
+		compressType = sarama.CompressionNone
+	case "gzip":
+		compressType = sarama.CompressionGZIP
+	case "snappy":
+		compressType = sarama.CompressionSnappy
+	case "lz4":
+		compressType = sarama.CompressionLZ4
+	default:
+		panic(fmt.Sprintf("cannot support compress type: %s", compress))
+	}
+	fmt.Printf("kafka message compress type: %s", compress)
+
 	params := make(map[string]string)
 	params["kafka-server"] = kafkaServer
 	params["kafka-group-id"] = kafkaGroupID
@@ -57,6 +76,7 @@ func NewKafkaExporter() (ke *kafkaExporter) {
 	conf := sarama.NewConfig()
 	conf.Producer.Return.Successes = true
 	conf.ClientID = kafkaGroupID
+	conf.Producer.Compression = compressType
 	addrs := strings.Split(kafkaServer, ",")
 	syncProducer, err := sarama.NewSyncProducer(addrs, conf)
 	if err != nil {
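
For context, sarama applies the configured codec transparently inside the producer, so the single Producer.Compression assignment above is all the wiring the exporter needs. A minimal standalone sketch of that configuration; the broker address and topic are placeholders, not values from this repo:

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Producer.Return.Successes = true // SyncProducer requires this
	conf.Producer.Compression = sarama.CompressionSnappy

	// "localhost:9092" and "example-topic" are placeholder values.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder(`{"sql":"SELECT 1"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("delivered to partition %d at offset %d\n", partition, offset)
}

Snappy or LZ4 are common picks for high-volume capture traffic, trading some compression ratio for low CPU overhead.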

View File

@@ -1,8 +1,6 @@
 package model

 import (
-	"bytes"
-	"encoding/json"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/pingcap/tidb/util/hack"
 	"time"
@@ -82,8 +80,8 @@ func (bqp *BaseQueryPiece) GetSQL() (*string) {
 func (bqp *BaseQueryPiece) Recovery() {
 }

+/**
 func marsharQueryPieceShareMemory(qp interface{}, cacheBuffer []byte) []byte {
 	buffer := bytes.NewBuffer(cacheBuffer)
 	err := json.NewEncoder(buffer).Encode(qp)
 	if err != nil {
@@ -92,6 +90,7 @@ func marsharQueryPieceShareMemory(qp interface{}, cacheBuffer []byte) []byte {
 	return buffer.Bytes()
 }
+*/

 func marsharQueryPieceMonopolize(qp interface{}) (content []byte) {
 	content, err := jsonIterator.Marshal(qp)
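
The block-commented shared-memory path above is presumably the bug named in the PR title: buffer.Bytes() returns a slice that aliases the pooled cacheBuffer, so once that buffer is recycled and encoded into again, bytes still held by a downstream consumer are silently overwritten. A self-contained sketch of the hazard, standard library only, with illustrative names:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// One pooled slice reused across encodings, as the removed code did.
	cache := make([]byte, 0, 64)

	buf := bytes.NewBuffer(cache)
	_ = json.NewEncoder(buf).Encode(map[string]string{"sql": "SELECT 1"})
	held := buf.Bytes() // aliases cache; imagine the exporter still holds this

	// The query piece is "recovered": its slice returns to the pool and is reused.
	buf2 := bytes.NewBuffer(held[:0])
	_ = json.NewEncoder(buf2).Encode(map[string]string{"sql": "SELECT 2"})

	fmt.Printf("%s", held) // prints {"sql":"SELECT 2"}: held bytes were clobbered
}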

View File

@@ -1,19 +1,13 @@
 package model

 import (
-	"github.com/zr-hebo/sniffer-agent/util"
 	"sync"
 	"time"
 )

-var (
-	localSliceBufferPool = util.NewSliceBufferPool("json cache", (128+1)*1024)
-)
-
 type PooledMysqlQueryPiece struct {
 	MysqlQueryPiece
 	recoverPool     *mysqlQueryPiecePool
-	sliceBufferPool *util.SliceBufferPool
 }

 func NewPooledMysqlQueryPiece(
@@ -22,7 +16,6 @@ func NewPooledMysqlQueryPiece(
 	pmqp *PooledMysqlQueryPiece) {
 	pmqp = mqpp.Dequeue()
-	pmqp.sliceBufferPool = localSliceBufferPool
 	nowInMS := time.Now().UnixNano() / millSecondUnit
 	pmqp.SessionID = sessionID
 	pmqp.ClientHost = clientIP
@@ -41,9 +34,6 @@ func NewPooledMysqlQueryPiece(
 }

 func (pmqp *PooledMysqlQueryPiece) Recovery() {
-	if pmqp.sliceBufferPool != nil {
-		pmqp.sliceBufferPool.Enqueue(pmqp.jsonContent[:0])
-	}
 	pmqp.jsonContent = nil
 	pmqp.recoverPool.Enqueue(pmqp)
 }
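
Recovery() now merely drops its reference to jsonContent before re-enqueueing the piece, instead of pushing the slice into a buffer pool the exporter might still be reading from. A minimal sketch of that recycle-but-detach pattern, using sync.Pool for brevity in place of the project's mysqlQueryPiecePool:

package main

import (
	"fmt"
	"sync"
)

type piece struct {
	jsonContent []byte
}

var pool = sync.Pool{New: func() interface{} { return new(piece) }}

func recycle(p *piece) {
	// Detach the slice rather than recycling it: a consumer may still read it.
	p.jsonContent = nil
	pool.Put(p)
}

func main() {
	p := pool.Get().(*piece)
	p.jsonContent = []byte(`{"sql":"SELECT 1"}`)
	sent := p.jsonContent // the exporter keeps its own reference
	recycle(p)

	q := pool.Get().(*piece) // may hand back the same object
	q.jsonContent = []byte(`{"sql":"SELECT 2"}`)

	fmt.Printf("%s\n", sent) // still {"sql":"SELECT 1"}: nothing was overwritten
}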
@@ -59,17 +49,7 @@ func (pmqp *PooledMysqlQueryPiece) Bytes() (content []byte) {
 }

 func (pmqp *PooledMysqlQueryPiece) GenerateJsonBytes() {
-	if pmqp.sliceBufferPool == nil {
-		pmqp.jsonContent = marsharQueryPieceMonopolize(pmqp)
-		return
-	}
-
-	var cacheBuffer = pmqp.sliceBufferPool.Dequeue()
-	if len(cacheBuffer) > 0 {
-		panic("there already have bytes in buffer")
-	}
-	pmqp.jsonContent = marsharQueryPieceShareMemory(pmqp, cacheBuffer)
+	pmqp.jsonContent = marsharQueryPieceMonopolize(pmqp)
 	return
 }
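
With the pooled-buffer branch gone, every piece takes the monopolize path: jsoniter hands back a freshly allocated slice whose lifetime is independent of the recycled struct, at the cost of one allocation per query piece. A minimal sketch, assuming jsonIterator is a package-level jsoniter instance; the exact configuration used in this repo is not visible in the diff:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Assumed equivalent of the package-level iterator in the model package.
var jsonIterator = jsoniter.ConfigCompatibleWithStandardLibrary

type queryPiece struct {
	SessionID string `json:"session_id"`
	SQL       string `json:"sql"`
}

// marshal mirrors marsharQueryPieceMonopolize: a fresh slice every call, so
// recycling the query piece cannot invalidate bytes already handed out.
func marshal(qp interface{}) []byte {
	content, err := jsonIterator.Marshal(qp)
	if err != nil {
		return nil
	}
	return content
}

func main() {
	fmt.Printf("%s\n", marshal(&queryPiece{SessionID: "s1", SQL: "SELECT 1"}))
}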