Merge pull request #14 from zr-hebo/buffer-pool

Shared memory bug fix
This commit is contained in:
河伯 2019-12-17 20:44:41 +08:00 committed by GitHub
commit e5d9649827
3 changed files with 24 additions and 25 deletions

View File

@@ -15,6 +15,8 @@ var (
 	kafkaGroupID string
 	asyncTopic   string
 	syncTopic    string
+	compress     string
+	compressType sarama.CompressionCodec
 )

 func init() {
@@ -29,6 +31,9 @@ func init() {
 	flag.StringVar(
 		&syncTopic,
 		"kafka-sync-topic", "", "kafka sync send topic. No default value")
+	flag.StringVar(
+		&compress,
+		"compress-type", "", "kafka message compress type. Default value is no compress")
 }

 type kafkaExporter struct {
@@ -39,6 +44,20 @@ type kafkaExporter struct {
 }

 func checkParams() {
+	switch compress {
+	case "":
+		compressType = sarama.CompressionNone
+	case "gzip":
+		compressType = sarama.CompressionGZIP
+	case "snappy":
+		compressType = sarama.CompressionSnappy
+	case "lz4":
+		compressType = sarama.CompressionLZ4
+	default:
+		panic(fmt.Sprintf("unsupported compress type: %s", compress))
+	}
+	fmt.Printf("kafka message compress type: %s\n", compress)
+
 	params := make(map[string]string)
 	params["kafka-server"] = kafkaServer
 	params["kafka-group-id"] = kafkaGroupID
@@ -57,6 +76,7 @@ func NewKafkaExporter() (ke *kafkaExporter) {
 	conf := sarama.NewConfig()
 	conf.Producer.Return.Successes = true
 	conf.ClientID = kafkaGroupID
+	conf.Producer.Compression = compressType
 	addrs := strings.Split(kafkaServer, ",")
 	syncProducer, err := sarama.NewSyncProducer(addrs, conf)
 	if err != nil {
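For context on how the new flag ends up on the wire: sarama applies the configured codec to every produced message batch, so mapping the string flag to a `sarama.CompressionCodec` once in `checkParams` is all the plumbing needed. A minimal, self-contained sketch of the same wiring (the standalone `main`, broker address, and topic name are assumptions for illustration, not the project's actual layout):

```go
package main

import (
	"flag"
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Hypothetical flag mirroring the one added in this commit.
	compress := flag.String("compress-type", "", "kafka message compress type")
	flag.Parse()

	// Map the string flag onto sarama's CompressionCodec, as checkParams does.
	var codec sarama.CompressionCodec
	switch *compress {
	case "":
		codec = sarama.CompressionNone
	case "gzip":
		codec = sarama.CompressionGZIP
	case "snappy":
		codec = sarama.CompressionSnappy
	case "lz4":
		codec = sarama.CompressionLZ4
	default:
		log.Fatalf("unsupported compress type: %s", *compress)
	}

	conf := sarama.NewConfig()
	conf.Producer.Return.Successes = true // required by SyncProducer
	conf.Producer.Compression = codec

	producer, err := sarama.NewSyncProducer([]string{"127.0.0.1:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic", // assumed topic name
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("delivered to partition %d at offset %d\n", partition, offset)
}
```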

View File

@@ -1,8 +1,6 @@
 package model

 import (
-	"bytes"
-	"encoding/json"
 	jsoniter "github.com/json-iterator/go"
 	"github.com/pingcap/tidb/util/hack"
 	"time"
@@ -82,8 +80,8 @@ func (bqp *BaseQueryPiece) GetSQL() (*string) {
 func (bqp *BaseQueryPiece) Recovery() {
 }
-
+/**
 func marsharQueryPieceShareMemory(qp interface{}, cacheBuffer []byte) []byte {
 	buffer := bytes.NewBuffer(cacheBuffer)
 	err := json.NewEncoder(buffer).Encode(qp)
 	if err != nil {
@@ -92,6 +90,7 @@ func marsharQueryPieceShareMemory(qp interface{}, cacheBuffer []byte) []byte {
 	return buffer.Bytes()
 }
+*/

 func marsharQueryPieceMonopolize(qp interface{}) (content []byte) {
 	content, err := jsonIterator.Marshal(qp)
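The function being commented out here encoded JSON into a caller-supplied buffer, so the returned slice aliased pooled memory; its replacement, marsharQueryPieceMonopolize, lets jsoniter allocate a fresh slice the caller owns outright. A small sketch of why the shared-buffer variant is dangerous once two encodes reuse the same backing array (the `piece` type is hypothetical, for illustration only):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type piece struct {
	SessionID string `json:"session_id"`
}

func main() {
	// Shared-buffer approach (the path commented out above): the returned
	// slice aliases cacheBuffer's backing array, so the caller must not
	// reuse the buffer while those bytes are still being read.
	cacheBuffer := make([]byte, 0, 1024)
	buf := bytes.NewBuffer(cacheBuffer)
	if err := json.NewEncoder(buf).Encode(piece{SessionID: "a"}); err != nil {
		panic(err)
	}
	shared := buf.Bytes()

	// A second encode into the same backing array silently overwrites the
	// bytes still referenced by shared.
	_ = json.NewEncoder(bytes.NewBuffer(cacheBuffer)).Encode(piece{SessionID: "b"})
	fmt.Printf("shared now reads: %s", shared) // no longer session_id "a"

	// Monopolized approach (what the commit keeps): jsoniter.Marshal
	// returns a freshly allocated slice owned by the caller.
	owned, _ := jsoniter.Marshal(piece{SessionID: "a"})
	fmt.Printf("owned stays intact: %s\n", owned)
}
```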

View File

@@ -1,19 +1,13 @@
 package model

 import (
-	"github.com/zr-hebo/sniffer-agent/util"
 	"sync"
 	"time"
 )

-var (
-	localSliceBufferPool = util.NewSliceBufferPool("json cache", (128+1)*1024)
-)
-
 type PooledMysqlQueryPiece struct {
 	MysqlQueryPiece
 	recoverPool     *mysqlQueryPiecePool
-	sliceBufferPool *util.SliceBufferPool
 }

 func NewPooledMysqlQueryPiece(
@@ -22,7 +16,6 @@ func NewPooledMysqlQueryPiece(
 	pmqp *PooledMysqlQueryPiece) {
 	pmqp = mqpp.Dequeue()
-	pmqp.sliceBufferPool = localSliceBufferPool
 	nowInMS := time.Now().UnixNano() / millSecondUnit
 	pmqp.SessionID = sessionID
 	pmqp.ClientHost = clientIP
@@ -41,9 +34,6 @@ func NewPooledMysqlQueryPiece(
 }

 func (pmqp *PooledMysqlQueryPiece) Recovery() {
-	if pmqp.sliceBufferPool != nil {
-		pmqp.sliceBufferPool.Enqueue(pmqp.jsonContent[:0])
-	}
 	pmqp.jsonContent = nil
 	pmqp.recoverPool.Enqueue(pmqp)
 }
@@ -59,18 +49,8 @@ func (pmqp *PooledMysqlQueryPiece) Bytes() (content []byte) {
 }

 func (pmqp *PooledMysqlQueryPiece) GenerateJsonBytes() {
-	if pmqp.sliceBufferPool == nil {
-		pmqp.jsonContent = marsharQueryPieceMonopolize(pmqp)
-		return
-	}
-
-	var cacheBuffer = pmqp.sliceBufferPool.Dequeue()
-	if len(cacheBuffer) > 0 {
-		panic("there already have bytes in buffer")
-	}
-	pmqp.jsonContent = marsharQueryPieceShareMemory(pmqp, cacheBuffer)
-	return
+	pmqp.jsonContent = marsharQueryPieceMonopolize(pmqp)
 }

 type mysqlQueryPiecePool struct {
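What this file removes is the buffer-recycling path itself: Recovery() used to hand jsonContent's backing array back to localSliceBufferPool while the asynchronous Kafka producer could still be reading those bytes, so the next Dequeue could scribble over an in-flight message, which is the shared-memory bug named in the PR title. A minimal sketch of the ordering that would make such recycling safe (sync.Pool and all names here are illustrative, not the project's util.SliceBufferPool API): recycle only after the consumer signals it is done.

```go
package main

import (
	"fmt"
	"sync"
)

// bufPool hands out reusable byte slices, similar in spirit to the
// util.SliceBufferPool that this commit stops using.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, 1024) },
}

// send simulates an async producer: it reads msg in the background and
// signals on done once it no longer needs the bytes.
func send(msg []byte, done chan<- struct{}) {
	go func() {
		fmt.Printf("sent: %s\n", msg) // the producer reads msg here
		done <- struct{}{}
	}()
}

func main() {
	buf := bufPool.Get().([]byte)[:0]
	buf = append(buf, `{"session_id":"a"}`...)

	done := make(chan struct{})
	send(buf, done)

	// Recycling here, before the producer has finished, reproduces the
	// bug: the next Get could hand the same backing array to another
	// query piece, which would overwrite the in-flight message.

	<-done               // wait until the consumer is done with the bytes
	bufPool.Put(buf[:0]) // only now is recycling safe
}
```

Dropping the pool entirely, as this commit does, trades some allocation pressure for correctness; the jsoniter path allocates a fresh slice per message, so no serialized bytes are ever shared between query pieces.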