databus.go

package service

import (
	"bytes"
	"context"
	"fmt"
	"time"

	"go-common/app/admin/main/apm/conf"
	"go-common/app/admin/main/apm/model/databus"
	"go-common/library/ecode"
	"go-common/library/log"

	"github.com/Shopify/sarama"
	scluster "github.com/bsm/sarama-cluster"
)

var (
	// commitInterval is how often the sarama offset manager flushes
	// committed offsets to the brokers.
	commitInterval = 20 * time.Millisecond
)

// Client is a kafka client bound to a single topic and consumer group.
type Client struct {
	sarama.Client
	config *sarama.Config
	addrs  []string
	topic  string
	group  string
}

// NewClient returns a new kafka client instance.
func NewClient(addrs []string, topic, group string) (c *Client, err error) {
	config := sarama.NewConfig()
	config.Version = sarama.V1_0_0_0
	config.Consumer.Offsets.CommitInterval = commitInterval
	config.Consumer.Offsets.Initial = sarama.OffsetOldest
	config.Consumer.Return.Errors = true
	c = &Client{
		config: config,
		addrs:  addrs,
		topic:  topic,
		group:  group,
	}
	c.Client, err = sarama.NewClient(addrs, config)
	return
}
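
// A minimal usage sketch; the broker address, topic and group are hypothetical:
//
//	c, err := NewClient([]string{"127.0.0.1:9092"}, "demo-topic", "demo-group")
//	if err != nil {
//		log.Error("NewClient error(%v)", err)
//		return
//	}
//	defer c.Close()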

// Close closes the underlying kafka client.
func (c *Client) Close() {
	c.Client.Close()
}

// Topics returns the list of topics in the kafka cluster.
// It panics if the metadata request fails.
func (c *Client) Topics() []string {
	topics, err := c.Client.Topics()
	if err != nil {
		panic(err)
	}
	return topics
}

// Partitions returns the list of partitions for the client's topic.
// It panics if the metadata request fails.
func (c *Client) Partitions() []int32 {
	ps, err := c.Client.Partitions(c.topic)
	if err != nil {
		panic(err)
	}
	return ps
}

// OffsetNew returns the newest offset of every partition for the topic.
func (c *Client) OffsetNew() (info map[int32]int64, err error) {
	var offset int64
	ps, err := c.Client.Partitions(c.topic)
	if err != nil {
		return
	}
	info = make(map[int32]int64)
	for _, p := range ps {
		offset, err = c.Client.GetOffset(c.topic, p, sarama.OffsetNewest)
		if err != nil {
			return
		}
		info[p] = offset
	}
	return
}

// OffsetOld returns the oldest offset of every partition for the topic.
func (c *Client) OffsetOld() (info map[int32]int64, err error) {
	var offset int64
	ps, err := c.Client.Partitions(c.topic)
	if err != nil {
		return
	}
	info = make(map[int32]int64)
	for _, p := range ps {
		offset, err = c.Client.GetOffset(c.topic, p, sarama.OffsetOldest)
		if err != nil {
			return
		}
		info[p] = offset
	}
	return
}
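
// A minimal sketch inspecting the retained offset range of every partition
// (client construction as in the NewClient example above):
//
//	newest, err := c.OffsetNew()
//	if err != nil {
//		return
//	}
//	oldest, _ := c.OffsetOld()
//	for p, hi := range newest {
//		log.Info("partition:%d retained range [%d, %d)", p, oldest[p], hi)
//	}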

// SetOffset marks and commits the offset of a partition to kafka. Sarama's
// MarkOffset only moves the committed position forward; use ResetOffset to
// rewind.
func (c *Client) SetOffset(partition int32, offset int64) (err error) {
	om, err := sarama.NewOffsetManagerFromClient(c.group, c.Client)
	if err != nil {
		return
	}
	defer om.Close()
	pm, err := om.ManagePartition(c.topic, partition)
	if err != nil {
		return
	}
	pm.MarkOffset(offset, "")
	if err = pm.Close(); err != nil {
		return
	}
	// Give the offset manager's commit loop time to flush, then verify.
	time.Sleep(10 * commitInterval)
	marked, err := c.OffsetMarked()
	log.Info("partition:%d, before:%d, after:%d\n", partition, offset, marked[partition])
	return
}

// ResetOffset commits the offset of a partition to kafka even when it is
// older than the currently committed one, i.e. it can rewind the group.
func (c *Client) ResetOffset(partition int32, offset int64) (err error) {
	om, err := sarama.NewOffsetManagerFromClient(c.group, c.Client)
	if err != nil {
		return
	}
	defer om.Close()
	pm, err := om.ManagePartition(c.topic, partition)
	if err != nil {
		return
	}
	pm.ResetOffset(offset, "")
	if err = pm.Close(); err != nil {
		return
	}
	// Give the offset manager's commit loop time to flush, then verify.
	time.Sleep(10 * commitInterval)
	marked, err := c.OffsetMarked()
	log.Info("partition:%d, before:%d, after:%d\n", partition, offset, marked[partition])
	return
}

// SeekBegin commits the oldest offset of every partition to kafka,
// rewinding the group to the beginning of the topic.
func (c *Client) SeekBegin() (err error) {
	offsets, err := c.OffsetOld()
	if err != nil {
		return
	}
	for partition, offset := range offsets {
		if err = c.ResetOffset(partition, offset); err != nil {
			log.Info("partition:%d, offset:%d, topic:%s, group:%s\n", partition, offset, c.topic, c.group)
			return
		}
	}
	return
}

// SeekEnd commits the newest offset of every partition to kafka,
// skipping the group past the current backlog.
func (c *Client) SeekEnd() (err error) {
	offsets, err := c.OffsetNew()
	if err != nil {
		return
	}
	for partition, offset := range offsets {
		if err = c.SetOffset(partition, offset); err != nil {
			log.Info("partition:%d, offset:%d, topic:%s, group:%s\n", partition, offset, c.topic, c.group)
			return
		}
	}
	return
}
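
// A minimal sketch of the two seek helpers:
//
//	// Replay the topic from its oldest retained offsets:
//	if err := c.SeekBegin(); err != nil {
//		log.Error("SeekBegin error(%v)", err)
//	}
//	// Or skip the group past the current backlog instead:
//	if err := c.SeekEnd(); err != nil {
//		log.Error("SeekEnd error(%v)", err)
//	}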

// OffsetMarked fetches the committed (marked) offset of every partition
// for the group.
func (c *Client) OffsetMarked() (marked map[int32]int64, err error) {
	req := &sarama.OffsetFetchRequest{
		Version:       1,
		ConsumerGroup: c.group,
	}
	partitions := c.Partitions()
	marked = make(map[int32]int64)
	broker, err := c.Client.Coordinator(c.group)
	if err != nil {
		return
	}
	defer broker.Close()
	for _, partition := range partitions {
		req.AddPartition(c.topic, partition)
	}
	resp, err := broker.FetchOffset(req)
	if err != nil {
		return
	}
	for _, p := range partitions {
		block := resp.GetBlock(c.topic, p)
		if block == nil {
			err = sarama.ErrIncompleteResponse
			return
		}
		if block.Err != sarama.ErrNoError {
			err = block.Err
			log.Info("block error(%v)", err)
			return
		}
		marked[p] = block.Offset
	}
	return
}
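
// A minimal sketch reading the group's committed positions (client as above):
//
//	marked, err := c.OffsetMarked()
//	if err != nil {
//		log.Error("OffsetMarked error(%v)", err)
//		return
//	}
//	for p, off := range marked {
//		log.Info("partition:%d committed:%d", p, off)
//	}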

// Diff returns, for every partition, the newest offset and its distance
// from the group's committed offset (the consumer lag).
func Diff(cluster, topic, group string) (records []*databus.Record, err error) {
	client, err := NewClient(conf.Conf.Kafka[cluster].Brokers, topic, group)
	if err != nil {
		log.Error("service.NewClient() error(%v)\n", err)
		return
	}
	defer client.Close()
	marked, err := client.OffsetMarked()
	if err != nil {
		log.Error("client.OffsetMarked() error(%v)\n", err)
		return
	}
	newest, err := client.OffsetNew()
	if err != nil {
		log.Error("client.OffsetNew() error(%v)\n", err)
		return
	}
	records = make([]*databus.Record, 0, len(newest))
	for partition, offset := range newest {
		r := &databus.Record{
			Partition: partition,
			New:       offset,
		}
		if tmp, ok := marked[partition]; ok {
			r.Diff = offset - tmp
		}
		records = append(records, r)
	}
	return
}
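
// A minimal lag-report sketch; the cluster key "local" is hypothetical and
// must exist in conf.Conf.Kafka:
//
//	records, err := Diff("local", "demo-topic", "demo-group")
//	if err != nil {
//		return
//	}
//	for _, r := range records {
//		log.Info("partition:%d newest:%d lag:%d", r.Partition, r.New, r.Diff)
//	}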

// CreateTopic creates a topic on the kafka cluster unless it already exists.
func CreateTopic(addrs []string, topic string, partitions int32, factor int16) (err error) {
	config := sarama.NewConfig()
	config.Version = sarama.V1_0_0_0
	config.Consumer.Return.Errors = true
	cli, err := sarama.NewClient(addrs, config)
	if err != nil {
		return
	}
	defer cli.Close()
	var topics []string
	topics, err = cli.Topics()
	if err != nil {
		log.Error("CreateTopic get all topics on cluster err(%v)", err)
		return
	}
	for _, t := range topics {
		if t == topic {
			return
		}
	}
	broker, err := cli.Controller()
	if err != nil {
		return
	}
	// retention := "-1"
	req := &sarama.CreateTopicsRequest{
		TopicDetails: map[string]*sarama.TopicDetail{
			topic: {
				NumPartitions:     partitions,
				ReplicationFactor: factor,
				// ConfigEntries: map[string]*string{
				// 	"retention.ms": &retention,
				// },
			},
		},
		Timeout: time.Second,
	}
	var res *sarama.CreateTopicsResponse
	if res, err = broker.CreateTopics(req); err != nil {
		log.Info("CreateTopic CreateTopics error(%v) res(%v)", err, res)
		return
	}
	if !(res.TopicErrors[topic].Err == sarama.ErrNoError || res.TopicErrors[topic].Err == sarama.ErrTopicAlreadyExists) {
		log.Error("CreateTopic CreateTopics kafka topic create error(%v) topic(%v)", res.TopicErrors[topic].Err, topic)
		err = res.TopicErrors[topic].Err
	}
	log.Info("CreateTopic CreateTopics kafka topic create info(%v) topic(%v)", res.TopicErrors[topic].Err, topic)
	return
}
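
// A minimal sketch creating a 4-partition topic with replication factor 3
// (the broker address is hypothetical):
//
//	if err := CreateTopic([]string{"127.0.0.1:9092"}, "demo-topic", 4, 3); err != nil {
//		log.Error("CreateTopic error(%v)", err)
//	}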

// FetchMessage fetches messages from a topic, filtered by key and by a
// timestamp range in milliseconds, up to limit messages.
func FetchMessage(c context.Context, cluster, topic, group, key string, start, end int64, limit int) (res []*databus.Message, err error) {
	res = make([]*databus.Message, 0)
	kc, ok := conf.Conf.Kafka[cluster]
	if !ok {
		err = ecode.NothingFound
		return
	}
	cfg := scluster.NewConfig()
	cfg.Version = sarama.V1_0_0_0
	cfg.ClientID = fmt.Sprintf("%s-%s", group, topic)
	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	cfg.Consumer.Return.Errors = true
	consumer, err := scluster.NewConsumer(kc.Brokers, group, []string{topic}, cfg)
	if err != nil {
		log.Error("fetchMsg NewConsumer(%v,%s,%s) error(%v)", kc.Brokers, group, topic, err)
		return
	}
	defer consumer.Close()
	bkey := []byte(key)
	for {
		select {
		case msg := <-consumer.Messages():
			consumer.MarkPartitionOffset(topic, msg.Partition, msg.Offset, "")
			// skip messages whose key does not match the requested one
			if key != "" && !bytes.Equal(bkey, msg.Key) {
				continue
			}
			if start > 0 && msg.Timestamp.UnixNano()/int64(time.Millisecond) < start {
				continue
			}
			r := &databus.Message{}
			r.Key = string(msg.Key)
			r.Value = string(msg.Value)
			r.Topic = topic
			r.Partition = msg.Partition
			r.Offset = msg.Offset
			r.Timestamp = msg.Timestamp.Unix()
			res = append(res, r)
			if len(res) >= limit {
				return
			}
			if end > 0 && msg.Timestamp.UnixNano()/int64(time.Millisecond) > end {
				return
			}
		case err = <-consumer.Errors():
			log.Error("fetchMsg error(%v)", err)
			return
		case <-time.After(time.Second * 10):
			err = ecode.Deadline
			return
		case <-c.Done():
			return
		}
	}
}
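
// A minimal sketch fetching up to 100 messages with key "uid:123" produced in
// the last hour (cluster/topic/group names are hypothetical; timestamps are
// in milliseconds, end=0 disables the upper bound):
//
//	start := time.Now().Add(-time.Hour).UnixNano() / int64(time.Millisecond)
//	msgs, err := FetchMessage(context.Background(), "local", "demo-topic", "demo-group", "uid:123", start, 0, 100)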

// NewOffset commits a new offset for a partition to kafka by delegating to
// ResetOffset.
func (c *Client) NewOffset(partition int32, offset int64) (err error) {
	if err = c.ResetOffset(partition, offset); err != nil {
		log.Info("partition:%d, offset:%d, topic:%s, group:%s\n", partition, offset, c.topic, c.group)
	}
	return
}

// OffsetNewTime returns, for every partition, the offset of the first
// message produced at or after the given timestamp (milliseconds).
func (c *Client) OffsetNewTime(ts int64) (info map[int32]int64, err error) {
	var (
		offset  int64
		message string
	)
	ps, err := c.Client.Partitions(c.topic)
	if err != nil {
		return
	}
	info = make(map[int32]int64)
	for _, p := range ps {
		offset, err = c.Client.GetOffset(c.topic, p, ts)
		message += fmt.Sprintf("partition:%v, offset:%v, topic:%v, group:%v, time:%v, err:%v\n", p, offset, c.topic, c.group, ts, err)
		if err != nil {
			log.Info(message)
			return
		}
		info[p] = offset
	}
	log.Info(message)
	return
}

// NewTime rewinds the group's committed offsets to the given timestamp
// (milliseconds) via OffsetNewTime and ResetOffset.
func (c *Client) NewTime(ts int64) (err error) {
	offsets, err := c.OffsetNewTime(ts)
	if err != nil {
		return
	}
	for partition, offset := range offsets {
		if err = c.ResetOffset(partition, offset); err != nil {
			log.Info("partition:%d, offset:%d, topic:%s, group:%s\n", partition, offset, c.topic, c.group)
			return
		}
	}
	return
}
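
// A minimal sketch rewinding a group ten minutes back in time (client as in
// the NewClient example; timestamps are milliseconds since the epoch):
//
//	ts := time.Now().Add(-10*time.Minute).UnixNano() / int64(time.Millisecond)
//	if err := c.NewTime(ts); err != nil {
//		log.Error("NewTime error(%v)", err)
//	}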