multi_template.go

package main
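
// _multiTemplate is the code-generation template for a multi-key,
// read-through cache method: read the requested keys from cache, collect the
// misses, load them from the raw source (optionally deduplicated with
// singleflight or fetched in bounded parallel batches), merge the results,
// and write the missing entries back to cache. Tokens such as NAME, KEY,
// VALUE, CACHEFUNC, RAWFUNC, ADDCACHEFUNC, SFNUM, GROUPSIZE and MAXGROUP are
// placeholders substituted by the generator.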
var _multiTemplate = `
// NAME {{or .Comment "get data from cache if miss will call source method, then add to cache."}}
func (d *Dao) NAME(c context.Context, keys []KEY{{.ExtraArgsType}}) (res map[KEY]VALUE, err error) {
	if len(keys) == 0 {
		return
	}
	// try the cache first; on a cache error, degrade to the raw source and skip the write-back
	addCache := true
	if res, err = CACHEFUNC(c, keys {{.ExtraCacheArgs}}); err != nil {
		addCache = false
		res = nil
		err = nil
	}
	// collect the keys that were not found in cache
	var miss []KEY
	for _, key := range keys {
		{{if .GoValue}}
		if (res == nil) || (len(res[key]) == 0) {
		{{else}}
		{{if .NumberValue}}
		if _, ok := res[key]; !ok {
		{{else}}
		if (res == nil) || (res[key] == {{.ZeroValue}}) {
		{{end}}
		{{end}}
			miss = append(miss, key)
		}
	}
	prom.CacheHit.Add("NAME", int64(len(keys) - len(miss)))
	{{if .EnableNullCache}}
	// drop null-cache sentinels so callers never see the placeholder value
	for k, v := range res {
		{{if .SimpleValue}} if v == {{.NullCache}} { {{else}} if {{.CheckNullCode}} { {{end}}
			delete(res, k)
		}
	}
	{{end}}
	missLen := len(miss)
	if missLen == 0 {
		return
	}
	{{if .EnableBatch}}
	missData := make(map[KEY]VALUE, missLen)
	{{else}}
	var missData map[KEY]VALUE
	{{end}}
	{{if .EnableSingleFlight}}
	// collapse concurrent identical loads into a single raw fetch
	var rr interface{}
	sf := d.cacheSFNAME(keys {{.ExtraArgs}})
	rr, err, _ = cacheSingleFlights[SFNUM].Do(sf, func() (r interface{}, e error) {
		prom.CacheMiss.Add("NAME", int64(len(miss)))
		r, e = RAWFUNC(c, miss {{.ExtraRawArgs}})
		return
	})
	missData = rr.(map[KEY]VALUE)
	{{else}}
	{{if .EnableBatch}}
	// fetch the misses from the raw source in chunks of GROUPSIZE keys,
	// with at most MAXGROUP chunks running concurrently
	prom.CacheMiss.Add("NAME", int64(missLen))
	var mutex sync.Mutex
	{{if .BatchErrBreak}}
	group, ctx := errgroup.WithContext(c)
	{{else}}
	group := &errgroup.Group{}
	ctx := c
	{{end}}
	if missLen > MAXGROUP {
		group.GOMAXPROCS(MAXGROUP)
	}
	var run = func(ms []KEY) {
		group.Go(func() (err error) {
			data, err := RAWFUNC(ctx, ms {{.ExtraRawArgs}})
			mutex.Lock()
			for k, v := range data {
				missData[k] = v
			}
			mutex.Unlock()
			return
		})
	}
	var (
		i int
		n = missLen / GROUPSIZE
	)
	for i = 0; i < n; i++ {
		run(miss[i*GROUPSIZE : (i+1)*GROUPSIZE])
	}
	if len(miss[i*GROUPSIZE:]) > 0 {
		run(miss[i*GROUPSIZE:])
	}
	err = group.Wait()
	{{else}}
	prom.CacheMiss.Add("NAME", int64(len(miss)))
	missData, err = RAWFUNC(c, miss {{.ExtraRawArgs}})
	{{end}}
	{{end}}
	if res == nil {
		res = make(map[KEY]VALUE, len(keys))
	}
	for k, v := range missData {
		res[k] = v
	}
	if err != nil {
		return
	}
	{{if .EnableNullCache}}
	// remember keys the raw source also missed as null-cache sentinels,
	// so repeated lookups do not hit the source again
	for _, key := range miss {
		{{if .GoValue}}
		if len(res[key]) == 0 {
		{{else}}
		if res[key] == {{.ZeroValue}} {
		{{end}}
			missData[key] = {{.NullCache}}
		}
	}
	{{end}}
	if !addCache {
		return
	}
	// write the freshly loaded entries back to cache, either synchronously
	// or handed off to d.cache for asynchronous execution
	{{if .Sync}}
	ADDCACHEFUNC(c, missData {{.ExtraAddCacheArgs}})
	{{else}}
	d.cache.Do(c, func(c context.Context) {
		ADDCACHEFUNC(c, missData {{.ExtraAddCacheArgs}})
	})
	{{end}}
	return
}
`
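
// For illustration only: assuming hypothetical substitutions NAME=Demos,
// KEY=int64, VALUE=*Demo, CACHEFUNC=d.CacheDemos, RAWFUNC=d.RawDemos and
// ADDCACHEFUNC=d.AddCacheDemos, the template above would expand to a method
// shaped roughly like this (details elided):
//
//	func (d *Dao) Demos(c context.Context, keys []int64) (res map[int64]*Demo, err error) {
//		if len(keys) == 0 {
//			return
//		}
//		addCache := true
//		if res, err = d.CacheDemos(c, keys); err != nil {
//			addCache, res, err = false, nil, nil
//		}
//		// ... collect misses, fetch them via d.RawDemos, merge into res,
//		// then write them back with d.AddCacheDemos ...
//		return
//	}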