main.go

/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
/*
Package main provides a benchmark driver configured by command-line flags.

An example run with profiling enabled:

	go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \
	  -compression=on -maxConcurrentCalls=1 -trace=off \
	  -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \
	  -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result

As a suggestion, when creating a branch, run this benchmark and save the
result file with "-resultFile=basePerf"; later, in the middle of the work or
when it is finished, run the benchmark again and compare the new result with
the base at any time. Assume two result files named "basePerf" and "curPerf"
were created by passing -resultFile=basePerf and -resultFile=curPerf.

To format curPerf, run:

	go run benchmark/benchresult/main.go curPerf

To see how performance changed relative to the base result, run:

	go run benchmark/benchresult/main.go basePerf curPerf
*/
package main

import (
	"context"
	"encoding/gob"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"os"
	"reflect"
	"runtime"
	"runtime/pprof"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"google.golang.org/grpc"
	bm "google.golang.org/grpc/benchmark"
	testpb "google.golang.org/grpc/benchmark/grpc_testing"
	"google.golang.org/grpc/benchmark/latency"
	"google.golang.org/grpc/benchmark/stats"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/test/bufconn"
)
const (
	modeOn   = "on"
	modeOff  = "off"
	modeBoth = "both"
)

var allCompressionModes = []string{modeOn, modeOff, modeBoth}
var allTraceModes = []string{modeOn, modeOff, modeBoth}

const (
	workloadsUnary     = "unary"
	workloadsStreaming = "streaming"
	workloadsAll       = "all"
)

var allWorkloads = []string{workloadsUnary, workloadsStreaming, workloadsAll}

var (
	runMode = []bool{true, true} // {runUnary, runStream}
	// When the latency is set to 0 (no delay), the result is still slower than
	// a real run with no delay, because the latency-simulation path performs
	// extra operations.
	ltc                    = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay.
	kbps                   = []int{0, 10240}                           // if non-positive, infinite
	mtu                    = []int{0}                                  // if non-positive, infinite
	maxConcurrentCalls     = []int{1, 8, 64, 512}
	reqSizeBytes           = []int{1, 1024, 1024 * 1024}
	respSizeBytes          = []int{1, 1024, 1024 * 1024}
	enableTrace            []bool
	benchtime              time.Duration
	memProfile, cpuProfile string
	memProfileRate         int
	enableCompressor       []bool
	enableChannelz         []bool
	networkMode            string
	benchmarkResultFile    string
	networks               = map[string]latency.Network{
		"Local":    latency.Local,
		"LAN":      latency.LAN,
		"WAN":      latency.WAN,
		"Longhaul": latency.Longhaul,
	}
)
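
// unaryBenchmark runs a unary-RPC benchmark for the given feature set and
// records per-call latencies in s.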
func unaryBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) {
	caller, cleanup := makeFuncUnary(benchFeatures)
	defer cleanup()
	runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s)
}
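
// streamBenchmark runs a streaming-RPC benchmark for the given feature set
// and records per-call latencies in s.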
func streamBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) {
	caller, cleanup := makeFuncStream(benchFeatures)
	defer cleanup()
	runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s)
}
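
// makeFuncUnary starts a benchmark server behind a simulated network and
// returns a function that issues one unary call per invocation, plus a
// cleanup function that closes the client connection and stops the server.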
func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) {
	nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
	opts := []grpc.DialOption{}
	sopts := []grpc.ServerOption{}
	if benchFeatures.EnableCompressor {
		sopts = append(sopts,
			grpc.RPCCompressor(nopCompressor{}),
			grpc.RPCDecompressor(nopDecompressor{}),
		)
		opts = append(opts,
			grpc.WithCompressor(nopCompressor{}),
			grpc.WithDecompressor(nopDecompressor{}),
		)
	}
	sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
	opts = append(opts, grpc.WithInsecure())
	var lis net.Listener
	if *useBufconn {
		bcLis := bufconn.Listen(256 * 1024)
		lis = bcLis
		opts = append(opts, grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
			return nw.TimeoutDialer(
				func(string, string, time.Duration) (net.Conn, error) {
					return bcLis.Dial()
				})("", "", 0)
		}))
	} else {
		var err error
		lis, err = net.Listen("tcp", "localhost:0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		opts = append(opts, grpc.WithDialer(func(_ string, timeout time.Duration) (net.Conn, error) {
			return nw.TimeoutDialer(net.DialTimeout)("tcp", lis.Addr().String(), timeout)
		}))
	}
	lis = nw.Listener(lis)
	stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
	conn := bm.NewClientConn("" /* target not used */, opts...)
	tc := testpb.NewBenchmarkServiceClient(conn)
	return func(int) {
			unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
		}, func() {
			conn.Close()
			stopper()
		}
}
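
// makeFuncStream starts a benchmark server behind a simulated network, opens
// one stream per concurrent caller, and returns a function that performs one
// streaming round trip on the stream at the given position, plus a cleanup
// function that closes the client connection and stops the server.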
func makeFuncStream(benchFeatures stats.Features) (func(int), func()) {
	// TODO: Refactor to remove duplication with makeFuncUnary.
	nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu}
	opts := []grpc.DialOption{}
	sopts := []grpc.ServerOption{}
	if benchFeatures.EnableCompressor {
		sopts = append(sopts,
			grpc.RPCCompressor(grpc.NewGZIPCompressor()),
			grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
		)
		opts = append(opts,
			grpc.WithCompressor(grpc.NewGZIPCompressor()),
			grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
		)
	}
	sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1)))
	opts = append(opts, grpc.WithInsecure())
	var lis net.Listener
	if *useBufconn {
		bcLis := bufconn.Listen(256 * 1024)
		lis = bcLis
		opts = append(opts, grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
			return nw.TimeoutDialer(
				func(string, string, time.Duration) (net.Conn, error) {
					return bcLis.Dial()
				})("", "", 0)
		}))
	} else {
		var err error
		lis, err = net.Listen("tcp", "localhost:0")
		if err != nil {
			grpclog.Fatalf("Failed to listen: %v", err)
		}
		opts = append(opts, grpc.WithDialer(func(_ string, timeout time.Duration) (net.Conn, error) {
			return nw.TimeoutDialer(net.DialTimeout)("tcp", lis.Addr().String(), timeout)
		}))
	}
	lis = nw.Listener(lis)
	stopper := bm.StartServer(bm.ServerInfo{Type: "protobuf", Listener: lis}, sopts...)
	conn := bm.NewClientConn("" /* target not used */, opts...)
	tc := testpb.NewBenchmarkServiceClient(conn)
	streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls)
	for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
		stream, err := tc.StreamingCall(context.Background())
		if err != nil {
			grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err)
		}
		streams[i] = stream
	}
	return func(pos int) {
			streamCaller(streams[pos], benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes)
		}, func() {
			conn.Close()
			stopper()
		}
}
func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) {
	if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil {
		grpclog.Fatalf("DoUnaryCall failed: %v", err)
	}
}

func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) {
	if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil {
		grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err)
	}
}
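
// runBenchmark warms up the connection, then issues calls from
// MaxConcurrentCalls goroutines until benchtime elapses, adding each call's
// latency to s and reporting the total call count to stopTimer.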
func runBenchmark(caller func(int), startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) {
	// Warm up connection.
	for i := 0; i < 10; i++ {
		caller(0)
	}
	// Run benchmark.
	startTimer()
	var (
		mu sync.Mutex
		wg sync.WaitGroup
	)
	wg.Add(benchFeatures.MaxConcurrentCalls)
	bmEnd := time.Now().Add(benchtime)
	var count int32
	for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ {
		go func(pos int) {
			for {
				t := time.Now()
				if t.After(bmEnd) {
					break
				}
				start := time.Now()
				caller(pos)
				elapse := time.Since(start)
				atomic.AddInt32(&count, 1)
				mu.Lock()
				s.Add(elapse)
				mu.Unlock()
			}
			wg.Done()
		}(i)
	}
	wg.Wait()
	stopTimer(count)
}
var useBufconn = flag.Bool("bufconn", false, "Use in-memory connection instead of system network I/O")

// init reads the command-line flags that select which feature combinations to
// benchmark and how to run them.
func init() {
	var (
		workloads, traceMode, compressorMode, readLatency, channelzOn string
		readKbps, readMtu, readMaxConcurrentCalls                     intSliceType
		readReqSizeBytes, readRespSizeBytes                           intSliceType
	)
	flag.StringVar(&workloads, "workloads", workloadsAll,
		fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", ")))
	flag.StringVar(&traceMode, "trace", modeOff,
		fmt.Sprintf("Trace mode - One of: %v", strings.Join(allTraceModes, ", ")))
	flag.StringVar(&readLatency, "latency", "", "Simulated one-way network latency - may be a comma-separated list")
	flag.StringVar(&channelzOn, "channelz", modeOff, "Whether channelz should be turned on")
	flag.DurationVar(&benchtime, "benchtime", time.Second, "Configures the amount of time to run each benchmark")
	flag.Var(&readKbps, "kbps", "Simulated network throughput (in kbps) - may be a comma-separated list")
	flag.Var(&readMtu, "mtu", "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list")
	flag.Var(&readMaxConcurrentCalls, "maxConcurrentCalls", "Number of concurrent RPCs during benchmarks")
	flag.Var(&readReqSizeBytes, "reqSizeBytes", "Request size in bytes - may be a comma-separated list")
	flag.Var(&readRespSizeBytes, "respSizeBytes", "Response size in bytes - may be a comma-separated list")
	flag.StringVar(&memProfile, "memProfile", "", "Enables memory profiling output to the filename provided.")
	flag.IntVar(&memProfileRate, "memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+
		"memProfile should be set before setting profile rate. To include every allocated block in the profile, "+
		"set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 512 * 1024 by default.")
	flag.StringVar(&cpuProfile, "cpuProfile", "", "Enables CPU profiling output to the filename provided")
	flag.StringVar(&compressorMode, "compression", modeOff,
		fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompressionModes, ", ")))
	flag.StringVar(&benchmarkResultFile, "resultFile", "", "Save the benchmark result into a binary file")
	flag.StringVar(&networkMode, "networkMode", "", "Network mode includes LAN, WAN, Local and Longhaul")
	flag.Parse()
	if flag.NArg() != 0 {
		log.Fatal("Error: unparsed arguments: ", flag.Args())
	}
	switch workloads {
	case workloadsUnary:
		runMode[0] = true
		runMode[1] = false
	case workloadsStreaming:
		runMode[0] = false
		runMode[1] = true
	case workloadsAll:
		runMode[0] = true
		runMode[1] = true
	default:
		log.Fatalf("Unknown workloads setting: %v (want one of: %v)",
			workloads, strings.Join(allWorkloads, ", "))
	}
	enableCompressor = setMode(compressorMode)
	enableTrace = setMode(traceMode)
	enableChannelz = setMode(channelzOn)
	// Latency inputs are durations with a unit suffix, e.g. "40ms".
	readTimeFromInput(&ltc, readLatency)
	readIntFromIntSlice(&kbps, readKbps)
	readIntFromIntSlice(&mtu, readMtu)
	readIntFromIntSlice(&maxConcurrentCalls, readMaxConcurrentCalls)
	readIntFromIntSlice(&reqSizeBytes, readReqSizeBytes)
	readIntFromIntSlice(&respSizeBytes, readRespSizeBytes)
	// Overwrite latency, kbps and mtu if a network mode is set.
	if network, ok := networks[networkMode]; ok {
		ltc = []time.Duration{network.Latency}
		kbps = []int{network.Kbps}
		mtu = []int{network.MTU}
	}
}
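
// setMode translates an on/off/both flag value into the slice of booleans
// iterated over when generating feature combinations.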
func setMode(name string) []bool {
	switch name {
	case modeOn:
		return []bool{true}
	case modeOff:
		return []bool{false}
	case modeBoth:
		return []bool{false, true}
	default:
		log.Fatalf("Unknown %s setting: %v (want one of: %v)",
			name, name, strings.Join(allCompressionModes, ", "))
		return []bool{}
	}
}
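
// intSliceType is a flag.Value that accepts a comma-separated list of
// integers.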
type intSliceType []int

func (intSlice *intSliceType) String() string {
	return fmt.Sprintf("%v", *intSlice)
}

func (intSlice *intSliceType) Set(value string) error {
	if len(*intSlice) > 0 {
		return errors.New("interval flag already set")
	}
	for _, num := range strings.Split(value, ",") {
		next, err := strconv.Atoi(num)
		if err != nil {
			return err
		}
		*intSlice = append(*intSlice, next)
	}
	return nil
}
func readIntFromIntSlice(values *[]int, replace intSliceType) {
	// If the corresponding flag was not set, keep the default values.
	if len(replace) == 0 {
		return
	}
	*values = replace
}
func readTimeFromInput(values *[]time.Duration, replace string) {
	if replace != "" {
		*values = []time.Duration{}
		for _, ltc := range strings.Split(replace, ",") {
			duration, err := time.ParseDuration(ltc)
			if err != nil {
				log.Fatal(err.Error())
			}
			*values = append(*values, duration)
		}
	}
}
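
// main iterates over every combination of the configured features: for each
// combination it runs the selected workloads for -benchtime, prints the
// per-combination results, and finally hands all results to after.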
func main() {
	before()
	featuresPos := make([]int, 9)
	// 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxConcurrentCalls 5:reqSizeBytes
	// 6:respSizeBytes 7:enableCompressor 8:enableChannelz
	featuresNum := []int{len(enableTrace), len(ltc), len(kbps), len(mtu),
		len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor), len(enableChannelz)}
	initialPos := make([]int, len(featuresPos))
	s := stats.NewStats(10)
	s.SortLatency()
	var memStats runtime.MemStats
	var results testing.BenchmarkResult
	var startAllocs, startBytes uint64
	var startTime time.Time
	start := true
	var startTimer = func() {
		runtime.ReadMemStats(&memStats)
		startAllocs = memStats.Mallocs
		startBytes = memStats.TotalAlloc
		startTime = time.Now()
	}
	var stopTimer = func(count int32) {
		runtime.ReadMemStats(&memStats)
		results = testing.BenchmarkResult{N: int(count), T: time.Since(startTime),
			Bytes: 0, MemAllocs: memStats.Mallocs - startAllocs, MemBytes: memStats.TotalAlloc - startBytes}
	}
	sharedPos := make([]bool, len(featuresPos))
	for i := 0; i < len(featuresPos); i++ {
		if featuresNum[i] <= 1 {
			sharedPos[i] = true
		}
	}
	// Run benchmarks
	resultSlice := []stats.BenchResults{}
	for !reflect.DeepEqual(featuresPos, initialPos) || start {
		start = false
		benchFeature := stats.Features{
			NetworkMode:        networkMode,
			EnableTrace:        enableTrace[featuresPos[0]],
			Latency:            ltc[featuresPos[1]],
			Kbps:               kbps[featuresPos[2]],
			Mtu:                mtu[featuresPos[3]],
			MaxConcurrentCalls: maxConcurrentCalls[featuresPos[4]],
			ReqSizeBytes:       reqSizeBytes[featuresPos[5]],
			RespSizeBytes:      respSizeBytes[featuresPos[6]],
			EnableCompressor:   enableCompressor[featuresPos[7]],
			EnableChannelz:     enableChannelz[featuresPos[8]],
		}
		grpc.EnableTracing = enableTrace[featuresPos[0]]
		if enableChannelz[featuresPos[8]] {
			channelz.TurnOn()
		}
		if runMode[0] {
			unaryBenchmark(startTimer, stopTimer, benchFeature, benchtime, s)
			s.SetBenchmarkResult("Unary", benchFeature, results.N,
				results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos)
			fmt.Println(s.BenchString())
			fmt.Println(s.String())
			resultSlice = append(resultSlice, s.GetBenchmarkResults())
			s.Clear()
		}
		if runMode[1] {
			streamBenchmark(startTimer, stopTimer, benchFeature, benchtime, s)
			s.SetBenchmarkResult("Stream", benchFeature, results.N,
				results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos)
			fmt.Println(s.BenchString())
			fmt.Println(s.String())
			resultSlice = append(resultSlice, s.GetBenchmarkResults())
			s.Clear()
		}
		bm.AddOne(featuresPos, featuresNum)
	}
	after(resultSlice)
}
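
// before sets the memory profiling rate and starts CPU profiling if the
// corresponding flags were provided.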
func before() {
	if memProfile != "" {
		runtime.MemProfileRate = memProfileRate
	}
	if cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "testing: %s\n", err)
			return
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err)
			f.Close()
			return
		}
	}
}
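
// after stops profiling, writes the heap profile if requested, and
// gob-encodes the collected benchmark results into -resultFile if set.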
func after(data []stats.BenchResults) {
	if cpuProfile != "" {
		pprof.StopCPUProfile() // flushes profile to disk
	}
	if memProfile != "" {
		f, err := os.Create(memProfile)
		if err != nil {
			fmt.Fprintf(os.Stderr, "testing: %s\n", err)
			os.Exit(2)
		}
		runtime.GC() // materialize all statistics
		if err = pprof.WriteHeapProfile(f); err != nil {
			fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", memProfile, err)
			os.Exit(2)
		}
		f.Close()
	}
	if benchmarkResultFile != "" {
		f, err := os.Create(benchmarkResultFile)
		if err != nil {
			log.Fatalf("testing: can't write benchmark result %s: %s\n", benchmarkResultFile, err)
		}
		dataEncoder := gob.NewEncoder(f)
		dataEncoder.Encode(data)
		f.Close()
	}
}
// nopCompressor is a compressor that just copies data.
type nopCompressor struct{}

func (nopCompressor) Do(w io.Writer, p []byte) error {
	n, err := w.Write(p)
	if err != nil {
		return err
	}
	if n != len(p) {
		return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p))
	}
	return nil
}

func (nopCompressor) Type() string { return "nop" }

// nopDecompressor is a decompressor that just copies data.
type nopDecompressor struct{}

func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) }
func (nopDecompressor) Type() string                   { return "nop" }