fix: merge coverage reports from each test rerun (#5360)
* fix_: fix test coverage when rerunning tests
* fix_: make lint-fix
* chore_: change test coverage report artifact name
* chore_: add codeclimate.json to artifacts
* chore_: bring in gocovmerge util

Co-authored-by: Siddarth Kumar <siddarthkay@gmail.com>
Parent: a300e12853
Commit: d2f4cae18f
@@ -132,7 +132,8 @@ pipeline {
       ]) {
         nix.shell('make test-unit V=1', pure: false)
       }
-      archiveArtifacts('c.out')
+      sh "mv c.out test-coverage.out"
+      archiveArtifacts('test-coverage.out, coverage/codeclimate.json')
     }
   } }
   post { cleanup { /* Leftover DB containers. */
@@ -79,15 +79,24 @@ run_test_for_package() {
     gotestsum_flags="${gotestsum_flags} --junitfile=${report_file} --rerun-fails-report=${rerun_report_file}"
   fi
 
-  gotestsum --packages="${package}" ${gotestsum_flags} -- \
+  # Cleanup previous coverage reports
+  rm -f ${package_dir}/coverage.out.rerun.*
+
+  PACKAGE_DIR=${package_dir} gotestsum --packages="${package}" ${gotestsum_flags} --raw-command -- \
+    ./_assets/scripts/test-with-coverage.sh \
+    ${package} \
     -v ${GOTEST_EXTRAFLAGS} \
     -timeout "${package_timeout}" \
     -count 1 \
-    -tags "${BUILD_TAGS}" \
-    -covermode=atomic \
-    -coverprofile="${coverage_file}" | \
+    -tags "${BUILD_TAGS}" | \
     redirect_stdout "${output_file}"
 
+  # Merge package coverage results
+  go run ./cmd/test-coverage-utils/gocovmerge.go ${package_dir}/coverage.out.rerun.* > ${coverage_file}
+
+  # Cleanup coverage reports
+  rm -f ${package_dir}/coverage.out.rerun.*
+
   local go_test_exit=$?
   echo "${go_test_exit}" > "${exit_code_file}"
   if [[ "${go_test_exit}" -ne 0 ]]; then
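Note on the hunk above: with gotestsum's --rerun-fails, the same package can be executed several times, and previously every attempt reused the single -coverprofile path, so the final profile only reflected the last rerun. Each attempt now goes through test-with-coverage.sh (next hunk), which writes its own coverage.out.rerun.* profile, and gocovmerge folds those into the one ${coverage_file} the rest of the pipeline consumes. A hedged manual equivalent of the merge step, with illustrative paths that are not taken from the commit:

    # Illustrative only; assumes the status-go repository root as working directory.
    package_dir="./protocol"                      # example package directory, not from the commit
    coverage_file="${package_dir}/coverage.out"   # file the surrounding script expects

    # Each gotestsum attempt leaves one profile behind, e.g.
    #   ./protocol/coverage.out.rerun.ab12cd34ef
    go run ./cmd/test-coverage-utils/gocovmerge.go \
        "${package_dir}"/coverage.out.rerun.* > "${coverage_file}"

    # The merged profile keeps a single "mode:" header; per-block counts are combined.
    head -n 1 "${coverage_file}"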
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+set -eu
+coverage_file_path="${PACKAGE_DIR}/$(mktemp coverage.out.rerun.XXXXXXXXXX)"
+go test -json \
+  -covermode=atomic \
+  -coverprofile="${coverage_file_path}" \
+  "$@"
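A hedged note on the wrapper above: gotestsum's --raw-command mode runs the given command instead of composing its own `go test` line and reads the JSON test events from that command's stdout, which is why the script invokes `go test -json` itself and forwards everything else via "$@". Invoked by hand it would look roughly like this (package and flags are illustrative, not taken from CI):

    # Illustrative invocation only; in the pipeline the arguments come from gotestsum.
    PACKAGE_DIR=./protocol \
        ./_assets/scripts/test-with-coverage.sh \
        github.com/status-im/status-go/protocol \
        -timeout 45m \
        -count 1

    # Every invocation writes a freshly named coverage.out.rerun.* profile,
    # so a rerun no longer overwrites the coverage gathered by earlier attempts.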
@@ -0,0 +1,111 @@
+// gocovmerge takes the results from multiple `go test -coverprofile` runs and
+// merges them into one profile
+package main
+
+import (
+    "flag"
+    "fmt"
+    "io"
+    "log"
+    "os"
+    "sort"
+
+    "golang.org/x/tools/cover"
+)
+
+func mergeProfiles(p *cover.Profile, merge *cover.Profile) {
+    if p.Mode != merge.Mode {
+        log.Fatalf("cannot merge profiles with different modes")
+    }
+    // Since the blocks are sorted, we can keep track of where the last block
+    // was inserted and only look at the blocks after that as targets for merge
+    startIndex := 0
+    for _, b := range merge.Blocks {
+        startIndex = mergeProfileBlock(p, b, startIndex)
+    }
+}
+
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) int {
+    sortFunc := func(i int) bool {
+        pi := p.Blocks[i+startIndex]
+        return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+    }
+
+    i := 0
+    if !sortFunc(i) {
+        i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+    }
+    i += startIndex
+    if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+        if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+            log.Fatalf("OVERLAP MERGE: %v %v %v", p.FileName, p.Blocks[i], pb)
+        }
+        switch p.Mode {
+        case "set":
+            p.Blocks[i].Count |= pb.Count
+        case "count", "atomic":
+            p.Blocks[i].Count += pb.Count
+        default:
+            log.Fatalf("unsupported covermode: '%s'", p.Mode)
+        }
+    } else {
+        if i > 0 {
+            pa := p.Blocks[i-1]
+            if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+                log.Fatalf("OVERLAP BEFORE: %v %v %v", p.FileName, pa, pb)
+            }
+        }
+        if i < len(p.Blocks)-1 {
+            pa := p.Blocks[i+1]
+            if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+                log.Fatalf("OVERLAP AFTER: %v %v %v", p.FileName, pa, pb)
+            }
+        }
+        p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+        copy(p.Blocks[i+1:], p.Blocks[i:])
+        p.Blocks[i] = pb
+    }
+    return i + 1
+}
+
+func addProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+    i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+    if i < len(profiles) && profiles[i].FileName == p.FileName {
+        mergeProfiles(profiles[i], p)
+    } else {
+        profiles = append(profiles, nil)
+        copy(profiles[i+1:], profiles[i:])
+        profiles[i] = p
+    }
+    return profiles
+}
+
+func dumpProfiles(profiles []*cover.Profile, out io.Writer) {
+    if len(profiles) == 0 {
+        return
+    }
+    fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode)
+    for _, p := range profiles {
+        for _, b := range p.Blocks {
+            fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count)
+        }
+    }
+}
+
+func main() {
+    flag.Parse()
+
+    var merged []*cover.Profile
+
+    for _, file := range flag.Args() {
+        profiles, err := cover.ParseProfiles(file)
+        if err != nil {
+            log.Fatalf("failed to parse profiles: %v", err)
+        }
+        for _, p := range profiles {
+            merged = addProfile(merged, p)
+        }
+    }
+
+    dumpProfiles(merged, os.Stdout)
+}
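To make the merge semantics above concrete, here is a small fabricated example (file names and counts are invented, not from the test suite): profiles are grouped per source file, blocks are matched by start/end position, and matching blocks have their counts summed in count/atomic mode or OR-ed in set mode.

    # Two fabricated rerun profiles covering the same blocks.
    printf 'mode: atomic\nexample.com/pkg/foo.go:10.2,12.16 2 1\nexample.com/pkg/foo.go:14.2,15.10 1 0\n' > coverage.out.rerun.aaa
    printf 'mode: atomic\nexample.com/pkg/foo.go:10.2,12.16 2 3\nexample.com/pkg/foo.go:14.2,15.10 1 1\n' > coverage.out.rerun.bbb

    # Run from the repository root; blocks keep one "mode:" header and combined counts.
    go run ./cmd/test-coverage-utils/gocovmerge.go coverage.out.rerun.*
    # Expected output:
    #   mode: atomic
    #   example.com/pkg/foo.go:10.2,12.16 2 4
    #   example.com/pkg/foo.go:14.2,15.10 1 1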
go.mod
@@ -102,6 +102,7 @@ require (
     golang.org/x/net v0.25.0
     golang.org/x/text v0.15.0
     golang.org/x/time v0.5.0
+    golang.org/x/tools v0.21.0
 )
 
 require (
@@ -279,7 +280,6 @@ require (
     golang.org/x/sync v0.7.0 // indirect
     golang.org/x/sys v0.20.0 // indirect
     golang.org/x/term v0.20.0 // indirect
-    golang.org/x/tools v0.21.0 // indirect
     gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
     lukechampine.com/blake3 v1.2.1 // indirect
@@ -8,9 +8,10 @@ import (
     big "math/big"
     reflect "reflect"
 
+    gomock "github.com/golang/mock/gomock"
+
     common "github.com/ethereum/go-ethereum/common"
     types "github.com/ethereum/go-ethereum/core/types"
-    gomock "github.com/golang/mock/gomock"
     account "github.com/status-im/status-go/account"
     types0 "github.com/status-im/status-go/eth-node/types"
     pathprocessor "github.com/status-im/status-go/services/wallet/router/pathprocessor"
@@ -14,8 +14,9 @@ import (
     "github.com/status-im/status-go/protocol/transport"
     "github.com/status-im/status-go/wakuv2"
+
+    v1protocol "github.com/status-im/status-go/protocol/v1"
     v2protocol "github.com/waku-org/go-waku/waku/v2/protocol"
 
-    v1protocol "github.com/status-im/status-go/protocol/v1"
 )
 
 type TelemetryType string
@@ -10,12 +10,13 @@ import (
     "go.uber.org/zap"
     "google.golang.org/protobuf/proto"
 
+    v2protocol "github.com/waku-org/go-waku/waku/v2/protocol"
+    "github.com/waku-org/go-waku/waku/v2/protocol/pb"
+
     "github.com/status-im/status-go/eth-node/types"
     "github.com/status-im/status-go/protocol/transport"
     v1protocol "github.com/status-im/status-go/protocol/v1"
     "github.com/status-im/status-go/wakuv2"
-    v2protocol "github.com/waku-org/go-waku/waku/v2/protocol"
-    "github.com/waku-org/go-waku/waku/v2/protocol/pb"
 )
 
 func createMockServer(t *testing.T) *httptest.Server {
@@ -0,0 +1,266 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cover provides support for parsing coverage profiles
+// generated by "go test -coverprofile=cover.out".
+package cover // import "golang.org/x/tools/cover"
+
+import (
+    "bufio"
+    "errors"
+    "fmt"
+    "io"
+    "math"
+    "os"
+    "sort"
+    "strconv"
+    "strings"
+)
+
+// Profile represents the profiling data for a specific file.
+type Profile struct {
+    FileName string
+    Mode     string
+    Blocks   []ProfileBlock
+}
+
+// ProfileBlock represents a single block of profiling data.
+type ProfileBlock struct {
+    StartLine, StartCol int
+    EndLine, EndCol     int
+    NumStmt, Count      int
+}
+
+type byFileName []*Profile
+
+func (p byFileName) Len() int           { return len(p) }
+func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
+func (p byFileName) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// ParseProfiles parses profile data in the specified file and returns a
+// Profile for each source file described therein.
+func ParseProfiles(fileName string) ([]*Profile, error) {
+    pf, err := os.Open(fileName)
+    if err != nil {
+        return nil, err
+    }
+    defer pf.Close()
+    return ParseProfilesFromReader(pf)
+}
+
+// ParseProfilesFromReader parses profile data from the Reader and
+// returns a Profile for each source file described therein.
+func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) {
+    // First line is "mode: foo", where foo is "set", "count", or "atomic".
+    // Rest of file is in the format
+    //    encoding/base64/base64.go:34.44,37.40 3 1
+    // where the fields are: name.go:line.column,line.column numberOfStatements count
+    files := make(map[string]*Profile)
+    s := bufio.NewScanner(rd)
+    mode := ""
+    for s.Scan() {
+        line := s.Text()
+        if mode == "" {
+            const p = "mode: "
+            if !strings.HasPrefix(line, p) || line == p {
+                return nil, fmt.Errorf("bad mode line: %v", line)
+            }
+            mode = line[len(p):]
+            continue
+        }
+        fn, b, err := parseLine(line)
+        if err != nil {
+            return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err)
+        }
+        p := files[fn]
+        if p == nil {
+            p = &Profile{
+                FileName: fn,
+                Mode:     mode,
+            }
+            files[fn] = p
+        }
+        p.Blocks = append(p.Blocks, b)
+    }
+    if err := s.Err(); err != nil {
+        return nil, err
+    }
+    for _, p := range files {
+        sort.Sort(blocksByStart(p.Blocks))
+        // Merge samples from the same location.
+        j := 1
+        for i := 1; i < len(p.Blocks); i++ {
+            b := p.Blocks[i]
+            last := p.Blocks[j-1]
+            if b.StartLine == last.StartLine &&
+                b.StartCol == last.StartCol &&
+                b.EndLine == last.EndLine &&
+                b.EndCol == last.EndCol {
+                if b.NumStmt != last.NumStmt {
+                    return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
+                }
+                if mode == "set" {
+                    p.Blocks[j-1].Count |= b.Count
+                } else {
+                    p.Blocks[j-1].Count += b.Count
+                }
+                continue
+            }
+            p.Blocks[j] = b
+            j++
+        }
+        p.Blocks = p.Blocks[:j]
+    }
+    // Generate a sorted slice.
+    profiles := make([]*Profile, 0, len(files))
+    for _, profile := range files {
+        profiles = append(profiles, profile)
+    }
+    sort.Sort(byFileName(profiles))
+    return profiles, nil
+}
+
+// parseLine parses a line from a coverage file.
+// It is equivalent to the regex
+// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$
+//
+// However, it is much faster: https://golang.org/cl/179377
+func parseLine(l string) (fileName string, block ProfileBlock, err error) {
+    end := len(l)
+
+    b := ProfileBlock{}
+    b.Count, end, err = seekBack(l, ' ', end, "Count")
+    if err != nil {
+        return "", b, err
+    }
+    b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt")
+    if err != nil {
+        return "", b, err
+    }
+    b.EndCol, end, err = seekBack(l, '.', end, "EndCol")
+    if err != nil {
+        return "", b, err
+    }
+    b.EndLine, end, err = seekBack(l, ',', end, "EndLine")
+    if err != nil {
+        return "", b, err
+    }
+    b.StartCol, end, err = seekBack(l, '.', end, "StartCol")
+    if err != nil {
+        return "", b, err
+    }
+    b.StartLine, end, err = seekBack(l, ':', end, "StartLine")
+    if err != nil {
+        return "", b, err
+    }
+    fn := l[0:end]
+    if fn == "" {
+        return "", b, errors.New("a FileName cannot be blank")
+    }
+    return fn, b, nil
+}
+
+// seekBack searches backwards from end to find sep in l, then returns the
+// value between sep and end as an integer.
+// If seekBack fails, the returned error will reference what.
+func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) {
+    // Since we're seeking backwards and we know only ASCII is legal for these values,
+    // we can ignore the possibility of non-ASCII characters.
+    for start := end - 1; start >= 0; start-- {
+        if l[start] == sep {
+            i, err := strconv.Atoi(l[start+1 : end])
+            if err != nil {
+                return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err)
+            }
+            if i < 0 {
+                return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i)
+            }
+            return i, start, nil
+        }
+    }
+    return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what)
+}
+
+type blocksByStart []ProfileBlock
+
+func (b blocksByStart) Len() int      { return len(b) }
+func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b blocksByStart) Less(i, j int) bool {
+    bi, bj := b[i], b[j]
+    return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
+}
+
+// Boundary represents the position in a source file of the beginning or end of a
+// block as reported by the coverage profile. In HTML mode, it will correspond to
+// the opening or closing of a <span> tag and will be used to colorize the source
+type Boundary struct {
+    Offset int     // Location as a byte offset in the source file.
+    Start  bool    // Is this the start of a block?
+    Count  int     // Event count from the cover profile.
+    Norm   float64 // Count normalized to [0..1].
+    Index  int     // Order in input file.
+}
+
+// Boundaries returns a Profile as a set of Boundary objects within the provided src.
+func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
+    // Find maximum count.
+    max := 0
+    for _, b := range p.Blocks {
+        if b.Count > max {
+            max = b.Count
+        }
+    }
+    // Divisor for normalization.
+    divisor := math.Log(float64(max))
+
+    // boundary returns a Boundary, populating the Norm field with a normalized Count.
+    index := 0
+    boundary := func(offset int, start bool, count int) Boundary {
+        b := Boundary{Offset: offset, Start: start, Count: count, Index: index}
+        index++
+        if !start || count == 0 {
+            return b
+        }
+        if max <= 1 {
+            b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS.
+        } else if count > 0 {
+            b.Norm = math.Log(float64(count)) / divisor
+        }
+        return b
+    }
+
+    line, col := 1, 2 // TODO: Why is this 2?
+    for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
+        b := p.Blocks[bi]
+        if b.StartLine == line && b.StartCol == col {
+            boundaries = append(boundaries, boundary(si, true, b.Count))
+        }
+        if b.EndLine == line && b.EndCol == col || line > b.EndLine {
+            boundaries = append(boundaries, boundary(si, false, 0))
+            bi++
+            continue // Don't advance through src; maybe the next block starts here.
+        }
+        if src[si] == '\n' {
+            line++
+            col = 0
+        }
+        col++
+        si++
+    }
+    sort.Sort(boundariesByPos(boundaries))
+    return
+}
+
+type boundariesByPos []Boundary
+
+func (b boundariesByPos) Len() int      { return len(b) }
+func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b boundariesByPos) Less(i, j int) bool {
+    if b[i].Offset == b[j].Offset {
+        // Boundaries at the same offset should be ordered according to
+        // their original position.
+        return b[i].Index < b[j].Index
+    }
+    return b[i].Offset < b[j].Offset
+}
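The vendored package above is the parser gocovmerge relies on; since the merged output is still an ordinary coverage profile, standard tooling keeps working on the archived test-coverage.out. A hedged example of inspecting it locally:

    # Illustrative only: the merged profile stays compatible with go tool cover.
    go tool cover -func=test-coverage.out | tail -n 1        # overall percentage
    go tool cover -html=test-coverage.out -o coverage.html   # per-line view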
@@ -1281,6 +1281,7 @@ golang.org/x/time/rate
 # golang.org/x/tools v0.21.0
 ## explicit; go 1.19
 golang.org/x/tools/cmd/goimports
+golang.org/x/tools/cover
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/go/gcexportdata
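The go.mod and vendor/modules.txt edits above are the dependency bookkeeping for importing golang.org/x/tools/cover directly: the module moves from an indirect to a direct requirement and the cover package is vendored. They are consistent with what the usual module commands would regenerate, shown here only as a hedged sketch:

    # Assumed workflow; run from the repository root after adding the import.
    go mod tidy     # records golang.org/x/tools as a direct requirement
    go mod vendor   # copies golang.org/x/tools/cover into vendor/ and updates modules.txt
    git status --short go.mod go.sum vendor/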
@@ -23,16 +23,17 @@ import (
     "golang.org/x/exp/maps"
     "google.golang.org/protobuf/proto"
+
+    "github.com/status-im/status-go/appdatabase"
+    "github.com/status-im/status-go/eth-node/types"
+    "github.com/status-im/status-go/protocol/tt"
+    "github.com/status-im/status-go/t/helpers"
+    "github.com/status-im/status-go/wakuv2/common"
     "github.com/waku-org/go-waku/waku/v2/dnsdisc"
     "github.com/waku-org/go-waku/waku/v2/protocol/filter"
     "github.com/waku-org/go-waku/waku/v2/protocol/legacy_store"
     "github.com/waku-org/go-waku/waku/v2/protocol/pb"
     "github.com/waku-org/go-waku/waku/v2/protocol/relay"
 
-    "github.com/status-im/status-go/appdatabase"
-    "github.com/status-im/status-go/eth-node/types"
-    "github.com/status-im/status-go/protocol/tt"
-    "github.com/status-im/status-go/t/helpers"
-    "github.com/status-im/status-go/wakuv2/common"
 )
 
 var testENRBootstrap = "enrtree://AI4W5N5IFEUIHF5LESUAOSMV6TKWF2MB6GU2YK7PU4TYUGUNOCEPW@store.staging.shards.nodes.status.im"