mirror of https://github.com/status-im/consul.git
consul-container test: no splitting and on single runner (#17394)
parent 2904d0a431
commit 1339c79f8d
@@ -343,51 +343,14 @@ jobs:
          DD_ENV: ci
        run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml

  generate-compatibility-job-matrices:
    needs: [setup]
    runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }}
    name: Generate Compatibility Job Matrices
    outputs:
      compatibility-matrix: ${{ steps.set-matrix.outputs.compatibility-matrix }}
    steps:
      - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
      - name: Generate Compatibility Job Matrix
        id: set-matrix
        env:
          TOTAL_RUNNERS: 6
          JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]'
        run: |
          cd ./test/integration/consul-container
          NUM_RUNNERS=$TOTAL_RUNNERS
          NUM_DIRS=$(find ./test -mindepth 1 -maxdepth 2 -type d | wc -l)

          if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then
            echo "TOTAL_RUNNERS is larger than the number of tests/packages to split."
            NUM_RUNNERS=$((NUM_DIRS-1))
          fi
          # fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS.
          NUM_RUNNERS=$((NUM_RUNNERS-1))
          {
            echo -n "compatibility-matrix="
            find ./test -maxdepth 2 -type d -print0 | xargs -0 -n 1 \
              | grep -v util | grep -v upgrade \
              | jq --raw-input --argjson runnercount "$NUM_RUNNERS" "$JQ_SLICER" \
              | jq --compact-output 'map(join(" "))'
          } >> "$GITHUB_OUTPUT"

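Note: the JQ_SLICER removed above splits the list of test package directories into roughly equal, space-joined groups, one group per runner. Because the group size is length / $runnercount rounded down, the slicer can emit one more group than requested, which is what the NUM_RUNNERS=$((NUM_RUNNERS-1)) line compensates for. A minimal sketch of the same slicing on made-up directory names (jq's -n flag added here so every line is read through inputs):

printf '%s\n' pkg/a pkg/b pkg/c pkg/d pkg/e \
  | jq --null-input --raw-input --argjson runnercount 2 \
      '[ inputs ] | [_nwise(length / $runnercount | floor)]' \
  | jq --compact-output 'map(join(" "))'
# prints ["pkg/a pkg/b","pkg/c pkg/d","pkg/e"] -- three groups for a
# requested runner count of two, hence the NUM_RUNNERS-1 adjustment above.
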
  compatibility-integration-test:
    runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }}
    needs:
      - setup
      - dev-build
      - generate-compatibility-job-matrices
    permissions:
      id-token: write # NOTE: this permission is explicitly required for Vault auth.
      contents: read
    strategy:
      fail-fast: false
      matrix:
        test-cases: ${{ fromJSON(needs.generate-compatibility-job-matrices.outputs.compatibility-matrix) }}
    env:
      ENVOY_VERSION: "1.25.4"
    steps:
@@ -396,8 +359,10 @@ jobs:
        with:
          go-version-file: 'go.mod'
      - run: go env

      # Build the consul:local image from the already built binary
      - name: docker env
        run: |
          docker version
          docker info
      - name: fetch binary
        uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
        with:
@@ -405,7 +370,7 @@ jobs:
          path: .
      - name: restore mode+x
        run: chmod +x consul

      # Build the consul:local image from the already built binary
      - name: Build consul:local image
        run: docker build -t ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local -f ./build-support/docker/Consul-Dev.dockerfile .
      - name: Build consul-envoy:target-version image
@@ -426,21 +391,19 @@ jobs:
          mkdir -p "/tmp/test-results"
          cd ./test/integration/consul-container
          docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version
          echo "Running $(sed 's,|, ,g' <<< "${{ matrix.test-cases }}" |wc -w) subtests"
          # shellcheck disable=SC2001
          sed 's, ,\n,g' <<< "${{ matrix.test-cases }}"
          go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \
            --raw-command \
            --format=short-verbose \
            --format=standard-verbose \
            --debug \
            --rerun-fails=3 \
            -- \
            go test \
            -p=4 \
            -p=6 \
            -parallel=4 \
            -tags "${{ env.GOTAGS }}" \
            -timeout=30m \
            -json \
            ${{ matrix.test-cases }} \
            `go list ./... | grep -v upgrade` \
            --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
            --target-version local \
            --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \
@@ -483,52 +446,16 @@ jobs:
          DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}"
          DD_ENV: ci
        run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml

  generate-upgrade-job-matrices:
    needs: [setup]
    runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }}
    name: Generate Upgrade Job Matrices
    outputs:
      upgrade-matrix: ${{ steps.set-matrix.outputs.upgrade-matrix }}
    steps:
      - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
      - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
        with:
          go-version-file: 'go.mod'
      - name: Generate Updgrade Job Matrix
        id: set-matrix
        env:
          TOTAL_RUNNERS: 5
          JQ_SLICER: '[ inputs ] | [_nwise(length / $runnercount | floor)]'
        run: |
          cd ./test/integration/consul-container/test/upgrade
          NUM_RUNNERS=$TOTAL_RUNNERS
          NUM_DIRS=$(go test ./... -list=. -json | jq -r '.Output | select (. !=null) | select(. | startswith("Test")) | gsub("[\\n\\t]"; "")' | wc -l)

          if [ "$NUM_DIRS" -lt "$NUM_RUNNERS" ]; then
            echo "TOTAL_RUNNERS is larger than the number of tests/packages to split."
            NUM_RUNNERS=$((NUM_DIRS-1))
          fi
          # fix issue where test splitting calculation generates 1 more split than TOTAL_RUNNERS.
          NUM_RUNNERS=$((NUM_RUNNERS-1))
          {
            echo -n "upgrade-matrix="
            go test ./... -list=. -json | jq -r '.Output | select (. !=null) | select(. | startswith("Test")) | gsub("[\\n\\t]"; "")' \
              | jq --raw-input --argjson runnercount "$NUM_RUNNERS" "$JQ_SLICER" \
              | jq --compact-output 'map(join("|"))'
          } >> "$GITHUB_OUTPUT"

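Note: unlike the compatibility matrix, each upgrade slice is a "|"-joined list of test function names rather than package paths; the upgrade job below hands that slice straight to go test -run, where the pipes act as regex alternation. Roughly, one slice runs as shown here (a sketch; test names taken from the upgrade tests touched further down):

cd ./test/integration/consul-container/test/upgrade
go test -run "TestPeering_Basic|TestPeering_HTTPRouter" -timeout=30m ./...
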
  upgrade-integration-test:
    runs-on: ${{ fromJSON(needs.setup.outputs.compute-large) }}
    runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }}
    needs:
      - setup
      - dev-build
      - generate-upgrade-job-matrices
    strategy:
      fail-fast: false
      matrix:
        consul-version: [ "1.14", "1.15"]
        test-cases: ${{ fromJSON(needs.generate-upgrade-job-matrices.outputs.upgrade-matrix) }}
    env:
      CONSUL_LATEST_VERSION: ${{ matrix.consul-version }}
      ENVOY_VERSION: "1.24.6"
@@ -576,9 +503,6 @@ jobs:
          mkdir -p "${{ env.TEST_RESULTS_DIR }}"
          cd ./test/integration/consul-container/test/upgrade
          docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version
          echo "Running $(sed 's,|, ,g' <<< "${{ matrix.test-cases }}" |wc -w) subtests"
          # shellcheck disable=SC2001
          sed 's,|,\n,g' <<< "${{ matrix.test-cases }}"
          go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \
            --raw-command \
            --format=short-verbose \
@@ -591,7 +515,6 @@ jobs:
            -tags "${{ env.GOTAGS }}" \
            -timeout=30m \
            -json ./... \
            -run "${{ matrix.test-cases }}" \
            --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
            --target-version local \
            --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \
@@ -617,9 +540,7 @@ jobs:
      - vault-integration-test
      - generate-envoy-job-matrices
      - envoy-integration-test
      - generate-compatibility-job-matrices
      - compatibility-integration-test
      - generate-upgrade-job-matrices
      - upgrade-integration-test
    runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }}
    if: ${{ always() }}
@@ -32,6 +32,13 @@ type BuiltCluster struct {
	Gateway libservice.Service
}

type PeeringClusterSize struct {
	AcceptingNumServers int
	AcceptingNumClients int
	DialingNumServers   int
	DialingNumClients   int
}

// BasicPeeringTwoClustersSetup sets up a scenario for testing peering, which consists of
//
// - an accepting cluster with 3 servers and 1 client agent. The client should be used to
@@ -46,11 +53,12 @@ func BasicPeeringTwoClustersSetup(
	t *testing.T,
	consulImage string,
	consulVersion string,
	pcs PeeringClusterSize,
	peeringThroughMeshgateway bool,
) (*BuiltCluster, *BuiltCluster) {
	acceptingCluster, acceptingCtx, acceptingClient := NewCluster(t, &ClusterConfig{
		NumServers: 3,
		NumClients: 1,
		NumServers: pcs.AcceptingNumServers,
		NumClients: pcs.AcceptingNumClients,
		BuildOpts: &libcluster.BuildOptions{
			Datacenter:      "dc1",
			ConsulImageName: consulImage,
@@ -61,8 +69,8 @@ func BasicPeeringTwoClustersSetup(
	})

	dialingCluster, dialingCtx, dialingClient := NewCluster(t, &ClusterConfig{
		NumServers: 1,
		NumClients: 1,
		NumServers: pcs.DialingNumServers,
		NumClients: pcs.DialingNumClients,
		BuildOpts: &libcluster.BuildOptions{
			Datacenter:      "dc2",
			ConsulImageName: consulImage,
@@ -502,8 +502,6 @@ func TestHTTPRouteParentRefChange(t *testing.T) {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	// infrastructure set up
	address := "localhost"
@@ -53,7 +53,14 @@ import (
func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
	t.Parallel()

	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetTargetImageName(), utils.TargetVersion, false)
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetTargetImageName(), utils.TargetVersion,
		libtopology.PeeringClusterSize{
			AcceptingNumServers: 3,
			AcceptingNumClients: 1,
			DialingNumServers:   1,
			DialingNumClients:   1,
		},
		false)
	var (
		acceptingCluster = accepting.Cluster
		dialingCluster   = dialing.Cluster
@@ -82,7 +82,9 @@ func testSnapShotRestoreForLogStore(t *testing.T, logStore libcluster.LogStore)
	libcluster.WaitForMembers(t, client2, 3)

	// Restore the saved snapshot
	require.NoError(t, client2.Snapshot().Restore(nil, snapshot))
	retry.RunWith(libcluster.LongFailer(), t, func(r *retry.R) {
		require.NoError(r, client2.Snapshot().Restore(nil, snapshot))
	})

	libcluster.WaitForLeader(t, cluster2, client2)
@@ -32,8 +32,6 @@ var (

// Test upgrade a cluster of latest version to the target version
func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
	t.Parallel()

	tcs = append(tcs,
		testcase{
			// Use the case of "1.12.3" ==> "1.13.0" to verify the test can
@@ -27,7 +27,14 @@ import (
func TestPeering_ControlPlaneMGW(t *testing.T) {
	t.Parallel()

	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion, true)
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion,
		libtopology.PeeringClusterSize{
			AcceptingNumServers: 1,
			AcceptingNumClients: 1,
			DialingNumServers:   1,
			DialingNumClients:   1,
		},
		true)
	var (
		acceptingCluster = accepting.Cluster
		dialingCluster   = dialing.Cluster
@@ -20,14 +20,28 @@ import (

func TestPeering_Basic(t *testing.T) {
	t.Parallel()
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion, false)
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion,
		libtopology.PeeringClusterSize{
			AcceptingNumServers: 1,
			AcceptingNumClients: 1,
			DialingNumServers:   1,
			DialingNumClients:   1,
		},
		false)
	peeringUpgrade(t, accepting, dialing, utils.TargetVersion)
	peeringPostUpgradeValidation(t, dialing)
}

func TestPeering_HTTPRouter(t *testing.T) {
	t.Parallel()
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion, false)
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion,
		libtopology.PeeringClusterSize{
			AcceptingNumServers: 1,
			AcceptingNumClients: 1,
			DialingNumServers:   1,
			DialingNumClients:   1,
		},
		false)
	acceptingCluster := accepting.Cluster

	// Create a second static-server at the client agent of accepting cluster and
@@ -90,7 +104,14 @@ func TestPeering_HTTPRouter(t *testing.T) {
func TestPeering_HTTPResolverAndFailover(t *testing.T) {
	t.Parallel()

	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion, false)
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion,
		libtopology.PeeringClusterSize{
			AcceptingNumServers: 1,
			AcceptingNumClients: 1,
			DialingNumServers:   1,
			DialingNumClients:   1,
		},
		false)
	dialingCluster := dialing.Cluster

	require.NoError(t, dialingCluster.ConfigEntryWrite(&api.ProxyConfigEntry{
@@ -183,7 +204,14 @@ func TestPeering_HTTPResolverAndFailover(t *testing.T) {
func TestPeering_HTTPResolverAndSplitter(t *testing.T) {
	t.Parallel()

	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion, false)
	accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.GetLatestImageName(), utils.LatestVersion,
		libtopology.PeeringClusterSize{
			AcceptingNumServers: 1,
			AcceptingNumClients: 1,
			DialingNumServers:   1,
			DialingNumClients:   1,
		},
		false)
	dialingCluster := dialing.Cluster

	require.NoError(t, dialingCluster.ConfigEntryWrite(&api.ProxyConfigEntry{