Mirror of https://github.com/status-im/consul.git, synced 2025-02-16 15:47:21 +00:00

Merge branch 'master' into feature/ui-node-ports-for-service

This commit is contained in: fd60f225ec

3  .dockerignore  Normal file
@@ -0,0 +1,3 @@
pkg/
.git
bin/
CHANGELOG.md

@@ -3,11 +3,14 @@
FEATURES:

* dns: Enable PTR record lookups for services with IPs that have no registered node [[PR-4083](https://github.com/hashicorp/consul/pull/4083)]
* ui: Default to serving the new UI. Setting the `CONSUL_UI_LEGACY` environment variable to `1` or `true` will revert to serving the old UI

IMPROVEMENTS:

* agent: A Consul user-agent string is now sent to providers when making retry-join requests [GH-4013](https://github.com/hashicorp/consul/pull/4013)
* client: Add metrics for failed RPCs [PR-4220](https://github.com/hashicorp/consul/pull/4220)
* agent: Add configuration entry to control including TXT records for node meta in DNS responses [PR-4215](https://github.com/hashicorp/consul/pull/4215)
* client: Make RPC rate limit configuration reloadable [GH-4012](https://github.com/hashicorp/consul/issues/4012)

BUG FIXES:
152  GNUmakefile
@@ -16,35 +16,97 @@ GOTEST_PKGS ?= "./..."
else
GOTEST_PKGS=$(shell go list ./... | sed 's/github.com\/hashicorp\/consul/./' | egrep -v "^($(GOTEST_PKGS_EXCLUDE))$$")
endif

GOOS=$(shell go env GOOS)
GOARCH=$(shell go env GOARCH)
GOOS?=$(shell go env GOOS)
GOARCH?=$(shell go env GOARCH)
GOPATH=$(shell go env GOPATH)

ASSETFS_PATH?=agent/bindata_assetfs.go

# Get the git commit
GIT_COMMIT=$(shell git rev-parse --short HEAD)
GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
GIT_DESCRIBE=$(shell git describe --tags --always)
GIT_COMMIT?=$(shell git rev-parse --short HEAD)
GIT_DIRTY?=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
GIT_DESCRIBE?=$(shell git describe --tags --always)
GIT_IMPORT=github.com/hashicorp/consul/version
GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -X $(GIT_IMPORT).GitDescribe=$(GIT_DESCRIBE)

ifeq ($(FORCE_REBUILD),1)
NOCACHE=--no-cache
else
NOCACHE=
endif

DOCKER_BUILD_QUIET?=1
ifeq (${DOCKER_BUILD_QUIET},1)
QUIET=-q
else
QUIET=
endif

CONSUL_DEV_IMAGE?=consul-dev
GO_BUILD_TAG?=consul-build-go
UI_BUILD_TAG?=consul-build-ui
UI_LEGACY_BUILD_TAG?=consul-build-ui-legacy
BUILD_CONTAINER_NAME?=consul-builder

DIST_TAG?=1
DIST_BUILD?=1
DIST_SIGN?=1

ifdef DIST_VERSION
DIST_VERSION_ARG=-v "$(DIST_VERSION)"
else
DIST_VERSION_ARG=
endif

ifdef DIST_RELEASE_DATE
DIST_DATE_ARG=-d "$(DIST_RELEASE_DATE)"
else
DIST_DATE_ARG=
endif

ifdef DIST_PRERELEASE
DIST_REL_ARG=-r "$(DIST_PRERELEASE)"
else
DIST_REL_ARG=
endif

PUB_GIT?=1
PUB_WEBSITE?=1

ifeq ($(PUB_GIT),1)
PUB_GIT_ARG=-g
else
PUB_GIT_ARG=
endif

ifeq ($(PUB_WEBSITE),1)
PUB_WEBSITE_ARG=-g
else
PUB_WEBSITE_ARG=
endif

export GO_BUILD_TAG
export UI_BUILD_TAG
export UI_LEGACY_BUILD_TAG
export BUILD_CONTAINER_NAME
export GIT_COMMIT
export GIT_DIRTY
export GIT_DESCRIBE
export GOTAGS
export GOLDFLAGS

# all builds binaries for all targets
all: bin

bin: tools
	@mkdir -p bin/
	@GOTAGS='$(GOTAGS)' sh -c "'$(CURDIR)/scripts/build.sh'"
bin: tools dev-build

# dev creates binaries for testing locally - these are put into ./bin and $GOPATH
dev: changelogfmt vendorfmt dev-build

dev-build:
	@echo "--> Building consul"
	mkdir -p pkg/$(GOOS)_$(GOARCH)/ bin/
	go install -ldflags '$(GOLDFLAGS)' -tags '$(GOTAGS)'
	cp $(GOPATH)/bin/consul bin/
	cp $(GOPATH)/bin/consul pkg/$(GOOS)_$(GOARCH)
	@$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh -o $(GOOS) -a $(GOARCH)

dev-docker:
	@docker build -t '$(CONSUL_DEV_IMAGE)' --build-arg 'GIT_COMMIT=$(GIT_COMMIT)' --build-arg 'GIT_DIRTY=$(GIT_DIRTY)' --build-arg 'GIT_DESCRIBE=$(GIT_DESCRIBE)' -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile $(CURDIR)

vendorfmt:
	@echo "--> Formatting vendor/vendor.json"
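The recurring `=` to `?=` change in this hunk (GOOS, GOARCH, GIT_COMMIT, GIT_DIRTY, GIT_DESCRIBE) is what lets the new build containers drive the Makefile: in GNU make, `?=` assigns only when the variable is not already set, so values exported in the environment or passed on the command line (for example, `make dev-build GOOS=linux GOARCH=amd64`, or the GIT_* build args forwarded by the Docker images below) take precedence over the locally computed defaults.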
@@ -57,12 +119,17 @@ changelogfmt:

# linux builds a linux package independent of the source platform
linux:
	mkdir -p pkg/linux_amd64/
	GOOS=linux GOARCH=amd64 go build -ldflags '$(GOLDFLAGS)' -tags '$(GOTAGS)' -o pkg/linux_amd64/consul
	@$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh -o linux -a amd64

# dist builds binaries for all platforms and packages them for distribution
dist:
	@GOTAGS='$(GOTAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
	@$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' $(DIST_VERSION_ARG) $(DIST_DATE_ARG) $(DIST_REL_ARG)

publish:
	@$(SHELL) $(CURDIR)/build-support/scripts/publish.sh $(PUB_GIT_ARG) $(PUB_WEBSITE_ARG)

dev-tree:
	@$(SHELL) $(CURDIR)/build-support/scripts/dev.sh

cov:
	gocov test $(GOFILES) | gocov-html > /tmp/coverage.html
@@ -111,20 +178,57 @@ vet:
	exit 1; \
	fi

# Build the static web ui and build static assets inside a Docker container, the
# same way a release build works. This implicitly does a "make static-assets" at
# the end.
ui:
	@sh -c "'$(CURDIR)/scripts/ui.sh'"

# If you've run "make ui" manually then this will get called for you. This is
# also run as part of the release build script when it verifies that there are no
# changes to the UI assets that aren't checked in.
static-assets:
	@go-bindata-assetfs -pkg agent -prefix pkg -o agent/bindata_assetfs.go ./pkg/web_ui/...
	@go-bindata-assetfs -pkg agent -prefix pkg -o $(ASSETFS_PATH) ./pkg/web_ui/...
	$(MAKE) format

# Build the static web ui and build static assets inside a Docker container
ui: ui-legacy-docker ui-docker static-assets-docker

tools:
	go get -u -v $(GOTOOLS)

.PHONY: all ci bin dev dist cov test cover format vet ui static-assets tools vendorfmt
version:
	@echo -n "Version: "
	@$(SHELL) $(CURDIR)/build-support/scripts/version.sh
	@echo -n "Version + release: "
	@$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r
	@echo -n "Version + git: "
	@$(SHELL) $(CURDIR)/build-support/scripts/version.sh -g
	@echo -n "Version + release + git: "
	@$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r -g

docker-images: go-build-image ui-build-image ui-legacy-build-image

go-build-image:
	@echo "Building Golang build container"
	@docker build $(NOCACHE) $(QUIET) --build-arg 'GOTOOLS=$(GOTOOLS)' -t $(GO_BUILD_TAG) - < build-support/docker/Build-Go.dockerfile

ui-build-image:
	@echo "Building UI build container"
	@docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) - < build-support/docker/Build-UI.dockerfile

ui-legacy-build-image:
	@echo "Building Legacy UI build container"
	@docker build $(NOCACHE) $(QUIET) -t $(UI_LEGACY_BUILD_TAG) - < build-support/docker/Build-UI-Legacy.dockerfile

static-assets-docker: go-build-image
	@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh static-assets

consul-docker: go-build-image
	@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh consul

ui-docker: ui-build-image
	@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui

ui-legacy-docker: ui-legacy-build-image
	@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui-legacy

.PHONY: all ci bin dev dist cov test cover format vet ui static-assets tools vendorfmt
.PHONY: docker-images go-build-image ui-build-image ui-legacy-build-image static-assets-docker consul-docker ui-docker ui-legacy-docker version
agent/agent.go

@@ -73,6 +73,7 @@ type delegate interface {
	SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error
	Shutdown() error
	Stats() map[string]map[string]string
	ReloadConfig(config *consul.Config) error
	enterpriseDelegate
}

@@ -2491,6 +2492,11 @@ func (a *Agent) DisableNodeMaintenance() {
	a.logger.Printf("[INFO] agent: Node left maintenance mode")
}

func (a *Agent) loadLimits(conf *config.RuntimeConfig) {
	a.config.RPCRateLimit = conf.RPCRateLimit
	a.config.RPCMaxBurst = conf.RPCMaxBurst
}

func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error {
	// Bulk update the services and checks
	a.PauseSync()

@@ -2525,6 +2531,18 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error {
		return fmt.Errorf("Failed reloading watches: %v", err)
	}

	a.loadLimits(newCfg)

	// create the config for the rpc server/client
	consulCfg, err := a.consulConfig()
	if err != nil {
		return err
	}

	if err := a.delegate.ReloadConfig(consulCfg); err != nil {
		return err
	}

	// Update filtered metrics
	metrics.UpdateFilter(newCfg.TelemetryAllowedPrefixes, newCfg.TelemetryBlockedPrefixes)
agent/agent_test.go

@@ -281,6 +281,10 @@ func TestAgent_Reload(t *testing.T) {
			handler = "true"
		}
	]
	limits = {
		rpc_rate=1
		rpc_max_burst=100
	}
	`)
	defer a.Shutdown()

@@ -302,6 +306,10 @@ func TestAgent_Reload(t *testing.T) {
			name = "redis-reloaded"
		}
	]
	limits = {
		rpc_rate=2
		rpc_max_burst=200
	}
	`,
	})

@@ -312,6 +320,14 @@ func TestAgent_Reload(t *testing.T) {
		t.Fatal("missing redis-reloaded service")
	}

	if a.config.RPCRateLimit != 2 {
		t.Fatalf("RPC rate not set correctly. Got %v. Want 2", a.config.RPCRateLimit)
	}

	if a.config.RPCMaxBurst != 200 {
		t.Fatalf("RPC max burst not set correctly. Got %v. Want 200", a.config.RPCMaxBurst)
	}

	for _, wp := range a.watchPlans {
		if !wp.IsStopped() {
			t.Fatalf("Reloading configs should stop watch plans of the previous configuration")
agent/config/builder.go

@@ -592,6 +592,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
		DNSRecursors:      dnsRecursors,
		DNSServiceTTL:     dnsServiceTTL,
		DNSUDPAnswerLimit: b.intVal(c.DNS.UDPAnswerLimit),
		DNSNodeMetaTXT:    b.boolValWithDefault(c.DNS.NodeMetaTXT, true),

		// HTTP
		HTTPPort: httpPort,

@@ -1010,13 +1011,18 @@ func (b *Builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition {
	}
}

func (b *Builder) boolVal(v *bool) bool {
func (b *Builder) boolValWithDefault(v *bool, default_val bool) bool {
	if v == nil {
		return false
		return default_val
	}

	return *v
}

func (b *Builder) boolVal(v *bool) bool {
	return b.boolValWithDefault(v, false)
}

func (b *Builder) durationVal(name string, v *string) (d time.Duration) {
	if v == nil {
		return 0
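The reason boolValWithDefault takes a *bool rather than a bool is visible in the config struct below: a nil pointer means the option was absent from the parsed configuration, which is what lets DNSNodeMetaTXT default to true while an explicit false is still honored. A minimal illustrative snippet (not from the source) showing the cases:

package main

import "fmt"

// boolValWithDefault mirrors the builder helper above: a nil pointer means
// the option never appeared in the parsed configuration.
func boolValWithDefault(v *bool, defaultVal bool) bool {
	if v == nil {
		return defaultVal
	}
	return *v
}

func main() {
	var unset *bool
	explicitFalse := false

	fmt.Println(boolValWithDefault(unset, true))          // true: absent option falls back to the default
	fmt.Println(boolValWithDefault(&explicitFalse, true)) // false: an explicit setting always wins
}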
agent/config/config.go

@@ -360,6 +360,7 @@ type DNS struct {
	RecursorTimeout *string           `json:"recursor_timeout,omitempty" hcl:"recursor_timeout" mapstructure:"recursor_timeout"`
	ServiceTTL      map[string]string `json:"service_ttl,omitempty" hcl:"service_ttl" mapstructure:"service_ttl"`
	UDPAnswerLimit  *int              `json:"udp_answer_limit,omitempty" hcl:"udp_answer_limit" mapstructure:"udp_answer_limit"`
	NodeMetaTXT     *bool             `json:"enable_additional_node_meta_txt,omitempty" hcl:"enable_additional_node_meta_txt" mapstructure:"enable_additional_node_meta_txt"`
}

type HTTPConfig struct {
agent/config/runtime.go

@@ -281,6 +281,11 @@ type RuntimeConfig struct {
	// hcl: dns_config { udp_answer_limit = int }
	DNSUDPAnswerLimit int

	// DNSNodeMetaTXT controls whether DNS queries will synthesize
	// TXT records for the node metadata and add them when not specifically
	// requested (query type = TXT). If unset this will default to true
	DNSNodeMetaTXT bool

	// DNSRecursors can be set to allow the DNS servers to recursively
	// resolve non-consul domains.
	//
agent/config/runtime_test.go

@@ -3371,6 +3371,7 @@ func TestFullConfig(t *testing.T) {
	DNSRecursors:      []string{"63.38.39.58", "92.49.18.18"},
	DNSServiceTTL:     map[string]time.Duration{"*": 32030 * time.Second},
	DNSUDPAnswerLimit: 29909,
	DNSNodeMetaTXT:    true,
	DataDir:           dataDir,
	Datacenter:        "rzo029wg",
	DevMode:           true,

@@ -4043,6 +4044,7 @@ func TestSanitize(t *testing.T) {
	"DNSDomain": "",
	"DNSEnableTruncate": false,
	"DNSMaxStale": "0s",
	"DNSNodeMetaTXT": false,
	"DNSNodeTTL": "0s",
	"DNSOnlyPassing": false,
	"DNSPort": 0,
agent/consul/client.go

@@ -7,6 +7,7 @@ import (
	"os"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"

@@ -56,7 +57,7 @@ type Client struct {

	// rpcLimiter is used to rate limit the total number of RPCs initiated
	// from an agent.
	rpcLimiter *rate.Limiter
	rpcLimiter atomic.Value

	// eventCh is used to receive events from the
	// serf cluster in the datacenter

@@ -128,12 +129,13 @@ func NewClientLogger(config *Config, logger *log.Logger) (*Client, error) {
	c := &Client{
		config:     config,
		connPool:   connPool,
		rpcLimiter: rate.NewLimiter(config.RPCRate, config.RPCMaxBurst),
		eventCh:    make(chan serf.Event, serfEventBacklog),
		logger:     logger,
		shutdownCh: make(chan struct{}),
	}

	c.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst))

	if err := c.initEnterprise(); err != nil {
		c.Shutdown()
		return nil, err

@@ -263,7 +265,7 @@ TRY:

	// Enforce the RPC limit.
	metrics.IncrCounter([]string{"client", "rpc"}, 1)
	if !c.rpcLimiter.Allow() {
	if !c.rpcLimiter.Load().(*rate.Limiter).Allow() {
		metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
		return structs.ErrRPCRateExceeded
	}

@@ -306,7 +308,7 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io

	// Enforce the RPC limit.
	metrics.IncrCounter([]string{"client", "rpc"}, 1)
	if !c.rpcLimiter.Allow() {
	if !c.rpcLimiter.Load().(*rate.Limiter).Allow() {
		metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1)
		return structs.ErrRPCRateExceeded
	}

@@ -381,3 +383,10 @@ func (c *Client) GetLANCoordinate() (lib.CoordinateSet, error) {
	cs := lib.CoordinateSet{c.config.Segment: lan}
	return cs, nil
}

// ReloadConfig is used to have the Client do an online reload of
// relevant configuration information
func (c *Client) ReloadConfig(config *Config) error {
	c.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst))
	return nil
}
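The type change from *rate.Limiter to atomic.Value is what makes the limit hot-swappable: every RPC path Load()s the current limiter without taking a lock, and ReloadConfig just Store()s a freshly built one. A minimal standalone sketch of the pattern (the reloadableLimiter name is illustrative, not Consul's):

package main

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/time/rate"
)

// reloadableLimiter holds a *rate.Limiter in an atomic.Value so readers can
// fetch the current limiter without locks while a reload swaps in a new one.
type reloadableLimiter struct {
	v atomic.Value
}

func newReloadableLimiter(r rate.Limit, burst int) *reloadableLimiter {
	l := &reloadableLimiter{}
	l.v.Store(rate.NewLimiter(r, burst))
	return l
}

// allow loads whatever limiter is current; Store replaces the whole pointer
// atomically, so readers never observe a half-updated limiter.
func (l *reloadableLimiter) allow() bool {
	return l.v.Load().(*rate.Limiter).Allow()
}

// reload swaps in a fresh limiter; goroutines that already loaded the old
// one keep using it briefly, which is acceptable for rate limiting.
func (l *reloadableLimiter) reload(r rate.Limit, burst int) {
	l.v.Store(rate.NewLimiter(r, burst))
}

func main() {
	l := newReloadableLimiter(1, 1)
	fmt.Println(l.allow()) // true: the single burst token is available
	fmt.Println(l.allow()) // false: bucket is empty at 1 req/s, burst 1
	l.reload(1000, 100)
	fmt.Println(l.allow()) // true: the hot-swapped limiter has fresh tokens
}

In-flight calls that already loaded the old limiter keep using it, which is harmless here and keeps the hot path entirely lock-free.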
agent/consul/client_test.go

@@ -15,6 +15,8 @@ import (
	"github.com/hashicorp/consul/testutil/retry"
	"github.com/hashicorp/net-rpc-msgpackrpc"
	"github.com/hashicorp/serf/serf"
	"github.com/stretchr/testify/require"
	"golang.org/x/time/rate"
)

func testClientConfig(t *testing.T) (string, *Config) {

@@ -665,3 +667,25 @@ func TestClient_Encrypted(t *testing.T) {
		t.Fatalf("should be encrypted")
	}
}

func TestClient_Reload(t *testing.T) {
	t.Parallel()
	dir1, c := testClientWithConfig(t, func(c *Config) {
		c.RPCRate = 500
		c.RPCMaxBurst = 5000
	})
	defer os.RemoveAll(dir1)
	defer c.Shutdown()

	limiter := c.rpcLimiter.Load().(*rate.Limiter)
	require.Equal(t, rate.Limit(500), limiter.Limit())
	require.Equal(t, 5000, limiter.Burst())

	c.config.RPCRate = 1000
	c.config.RPCMaxBurst = 10000

	require.NoError(t, c.ReloadConfig(c.config))
	limiter = c.rpcLimiter.Load().(*rate.Limiter)
	require.Equal(t, rate.Limit(1000), limiter.Limit())
	require.Equal(t, 10000, limiter.Burst())
}
agent/consul/server.go

@@ -1066,6 +1066,12 @@ func (s *Server) GetLANCoordinate() (lib.CoordinateSet, error) {
	return cs, nil
}

// ReloadConfig is used to have the Server do an online reload of
// relevant configuration information
func (s *Server) ReloadConfig(config *Config) error {
	return nil
}

// Atomically sets a readiness state flag when leadership is obtained, to indicate that server is past its barrier write
func (s *Server) setConsistentReadReady() {
	atomic.StoreInt32(&s.readyForConsistentReads, 1)
31  agent/dns.go
@@ -51,6 +51,7 @@ type dnsConfig struct {
	ServiceTTL     map[string]time.Duration
	UDPAnswerLimit int
	ARecordLimit   int
	NodeMetaTXT    bool
}

// DNSServer is used to wrap an Agent and expose various

@@ -109,6 +110,7 @@ func GetDNSConfig(conf *config.RuntimeConfig) *dnsConfig {
		SegmentName:    conf.SegmentName,
		ServiceTTL:     conf.DNSServiceTTL,
		UDPAnswerLimit: conf.DNSUDPAnswerLimit,
		NodeMetaTXT:    conf.DNSNodeMetaTXT,
	}
}

@@ -374,7 +376,7 @@ func (d *DNSServer) nameservers(edns bool) (ns []dns.RR, extra []dns.RR) {
		}
		ns = append(ns, nsrr)

		glue := d.formatNodeRecord(nil, addr, fqdn, dns.TypeANY, d.config.NodeTTL, edns)
		glue := d.formatNodeRecord(nil, addr, fqdn, dns.TypeANY, d.config.NodeTTL, edns, false)
		extra = append(extra, glue...)

		// don't provide more than 3 servers

@@ -582,7 +584,7 @@ RPC:
	n := out.NodeServices.Node
	edns := req.IsEdns0() != nil
	addr := d.agent.TranslateAddress(datacenter, n.Address, n.TaggedAddresses)
	records := d.formatNodeRecord(out.NodeServices.Node, addr, req.Question[0].Name, qType, d.config.NodeTTL, edns)
	records := d.formatNodeRecord(out.NodeServices.Node, addr, req.Question[0].Name, qType, d.config.NodeTTL, edns, true)
	if records != nil {
		resp.Answer = append(resp.Answer, records...)
	}

@@ -610,7 +612,7 @@ func encodeKVasRFC1464(key, value string) (txt string) {
}

// formatNodeRecord takes a Node and returns an A, AAAA, TXT or CNAME record
func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qType uint16, ttl time.Duration, edns bool) (records []dns.RR) {
func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qType uint16, ttl time.Duration, edns, answer bool) (records []dns.RR) {
	// Parse the IP
	ip := net.ParseIP(addr)
	var ipv4 net.IP

@@ -671,7 +673,20 @@ func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qTy
		}
	}

	if node != nil && (qType == dns.TypeANY || qType == dns.TypeTXT) {
	node_meta_txt := false

	if node == nil {
		node_meta_txt = false
	} else if answer {
		node_meta_txt = true
	} else {
		// Use configuration when the TXT RR would
		// end up in the Additional section of the
		// DNS response
		node_meta_txt = d.config.NodeMetaTXT
	}

	if node_meta_txt {
		for key, value := range node.Meta {
			txt := value
			if !strings.HasPrefix(strings.ToLower(key), "rfc1035-") {
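The hunk cuts off just after the prefix check, but the tests later in this diff pin down the mapping: a meta key beginning with rfc1035- has its value emitted verbatim as the TXT payload, and anything else is encoded as key=value (the encodeKVasRFC1464 helper named in the hunk header above). A hedged reconstruction of just that mapping — metaToTXT is my name, not Consul's:

package main

import (
	"fmt"
	"strings"
)

// metaToTXT is a reconstruction for illustration only: values under keys
// prefixed "rfc1035-" pass through verbatim, everything else is rendered
// key=value, standing in for the encodeKVasRFC1464 call.
func metaToTXT(meta map[string]string) []string {
	var out []string
	for key, value := range meta {
		txt := value
		if !strings.HasPrefix(strings.ToLower(key), "rfc1035-") {
			txt = key + "=" + value
		}
		out = append(out, txt)
	}
	return out
}

func main() {
	// Matches the expectations in TestDNS_NodeLookup_TXT_DontSuppress below:
	// "value0" verbatim and "key0=value1" encoded (map order may vary).
	fmt.Println(metaToTXT(map[string]string{
		"rfc1035-00": "value0",
		"key0":       "value1",
	}))
}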
@@ -782,8 +797,8 @@ func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
	originalNumRecords := len(resp.Answer)

	// It is not possible to return more than 4k records even with compression
	// Since we are performing binary search it is not a big deal, but it
	// improves a bit performance, even with binary search
	// Since we are performing binary search it is not a big deal, but it
	// improves a bit performance, even with binary search
	truncateAt := 4096
	if req.Question[0].Qtype == dns.TypeSRV {
		// More than 1024 SRV records do not fit in 64k

@@ -1143,7 +1158,7 @@ func (d *DNSServer) serviceNodeRecords(dc string, nodes structs.CheckServiceNode
	handled[addr] = struct{}{}

	// Add the node record
	records := d.formatNodeRecord(node.Node, addr, qName, qType, ttl, edns)
	records := d.formatNodeRecord(node.Node, addr, qName, qType, ttl, edns, true)
	if records != nil {
		resp.Answer = append(resp.Answer, records...)
		count++

@@ -1192,7 +1207,7 @@ func (d *DNSServer) serviceSRVRecords(dc string, nodes structs.CheckServiceNodes
	}

	// Add the extra record
	records := d.formatNodeRecord(node.Node, addr, srvRec.Target, dns.TypeANY, ttl, edns)
	records := d.formatNodeRecord(node.Node, addr, srvRec.Target, dns.TypeANY, ttl, edns, false)
	if len(records) > 0 {
		// Use the node address if it doesn't differ from the service address
		if addr == node.Node.Address {
agent/dns_test.go

@@ -472,6 +472,51 @@ func TestDNS_NodeLookup_TXT(t *testing.T) {
	}
}

func TestDNS_NodeLookup_TXT_DontSuppress(t *testing.T) {
	a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = false }`)
	defer a.Shutdown()

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "google",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"rfc1035-00": "value0",
			"key0":       "value1",
		},
	}

	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	m := new(dns.Msg)
	m.SetQuestion("google.node.consul.", dns.TypeTXT)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, a.DNSAddr())
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should have the 2 TXT record replies
	if len(in.Answer) != 2 {
		t.Fatalf("Bad: %#v", in)
	}

	txtRec, ok := in.Answer[0].(*dns.TXT)
	if !ok {
		t.Fatalf("Bad: %#v", in.Answer[0])
	}
	if len(txtRec.Txt) != 1 {
		t.Fatalf("Bad: %#v", in.Answer[0])
	}
	if txtRec.Txt[0] != "value0" && txtRec.Txt[0] != "key0=value1" {
		t.Fatalf("Bad: %#v", in.Answer[0])
	}
}

func TestDNS_NodeLookup_ANY(t *testing.T) {
	a := NewTestAgent(t.Name(), ``)
	defer a.Shutdown()

@@ -510,7 +555,46 @@ func TestDNS_NodeLookup_ANY(t *testing.T) {
		},
	}
	verify.Values(t, "answer", in.Answer, wantAnswer)
}

func TestDNS_NodeLookup_ANY_DontSuppressTXT(t *testing.T) {
	a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = false }`)
	defer a.Shutdown()

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"key": "value",
		},
	}

	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	m := new(dns.Msg)
	m.SetQuestion("bar.node.consul.", dns.TypeANY)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, a.DNSAddr())
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	wantAnswer := []dns.RR{
		&dns.A{
			Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4},
			A:   []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1
		},
		&dns.TXT{
			Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa},
			Txt: []string{"key=value"},
		},
	}
	verify.Values(t, "answer", in.Answer, wantAnswer)
}

func TestDNS_EDNS0(t *testing.T) {
@@ -4613,6 +4697,93 @@ func TestDNS_ServiceLookup_FilterACL(t *testing.T) {
	}
}

func TestDNS_ServiceLookup_MetaTXT(t *testing.T) {
	a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = true }`)
	defer a.Shutdown()

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"key": "value",
		},
		Service: &structs.NodeService{
			Service: "db",
			Tags:    []string{"master"},
			Port:    12345,
		},
	}

	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	m := new(dns.Msg)
	m.SetQuestion("db.service.consul.", dns.TypeSRV)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, a.DNSAddr())
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	wantAdditional := []dns.RR{
		&dns.A{
			Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4},
			A:   []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1
		},
		&dns.TXT{
			Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa},
			Txt: []string{"key=value"},
		},
	}
	verify.Values(t, "additional", in.Extra, wantAdditional)
}

func TestDNS_ServiceLookup_SuppressTXT(t *testing.T) {
	a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = false }`)
	defer a.Shutdown()

	// Register a node with a service.
	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "bar",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"key": "value",
		},
		Service: &structs.NodeService{
			Service: "db",
			Tags:    []string{"master"},
			Port:    12345,
		},
	}

	var out struct{}
	if err := a.RPC("Catalog.Register", args, &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	m := new(dns.Msg)
	m.SetQuestion("db.service.consul.", dns.TypeSRV)

	c := new(dns.Client)
	in, _, err := c.Exchange(m, a.DNSAddr())
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	wantAdditional := []dns.RR{
		&dns.A{
			Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4},
			A:   []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1
		},
	}
	verify.Values(t, "additional", in.Extra, wantAdditional)
}

func TestDNS_AddressLookup(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t.Name(), "")
agent/http.go

@@ -157,9 +157,9 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
	}

	if s.IsUIEnabled() {
		new_ui, err := strconv.ParseBool(os.Getenv("CONSUL_UI_BETA"))
		legacy_ui, err := strconv.ParseBool(os.Getenv("CONSUL_UI_LEGACY"))
		if err != nil {
			new_ui = false
			legacy_ui = false
		}
		var uifs http.FileSystem

@@ -169,15 +169,15 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
		} else {
			fs := assetFS()

			if new_ui {
				fs.Prefix += "/v2/"
			} else {
			if legacy_ui {
				fs.Prefix += "/v1/"
			} else {
				fs.Prefix += "/v2/"
			}
			uifs = fs
		}

		if new_ui {
		if !legacy_ui {
			uifs = &redirectFS{fs: uifs}
		}
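Read together with the CHANGELOG entry at the top: the new UI under the /v2/ asset prefix is now the default, and only a CONSUL_UI_LEGACY value that strconv.ParseBool accepts as true (1, t, true, and their case variants) flips the prefix back to /v1/; an unset or unparsable value makes ParseBool return an error, legacy_ui stays false, and the new UI is served.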
16  build-support/docker/Build-Go.dockerfile  Normal file
@@ -0,0 +1,16 @@
ARG GOLANG_VERSION=1.10.1
FROM golang:${GOLANG_VERSION}

ARG GOTOOLS="github.com/elazarl/go-bindata-assetfs/... \
    github.com/hashicorp/go-bindata/... \
    github.com/magiconair/vendorfmt/cmd/vendorfmt \
    github.com/mitchellh/gox \
    golang.org/x/tools/cmd/cover \
    golang.org/x/tools/cmd/stringer \
    github.com/axw/gocov/gocov \
    gopkg.in/matm/v1/gocov-html"

RUN go get -u -v ${GOTOOLS} && mkdir -p ${GOPATH}/src/github.com/hashicorp/consul

WORKDIR $GOPATH/src/github.com/hashicorp/consul
16  build-support/docker/Build-UI-Legacy.dockerfile  Normal file
@@ -0,0 +1,16 @@
FROM ubuntu:bionic

RUN mkdir -p /consul-src/ui

RUN apt-get update -y && \
    apt-get install --no-install-recommends -y -q \
    build-essential \
    git \
    ruby \
    ruby-dev \
    zip \
    zlib1g-dev && \
    gem install bundler

WORKDIR /consul-src/ui
CMD make dist
14  build-support/docker/Build-UI.dockerfile  Normal file
@@ -0,0 +1,14 @@
ARG ALPINE_VERSION=3.7
FROM alpine:${ALPINE_VERSION}

ARG NODEJS_VERSION=8.9.3-r1
ARG MAKE_VERSION=4.2.1-r0
ARG YARN_VERSION=1.7.0

RUN apk update && \
    apk add nodejs=${NODEJS_VERSION} nodejs-npm=${NODEJS_VERSION} make=${MAKE_VERSION} rsync && \
    npm install --global yarn@${YARN_VERSION} && \
    mkdir /consul-src

WORKDIR /consul-src
CMD make
13  build-support/docker/Consul-Dev.dockerfile  Normal file
@@ -0,0 +1,13 @@
FROM golang:latest as builder
ARG GIT_COMMIT
ARG GIT_DIRTY
ARG GIT_DESCRIBE
WORKDIR /go/src/github.com/hashicorp/consul
ENV CONSUL_DEV=1
ENV COLORIZE=0
Add . /go/src/github.com/hashicorp/consul/
RUN make

FROM consul:latest

COPY --from=builder /go/src/github.com/hashicorp/consul/bin/consul /bin
39  build-support/functions/00-vars.sh  Normal file
@@ -0,0 +1,39 @@
# GPG Key ID to use for publicly released builds
HASHICORP_GPG_KEY="348FFC4C"

# Default Image Names
UI_BUILD_CONTAINER_DEFAULT="consul-build-ui"
UI_LEGACY_BUILD_CONTAINER_DEFAULT="consul-build-ui-legacy"
GO_BUILD_CONTAINER_DEFAULT="consul-build-go"

# Whether to colorize shell output
COLORIZE=${COLORIZE-1}

# determine GOPATH and the first GOPATH to use for installing binaries
GOPATH=${GOPATH:-$(go env GOPATH)}
case $(uname) in
   CYGWIN*)
      GOPATH="$(cygpath $GOPATH)"
      ;;
esac
MAIN_GOPATH=$(cut -d: -f1 <<< "${GOPATH}")

# Build debugging output is off by default
BUILD_DEBUG=${BUILD_DEBUG-0}

# default publish host is github.com - only really useful to use something else for testing
PUBLISH_GIT_HOST="${PUBLISH_GIT_HOST-github.com}"

# default publish repo is hashicorp/consul - useful to override for testing as well as in the enterprise repo
PUBLISH_GIT_REPO="${PUBLISH_GIT_REPO-hashicorp/consul.git}"

CONSUL_PKG_NAME="consul"

if test "$(uname)" == "Darwin"
then
   SED_EXT="-E"
else
   SED_EXT=""
fi

CONSUL_BINARY_TYPE=oss
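Note that COLORIZE=${COLORIZE-1} and BUILD_DEBUG=${BUILD_DEBUG-0} use the hyphen form of parameter expansion deliberately: ${VAR-default} substitutes the default only when the variable is unset, so an explicitly exported empty value (COLORIZE=) is preserved, whereas ${VAR:-default} would overwrite it.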
910  build-support/functions/10-util.sh  Normal file
@@ -0,0 +1,910 @@
function err {
   if test "${COLORIZE}" -eq 1
   then
      tput bold
      tput setaf 1
   fi

   echo "$@" 1>&2

   if test "${COLORIZE}" -eq 1
   then
      tput sgr0
   fi
}

function status {
   if test "${COLORIZE}" -eq 1
   then
      tput bold
      tput setaf 4
   fi

   echo "$@"

   if test "${COLORIZE}" -eq 1
   then
      tput sgr0
   fi
}

function status_stage {
   if test "${COLORIZE}" -eq 1
   then
      tput bold
      tput setaf 2
   fi

   echo "$@"

   if test "${COLORIZE}" -eq 1
   then
      tput sgr0
   fi
}

function debug {
   if is_set "${BUILD_DEBUG}"
   then
      if test "${COLORIZE}" -eq 1
      then
         tput setaf 6
      fi
      echo "$@"
      if test "${COLORIZE}" -eq 1
      then
         tput sgr0
      fi
   fi
}

function is_set {
   # Arguments:
   #   $1 - string value to check its truthiness
   #
   # Return:
   #   0 - is truthy (backwards I know but allows syntax like `if is_set <var>` to work)
   #   1 - is not truthy

   local val=$(tr '[:upper:]' '[:lower:]' <<< "$1")
   case $val in
      1 | t | true | y | yes)
         return 0
         ;;
      *)
         return 1
         ;;
   esac
}

function have_gpg_key {
   # Arguments:
   #   $1 - GPG Key id to check if we have installed
   #
   # Return:
   #   0 - success (we can use this key for signing)
   #   * - failure (key cannot be used)

   gpg --list-secret-keys $1 > /dev/null 2>&1
   return $?
}

function parse_version {
   # Arguments:
   #   $1 - Path to the top level Consul source
   #   $2 - boolean value for whether the release version should be parsed from the source
   #   $3 - boolean whether to use GIT_DESCRIBE and GIT_COMMIT environment variables
   #   $4 - boolean whether to omit the version part of the version string. (optional)
   #
   # Return:
   #   0 - success (will write the version to stdout)
   #   * - error (no version output)
   #
   # Notes:
   #   If the GOTAGS environment variable is present then it is used to determine which
   #   version file to use for parsing.

   local vfile="${1}/version/version.go"

   # ensure the version file exists
   if ! test -f "${vfile}"
   then
      err "Error - File not found: ${vfile}"
      return 1
   fi

   local include_release="$2"
   local use_git_env="$3"
   local omit_version="$4"

   local git_version=""
   local git_commit=""

   if test -z "${include_release}"
   then
      include_release=true
   fi

   if test -z "${use_git_env}"
   then
      use_git_env=true
   fi

   if is_set "${use_git_env}"
   then
      git_version="${GIT_DESCRIBE}"
      git_commit="${GIT_COMMIT}"
   fi

   # Get the main version out of the source file
   version_main=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile})
   release_main=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile})

   # try to determine the version if we have build tags
   for tag in "$GOTAGS"
   do
      for vfile in $(find "${1}/version" -name "version_*.go" 2> /dev/null| sort)
      do
         if grep -q "// +build $tag" "${vfile}"
         then
            version_main=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile})
            release_main=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile})
         fi
      done
   done

   local version="${version_main}"
   # override the version from source with the value of the GIT_DESCRIBE env var if present
   if test -n "${git_version}"
   then
      version="${git_version}"
   fi

   local rel_ver=""
   if is_set "${include_release}"
   then
      # Default to pre-release from the source
      rel_ver="${release_main}"

      # When no GIT_DESCRIBE env var is present and no release is in the source then we
      # are definitely in dev mode
      if test -z "${git_version}" -a -z "${rel_ver}" && is_set "${use_git_env}"
      then
         rel_ver="dev"
      fi

      # Add the release to the version
      if test -n "${rel_ver}" -a -n "${git_commit}"
      then
         rel_ver="${rel_ver} (${git_commit})"
      fi
   fi

   if test -n "${rel_ver}"
   then
      if is_set "${omit_version}"
      then
         echo "${rel_ver}" | tr -d "'"
      else
         echo "${version}-${rel_ver}" | tr -d "'"
      fi
      return 0
   elif ! is_set "${omit_version}"
   then
      echo "${version}" | tr -d "'"
      return 0
   else
      return 1
   fi
}
function get_version {
   # Arguments:
   #   $1 - Path to the top level Consul source
   #   $2 - Whether the release version should be parsed from source (optional)
   #   $3 - Whether to use GIT_DESCRIBE and GIT_COMMIT environment variables
   #
   # Returns:
   #   0 - success (the version is also echoed to stdout)
   #   1 - error
   #
   # Notes:
   #   If a VERSION environment variable is present it will override any parsing of the version from the source
   #   In addition to processing the main version.go, version_*.go files will be processed if they have
   #   a Go build tag that matches the one in the GOTAGS environment variable. This tag processing is
   #   primitive though and will not match complex build tags in the files with negation etc.

   local vers="$VERSION"
   if test -z "$vers"
   then
      # parse the OSS version from version.go
      vers="$(parse_version ${1} ${2} ${3})"
   fi

   if test -z "$vers"
   then
      return 1
   else
      echo $vers
      return 0
   fi
}

function git_branch {
   # Arguments:
   #   $1 - Path to the git repo (optional - assumes pwd is git repo otherwise)
   #
   # Returns:
   #   0 - success
   #   * - failure
   #
   # Notes:
   #   Echos the current branch to stdout when successful

   local gdir="$(pwd)"
   if test -d "$1"
   then
      gdir="$1"
   fi

   pushd "${gdir}" > /dev/null

   local ret=0
   local head="$(git status -b --porcelain=v2 | awk '{if ($1 == "#" && $2 =="branch.head") { print $3 }}')" || ret=1

   popd > /dev/null

   test ${ret} -eq 0 && echo "$head"
   return ${ret}
}

function git_upstream {
   # Arguments:
   #   $1 - Path to the git repo (optional - assumes pwd is git repo otherwise)
   #
   # Returns:
   #   0 - success
   #   * - failure
   #
   # Notes:
   #   Echos the current upstream branch to stdout when successful

   local gdir="$(pwd)"
   if test -d "$1"
   then
      gdir="$1"
   fi

   pushd "${gdir}" > /dev/null

   local ret=0
   local head="$(git status -b --porcelain=v2 | awk '{if ($1 == "#" && $2 =="branch.upstream") { print $3 }}')" || ret=1

   popd > /dev/null

   test ${ret} -eq 0 && echo "$head"
   return ${ret}
}

function git_log_summary {
   # Arguments:
   #   $1 - Path to the git repo (optional - assumes pwd is git repo otherwise)
   #
   # Returns:
   #   0 - success
   #   * - failure
   #

   local gdir="$(pwd)"
   if test -d "$1"
   then
      gdir="$1"
   fi

   pushd "${gdir}" > /dev/null

   local ret=0

   local head=$(git_branch) || ret=1
   local upstream=$(git_upstream) || ret=1
   local rev_range="${head}...${upstream}"

   if test ${ret} -eq 0
   then
      status "Git Changes:"
      git log --pretty=oneline ${rev_range} || ret=1

   fi
   return $ret
}

function git_diff {
   # Arguments:
   #   $1 - Path to the git repo (optional - assumes pwd is git repo otherwise)
   #   $2 .. $N - Optional path specification
   #
   # Returns:
   #   0 - success
   #   * - failure
   #

   local gdir="$(pwd)"
   if test -d "$1"
   then
      gdir="$1"
   fi

   shift

   pushd "${gdir}" > /dev/null

   local ret=0

   local head=$(git_branch) || ret=1
   local upstream=$(git_upstream) || ret=1

   if test ${ret} -eq 0
   then
      status "Git Diff - Paths: $@"
      git diff ${head} ${upstream} -- "$@" || ret=1
   fi
   return $ret
}

function normalize_git_url {
   url="${1#https://}"
   url="${url#git@}"
   url="${url%.git}"
   url="$(sed ${SED_EXT} -e 's/([^\/:]*)[:\/](.*)/\1:\2/' <<< "${url}")"
   echo "$url"
   return 0
}
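To make normalize_git_url concrete: both https://github.com/hashicorp/consul.git and git@github.com:hashicorp/consul.git reduce to github.com:hashicorp/consul — the prefixes and the .git suffix are stripped, then the sed expression rewrites the first / or : separator to :, which is the canonical form find_git_remote below compares remotes against.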
function git_remote_url {
   # Arguments:
   #   $1 - Path to the top level Consul source
   #   $2 - Remote name
   #
   # Returns:
   #   0 - success
   #   * - error
   #
   # Note:
   #   The push url for the git remote will be echoed to stdout

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. git_remote_url must be called with the path to the top level source as the first argument'"
      return 1
   fi

   if test -z "$2"
   then
      err "ERROR: git_remote_url must be called with a second argument that is the name of the remote"
      return 1
   fi

   local ret=0

   pushd "$1" > /dev/null

   local url=$(git remote get-url --push $2 2>&1) || ret=1

   popd > /dev/null

   if test "${ret}" -eq 0
   then
      echo "${url}"
      return 0
   fi
}

function find_git_remote {
   # Arguments:
   #   $1 - Path to the top level Consul source
   #
   # Returns:
   #   0 - success
   #   * - error
   #
   # Note:
   #   The remote name to use for publishing will be echoed to stdout upon success

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. find_git_remote must be called with the path to the top level source as the first argument'"
      return 1
   fi

   need_url=$(normalize_git_url "${PUBLISH_GIT_HOST}:${PUBLISH_GIT_REPO}")
   debug "Required normalized remote: ${need_url}"

   pushd "$1" > /dev/null

   local ret=1
   for remote in $(git remote)
   do
      url=$(git remote get-url --push ${remote}) || continue
      url=$(normalize_git_url "${url}")

      debug "Testing Remote: ${remote}: ${url}"
      if test "${url}" == "${need_url}"
      then
         echo "${remote}"
         ret=0
         break
      fi
   done

   popd > /dev/null
   return ${ret}
}

function git_remote_not_blacklisted {
   # Arguments:
   #   $1 - path to the repo
   #   $2 - the remote name
   #
   # Returns:
   #   0 - not blacklisted
   #   * - blacklisted
   return 0
}

function is_git_clean {
   # Arguments:
   #   $1 - Path to git repo
   #   $2 - boolean whether the git status should be output when not clean
   #
   # Returns:
   #   0 - success
   #   * - error
   #

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. is_git_clean must be called with the path to a git repo as the first argument'"
      return 1
   fi

   local output_status="$2"

   pushd "${1}" > /dev/null

   local ret=0
   test -z "$(git status --porcelain=v2 2> /dev/null)" || ret=1

   if is_set "${output_status}" && test "$ret" -ne 0
   then
      err "Git repo is not clean"
      # --porcelain=v1 is the same as --short except uncolorized
      git status --porcelain=v1
   fi
   popd > /dev/null
   return ${ret}
}

function update_git_env {
   # Arguments:
   #   $1 - Path to git repo
   #
   # Returns:
   #   0 - success
   #   * - error
   #

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. update_git_env must be called with the path to a git repo as the first argument'"
      return 1
   fi

   export GIT_COMMIT=$(git rev-parse --short HEAD)
   export GIT_DIRTY=$(test -n "$(git status --porcelain)" && echo "+CHANGES")
   export GIT_DESCRIBE=$(git describe --tags --always)
   export GIT_IMPORT=github.com/hashicorp/consul/version
   export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X ${GIT_IMPORT}.GitDescribe=${GIT_DESCRIBE}"
   return 0
}

function git_push_ref {
   # Arguments:
   #   $1 - Path to the top level Consul source
   #   $2 - Git ref (optional)
   #   $3 - remote (optional - if not specified we will try to determine it)
   #
   # Returns:
   #   0 - success
   #   * - error

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. push_git_release must be called with the path to the top level source as the first argument'"
      return 1
   fi

   local sdir="$1"
   local ret=0
   local remote="$3"

   # find the correct remote corresponding to the desired repo (basically prevent pushing enterprise to oss or oss to enterprise)
   if test -z "${remote}"
   then
      local remote=$(find_git_remote "${sdir}") || return 1
      status "Using git remote: ${remote}"
   fi

   local ref=""

   pushd "${sdir}" > /dev/null

   if test -z "$2"
   then
      # If no git ref was provided we lookup the current local branch and its tracking branch
      # It must have a tracking upstream and it must be tracking the sanctioned git remote
      local head=$(git_branch "${sdir}") || return 1
      local upstream=$(git_upstream "${sdir}") || return 1

      # upstream branch for this branch does not track the remote we need to push to
      # basically this checks that the upstream (could be something like origin/master) references the correct remote
      # if it doesn't then the string modification won't apply and the var will remain unchanged and equal to itself.
      if test "${upstream#${remote}/}" == "${upstream}"
      then
         err "ERROR: Upstream branch '${upstream}' does not track the correct remote '${remote}' - cannot push"
         ret=1
      fi
      ref="refs/heads/${head}"
   else
      # A git ref was provided - get the full ref and make sure it isn't ambiguous and also to
      # be able to determine whether it's a branch or tag we are pushing
      ref_out=$(git rev-parse --symbolic-full-name "$2" --)

      # -ne 2 because it should have the ref on one line followed by a line with '--'
      if test "$(wc -l <<< "${ref_out}")" -ne 2
      then
         err "ERROR: Git ref '$2' is ambiguous"
         debug "${ref_out}"
         ret=1
      else
         ref=$(head -n 1 <<< "${ref_out}")
      fi
   fi

   if test ${ret} -eq 0
   then
      case "${ref}" in
         refs/tags/*)
            status "Pushing tag ${ref#refs/tags/} to ${remote}"
            ;;
         refs/heads/*)
            status "Pushing local branch ${ref#refs/heads/} to ${remote}"
            ;;
         *)
            err "ERROR: git_push_ref func is refusing to push ref that isn't a branch or tag"
            return 1
      esac

      if ! git push "${remote}" "${ref}"
      then
         err "ERROR: Failed to push ${ref} to remote: ${remote}"
         ret=1
      fi
   fi

   popd > /dev/null

   return $ret
}
function update_version {
   # Arguments:
   #   $1 - Path to the version file
   #   $2 - Version string
   #   $3 - PreRelease version (if unset will become an empty string)
   #
   # Returns:
   #   0 - success
   #   * - error

   if ! test -f "$1"
   then
      err "ERROR: '$1' is not a regular file. update_version must be called with the path to a go version file"
      return 1
   fi

   if test -z "$2"
   then
      err "ERROR: The version specified was empty"
      return 1
   fi

   local vfile="$1"
   local version="$2"
   local prerelease="$3"

   sed ${SED_EXT} -i "" -e "s/(Version[[:space:]]*=[[:space:]]*)\"[^\"]*\"/\1\"${version}\"/g" -e "s/(VersionPrerelease[[:space:]]*=[[:space:]]*)\"[^\"]*\"/\1\"${prerelease}\"/g" "${vfile}"
   return $?
}

function set_changelog_version {
   # Arguments:
   #   $1 - Path to top level Consul source
   #   $2 - Version to put into the Changelog
   #   $3 - Release Date
   #
   # Returns:
   #   0 - success
   #   * - error

   local changelog="${1}/CHANGELOG.md"
   local version="$2"
   local rel_date="$3"

   if ! test -f "${changelog}"
   then
      err "ERROR: File not found: ${changelog}"
      return 1
   fi

   if test -z "${version}"
   then
      err "ERROR: Must specify a version to put into the changelog"
      return 1
   fi

   if test -z "${rel_date}"
   then
      rel_date=$(date +"%B %d, %Y")
   fi

   sed ${SED_EXT} -i "" -e "s/## UNRELEASED/## ${version} (${rel_date})/" "${changelog}"
   return $?
}

function unset_changelog_version {
   # Arguments:
   #   $1 - Path to top level Consul source
   #
   # Returns:
   #   0 - success
   #   * - error

   local changelog="${1}/CHANGELOG.md"

   if ! test -f "${changelog}"
   then
      err "ERROR: File not found: ${changelog}"
      return 1
   fi

   sed ${SED_EXT} -i "" -e "1 s/^## [0-9]+\.[0-9]+\.[0-9]+ \([^)]*\)/## UNRELEASED/" "${changelog}"
   return $?
}

function add_unreleased_to_changelog {
   # Arguments:
   #   $1 - Path to top level Consul source
   #
   # Returns:
   #   0 - success
   #   * - error

   local changelog="${1}/CHANGELOG.md"

   if ! test -f "${changelog}"
   then
      err "ERROR: File not found: ${changelog}"
      return 1
   fi

   # Check if we are already in unreleased mode
   if head -n 1 "${changelog}" | grep -q -c UNRELEASED
   then
      return 0
   fi

   local tfile="$(mktemp -t "CHANGELOG.md_")"
   (
      echo -e "## UNRELEASED\n" > "${tfile}" &&
      cat "${changelog}" >> "${tfile}" &&
      cp "${tfile}" "${changelog}"
   )
   local ret=$?
   rm "${tfile}"
   return $ret
}
function set_release_mode {
   # Arguments:
   #   $1 - Path to top level Consul source
   #   $2 - The version of the release
   #   $3 - The release date
   #   $4 - The pre-release version
   #
   # Returns:
   #   0 - success
   #   * - error

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. set_release_mode must be called with the path to a git repo as the first argument"
      return 1
   fi

   if test -z "$2"
   then
      err "ERROR: The version specified was empty"
      return 1
   fi

   local sdir="$1"
   local vers="$2"
   local rel_date="$(date +"%B %d, %Y")"

   if test -n "$3"
   then
      rel_date="$3"
   fi

   local changelog_vers="${vers}"
   if test -n "$4"
   then
      changelog_vers="${vers}-$4"
   fi

   status_stage "==> Updating CHANGELOG.md with release info: ${changelog_vers} (${rel_date})"
   set_changelog_version "${sdir}" "${changelog_vers}" "${rel_date}" || return 1

   status_stage "==> Updating version/version.go"
   if ! update_version "${sdir}/version/version.go" "${vers}" "$4"
   then
      unset_changelog_version "${sdir}"
      return 1
   fi

   return 0
}

function set_dev_mode {
   # Arguments:
   #   $1 - Path to top level Consul source
   #
   # Returns:
   #   0 - success
   #   * - error

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. set_dev_mode must be called with the path to a git repo as the first argument'"
      return 1
   fi

   local sdir="$1"
   local vers="$(parse_version "${sdir}" false false)"

   status_stage "==> Setting VersionPreRelease back to 'dev'"
   update_version "${sdir}/version/version.go" "${vers}" dev || return 1

   status_stage "==> Adding new UNRELEASED label in CHANGELOG.md"
   add_unreleased_to_changelog "${sdir}" || return 1

   return 0
}

function git_staging_empty {
   # Arguments:
   #   $1 - Path to git repo
   #
   # Returns:
   #   0 - success (nothing staged)
   #   * - error (staged files)

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. git_staging_empty must be called with the path to a git repo as the first argument'"
      return 1
   fi

   pushd "$1" > /dev/null

   declare -i ret=0

   for status in $(git status --porcelain=v2 | awk '{print $2}' | cut -b 1)
   do
      if test "${status}" != "."
      then
         ret=1
         break
      fi
   done

   popd > /dev/null
   return ${ret}
}

function commit_dev_mode {
   # Arguments:
   #   $1 - Path to top level Consul source
   #
   # Returns:
   #   0 - success
   #   * - error

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. commit_dev_mode must be called with the path to a git repo as the first argument'"
      return 1
   fi

   status "Checking for previously staged files"
   git_staging_empty "$1" || return 1

   declare -i ret=0

   pushd "$1" > /dev/null

   status "Staging CHANGELOG.md and version_*.go files"
   git add CHANGELOG.md && git add version/version*.go
   ret=$?

   if test ${ret} -eq 0
   then
      status "Adding Commit"
      git commit -m "Putting source back into Dev Mode"
      ret=$?
   fi

   popd >/dev/null
   return ${ret}
}

function gpg_detach_sign {
   # Arguments:
   #   $1 - File to sign
   #   $2 - Alternative GPG key to use for signing
   #
   # Returns:
   #   0 - success
   #   * - failure

   # determine whether the gpg key to use is being overridden
   local gpg_key=${HASHICORP_GPG_KEY}
   if test -n "$2"
   then
      gpg_key=$2
   fi

   gpg --default-key "${gpg_key}" --detach-sig --yes -v "$1"
   return $?
}

function shasum_directory {
   # Arguments:
   #   $1 - Path to directory containing the files to shasum
   #   $2 - File to output sha sums to
   #
   # Returns:
   #   0 - success
   #   * - failure

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory and shasum_release requires passing a directory as the first argument"
      return 1
   fi

   if test -z "$2"
   then
      err "ERROR: shasum_release requires a second argument to be the filename to output the shasums to but none was given"
      return 1
   fi

   pushd $1 > /dev/null
   shasum -a256 * > "$2"
   ret=$?
   popd >/dev/null

   return $ret
}
436  build-support/functions/20-build.sh  Normal file
@ -0,0 +1,436 @@
|
||||
function refresh_docker_images {
   # Arguments:
   #    $1 - Path to top level Consul source
   #    $2 - Which make target to invoke (optional)
   #
   # Return:
   #    0 - success
   #    * - failure

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. refresh_docker_images must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"
   local targets="$2"

   test -n "${targets}" || targets="docker-images"

   make -C "${sdir}" ${targets}
   return $?
}
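
# Illustrative usage (not part of the original script): rebuild all of the
# build images, or only the Go build image, for the tree at SOURCE_DIR.
#
#    refresh_docker_images "${SOURCE_DIR}"
#    refresh_docker_images "${SOURCE_DIR}" go-build-image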

function build_ui {
   # Arguments:
   #    $1 - Path to the top level Consul source
   #    $2 - The docker image to run the build within (optional)
   #
   # Returns:
   #    0 - success
   #    * - error
   #
   # Notes:
   #    Use the GIT_COMMIT environment variable to pass off to the build

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. build_ui must be called with the path to the top level source as the first argument"
      return 1
   fi

   local image_name=${UI_BUILD_CONTAINER_DEFAULT}
   if test -n "$2"
   then
      image_name="$2"
   fi

   local sdir="$1"
   local ui_dir="${1}/ui-v2"

   # parse the version
   version=$(parse_version "${sdir}")

   local commit_hash="${GIT_COMMIT}"
   if test -z "${commit_hash}"
   then
      commit_hash=$(git rev-parse --short HEAD)
   fi

   # make sure we run within the ui dir
   pushd ${ui_dir} > /dev/null

   status "Creating the UI Build Container with image: ${image_name}"
   local container_id=$(docker create -it -e "CONSUL_GIT_SHA=${commit_hash}" -e "CONSUL_VERSION=${version}" ${image_name})
   local ret=$?
   if test $ret -eq 0
   then
      status "Copying the source from '${ui_dir}' to /consul-src within the container"
      (
         tar -c $(ls | grep -v -E "^(node_modules|dist)$") | docker cp - ${container_id}:/consul-src &&
         status "Running build in container" && docker start -i ${container_id} &&
         rm -rf ${1}/ui-v2/dist &&
         status "Copying back artifacts" && docker cp ${container_id}:/consul-src/dist ${1}/ui-v2/dist
      )
      ret=$?
      docker rm ${container_id} > /dev/null
   fi

   if test ${ret} -eq 0
   then
      rm -rf ${1}/pkg/web_ui/v2
      cp -r ${1}/ui-v2/dist ${1}/pkg/web_ui/v2
   fi
   popd > /dev/null
   return $ret
}
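
# Illustrative usage (not part of the original script): build the new UI with
# the default image, or with the image tag exported by the GNUmakefile.
#
#    build_ui "${SOURCE_DIR}"
#    build_ui "${SOURCE_DIR}" "${UI_BUILD_TAG}"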

function build_ui_legacy {
   # Arguments:
   #    $1 - Path to the top level Consul source
   #    $2 - The docker image to run the build within (optional)
   #
   # Returns:
   #    0 - success
   #    * - error

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. build_ui_legacy must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"
   local ui_legacy_dir="${sdir}/ui"

   local image_name=${UI_LEGACY_BUILD_CONTAINER_DEFAULT}
   if test -n "$2"
   then
      image_name="$2"
   fi

   pushd ${ui_legacy_dir} > /dev/null
   status "Creating the Legacy UI Build Container with image: ${image_name}"
   rm -r ${sdir}/pkg/web_ui/v1 >/dev/null 2>&1
   mkdir -p ${sdir}/pkg/web_ui/v1
   local container_id=$(docker create -it ${image_name})
   local ret=$?
   if test $ret -eq 0
   then
      status "Copying the source from '${ui_legacy_dir}' to /consul-src/ui within the container"
      (
         docker cp . ${container_id}:/consul-src/ui &&
         status "Running build in container" &&
         docker start -i ${container_id} &&
         status "Copying back artifacts" &&
         docker cp ${container_id}:/consul-src/pkg/web_ui/v1/. ${sdir}/pkg/web_ui/v1
      )
      ret=$?
      docker rm ${container_id} > /dev/null
   fi
   popd > /dev/null
   return $ret
}

function build_assetfs {
   # Arguments:
   #    $1 - Path to the top level Consul source
   #    $2 - The docker image to run the build within (optional)
   #
   # Returns:
   #    0 - success
   #    * - error
   #
   # Note:
   #    The GIT_COMMIT, GIT_DIRTY and GIT_DESCRIBE environment variables will be used if present

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. build_assetfs must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"
   local image_name=${GO_BUILD_CONTAINER_DEFAULT}
   if test -n "$2"
   then
      image_name="$2"
   fi

   pushd ${sdir} > /dev/null
   status "Creating the Go Build Container with image: ${image_name}"
   local container_id=$(docker create -it -e GIT_COMMIT=${GIT_COMMIT} -e GIT_DIRTY=${GIT_DIRTY} -e GIT_DESCRIBE=${GIT_DESCRIBE} ${image_name} make static-assets ASSETFS_PATH=bindata_assetfs.go)
   local ret=$?
   if test $ret -eq 0
   then
      status "Copying the sources from '${sdir}/(pkg/web_ui|GNUmakefile)' to /go/src/github.com/hashicorp/consul/pkg"
      (
         tar -c pkg/web_ui GNUmakefile | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul &&
         status "Running build in container" && docker start -i ${container_id} &&
         status "Copying back artifacts" && docker cp ${container_id}:/go/src/github.com/hashicorp/consul/bindata_assetfs.go ${sdir}/agent/bindata_assetfs.go
      )
      ret=$?
      docker rm ${container_id} > /dev/null
   fi
   popd > /dev/null
   return $ret
}
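
# Illustrative usage (not part of the original script): regenerate
# agent/bindata_assetfs.go from pkg/web_ui inside the Go build container.
#
#    build_assetfs "${SOURCE_DIR}" "${GO_BUILD_TAG}"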

function build_consul_post {
   # Arguments
   #    $1 - Path to the top level Consul source
   #    $2 - Subdirectory under pkg/bin (optional)
   #
   # Returns:
   #    0 - success
   #    * - error
   #
   # Notes:
   #    pkg/bin is where to place binary packages
   #    pkg.bin.new is where the just built binaries are located
   #    bin is where to place the local systems versions

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. build_consul_post must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"

   local extra_dir_name="$2"
   local extra_dir=""

   if test -n "${extra_dir_name}"
   then
      extra_dir="${extra_dir_name}/"
   fi

   pushd "${sdir}" > /dev/null

   # recreate the pkg dir
   rm -r pkg/bin/${extra_dir}* 2> /dev/null
   mkdir -p pkg/bin/${extra_dir} 2> /dev/null

   # move all files in pkg.bin.new into pkg/bin
   cp -r pkg.bin.new/${extra_dir}* pkg/bin/${extra_dir}
   rm -r pkg.bin.new

   DEV_PLATFORM="./pkg/bin/${extra_dir}$(go env GOOS)_$(go env GOARCH)"
   for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f )
   do
      # recreate the bin dir
      rm -r bin/* 2> /dev/null
      mkdir -p bin 2> /dev/null

      cp ${F} bin/
      cp ${F} ${MAIN_GOPATH}/bin
   done

   popd > /dev/null

   return 0
}

function build_consul {
   # Arguments:
   #    $1 - Path to the top level Consul source
   #    $2 - Subdirectory to put binaries in under pkg/bin (optional - must specify if needing to specify the docker image)
   #    $3 - The docker image to run the build within (optional)
   #
   # Returns:
   #    0 - success
   #    * - error
   #
   # Note:
   #    The GOLDFLAGS and GOTAGS environment variables will be used if set
   #    If the CONSUL_DEV environment var is truthy only the local platform/architecture is built.
   #    If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures
   #    will be built. Otherwise all supported platform/architectures are built

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. build_consul must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"
   local extra_dir_name="$2"
   local extra_dir=""
   local image_name=${GO_BUILD_CONTAINER_DEFAULT}
   if test -n "$3"
   then
      image_name="$3"
   fi

   pushd ${sdir} > /dev/null
   status "Creating the Go Build Container with image: ${image_name}"
   if is_set "${CONSUL_DEV}"
   then
      if test -z "${XC_OS}"
      then
         XC_OS=$(go env GOOS)
      fi

      if test -z "${XC_ARCH}"
      then
         XC_ARCH=$(go env GOARCH)
      fi
   fi
   XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
   XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"}

   if test -n "${extra_dir_name}"
   then
      extra_dir="${extra_dir_name}/"
   fi

   local container_id=$(docker create -it -e CGO_ENABLED=0 ${image_name} gox -os="${XC_OS}" -arch="${XC_ARCH}" -osarch="!darwin/arm !darwin/arm64" -ldflags "${GOLDFLAGS}" -output "pkg/bin/${extra_dir}{{.OS}}_{{.Arch}}/consul" -tags="${GOTAGS}")
   ret=$?

   if test $ret -eq 0
   then
      status "Copying the source from '${sdir}' to /go/src/github.com/hashicorp/consul"
      (
         tar -c $(ls | grep -v -E "^(ui|ui-v2|website|bin|pkg|\.git)$") | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul &&
         status "Running build in container" &&
         docker start -i ${container_id} &&
         status "Copying back artifacts" &&
         docker cp ${container_id}:/go/src/github.com/hashicorp/consul/pkg/bin pkg.bin.new
      )
      ret=$?
      docker rm ${container_id} > /dev/null

      if test $ret -eq 0
      then
         build_consul_post "${sdir}" "${extra_dir_name}"
         ret=$?
      else
         rm -r pkg.bin.new 2> /dev/null
      fi
   fi
   popd > /dev/null
   return $ret
}
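
# Illustrative usage (not part of the original script): cross compile all
# supported platforms inside the Go build container and post-process the
# results into pkg/bin.
#
#    build_consul "${SOURCE_DIR}" "" "${GO_BUILD_TAG}"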

function build_consul_local {
   # Arguments:
   #    $1 - Path to the top level Consul source
   #    $2 - Space separated string of OSes to build. If empty will use env vars for determination.
   #    $3 - Space separated string of architectures to build. If empty will use env vars for determination.
   #    $4 - Subdirectory to put binaries in under pkg/bin (optional)
   #
   # Returns:
   #    0 - success
   #    * - error
   #
   # Note:
   #    The GOLDFLAGS and GOTAGS environment variables will be used if set
   #    If the CONSUL_DEV environment var is truthy only the local platform/architecture is built.
   #    If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures
   #    will be built. Otherwise all supported platform/architectures are built
   #    The NOGOX environment variable will be used if present. This will prevent using gox and instead
   #    build with go install

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. build_consul_local must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"
   local build_os="$2"
   local build_arch="$3"
   local extra_dir_name="$4"
   local extra_dir=""

   if test -n "${extra_dir_name}"
   then
      extra_dir="${extra_dir_name}/"
   fi

   pushd ${sdir} > /dev/null
   if is_set "${CONSUL_DEV}"
   then
      if test -z "${XC_OS}"
      then
         XC_OS=$(go env GOOS)
      fi

      if test -z "${XC_ARCH}"
      then
         XC_ARCH=$(go env GOARCH)
      fi
   fi
   XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
   XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"}

   if test -z "${build_os}"
   then
      build_os="${XC_OS}"
   fi

   if test -z "${build_arch}"
   then
      build_arch="${XC_ARCH}"
   fi

   local use_gox=1
   is_set "${NOGOX}" && use_gox=0
   which gox > /dev/null || use_gox=0

   status_stage "==> Building Consul - OSes: ${build_os}, Architectures: ${build_arch}"
   mkdir pkg.bin.new 2> /dev/null
   if is_set "${use_gox}"
   then
      status "Using gox for concurrent compilation"

      CGO_ENABLED=0 gox \
         -os="${build_os}" \
         -arch="${build_arch}" \
         -osarch="!darwin/arm !darwin/arm64" \
         -ldflags="${GOLDFLAGS}" \
         -output "pkg.bin.new/${extra_dir}{{.OS}}_{{.Arch}}/consul" \
         -tags="${GOTAGS}" \
         .

      if test $? -ne 0
      then
         err "ERROR: Failed to build Consul"
         rm -r pkg.bin.new
         popd > /dev/null
         return 1
      fi
   else
      status "Building sequentially with go install"
      for os in ${build_os}
      do
         for arch in ${build_arch}
         do
            outdir="pkg.bin.new/${extra_dir}${os}_${arch}"
            osarch="${os}/${arch}"
            if test "${osarch}" == "darwin/arm" -o "${osarch}" == "darwin/arm64"
            then
               continue
            fi

            mkdir -p "${outdir}"
            GOOS=${os} GOARCH=${arch} go install -ldflags "${GOLDFLAGS}" -tags "${GOTAGS}" && cp "${MAIN_GOPATH}/bin/consul" "${outdir}/consul"
            if test $? -ne 0
            then
               err "ERROR: Failed to build Consul for ${osarch}"
               rm -r pkg.bin.new
               popd > /dev/null
               return 1
            fi
         done
      done
   fi

   popd > /dev/null

   build_consul_post "${sdir}" "${extra_dir_name}"
   if test $? -ne 0
   then
      err "ERROR: Failed postprocessing Consul binaries"
      return 1
   fi
   return 0
}
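
# Illustrative usage (not part of the original script): build just linux/amd64
# binaries with the host Go toolchain.
#
#    build_consul_local "${SOURCE_DIR}" "linux" "amd64"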
529
build-support/functions/30-release.sh
Normal file
@ -0,0 +1,529 @@
function tag_release {
   # Arguments:
   #    $1 - Path to top level consul source
   #    $2 - Version string to use for tagging the release
   #    $3 - Alternative GPG key id used for signing the release commit (optional)
   #
   # Returns:
   #    0 - success
   #    * - error
   #
   # Notes:
   #    If the RELEASE_UNSIGNED environment variable is set then no gpg signing will occur

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. tag_release must be called with the path to the top level source as the first argument"
      return 1
   fi

   if test -z "$2"
   then
      err "ERROR: tag_release must be called with a version number as the second argument"
      return 1
   fi

   # determine whether the gpg key to use is being overridden
   local gpg_key=${HASHICORP_GPG_KEY}
   if test -n "$3"
   then
      gpg_key=$3
   fi

   pushd "$1" > /dev/null
   local ret=0

   local branch_to_tag
   branch_to_tag=$(git_branch) || ret=1

   # perform an unsigned release if requested (mainly for testing locally)
   if test ${ret} -ne 0
   then
      err "ERROR: Failed to determine git branch to tag"
   elif is_set "$RELEASE_UNSIGNED"
   then
      (
         git commit --allow-empty -a -m "Release v${2}" &&
         git tag -a -m "Version ${2}" "v${2}" "${branch_to_tag}"
      )
      ret=$?
   # perform a signed release (official releases should do this)
   elif have_gpg_key ${gpg_key}
   then
      (
         git commit --allow-empty -a --gpg-sign=${gpg_key} -m "Release v${2}" &&
         git tag -a -m "Version ${2}" -s -u ${gpg_key} "v${2}" "${branch_to_tag}"
      )
      ret=$?
   # unsigned release not requested and gpg key isn't usable
   else
      err "ERROR: GPG key ${gpg_key} is not in the local keychain - to continue set RELEASE_UNSIGNED=1 in the env"
      ret=1
   fi
   popd > /dev/null
   return $ret
}
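
# Illustrative usage (not part of the original script); the version and key id
# are hypothetical:
#
#    tag_release "${SOURCE_DIR}" "1.2.0" "0123ABCD"
#    RELEASE_UNSIGNED=1 tag_release "${SOURCE_DIR}" "1.2.0"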

function package_binaries {
   # Arguments:
   #    $1 - Path to the directory containing the built binaries
   #    $2 - Destination path of the packaged binaries
   #    $3 - Version
   #
   # Returns:
   #    0 - success
   #    * - error

   local sdir="$1"
   local ddir="$2"
   local vers="$3"
   local ret=0


   if ! test -d "${sdir}"
   then
      err "ERROR: '$1' is not a directory. package_binaries must be called with the path to the directory containing the binaries"
      return 1
   fi

   rm -rf "${ddir}" > /dev/null 2>&1
   mkdir -p "${ddir}" >/dev/null 2>&1
   for platform in $(find "${sdir}" -mindepth 1 -maxdepth 1 -type d )
   do
      local os_arch=$(basename $platform)
      local dest="${ddir}/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip"
      status "Compressing ${os_arch} directory into ${dest}"
      pushd "${platform}" > /dev/null
      zip "${dest}" ./*
      ret=$?
      popd > /dev/null

      if test "$ret" -ne 0
      then
         break
      fi
   done

   return ${ret}
}
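
# Illustrative usage (not part of the original script); the paths and version
# are hypothetical:
#
#    package_binaries "pkg/bin" "pkg/dist" "1.2.0"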

function package_release_one {
   # Arguments:
   #    $1 - Path to the top level Consul source
   #    $2 - Version to use in the names of the zip files (optional)
   #    $3 - Subdirectory under pkg/dist to use (optional)
   #
   # Returns:
   #    0 - success
   #    * - error

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. package_release_one must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"
   local ret=0
   local vers="$2"
   local extra_dir_name="$3"
   local extra_dir=""

   if test -n "${extra_dir_name}"
   then
      extra_dir="${extra_dir_name}/"
   fi

   if test -z "${vers}"
   then
      vers=$(get_version "${sdir}" true false)
      ret=$?
      if test "$ret" -ne 0
      then
         err "ERROR: failed to determine the version."
         return $ret
      fi
   fi

   package_binaries "${sdir}/pkg/bin/${extra_dir}" "${sdir}/pkg/dist/${extra_dir}" "${vers}"
   return $?
}

function package_release {
   # Arguments:
   #    $1 - Path to the top level Consul source
   #    $2 - Version to use in the names of the zip files (optional)
   #
   # Returns:
   #    0 - success
   #    * - error

   package_release_one "$1" "$2" ""
   return $?
}

function shasum_release {
   # Arguments:
   #    $1 - Path to the dist directory
   #    $2 - Version of the release
   #
   # Returns:
   #    0 - success
   #    * - failure

   local sdir="$1"
   local vers="$2"

   if ! test -d "$1"
   then
      err "ERROR: shasum_release requires a path to the dist dir as the first argument"
      return 1
   fi

   if test -z "${vers}"
   then
      err "ERROR: shasum_release requires a version to be specified as the second argument"
      return 1
   fi

   local hfile="${CONSUL_PKG_NAME}_${vers}_SHA256SUMS"

   shasum_directory "${sdir}" "${sdir}/${hfile}"
   return $?
}

function sign_release {
   # Arguments:
   #    $1 - Path to distribution directory
   #    $2 - Version
   #    $3 - Alternative GPG key to use for signing (optional)
   #
   # Returns:
   #    0 - success
   #    * - failure

   local sdir="$1"
   local vers="$2"

   if ! test -d "${sdir}"
   then
      err "ERROR: sign_release requires a path to the dist dir as the first argument"
      return 1
   fi

   if test -z "${vers}"
   then
      err "ERROR: sign_release requires a version to be specified as the second argument"
      return 1
   fi

   local hfile="${CONSUL_PKG_NAME}_${vers}_SHA256SUMS"

   status_stage "==> Signing ${hfile}"
   gpg_detach_sign "${1}/${hfile}" "$3" || return 1
   return 0
}

function check_release_one {
   # Arguments:
   #    $1 - Path to the release files
   #    $2 - Version to expect
   #    $3 - boolean whether to expect the signature file
   #    $4 - Release Name (optional)
   #
   # Returns:
   #    0 - success
   #    * - failure

   declare -i ret=0

   declare -a expected_files

   declare log_extra=""

   if test -n "$4"
   then
      log_extra="for $4 "
   fi

   expected_files+=("${CONSUL_PKG_NAME}_${2}_SHA256SUMS")
   debug "check sig: $3"
   if is_set "$3"
   then
      expected_files+=("${CONSUL_PKG_NAME}_${2}_SHA256SUMS.sig")
   fi

   expected_files+=("${CONSUL_PKG_NAME}_${2}_darwin_386.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_darwin_amd64.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_freebsd_386.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_freebsd_amd64.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_freebsd_arm.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_386.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_amd64.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_arm.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_arm64.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_solaris_amd64.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_windows_386.zip")
   expected_files+=("${CONSUL_PKG_NAME}_${2}_windows_amd64.zip")

   declare -a found_files

   status_stage "==> Verifying release contents ${log_extra}- ${2}"
   debug "Expecting Files:"
   for fname in "${expected_files[@]}"
   do
      debug "    $fname"
   done

   pushd "$1" > /dev/null
   for actual_fname in $(ls)
   do
      local found=0
      for i in "${!expected_files[@]}"
      do
         local expected_fname="${expected_files[i]}"
         if test "${expected_fname}" == "${actual_fname}"
         then
            # remove from the expected_files array
            unset 'expected_files[i]'

            # append to the list of found files
            found_files+=("${expected_fname}")

            # mark it as found so we don't error
            found=1
            break
         fi
      done

      if test $found -ne 1
      then
         err "ERROR: Release build has an extra file: ${actual_fname}"
         ret=1
      fi
   done
   popd > /dev/null

   for fname in "${expected_files[@]}"
   do
      err "ERROR: Release build is missing a file: $fname"
      ret=1
   done

   if test $ret -eq 0
   then
      status "Release build contents:"
      for fname in "${found_files[@]}"
      do
         echo "    $fname"
      done
   fi

   return $ret
}

function check_release {
   # Arguments:
   #    $1 - Path to the release files
   #    $2 - Version to expect
   #    $3 - boolean whether to expect the signature file
   #
   # Returns:
   #    0 - success
   #    * - failure

   check_release_one "$1" "$2" "$3"
   return $?
}

function build_consul_release {
   build_consul "$1" "" "$2"
}

function build_release {
   # Arguments: (yeah there are lots)
   #    $1 - Path to the top level Consul source
   #    $2 - boolean whether to tag the release yet
   #    $3 - boolean whether to build the binaries
   #    $4 - boolean whether to generate the sha256 sums
   #    $5 - version to set within version.go and the changelog
   #    $6 - release date to set within the changelog
   #    $7 - release version to set
   #    $8 - alternative gpg key to use for signing operations (optional)
   #
   # Returns:
   #    0 - success
   #    * - error

   debug "Source Dir:    $1"
   debug "Tag Release:   $2"
   debug "Build Release: $3"
   debug "Sign Release:  $4"
   debug "Version:       $5"
   debug "Release Date:  $6"
   debug "Release Vers:  $7"
   debug "GPG Key:       $8"

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. build_release must be called with the path to the top level source as the first argument"
      return 1
   fi

   if test -z "$2" -o -z "$3" -o -z "$4"
   then
      err "ERROR: build_release requires 4 arguments to be specified: <path to consul source> <tag release bool?> <build binaries bool?> <shasum 256 bool?>"
      return 1
   fi

   local sdir="$1"
   local do_tag="$2"
   local do_build="$3"
   local do_sha256="$4"
   local gpg_key="$8"

   if test -z "${gpg_key}"
   then
      gpg_key=${HASHICORP_GPG_KEY}
   fi

   if ! is_set "${RELEASE_UNSIGNED}"
   then
      if ! have_gpg_key "${gpg_key}"
      then
         err "ERROR: Aborting build because no usable GPG key is present. Set RELEASE_UNSIGNED=1 to bypass this check"
         return 1
      fi
   fi

   if ! is_git_clean "${sdir}" true && ! is_set "${ALLOW_DIRTY_GIT}"
   then
      err "ERROR: Refusing to build because Git is dirty. Set ALLOW_DIRTY_GIT=1 in the environment to proceed anyway"
      return 1
   fi

   local set_vers="$5"
   local set_date="$6"
   local set_release="$7"

   if test -z "${set_vers}"
   then
      set_vers=$(get_version "${sdir}" false false)
      set_release=$(parse_version "${sdir}" true false true)
   fi

   if is_set "${do_tag}" && ! set_release_mode "${sdir}" "${set_vers}" "${set_date}" "${set_release}"
   then
      err "ERROR: Failed to put source into release mode"
      return 1
   fi

   local vers
   vers="$(get_version ${sdir} true false)"
   if test $? -ne 0
   then
      err "Please specify a version (couldn't find one based on build tags)."
      return 1
   fi

   # Make sure we aren't in dev mode
   unset CONSUL_DEV

   if is_set "${do_build}"
   then
      status_stage "==> Refreshing Docker Build Images"
      refresh_docker_images "${sdir}"
      if test $? -ne 0
      then
         err "ERROR: Failed to refresh docker images"
         return 1
      fi

      status_stage "==> Building Legacy UI for version ${vers}"
      build_ui_legacy "${sdir}" "${UI_LEGACY_BUILD_TAG}"
      if test $? -ne 0
      then
         err "ERROR: Failed to build the legacy ui"
         return 1
      fi

      status_stage "==> Building UI for version ${vers}"
      build_ui "${sdir}" "${UI_BUILD_TAG}"
      if test $? -ne 0
      then
         err "ERROR: Failed to build the ui"
         return 1
      fi

      status_stage "==> Building Static Assets for version ${vers}"
      build_assetfs "${sdir}" "${GO_BUILD_TAG}"
      if test $? -ne 0
      then
         err "ERROR: Failed to build the static assets"
         return 1
      fi

      if is_set "${do_tag}"
      then
         git add "${sdir}/agent/bindata_assetfs.go"
         if test $? -ne 0
         then
            err "ERROR: Failed to git add the assetfs file"
            return 1
         fi
      fi
   fi

   if is_set "${do_tag}"
   then
      status_stage "==> Tagging version ${vers}"
      tag_release "${sdir}" "${vers}" "${gpg_key}"
      if test $? -ne 0
      then
         err "ERROR: Failed to tag the release"
         return 1
      fi

      update_git_env "${sdir}"
   fi

   if is_set "${do_build}"
   then
      status_stage "==> Building Consul for version ${vers}"
      build_consul_release "${sdir}" "${GO_BUILD_TAG}"
      if test $? -ne 0
      then
         err "ERROR: Failed to build the Consul binaries"
         return 1
      fi

      status_stage "==> Packaging up release binaries"
      package_release "${sdir}" "${vers}"
      if test $? -ne 0
      then
         err "ERROR: Failed to package the release binaries"
         return 1
      fi
   fi

   status_stage "==> Generating SHA 256 Hashes for Binaries"
   shasum_release "${sdir}/pkg/dist" "${vers}"
   if test $? -ne 0
   then
      err "ERROR: Failed to generate SHA 256 hashes for the release"
      return 1
   fi

   if is_set "${do_sha256}"
   then
      sign_release "${sdir}/pkg/dist" "${vers}" "${gpg_key}"
      if test $? -ne 0
      then
         err "ERROR: Failed to sign the SHA 256 hashes file"
         return 1
      fi
   fi

   check_release "${sdir}/pkg/dist" "${vers}" "${do_sha256}"
   return $?
}
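
# Illustrative usage (not part of the original script): a full tagged, built
# and signed release, or an untagged, unsigned test build.
#
#    build_release "${SOURCE_DIR}" 1 1 1
#    RELEASE_UNSIGNED=1 build_release "${SOURCE_DIR}" 0 1 0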
344
build-support/functions/40-publish.sh
Normal file
@ -0,0 +1,344 @@
function hashicorp_release {
   # Arguments:
   #    $1 - Path to directory containing all of the release artifacts
   #
   # Returns:
   #    0 - success
   #    * - failure
   #
   # Notes:
   #    Requires the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables
   #    to be set

   status "Uploading files"
   hc-releases upload "${1}" || return 1

   status "Publishing the release"
   hc-releases publish || return 1

   return 0
}

function confirm_git_remote {
   # Arguments:
   #    $1 - Path to git repo
   #    $2 - remote name
   #
   # Returns:
   #    0 - success
   #    * - error
   #

   local remote="$2"
   local url=$(git_remote_url "$1" "${remote}")

   echo -e "\n\nConfigured Git Remote: ${remote}"
   echo -e "Configured Git URL:    ${url}\n"

   local answer=""

   while true
   do
      case "${answer}" in
         [yY]* )
            status "Remote Accepted"
            return 0
            ;;
         [nN]* )
            err "Remote Rejected"
            return 1
            ;;
         * )
            read -p "Is this Git Remote correct to push ${CONSUL_PKG_NAME} to? [y/n]: " answer
            ;;
      esac
   done
}

function confirm_git_push_changes {
   # Arguments:
   #    $1 - Path to git repo
   #
   # Returns:
   #    0 - success
   #    * - error
   #

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. confirm_git_push_changes must be called with the path to a git repo as the first argument"
      return 1
   fi

   pushd "${1}" > /dev/null

   declare -i ret=0
   git_log_summary || ret=1
   if test ${ret} -eq 0
   then
      # put an empty line between the git changes and the prompt
      echo ""

      local answer=""

      while true
      do
         case "${answer}" in
            [yY]* )
               status "Changes Accepted"
               ret=0
               break
               ;;
            [nN]* )
               err "Changes Rejected"
               ret=1
               break
               ;;
            ?)
               # bindata_assetfs.go will make these meaningless
               git_diff "$(pwd)" ":!agent/bindata_assetfs.go" || ret=1
               answer=""
               ;;
            * )
               read -p "Are these changes correct? [y/n] (or type ? to show the diff output): " answer
               ;;
         esac
      done
   fi

   popd > /dev/null
   return $ret
}

function extract_consul_local {
   # Arguments:
   #    $1 - Path to the directory containing the zipped binary to test
   #    $2 - Version to look for
   #
   # Returns:
   #    0 - success
   #    * - error

   local zfile="${1}/${CONSUL_PKG_NAME}_${2}_$(go env GOOS)_$(go env GOARCH).zip"

   if ! test -f "${zfile}"
   then
      err "ERROR: File not found or is not a regular file: ${zfile}"
      return 1
   fi

   local ret=0
   local tfile="$(mktemp -t "${CONSUL_PKG_NAME}_")"

   unzip -p "${zfile}" "consul" > "${tfile}"
   if test $? -eq 0
   then
      chmod +x "${tfile}"
      echo "${tfile}"
      return 0
   else
      err "ERROR: Failed to extract consul binary from the zip file"
      return 1
   fi
}
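
# Illustrative usage (not part of the original script): the function echoes
# the path of the extracted binary so the caller can capture it; the version
# is hypothetical.
#
#    consul_exe=$(extract_consul_local "pkg/dist" "1.2.0")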

function confirm_consul_version {
   # Arguments:
   #    $1 - consul exe to use
   #
   # Returns:
   #    0 - success
   #    * - error

   local consul_exe="$1"

   if ! test -x "${consul_exe}"
   then
      err "ERROR: '${consul_exe}' is not an executable"
      return 1
   fi

   "${consul_exe}" version

   # put an empty line between the version output and the prompt
   echo ""

   local ret=0
   local answer=""

   while true
   do
      case "${answer}" in
         [yY]* )
            status "Version Accepted"
            ret=0
            break
            ;;
         [nN]* )
            err "Version Rejected"
            ret=1
            break
            ;;
         * )
            read -p "Is this Consul version correct? [y/n]: " answer
            ;;
      esac
   done

   return ${ret}
}

function confirm_consul_info {
   # Arguments:
   #    $1 - Path to a consul exe that can be run on this system
   #
   # Returns:
   #    0 - success
   #    * - error

   local consul_exe="$1"
   local log_file="$(mktemp -t "consul_log_")"
   "${consul_exe}" agent -dev > "${log_file}" 2>&1 &
   local consul_pid=$!
   sleep 1
   status "First 25 lines/1s of the agent's output:"
   head -n 25 "${log_file}"

   echo ""
   local ret=0
   local answer=""

   while true
   do
      case "${answer}" in
         [yY]* )
            status "Consul Agent Output Accepted"
            break
            ;;
         [nN]* )
            err "Consul Agent Output Rejected"
            ret=1
            break
            ;;
         * )
            read -p "Is this Consul Agent Output correct? [y/n]: " answer
            ;;
      esac
   done

   if test "${ret}" -eq 0
   then
      status "Consul Info Output"
      "${consul_exe}" info
      echo ""
      local answer=""

      while true
      do
         case "${answer}" in
            [yY]* )
               status "Consul Info Output Accepted"
               break
               ;;
            [nN]* )
               err "Consul Info Output Rejected"
               return 1
               ;;
            * )
               read -p "Is this Consul Info Output correct? [y/n]: " answer
               ;;
         esac
      done
   fi

   status "Requesting Consul to leave the cluster / shutdown"
   "${consul_exe}" leave
   wait ${consul_pid} > /dev/null 2>&1

   return $?
}

function extract_consul {
   extract_consul_local "$1" "$2"
}

function publish_release {
   # Arguments:
   #    $1 - Path to top level Consul source that contains the built release
   #    $2 - boolean whether to publish to git upstream
   #    $3 - boolean whether to publish to releases.hashicorp.com
   #
   # Returns:
   #    0 - success
   #    * - error

   if ! test -d "$1"
   then
      err "ERROR: '$1' is not a directory. publish_release must be called with the path to the top level source as the first argument"
      return 1
   fi

   local sdir="$1"
   local pub_git="$2"
   local pub_hc_releases="$3"

   if test -z "${pub_git}"
   then
      pub_git=1
   fi

   if test -z "${pub_hc_releases}"
   then
      pub_hc_releases=1
   fi

   local vers
   vers="$(get_version ${sdir} true false)"
   if test $? -ne 0
   then
      err "Please specify a version (couldn't parse one from the source)."
      return 1
   fi

   status_stage "==> Verifying release files"
   check_release "${sdir}/pkg/dist" "${vers}" true || return 1

   status_stage "==> Extracting Consul version for local system"
   local consul_exe
   consul_exe=$(extract_consul "${sdir}/pkg/dist" "${vers}") || return 1
   # make sure to remove the temp file
   trap "rm '${consul_exe}'" EXIT

   status_stage "==> Confirming Consul Version"
   confirm_consul_version "${consul_exe}" || return 1

   status_stage "==> Confirming Consul Agent Info"
   confirm_consul_info "${consul_exe}" || return 1

   status_stage "==> Confirming Git is clean"
   is_git_clean "$1" true || return 1

   status_stage "==> Confirming Git Changes"
   confirm_git_push_changes "$1" || return 1

   status_stage "==> Checking for blacklisted Git Remote"
   local remote
   remote=$(find_git_remote "${sdir}") || return 1
   git_remote_not_blacklisted "${sdir}" "${remote}" || return 1

   status_stage "==> Confirming Git Remote"
   confirm_git_remote "${sdir}" "${remote}" || return 1

   if is_set "${pub_git}"
   then
      status_stage "==> Pushing to Git"
      git_push_ref "$1" "" "${remote}" || return 1
      git_push_ref "$1" "v${vers}" "${remote}" || return 1
   fi

   if is_set "${pub_hc_releases}"
   then
      status_stage "==> Publishing to releases.hashicorp.com"
      hashicorp_release "${sdir}/pkg/dist" || return 1
   fi

   return 0
}
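
# Illustrative usage (not part of the original script): verify a built release
# and push it to git without publishing to releases.hashicorp.com.
#
#    publish_release "${SOURCE_DIR}" 1 0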
152
build-support/scripts/build-docker.sh
Executable file
@ -0,0 +1,152 @@
#!/bin/bash
SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
SCRIPT_DIR=$(pwd)
pushd ../.. > /dev/null
SOURCE_DIR=$(pwd)
popd > /dev/null
pushd ../functions > /dev/null
FN_DIR=$(pwd)
popd > /dev/null
popd > /dev/null

source "${SCRIPT_DIR}/functions.sh"

function usage {
cat <<-EOF
Usage: ${SCRIPT_NAME} (consul|ui|ui-legacy|static-assets) [<options ...>]

Description:
   This script will build the various Consul components within docker containers
   and copy all the relevant artifacts out of the containers back to the source.

Options:
   -i | --image IMAGE      Alternative Docker image to run the build within.

   -s | --source DIR       Path to source to build.
                           Defaults to "${SOURCE_DIR}"

   -r | --refresh          Enables refreshing the docker image prior to building.

   -h | --help             Print this help text.
EOF
}

function err_usage {
   err "$1"
   err ""
   err "$(usage)"
}

function main {
   declare image=
   declare sdir="${SOURCE_DIR}"
   declare -i refresh=0
   declare command=""

   while test $# -gt 0
   do
      case "$1" in
         -h | --help )
            usage
            return 0
            ;;
         -i | --image )
            if test -z "$2"
            then
               err_usage "ERROR: option -i/--image requires an argument"
               return 1
            fi

            image="$2"
            shift 2
            ;;
         -s | --source )
            if test -z "$2"
            then
               err_usage "ERROR: option -s/--source requires an argument"
               return 1
            fi

            if ! test -d "$2"
            then
               err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
               return 1
            fi

            sdir="$2"
            shift 2
            ;;
         -r | --refresh )
            refresh=1
            shift
            ;;
         consul | ui | ui-legacy | static-assets )
            command="$1"
            shift
            ;;
         * )
            err_usage "ERROR: Unknown argument '$1'"
            return 1
            ;;
      esac
   done

   if test -z "${command}"
   then
      err_usage "ERROR: No command specified"
      return 1
   fi

   case "${command}" in
      consul )
         if is_set "${refresh}"
         then
            status_stage "==> Refreshing Consul build container image"
            export GO_BUILD_TAG="${image:-${GO_BUILD_CONTAINER_DEFAULT}}"
            refresh_docker_images "${sdir}" go-build-image || return 1
         fi
         status_stage "==> Building Consul"
         build_consul "${sdir}" "" "${image}" || return 1
         ;;
      static-assets )
         if is_set "${refresh}"
         then
            status_stage "==> Refreshing Consul build container image"
            export GO_BUILD_TAG="${image:-${GO_BUILD_CONTAINER_DEFAULT}}"
            refresh_docker_images "${sdir}" go-build-image || return 1
         fi
         status_stage "==> Building Static Assets"
         build_assetfs "${sdir}" "${image}" || return 1
         ;;
      ui )
         if is_set "${refresh}"
         then
            status_stage "==> Refreshing UI build container image"
            export UI_BUILD_TAG="${image:-${UI_BUILD_CONTAINER_DEFAULT}}"
            refresh_docker_images "${sdir}" ui-build-image || return 1
         fi
         status_stage "==> Building UI"
         build_ui "${sdir}" "${image}" || return 1
         ;;
      ui-legacy )
         if is_set "${refresh}"
         then
            status_stage "==> Refreshing Legacy UI build container image"
            export UI_LEGACY_BUILD_TAG="${image:-${UI_LEGACY_BUILD_CONTAINER_DEFAULT}}"
            refresh_docker_images "${sdir}" ui-legacy-build-image || return 1
         fi
         status_stage "==> Building Legacy UI"
         build_ui_legacy "${sdir}" "${image}" || return 1
         ;;
      * )
         err_usage "ERROR: Unknown command: '${command}'"
         return 1
         ;;
   esac

   return 0
}

main "$@"
exit $?
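
# Illustrative invocations (not part of the original script); the image name
# below is hypothetical:
#
#    ./build-support/scripts/build-docker.sh ui -r
#    ./build-support/scripts/build-docker.sh consul -i my-go-build-image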
107
build-support/scripts/build-local.sh
Executable file
@ -0,0 +1,107 @@
#!/bin/bash
SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
SCRIPT_DIR=$(pwd)
pushd ../.. > /dev/null
SOURCE_DIR=$(pwd)
popd > /dev/null
pushd ../functions > /dev/null
FN_DIR=$(pwd)
popd > /dev/null
popd > /dev/null

source "${SCRIPT_DIR}/functions.sh"

function usage {
cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]

Description:
   This script will build the Consul binary on the local system.
   All the requisite tooling must be installed for this to be
   successful.

Options:

   -s | --source DIR      Path to source to build.
                          Defaults to "${SOURCE_DIR}"

   -o | --os OSES         Space separated string of OS
                          platforms to build.

   -a | --arch ARCH       Space separated string of
                          architectures to build.

   -h | --help            Print this help text.
EOF
}

function err_usage {
   err "$1"
   err ""
   err "$(usage)"
}

function main {
   declare sdir="${SOURCE_DIR}"
   declare build_os=""
   declare build_arch=""


   while test $# -gt 0
   do
      case "$1" in
         -h | --help )
            usage
            return 0
            ;;
         -s | --source )
            if test -z "$2"
            then
               err_usage "ERROR: option -s/--source requires an argument"
               return 1
            fi

            if ! test -d "$2"
            then
               err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
               return 1
            fi

            sdir="$2"
            shift 2
            ;;
         -o | --os )
            if test -z "$2"
            then
               err_usage "ERROR: option -o/--os requires an argument"
               return 1
            fi

            build_os="$2"
            shift 2
            ;;
         -a | --arch )
            if test -z "$2"
            then
               err_usage "ERROR: option -a/--arch requires an argument"
               return 1
            fi

            build_arch="$2"
            shift 2
            ;;
         * )
            err_usage "ERROR: Unknown argument: '$1'"
            return 1
            ;;
      esac
   done

   build_consul_local "${sdir}" "${build_os}" "${build_arch}" || return 1

   return 0
}

main "$@"
exit $?
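
# Illustrative invocation (not part of the original script):
#
#    ./build-support/scripts/build-local.sh -o "linux darwin" -a amd64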
100
build-support/scripts/dev.sh
Executable file
@ -0,0 +1,100 @@
#!/bin/bash
SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
SCRIPT_DIR=$(pwd)
pushd ../.. > /dev/null
SOURCE_DIR=$(pwd)
popd > /dev/null
pushd ../functions > /dev/null
FN_DIR=$(pwd)
popd > /dev/null
popd > /dev/null

source "${SCRIPT_DIR}/functions.sh"

function usage {
cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]

Description:

   This script will put the source back into dev mode after a release.

Options:

   -s | --source DIR      Path to source to build.
                          Defaults to "${SOURCE_DIR}"

   --no-git               Do not commit or attempt to push
                          the changes back to the upstream.

   -h | --help            Print this help text.
EOF
}

function err_usage {
   err "$1"
   err ""
   err "$(usage)"
}

function main {
   declare sdir="${SOURCE_DIR}"
   declare build_os=""
   declare build_arch=""
   declare -i do_git=1


   while test $# -gt 0
   do
      case "$1" in
         -h | --help )
            usage
            return 0
            ;;
         -s | --source )
            if test -z "$2"
            then
               err_usage "ERROR: option -s/--source requires an argument"
               return 1
            fi

            if ! test -d "$2"
            then
               err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
               return 1
            fi

            sdir="$2"
            shift 2
            ;;
         --no-git )
            do_git=0
            shift
            ;;
         * )
            err_usage "ERROR: Unknown argument: '$1'"
            return 1
            ;;
      esac
   done

   set_dev_mode "${sdir}" || return 1

   if is_set "${do_git}"
   then
      status_stage "==> Committing Dev Mode Changes"
      commit_dev_mode "${sdir}" || return 1

      status_stage "==> Confirming Git Changes"
      confirm_git_push_changes "${sdir}" || return 1

      status_stage "==> Pushing to Git"
      git_push_ref "${sdir}" || return 1
   fi

   return 0
}

main "$@"
exit $?
17
build-support/scripts/functions.sh
Executable file
@ -0,0 +1,17 @@
#
# NOTE: This file is meant to be sourced from other bash scripts/shells
#
# It provides all the scripting around building Consul and the release process

pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
pushd ../functions > /dev/null
FUNC_DIR=$(pwd)
popd > /dev/null
popd > /dev/null

func_sources=$(find ${FUNC_DIR} -mindepth 1 -maxdepth 1 -name "*.sh" -type f | sort -n)

for src in $func_sources
do
   source $src
done
94
build-support/scripts/publish.sh
Executable file
@ -0,0 +1,94 @@
#!/bin/bash
SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
SCRIPT_DIR=$(pwd)
pushd ../.. > /dev/null
SOURCE_DIR=$(pwd)
popd > /dev/null
pushd ../functions > /dev/null
FN_DIR=$(pwd)
popd > /dev/null
popd > /dev/null

source "${SCRIPT_DIR}/functions.sh"

function usage {
cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]

Description:

   This script will "publish" a Consul release. It expects a prebuilt release in
   pkg/dist matching the version in the repo and a clean git status. It will
   prompt you to confirm the consul version and git changes you are going to
   publish prior to pushing to git and to releases.hashicorp.com.

Options:
   -s | --source DIR      Path to source to build.
                          Defaults to "${SOURCE_DIR}"

   -w | --website         Publish to releases.hashicorp.com

   -g | --git             Push release commit and tag to Git

   -h | --help            Print this help text.
EOF
}

function err_usage {
   err "$1"
   err ""
   err "$(usage)"
}

function main {
   declare sdir="${SOURCE_DIR}"
   declare -i website=0
   declare -i git_push=0

   while test $# -gt 0
   do
      case "$1" in
         -h | --help )
            usage
            return 0
            ;;
         -s | --source )
            if test -z "$2"
            then
               err_usage "ERROR: option -s/--source requires an argument"
               return 1
            fi

            if ! test -d "$2"
            then
               err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
               return 1
            fi

            sdir="$2"
            shift 2
            ;;
         -w | --website )
            website=1
            shift
            ;;
         -g | --git )
            git_push=1
            shift
            ;;
         *)
            err_usage "ERROR: Unknown argument: '$1'"
            return 1
            ;;
      esac
   done

   publish_release "${sdir}" "${git_push}" "${website}" || return 1

   return 0
}

main "$@"
exit $?
156
build-support/scripts/release.sh
Executable file
@ -0,0 +1,156 @@
#!/bin/bash
SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
SCRIPT_DIR=$(pwd)
pushd ../.. > /dev/null
SOURCE_DIR=$(pwd)
popd > /dev/null
pushd ../functions > /dev/null
FN_DIR=$(pwd)
popd > /dev/null
popd > /dev/null

source "${SCRIPT_DIR}/functions.sh"

function usage {
cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]

Description:

   This script will do a full release build of Consul. Building each component
   is done within a docker container. In addition to building Consul this
   script will do a few more things.

      * Update version/version*.go files
      * Update CHANGELOG.md to put things into release mode
      * Create a release commit. The changes in the commit include the
        CHANGELOG.md, the version files and the assetfs.
      * Tag the release
      * Generate the SHA256SUMS file for the binaries
      * Sign the SHA256SUMS file with a GPG key

Options:
   -s | --source DIR         Path to source to build.
                             Defaults to "${SOURCE_DIR}"

   -t | --tag BOOL           Whether to add a release commit and tag the build.
                             This also controls whether we put the tree into
                             release mode
                             Defaults to 1.

   -b | --build BOOL         Whether to perform the build of the UIs, assetfs and
                             binaries. Defaults to 1.

   -S | --sign BOOL          Whether to sign the generated SHA256SUMS file.
                             Defaults to 1.

   -g | --gpg-key KEY        Alternative GPG key to use for signing operations.
                             Defaults to ${HASHICORP_GPG_KEY}

   -v | --version VERSION    The version of Consul to be built. If not specified
                             the version will be parsed from the source.

   -d | --date DATE          The release date. Defaults to today.

   -r | --release STRING     The prerelease version. Defaults to an empty pre-release.

   -h | --help               Print this help text.
EOF
}

function err_usage {
   err "$1"
   err ""
   err "$(usage)"
}

function ensure_arg {
   if test -z "$2"
   then
      err_usage "ERROR: option $1 requires an argument"
      return 1
   fi

   return 0
}

function main {
   declare sdir="${SOURCE_DIR}"
   declare -i do_tag=1
   declare -i do_build=1
   declare -i do_sign=1
   declare gpg_key="${HASHICORP_GPG_KEY}"
   declare version=""
   declare release_ver=""
   declare release_date=$(date +"%B %d, %Y")

   while test $# -gt 0
   do
      case "$1" in
         -h | --help )
            usage
            return 0
            ;;
         -s | --source )
            ensure_arg "-s/--source" "$2" || return 1

            if ! test -d "$2"
            then
               err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
               return 1
            fi

            sdir="$2"
            shift 2
            ;;
         -t | --tag )
            ensure_arg "-t/--tag" "$2" || return 1
            do_tag="$2"
            shift 2
            ;;
         -b | --build )
            ensure_arg "-b/--build" "$2" || return 1
            do_build="$2"
            shift 2
            ;;
         -S | --sign )
            ensure_arg "-S/--sign" "$2" || return 1
            do_sign="$2"
            shift 2
            ;;
         -g | --gpg-key )
            ensure_arg "-g/--gpg-key" "$2" || return 1
            gpg_key="$2"
            shift 2
            ;;
         -v | --version )
            ensure_arg "-v/--version" "$2" || return 1
            version="$2"
            shift 2
            ;;
         -d | --date)
            ensure_arg "-d/--date" "$2" || return 1
            release_date="$2"
            shift 2
            ;;
         -r | --release)
            ensure_arg "-r/--release" "$2" || return 1
            release_ver="$2"
            shift 2
            ;;
         *)
            err_usage "ERROR: Unknown argument: '$1'"
            return 1
            ;;
      esac
   done

   build_release "${sdir}" "${do_tag}" "${do_build}" "${do_sign}" "${version}" "${release_date}" "${release_ver}" "${gpg_key}"
   return $?
}

main "$@"
exit $?
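
# Illustrative invocation (not part of the original script); the version and
# date are hypothetical:
#
#    ./build-support/scripts/release.sh -v 1.2.0 -d "June 26, 2018"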
92
build-support/scripts/version.sh
Executable file
@ -0,0 +1,92 @@
#!/bin/bash
SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
SCRIPT_DIR=$(pwd)
pushd ../.. > /dev/null
SOURCE_DIR=$(pwd)
popd > /dev/null
pushd ../functions > /dev/null
FN_DIR=$(pwd)
popd > /dev/null
popd > /dev/null

source "${SCRIPT_DIR}/functions.sh"

function usage {
cat <<-EOF
Usage: ${SCRIPT_NAME} [<options ...>]

Description:

   This script is just a convenience around discovering what the Consul
   version would be if you were to build it.

Options:
   -s | --source DIR      Path to source to build.
                          Defaults to "${SOURCE_DIR}"

   -r | --release         Include the release in the version

   -g | --git             Take git variables into account

   -h | --help            Print this help text.
EOF
}

function err_usage {
   err "$1"
   err ""
   err "$(usage)"
}

function main {
   declare sdir="${SOURCE_DIR}"
   declare -i release=0
   declare -i git_info=0

   while test $# -gt 0
   do
      case "$1" in
         -h | --help )
            usage
            return 0
            ;;
         -s | --source )
            if test -z "$2"
            then
               err_usage "ERROR: option -s/--source requires an argument"
               return 1
            fi

            if ! test -d "$2"
            then
               err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source"
               return 1
            fi

            sdir="$2"
            shift 2
            ;;
         -r | --release )
            release=1
            shift
            ;;
         -g | --git )
            git_info=1
            shift
            ;;
         *)
            err_usage "ERROR: Unknown argument: '$1'"
            return 1
            ;;
      esac
   done

   parse_version "${sdir}" "${release}" "${git_info}" || return 1

   return 0
}

main "$@"
exit $?
@ -27,6 +27,7 @@ services:
      - "8400:8400"
      - "8500:8500"
      - "8600:8600"
      - "8600:8600/udp"
    command: "agent -server -bootstrap-expect 3 -ui -client 0.0.0.0"

networks:
@ -1,77 +0,0 @@
#!/usr/bin/env bash
#
# This script builds the application from source for multiple platforms.
set -e

export CGO_ENABLED=0

# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

# Change into that directory
cd "$DIR"

# Determine the arch/os combos we're building for
XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"}
XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}

# Delete the old dir
echo "==> Removing old directory..."
rm -f bin/*
rm -rf pkg/*
mkdir -p bin/

# If it's dev mode, only build for ourself
if [ "${CONSUL_DEV}x" != "x" ]; then
    XC_OS=$(go env GOOS)
    XC_ARCH=$(go env GOARCH)
fi

# Build!
echo "==> Building..."
"`which gox`" \
    -os="${XC_OS}" \
    -arch="${XC_ARCH}" \
    -osarch="!darwin/arm !darwin/arm64" \
    -ldflags "${GOLDFLAGS}" \
    -output "pkg/{{.OS}}_{{.Arch}}/consul" \
    -tags="${GOTAGS}" \
    .

# Move all the compiled things to the $GOPATH/bin
GOPATH=${GOPATH:-$(go env GOPATH)}
case $(uname) in
    CYGWIN*)
        GOPATH="$(cygpath $GOPATH)"
        ;;
esac
OLDIFS=$IFS
IFS=: MAIN_GOPATH=($GOPATH)
IFS=$OLDIFS

# Copy our OS/Arch to the bin/ directory
DEV_PLATFORM="./pkg/$(go env GOOS)_$(go env GOARCH)"
for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do
    cp ${F} bin/
    cp ${F} ${MAIN_GOPATH}/bin/
done

if [ "${CONSUL_DEV}x" = "x" ]; then
    # Zip and copy to the dist dir
    echo "==> Packaging..."
    for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do
        OSARCH=$(basename ${PLATFORM})
        echo "--> ${OSARCH}"

        pushd $PLATFORM >/dev/null 2>&1
        zip ../${OSARCH}.zip ./*
        popd >/dev/null 2>&1
    done
fi

# Done!
echo
echo "==> Results:"
ls -hl bin/
@@ -1,35 +0,0 @@
FROM ubuntu:bionic

ENV GOVERSION 1.10.1

RUN apt-get update -y && \
    apt-get install --no-install-recommends -y -q \
        build-essential \
        ca-certificates \
        curl \
        git \
        ruby \
        ruby-dev \
        zip \
        zlib1g-dev \
        nodejs \
        npm \
        rsync && \
    gem install bundler && \
    npm install --global yarn && \
    npm install --global ember-cli

RUN mkdir /goroot && \
    mkdir /gopath && \
    curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz | \
    tar xzf - -C /goroot --strip-components=1

# We want to ensure that release builds never have any cgo dependencies so we
# switch that off at the highest level.
ENV CGO_ENABLED 0
ENV GOPATH /gopath
ENV GOROOT /goroot
ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH

RUN mkdir -p $GOPATH/src/github.com/hashicorp/consul
WORKDIR $GOPATH/src/github.com/hashicorp/consul
@@ -1,63 +0,0 @@
#!/usr/bin/env bash
set -e

# Get the version from the environment, or try to figure it out from the build tags.
# We process the files in the same order Go does to find the last matching tag.
if [ -z $VERSION ]; then
    # get the OSS version from version.go
    VERSION=$(awk -F\" '/Version =/ { print $2; exit }' <version/version.go)

    # if we have build tags then try to determine the version
    for tag in "$GOTAGS"; do
        for file in $(ls version/version_*.go | sort); do
            if grep -q "// +build $tag" $file; then
                VERSION=$(awk -F\" '/Version =/ { print $2; exit }' <$file)
            fi
        done
    done
fi
if [ -z $VERSION ]; then
    echo "Please specify a version (couldn't find one based on build tags)."
    exit 1
fi
echo "==> Building version $VERSION..."

# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

# Change into that dir because we expect that.
cd $DIR

# Generate the tag.
if [ -z $NOTAG ]; then
    echo "==> Tagging..."
    git commit --allow-empty -a --gpg-sign=348FFC4C -m "Release v$VERSION"
    git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" master
fi

# Do a hermetic build inside a Docker container.
if [ -z $NOBUILD ]; then
    docker build -t hashicorp/consul-builder scripts/consul-builder/
    docker run --rm -e "GOTAGS=$GOTAGS" -v "$(pwd)":/gopath/src/github.com/hashicorp/consul hashicorp/consul-builder ./scripts/dist_build.sh
fi

# Zip all the files.
rm -rf ./pkg/dist
mkdir -p ./pkg/dist
for FILENAME in $(find ./pkg -mindepth 1 -maxdepth 1 -type f); do
    FILENAME=$(basename $FILENAME)
    cp ./pkg/${FILENAME} ./pkg/dist/consul_${VERSION}_${FILENAME}
done

# Make the checksums.
pushd ./pkg/dist
shasum -a256 * > ./consul_${VERSION}_SHA256SUMS
if [ -z $NOSIGN ]; then
    echo "==> Signing..."
    gpg --default-key 348FFC4C --detach-sig ./consul_${VERSION}_SHA256SUMS
fi
popd

exit 0
@@ -1,46 +0,0 @@
#!/usr/bin/env bash
set -e

# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

# Change into that dir because we expect that.
cd $DIR

# Make sure build tools are available.
make tools

# # Build the standalone version of the web assets for the sanity check.
# pushd ui
# bundle
# make dist
# popd

# pushd ui-v2
# yarn install
# make dist
# popd

# # Fixup the timestamps to match what's checked in. This will allow us to cleanly
# # verify that the checked-in content is up to date without spurious diffs of the
# # file mod times.
# pushd pkg
# cat ../agent/bindata_assetfs.go | ../scripts/fixup_times.sh
# popd

# # Regenerate the built-in web assets. If there are any diffs after doing this
# # then we know something is up.
# make static-assets
# if ! git diff --quiet agent/bindata_assetfs.go; then
#   echo "Checked-in web assets are out of date, build aborted"
#   exit 1
# fi

# Now we are ready to do a clean build of everything. We no longer distribute the
# web UI so it's OK that it gets blown away as part of this.
rm -rf pkg
make all

exit 0
@@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -e
regex='bindataFileInfo.*name: \"(.+)\".*time.Unix.(.+),'
while read line; do
    if [[ $line =~ $regex ]]; then
        file=${BASH_REMATCH[1]}
        ts=${BASH_REMATCH[2]}
        touch --date @$ts $file
    fi
done
@@ -1,18 +0,0 @@
#!/usr/bin/env bash
set -e

# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

# Change into that dir because we expect that.
cd $DIR

# Do a hermetic build inside a Docker container.
if [ -z $NOBUILD ]; then
    docker build -t hashicorp/consul-builder scripts/consul-builder/
    docker run --rm -v "$(pwd)":/gopath/src/github.com/hashicorp/consul hashicorp/consul-builder ./scripts/ui_build.sh
fi

exit 0
@@ -1,31 +0,0 @@
#!/usr/bin/env bash
set -e

# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"

# Change into that dir because we expect that.
cd $DIR

# Make sure build tools are available.
make tools

# Build the web assets.
echo "Building the V1 UI"
pushd ui
bundle
make dist
popd

echo "Building the V2 UI"
pushd ui-v2
yarn install
make dist
popd

# Make the static assets using the container version of the builder
make static-assets

exit 0
@@ -1,42 +0,0 @@
#!/usr/bin/env bash

function install_go() {
    local go_version=1.9.1
    local download=

    download="https://storage.googleapis.com/golang/go${go_version}.linux-amd64.tar.gz"

    if [ -d /usr/local/go ] ; then
        return
    fi

    wget -q -O /tmp/go.tar.gz ${download}

    tar -C /tmp -xf /tmp/go.tar.gz
    sudo mv /tmp/go /usr/local
    sudo chown -R root:root /usr/local/go
}

install_go

# Ensure that the GOPATH tree is owned by vagrant:vagrant
mkdir -p /opt/gopath
chown -R vagrant:vagrant /opt/gopath

# Ensure Go is on PATH
if [ ! -e /usr/bin/go ] ; then
    ln -s /usr/local/go/bin/go /usr/bin/go
fi
if [ ! -e /usr/bin/gofmt ] ; then
    ln -s /usr/local/go/bin/gofmt /usr/bin/gofmt
fi


# Ensure new sessions know about GOPATH
if [ ! -f /etc/profile.d/gopath.sh ] ; then
    cat <<EOT > /etc/profile.d/gopath.sh
export GOPATH="/opt/gopath"
export PATH="/opt/gopath/bin:\$PATH"
EOT
    chmod 755 /etc/profile.d/gopath.sh
fi
@@ -1,30 +1,28 @@
ROOT:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))

server:
	yarn run start

dist:
all: build

deps: node_modules

build: deps
	yarn run build
	mv dist ../pkg/web_ui/v2

lint:
	yarn run lint:js
format:
	yarn run format:js

.PHONY: server dist lint format

.DEFAULT_GOAL=all
.PHONY: deps test all build start
all: deps
deps: node_modules yarn.lock package.json
node_modules:
	yarn
build:
	yarn run build
start:

start: deps
	yarn run start
test:

test: deps
	yarn run test
test-view:

test-view: deps
	yarn run test:view

lint: deps
	yarn run lint:js

format: deps
	yarn run format:js

node_modules: yarn.lock package.json
	yarn install

.PHONY: all deps build start test test-view lint format
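For orientation, the reworked targets are meant to chain through `deps`/`node_modules`, so a fresh checkout only needs one command (a sketch, assuming yarn is installed):

    cd ui-v2
    make build   # ensures node_modules via `yarn`, then runs `yarn run build`
    make test    # same dependency chain, then `yarn run test`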
@@ -13,7 +13,7 @@ export default Model.extend({
  ModifyIndex: attr('number'),
  LockDelay: attr('number'),
  Behavior: attr('string'),
  TTL: attr('number'),
  TTL: attr('string'),
  Checks: attr({
    defaultValue: function() {
      return [];
7  ui-v2/app/routes/dc/index.js  Normal file
@@ -0,0 +1,7 @@
import Route from '@ember/routing/route';

export default Route.extend({
  beforeModel: function() {
    this.transitionTo('dc.services');
  },
});
@@ -7,19 +7,27 @@ export default Service.extend({
  storage: window.localStorage,
  findHeaders: function() {
    // TODO: if possible this should be a promise
    const token = get(this, 'storage').getItem('token');
    // TODO: The old UI always sent ?token=
    // replicate the old functionality here
    // but remove this to be cleaner if it's not necessary
    return {
      'X-Consul-Token': get(this, 'storage').getItem('token'),
      'X-Consul-Token': token === null ? '' : token,
    };
  },
  findAll: function(key) {
    return Promise.resolve({ token: get(this, 'storage').getItem('token') });
    const token = get(this, 'storage').getItem('token');
    return Promise.resolve({ token: token === null ? '' : token });
  },
  findBySlug: function(slug) {
    // TODO: Force localStorage to always be strings...
    // const value = get(this, 'storage').getItem(slug);
    return Promise.resolve(get(this, 'storage').getItem(slug));
  },
  persist: function(obj) {
    const storage = get(this, 'storage');
    Object.keys(obj).forEach((item, i) => {
      // TODO: ...everywhere
      storage.setItem(item, obj[item]);
    });
    return Promise.resolve(obj);
@@ -45,6 +45,7 @@
@import 'components/notice';

@import 'routes/dc/service/index';
@import 'routes/dc/nodes/index';
@import 'routes/dc/kv/index';

main a {
@@ -1,4 +1,9 @@
@import './icons';
@import './tag';
td strong {
  @extend %tag;
  background-color: $gray;
}
th {
  color: $text-light !important;
}
@@ -53,12 +58,6 @@ table td a {
tbody {
  overflow-x: visible !important;
}
td strong {
  display: inline-block;
  background-color: $gray;
  padding: 1px 5px;
  border-radius: $radius-small;
}
th,
td:not(.actions),
td:not(.actions) a {
5  ui-v2/app/styles/components/tag.scss  Normal file
@@ -0,0 +1,5 @@
%tag {
  display: inline-block;
  padding: 1px 5px;
  border-radius: $radius-small;
}
6  ui-v2/app/styles/routes/dc/nodes/index.scss  Normal file
@@ -0,0 +1,6 @@
// TODO: Generalize this, also see services/index
html.template-node.template-show td.tags span {
  @extend %tag;
  background-color: $gray;
  margin-bottom: 0.5em;
}
@@ -1,9 +1,15 @@
@import '../../../components/tag';
html.template-service.template-show main dl {
  display: flex;
  margin-bottom: 1.4em;
}
html.template-service.template-show main dt::after {
  content: ':';
  display: inline-block;
  margin-right: 0.2em;
html.template-service.template-show main dt {
  display: none;
}
// TODO: Generalize this, also see nodes/index
html.template-service.template-list td.tags span,
html.template-service.template-show main dd span {
  @extend %tag;
  background-color: $gray;
  margin-bottom: 0.5em;
}
@@ -20,9 +20,11 @@
<td data-test-service-port="{{item.Port}}" class="port">
  {{item.Port}}
</td>
<td>
<td data-test-service-tags class="tags">
  {{#if (gt item.Tags.length 0)}}
    {{join ', ' item.Tags}}
    {{#each item.Tags as |item|}}
      <span>{{item}}</span>
    {{/each}}
  {{/if}}
</td>
{{/block-slot}}
@@ -1,5 +1,6 @@
{{#if (gt sessions.length 0)}}
  {{#tabular-collection
    data-test-sessions
    class="sessions"
    items=sessions as |item index|
  }}
@@ -22,7 +23,7 @@
<td>
  {{item.LockDelay}}
</td>
<td>
<td data-test-session-ttl="{{item.TTL}}">
  {{item.TTL}}
</td>
<td>
@@ -35,9 +35,11 @@
<dd title="Critical" class={{if (lt item.ChecksCritical 1) 'zero'}} style={{criticalWidth}}>{{format_number item.ChecksCritical}}</dd>
</dl>
</td>
<td style={{remainingWidth}}>
<td class="tags" style={{remainingWidth}}>
  {{#if (gt item.Tags.length 0)}}
    {{join ', ' item.Tags}}
    {{#each item.Tags as |item|}}
      <span>{{item}}</span>
    {{/each}}
  {{/if}}
</td>
{{/block-slot}}
@@ -19,7 +19,9 @@
<dl>
  <dt>Tags</dt>
  <dd data-test-tags>
    {{join ', ' item.Tags}}
    {{#each item.Tags as |item|}}
      <span>{{item}}</span>
    {{/each}}
  </dd>
</dl>
{{/if}}
@@ -13,7 +13,7 @@
<fieldset>
  <label class="type-text">
    <span>ACL Token</span>
    {{ input type='password' value=item.token }}
    {{ input type='password' value=item.token name="token" }}
    <em>The token is sent with requests as the <code>X-Consul-Token</code> HTTP header. This is used to control the ACL for the web UI.</em>
  </label>
</fieldset>
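The header described in that `<em>` copy can be exercised directly against the HTTP API, which is handy when checking what the UI sends (the token value here is hypothetical):

    # What the UI sends once a token is saved in Settings
    curl -H 'X-Consul-Token: b1gs33cr3t' http://localhost:8500/v1/catalog/datacenters

    # With no saved token the UI now sends an empty header instead of omitting it
    curl -H 'X-Consul-Token: ' http://localhost:8500/v1/catalog/datacenters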
@@ -29,12 +29,19 @@ module.exports = function(environment) {
  };
  ENV = Object.assign({}, ENV, {
    CONSUL_GIT_SHA: (function() {
      if (process.env.CONSUL_GIT_SHA) {
        return process.env.CONSUL_GIT_SHA;
      }

      return require('child_process')
        .execSync('git rev-parse --short HEAD')
        .toString()
        .trim();
    })(),
    CONSUL_VERSION: (function() {
      if (process.env.CONSUL_VERSION) {
        return process.env.CONSUL_VERSION;
      }
      // see /scripts/dist.sh:8
      const version_go = `${path.dirname(path.dirname(__dirname))}/version/version.go`;
      const contents = fs.readFileSync(version_go).toString();
@@ -46,6 +53,13 @@ module.exports = function(environment) {
        .trim()
        .split('"')[1];
    })(),
    CONSUL_BINARY_TYPE: (function() {
      if (process.env.CONSUL_BINARY_TYPE) {
        return process.env.CONSUL_BINARY_TYPE;
      }

      return "oss";
    })(),
    CONSUL_DOCUMENTATION_URL: 'https://www.consul.io/docs',
    CONSUL_COPYRIGHT_URL: 'https://www.hashicorp.com',
    CONSUL_COPYRIGHT_YEAR: '2018',
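Since each of these values checks `process.env` first, a build can pin them without touching git or `version/version.go` (a sketch, assuming ember-cli evaluates `config/environment.js` during the build; the values shown are illustrative):

    CONSUL_GIT_SHA=fd60f22 \
    CONSUL_VERSION=1.2.0 \
    CONSUL_BINARY_TYPE=oss \
    yarn run build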
12  ui-v2/tests/acceptance/dc/forwarding.feature  Normal file
@@ -0,0 +1,12 @@
@setupApplicationTest
Feature: dc forwarding
  In order to arrive at a useful page when only specifying a dc in the url
  As a user
  I should be redirected to the services page for the dc
  Scenario: Arriving at the datacenter index page with no other url info
    Given 1 datacenter model with the value "datacenter"
    When I visit the dcs page for yaml
    ---
    dc: datacenter
    ---
    Then the url should be /datacenter/services
28  ui-v2/tests/acceptance/dc/nodes/sessions/list.feature  Normal file
@@ -0,0 +1,28 @@
@setupApplicationTest
Feature: dc / nodes / sessions / list: List Lock Sessions
  In order to get information regarding lock sessions
  As a user
  I should be able to see a listing of lock sessions with necessary information under the lock sessions tab for a node
  Scenario: Given 2 sessions with string TTLs
    Given 1 datacenter model with the value "dc1"
    And 1 node model from yaml
    ---
    - ID: node-0
    ---
    And 2 session models from yaml
    ---
    - TTL: 30s
    - TTL: 60m
    ---
    When I visit the node page for yaml
    ---
    dc: dc1
    node: node-0
    ---
    And I click lockSessions on the tabs
    Then I see lockSessionsIsSelected on the tabs
    Then I see TTL on the sessions like yaml
    ---
    - 30s
    - 60m
    ---
@@ -17,7 +17,9 @@ Feature: dc / services / show: Show Service
    dc: dc1
    service: service-0
    ---
    Then I see the text "Tag1, Tag2, Tag3" in "[data-test-tags]"
    Then I see the text "Tag1" in "[data-test-tags] span:nth-child(1)"
    Then I see the text "Tag2" in "[data-test-tags] span:nth-child(2)"
    Then I see the text "Tag3" in "[data-test-tags] span:nth-child(3)"
  Scenario: Given various services the various ports on their nodes are displayed
    Given 1 datacenter model with the value "dc1"
    And 3 node models
@@ -52,4 +54,3 @@ Feature: dc / services / show: Show Service
    - "2.2.2.2:8000"
    - "3.3.3.3:8888"
    ---
15  ui-v2/tests/acceptance/settings/update.feature  Normal file
@@ -0,0 +1,15 @@
@setupApplicationTest
Feature: settings / update: Update Settings
  In order to authenticate with an ACL token
  As a user
  I need to be able to add my token via the UI
  Scenario: I click Save without actually typing anything
    Given 1 datacenter model with the value "datacenter"
    When I visit the settings page
    Then the url should be /settings
    And I submit
    Then I have settings like yaml
    ---
    token: ''
    ---
10  ui-v2/tests/acceptance/steps/dc/forwarding-steps.js  Normal file
@@ -0,0 +1,10 @@
import steps from '../steps';

// step definitions that are shared between features should be moved to the
// tests/acceptance/steps/steps.js file

export default function(assert) {
  return steps(assert).then('I should find a file', function() {
    assert.ok(true, this.step);
  });
}
10  ui-v2/tests/acceptance/steps/dc/nodes/sessions/list-steps.js  Normal file
@@ -0,0 +1,10 @@
import steps from '../../../steps';

// step definitions that are shared between features should be moved to the
// tests/acceptance/steps/steps.js file

export default function(assert) {
  return steps(assert).then('I should find a file', function() {
    assert.ok(true, this.step);
  });
}
10  ui-v2/tests/acceptance/steps/settings/update-steps.js  Normal file
@@ -0,0 +1,10 @@
import steps from '../steps';

// step definitions that are shared between features should be moved to the
// tests/acceptance/steps/steps.js file

export default function(assert) {
  return steps(assert).then('I should find a file', function() {
    assert.ok(true, this.step);
  });
}
10  ui-v2/tests/acceptance/steps/token-header-steps.js  Normal file
@@ -0,0 +1,10 @@
import steps from './steps';

// step definitions that are shared between features should be moved to the
// tests/acceptance/steps/steps.js file

export default function(assert) {
  return steps(assert).then('I should find a file', function() {
    assert.ok(true, this.step);
  });
}
36  ui-v2/tests/acceptance/token-header.feature  Normal file
@@ -0,0 +1,36 @@
@setupApplicationTest
Feature: token headers
  In order to authenticate with tokens
  As a user
  I need to be able to specify an ACL token AND/OR leave it blank to authenticate with the API
  Scenario: Arriving at the index page having not set a token previously
    Given 1 datacenter model with the value "datacenter"
    When I visit the index page
    Then the url should be /datacenter/services
    And a GET request is made to "/v1/catalog/datacenters" from yaml
    ---
    headers:
      X-Consul-Token: ''
    ---
  Scenario: Set the token to [Token] and then navigate to the index page
    Given 1 datacenter model with the value "datacenter"
    When I visit the settings page
    Then the url should be /settings
    Then I type with yaml
    ---
    token: [Token]
    ---
    And I submit
    When I visit the index page
    Then the url should be /datacenter/services
    And a GET request is made to "/v1/catalog/datacenters" from yaml
    ---
    headers:
      X-Consul-Token: [Token]
    ---
  Where:
    ---------
    | Token |
    | token |
    | ''    |
    ---------
@@ -17,6 +17,9 @@ export default function(type, count, obj) {
      key = 'CONSUL_ACL_COUNT';
      obj['CONSUL_ENABLE_ACLS'] = 1;
      break;
    case 'session':
      key = 'CONSUL_SESSION_COUNT';
      break;
  }
  if (key) {
    obj[key] = count;
@@ -16,6 +16,9 @@ export default function(type) {
    case 'acl':
      url = ['/v1/acl/list'];
      break;
    case 'session':
      url = ['/v1/session/node/'];
      break;
  }
  return function(actual) {
    if (url === null) {
@@ -64,6 +64,7 @@ function setupScenario(featureAnnotations, scenarioAnnotations) {
  }
  return function(model) {
    model.afterEach(function() {
      window.localStorage.clear();
      api.server.reset();
    });
  };
@@ -1,5 +1,6 @@
import index from 'consul-ui/tests/pages/index';
import dcs from 'consul-ui/tests/pages/dc';
import settings from 'consul-ui/tests/pages/settings';
import services from 'consul-ui/tests/pages/dc/services/index';
import service from 'consul-ui/tests/pages/dc/services/show';
import nodes from 'consul-ui/tests/pages/dc/nodes/index';
@@ -12,6 +13,7 @@ import acl from 'consul-ui/tests/pages/dc/acls/edit';
export default {
  index,
  dcs,
  settings,
  services,
  service,
  nodes,
@@ -1,7 +1,7 @@
import { create, visitable, attribute, collection, clickable } from 'ember-cli-page-object';

export default create({
  visit: visitable('/:dc/services/'),
  visit: visitable('/:dc/'),
  dcs: collection('[data-test-datacenter-picker]'),
  showDatacenters: clickable('[data-test-datacenter-selected]'),
  selectedDc: attribute('data-test-datacenter-selected', '[data-test-datacenter-selected]'),
@@ -10,4 +10,7 @@ export default create({
  services: collection('#services [data-test-tabular-row]', {
    port: attribute('data-test-service-port', '.port'),
  }),
  sessions: collection('#lock-sessions [data-test-tabular-row]', {
    TTL: attribute('data-test-session-ttl', '[data-test-session-ttl]'),
  }),
});
6  ui-v2/tests/pages/settings.js  Normal file
@@ -0,0 +1,6 @@
import { create, visitable, clickable } from 'ember-cli-page-object';

export default create({
  visit: visitable('/settings'),
  submit: clickable('[type=submit]'),
});
@@ -33,6 +33,9 @@ export default function(assert) {
    case 'acls':
      model = 'acl';
      break;
    case 'sessions':
      model = 'session';
      break;
  }
  cb(null, model);
}, yadda)
@@ -150,6 +153,37 @@ export default function(assert) {
      );
    });
  })
  // TODO: This one can replace the above one, it covers more use cases
  // also DRY it out a bit
  .then('a $method request is made to "$url" from yaml\n$yaml', function(method, url, yaml) {
    const request = api.server.history[api.server.history.length - 2];
    assert.equal(
      request.method,
      method,
      `Expected the request method to be ${method}, was ${request.method}`
    );
    assert.equal(request.url, url, `Expected the request url to be ${url}, was ${request.url}`);
    let data = yaml.body || {};
    const body = JSON.parse(request.requestBody);
    Object.keys(data).forEach(function(key, i, arr) {
      assert.equal(
        body[key],
        data[key],
        `Expected the payload to contain ${key} equal to ${data[key]}, ${key} was ${body[key]}`
      );
    });
    data = yaml.headers || {};
    const headers = request.requestHeaders;
    Object.keys(data).forEach(function(key, i, arr) {
      assert.equal(
        headers[key],
        data[key],
        `Expected the headers to contain ${key} equal to ${data[key]}, ${key} was ${headers[key]}`
      );
    });
  })
  .then('a $method request is made to "$url" with the body "$body"', function(
    method,
    url,
@@ -212,6 +246,15 @@ export default function(assert) {
    );
  })
  // TODO: Make this accept a 'contains' word so you can search for text containing also
  .then('I have settings like yaml\n$yaml', function(data) {
    // TODO: Inject this
    const settings = window.localStorage;
    Object.keys(data).forEach(function(prop) {
      const actual = settings.getItem(prop);
      const expected = data[prop];
      assert.strictEqual(actual, expected, `Expected settings to be ${expected} was ${actual}`);
    });
  })
  .then('I see $property on the $component like yaml\n$yaml', function(
    property,
    component,
@@ -219,6 +262,8 @@ export default function(assert) {
  ) {
    const _component = currentPage[component];
    const iterator = new Array(_component.length).fill(true);
    // this will catch it if we aren't managing to select a component
    assert.ok(iterator.length > 0);
    iterator.forEach(function(item, i, arr) {
      const actual = _component.objectAt(i)[property];
      // anything coming from the DOM is going to be text/strings
11  ui-v2/tests/unit/routes/dc/index-test.js  Normal file
@@ -0,0 +1,11 @@
import { moduleFor, test } from 'ember-qunit';

moduleFor('route:dc/index', 'Unit | Route | dc/index', {
  // Specify the other units that are required for this test.
  // needs: ['controller:foo']
});

test('it exists', function(assert) {
  let route = this.subject();
  assert.ok(route);
});
@@ -35,8 +35,12 @@ func GetHumanVersion() string {
	if GitDescribe == "" && release == "" {
		release = "dev"
	}

	if release != "" {
		version += fmt.Sprintf("-%s", release)
		if !strings.HasSuffix(version, "-"+release) {
			// if we tagged a prerelease version then the release is in the version already
			version += fmt.Sprintf("-%s", release)
		}
		if GitCommit != "" {
			version += fmt.Sprintf(" (%s)", GitCommit)
		}
@@ -777,6 +777,12 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been assigned
  [RFC 6724](https://tools.ietf.org/html/rfc6724) and as a result it should
  be increasingly uncommon to need to change this value with modern
  resolvers).

* <a name="enable_additional_node_meta_txt"></a><a href="#enable_additional_node_meta_txt">`enable_additional_node_meta_txt`</a> -
  When set to true, Consul will add TXT records for Node metadata into the Additional section of the DNS responses for several
  query types such as SRV queries. When set to false those records are not emitted. This does not impact the behavior of those
  same TXT records when they would be added to the Answer section of the response, such as when querying with type TXT or ANY. This
  defaults to true.

* <a name="domain"></a><a href="#domain">`domain`</a> Equivalent to the
  [`-domain` command-line flag](#_domain).

@@ -1296,3 +1302,4 @@ items which are reloaded include:
* <a href="#node_meta">Node Metadata</a>
* <a href="#telemetry-prefix_filter">Metric Prefix Filter</a>
* <a href="#discard_check_output">Discard Check Output</a>
* <a href="#limits">RPC rate limiting</a>
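As an illustration of the new `enable_additional_node_meta_txt` entry, an agent could opt out of the extra Additional-section TXT records like this (a sketch; the config file path is hypothetical):

    echo '{ "enable_additional_node_meta_txt": false }' > /etc/consul.d/dns.json
    consul agent -config-dir=/etc/consul.d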
@@ -48,13 +48,88 @@ Below is sample output of a telemetry dump:

# Key Metrics

These are some metrics emitted that can help you understand the health of your cluster at a glance. For a full list of metrics emitted by Consul, see the [Metrics Reference](#metrics-reference).

### Transaction timing

| Metric Name | Description |
| :----------------------- | :---------- |
| `consul.kvs.apply` | This measures the time it takes to complete an update to the KV store. |
| `consul.txn.apply` | This measures the time spent applying a transaction operation. |
| `consul.raft.apply` | This counts the number of Raft transactions occurring over the interval. |
| `consul.raft.commitTime` | This measures the time it takes to commit a new entry to the Raft log on the leader. |

**Why they're important:** Taken together, these metrics indicate how long it takes to complete write operations in various parts of the Consul cluster. Generally these should all be fairly consistent and no more than a few milliseconds. Sudden changes in any of the timing values could be due to unexpected load on the Consul servers, or due to problems on the servers themselves.

**What to look for:** Deviations (in any of these metrics) of more than 50% from baseline over the previous hour.

### Leadership changes

| Metric Name | Description |
| :---------- | :---------- |
| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. |
| `consul.raft.state.candidate` | This increments whenever a Consul server starts an election. |
| `consul.raft.state.leader` | This increments whenever a Consul server becomes a leader. |

**Why they're important:** Normally, your Consul cluster should have a stable leader. Frequent elections or leadership changes would likely indicate network issues between the Consul servers, or that the Consul servers themselves are unable to keep up with the load.

**What to look for:** For a healthy cluster, you're looking for a `lastContact` lower than 200ms, `leader` > 0 and `candidate` == 0. Deviations from this might indicate flapping leadership.
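When these leadership metrics look unstable, the Raft state can be inspected from any server with the standard operator subcommand (a sketch; the output shown is illustrative):

    consul operator raft list-peers
    # Node     ID             Address        State   Voter   <- illustrative columns
    consul info   # the raft: section shows local state and last_contact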
### Autopilot

| Metric Name | Description |
| :---------- | :---------- |
| `consul.autopilot.healthy` | This tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. |

**Why it's important:** Obviously, you want your cluster to be healthy.

**What to look for:** Alert if `healthy` is 0.

### Memory usage

| Metric Name | Description |
| :---------- | :---------- |
| `consul.runtime.alloc_bytes` | This measures the number of bytes allocated by the Consul process. |
| `consul.runtime.sys_bytes` | This is the total number of bytes of memory obtained from the OS. |

**Why they're important:** Consul keeps all of its data in memory. If Consul consumes all available memory, it will crash.

**What to look for:** If `consul.runtime.sys_bytes` exceeds 90% of total available system memory.
### Garbage collection

| Metric Name | Description |
| :---------- | :---------- |
| `consul.runtime.total_gc_pause_ns` | Number of nanoseconds consumed by stop-the-world garbage collection (GC) pauses since Consul started. |

**Why it's important:** GC pause is a "stop-the-world" event, meaning that all runtime threads are blocked until GC completes. Normally these pauses last only a few nanoseconds. But if memory usage is high, the Go runtime may GC so frequently that it starts to slow down Consul.

**What to look for:** Warning if `total_gc_pause_ns` exceeds 2 seconds/minute, critical if it exceeds 5 seconds/minute.

**NOTE:** `total_gc_pause_ns` is a cumulative counter, so in order to calculate rates (such as GC/minute),
you will need to apply a function such as InfluxDB's [`non_negative_difference()`](https://docs.influxdata.com/influxdb/v1.5/query_language/functions/#non-negative-difference).
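A back-of-the-envelope rate can also be taken straight from the agent's metrics endpoint (a sketch; it assumes `jq` and `bc` are available and that the counter appears under `Gauges` in the JSON returned by `/v1/agent/metrics`):

    q='.Gauges[] | select(.Name=="consul.runtime.total_gc_pause_ns") | .Value'
    before=$(curl -s localhost:8500/v1/agent/metrics | jq "$q")
    sleep 60
    after=$(curl -s localhost:8500/v1/agent/metrics | jq "$q")
    # GC pause accumulated over the last minute, in seconds
    echo "scale=3; ($after - $before) / 1000000000" | bc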
### Network activity - RPC Count

| Metric Name | Description |
| :---------- | :---------- |
| `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server. |
| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and gets rate limited by that agent's [`limits`](/docs/agent/options.html#limits) configuration. |
| `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. |

**Why they're important:** These measurements indicate the current load created from a Consul agent, including when the load becomes high enough to be rate limited. A high RPC count, especially from `consul.client.rpc.exceeded` (meaning that requests are being rate-limited), could imply a misconfigured Consul agent.

**What to look for:**
Sudden large changes to the `consul.client.rpc` metrics (greater than 50% deviation from baseline).
`consul.client.rpc.exceeded` or `consul.client.rpc.failed` count > 0, as it implies that an agent is being rate-limited or failing to make an RPC request to a Consul server.

When telemetry is being streamed to an external metrics store, the interval is defined to
be that store's flush interval. Otherwise, the interval can be assumed to be 10 seconds
when retrieving metrics from the built-in store using the above described signals.

## Agent Health
## Metrics Reference

These metrics are used to monitor the health of specific Consul agents.
This is a full list of metrics emitted by Consul.

<table class="table table-bordered table-striped">
<tr>
@@ -167,7 +167,7 @@ Failed nodes will be automatically removed after 72 hours. This can happen if a

This sequence can be accelerated with the [`force-leave`](https://www.consul.io/docs/commands/force-leave.html) command. Nodes running as servers will be removed from the Raft quorum. Force-leave may also be used to remove nodes that have accidentally joined the datacenter. Force-leave can only be applied to the nodes in its respective datacenter and cannot be executed on nodes outside the datacenter.

Alternately, nodes can also be removed using `remove-peer` if `force-leave` is not effective in removing the nodes.
Alternately, server nodes can also be removed using `remove-peer` if `force-leave` is not effective in removing the nodes.

    $ consul operator raft remove-peer -address=x.x.x.x:8300
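For the `force-leave` path mentioned above, the command takes the node's name as listed in `consul members` (the node name here is hypothetical):

    $ consul force-leave node-web-1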
@@ -27,13 +27,13 @@ By default this is `http://localhost:8500/ui`.
You can view a live demo of the Consul Web UI
[here](http://demo.consul.io).

## How to Use the New UI
## How to Use the Legacy UI

On May 11, 2018, our redesign of the web UI went into beta. You can use it with
Consul 1.1.0 by setting the environment variable `CONSUL_UI_BETA` to `true`.
Without this environment variable, the web UI will default to the old version. To
use the old UI version, either set `CONSUL_UI_BETA` to false, or don't include
that environment variable at all.
As of Consul version 1.2.0 the original Consul UI is deprecated. You can
still enable it by setting the environment variable `CONSUL_UI_LEGACY` to `true`.
Without this environment variable, the web UI will default to the latest version.
To use the latest UI version, either set `CONSUL_UI_LEGACY` to false or don't
include that environment variable at all.
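Concretely, the switch is just an environment variable on the agent (the `-dev` flag here is only for illustration):

    # Serve the deprecated UI
    $ CONSUL_UI_LEGACY=true consul agent -dev -ui

    # Serve the new UI (the default)
    $ consul agent -dev -ui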
## Next Steps