Merge branch 'release/1.9.x' into release/1.9.0

Mike Morris 2020-11-24 14:50:39 -05:00 committed by GitHub
commit 3ee6d1c14f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
84 changed files with 747 additions and 620 deletions

.changelog/9191.txt Normal file
View File

@ -0,0 +1,18 @@
```release-note:deprecation
cli: **(Enterprise only)** The `-non-voting-server` flag is deprecated in favor of the new `-read-replica` flag. The `-non-voting-server` flag is still present alongside the new flag, but it will be removed in a future release.
```
```release-note:improvement
cli: **(Enterprise only)** A new `-read-replica` flag can now be used to enable running a server as a read-only replica. Previously this was enabled with the now-deprecated `-non-voting-server` flag.
```
```release-note:deprecation
config: **(Enterprise only)** The `non_voting_server` configuration setting is deprecated in favor of the new `read_replica` setting. The `non_voting_server` configuration setting is still present but will be removed in a future release.
```
```release-note:improvement
config: **(Enterprise only)** A new `read_replica` configuration setting can now be used to enable running a server as a read-only replica. Previously this was enabled with the now-deprecated `non_voting_server` setting.
```
```release-note:deprecation
server: **(Enterprise only)** Adding the `nonvoter` tag to the service registration made for read replicas is deprecated in favor of the new `read_replica` tag. Both tags are present in the registration, but the `nonvoter` tag will be removed completely in a future release.
```
```release-note:deprecation
gossip: **(Enterprise only)** Read replicas now advertise themselves by setting the `read_replica` tag. The old `nonvoter` tag is still present but is deprecated and will be removed in a future release.
```
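For context, here is a minimal Go sketch of the aliasing approach these notes describe, using the standard library `flag` package with illustrative names (the agent's real flag registration appears in the `AddFlags` hunk further down): both spellings write to the same field, so existing invocations keep working through the deprecation window.

```go
package main

import (
	"flag"
	"fmt"
)

// serverConfig stands in for the agent's configuration; the field name follows
// the new setting rather than the deprecated one.
type serverConfig struct {
	ReadReplica bool
}

func main() {
	var cfg serverConfig
	fs := flag.NewFlagSet("agent", flag.ExitOnError)
	// Deprecated spelling, kept only for compatibility.
	fs.BoolVar(&cfg.ReadReplica, "non-voting-server", false, "DEPRECATED: use -read-replica instead")
	// New spelling; both flags target the same field.
	fs.BoolVar(&cfg.ReadReplica, "read-replica", false, "Run this server as a read-only replica")
	fs.Parse([]string{"-read-replica"}) // either spelling would set the same value
	fmt.Println("read replica:", cfg.ReadReplica)
}
```

Passing either `-non-voting-server` or `-read-replica` sets the same value, which is the behaviour the deprecation notes above rely on.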

View File

@ -236,16 +236,15 @@ jobs:
name: go test -race
command: |
mkdir -p $TEST_RESULTS_DIR /tmp/jsonfile
pkgs="$(go list ./... | \
grep -E -v '^github.com/hashicorp/consul/agent(/consul|/local|/xds|/routine-leak-checker)?$' | \
grep -E -v '^github.com/hashicorp/consul/command/')"
gotestsum \
--format=short-verbose \
--jsonfile /tmp/jsonfile/go-test-race.log \
--junitfile $TEST_RESULTS_DIR/gotestsum-report.xml -- \
-tags="$GOTAGS" -p 2 \
-race -gcflags=all=-d=checkptr=0 \
./agent/{ae,cache,cache-types,checks,config,pool,proxycfg,router}/... \
./agent/consul/{authmethod,fsm,state,stream}/... \
./agent/{grpc,rpc,rpcclient,submatview}/... \
./snapshot
$pkgs
- store_test_results:
path: *TEST_RESULTS_DIR

View File

@ -195,8 +195,6 @@ func (m *mockAuthorizer) Snapshot(ctx *AuthorizerContext) EnforcementDecision {
}
func TestACL_Enforce(t *testing.T) {
t.Parallel()
type testCase struct {
method string
resource Resource

View File

@ -94,11 +94,7 @@ func (authz testAuthorizer) Snapshot(*AuthorizerContext) EnforcementDecision {
}
func TestChainedAuthorizer(t *testing.T) {
t.Parallel()
t.Run("No Authorizers", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{})
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -129,8 +125,6 @@ func TestChainedAuthorizer(t *testing.T) {
})
t.Run("Authorizer Defaults", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{testAuthorizer(Default)})
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -161,8 +155,6 @@ func TestChainedAuthorizer(t *testing.T) {
})
t.Run("Authorizer No Defaults", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{testAuthorizer(Allow)})
checkAllowACLRead(t, authz, "foo", nil)
checkAllowACLWrite(t, authz, "foo", nil)
@ -193,8 +185,6 @@ func TestChainedAuthorizer(t *testing.T) {
})
t.Run("First Found", func(t *testing.T) {
t.Parallel()
authz := NewChainedAuthorizer([]Authorizer{testAuthorizer(Deny), testAuthorizer(Allow)})
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)

View File

@ -13,8 +13,6 @@ import (
// ensure compatibility from version to version those tests have been only minimally altered. The tests in this
// file are specific to the newer functionality.
func TestPolicyAuthorizer(t *testing.T) {
t.Parallel()
type aclCheck struct {
name string
prefix string
@ -446,8 +444,6 @@ func TestPolicyAuthorizer(t *testing.T) {
name := name
tcase := tcase
t.Run(name, func(t *testing.T) {
t.Parallel()
authz, err := NewPolicyAuthorizer([]*Policy{tcase.policy}, nil)
require.NoError(t, err)
@ -458,7 +454,6 @@ func TestPolicyAuthorizer(t *testing.T) {
}
t.Run(checkName, func(t *testing.T) {
check := check
t.Parallel()
check.check(t, authz, check.prefix, nil)
})
@ -468,8 +463,6 @@ func TestPolicyAuthorizer(t *testing.T) {
}
func TestAnyAllowed(t *testing.T) {
t.Parallel()
type radixInsertion struct {
segment string
value *policyAuthorizerRadixLeaf
@ -719,8 +712,6 @@ func TestAnyAllowed(t *testing.T) {
}
func TestAllAllowed(t *testing.T) {
t.Parallel()
type radixInsertion struct {
segment string
value *policyAuthorizerRadixLeaf

View File

@ -5,11 +5,7 @@ import (
)
func TestStaticAuthorizer(t *testing.T) {
t.Parallel()
t.Run("AllowAll", func(t *testing.T) {
t.Parallel()
authz := AllowAll()
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -40,7 +36,6 @@ func TestStaticAuthorizer(t *testing.T) {
})
t.Run("DenyAll", func(t *testing.T) {
t.Parallel()
authz := DenyAll()
checkDenyACLRead(t, authz, "foo", nil)
checkDenyACLWrite(t, authz, "foo", nil)
@ -71,7 +66,6 @@ func TestStaticAuthorizer(t *testing.T) {
})
t.Run("ManageAll", func(t *testing.T) {
t.Parallel()
authz := ManageAll()
checkAllowACLRead(t, authz, "foo", nil)
checkAllowACLWrite(t, authz, "foo", nil)

View File

@ -1099,8 +1099,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
if runtimeCfg.SessionTTLMin != 0 {
cfg.SessionTTLMin = runtimeCfg.SessionTTLMin
}
if runtimeCfg.NonVotingServer {
cfg.NonVoter = runtimeCfg.NonVotingServer
if runtimeCfg.ReadReplica {
cfg.ReadReplica = runtimeCfg.ReadReplica
}
// These are fully specified in the agent defaults, so we can simply

View File

@ -634,6 +634,7 @@ type testAutoConfig struct {
ac *AutoConfig
tokenUpdates chan struct{}
originalToken string
stop func()
initialRoots *structs.IndexedCARoots
initialCert *structs.IssuedCert
@ -835,6 +836,7 @@ func startedAutoConfig(t *testing.T, autoEncrypt bool) testAutoConfig {
initialRoots: indexedRoots,
initialCert: cert,
extraCerts: extraCerts,
stop: cancel,
}
}
@ -1098,16 +1100,15 @@ func TestFallback(t *testing.T) {
// now wait for the fallback routine to be invoked
require.True(t, waitForChans(100*time.Millisecond, fallbackCtx.Done()), "fallback routines did not get invoked within the allotted time")
// persisting these to disk happens after the RPC we waited on above has fired
// There is no deterministic way to know once it's been written, so we wrap this in a retry.
testretry.Run(t, func(r *testretry.R) {
resp, err := testAC.ac.readPersistedAutoConfig()
require.NoError(r, err)
testAC.stop()
<-testAC.ac.done
// ensure the roots got persisted to disk
require.Equal(r, thirdCert.CertPEM, resp.Certificate.GetCertPEM())
require.Equal(r, secondRoots.ActiveRootID, resp.CARoots.GetActiveRootID())
})
resp, err := testAC.ac.readPersistedAutoConfig()
require.NoError(t, err)
// ensure the roots got persisted to disk
require.Equal(t, thirdCert.CertPEM, resp.Certificate.GetCertPEM())
require.Equal(t, secondRoots.ActiveRootID, resp.CARoots.GetActiveRootID())
}
func TestIntroToken(t *testing.T) {

View File

@ -1034,7 +1034,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) {
NodeID: types.NodeID(b.stringVal(c.NodeID)),
NodeMeta: c.NodeMeta,
NodeName: b.nodeName(c.NodeName),
NonVotingServer: b.boolVal(c.NonVotingServer),
ReadReplica: b.boolVal(c.ReadReplica),
PidFile: b.stringVal(c.PidFile),
PrimaryDatacenter: primaryDatacenter,
PrimaryGateways: b.expandAllOptionalAddrs("primary_gateways", c.PrimaryGateways),

View File

@ -13,6 +13,9 @@ var (
"non_voting_server": func(c *Config) {
// to maintain existing compatibility we don't nullify the value
},
"read_replica": func(c *Config) {
// to maintain existing compatibility we don't nullify the value
},
"segment": func(c *Config) {
// to maintain existing compatibility we don't nullify the value
},

View File

@ -24,11 +24,18 @@ func TestBuilder_validateEnterpriseConfigKeys(t *testing.T) {
cases := map[string]testCase{
"non_voting_server": {
config: Config{
NonVotingServer: &boolVal,
ReadReplica: &boolVal,
},
keys: []string{"non_voting_server"},
badKeys: []string{"non_voting_server"},
},
"read_replica": {
config: Config{
ReadReplica: &boolVal,
},
keys: []string{"read_replica"},
badKeys: []string{"read_replica"},
},
"segment": {
config: Config{
SegmentName: &stringVal,
@ -118,11 +125,11 @@ func TestBuilder_validateEnterpriseConfigKeys(t *testing.T) {
},
"multi": {
config: Config{
NonVotingServer: &boolVal,
SegmentName: &stringVal,
ReadReplica: &boolVal,
SegmentName: &stringVal,
},
keys: []string{"non_voting_server", "segment", "acl.tokens.agent_master"},
badKeys: []string{"non_voting_server", "segment"},
keys: []string{"non_voting_server", "read_replica", "segment", "acl.tokens.agent_master"},
badKeys: []string{"non_voting_server", "read_replica", "segment"},
},
}

View File

@ -289,7 +289,7 @@ type Config struct {
// Enterprise Only
Audit *Audit `json:"audit,omitempty" hcl:"audit" mapstructure:"audit"`
// Enterprise Only
NonVotingServer *bool `json:"non_voting_server,omitempty" hcl:"non_voting_server" mapstructure:"non_voting_server"`
ReadReplica *bool `json:"read_replica,omitempty" hcl:"read_replica" mapstructure:"read_replica" alias:"non_voting_server"`
// Enterprise Only
SegmentName *string `json:"segment,omitempty" hcl:"segment" mapstructure:"segment"`
// Enterprise Only

View File

@ -89,7 +89,8 @@ func AddFlags(fs *flag.FlagSet, f *BuilderOpts) {
add(&f.Config.NodeName, "node", "Name of this node. Must be unique in the cluster.")
add(&f.Config.NodeID, "node-id", "A unique ID for this node across space and time. Defaults to a randomly-generated ID that persists in the data-dir.")
add(&f.Config.NodeMeta, "node-meta", "An arbitrary metadata key/value pair for this node, of the format `key:value`. Can be specified multiple times.")
add(&f.Config.NonVotingServer, "non-voting-server", "(Enterprise-only) This flag is used to make the server not participate in the Raft quorum, and have it only receive the data replication stream. This can be used to add read scalability to a cluster in cases where a high volume of reads to servers are needed.")
add(&f.Config.ReadReplica, "non-voting-server", "(Enterprise-only) DEPRECATED: -read-replica should be used instead")
add(&f.Config.ReadReplica, "read-replica", "(Enterprise-only) This flag is used to make the server not participate in the Raft quorum, and have it only receive the data replication stream. This can be used to add read scalability to a cluster in cases where a high volume of reads to servers are needed.")
add(&f.Config.PidFile, "pid-file", "Path to file to store agent PID.")
add(&f.Config.RPCProtocol, "protocol", "Sets the protocol version. Defaults to latest.")
add(&f.Config.RaftProtocol, "raft-protocol", "Sets the Raft protocol version. Defaults to latest.")

View File

@ -845,12 +845,12 @@ type RuntimeConfig struct {
// flag: -node-meta "key:value" -node-meta "key:value" ...
NodeMeta map[string]string
// NonVotingServer is whether this server will act as a non-voting member
// ReadReplica is whether this server will act as a non-voting member
// of the cluster to help provide read scalability. (Enterprise-only)
//
// hcl: non_voting_server = (true|false)
// flag: -non-voting-server
NonVotingServer bool
ReadReplica bool
// PidFile is the file to store our PID in.
//

View File

@ -12,12 +12,16 @@ var entTokenConfigSanitize = `"EnterpriseConfig": {},`
func entFullRuntimeConfig(rt *RuntimeConfig) {}
var enterpriseNonVotingServerWarnings []string = []string{enterpriseConfigKeyError{key: "non_voting_server"}.Error()}
var enterpriseReadReplicaWarnings []string = []string{enterpriseConfigKeyError{key: "read_replica"}.Error()}
var enterpriseConfigKeyWarnings []string
func init() {
for k := range enterpriseConfigMap {
if k == "non_voting_server" {
// this is an alias for "read_replica" so we shouldn't see it in warnings
continue
}
enterpriseConfigKeyWarnings = append(enterpriseConfigKeyWarnings, enterpriseConfigKeyError{key: k}.Error())
}
}

View File

@ -610,10 +610,10 @@ func TestBuilder_BuildAndValidate_ConfigFlagsAndEdgecases(t *testing.T) {
`-data-dir=` + dataDir,
},
patch: func(rt *RuntimeConfig) {
rt.NonVotingServer = true
rt.ReadReplica = true
rt.DataDir = dataDir
},
warns: enterpriseNonVotingServerWarnings,
warns: enterpriseReadReplicaWarnings,
},
{
desc: "-pid-file",
@ -5315,6 +5315,7 @@ func TestFullConfig(t *testing.T) {
"raft_snapshot_threshold": 16384,
"raft_snapshot_interval": "30s",
"raft_trailing_logs": 83749,
"read_replica": true,
"reconnect_timeout": "23739s",
"reconnect_timeout_wan": "26694s",
"recursors": [ "63.38.39.58", "92.49.18.18" ],
@ -6004,6 +6005,7 @@ func TestFullConfig(t *testing.T) {
raft_snapshot_threshold = 16384
raft_snapshot_interval = "30s"
raft_trailing_logs = 83749
read_replica = true
reconnect_timeout = "23739s"
reconnect_timeout_wan = "26694s"
recursors = [ "63.38.39.58", "92.49.18.18" ]
@ -6749,7 +6751,7 @@ func TestFullConfig(t *testing.T) {
NodeID: types.NodeID("AsUIlw99"),
NodeMeta: map[string]string{"5mgGQMBk": "mJLtVMSG", "A7ynFMJB": "0Nx6RGab"},
NodeName: "otlLxGaI",
NonVotingServer: true,
ReadReplica: true,
PidFile: "43xN80Km",
PrimaryDatacenter: "ejtmd43d",
PrimaryGateways: []string{"aej8eeZo", "roh2KahS"},
@ -7684,13 +7686,13 @@ func TestSanitize(t *testing.T) {
"NodeID": "",
"NodeMeta": {},
"NodeName": "",
"NonVotingServer": false,
"PidFile": "",
"PrimaryDatacenter": "",
"PrimaryGateways": [
"pmgw_foo=bar pmgw_key=baz pmgw_secret=boom pmgw_bang=bar"
],
"PrimaryGatewaysInterval": "0s",
"ReadReplica": false,
"RPCAdvertiseAddr": "",
"RPCBindAddr": "",
"RPCHandshakeTimeout": "0s",

View File

@ -110,9 +110,9 @@ type Config struct {
// RaftConfig is the configuration used for Raft in the local DC
RaftConfig *raft.Config
// (Enterprise-only) NonVoter is used to prevent this server from being added
// (Enterprise-only) ReadReplica is used to prevent this server from being added
// as a voting member of the Raft cluster.
NonVoter bool
ReadReplica bool
// NotifyListen is called after the RPC listener has been configured.
// RPCAdvertise will be set to the listener address if it hasn't been

View File

@ -1230,7 +1230,9 @@ func (s *Server) handleAliveMember(member serf.Member) error {
Warning: 1,
},
Meta: map[string]string{
// DEPRECATED - remove nonvoter in favor of read_replica in a future version of consul
"non_voter": strconv.FormatBool(member.Tags["nonvoter"] == "1"),
"read_replica": strconv.FormatBool(member.Tags["read_replica"] == "1"),
"raft_version": strconv.Itoa(parts.RaftVersion),
"serf_protocol_current": strconv.FormatUint(uint64(member.ProtocolCur), 10),
"serf_protocol_min": strconv.FormatUint(uint64(member.ProtocolMin), 10),

View File

@ -334,7 +334,9 @@ func TestLeader_CheckServersMeta(t *testing.T) {
versionToExpect := "19.7.9"
retry.Run(t, func(r *retry.R) {
// DEPRECATED - remove nonvoter tag in favor of read_replica in a future version of consul
member.Tags["nonvoter"] = "1"
member.Tags["read_replica"] = "1"
member.Tags["build"] = versionToExpect
err := s1.handleAliveMember(member)
if err != nil {
@ -347,9 +349,13 @@ func TestLeader_CheckServersMeta(t *testing.T) {
if service == nil {
r.Fatal("client not registered")
}
// DEPRECATED - remove non_voter in favor of read_replica in a future version of consul
if service.Meta["non_voter"] != "true" {
r.Fatalf("Expected to be non_voter == true, was: %s", service.Meta["non_voter"])
}
if service.Meta["read_replica"] != "true" {
r.Fatalf("Expected to be read_replica == true, was: %s", service.Meta["non_voter"])
}
newVersion := service.Meta["version"]
if newVersion != versionToExpect {
r.Fatalf("Expected version to be updated to %s, was %s", versionToExpect, newVersion)

View File

@ -61,8 +61,11 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
if s.config.BootstrapExpect != 0 {
conf.Tags["expect"] = fmt.Sprintf("%d", s.config.BootstrapExpect)
}
if s.config.NonVoter {
if s.config.ReadReplica {
// DEPRECATED - This tag should be removed when we no longer want to support
// upgrades from 1.8.x and below
conf.Tags["nonvoter"] = "1"
conf.Tags["read_replica"] = "1"
}
if s.config.UseTLS {
conf.Tags["use_tls"] = "1"
@ -351,7 +354,7 @@ func (s *Server) maybeBootstrap() {
s.logger.Error("Member has bootstrap mode. Expect disabled.", "member", member)
return
}
if !p.NonVoter {
if !p.ReadReplica {
voters++
}
servers = append(servers, *p)
@ -410,7 +413,7 @@ func (s *Server) maybeBootstrap() {
id := raft.ServerID(server.ID)
suffrage := raft.Voter
if server.NonVoter {
if server.ReadReplica {
suffrage = raft.Nonvoter
}
peer := raft.Server{

View File

@ -240,7 +240,7 @@ func testServerDCExpectNonVoter(t *testing.T, dc string, expect int) (string, *S
c.Datacenter = dc
c.Bootstrap = false
c.BootstrapExpect = expect
c.NonVoter = true
c.ReadReplica = true
})
}

View File

@ -3,6 +3,7 @@ package state
import (
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbsubscribe"
@ -10,6 +11,10 @@ import (
// EventPayloadCheckServiceNode is used as the Payload for a stream.Event to
// indicate changes to a CheckServiceNode for service health.
//
// The stream.Payload methods implemented by EventPayloadCheckServiceNode
// do not mutate the payload, making it safe to use in an Event sent to
// stream.EventPublisher.Publish.
type EventPayloadCheckServiceNode struct {
Op pbsubscribe.CatalogOp
Value *structs.CheckServiceNode
@ -19,7 +24,11 @@ type EventPayloadCheckServiceNode struct {
key string
}
func (e EventPayloadCheckServiceNode) FilterByKey(key, namespace string) bool {
func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool {
return e.Value.CanRead(authz) == acl.Allow
}
func (e EventPayloadCheckServiceNode) MatchesKey(key, namespace string) bool {
if key == "" && namespace == "" {
return true
}

View File

@ -1476,7 +1476,7 @@ func TestEventPayloadCheckServiceNode_FilterByKey(t *testing.T) {
t.Skip("cant test namespace matching without namespace support")
}
require.Equal(t, tc.expected, tc.payload.FilterByKey(tc.key, tc.namespace))
require.Equal(t, tc.expected, tc.payload.MatchesKey(tc.key, tc.namespace))
}
var testCases = []testCase{

View File

@ -410,10 +410,14 @@ type nodePayload struct {
node *structs.ServiceNode
}
func (p nodePayload) FilterByKey(key, _ string) bool {
func (p nodePayload) MatchesKey(key, _ string) bool {
return p.key == key
}
func (p nodePayload) HasReadPermission(acl.Authorizer) bool {
return true
}
func createTokenAndWaitForACLEventPublish(t *testing.T, s *Store) *structs.ACLToken {
token := &structs.ACLToken{
AccessorID: "3af117a9-2233-4cf4-8ff8-3c749c9906b4",

View File

@ -4,7 +4,11 @@ to the state store.
*/
package stream
import "fmt"
import (
"fmt"
"github.com/hashicorp/consul/acl"
)
// Topic is an identifier that partitions events. A subscription will only receive
// events which match the Topic.
@ -18,72 +22,81 @@ type Event struct {
Payload Payload
}
// A Payload contains the topic-specific data in an event. The payload methods
// should not modify the state of the payload if the Event is being submitted to
// EventPublisher.Publish.
type Payload interface {
// FilterByKey must return true if the Payload should be included in a subscription
// MatchesKey must return true if the Payload should be included in a subscription
// requested with the key and namespace.
// Generally this means that the payload matches the key and namespace or
// the payload is a special framing event that should be returned to every
// subscription.
FilterByKey(key, namespace string) bool
MatchesKey(key, namespace string) bool
// HasReadPermission uses the acl.Authorizer to determine if the items in the
// Payload are visible to the request. It returns true if the payload is
// authorized for Read, otherwise returns false.
HasReadPermission(authz acl.Authorizer) bool
}
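To make the two-method contract above concrete, a rough sketch of a hypothetical implementation follows (the type, its fields, and the `NodeRead` check are illustrative and not part of this change). Because published events are shared across subscriptions, both methods only read the receiver and never mutate it.

```go
package stream_example

import "github.com/hashicorp/consul/acl"

// examplePayload is a hypothetical Payload carrying a single node entry.
type examplePayload struct {
	key       string
	namespace string
	node      string
}

// MatchesKey reports whether the payload belongs to the requested key and
// namespace; empty request values match everything, like the framing events.
func (p examplePayload) MatchesKey(key, namespace string) bool {
	return (key == "" || key == p.key) && (namespace == "" || namespace == p.namespace)
}

// HasReadPermission consults the authorizer without modifying the payload, so
// the same Event can safely be fanned out to every subscriber.
func (p examplePayload) HasReadPermission(authz acl.Authorizer) bool {
	return authz.NodeRead(p.node, nil) == acl.Allow
}
```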
// Len returns the number of events contained within this event. If the Payload
// is a []Event, the length of that slice is returned. Otherwise 1 is returned.
func (e Event) Len() int {
if batch, ok := e.Payload.(PayloadEvents); ok {
return len(batch)
}
return 1
// PayloadEvents is a Payload that may be returned by Subscription.Next when
// there are multiple events at an index.
//
// Note that unlike most other Payload implementations, PayloadEvents is mutable and it is NOT
// safe to send to EventPublisher.Publish.
type PayloadEvents struct {
Items []Event
}
// Filter returns an Event filtered to only those Events where f returns true.
// If the second return value is false, every Event was removed by the filter.
func (e Event) Filter(f func(Event) bool) (Event, bool) {
batch, ok := e.Payload.(PayloadEvents)
if !ok {
return e, f(e)
}
func newPayloadEvents(items ...Event) *PayloadEvents {
return &PayloadEvents{Items: items}
}
func (p *PayloadEvents) filter(f func(Event) bool) bool {
items := p.Items
// To avoid extra allocations, iterate over the list of events first and
// get a count of the total desired size. This trades off some extra cpu
// time in the worst case (when not all items match the filter), for
// fewer memory allocations.
var size int
for idx := range batch {
if f(batch[idx]) {
for idx := range items {
if f(items[idx]) {
size++
}
}
if len(batch) == size || size == 0 {
return e, size != 0
if len(items) == size || size == 0 {
return size != 0
}
filtered := make(PayloadEvents, 0, size)
for idx := range batch {
event := batch[idx]
filtered := make([]Event, 0, size)
for idx := range items {
event := items[idx]
if f(event) {
filtered = append(filtered, event)
}
}
if len(filtered) == 0 {
return e, false
}
e.Payload = filtered
return e, true
}
// PayloadEvents is an Payload which contains multiple Events.
type PayloadEvents []Event
// TODO: this method is not called, but needs to exist so that we can store
// a slice of events as a payload. In the future we should be able to refactor
// Event.Filter so that this FilterByKey includes the re-slicing.
func (e PayloadEvents) FilterByKey(_, _ string) bool {
p.Items = filtered
return true
}
func (e PayloadEvents) Events() []Event {
return e
// MatchesKey filters the PayloadEvents to those which match the key and namespace.
func (p *PayloadEvents) MatchesKey(key, namespace string) bool {
return p.filter(func(event Event) bool {
return event.Payload.MatchesKey(key, namespace)
})
}
func (p *PayloadEvents) Len() int {
return len(p.Items)
}
// HasReadPermission filters the PayloadEvents to those which are authorized
// for reading by authz.
func (p *PayloadEvents) HasReadPermission(authz acl.Authorizer) bool {
return p.filter(func(event Event) bool {
return event.Payload.HasReadPermission(authz)
})
}
// IsEndOfSnapshot returns true if this is a framing event that indicates the
@ -100,24 +113,34 @@ func (e Event) IsNewSnapshotToFollow() bool {
return e.Payload == newSnapshotToFollow{}
}
type endOfSnapshot struct{}
type framingEvent struct{}
func (endOfSnapshot) FilterByKey(string, string) bool {
func (framingEvent) MatchesKey(string, string) bool {
return true
}
type newSnapshotToFollow struct{}
func (newSnapshotToFollow) FilterByKey(string, string) bool {
func (framingEvent) HasReadPermission(acl.Authorizer) bool {
return true
}
type endOfSnapshot struct {
framingEvent
}
type newSnapshotToFollow struct {
framingEvent
}
type closeSubscriptionPayload struct {
tokensSecretIDs []string
}
func (closeSubscriptionPayload) FilterByKey(string, string) bool {
return true
func (closeSubscriptionPayload) MatchesKey(string, string) bool {
return false
}
func (closeSubscriptionPayload) HasReadPermission(acl.Authorizer) bool {
return false
}
// NewCloseSubscriptionEvent returns a special Event that is handled by the

View File

@ -91,7 +91,8 @@ func NewEventPublisher(handlers SnapshotHandlers, snapCacheTTL time.Duration) *E
return e
}
// Publish events to all subscribers of the event Topic.
// Publish events to all subscribers of the event Topic. The events will be shared
// with all subscriptions, so the Payload used in Event.Payload must be immutable.
func (e *EventPublisher) Publish(events []Event) {
if len(events) > 0 {
e.publishCh <- events

View File

@ -7,6 +7,8 @@ import (
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl"
)
type intTopic int
@ -63,17 +65,22 @@ var testSnapshotEvent = Event{
}
type simplePayload struct {
key string
value string
key string
value string
noReadPerm bool
}
func (p simplePayload) FilterByKey(key, _ string) bool {
func (p simplePayload) MatchesKey(key, _ string) bool {
if key == "" {
return true
}
return p.key == key
}
func (p simplePayload) HasReadPermission(acl.Authorizer) bool {
return !p.noReadPerm
}
func newTestSnapshotHandlers() SnapshotHandlers {
return SnapshotHandlers{
testTopic: func(req SubscribeRequest, buf SnapshotAppender) (uint64, error) {

View File

@ -15,3 +15,163 @@ func TestEvent_IsEndOfSnapshot(t *testing.T) {
require.False(t, e.IsEndOfSnapshot())
})
}
func newSimpleEvent(key string, index uint64) Event {
return Event{Index: index, Payload: simplePayload{key: key}}
}
func TestPayloadEvents_FilterByKey(t *testing.T) {
type testCase struct {
name string
req SubscribeRequest
events []Event
expectEvent bool
expected *PayloadEvents
expectedCap int
}
fn := func(t *testing.T, tc testCase) {
events := make([]Event, 0, 5)
events = append(events, tc.events...)
pe := &PayloadEvents{Items: events}
ok := pe.MatchesKey(tc.req.Key, tc.req.Namespace)
require.Equal(t, tc.expectEvent, ok)
if !tc.expectEvent {
return
}
require.Equal(t, tc.expected, pe)
// test if there was a new array allocated or not
require.Equal(t, tc.expectedCap, cap(pe.Items))
}
var testCases = []testCase{
{
name: "all events match, no key or namespace",
req: SubscribeRequest{Topic: testTopic},
events: []Event{
newSimpleEvent("One", 102),
newSimpleEvent("Two", 102)},
expectEvent: true,
expected: newPayloadEvents(
newSimpleEvent("One", 102),
newSimpleEvent("Two", 102)),
expectedCap: 5,
},
{
name: "all events match, no namespace",
req: SubscribeRequest{Topic: testTopic, Key: "Same"},
events: []Event{
newSimpleEvent("Same", 103),
newSimpleEvent("Same", 103)},
expectEvent: true,
expected: newPayloadEvents(
newSimpleEvent("Same", 103),
newSimpleEvent("Same", 103)),
expectedCap: 5,
},
{
name: "all events match, no key",
req: SubscribeRequest{Topic: testTopic, Namespace: "apps"},
events: []Event{
newNSEvent("Something", "apps"),
newNSEvent("Other", "apps")},
expectEvent: true,
expected: newPayloadEvents(
newNSEvent("Something", "apps"),
newNSEvent("Other", "apps")),
expectedCap: 5,
},
{
name: "some evens match, no namespace",
req: SubscribeRequest{Topic: testTopic, Key: "Same"},
events: []Event{
newSimpleEvent("Same", 104),
newSimpleEvent("Other", 104),
newSimpleEvent("Same", 104)},
expectEvent: true,
expected: newPayloadEvents(
newSimpleEvent("Same", 104),
newSimpleEvent("Same", 104)),
expectedCap: 2,
},
{
name: "some events match, no key",
req: SubscribeRequest{Topic: testTopic, Namespace: "apps"},
events: []Event{
newNSEvent("app1", "apps"),
newNSEvent("db1", "dbs"),
newNSEvent("app2", "apps")},
expectEvent: true,
expected: newPayloadEvents(
newNSEvent("app1", "apps"),
newNSEvent("app2", "apps")),
expectedCap: 2,
},
{
name: "no events match key",
req: SubscribeRequest{Topic: testTopic, Key: "Other"},
events: []Event{
newSimpleEvent("Same", 0),
newSimpleEvent("Same", 0)},
},
{
name: "no events match namespace",
req: SubscribeRequest{Topic: testTopic, Namespace: "apps"},
events: []Event{
newNSEvent("app1", "group1"),
newNSEvent("app2", "group2")},
expectEvent: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fn(t, tc)
})
}
}
func newNSEvent(key, namespace string) Event {
return Event{Index: 22, Payload: nsPayload{key: key, namespace: namespace}}
}
type nsPayload struct {
framingEvent
key string
namespace string
value string
}
func (p nsPayload) MatchesKey(key, namespace string) bool {
return (key == "" || key == p.key) && (namespace == "" || namespace == p.namespace)
}
func TestPayloadEvents_HasReadPermission(t *testing.T) {
t.Run("some events filtered", func(t *testing.T) {
ep := newPayloadEvents(
Event{Payload: simplePayload{key: "one", noReadPerm: true}},
Event{Payload: simplePayload{key: "two", noReadPerm: false}},
Event{Payload: simplePayload{key: "three", noReadPerm: true}},
Event{Payload: simplePayload{key: "four", noReadPerm: false}})
require.True(t, ep.HasReadPermission(nil))
expected := []Event{
{Payload: simplePayload{key: "two"}},
{Payload: simplePayload{key: "four"}},
}
require.Equal(t, expected, ep.Items)
})
t.Run("all events filtered", func(t *testing.T) {
ep := newPayloadEvents(
Event{Payload: simplePayload{key: "one", noReadPerm: true}},
Event{Payload: simplePayload{key: "two", noReadPerm: true}},
Event{Payload: simplePayload{key: "three", noReadPerm: true}},
Event{Payload: simplePayload{key: "four", noReadPerm: true}})
require.False(t, ep.HasReadPermission(nil))
})
}

View File

@ -101,8 +101,8 @@ func (s *Subscription) Next(ctx context.Context) (Event, error) {
if len(next.Events) == 0 {
continue
}
event, ok := filterByKey(s.req, next.Events)
if !ok {
event := newEventFromBatch(s.req, next.Events)
if !event.Payload.MatchesKey(s.req.Key, s.req.Namespace) {
continue
}
return event, nil
@ -128,22 +128,10 @@ func newEventFromBatch(req SubscribeRequest, events []Event) Event {
return Event{
Topic: req.Topic,
Index: first.Index,
Payload: PayloadEvents(events),
Payload: newPayloadEvents(events...),
}
}
func filterByKey(req SubscribeRequest, events []Event) (Event, bool) {
event := newEventFromBatch(req, events)
if req.Key == "" && req.Namespace == "" {
return event, true
}
fn := func(e Event) bool {
return e.Payload.FilterByKey(req.Key, req.Namespace)
}
return event.Filter(fn)
}
// Close the subscription. Subscribers will receive an error when they call Next,
// and will need to perform a new Subscribe request.
// It is safe to call from any goroutine.

View File

@ -138,147 +138,29 @@ func publishTestEvent(index uint64, b *eventBuffer, key string) {
b.Append([]Event{e})
}
func newSimpleEvent(key string, index uint64) Event {
return Event{Index: index, Payload: simplePayload{key: key}}
}
func TestFilterByKey(t *testing.T) {
type testCase struct {
name string
req SubscribeRequest
events []Event
expectEvent bool
expected Event
expectedCap int
}
fn := func(t *testing.T, tc testCase) {
events := make(PayloadEvents, 0, 5)
events = append(events, tc.events...)
actual, ok := filterByKey(tc.req, events)
require.Equal(t, tc.expectEvent, ok)
if !tc.expectEvent {
return
func TestNewEventsFromBatch(t *testing.T) {
t.Run("single item", func(t *testing.T) {
first := Event{
Topic: testTopic,
Index: 1234,
Payload: simplePayload{key: "key"},
}
require.Equal(t, tc.expected, actual)
// test if there was a new array allocated or not
require.Equal(t, tc.expectedCap, cap(actual.Payload.(PayloadEvents)))
}
var testCases = []testCase{
{
name: "all events match, no key or namespace",
req: SubscribeRequest{Topic: testTopic},
events: []Event{
newSimpleEvent("One", 102),
newSimpleEvent("Two", 102)},
expectEvent: true,
expected: Event{
Topic: testTopic,
Index: 102,
Payload: PayloadEvents{
newSimpleEvent("One", 102),
newSimpleEvent("Two", 102)}},
expectedCap: 5,
},
{
name: "all events match, no namespace",
req: SubscribeRequest{Topic: testTopic, Key: "Same"},
events: []Event{
newSimpleEvent("Same", 103),
newSimpleEvent("Same", 103)},
expectEvent: true,
expected: Event{
Topic: testTopic,
Index: 103,
Payload: PayloadEvents{
newSimpleEvent("Same", 103),
newSimpleEvent("Same", 103)}},
expectedCap: 5,
},
{
name: "all events match, no key",
req: SubscribeRequest{Topic: testTopic, Namespace: "apps"},
events: []Event{
newNSEvent("Something", "apps"),
newNSEvent("Other", "apps")},
expectEvent: true,
expected: Event{
Topic: testTopic,
Index: 22,
Payload: PayloadEvents{
newNSEvent("Something", "apps"),
newNSEvent("Other", "apps")}},
expectedCap: 5,
},
{
name: "some evens match, no namespace",
req: SubscribeRequest{Topic: testTopic, Key: "Same"},
events: []Event{
newSimpleEvent("Same", 104),
newSimpleEvent("Other", 104),
newSimpleEvent("Same", 104)},
expectEvent: true,
expected: Event{
Topic: testTopic,
Index: 104,
Payload: PayloadEvents{
newSimpleEvent("Same", 104),
newSimpleEvent("Same", 104)}},
expectedCap: 2,
},
{
name: "some events match, no key",
req: SubscribeRequest{Topic: testTopic, Namespace: "apps"},
events: []Event{
newNSEvent("app1", "apps"),
newNSEvent("db1", "dbs"),
newNSEvent("app2", "apps")},
expectEvent: true,
expected: Event{
Topic: testTopic,
Index: 22,
Payload: PayloadEvents{
newNSEvent("app1", "apps"),
newNSEvent("app2", "apps")}},
expectedCap: 2,
},
{
name: "no events match key",
req: SubscribeRequest{Topic: testTopic, Key: "Other"},
events: []Event{
newSimpleEvent("Same", 0),
newSimpleEvent("Same", 0)},
},
{
name: "no events match namespace",
req: SubscribeRequest{Topic: testTopic, Namespace: "apps"},
events: []Event{
newNSEvent("app1", "group1"),
newNSEvent("app2", "group2")},
expectEvent: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
fn(t, tc)
})
}
}
func newNSEvent(key, namespace string) Event {
return Event{Index: 22, Payload: nsPayload{key: key, namespace: namespace}}
}
type nsPayload struct {
key string
namespace string
value string
}
func (p nsPayload) FilterByKey(key, namespace string) bool {
return (key == "" || key == p.key) && (namespace == "" || namespace == p.namespace)
e := newEventFromBatch(SubscribeRequest{}, []Event{first})
require.Equal(t, first, e)
})
t.Run("many items", func(t *testing.T) {
events := []Event{
newSimpleEvent("foo", 9999),
newSimpleEvent("foo", 9999),
newSimpleEvent("zee", 9999),
}
req := SubscribeRequest{Topic: testTopic}
e := newEventFromBatch(req, events)
expected := Event{
Topic: testTopic,
Index: 9999,
Payload: newPayloadEvents(events...),
}
require.Equal(t, expected, e)
})
}

View File

@ -40,7 +40,7 @@ type Server struct {
RaftVersion int
Addr net.Addr
Status serf.MemberStatus
NonVoter bool
ReadReplica bool
ACLs structs.ACLMode
FeatureFlags map[string]int
@ -160,7 +160,10 @@ func IsConsulServer(m serf.Member) (bool, *Server) {
}
// Check if the server is a non-voter
// DEPRECATED - remove the nonvoter tag check once we no longer need to support
// read replicas running v1.8.x and below.
_, nonVoter := m.Tags["nonvoter"]
_, readReplica := m.Tags["read_replica"]
addr := &net.TCPAddr{IP: m.Addr, Port: port}
@ -182,7 +185,8 @@ func IsConsulServer(m serf.Member) (bool, *Server) {
RaftVersion: raftVsn,
Status: m.Status,
UseTLS: useTLS,
NonVoter: nonVoter,
// DEPRECATED - remove nonVoter check once support for that tag is removed
ReadReplica: nonVoter || readReplica,
ACLs: acls,
FeatureFlags: featureFlags,
}

View File

@ -66,7 +66,7 @@ func TestIsConsulServer(t *testing.T) {
"expect": "3",
"raft_vsn": "3",
"use_tls": "1",
"nonvoter": "1",
"read_replica": "1",
},
Status: serf.StatusLeft,
}
@ -101,7 +101,7 @@ func TestIsConsulServer(t *testing.T) {
if !parts.UseTLS {
t.Fatalf("bad: %v", parts.UseTLS)
}
if !parts.NonVoter {
if !parts.ReadReplica {
t.Fatalf("unexpected voter")
}
m.Tags["bootstrap"] = "1"
@ -130,10 +130,16 @@ func TestIsConsulServer(t *testing.T) {
t.Fatalf("unexpected bootstrap")
}
delete(m.Tags, "nonvoter")
delete(m.Tags, "read_replica")
ok, parts = metadata.IsConsulServer(m)
if !ok || parts.NonVoter {
t.Fatalf("unexpected nonvoter")
if !ok || parts.ReadReplica {
t.Fatalf("unexpected read replica")
}
m.Tags["nonvoter"] = "1"
ok, parts = metadata.IsConsulServer(m)
if !ok || !parts.ReadReplica {
t.Fatalf("expected read replica")
}
delete(m.Tags, "role")

View File

@ -1,22 +0,0 @@
package subscribe
import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
)
// EnforceACL takes an acl.Authorizer and returns the decision for whether the
// event is allowed to be sent to this client or not.
func enforceACL(authz acl.Authorizer, e stream.Event) acl.EnforcementDecision {
switch {
case e.IsEndOfSnapshot(), e.IsNewSnapshotToFollow():
return acl.Allow
}
switch p := e.Payload.(type) {
case state.EventPayloadCheckServiceNode:
return p.Value.CanRead(authz)
}
return acl.Deny
}

View File

@ -58,12 +58,20 @@ func (l *eventLogger) Trace(e stream.Event) {
case e.IsEndOfSnapshot():
l.snapshotDone = true
l.logger.Trace("snapshot complete", "index", e.Index, "sent", l.count)
return
case e.IsNewSnapshotToFollow():
l.logger.Trace("starting new snapshot", "sent", l.count)
return
case l.snapshotDone:
l.logger.Trace("sending events", "index", e.Index, "sent", l.count, "batch_size", e.Len())
}
l.count += uint64(e.Len())
size := 1
if l, ok := e.Payload.(length); ok {
size = l.Len()
}
l.logger.Trace("sending events", "index", e.Index, "sent", l.count, "batch_size", size)
l.count += uint64(size)
}
type length interface {
Len() int
}

View File

@ -132,10 +132,8 @@ func filterByAuth(authz acl.Authorizer, event stream.Event) (stream.Event, bool)
if authz == nil {
return event, true
}
fn := func(e stream.Event) bool {
return enforceACL(authz, e) == acl.Allow
}
return event.Filter(fn)
return event, event.Payload.HasReadPermission(authz)
}
func newEventFromStreamEvent(event stream.Event) *pbsubscribe.Event {
@ -154,10 +152,10 @@ func newEventFromStreamEvent(event stream.Event) *pbsubscribe.Event {
func setPayload(e *pbsubscribe.Event, payload stream.Payload) {
switch p := payload.(type) {
case stream.PayloadEvents:
case *stream.PayloadEvents:
e.Payload = &pbsubscribe.Event_EventBatch{
EventBatch: &pbsubscribe.EventBatch{
Events: batchEventsFromEventSlice(p),
Events: batchEventsFromEventSlice(p.Items),
},
}
case state.EventPayloadCheckServiceNode:

View File

@ -917,8 +917,8 @@ func TestNewEventFromSteamEvent(t *testing.T) {
name: "event batch",
event: stream.Event{
Index: 2002,
Payload: stream.PayloadEvents{
{
Payload: newPayloadEvents(
stream.Event{
Index: 2002,
Payload: state.EventPayloadCheckServiceNode{
Op: pbsubscribe.CatalogOp_Register,
@ -928,7 +928,7 @@ func TestNewEventFromSteamEvent(t *testing.T) {
},
},
},
{
stream.Event{
Index: 2002,
Payload: state.EventPayloadCheckServiceNode{
Op: pbsubscribe.CatalogOp_Deregister,
@ -937,8 +937,7 @@ func TestNewEventFromSteamEvent(t *testing.T) {
Service: &structs.NodeService{Service: "web1"},
},
},
},
},
}),
},
expected: pbsubscribe.Event{
Index: 2002,
@ -1008,6 +1007,10 @@ func TestNewEventFromSteamEvent(t *testing.T) {
}
}
func newPayloadEvents(items ...stream.Event) *stream.PayloadEvents {
return &stream.PayloadEvents{Items: items}
}
// newEventFromSubscription is used to return framing events. EndOfSnapshot and
// NewSnapshotToFollow are not exported, but we can get them from a subscription.
func newEventFromSubscription(t *testing.T, index uint64) stream.Event {

View File

@ -11,12 +11,47 @@ import (
"github.com/hashicorp/consul/api"
bexpr "github.com/hashicorp/go-bexpr"
"github.com/mitchellh/pointerstructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var dumpFieldConfig = flag.Bool("dump-field-config", false, "generate field config dump file")
func TestPointerStructure(t *testing.T) {
csn := CheckServiceNode{
Node: &Node{
ID: "f18f3a10-2153-40ae-af7d-68db0e856498",
Node: "node1",
Address: "198.18.0.1",
},
Service: &NodeService{
ID: "test",
Service: "test",
Port: 1234,
TaggedAddresses: map[string]ServiceAddress{
"wan": {
Address: "1.1.1.1",
Port: 443,
},
},
},
}
ptr := pointerstructure.Pointer{
Parts: []string{
"Service",
"TaggedAddresses",
"wan",
"Address",
},
}
val, err := ptr.Get(csn)
require.NoError(t, err)
require.Equal(t, "1.1.1.1", val)
}
///////////////////////////////////////////////////////////////////////////////
//
// NOTE: The tests within this file are designed to validate that the fields

View File

@ -160,6 +160,15 @@ const (
// configured to use TLS. Any other value indicates that it was not setup in
// that manner.
MemberTagValueUseTLS = "1"
// MemberTagKeyReadReplica is the key used to indicate that the member is a read
// replica server (will remain a Raft non-voter).
// Read Replicas are a Consul Enterprise feature.
MemberTagKeyReadReplica = "read_replica"
// MemberTagValueReadReplica is the value of the MemberTagKeyReadReplica key when
// the member is in fact a read-replica. Any other value indicates that it is not.
// Read Replicas are a Consul Enterprise feature.
MemberTagValueReadReplica = "1"
)
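As a small illustration of how these tags are consumed, the hypothetical helper below (not part of this change) accepts both the deprecated `nonvoter` tag and the new `read_replica` tag when inspecting a gossiped member, matching what `IsConsulServer` does in the `metadata` hunk above.

```go
package metadata_example

import "github.com/hashicorp/serf/serf"

// isReadReplica reports whether the member advertised itself as a read
// replica, honouring both the deprecated and the new tag spellings.
func isReadReplica(m serf.Member) bool {
	// DEPRECATED spelling, still set by servers running 1.8.x and below.
	_, nonVoter := m.Tags["nonvoter"]
	_, readReplica := m.Tags["read_replica"]
	return nonVoter || readReplica
}
```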
type MemberACLMode string

View File

@ -5,12 +5,12 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/connect"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/stretchr/testify/require"
)
func TestUpstreamResolverFuncFromClient(t *testing.T) {
@ -79,8 +79,6 @@ func TestUpstreamResolverFuncFromClient(t *testing.T) {
}
func TestAgentConfigWatcherSidecarProxy(t *testing.T) {
t.Parallel()
a := agent.StartTestAgent(t, agent.TestAgent{Name: "agent_smith"})
defer a.Shutdown()

View File

@ -5,7 +5,7 @@ import (
"net"
"testing"
"github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
agConnect "github.com/hashicorp/consul/agent/connect"
@ -14,12 +14,10 @@ import (
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/testrpc"
)
func TestProxy_public(t *testing.T) {
t.Parallel()
require := require.New(t)
ports := freeport.MustTake(1)

View File

@ -14,13 +14,13 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
"github.com/stretchr/testify/require"
)
// Assert io.Closer implementation
@ -89,8 +89,8 @@ func TestService_Dial(t *testing.T) {
err := testSvr.Serve()
require.NoError(err)
}()
defer testSvr.Close()
<-testSvr.Listening
defer testSvr.Close()
}
// Always expect to be connecting to a "DB"

View File

@ -10,11 +10,12 @@ import (
"net/http"
"sync/atomic"
"github.com/hashicorp/go-hclog"
testing "github.com/mitchellh/go-testing-interface"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/go-hclog"
testing "github.com/mitchellh/go-testing-interface"
)
// TestService returns a Service instance based on a static TLS Config.
@ -124,8 +125,8 @@ func (s *TestServer) Serve() error {
if err != nil {
return err
}
close(s.Listening)
s.l = l
close(s.Listening)
log.Printf("test connect service listening on %s", s.Addr)
for {

View File

@ -13,9 +13,10 @@ import (
"strings"
"sync"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-hclog"
)
// parseLeafX509Cert will parse an X509 certificate
@ -460,5 +461,7 @@ func (cfg *dynamicTLSConfig) Ready() bool {
// method will not stop returning a nil chan in that case. It is only useful
// for initial startup. For ongoing health Ready() should be used.
func (cfg *dynamicTLSConfig) ReadyWait() <-chan struct{} {
cfg.RLock()
defer cfg.RUnlock()
return cfg.readyCh
}

View File

@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
@ -93,23 +94,28 @@ func (l *LogFile) pruneFiles() error {
if l.MaxFiles == 0 {
return nil
}
pattern := l.fileNamePattern()
//get all the files that match the log file pattern
globExpression := filepath.Join(l.logPath, fmt.Sprintf(pattern, "*"))
matches, err := filepath.Glob(globExpression)
pattern := filepath.Join(l.logPath, fmt.Sprintf(l.fileNamePattern(), "*"))
matches, err := filepath.Glob(pattern)
if err != nil {
return err
}
var stale int
if l.MaxFiles <= -1 {
// Prune everything
stale = len(matches)
} else {
// Prune if there are more files stored than the configured max
stale = len(matches) - l.MaxFiles
switch {
case l.MaxFiles < 0:
return removeFiles(matches)
case len(matches) < l.MaxFiles:
return nil
}
for i := 0; i < stale; i++ {
if err := os.Remove(matches[i]); err != nil {
sort.Strings(matches)
last := len(matches) - l.MaxFiles
return removeFiles(matches[:last])
}
func removeFiles(files []string) error {
for _, file := range files {
if err := os.Remove(file); err != nil {
return err
}
}
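A self-contained sketch of the pruning arithmetic introduced here (the helper is illustrative and not part of `LogFile`), assuming rotated file names sort lexically by their timestamp suffix: `MaxFiles == 0` disables pruning, a negative value removes every archive, and otherwise only the newest `MaxFiles` archives are kept.

```go
package logging_example

import "sort"

// pruneOldest returns the archive names that would be removed for a given
// MaxFiles setting; it mirrors the switch in pruneFiles above.
func pruneOldest(matches []string, maxFiles int) []string {
	switch {
	case maxFiles == 0:
		return nil // pruning disabled: keep every archive
	case maxFiles < 0:
		return matches // prune everything
	case len(matches) < maxFiles:
		return nil // fewer archives than the limit: nothing to delete
	}
	sort.Strings(matches) // timestamped names sort oldest first
	return matches[:len(matches)-maxFiles] // drop all but the newest maxFiles
}
```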

View File

@ -2,135 +2,122 @@ package logging
import (
"io/ioutil"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/sdk/testutil"
)
const (
testFileName = "Consul.log"
testDuration = 50 * time.Millisecond
testBytes = 10
)
func TestLogFile_timeRotation(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriterTime")
func TestLogFile_Rotation_MaxDuration(t *testing.T) {
tempDir := testutil.TempDir(t, "")
logFile := LogFile{
fileName: testFileName,
fileName: "consul.log",
logPath: tempDir,
duration: testDuration,
duration: 50 * time.Millisecond,
}
logFile.Write([]byte("Hello World"))
time.Sleep(3 * testDuration)
time.Sleep(3 * logFile.duration)
logFile.Write([]byte("Second File"))
want := 2
if got, _ := ioutil.ReadDir(tempDir); len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
}
require.Len(t, listDir(t, tempDir), 2)
}
func TestLogFile_openNew(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriterOpen")
logFile := LogFile{fileName: testFileName, logPath: tempDir, duration: testDuration}
if err := logFile.openNew(); err != nil {
t.Errorf("Expected open file %s, got an error (%s)", testFileName, err)
logFile := LogFile{
fileName: "consul.log",
logPath: testutil.TempDir(t, ""),
duration: defaultRotateDuration,
}
err := logFile.openNew()
require.NoError(t, err)
if _, err := ioutil.ReadFile(logFile.FileInfo.Name()); err != nil {
t.Errorf("Expected readable file %s, got an error (%s)", logFile.FileInfo.Name(), err)
}
msg := "[INFO] Something"
_, err = logFile.Write([]byte(msg))
require.NoError(t, err)
content, err := ioutil.ReadFile(logFile.FileInfo.Name())
require.NoError(t, err)
require.Contains(t, string(content), msg)
}
func TestLogFile_byteRotation(t *testing.T) {
func TestLogFile_Rotation_MaxBytes(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriterBytes")
logFile := LogFile{
fileName: testFileName,
fileName: "somefile.log",
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxBytes: 10,
duration: defaultRotateDuration,
}
logFile.Write([]byte("Hello World"))
logFile.Write([]byte("Second File"))
want := 2
tempFiles, _ := ioutil.ReadDir(tempDir)
if got := tempFiles; len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
}
require.Len(t, listDir(t, tempDir), 2)
}
func TestLogFile_deleteArchives(t *testing.T) {
tempDir := testutil.TempDir(t, "LogWriteDeleteArchives")
func TestLogFile_PruneFiles(t *testing.T) {
tempDir := testutil.TempDir(t, t.Name())
logFile := LogFile{
fileName: testFileName,
fileName: "consul.log",
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxBytes: 10,
duration: defaultRotateDuration,
MaxFiles: 1,
}
logFile.Write([]byte("[INFO] Hello World"))
logFile.Write([]byte("[INFO] Second File"))
logFile.Write([]byte("[INFO] Third File"))
want := 2
tempFiles, _ := ioutil.ReadDir(tempDir)
if got := tempFiles; len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
return
}
for _, tempFile := range tempFiles {
var bytes []byte
var err error
path := filepath.Join(tempDir, tempFile.Name())
if bytes, err = ioutil.ReadFile(path); err != nil {
t.Errorf(err.Error())
return
}
contents := string(bytes)
if contents == "[INFO] Hello World" {
t.Errorf("Should have deleted the eldest log file")
return
}
}
logFiles := listDir(t, tempDir)
sort.Strings(logFiles)
require.Len(t, logFiles, 2)
content, err := ioutil.ReadFile(filepath.Join(tempDir, logFiles[0]))
require.NoError(t, err)
require.Contains(t, string(content), "Second File")
content, err = ioutil.ReadFile(filepath.Join(tempDir, logFiles[1]))
require.NoError(t, err)
require.Contains(t, string(content), "Third File")
}
func TestLogFile_deleteArchivesDisabled(t *testing.T) {
func TestLogFile_PruneFiles_Disabled(t *testing.T) {
tempDir := testutil.TempDir(t, t.Name())
logFile := LogFile{
fileName: testFileName,
fileName: "somename.log",
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxBytes: 10,
duration: defaultRotateDuration,
MaxFiles: 0,
}
logFile.Write([]byte("[INFO] Hello World"))
logFile.Write([]byte("[INFO] Second File"))
logFile.Write([]byte("[INFO] Third File"))
want := 3
tempFiles, _ := ioutil.ReadDir(tempDir)
if got := tempFiles; len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
return
}
require.Len(t, listDir(t, tempDir), 3)
}
func TestLogFile_rotationDisabled(t *testing.T) {
func TestLogFile_FileRotation_Disabled(t *testing.T) {
tempDir := testutil.TempDir(t, t.Name())
logFile := LogFile{
fileName: testFileName,
fileName: "consul.log",
logPath: tempDir,
MaxBytes: testBytes,
duration: 24 * time.Hour,
MaxBytes: 10,
MaxFiles: -1,
}
logFile.Write([]byte("[INFO] Hello World"))
logFile.Write([]byte("[INFO] Second File"))
logFile.Write([]byte("[INFO] Third File"))
want := 1
tempFiles, _ := ioutil.ReadDir(tempDir)
if got := tempFiles; len(got) != want {
t.Errorf("Expected %d files, got %v file(s)", want, len(got))
return
}
require.Len(t, listDir(t, tempDir), 1)
}
func listDir(t *testing.T, name string) []string {
t.Helper()
fh, err := os.Open(name)
require.NoError(t, err)
files, err := fh.Readdirnames(100)
require.NoError(t, err)
return files
}

View File

@ -40,15 +40,8 @@ type Config struct {
LogRotateMaxFiles int
}
const (
// defaultRotateDuration is the default time taken by the agent to rotate logs
defaultRotateDuration = 24 * time.Hour
)
var (
logRotateDuration time.Duration
logRotateBytes int
)
// defaultRotateDuration is the default interval at which the agent rotates log files
const defaultRotateDuration = 24 * time.Hour
type LogSetupErrorFn func(string)
@ -86,28 +79,22 @@ func Setup(config Config, out io.Writer) (hclog.InterceptLogger, error) {
// Create a file logger if the user has specified the path to the log file
if config.LogFilePath != "" {
dir, fileName := filepath.Split(config.LogFilePath)
// If a path is provided but has no fileName, a default file name is used.
if fileName == "" {
fileName = "consul.log"
}
// Try to enter the user specified log rotation duration first
if config.LogRotateDuration != 0 {
logRotateDuration = config.LogRotateDuration
} else {
// Default to 24 hrs if no rotation period is specified
logRotateDuration = defaultRotateDuration
}
// User specified byte limit for log rotation if one is provided
if config.LogRotateBytes != 0 {
logRotateBytes = config.LogRotateBytes
if config.LogRotateDuration == 0 {
config.LogRotateDuration = defaultRotateDuration
}
logFile := &LogFile{
fileName: fileName,
logPath: dir,
duration: logRotateDuration,
MaxBytes: logRotateBytes,
duration: config.LogRotateDuration,
MaxBytes: config.LogRotateBytes,
MaxFiles: config.LogRotateMaxFiles,
}
if err := logFile.pruneFiles(); err != nil {
return nil, fmt.Errorf("Failed to prune log files: %w", err)
}
if err := logFile.openNew(); err != nil {
return nil, fmt.Errorf("Failed to setup logging: %w", err)
}

View File

@ -1,35 +1,46 @@
<svg width="{{size}}" height="{{size}}">
<g class="tomography" transform="translate({{div size 2}}, {{div size 2}})">
<g>
<circle class="background" r="{{circle.[0]}}"/>
<circle class="axis" r="{{circle.[1]}}"/>
<circle class="axis" r="{{circle.[2]}}"/>
<circle class="axis" r="{{circle.[3]}}"/>
<circle class="border" r="{{circle.[4]}}"/>
</g>
<g class="lines">
{{#each distances as |item|}}
<line transform="rotate({{item.rotate}})" y2="{{item.y2}}" data-node="{{item.d}}" data-distance="{{item.distance}}" data-segment="{{item.segment}}"/>
{{/each}}
</g>
<g class="labels">
<circle class="point" r="5"/>
<g class="tick" transform="translate(0, {{labels.[0]}})">
<line x2="70"/>
<text x="75" y="0" dy=".32em">{{format-number milliseconds.[0] maximumFractionDigits=2}}ms</text>
</g>
<g class="tick" transform="translate(0, {{labels.[1]}})">
<line x2="70"/>
<text x="75" y="0" dy=".32em">{{format-number milliseconds.[1] maximumFractionDigits=2}}ms</text>
</g>
<g class="tick" transform="translate(0, {{labels.[2]}})">
<line x2="70"/>
<text x="75" y="0" dy=".32em">{{format-number milliseconds.[2] maximumFractionDigits=2}}ms</text>
</g>
<g class="tick" transform="translate(0, {{labels.[3]}})">
<line x2="70"/>
<text x="75" y="0" dy=".32em">{{format-number milliseconds.[3] maximumFractionDigits=2}}ms</text>
</g>
</g>
</g>
</svg>
<div
class="tomography-graph"
...attributes
>
<svg width={{this.size}} height={{this.size}}>
<g transform="translate({{div this.size 2}}, {{div this.size 2}})">
<g>
<circle class="background" r={{this.circle.[0]}} />
<circle class="axis" r={{this.circle.[1]}} />
<circle class="axis" r={{this.circle.[2]}} />
<circle class="axis" r={{this.circle.[3]}} />
<circle class="border" r={{this.circle.[4]}} />
</g>
<g class="lines">
{{#each this.distances as |item|}}
<line
transform="rotate({{item.rotate}})"
y2={{item.y2}}
data-node={{item.d}}
data-distance={{item.distance}}
data-segment={{item.segment}}
/>
{{/each}}
</g>
<g class="labels">
<circle class="point" r="5" />
<g class="tick" transform="translate(0, {{this.labels.[0]}})">
<line x2="70" />
<text x="75" y="0" dy=".32em">{{format-number this.milliseconds.[0] maximumFractionDigits=2}}ms</text>
</g>
<g class="tick" transform="translate(0, {{this.labels.[1]}})">
<line x2="70" />
<text x="75" y="0" dy=".32em">{{format-number this.milliseconds.[1] maximumFractionDigits=2}}ms</text>
</g>
<g class="tick" transform="translate(0, {{this.labels.[2]}})">
<line x2="70" />
<text x="75" y="0" dy=".32em">{{format-number this.milliseconds.[2] maximumFractionDigits=2}}ms</text>
</g>
<g class="tick" transform="translate(0, {{this.labels.[3]}})">
<line x2="70" />
<text x="75" y="0" dy=".32em">{{format-number this.milliseconds.[3] maximumFractionDigits=2}}ms</text>
</g>
</g>
</g>
</svg>
</div>


View File

@ -1,5 +1,5 @@
import Component from '@ember/component';
import { computed, set, get } from '@ember/object';
import Component from '@glimmer/component';
import { tracked } from '@glimmer/tracking';
const size = 336;
const insetSize = size / 2 - 8;
@ -9,52 +9,39 @@ const inset = function(num) {
const milliseconds = function(num, max) {
return max > 0 ? parseInt(max * num) / 100 : 0;
};
export default Component.extend({
size: size,
tomography: 0,
max: -999999999,
init: function() {
this._super(...arguments);
this.circle = [inset(1), inset(0.25), inset(0.5), inset(0.75), inset(1)];
this.labels = [inset(-0.25), inset(-0.5), inset(-0.75), inset(-1)];
},
milliseconds: computed('distances', 'max', function() {
const max = get(this, 'max');
return [
milliseconds(25, max),
milliseconds(50, max),
milliseconds(75, max),
milliseconds(100, max),
];
}),
distances: computed('tomography', function() {
const tomography = get(this, 'tomography');
let distances = get(tomography, 'distances') || [];
// TODO: This should probably be moved into the milliseconds computedProperty
/*eslint ember/no-side-effects: "warn"*/
distances.forEach((d, i) => {
if (d.distance > get(this, 'max')) {
set(this, 'max', d.distance);
}
});
let n = get(distances, 'length');
if (n > 360) {
export default class TomographyGraph extends Component {
@tracked max = -999999999;
size = size;
circle = [inset(1), inset(0.25), inset(0.5), inset(0.75), inset(1)];
labels = [inset(-0.25), inset(-0.5), inset(-0.75), inset(-1)];
get milliseconds() {
const distances = this.args.distances || [];
const max = distances.reduce((prev, d) => Math.max(prev, d.distance), this.max);
return [25, 50, 75, 100].map(item => milliseconds(item, max));
}
get distances() {
let distances = this.args.distances || [];
const max = distances.reduce((prev, d) => Math.max(prev, d.distance), this.max);
const len = distances.length;
if (len > 360) {
// We have more nodes than we want to show, take a random sampling to keep
// the number around 360.
const sampling = 360 / n;
const sampling = 360 / len;
distances = distances.filter(function(_, i) {
return i == 0 || i == n - 1 || Math.random() < sampling;
return i == 0 || i == len - 1 || Math.random() < sampling;
});
n = get(distances, 'length');
}
return distances.map((d, i) => {
return {
rotate: (i * 360) / n,
y2: -insetSize * (d.distance / get(this, 'max')),
rotate: (i * 360) / distances.length,
y2: -insetSize * (d.distance / max),
node: d.node,
distance: d.distance,
segment: d.segment,
};
});
}),
});
}
}

View File

@ -0,0 +1,33 @@
.tomography-graph {
.background {
fill: $gray-050;
}
.axis {
fill: none;
stroke: $gray-300;
stroke-dasharray: 4 4;
}
.border {
fill: none;
stroke: $gray-300;
}
.point {
stroke: $gray-400;
fill: $magenta-600;
}
.lines line {
stroke: $magenta-600;
}
.lines line:hover {
stroke: $gray-300;
stroke-width: 2px;
}
.tick line {
stroke: $gray-300;
}
.tick text {
font-size: $typo-size-600;
text-anchor: start;
color: $gray-900;
}
}

View File

@ -1,6 +1,8 @@
<div
class="list-collection list-collection-scroll-{{scroll}}"
style={{{style}}}
style={{{concat
'height:' style.height 'px'
}}}
id={{guid}}
...attributes
>

View File

@ -2,17 +2,16 @@ import { inject as service } from '@ember/service';
import { computed, get, set } from '@ember/object';
import Component from 'ember-collection/components/ember-collection';
import PercentageColumns from 'ember-collection/layouts/percentage-columns';
import style from 'ember-computed-style';
import Slotted from 'block-slots';
const formatItemStyle = PercentageColumns.prototype.formatItemStyle;
export default Component.extend(Slotted, {
tagName: '',
dom: service('dom'),
tagName: '',
height: 500,
cellHeight: 70,
style: style('getStyle'),
checked: null,
scroll: 'virtual',
init: function() {
@ -43,7 +42,7 @@ export default Component.extend(Slotted, {
return style;
};
},
getStyle: computed('height', function() {
style: computed('height', function() {
if (this.scroll !== 'virtual') {
return {};
}

View File

@ -1,5 +1,12 @@
<nav
style={{if selectedWidth (concat '--selected-width:' selectedWidth ';--selected-left:' selectedLeft ';--selected-height:' selectedHeight ';--selected-top:' selectedTop) undefined}}
style={{{if selectedWidth (concat
'--selected-width:' selectedWidth ';'
'--selected-left:' selectedLeft ';'
'--selected-height:' selectedHeight ';'
'--selected-top:' selectedTop
)
undefined
}}}
role="tablist"
class={{concat 'tab-nav' (if isAnimatable ' animatable' '')}}
id={{guid}}>

View File

@ -1,3 +1,11 @@
<table
class="tabular-collection dom-recycling {{if hasActions 'has-actions' ''}}"
id={{guid}}
style={{{concat
'height:' style.height 'px'
}}}
...attributes
>
{{on-window 'resize' (action "resize") }}
{{yield}}
{{#if hasCaption}}
@ -25,4 +33,5 @@
{{/if}}
</tr>
{{~/each~}}
</EmberNativeScrollable>
</EmberNativeScrollable>
</table>

View File

@ -3,18 +3,13 @@ import { computed, get, set } from '@ember/object';
import CollectionComponent from 'ember-collection/components/ember-collection';
import needsRevalidate from 'ember-collection/utils/needs-revalidate';
import Grid from 'ember-collection/layouts/grid';
import style from 'ember-computed-style';
import Slotted from 'block-slots';
const formatItemStyle = Grid.prototype.formatItemStyle;
export default CollectionComponent.extend(Slotted, {
tagName: 'table',
classNames: ['dom-recycling'],
classNameBindings: ['hasActions'],
attributeBindings: ['style'],
tagName: '',
dom: service('dom'),
style: style('getStyle'),
width: 1150,
rowHeight: 50,
maxHeight: 500,
@ -22,6 +17,7 @@ export default CollectionComponent.extend(Slotted, {
hasCaption: false,
init: function() {
this._super(...arguments);
this.guid = this.dom.guid(this);
// TODO: The row height should auto calculate properly from the CSS
const o = this;
this['cell-layout'] = new Grid(get(this, 'width'), get(this, 'rowHeight'));
@ -35,9 +31,10 @@ export default CollectionComponent.extend(Slotted, {
},
didInsertElement: function() {
this._super(...arguments);
this.$element = this.dom.element(`#${this.guid}`);
this.actions.resize.apply(this, [{ target: this.dom.viewport() }]);
},
getStyle: computed('rowHeight', '_items', 'maxRows', 'maxHeight', function() {
style: computed('rowHeight', '_items', 'maxRows', 'maxHeight', function() {
const maxRows = get(this, 'rows');
let height = get(this, 'maxHeight');
if (maxRows) {
@ -68,7 +65,7 @@ export default CollectionComponent.extend(Slotted, {
},
actions: {
resize: function(e) {
const $tbody = this.element;
const $tbody = this.$element;
const $appContent = this.dom.element('.app-view');
if ($appContent) {
const border = 1;

View File

@ -8,20 +8,22 @@
{{on-window 'resize' (action 'redraw')}}
{{#if data.labels}}
<a class="sparkline-key-link" {{action (mut shouldShowKey) true}}>Key</a>
{{#if (not empty)}}
{{#if data.labels}}
<a class="sparkline-key-link" {{action (mut shouldShowKey) true}}>Key</a>
{{/if}}
{{/if}}
<div class="sparkline-wrapper">
<div class="tooltip">
<div class="sparkline-time">Timestamp</div>
</div>
{{#unless data}}
{{#if empty}}
<TopologyMetrics::Status
@noMetricsReason={{@noMetricsReason}}
@error={{error}}
/>
{{/unless}}
{{/if}}
<svg class="sparkline"></svg>
</div>

View File

@ -7,6 +7,7 @@ import { scaleLinear, scaleTime, scaleOrdinal } from 'd3-scale';
import { schemeTableau10 } from 'd3-scale-chromatic';
import { area, stack, stackOrderReverse } from 'd3-shape';
import { max, extent, bisector } from 'd3-array';
import { set } from '@ember/object';
dayjs.extend(Calendar);
@ -21,7 +22,7 @@ function niceTimeWithSeconds(d) {
export default Component.extend({
data: null,
empty: false,
actions: {
redraw: function(evt) {
this.drawGraphs();
@ -35,6 +36,7 @@ export default Component.extend({
drawGraphs: function() {
if (!this.data) {
set(this, 'empty', true);
return;
}
@ -59,10 +61,10 @@ export default Component.extend({
if (series.length == 0 || keys.length == 0) {
// Put the graph in an error state that might get fixed if metrics show up
// on next poll.
let loader = this.element.querySelector('.sparkline-loader');
loader.innerHTML = 'No Metrics Available';
loader.style.display = 'block';
set(this, 'empty', true);
return;
} else {
set(this, 'empty', false);
}
let st = stack()

View File

@ -30,7 +30,7 @@ export default class CoordinateService extends RepositoryService {
if (get(coordinates, 'length') > 1) {
results = tomography(
node,
coordinates.map(item => get(item, 'data'))
coordinates
);
}
results.meta = get(coordinates, 'meta');

View File

@ -22,7 +22,6 @@
@import './components/healthcheck-output';
@import './components/freetext-filter';
@import './components/filter-bar';
@import './components/tomography-graph';
@import './components/flash-message';
@import './components/code-editor';
@import './components/confirmation-dialog';
@ -55,6 +54,7 @@
@import 'consul-ui/components/notice';
@import 'consul-ui/components/modal-dialog';
@import 'consul-ui/components/consul/tomography/graph';
@import 'consul-ui/components/consul/discovery-chain';
@import 'consul-ui/components/consul/upstream-instance/list';
@import 'consul-ui/components/consul/exposed-path/list';

View File

@ -1,31 +0,0 @@
.tomography .background {
fill: $gray-050;
}
.tomography .axis {
fill: none;
stroke: $gray-300;
stroke-dasharray: 4 4;
}
.tomography .border {
fill: none;
stroke: $gray-300;
}
.tomography .point {
stroke: $gray-400;
fill: $magenta-600;
}
.tomography .lines line {
stroke: $magenta-600;
}
.tomography .lines line:hover {
stroke: $gray-300;
stroke-width: 2px;
}
.tomography .tick line {
stroke: $gray-300;
}
.tomography .tick text {
font-size: $typo-size-600;
text-anchor: start;
color: $gray-900;
}

View File

@ -1,5 +1,5 @@
<HeadLayout />
{{title 'Consul' separator=' - '}}
{{page-title 'Consul' separator=' - '}}
{{#if (not-eq router.currentRouteName 'application')}}
<HashicorpConsul
id="wrapper"

View File

@ -1,11 +1,11 @@
{{#if isAuthorized }}
{{#if create }}
{{title 'New Policy'}}
{{page-title 'New Policy'}}
{{else}}
{{title 'Edit Policy'}}
{{page-title 'Edit Policy'}}
{{/if}}
{{else}}
{{title 'Access Controls'}}
{{page-title 'Access Controls'}}
{{/if}}
<AppView
@authorized={{isAuthorized}}

View File

@ -1,7 +1,7 @@
{{#if isAuthorized }}
{{title 'Policies'}}
{{page-title 'Policies'}}
{{else}}
{{title 'Access Controls'}}
{{page-title 'Access Controls'}}
{{/if}}
{{#let (hash
kinds=(if kind (split kind ',') undefined)

View File

@ -1,11 +1,11 @@
{{#if isAuthorized }}
{{#if item.ID}}
{{title 'Edit Role'}}
{{page-title 'Edit Role'}}
{{else}}
{{title 'New Role'}}
{{page-title 'New Role'}}
{{/if}}
{{else}}
{{title 'Access Controls'}}
{{page-title 'Access Controls'}}
{{/if}}
<AppView
@authorized={{isAuthorized}}

View File

@ -1,7 +1,7 @@
{{#if isAuthorized }}
{{title 'Roles'}}
{{page-title 'Roles'}}
{{else}}
{{title 'Access Controls'}}
{{page-title 'Access Controls'}}
{{/if}}
{{#let (or sortBy "Name:asc") as |sort|}}

View File

@ -1,11 +1,11 @@
{{#if isAuthorized }}
{{#if create}}
{{title 'New Token'}}
{{page-title 'New Token'}}
{{else}}
{{title 'Edit Token'}}
{{page-title 'Edit Token'}}
{{/if}}
{{else}}
{{title 'Access Controls'}}
{{page-title 'Access Controls'}}
{{/if}}
<AppView
@authorized={{isAuthorized}}

View File

@ -1,7 +1,7 @@
{{#if isAuthorized }}
{{title 'Tokens'}}
{{page-title 'Tokens'}}
{{else}}
{{title 'Access Controls'}}
{{page-title 'Access Controls'}}
{{/if}}
{{#let (hash

View File

@ -1,7 +1,7 @@
{{#if item.ID}}
{{title 'Edit Intention'}}
{{page-title 'Edit Intention'}}
{{else}}
{{title 'New Intention'}}
{{page-title 'New Intention'}}
{{/if}}
<AppView>
<BlockSlot @name="breadcrumbs">

View File

@ -1,4 +1,4 @@
{{title 'Intentions'}}
{{page-title 'Intentions'}}
<DataLoader @src={{concat '/' nspace '/' dc '/intentions'}} as |api|>
<BlockSlot @name="error">

View File

@ -1,7 +1,7 @@
{{#if item.Key }}
{{title 'Edit Key/Value'}}
{{page-title 'Edit Key/Value'}}
{{else}}
{{title 'New Key/Value'}}
{{page-title 'New Key/Value'}}
{{/if}}
<AppView>
<BlockSlot @name="breadcrumbs">

View File

@ -1,4 +1,4 @@
{{title 'Key/Value'}}
{{page-title 'Key/Value'}}
{{#let (or sortBy "isFolder:asc") as |sort|}}
<AppView>
<BlockSlot @name="breadcrumbs">

View File

@ -1,4 +1,4 @@
{{title 'Nodes'}}
{{page-title 'Nodes'}}
<EventSource @src={{items}} />
<EventSource @src={{leader}} />
{{#let (hash

View File

@ -1,4 +1,4 @@
{{title item.Node}}
{{page-title item.Node}}
<DataLoader as |api|>
<BlockSlot @name="data">

View File

@ -22,7 +22,7 @@
</dd>
</dl>
</div>
<Consul::Tomography::Graph @tomography={{tomography}} />
<Consul::Tomography::Graph @distances={{tomography.distances}} />
</div>
</div>

View File

@ -1,7 +1,7 @@
{{#if create }}
{{title 'New Namespace'}}
{{page-title 'New Namespace'}}
{{else}}
{{title 'Edit Namespace'}}
{{page-title 'Edit Namespace'}}
{{/if}}
<AppView>
<BlockSlot @name="notification" as |status type item error|>

View File

@ -1,4 +1,4 @@
{{title 'Namespaces'}}
{{page-title 'Namespaces'}}
{{#let (or sortBy "Name:asc") as |sort|}}
<EventSource @src={{items}} />
<AppView>

View File

@ -1,4 +1,4 @@
{{title 'Services'}}
{{page-title 'Services'}}
<EventSource @src={{items}} />
{{#let (hash
statuses=(if status (split status ',') undefined)

View File

@ -1,4 +1,4 @@
{{title item.Service.ID}}
{{page-title item.Service.ID}}
<EventSource @src={{item}} @onerror={{action "error"}} />
<EventSource @src={{proxy}} />
<EventSource @src={{proxyMeta}} />

View File

@ -4,7 +4,7 @@
<EventSource @src={{proxies}} />
<EventSource @src={{gatewayServices}} />
<EventSource @src={{topology}} />
{{title item.Service.Service}}
{{page-title item.Service.Service}}
<AppView>
<BlockSlot @name="notification" as |status type|>
<Consul::Service::Notifications

View File

@ -1,4 +1,4 @@
{{title "Settings"}}
{{page-title "Settings"}}
<AppView>
<BlockSlot @name="header">
<h1>

View File

@ -104,7 +104,6 @@
"ember-cli-yadda": "^0.5.0",
"ember-collection": "^1.0.0-alpha.9",
"ember-composable-helpers": "~4.0.0",
"ember-computed-style": "^0.3.0",
"ember-data": "~3.20.4",
"ember-data-model-fragments": "5.0.0-beta.0",
"ember-exam": "^4.0.0",

View File

@ -7935,13 +7935,6 @@ ember-composable-helpers@~4.0.0:
ember-cli-babel "^7.11.1"
resolve "^1.10.0"
ember-computed-style@^0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/ember-computed-style/-/ember-computed-style-0.3.0.tgz#a04375f8b48fbf72fd61e76da3358075bd195ae9"
integrity sha512-EMMlKa8HKUCUapkJVgi/2VPyqUTWxIGs/rNsxMcMiOLFErHv6D1Mw8tvByuXeCtW8KrbwAFX3vaCMNYsAZAjvQ==
dependencies:
ember-cli-babel "^6.6.0"
ember-concurrency-decorators@^2.0.0:
version "2.0.1"
resolved "https://registry.yarnpkg.com/ember-concurrency-decorators/-/ember-concurrency-decorators-2.0.1.tgz#f5465785e6cf44684fb158ae6ab3aa1b131fae43"

View File

@ -1 +1 @@
export default '1.9.0'
export default '1.9.0'

View File

@ -479,7 +479,10 @@ The options below are all specified on the command-line.
This overrides the default server RPC port 8300. This is available in Consul 1.2.2
and later.
- `-non-voting-server` ((#\_non_voting_server)) <EnterpriseAlert inline /> - This
- `-non-voting-server` ((#\_non_voting_server)) <EnterpriseAlert inline /> - **This field
is deprecated in Consul 1.9.1. See the [`-read-replica`](#_read_replica) flag instead.**
- `-read-replica` ((#\_read_replica)) <EnterpriseAlert inline /> - This
flag is used to make the server not participate in the Raft quorum, and have it
only receive the data replication stream. This can be used to add read scalability
to a cluster in cases where a high volume of reads to servers is needed.
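As a rough, hypothetical illustration (the data directory and join address below are assumptions, not part of this change), an Enterprise server could be started as a read replica like so:

```shell
# Hypothetical example: start a Consul Enterprise server as a read replica.
# It receives the replication stream but never votes in Raft elections.
consul agent -server \
  -read-replica \
  -data-dir=/opt/consul \
  -retry-join=10.0.0.10
```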
@ -1853,7 +1856,9 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'."
- `server` Equivalent to the [`-server` command-line flag](#_server).
- `non_voting_server` - Equivalent to the [`-non-voting-server` command-line flag](#_non_voting_server).
- `non_voting_server` - **This field is deprecated in Consul 1.9.1. See the [`read_replica`](#read_replica) field instead.**
- `read_replica` - Equivalent to the [`-read-replica` command-line flag](#_read_replica).
- `server_name` When provided, this overrides the [`node_name`](#_node)
for the TLS certificate. It can be used to ensure that the certificate name matches
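For illustration only, a minimal sketch of the equivalent configuration-file form (file path and values are assumptions): the new `read_replica` setting can be placed in an HCL file and loaded with `-config-file`.

```shell
# Hypothetical sketch: enable read replica mode via an HCL config file
# (Enterprise only) and start the agent with it.
cat > /etc/consul.d/replica.hcl <<'EOF'
server       = true
read_replica = true
data_dir     = "/opt/consul"
EOF

consul agent -config-file=/etc/consul.d/replica.hcl
```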

View File

@ -4,12 +4,10 @@ page_title: Consul Enterprise Enhanced Read Scalability
sidebar_title: Enhanced Read Scalability
description: >-
Consul Enterprise supports increased read scalability without impacting write
latency by introducing
non-voting servers.
latency by introducing read replicas.
---
# Enhanced Read Scalability with Non-Voting Servers
# Enhanced Read Scalability with Read Replicas
<EnterpriseAlert>
This feature requires{' '}
@ -18,10 +16,10 @@ description: >-
</EnterpriseAlert>
Consul Enterprise provides the ability to scale clustered Consul servers
to include voting and non-voting servers. Non-voting servers still receive data from the cluster replication,
to include voting servers and read replicas. Read replicas still receive data from the cluster replication,
but they do not take part in quorum election operations. Expanding your Consul cluster in this way can scale
reads without impacting write latency.
For more details, review the [Consul server configuration](/docs/agent/options)
documentation and the [-non-voting-server](/docs/agent/options#_non_voting_server)
documentation and the [-read-replica](/docs/agent/options#_read_replica)
configuration flag.
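As a quick sanity check (a sketch, not part of the documented change), the existing `consul operator raft list-peers` command can confirm that a replica joined without voting rights; the output shown in the comments below is approximate.

```shell
# List Raft peers; read replicas appear with Voter set to false.
consul operator raft list-peers
# Node       ID        Address          State     Voter  RaftProtocol
# server-1   8d6c...   10.0.0.10:8300   leader    true   3
# replica-1  3fb1...   10.0.0.20:8300   follower  false  3
```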