mirror of https://github.com/status-im/consul.git
Merge remote-tracking branch 'hashicorp/master' into enable_tag_drift_03
commit 0b3faf6e4a
@@ -9,14 +9,16 @@ BUG FIXES:
* Allow services with `/` characters in the UI [GH-988]
* Token hiding in HTTP logs bug fixed [GH-1020]
* RFC6598 addresses are accepted as private IP's [GH-1050]
* Tokens passed from the CLI or API work for maint mode [GH-1230]

IMPROVEMENTS:

* Advertised gossip/rpc addresses can now be configured [GH-1004]
* Atlas integration options are reload-able via SIGHUP [GH-1199]
* Atlas endpoint is a configurable option and CLI arg [GH-1201]

MISC:

* Protocol version bumped to 3 for serf protocol 5 [GH-996]
* Vagrantfile fixed for VMware [GH-1042]

## 0.5.2 (May 18, 2015)
@@ -1276,7 +1276,7 @@ func serviceMaintCheckID(serviceID string) string {

// EnableServiceMaintenance will register a false health check against the given
// service ID with critical status. This will exclude the service from queries.
func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
func (a *Agent) EnableServiceMaintenance(serviceID, reason, token string) error {
service, ok := a.state.Services()[serviceID]
if !ok {
return fmt.Errorf("No service registered with ID %q", serviceID)

@@ -1303,7 +1303,7 @@ func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
ServiceName: service.Service,
Status: structs.HealthCritical,
}
a.AddCheck(check, nil, true, "")
a.AddCheck(check, nil, true, token)
a.logger.Printf("[INFO] agent: Service %q entered maintenance mode", serviceID)

return nil

@@ -1330,7 +1330,7 @@ func (a *Agent) DisableServiceMaintenance(serviceID string) error {
}

// EnableNodeMaintenance places a node into maintenance mode.
func (a *Agent) EnableNodeMaintenance(reason string) {
func (a *Agent) EnableNodeMaintenance(reason, token string) {
// Ensure node maintenance is not already enabled
if _, ok := a.state.Checks()[nodeMaintCheckID]; ok {
return

@@ -1349,7 +1349,7 @@ func (a *Agent) EnableNodeMaintenance(reason string) {
Notes: reason,
Status: structs.HealthCritical,
}
a.AddCheck(check, nil, true, "")
a.AddCheck(check, nil, true, token)
a.logger.Printf("[INFO] agent: Node entered maintenance mode")
}
@@ -266,9 +266,13 @@ func (s *HTTPServer) AgentServiceMaintenance(resp http.ResponseWriter, req *http
return nil, nil
}

// Get the provided token, if any
var token string
s.parseToken(req, &token)

if enable {
reason := params.Get("reason")
if err = s.agent.EnableServiceMaintenance(serviceID, reason); err != nil {
if err = s.agent.EnableServiceMaintenance(serviceID, reason, token); err != nil {
resp.WriteHeader(404)
resp.Write([]byte(err.Error()))
return nil, nil

@@ -307,8 +311,12 @@ func (s *HTTPServer) AgentNodeMaintenance(resp http.ResponseWriter, req *http.Re
return nil, nil
}

// Get the provided token, if any
var token string
s.parseToken(req, &token)

if enable {
s.agent.EnableNodeMaintenance(params.Get("reason"))
s.agent.EnableNodeMaintenance(params.Get("reason"), token)
} else {
s.agent.DisableNodeMaintenance()
}
@@ -655,7 +655,7 @@ func TestHTTPAgent_EnableServiceMaintenance(t *testing.T) {
}

// Force the service into maintenance mode
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken", nil)
req, _ := http.NewRequest("PUT", "/v1/agent/service/maintenance/test?enable=true&reason=broken&token=mytoken", nil)
resp := httptest.NewRecorder()
if _, err := srv.AgentServiceMaintenance(resp, req); err != nil {
t.Fatalf("err: %s", err)

@@ -671,6 +671,11 @@ func TestHTTPAgent_EnableServiceMaintenance(t *testing.T) {
t.Fatalf("should have registered maintenance check")
}

// Ensure the token was added
if token := srv.agent.state.CheckToken(checkID); token != "mytoken" {
t.Fatalf("expected 'mytoken', got '%s'", token)
}

// Ensure the reason was set in notes
if check.Notes != "broken" {
t.Fatalf("bad: %#v", check)

@@ -693,7 +698,7 @@ func TestHTTPAgent_DisableServiceMaintenance(t *testing.T) {
}

// Force the service into maintenance mode
if err := srv.agent.EnableServiceMaintenance("test", ""); err != nil {
if err := srv.agent.EnableServiceMaintenance("test", "", ""); err != nil {
t.Fatalf("err: %s", err)
}

@@ -749,7 +754,7 @@ func TestHTTPAgent_EnableNodeMaintenance(t *testing.T) {

// Force the node into maintenance mode
req, _ := http.NewRequest(
"PUT", "/v1/agent/self/maintenance?enable=true&reason=broken", nil)
"PUT", "/v1/agent/self/maintenance?enable=true&reason=broken&token=mytoken", nil)
resp := httptest.NewRecorder()
if _, err := srv.AgentNodeMaintenance(resp, req); err != nil {
t.Fatalf("err: %s", err)

@@ -764,6 +769,11 @@ func TestHTTPAgent_EnableNodeMaintenance(t *testing.T) {
t.Fatalf("should have registered maintenance check")
}

// Check that the token was used
if token := srv.agent.state.CheckToken(nodeMaintCheckID); token != "mytoken" {
t.Fatalf("expected 'mytoken', got '%s'", token)
}

// Ensure the reason was set in notes
if check.Notes != "broken" {
t.Fatalf("bad: %#v", check)

@@ -777,7 +787,7 @@ func TestHTTPAgent_DisableNodeMaintenance(t *testing.T) {
defer srv.agent.Shutdown()

// Force the node into maintenance mode
srv.agent.EnableNodeMaintenance("")
srv.agent.EnableNodeMaintenance("", "")

// Leave maintenance mode
req, _ := http.NewRequest("PUT", "/v1/agent/self/maintenance?enable=false", nil)
@@ -25,6 +25,8 @@ func nextConfig() *Config {
idx := int(atomic.AddUint64(&offset, 1))
conf := DefaultConfig()

conf.Version = "a.b"
conf.VersionPrerelease = "c.d"
conf.AdvertiseAddr = "127.0.0.1"
conf.Bootstrap = true
conf.Datacenter = "dc1"

@@ -1205,7 +1207,7 @@ func TestAgent_ServiceMaintenanceMode(t *testing.T) {
}

// Enter maintenance mode for the service
if err := agent.EnableServiceMaintenance("redis", "broken"); err != nil {
if err := agent.EnableServiceMaintenance("redis", "broken", "mytoken"); err != nil {
t.Fatalf("err: %s", err)
}

@@ -1216,6 +1218,11 @@ func TestAgent_ServiceMaintenanceMode(t *testing.T) {
t.Fatalf("should have registered critical maintenance check")
}

// Check that the token was used to register the check
if token := agent.state.CheckToken(checkID); token != "mytoken" {
t.Fatalf("expected 'mytoken', got: '%s'", token)
}

// Ensure the reason was set in notes
if check.Notes != "broken" {
t.Fatalf("bad: %#v", check)

@@ -1232,7 +1239,7 @@ func TestAgent_ServiceMaintenanceMode(t *testing.T) {
}

// Enter service maintenance mode without providing a reason
if err := agent.EnableServiceMaintenance("redis", ""); err != nil {
if err := agent.EnableServiceMaintenance("redis", "", ""); err != nil {
t.Fatalf("err: %s", err)
}

@@ -1297,7 +1304,7 @@ func TestAgent_NodeMaintenanceMode(t *testing.T) {
defer agent.Shutdown()

// Enter maintenance mode for the node
agent.EnableNodeMaintenance("broken")
agent.EnableNodeMaintenance("broken", "mytoken")

// Make sure the critical health check was added
check, ok := agent.state.Checks()[nodeMaintCheckID]

@@ -1305,6 +1312,11 @@ func TestAgent_NodeMaintenanceMode(t *testing.T) {
t.Fatalf("should have registered critical node check")
}

// Check that the token was used to register the check
if token := agent.state.CheckToken(nodeMaintCheckID); token != "mytoken" {
t.Fatalf("expected 'mytoken', got: '%s'", token)
}

// Ensure the reason was set in notes
if check.Notes != "broken" {
t.Fatalf("bad: %#v", check)

@@ -1319,7 +1331,7 @@ func TestAgent_NodeMaintenanceMode(t *testing.T) {
}

// Enter maintenance mode without passing a reason
agent.EnableNodeMaintenance("")
agent.EnableNodeMaintenance("", "")

// Make sure the check was registered with the default note
check, ok = agent.state.Checks()[nodeMaintCheckID]
@@ -48,6 +48,7 @@ type Command struct {
httpServers []*HTTPServer
dnsServer *DNSServer
scadaProvider *scada.Provider
scadaHttp *HTTPServer
}

// readConfig is responsible for setup of our configuration using

@@ -80,12 +81,14 @@ func (c *Command) readConfig() *Config {

cmdFlags.StringVar(&cmdConfig.ClientAddr, "client", "", "address to bind client listeners to (DNS, HTTP, HTTPS, RPC)")
cmdFlags.StringVar(&cmdConfig.BindAddr, "bind", "", "address to bind server listeners to")
cmdFlags.IntVar(&cmdConfig.Ports.HTTP, "http-port", 0, "http port to use")
cmdFlags.StringVar(&cmdConfig.AdvertiseAddr, "advertise", "", "address to advertise instead of bind addr")
cmdFlags.StringVar(&cmdConfig.AdvertiseAddrWan, "advertise-wan", "", "address to advertise on wan instead of bind or advertise addr")

cmdFlags.StringVar(&cmdConfig.AtlasInfrastructure, "atlas", "", "infrastructure name in Atlas")
cmdFlags.StringVar(&cmdConfig.AtlasToken, "atlas-token", "", "authentication token for Atlas")
cmdFlags.BoolVar(&cmdConfig.AtlasJoin, "atlas-join", false, "auto-join with Atlas")
cmdFlags.StringVar(&cmdConfig.AtlasEndpoint, "atlas-endpoint", "", "endpoint for Atlas integration")

cmdFlags.IntVar(&cmdConfig.Protocol, "protocol", -1, "protocol version")

@@ -345,20 +348,14 @@ func (c *Command) setupAgent(config *Config, logOutput io.Writer, logWriter *log
c.rpcServer = NewAgentRPC(agent, rpcListener, logOutput, logWriter)

// Enable the SCADA integration
var scadaList net.Listener
if config.AtlasInfrastructure != "" {
provider, list, err := NewProvider(config, logOutput)
if err != nil {
agent.Shutdown()
c.Ui.Error(fmt.Sprintf("Error starting SCADA connection: %s", err))
return err
}
c.scadaProvider = provider
scadaList = list
if err := c.setupScadaConn(config); err != nil {
agent.Shutdown()
c.Ui.Error(fmt.Sprintf("Error starting SCADA connection: %s", err))
return err
}

if config.Ports.HTTP > 0 || config.Ports.HTTPS > 0 || scadaList != nil {
servers, err := NewHTTPServers(agent, config, scadaList, logOutput)
if config.Ports.HTTP > 0 || config.Ports.HTTPS > 0 {
servers, err := NewHTTPServers(agent, config, logOutput)
if err != nil {
agent.Shutdown()
c.Ui.Error(fmt.Sprintf("Error starting http servers: %s", err))

@@ -684,9 +681,16 @@ AFTER_MIGRATE:
for _, server := range c.httpServers {
defer server.Shutdown()
}
if c.scadaProvider != nil {
defer c.scadaProvider.Shutdown()
}

// Check and shut down the SCADA listeners at the end
defer func() {
if c.scadaHttp != nil {
c.scadaHttp.Shutdown()
}
if c.scadaProvider != nil {
c.scadaProvider.Shutdown()
}
}()

// Join startup nodes if specified
if err := c.startupJoin(config); err != nil {

@@ -904,9 +908,46 @@ func (c *Command) handleReload(config *Config) *Config {
}(wp)
}

// Reload SCADA client if we have a change
if newConf.AtlasInfrastructure != config.AtlasInfrastructure ||
newConf.AtlasToken != config.AtlasToken ||
newConf.AtlasEndpoint != config.AtlasEndpoint {
if err := c.setupScadaConn(newConf); err != nil {
c.Ui.Error(fmt.Sprintf("Failed reloading SCADA client: %s", err))
return nil
}
}

return newConf
}

// startScadaClient is used to start a new SCADA provider and listener,
// replacing any existing listeners.
func (c *Command) setupScadaConn(config *Config) error {
// Shut down existing SCADA listeners
if c.scadaProvider != nil {
c.scadaProvider.Shutdown()
}
if c.scadaHttp != nil {
c.scadaHttp.Shutdown()
}

// No-op if we don't have an infrastructure
if config.AtlasInfrastructure == "" {
return nil
}

// Create the new provider and listener
c.Ui.Output("Connecting to Atlas: " + config.AtlasInfrastructure)
provider, list, err := NewProvider(config, c.logOutput)
if err != nil {
return err
}
c.scadaProvider = provider
c.scadaHttp = newScadaHttp(c.agent, list)
return nil
}

func (c *Command) Synopsis() string {
return "Runs a Consul agent"
}

@@ -924,8 +965,10 @@ Options:
-atlas=org/name Sets the Atlas infrastructure name, enables SCADA.
-atlas-join Enables auto-joining the Atlas cluster
-atlas-token=token Provides the Atlas API token
-atlas-endpoint=1.2.3.4 The address of the endpoint for Atlas integration.
-bootstrap Sets server to bootstrap mode
-bind=0.0.0.0 Sets the bind address for cluster communication
-http-port=8500 Sets the HTTP API port to listen on
-bootstrap-expect=0 Sets server to expect bootstrap mode.
-client=127.0.0.1 Sets the address to bind for client access.
This includes RPC, DNS, HTTP and HTTPS (if configured)
@@ -6,6 +6,7 @@ import (
"io/ioutil"
"log"
"os"
"strings"
"testing"

"github.com/hashicorp/consul/testutil"

@@ -246,3 +247,55 @@ func TestSetupAgent_RPCUnixSocket_FileExists(t *testing.T) {
t.Fatalf("bad permissions: %s", fi.Mode())
}
}

func TestSetupScadaConn(t *testing.T) {
// Create a config and assign an infra name
conf1 := nextConfig()
conf1.AtlasInfrastructure = "hashicorp/test1"
conf1.AtlasToken = "abc"

dir, agent := makeAgent(t, conf1)
defer os.RemoveAll(dir)
defer agent.Shutdown()

cmd := &Command{
ShutdownCh: make(chan struct{}),
Ui: new(cli.MockUi),
agent: agent,
}

// First start creates the scada conn
if err := cmd.setupScadaConn(conf1); err != nil {
t.Fatalf("err: %s", err)
}
list := cmd.scadaHttp.listener.(*scadaListener)
if list == nil || list.addr.infra != "hashicorp/test1" {
t.Fatalf("bad: %#v", list)
}
http1 := cmd.scadaHttp
provider1 := cmd.scadaProvider

// Performing setup again tears down original and replaces
// with a new SCADA client.
conf2 := nextConfig()
conf2.AtlasInfrastructure = "hashicorp/test2"
conf2.AtlasToken = "123"
if err := cmd.setupScadaConn(conf2); err != nil {
t.Fatalf("err: %s", err)
}
if cmd.scadaHttp == http1 || cmd.scadaProvider == provider1 {
t.Fatalf("should change: %#v %#v", cmd.scadaHttp, cmd.scadaProvider)
}
list = cmd.scadaHttp.listener.(*scadaListener)
if list == nil || list.addr.infra != "hashicorp/test2" {
t.Fatalf("bad: %#v", list)
}

// Original provider and listener must be closed
if !provider1.IsShutdown() {
t.Fatalf("should be shutdown")
}
if _, err := http1.listener.Accept(); !strings.Contains(err.Error(), "closed") {
t.Fatalf("should be closed")
}
}
@@ -363,6 +363,10 @@ type Config struct {
// to it's cluster. Requires Atlas integration.
AtlasJoin bool `mapstructure:"atlas_join"`

// AtlasEndpoint is the SCADA endpoint used for Atlas integration. If
// empty, the defaults from the provider are used.
AtlasEndpoint string `mapstructure:"atlas_endpoint"`

// AEInterval controls the anti-entropy interval. This is how often
// the agent attempts to reconcile it's local state with the server'
// representation of our state. Defaults to every 60s.

@@ -1056,6 +1060,9 @@ func MergeConfig(a, b *Config) *Config {
if b.AtlasJoin {
result.AtlasJoin = true
}
if b.AtlasEndpoint != "" {
result.AtlasEndpoint = b.AtlasEndpoint
}
if b.SessionTTLMinRaw != "" {
result.SessionTTLMin = b.SessionTTLMin
result.SessionTTLMinRaw = b.SessionTTLMinRaw
@@ -706,7 +706,13 @@ func TestDecodeConfig(t *testing.T) {
}

// Atlas configs
input = `{"atlas_infrastructure": "hashicorp/prod", "atlas_token": "abcdefg", "atlas_acl_token": "123456789", "atlas_join": true}`
input = `{
"atlas_infrastructure": "hashicorp/prod",
"atlas_token": "abcdefg",
"atlas_acl_token": "123456789",
"atlas_join": true,
"atlas_endpoint": "foo.bar:1111"
}`
config, err = DecodeConfig(bytes.NewReader([]byte(input)))
if err != nil {
t.Fatalf("err: %s", err)

@@ -724,6 +730,9 @@ func TestDecodeConfig(t *testing.T) {
if !config.AtlasJoin {
t.Fatalf("bad: %#v", config)
}
if config.AtlasEndpoint != "foo.bar:1111" {
t.Fatalf("bad: %#v", config)
}

// SessionTTLMin
input = `{"session_ttl_min": "5s"}`
@@ -352,9 +352,9 @@ INVALID:

// nodeLookup is used to handle a node query
func (d *DNSServer) nodeLookup(network, datacenter, node string, req, resp *dns.Msg) {
// Only handle ANY and A type requests
// Only handle ANY, A and AAAA type requests
qType := req.Question[0].Qtype
if qType != dns.TypeANY && qType != dns.TypeA {
if qType != dns.TypeANY && qType != dns.TypeA && qType != dns.TypeAAAA {
return
}

@@ -511,9 +511,17 @@ RPC:
// Perform a random shuffle
shuffleServiceNodes(out.Nodes)

// Add various responses depending on the request
qType := req.Question[0].Qtype
d.serviceNodeRecords(out.Nodes, req, resp, ttl)

if qType == dns.TypeSRV {
d.serviceSRVRecords(datacenter, out.Nodes, req, resp, ttl)
}

// If the network is not TCP, restrict the number of responses
if network != "tcp" && len(out.Nodes) > maxServiceResponses {
out.Nodes = out.Nodes[:maxServiceResponses]
if network != "tcp" && len(resp.Answer) > maxServiceResponses {
resp.Answer = resp.Answer[:maxServiceResponses]

// Flag that there are more records to return in the UDP response
if d.config.EnableTruncate {

@@ -521,12 +529,10 @@ RPC:
}
}

// Add various responses depending on the request
qType := req.Question[0].Qtype
d.serviceNodeRecords(out.Nodes, req, resp, ttl)

if qType == dns.TypeSRV {
d.serviceSRVRecords(datacenter, out.Nodes, req, resp, ttl)
// If the answer is empty, return not found
if len(resp.Answer) == 0 {
d.addSOA(d.domain, resp)
return
}
}
@@ -293,7 +293,7 @@ func TestDNS_NodeLookup_AAAA(t *testing.T) {
}

m := new(dns.Msg)
m.SetQuestion("bar.node.consul.", dns.TypeANY)
m.SetQuestion("bar.node.consul.", dns.TypeAAAA)

c := new(dns.Client)
addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)

@@ -1416,6 +1416,72 @@ func TestDNS_ServiceLookup_Truncate(t *testing.T) {
}
}

func TestDNS_ServiceLookup_MaxResponses(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()

testutil.WaitForLeader(t, srv.agent.RPC, "dc1")

// Register nodes
for i := 0; i < 6*maxServiceResponses; i++ {
nodeAddress := fmt.Sprintf("127.0.0.%d", i+1)
if i > 3 {
nodeAddress = fmt.Sprintf("fe80::%d", i+1)
}
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: fmt.Sprintf("foo%d", i),
Address: nodeAddress,
Service: &structs.NodeService{
Service: "web",
Port: 8000,
},
}

var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
}

// Ensure the response is randomized each time.
m := new(dns.Msg)
m.SetQuestion("web.service.consul.", dns.TypeANY)

addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
c := new(dns.Client)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}

if len(in.Answer) != 3 {
t.Fatalf("should receive 3 answers for ANY")
}

m.SetQuestion("web.service.consul.", dns.TypeA)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}

if len(in.Answer) != 3 {
t.Fatalf("should receive 3 answers for A")
}

m.SetQuestion("web.service.consul.", dns.TypeAAAA)
in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}

if len(in.Answer) != 3 {
t.Fatalf("should receive 3 answers for AAAA")
}

}

func TestDNS_ServiceLookup_CNAME(t *testing.T) {
recursor := makeRecursor(t, []dns.RR{
dnsCNAME("www.google.com", "google.com"),

@@ -1933,5 +1999,95 @@ func TestDNS_NonExistingLookup(t *testing.T) {
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}
}

func TestDNS_NonExistingLookupEmptyAorAAAA(t *testing.T) {
dir, srv := makeDNSServer(t)
defer os.RemoveAll(dir)
defer srv.agent.Shutdown()

testutil.WaitForLeader(t, srv.agent.RPC, "dc1")

// register v6 only service
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foov6",
Address: "fe80::1",
Service: &structs.NodeService{
Service: "webv6",
Port: 8000,
},
}

var out struct{}
if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}

// register v4 only service
args = &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foov4",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "webv4",
Port: 8000,
},
}

if err := srv.agent.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}

// check for ipv6 records on ipv4 only service
m := new(dns.Msg)
m.SetQuestion("webv4.service.consul.", dns.TypeAAAA)

addr, _ := srv.agent.config.ClientListener("", srv.agent.config.Ports.DNS)
c := new(dns.Client)
in, _, err := c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}

if len(in.Ns) != 1 {
t.Fatalf("Bad: %#v", in)
}

soaRec, ok := in.Ns[0].(*dns.SOA)
if !ok {
t.Fatalf("Bad: %#v", in.Ns[0])
}
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}

if in.Rcode != dns.RcodeSuccess {
t.Fatalf("Bad: %#v", in)
}

// check for ipv4 records on ipv6 only service
m.SetQuestion("webv6.service.consul.", dns.TypeA)

in, _, err = c.Exchange(m, addr.String())
if err != nil {
t.Fatalf("err: %v", err)
}

if len(in.Ns) != 1 {
t.Fatalf("Bad: %#v", in)
}

soaRec, ok = in.Ns[0].(*dns.SOA)
if !ok {
t.Fatalf("Bad: %#v", in.Ns[0])
}
if soaRec.Hdr.Ttl != 0 {
t.Fatalf("Bad: %#v", in.Ns[0])
}

if in.Rcode != dns.RcodeSuccess {
t.Fatalf("Bad: %#v", in)
}

}
@@ -41,7 +41,7 @@ type HTTPServer struct {

// NewHTTPServers starts new HTTP servers to provide an interface to
// the agent.
func NewHTTPServers(agent *Agent, config *Config, scada net.Listener, logOutput io.Writer) ([]*HTTPServer, error) {
func NewHTTPServers(agent *Agent, config *Config, logOutput io.Writer) ([]*HTTPServer, error) {
var servers []*HTTPServer

if config.Ports.HTTPS > 0 {

@@ -142,29 +142,30 @@ func NewHTTPServers(agent *Agent, config *Config, scada net.Listener, logOutput
servers = append(servers, srv)
}

if scada != nil {
// Create the mux
mux := http.NewServeMux()

// Create the server
srv := &HTTPServer{
agent: agent,
mux: mux,
listener: scada,
logger: log.New(logOutput, "", log.LstdFlags),
uiDir: config.UiDir,
addr: scadaHTTPAddr,
}
srv.registerHandlers(false) // Never allow debug for SCADA

// Start the server
go http.Serve(scada, mux)
servers = append(servers, srv)
}

return servers, nil
}

// newScadaHttp creates a new HTTP server wrapping the SCADA
// listener such that HTTP calls can be sent from the brokers.
func newScadaHttp(agent *Agent, list net.Listener) *HTTPServer {
// Create the mux
mux := http.NewServeMux()

// Create the server
srv := &HTTPServer{
agent: agent,
mux: mux,
listener: list,
logger: agent.logger,
addr: scadaHTTPAddr,
}
srv.registerHandlers(false) // Never allow debug for SCADA

// Start the server
go http.Serve(list, mux)
return srv
}

// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by NewHttpServer so
// dead TCP connections eventually go away.

@@ -269,13 +270,10 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
s.mux.Handle("/ui/", http.StripPrefix("/ui/", http.FileServer(http.Dir(s.uiDir))))
}

// Enable the special endpoints for UI or SCADA
if s.uiDir != "" || s.agent.config.AtlasInfrastructure != "" {
// API's are under /internal/ui/ to avoid conflict
s.mux.HandleFunc("/v1/internal/ui/nodes", s.wrap(s.UINodes))
s.mux.HandleFunc("/v1/internal/ui/node/", s.wrap(s.UINodeInfo))
s.mux.HandleFunc("/v1/internal/ui/services", s.wrap(s.UIServices))
}
// API's are under /internal/ui/ to avoid conflict
s.mux.HandleFunc("/v1/internal/ui/nodes", s.wrap(s.UINodes))
s.mux.HandleFunc("/v1/internal/ui/node/", s.wrap(s.UINodeInfo))
s.mux.HandleFunc("/v1/internal/ui/services", s.wrap(s.UIServices))
}

// wrap is used to wrap functions to make them more convenient
@@ -38,7 +38,7 @@ func makeHTTPServerWithConfig(t *testing.T, cb func(c *Config)) (string, *HTTPSe
t.Fatalf("err: %v", err)
}
conf.UiDir = uiDir
servers, err := NewHTTPServers(agent, conf, nil, agent.logOutput)
servers, err := NewHTTPServers(agent, conf, agent.logOutput)
if err != nil {
t.Fatalf("err: %v", err)
}

@@ -148,7 +148,7 @@ func TestHTTPServer_UnixSocket_FileExists(t *testing.T) {
defer os.RemoveAll(dir)

// Try to start the server with the same path anyways.
if _, err := NewHTTPServers(agent, conf, nil, agent.logOutput); err != nil {
if _, err := NewHTTPServers(agent, conf, agent.logOutput); err != nil {
t.Fatalf("err: %s", err)
}

@@ -516,6 +516,39 @@ func TestACLResolution(t *testing.T) {
})
}

func TestScadaHTTP(t *testing.T) {
// Create the agent
dir, agent := makeAgent(t, nextConfig())
defer os.RemoveAll(dir)
defer agent.Shutdown()

// Create a generic listener
list, err := net.Listen("tcp", ":0")
if err != nil {
t.Fatalf("err: %s", err)
}
defer list.Close()

// Create the SCADA HTTP server
scadaHttp := newScadaHttp(agent, list)

// Returned server uses the listener and scada addr
if scadaHttp.listener != list {
t.Fatalf("bad listener: %#v", scadaHttp)
}
if scadaHttp.addr != scadaHTTPAddr {
t.Fatalf("expected %v, got: %v", scadaHttp.addr, scadaHTTPAddr)
}

// Check that debug endpoints were not enabled. This will cause
// the serve mux to panic if the routes are already handled.
mockFn := func(w http.ResponseWriter, r *http.Request) {}
scadaHttp.mux.HandleFunc("/debug/pprof/", mockFn)
scadaHttp.mux.HandleFunc("/debug/pprof/cmdline", mockFn)
scadaHttp.mux.HandleFunc("/debug/pprof/profile", mockFn)
scadaHttp.mux.HandleFunc("/debug/pprof/symbol", mockFn)
}

// assertIndex tests that X-Consul-Index is set and non-zero
func assertIndex(t *testing.T, resp *httptest.ResponseRecorder) {
header := resp.Header().Get("X-Consul-Index")
@@ -47,6 +47,7 @@ func ProviderConfig(c *Config) *client.ProviderConfig {
Handlers: map[string]client.CapabilityProvider{
"http": nil,
},
Endpoint: c.AtlasEndpoint,
ResourceGroup: c.AtlasInfrastructure,
Token: c.AtlasToken,
}
@@ -43,6 +43,7 @@ func TestProviderConfig(t *testing.T) {
conf.Server = true
conf.AtlasInfrastructure = "armon/test"
conf.AtlasToken = "foobarbaz"
conf.AtlasEndpoint = "foo.bar:1111"
pc := ProviderConfig(conf)

expect := &client.ProviderConfig{

@@ -62,6 +63,7 @@ func TestProviderConfig(t *testing.T) {
Handlers: map[string]client.CapabilityProvider{
"http": nil,
},
Endpoint: "foo.bar:1111",
ResourceGroup: "armon/test",
Token: "foobarbaz",
}
@@ -65,6 +65,7 @@ Options:
a semaphore is used.
-name="" Optional name to associate with lock session.
-token="" ACL token to use. Defaults to that of agent.
-pass-stdin Pass stdin to child process.
-verbose Enables verbose output
`
return strings.TrimSpace(helpText)

@@ -74,11 +75,13 @@ func (c *LockCommand) Run(args []string) int {
var childDone chan struct{}
var name, token string
var limit int
var passStdin bool
cmdFlags := flag.NewFlagSet("watch", flag.ContinueOnError)
cmdFlags.Usage = func() { c.Ui.Output(c.Help()) }
cmdFlags.IntVar(&limit, "n", 1, "")
cmdFlags.StringVar(&name, "name", "", "")
cmdFlags.StringVar(&token, "token", "", "")
cmdFlags.BoolVar(&passStdin, "pass-stdin", false, "")
cmdFlags.BoolVar(&c.verbose, "verbose", false, "")
httpAddr := HTTPAddrFlag(cmdFlags)
if err := cmdFlags.Parse(args); err != nil {

@@ -160,7 +163,7 @@ func (c *LockCommand) Run(args []string) int {
// Start the child process
childDone = make(chan struct{})
go func() {
if err := c.startChild(script, childDone); err != nil {
if err := c.startChild(script, childDone, passStdin); err != nil {
c.Ui.Error(fmt.Sprintf("%s", err))
}
}()

@@ -182,7 +185,10 @@ func (c *LockCommand) Run(args []string) int {
goto RELEASE
}

// Kill the child
// Prevent starting a new child. The lock is never released
// after this point.
c.childLock.Lock()
// Kill any existing child
if err := c.killChild(childDone); err != nil {
c.Ui.Error(fmt.Sprintf("%s", err))
}

@@ -261,7 +267,7 @@ func (c *LockCommand) setupSemaphore(client *api.Client, limit int, prefix, name

// startChild is a long running routine used to start and
// wait for the child process to exit.
func (c *LockCommand) startChild(script string, doneCh chan struct{}) error {
func (c *LockCommand) startChild(script string, doneCh chan struct{}, passStdin bool) error {
defer close(doneCh)
if c.verbose {
c.Ui.Info(fmt.Sprintf("Starting handler '%s'", script))

@@ -277,7 +283,14 @@ func (c *LockCommand) startChild(script string, doneCh chan struct{}) error {
cmd.Env = append(os.Environ(),
"CONSUL_LOCK_HELD=true",
)
cmd.Stdin = nil
if passStdin {
if c.verbose {
c.Ui.Info("Stdin passed to handler process")
}
cmd.Stdin = os.Stdin
} else {
cmd.Stdin = nil
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr

@@ -308,9 +321,7 @@ func (c *LockCommand) startChild(script string, doneCh chan struct{}) error {
// on the first attempt.
func (c *LockCommand) killChild(childDone chan struct{}) error {
// Get the child process
c.childLock.Lock()
child := c.child
c.childLock.Unlock()

// If there is no child process (failed to start), we can quit early
if child == nil {
@@ -70,7 +70,7 @@ func testAgentWithConfig(t *testing.T, cb func(c *agent.Config)) *agentWrapper {

conf.Addresses.HTTP = "127.0.0.1"
httpAddr := fmt.Sprintf("127.0.0.1:%d", conf.Ports.HTTP)
http, err := agent.NewHTTPServers(a, conf, nil, os.Stderr)
http, err := agent.NewHTTPServers(a, conf, os.Stderr)
if err != nil {
os.RemoveAll(dir)
t.Fatalf(fmt.Sprintf("err: %v", err))
@@ -32,7 +32,6 @@ func init() {
protocolVersionMap = map[uint8]uint8{
1: 4,
2: 4,
3: 5,
}
}
@@ -26,7 +26,7 @@ import (
// protocol versions.
const (
ProtocolVersionMin uint8 = 1
ProtocolVersionMax = 3
ProtocolVersionMax = 2
)

const (
@@ -120,9 +120,10 @@ func TestServer_StartStop(t *testing.T) {
config := DefaultConfig()
config.DataDir = dir

private, err := GetPrivateIP()
// Advertise on localhost.
private, _, err := net.ParseCIDR("127.0.0.1/32")
if err != nil {
t.Fatalf("err: %v", err)
t.Fatalf("failed to parse 127.0.0.1 cidr: %v", err)
}

config.RPCAdvertise = &net.TCPAddr{
@@ -169,6 +169,10 @@ func GetPrivateIP() (net.IP, error) {
return nil, fmt.Errorf("Failed to get interface addresses: %v", err)
}

return getPrivateIP(addresses)
}

func getPrivateIP(addresses []net.Addr) (net.IP, error) {
var candidates []net.IP

// Find private IPv4 address

@@ -200,6 +204,7 @@ func GetPrivateIP() (net.IP, error) {
default:
return nil, fmt.Errorf("Multiple private IPs found. Please configure one.")
}

}

// Converts bytes to an integer
@@ -1,6 +1,7 @@
package consul

import (
"errors"
"net"
"regexp"
"testing"

@@ -28,6 +29,74 @@ func TestToLowerList(t *testing.T) {
}
}

func TestGetPrivateIP(t *testing.T) {
ip, _, err := net.ParseCIDR("10.1.2.3/32")
if err != nil {
t.Fatalf("failed to parse private cidr: %v", err)
}

pubIP, _, err := net.ParseCIDR("8.8.8.8/32")
if err != nil {
t.Fatalf("failed to parse public cidr: %v", err)
}

tests := []struct {
addrs []net.Addr
expected net.IP
err error
}{
{
addrs: []net.Addr{
&net.IPAddr{
IP: ip,
},
&net.IPAddr{
IP: pubIP,
},
},
expected: ip,
},
{
addrs: []net.Addr{
&net.IPAddr{
IP: pubIP,
},
},
err: errors.New("No private IP address found"),
},
{
addrs: []net.Addr{
&net.IPAddr{
IP: ip,
},
&net.IPAddr{
IP: ip,
},
&net.IPAddr{
IP: pubIP,
},
},
err: errors.New("Multiple private IPs found. Please configure one."),
},
}

for _, test := range tests {
ip, err := getPrivateIP(test.addrs)
switch {
case test.err != nil && err != nil:
if err.Error() != test.err.Error() {
t.Fatalf("unexpected error: %v != %v", test.err, err)
}
case (test.err == nil && err != nil) || (test.err != nil && err == nil):
t.Fatalf("unexpected error: %v != %v", test.err, err)
default:
if !test.expected.Equal(ip) {
t.Fatalf("unexpected ip: %v != %v", ip, test.expected)
}
}
}
}

func TestIsPrivateIP(t *testing.T) {
if !isPrivateIP("192.168.1.1") {
t.Fatalf("bad")
File diff suppressed because one or more lines are too long
@@ -1,12 +1,12 @@
GIT
remote: git://github.com/hashicorp/middleman-hashicorp.git
revision: 76f0f284ad44cea0457484ea83467192f02daf87
revision: 93983af15fd8d480bec266b50c9211a48e495815
specs:
middleman-hashicorp (0.1.0)
bootstrap-sass (~> 3.3)
builder (~> 3.2)
less (~> 2.6)
middleman (~> 3.3)
middleman (~> 3.4)
middleman-livereload (~> 3.4)
middleman-minify-html (~> 3.4)
middleman-syntax (~> 2.0)

@@ -21,21 +21,25 @@ GIT
GEM
remote: https://rubygems.org/
specs:
activesupport (4.1.12)
i18n (~> 0.6, >= 0.6.9)
activesupport (4.2.4)
i18n (~> 0.7)
json (~> 1.7, >= 1.7.7)
minitest (~> 5.1)
thread_safe (~> 0.1)
thread_safe (~> 0.3, >= 0.3.4)
tzinfo (~> 1.1)
autoprefixer-rails (5.2.1)
autoprefixer-rails (5.2.1.3)
execjs
json
bootstrap-sass (3.3.5.1)
autoprefixer-rails (>= 5.0.0.1)
sass (>= 3.3.0)
builder (3.2.2)
celluloid (0.16.0)
timers (~> 4.0.0)
capybara (2.4.4)
mime-types (>= 1.16)
nokogiri (>= 1.3.3)
rack (>= 1.0.0)
rack-test (>= 0.5.4)
xpath (~> 2.0)
chunky_png (1.3.4)
coffee-script (2.4.1)
coffee-script-source

@@ -59,16 +63,15 @@ GEM
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
erubis (2.7.0)
eventmachine (1.0.7)
execjs (2.5.2)
eventmachine (1.0.8)
execjs (2.6.0)
ffi (1.9.10)
git-version-bump (0.15.1)
haml (4.0.6)
haml (4.0.7)
tilt
hike (1.2.3)
hitimes (1.2.2)
hooks (0.4.0)
uber (~> 0.0.4)
hooks (0.4.1)
uber (~> 0.0.14)
htmlcompressor (0.2.0)
http_parser.rb (0.6.0)
i18n (0.7.0)

@@ -77,34 +80,33 @@ GEM
less (2.6.0)
commonjs (~> 0.2.7)
libv8 (3.16.14.11)
listen (2.10.1)
celluloid (~> 0.16.0)
listen (3.0.3)
rb-fsevent (>= 0.9.3)
rb-inotify (>= 0.9)
middleman (3.3.13)
middleman (3.4.0)
coffee-script (~> 2.2)
compass (>= 1.0.0, < 2.0.0)
compass-import-once (= 1.0.5)
execjs (~> 2.0)
haml (>= 4.0.5)
kramdown (~> 1.2)
middleman-core (= 3.3.13)
middleman-core (= 3.4.0)
middleman-sprockets (>= 3.1.2)
sass (>= 3.4.0, < 4.0)
uglifier (~> 2.5)
middleman-core (3.3.13)
activesupport (~> 4.1.0)
middleman-core (3.4.0)
activesupport (~> 4.1)
bundler (~> 1.1)
capybara (~> 2.4.4)
erubis
hooks (~> 0.3)
i18n (~> 0.7.0)
listen (>= 2.7.9, < 3.0)
listen (~> 3.0.3)
padrino-helpers (~> 0.12.3)
rack (>= 1.4.5, < 2.0)
rack-test (~> 0.6.2)
thor (>= 0.15.2, < 2.0)
tilt (~> 1.4.1, < 2.0)
middleman-livereload (3.4.2)
middleman-livereload (3.4.3)
em-websocket (~> 0.5.1)
middleman-core (>= 3.3)
rack-livereload (~> 0.3.15)

@@ -119,8 +121,12 @@ GEM
middleman-syntax (2.0.0)
middleman-core (~> 3.2)
rouge (~> 1.0)
minitest (5.7.0)
mime-types (2.6.1)
mini_portile (0.6.2)
minitest (5.8.0)
multi_json (1.11.2)
nokogiri (1.6.6.2)
mini_portile (~> 0.6.0)
padrino-helpers (0.12.5)
i18n (~> 0.6, >= 0.6.7)
padrino-support (= 0.12.5)

@@ -128,7 +134,7 @@ GEM
padrino-support (0.12.5)
activesupport (>= 3.1)
rack (1.6.4)
rack-contrib (1.3.0)
rack-contrib (1.4.0)
git-version-bump (~> 0.15)
rack (~> 1.4)
rack-livereload (0.3.16)

@@ -136,16 +142,16 @@ GEM
rack-protection (1.5.3)
rack
rack-rewrite (1.5.1)
rack-ssl-enforcer (0.2.8)
rack-ssl-enforcer (0.2.9)
rack-test (0.6.3)
rack (>= 1.0)
rb-fsevent (0.9.5)
rb-fsevent (0.9.6)
rb-inotify (0.9.5)
ffi (>= 0.5.0)
redcarpet (3.3.2)
ref (2.0.0)
rouge (1.9.1)
sass (3.4.16)
sass (3.4.18)
sprockets (2.12.4)
hike (~> 1.2)
multi_json (~> 1.0)

@@ -166,14 +172,14 @@ GEM
thor (0.19.1)
thread_safe (0.3.5)
tilt (1.4.1)
timers (4.0.1)
hitimes
tzinfo (1.2.2)
thread_safe (~> 0.1)
uber (0.0.13)
uglifier (2.7.1)
uber (0.0.15)
uglifier (2.7.2)
execjs (>= 0.3.0)
json (>= 1.8.0)
xpath (2.0.0)
nokogiri (~> 1.3)

PLATFORMS
ruby
@@ -330,7 +330,8 @@ The return code is 200 on success.

The register endpoint is used to add a new service, with an optional health check,
to the local agent. There is more documentation on services [here](/docs/agent/services.html).
The agent is responsible for managing the status of the service and keeping the Catalog in sync.
The agent is responsible for managing the status of its local services, and for sending updates
about its local services to the servers to keep the global Catalog in sync.

The register endpoint expects a JSON request body to be PUT. The request
body must look like:
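For illustration only (the canonical example lives in the services documentation linked above; the field values here are hypothetical), a minimal registration body might look like:

```text
{
  "ID": "redis1",
  "Name": "redis",
  "Tags": ["primary"],
  "Address": "127.0.0.1",
  "Port": 8000,
  "Check": {
    "Script": "/usr/local/bin/check_redis.py",
    "Interval": "10s"
  }
}
```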
@@ -63,6 +63,13 @@ The options below are all specified on the command-line.
API authentication token. This can also be provided
using the `ATLAS_TOKEN` environment variable. Required for use with Atlas.

* <a name="_atlas_endpoint"></a><a href="#_atlas_endpoint">`-atlas-endpoint`</a> - The endpoint
address used for Atlas integration. Used only if the `-atlas` and
`-atlas-token` options are specified. This is optional, and defaults to the
public Atlas endpoints. This can also be specified using the `SCADA_ENDPOINT`
environment variable. The CLI option takes precedence, followed by the
configuration file directive, and lastly, the environment variable.

* <a name="_bootstrap"></a><a href="#_bootstrap">`-bootstrap`</a> - This flag is used to control if a
server is in "bootstrap" mode. It is important that
no more than one server *per* datacenter be running in this mode. Technically, a server in bootstrap mode
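For illustration, the endpoint could be supplied either way shown below; the host, port, and infrastructure name are hypothetical placeholders, not real Atlas addresses:

```text
# CLI flag (highest precedence)
consul agent -atlas=myorg/prod -atlas-token=$ATLAS_TOKEN -atlas-endpoint=scada.example.com:7223

# Environment variable (lowest precedence)
SCADA_ENDPOINT=scada.example.com:7223 consul agent -atlas=myorg/prod -atlas-token=$ATLAS_TOKEN
```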
@@ -139,6 +146,11 @@ The options below are all specified on the command-line.
initialized with an encryption key, then the provided key is ignored and
a warning will be displayed.

* <a name="_http_port"></a><a href="#_http_port">`-http-port`</a> - the HTTP API port to listen on.
This overrides the default port 8500. This option is very useful when deploying Consul
to an environment which communicates the HTTP port through the environment e.g. PaaS like CloudFoundry, allowing
you to set the port directly via a Procfile.

* <a name="_join"></a><a href="#_join">`-join`</a> - Address of another agent
to join upon starting up. This can be
specified multiple times to specify multiple agents to join. If Consul is
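As a sketch of that PaaS use case (the Procfile entry and data directory below are assumptions, not part of the documentation), the port handed down by the platform can be passed straight through:

```text
web: consul agent -data-dir=/tmp/consul -http-port=$PORT
```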
@@ -351,6 +363,9 @@ definitions support being updated during a reload.
* <a name="atlas_token"></a><a href="#atlas_token">`atlas_token`</a> Equivalent to the
[`-atlas-token` command-line flag](#_atlas_token).

* <a name="atlas_endpoint"></a><a href="#atlas_endpoint">`atlas_endpoint`</a> Equivalent to the
[`-atlas-endpoint` command-line flag](#_atlas_endpoint).

* <a name="bootstrap"></a><a href="#bootstrap">`bootstrap`</a> Equivalent to the
[`-bootstrap` command-line flag](#_bootstrap).

@@ -643,3 +658,6 @@ items which are reloaded include:
* Services
* Watches
* HTTP Client Address
* Atlas Token
* Atlas Infrastructure
* Atlas Endpoint
@@ -22,7 +22,7 @@ getting a better view of what Consul is doing.
Additionally, if the [`statsite_addr` configuration option](/docs/agent/options.html#statsite_addr)
is provided, the telemetry information will be streamed to a
[statsite](http://github.com/armon/statsite) server where it can be
aggregate and flushed to Graphite or any other metrics store.
aggregated and flushed to Graphite or any other metrics store.
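As a minimal sketch (the address below is an assumed local statsite listener, not a documented default), enabling the stream only requires setting that option in the agent configuration:

```text
{
  "statsite_addr": "127.0.0.1:8125"
}
```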
Below is sample output of a telemetry dump:
@@ -60,5 +60,7 @@ The list of available flags are:

* `-token` - ACL token to use. Defaults to that of agent.

* `-pass-stdin` - Pass stdin to child process.

* `-verbose` - Enables verbose output.
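A hypothetical invocation showing the new flag (the KV prefix and child command here are made up for illustration):

```text
echo "payload for the child" | consul lock -pass-stdin locks/demo "cat"
```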
@@ -13,10 +13,21 @@ requires elevated privileges. Instead of running Consul with an administrative
or root account, it is possible to instead forward appropriate queries to Consul,
running on an unprivileged port, from another DNS server.

In this guide, we will demonstrate forwarding from [BIND](https://www.isc.org/downloads/bind/).
In this guide, we will demonstrate forwarding from [BIND](https://www.isc.org/downloads/bind/)
as well as [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html).
For the sake of simplicity, BIND and Consul are running on the same machine in this example,
but this is not required.

It is worth mentioning that, by default, Consul does not resolve DNS
records outside the `.consul.` zone unless the
[recursors](/docs/agent/options.html#recursors) configuration option
has been set. As an example of how this changes Consul's behavior,
suppose a Consul DNS reply includes a CNAME record pointing outside
the `.consul` TLD. The DNS reply will only include CNAME records by
default. By contrast, when `recursors` is set and the upstream resolver is
functioning correctly, Consul will try to resolve CNAMEs and include
any records (e.g. A, AAAA, PTR) for them in its DNS reply.
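For reference, a minimal sketch of that option in the agent configuration (the upstream resolver address is an assumption; use whatever your site relies on):

```text
{
  "recursors": ["8.8.8.8"]
}
```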
### BIND Setup

First, you have to disable DNSSEC so that Consul and BIND can communicate.
@@ -60,6 +71,16 @@ zone "consul" IN {
Here we assume Consul is running with default settings and is serving
DNS on port 8600.

### Dnsmasq Setup

Dnsmasq is typically configured via files in the `/etc/dnsmasq.d` directory. To configure Consul, create the file `/etc/dnsmasq.d/10-consul` with the following contents:

```text
server=/consul/127.0.0.1#8600
```

Once that configuration is created, restart the dnsmasq service.

### Testing

First, perform a DNS query against Consul directly to be sure that the record exists:
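As an illustrative sketch, assuming the agent is serving DNS on localhost at the default port 8600, a direct lookup of the `consul` service could look like:

```text
dig @127.0.0.1 -p 8600 consul.service.consul
```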
@@ -93,6 +93,12 @@ description: |-
<li>
<a href="https://github.com/kelseyhightower/confd">confd</a> - Manage local application configuration files using templates and data from etcd or Consul
</li>
<li>
<a href="https://github.com/CiscoCloud/consul-cli">consul-cli</a> - Command line interface to Consul HTTP API
</li>
<li>
<a href="https://github.com/zeroXten/consul-do">consul-do</a> - Do something, such as run HA cronjobs, based on Consul leadership status
</li>
<li>
<a href="http://xordataexchange.github.io/crypt/">crypt</a> - Store and retrieve encrypted configuration parameters from etcd or Consul
</li>

@@ -120,6 +126,9 @@ description: |-
<li>
<a href="https://github.com/CiscoCloud/mesos-consul">mesos-consul</a> - Service registry bridge for Mesos
</li>
<li>
<a href="http://opennodecloud.com/products/nodefabric.html">NodeFabric</a> - Turnkey CentOS 7 Atomic Host image with integrated Consul, Registrator and HAProxy - enabling rapid MariaDB-Galera and Ceph deployments
</li>
</ul>

<p>