mirror of https://github.com/status-im/consul.git
Merge pull request #1550 from hashicorp/f-devmode
Add a `-dev` mode for easy prototyping
Commit c1fe9092ed
@@ -124,7 +124,7 @@ func Create(config *Config, logOutput io.Writer) (*Agent, error) {
 	if config.Datacenter == "" {
 		return nil, fmt.Errorf("Must configure a Datacenter")
 	}
-	if config.DataDir == "" {
+	if config.DataDir == "" && !config.DevMode {
 		return nil, fmt.Errorf("Must configure a DataDir")
 	}
 
@@ -227,6 +227,9 @@ func (a *Agent) consulConfig() *consul.Config {
 		base = consul.DefaultConfig()
 	}
 
+	// Apply dev mode
+	base.DevMode = a.config.DevMode
+
 	// Override with our config
 	if a.config.Datacenter != "" {
 		base.Datacenter = a.config.Datacenter
@@ -748,7 +751,7 @@ func (a *Agent) AddService(service *structs.NodeService, chkTypes CheckTypes, pe
 	a.state.AddService(service, token)
 
 	// Persist the service to a file
-	if persist {
+	if persist && !a.config.DevMode {
 		if err := a.persistService(service); err != nil {
 			return err
 		}
@@ -958,7 +961,7 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *CheckType, persist
 	a.state.AddCheck(check, token)
 
 	// Persist the check
-	if persist {
+	if persist && !a.config.DevMode {
 		return a.persistCheck(check, chkType)
 	}
 
@@ -1022,6 +1025,10 @@ func (a *Agent) UpdateCheck(checkID, status, output string) error {
 	// Set the status through CheckTTL to reset the TTL
 	check.SetStatus(status, output)
 
+	if a.config.DevMode {
+		return nil
+	}
+
 	// Always persist the state for TTL checks
 	if err := a.persistCheckState(check, status, output); err != nil {
 		return fmt.Errorf("failed persisting state for check %q: %s", checkID, err)
@@ -60,12 +60,14 @@ func (c *Command) readConfig() *Config {
 	var retryInterval string
 	var retryIntervalWan string
 	var dnsRecursors []string
+	var dev bool
 	cmdFlags := flag.NewFlagSet("agent", flag.ContinueOnError)
 	cmdFlags.Usage = func() { c.Ui.Output(c.Help()) }
 
 	cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-file", "json file to read config from")
 	cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-dir", "directory of json files to read")
 	cmdFlags.Var((*AppendSliceValue)(&dnsRecursors), "recursor", "address of an upstream DNS server")
+	cmdFlags.BoolVar(&dev, "dev", false, "development server mode")
 
 	cmdFlags.StringVar(&cmdConfig.LogLevel, "log-level", "", "log level")
 	cmdFlags.StringVar(&cmdConfig.NodeName, "node", "", "node name")
@@ -137,7 +139,13 @@ func (c *Command) readConfig() *Config {
 		cmdConfig.RetryIntervalWan = dur
 	}
 
-	config := DefaultConfig()
+	var config *Config
+	if dev {
+		config = DevConfig()
+	} else {
+		config = DefaultConfig()
+	}
+
 	if len(configFiles) > 0 {
 		fileConfig, err := ReadConfigPaths(configFiles)
 		if err != nil {
@@ -162,7 +170,7 @@ func (c *Command) readConfig() *Config {
 	}
 
 	// Ensure we have a data directory
-	if config.DataDir == "" {
+	if config.DataDir == "" && !dev {
 		c.Ui.Error("Must specify data directory using -data-dir")
 		return nil
 	}
@@ -94,6 +94,10 @@ type DNSConfig struct {
 // Some of this is configurable as CLI flags, but most must
 // be set using a configuration file.
 type Config struct {
+	// DevMode enables a fast-path mode of operation to bring up an in-memory
+	// server with minimal configuration. Useful for developing Consul.
+	DevMode bool `mapstructure:"-"`
+
 	// Bootstrap is used to bring up the first Consul server, and
 	// permits that node to elect itself leader
 	Bootstrap bool `mapstructure:"bootstrap"`
@@ -521,6 +525,18 @@ func DefaultConfig() *Config {
 	}
 }
 
+// DevConfig is used to return a set of configuration to use for dev mode.
+func DevConfig() *Config {
+	conf := DefaultConfig()
+	conf.DevMode = true
+	conf.LogLevel = "DEBUG"
+	conf.Server = true
+	conf.EnableDebug = true
+	conf.DisableAnonymousSignature = true
+	conf.EnableUi = true
+	return conf
+}
+
 // EncryptBytes returns the encryption key configured.
 func (c *Config) EncryptBytes() ([]byte, error) {
 	return base64.StdEncoding.DecodeString(c.EncryptKey)
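For context, a minimal sketch (not part of this diff) of how the new `DevConfig` helper layers on `DefaultConfig`; the import path for the agent package is an assumption:

```go
package main

import (
	"fmt"

	// Assumed import path for the command/agent package touched above.
	"github.com/hashicorp/consul/command/agent"
)

func main() {
	conf := agent.DevConfig()
	// Dev mode starts from DefaultConfig and flips a handful of fields.
	fmt.Println(conf.DevMode, conf.Server, conf.LogLevel) // true true DEBUG
}
```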
@@ -54,6 +54,9 @@ type Config struct {
 	// DataDir is the directory to store our state in
 	DataDir string
 
+	// DevMode is used to enable a development server mode.
+	DevMode bool
+
 	// Node name is the name we use to advertise. Defaults to hostname.
 	NodeName string
 
@@ -110,6 +110,7 @@ type Server struct {
 	raftPeers     raft.PeerStore
 	raftStore     *raftboltdb.BoltStore
 	raftTransport *raft.NetworkTransport
+	raftInmem     *raft.InmemStore
 
 	// reconcileCh is used to pass events from the serf handler
 	// into the leader manager, so that the strong state can be
@@ -173,7 +174,7 @@ func NewServer(config *Config) (*Server, error) {
 	}
 
 	// Check for a data directory!
-	if config.DataDir == "" {
+	if config.DataDir == "" && !config.DevMode {
 		return nil, fmt.Errorf("Config must provide a DataDir")
 	}
 
@@ -301,7 +302,9 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
 	conf.MemberlistConfig.LogOutput = s.config.LogOutput
 	conf.LogOutput = s.config.LogOutput
 	conf.EventCh = ch
-	conf.SnapshotPath = filepath.Join(s.config.DataDir, path)
+	if !s.config.DevMode {
+		conf.SnapshotPath = filepath.Join(s.config.DataDir, path)
+	}
 	conf.ProtocolVersion = protocolVersionMap[s.config.ProtocolVersion]
 	conf.RejoinAfterLeave = s.config.RejoinAfterLeave
 	if wan {
@@ -327,7 +330,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
 // setupRaft is used to setup and initialize Raft
 func (s *Server) setupRaft() error {
 	// If we are in bootstrap mode, enable a single node cluster
-	if s.config.Bootstrap {
+	if s.config.Bootstrap || s.config.DevMode {
 		s.config.RaftConfig.EnableSingleNode = true
 	}
 
@@ -338,6 +341,24 @@ func (s *Server) setupRaft() error {
 		return err
 	}
 
+	// Create a transport layer
+	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
+	s.raftTransport = trans
+
+	var log raft.LogStore
+	var stable raft.StableStore
+	var snap raft.SnapshotStore
+	var peers raft.PeerStore
+
+	if s.config.DevMode {
+		store := raft.NewInmemStore()
+		s.raftInmem = store
+		stable = store
+		log = store
+		snap = raft.NewDiscardSnapshotStore()
+		peers = &raft.StaticPeers{}
+		s.raftPeers = peers
+	} else {
 	// Create the base raft path
 	path := filepath.Join(s.config.DataDir, raftState)
 	if err := ensurePath(path, true); err != nil {
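The dev-mode branch above swaps the BoltDB-backed log and stable stores for a single in-memory store. A rough sketch of that wiring, assuming the `github.com/hashicorp/raft` package Consul already depends on:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/raft"
)

// devStores mirrors the dev-mode branch above: one in-memory store serves as
// both the log store and the stable store, and snapshots are simply discarded.
func devStores() (raft.LogStore, raft.StableStore, raft.SnapshotStore) {
	store := raft.NewInmemStore()
	return store, store, raft.NewDiscardSnapshotStore()
}

func main() {
	logs, stable, snaps := devStores()
	fmt.Printf("%T %T %T\n", logs, stable, snaps)
}
```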
@@ -350,6 +371,7 @@ func (s *Server) setupRaft() error {
 		return err
 	}
 	s.raftStore = store
+	stable = store
 
 	// Wrap the store in a LogCache to improve performance
 	cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
@@ -357,6 +379,7 @@ func (s *Server) setupRaft() error {
 		store.Close()
 		return err
 	}
+	log = cacheStore
 
 	// Create the snapshot store
 	snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
@@ -364,19 +387,20 @@ func (s *Server) setupRaft() error {
 		store.Close()
 		return err
 	}
+	snap = snapshots
 
-	// Create a transport layer
-	trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
-	s.raftTransport = trans
-
 	// Setup the peer store
 	s.raftPeers = raft.NewJSONPeers(path, trans)
+	peers = s.raftPeers
+	}
 
 	// Ensure local host is always included if we are in bootstrap mode
 	if s.config.Bootstrap {
 		peers, err := s.raftPeers.Peers()
 		if err != nil {
-			store.Close()
+			if s.raftStore != nil {
+				s.raftStore.Close()
+			}
 			return err
 		}
 		if !raft.PeerContained(peers, trans.LocalAddr()) {
@@ -388,10 +412,12 @@ func (s *Server) setupRaft() error {
 	s.config.RaftConfig.LogOutput = s.config.LogOutput
 
 	// Setup the Raft store
-	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, cacheStore, store,
-		snapshots, s.raftPeers, trans)
+	s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable,
+		snap, s.raftPeers, trans)
 	if err != nil {
-		store.Close()
+		if s.raftStore != nil {
+			s.raftStore.Close()
+		}
 		trans.Close()
 		return err
 	}
@@ -484,7 +510,9 @@ func (s *Server) Shutdown() error {
 		if err := future.Error(); err != nil {
 			s.logger.Printf("[WARN] consul: Error shutting down raft: %s", err)
 		}
-		s.raftStore.Close()
+		if s.raftStore != nil {
+			s.raftStore.Close()
+		}
 
 		// Clear the peer set on a graceful leave to avoid
 		// triggering elections on a rejoin.
@@ -124,6 +124,12 @@ The options below are all specified on the command-line.
   the use of filesystem locking, meaning some types of mounted folders (e.g. VirtualBox
   shared folders) may not be suitable.
 
+* <a name="_dev"></a><a href="#_dev">`-dev`</a> - Enable development server
+  mode. This is useful for quickly starting a Consul agent with all persistence
+  options turned off, enabling an in-memory server which can be used for rapid
+  prototyping or developing against the API. This mode is **not** intended for
+  production use as it does not write any data to disk.
+
 * <a name="_dc"></a><a href="#_dc">`-dc`</a> - This flag controls the datacenter in
   which the agent is running. If not provided,
   it defaults to "dc1". Consul has first-class support for multiple datacenters, but
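As an illustration of "developing against the API" with a dev-mode agent (not part of this diff; it assumes an agent listening on the default 127.0.0.1:8500):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Query the node catalog of a locally running `consul agent -dev`.
	resp, err := http.Get("http://127.0.0.1:8500/v1/catalog/nodes")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```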
@@ -2,8 +2,11 @@
 layout: "intro"
 page_title: "Run the Agent"
 sidebar_current: "gettingstarted-agent"
-description: |-
-  After Consul is installed, the agent must be run. The agent can either run in server or client mode. Each datacenter must have at least one server, though a cluster of 3 or 5 servers is recommended. A single server deployment is highly discouraged as data loss is inevitable in a failure scenario.
+description: >
+  The Consul agent can run in either server or client mode. Each datacenter
+  must have at least one server, though a cluster of 3 or 5 servers is
+  recommended. A single server deployment is highly discouraged in production
+  as data loss is inevitable in a failure scenario.
 ---
 
 # Run the Consul Agent
@@ -22,34 +25,45 @@ For more detail on bootstrapping a datacenter, see
 
 ## Starting the Agent
 
-For simplicity, we'll run a single Consul agent in server mode:
+For simplicity, we'll start the Consul agent in development mode for now. This
+mode is useful for bringing up a single-node Consul environment quickly and
+easily. It is **not** intended to be used in production as it does not persist
+any state.
 
 ```text
-$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul
-==> WARNING: BootstrapExpect Mode is specified as 1; this is the same as Bootstrap mode.
-==> WARNING: Bootstrap mode enabled! Do not enable unless necessary
+$ consul agent -dev
 ==> Starting Consul agent...
 ==> Starting Consul agent RPC...
 ==> Consul agent running!
 Node name: 'Armons-MacBook-Air'
 Datacenter: 'dc1'
-Server: true (bootstrap: true)
-Client Addr: 127.0.0.1 (HTTP: 8500, DNS: 8600, RPC: 8400)
-Cluster Addr: 10.1.10.38 (LAN: 8301, WAN: 8302)
+Server: true (bootstrap: false)
+Client Addr: 127.0.0.1 (HTTP: 8500, HTTPS: -1, DNS: 8600, RPC: 8400)
+Cluster Addr: 172.20.20.11 (LAN: 8301, WAN: 8302)
+Gossip encrypt: false, RPC-TLS: false, TLS-Incoming: false
+Atlas: <disabled>
 
 ==> Log data will now stream in as it occurs:
 
-[INFO] serf: EventMemberJoin: Armons-MacBook-Air.local 10.1.10.38
-[INFO] raft: Node at 10.1.10.38:8300 [Follower] entering Follower state
-[INFO] consul: adding server for datacenter: dc1, addr: 10.1.10.38:8300
-[ERR] agent: failed to sync remote state: rpc error: No cluster leader
+[INFO] raft: Node at 172.20.20.11:8300 [Follower] entering Follower state
+[INFO] serf: EventMemberJoin: Armons-MacBook-Air 172.20.20.11
+[INFO] consul: adding LAN server Armons-MacBook-Air (Addr: 172.20.20.11:8300) (DC: dc1)
+[INFO] serf: EventMemberJoin: Armons-MacBook-Air.dc1 172.20.20.11
+[INFO] consul: adding WAN server Armons-MacBook-Air.dc1 (Addr: 172.20.20.11:8300) (DC: dc1)
+[ERR] agent: failed to sync remote state: No cluster leader
 [WARN] raft: Heartbeat timeout reached, starting election
-[INFO] raft: Node at 10.1.10.38:8300 [Candidate] entering Candidate state
+[INFO] raft: Node at 172.20.20.11:8300 [Candidate] entering Candidate state
+[DEBUG] raft: Votes needed: 1
+[DEBUG] raft: Vote granted. Tally: 1
 [INFO] raft: Election won. Tally: 1
-[INFO] raft: Node at 10.1.10.38:8300 [Leader] entering Leader state
+[INFO] raft: Node at 172.20.20.11:8300 [Leader] entering Leader state
+[INFO] raft: Disabling EnableSingleNode (bootstrap)
 [INFO] consul: cluster leadership acquired
+[DEBUG] raft: Node 172.20.20.11:8300 updated peer set (2): [172.20.20.11:8300]
+[DEBUG] consul: reset tombstone GC to index 2
 [INFO] consul: New leader elected: Armons-MacBook-Air
 [INFO] consul: member 'Armons-MacBook-Air' joined, marking health alive
+[INFO] agent: Synced service 'consul'
 ```
 
 As you can see, the Consul agent has started and has output some log
@@ -70,8 +84,8 @@ section, but for now, you should only see one member (yourself):
 
 ```text
 $ consul members
-Node                Address          Status  Type    Build  Protocol
-Armons-MacBook-Air  10.1.10.38:8301  alive   server  0.5.1  2
+Node                Address            Status  Type    Build     Protocol  DC
+Armons-MacBook-Air  172.20.20.11:8301  alive   server  0.6.1dev  2         dc1
 ```
 
 The output shows our own node, the address it is running on, its
|
||||||
|
|
||||||
```text
|
```text
|
||||||
$ curl localhost:8500/v1/catalog/nodes
|
$ curl localhost:8500/v1/catalog/nodes
|
||||||
[{"Node":"Armons-MacBook-Air","Address":"10.1.10.38"}]
|
[{"Node":"Armons-MacBook-Air","Address":"172.20.20.11","CreateIndex":3,"ModifyIndex":4}]
|
||||||
```
|
```
|
||||||
|
|
||||||
In addition to the HTTP API, the [DNS interface](/docs/agent/dns.html) can
|
In addition to the HTTP API, the [DNS interface](/docs/agent/dns.html) can
|
||||||
|
@ -104,7 +118,7 @@ $ dig @127.0.0.1 -p 8600 Armons-MacBook-Air.node.consul
|
||||||
;Armons-MacBook-Air.node.consul. IN A
|
;Armons-MacBook-Air.node.consul. IN A
|
||||||
|
|
||||||
;; ANSWER SECTION:
|
;; ANSWER SECTION:
|
||||||
Armons-MacBook-Air.node.consul. 0 IN A 10.1.10.38
|
Armons-MacBook-Air.node.consul. 0 IN A 172.20.20.11
|
||||||
```
|
```
|
||||||
|
|
||||||
## <a name="stopping"></a>Stopping the Agent
|
## <a name="stopping"></a>Stopping the Agent
|
||||||
|
|
|
@@ -0,0 +1 @@
+{"service": {"name": "web", "tags": ["rails"], "port": 80}}
@@ -2,8 +2,11 @@
 layout: "intro"
 page_title: "Consul Cluster"
 sidebar_current: "gettingstarted-join"
-description: |-
-  We've started our first agent and registered and queried a service on that agent. This showed how easy it is to use Consul but didn't show how this could be extended to a scalable, production-grade service discovery infrastructure. In this step, we'll create our first real cluster with multiple members.
+description: >
+  When a Consul agent is started, it begins as an isolated cluster of its own.
+  To learn about other cluster members, the agent must join one or more other
+  nodes using a provided join address. In this step, we will set up a two-node
+  cluster and join the nodes together.
 ---
 
 # Consul Cluster
@@ -40,6 +43,12 @@ of our cluster. We start by logging in to the first node:
 $ vagrant ssh n1
 ```
 
+In our previous examples, we used the [`-dev`
+flag](/docs/agent/options.html#_dev) to quickly set up a development server.
+However, this is not sufficient for use in a clustered environment. We will
+omit the `-dev` flag from here on, and instead specify our clustering flags as
+outlined below.
+
 Each node in a cluster must have a unique name. By default, Consul uses the
 hostname of the machine, but we'll manually override it using the [`-node`
 command-line option](/docs/agent/options.html#_node).
@@ -53,9 +62,17 @@ multiple interfaces, so specifying a `bind` address assures that you will
 never bind Consul to the wrong interface.
 
 The first node will act as our sole server in this cluster, and we indicate
-this with the [`server` switch](/docs/agent/options.html#_server). Finally, we
-add the [`config-dir` flag](/docs/agent/options.html#_config_dir), marking
-where service and check definitions can be found.
+this with the [`server` switch](/docs/agent/options.html#_server).
+
+The [`-bootstrap-expect` flag](/docs/agent/options.html#_bootstrap_expect)
+hints to the Consul server the number of additional server nodes we are
+expecting to join. The purpose of this flag is to delay the bootstrapping of
+the replicated log until the expected number of servers has successfully joined.
+You can read more about this in the [bootstrapping
+guide](/docs/guides/bootstrapping.html).
+
+Finally, we add the [`config-dir` flag](/docs/agent/options.html#_config_dir),
+marking where service and check definitions can be found.
 
 All together, these settings yield a
 [`consul agent`](/docs/commands/agent.html) command like this:
@@ -2,8 +2,11 @@
 layout: "intro"
 page_title: "Registering Services"
 sidebar_current: "gettingstarted-services"
-description: |-
-  In the previous step, we ran our first agent, saw the cluster members (well, our cluster member), and queried that node. Now, we'll register our first service and query that service.
+description: >
+  A service can be registered either by providing a service definition or by
+  making the appropriate calls to the HTTP API. A configuration file is the
+  most common, so we will use this approach to register a service, and then
+  query that service using the REST API and DNS interfaces.
 ---
 
 # Registering Services
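The description above also mentions registering services through the HTTP API rather than a definition file. A hedged sketch of that path (not part of this diff), assuming the agent's standard `/v1/agent/service/register` endpoint on the default address:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Same "web" service as the definition file, registered via the local agent.
	payload := []byte(`{"Name": "web", "Tags": ["rails"], "Port": 80}`)
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:8500/v1/agent/service/register", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "200 OK" if the agent accepted the registration
}
```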
@@ -44,8 +47,7 @@ $ echo '{"service": {"name": "web", "tags": ["rails"], "port": 80}}' \
 Now, restart the agent, providing the configuration directory:
 
 ```text
-$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul \
-  -config-dir /etc/consul.d
+$ consul agent -dev -config-dir /etc/consul.d
 ==> Starting Consul agent...
 ...
 [INFO] agent: Synced service 'web'
@@ -53,7 +55,8 @@ $ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul \
 ```
 
 You'll notice in the output that it "synced" the web service. This means
-that it loaded the information from the configuration.
+that the agent loaded the service definition from the configuration file,
+and has successfully registered it in the service catalog.
 
 If you wanted to register multiple services, you could create multiple
 service definition files in the Consul configuration directory.
@@ -100,10 +103,10 @@ $ dig @127.0.0.1 -p 8600 web.service.consul SRV
 ;web.service.consul.	IN	SRV
 
 ;; ANSWER SECTION:
-web.service.consul.	0	IN	SRV	1 1 80 agent-one.node.dc1.consul.
+web.service.consul.	0	IN	SRV	1 1 80 Armons-MacBook-Air.node.dc1.consul.
 
 ;; ADDITIONAL SECTION:
-agent-one.node.dc1.consul.	0	IN	A	172.20.20.11
+Armons-MacBook-Air.node.dc1.consul.	0	IN	A	172.20.20.11
 ```
 
 The `SRV` record says that the web service is running on port 80 and exists on