diff --git a/command/agent/agent.go b/command/agent/agent.go
index afe247e1d0..b74a1dcf10 100644
--- a/command/agent/agent.go
+++ b/command/agent/agent.go
@@ -124,7 +124,7 @@ func Create(config *Config, logOutput io.Writer) (*Agent, error) {
if config.Datacenter == "" {
return nil, fmt.Errorf("Must configure a Datacenter")
}
- if config.DataDir == "" {
+ if config.DataDir == "" && !config.DevMode {
return nil, fmt.Errorf("Must configure a DataDir")
}
@@ -227,6 +227,9 @@ func (a *Agent) consulConfig() *consul.Config {
base = consul.DefaultConfig()
}
+ // Apply dev mode
+ base.DevMode = a.config.DevMode
+
// Override with our config
if a.config.Datacenter != "" {
base.Datacenter = a.config.Datacenter
@@ -748,7 +751,7 @@ func (a *Agent) AddService(service *structs.NodeService, chkTypes CheckTypes, pe
a.state.AddService(service, token)
// Persist the service to a file
- if persist {
+ if persist && !a.config.DevMode {
if err := a.persistService(service); err != nil {
return err
}
@@ -958,7 +961,7 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *CheckType, persist
a.state.AddCheck(check, token)
// Persist the check
- if persist {
+ if persist && !a.config.DevMode {
return a.persistCheck(check, chkType)
}
@@ -1022,6 +1025,10 @@ func (a *Agent) UpdateCheck(checkID, status, output string) error {
// Set the status through CheckTTL to reset the TTL
check.SetStatus(status, output)
+ if a.config.DevMode {
+ return nil
+ }
+
// Always persist the state for TTL checks
if err := a.persistCheckState(check, status, output); err != nil {
return fmt.Errorf("failed persisting state for check %q: %s", checkID, err)
diff --git a/command/agent/command.go b/command/agent/command.go
index 050be0d3c8..ffdfcef070 100644
--- a/command/agent/command.go
+++ b/command/agent/command.go
@@ -60,12 +60,14 @@ func (c *Command) readConfig() *Config {
var retryInterval string
var retryIntervalWan string
var dnsRecursors []string
+ var dev bool
cmdFlags := flag.NewFlagSet("agent", flag.ContinueOnError)
cmdFlags.Usage = func() { c.Ui.Output(c.Help()) }
cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-file", "json file to read config from")
cmdFlags.Var((*AppendSliceValue)(&configFiles), "config-dir", "directory of json files to read")
cmdFlags.Var((*AppendSliceValue)(&dnsRecursors), "recursor", "address of an upstream DNS server")
+ cmdFlags.BoolVar(&dev, "dev", false, "development server mode")
cmdFlags.StringVar(&cmdConfig.LogLevel, "log-level", "", "log level")
cmdFlags.StringVar(&cmdConfig.NodeName, "node", "", "node name")
@@ -137,7 +139,13 @@ func (c *Command) readConfig() *Config {
cmdConfig.RetryIntervalWan = dur
}
- config := DefaultConfig()
+ var config *Config
+ if dev {
+ config = DevConfig()
+ } else {
+ config = DefaultConfig()
+ }
+
if len(configFiles) > 0 {
fileConfig, err := ReadConfigPaths(configFiles)
if err != nil {
@@ -162,7 +170,7 @@ func (c *Command) readConfig() *Config {
}
// Ensure we have a data directory
- if config.DataDir == "" {
+ if config.DataDir == "" && !dev {
c.Ui.Error("Must specify data directory using -data-dir")
return nil
}
diff --git a/command/agent/config.go b/command/agent/config.go
index 79836f8811..2f18c2dc3e 100644
--- a/command/agent/config.go
+++ b/command/agent/config.go
@@ -94,6 +94,10 @@ type DNSConfig struct {
// Some of this is configurable as CLI flags, but most must
// be set using a configuration file.
type Config struct {
+ // DevMode enables a fast-path mode of operation to bring up an in-memory
+ // server with minimal configuration. Useful for developing Consul.
+ DevMode bool `mapstructure:"-"`
+
// Bootstrap is used to bring up the first Consul server, and
// permits that node to elect itself leader
Bootstrap bool `mapstructure:"bootstrap"`
@@ -521,6 +525,18 @@ func DefaultConfig() *Config {
}
}
+// DevConfig is used to return a configuration to use for dev mode.
+func DevConfig() *Config {
+ conf := DefaultConfig()
+ conf.DevMode = true
+ conf.LogLevel = "DEBUG"
+ conf.Server = true
+ conf.EnableDebug = true
+ conf.DisableAnonymousSignature = true
+ conf.EnableUi = true
+ return conf
+}
+
// EncryptBytes returns the encryption key configured.
func (c *Config) EncryptBytes() ([]byte, error) {
return base64.StdEncoding.DecodeString(c.EncryptKey)
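For orientation, a minimal sketch (not part of the patch) of how the new pieces fit together: because `DevConfig` sets `DevMode`, the `DataDir` check in `Create` is skipped and the agent can be brought up without touching disk. The import path and error handling below are illustrative assumptions.

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/consul/command/agent"
)

func main() {
	// DevConfig returns a server-mode config with DevMode set, so
	// Create does not require a DataDir and keeps state in memory.
	conf := agent.DevConfig()
	a, err := agent.Create(conf, os.Stderr)
	if err != nil {
		log.Fatalf("failed to create dev agent: %v", err)
	}
	defer a.Shutdown()
}
```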
diff --git a/consul/config.go b/consul/config.go
index bfb3fa809b..78b3fc8753 100644
--- a/consul/config.go
+++ b/consul/config.go
@@ -54,6 +54,9 @@ type Config struct {
// DataDir is the directory to store our state in
DataDir string
+ // DevMode is used to enable a development server mode.
+ DevMode bool
+
// Node name is the name we use to advertise. Defaults to hostname.
NodeName string
diff --git a/consul/server.go b/consul/server.go
index eec5802127..7a59cb5945 100644
--- a/consul/server.go
+++ b/consul/server.go
@@ -110,6 +110,7 @@ type Server struct {
raftPeers raft.PeerStore
raftStore *raftboltdb.BoltStore
raftTransport *raft.NetworkTransport
+ raftInmem *raft.InmemStore
// reconcileCh is used to pass events from the serf handler
// into the leader manager, so that the strong state can be
@@ -173,7 +174,7 @@ func NewServer(config *Config) (*Server, error) {
}
// Check for a data directory!
- if config.DataDir == "" {
+ if config.DataDir == "" && !config.DevMode {
return nil, fmt.Errorf("Config must provide a DataDir")
}
@@ -301,7 +302,9 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
conf.MemberlistConfig.LogOutput = s.config.LogOutput
conf.LogOutput = s.config.LogOutput
conf.EventCh = ch
- conf.SnapshotPath = filepath.Join(s.config.DataDir, path)
+ if !s.config.DevMode {
+ conf.SnapshotPath = filepath.Join(s.config.DataDir, path)
+ }
conf.ProtocolVersion = protocolVersionMap[s.config.ProtocolVersion]
conf.RejoinAfterLeave = s.config.RejoinAfterLeave
if wan {
@@ -327,7 +330,7 @@ func (s *Server) setupSerf(conf *serf.Config, ch chan serf.Event, path string, w
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
// If we are in bootstrap mode, enable a single node cluster
- if s.config.Bootstrap {
+ if s.config.Bootstrap || s.config.DevMode {
s.config.RaftConfig.EnableSingleNode = true
}
@@ -338,45 +341,66 @@ func (s *Server) setupRaft() error {
return err
}
- // Create the base raft path
- path := filepath.Join(s.config.DataDir, raftState)
- if err := ensurePath(path, true); err != nil {
- return err
- }
-
- // Create the backend raft store for logs and stable storage
- store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
- if err != nil {
- return err
- }
- s.raftStore = store
-
- // Wrap the store in a LogCache to improve performance
- cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
- if err != nil {
- store.Close()
- return err
- }
-
- // Create the snapshot store
- snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
- if err != nil {
- store.Close()
- return err
- }
-
// Create a transport layer
trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
s.raftTransport = trans
- // Setup the peer store
- s.raftPeers = raft.NewJSONPeers(path, trans)
+ var log raft.LogStore
+ var stable raft.StableStore
+ var snap raft.SnapshotStore
+ var peers raft.PeerStore
+
+ if s.config.DevMode {
+ store := raft.NewInmemStore()
+ s.raftInmem = store
+ stable = store
+ log = store
+ snap = raft.NewDiscardSnapshotStore()
+ peers = &raft.StaticPeers{}
+ s.raftPeers = peers
+ } else {
+ // Create the base raft path
+ path := filepath.Join(s.config.DataDir, raftState)
+ if err := ensurePath(path, true); err != nil {
+ return err
+ }
+
+ // Create the backend raft store for logs and stable storage
+ store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
+ if err != nil {
+ return err
+ }
+ s.raftStore = store
+ stable = store
+
+ // Wrap the store in a LogCache to improve performance
+ cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
+ if err != nil {
+ store.Close()
+ return err
+ }
+ log = cacheStore
+
+ // Create the snapshot store
+ snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
+ if err != nil {
+ store.Close()
+ return err
+ }
+ snap = snapshots
+
+ // Setup the peer store
+ s.raftPeers = raft.NewJSONPeers(path, trans)
+ peers = s.raftPeers
+ }
// Ensure local host is always included if we are in bootstrap mode
if s.config.Bootstrap {
peers, err := s.raftPeers.Peers()
if err != nil {
- store.Close()
+ if s.raftStore != nil {
+ s.raftStore.Close()
+ }
return err
}
if !raft.PeerContained(peers, trans.LocalAddr()) {
@@ -388,10 +412,12 @@ func (s *Server) setupRaft() error {
s.config.RaftConfig.LogOutput = s.config.LogOutput
// Setup the Raft store
- s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, cacheStore, store,
- snapshots, s.raftPeers, trans)
+ s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, log, stable,
+ snap, s.raftPeers, trans)
if err != nil {
- store.Close()
+ if s.raftStore != nil {
+ s.raftStore.Close()
+ }
trans.Close()
return err
}
@@ -484,7 +510,9 @@ func (s *Server) Shutdown() error {
if err := future.Error(); err != nil {
s.logger.Printf("[WARN] consul: Error shutting down raft: %s", err)
}
- s.raftStore.Close()
+ if s.raftStore != nil {
+ s.raftStore.Close()
+ }
// Clear the peer set on a graceful leave to avoid
// triggering elections on a rejoin.
diff --git a/website/source/docs/agent/options.html.markdown b/website/source/docs/agent/options.html.markdown
index c2b85a2396..e7703cd290 100644
--- a/website/source/docs/agent/options.html.markdown
+++ b/website/source/docs/agent/options.html.markdown
@@ -124,6 +124,12 @@ The options below are all specified on the command-line.
the use of filesystem locking, meaning some types of mounted folders (e.g. VirtualBox
shared folders) may not be suitable.
+* `-dev` - Enable development server
+ mode. This is useful for quickly starting a Consul agent with all persistence
+ options turned off, enabling an in-memory server which can be used for rapid
+ prototyping or developing against the API. This mode is **not** intended for
+ production use as it does not write any data to disk.
+
* `-dc` - This flag controls the datacenter in
which the agent is running. If not provided,
it defaults to "dc1". Consul has first-class support for multiple datacenters, but
diff --git a/website/source/intro/getting-started/agent.html.markdown b/website/source/intro/getting-started/agent.html.markdown
index 28ddff23db..920e0f6892 100644
--- a/website/source/intro/getting-started/agent.html.markdown
+++ b/website/source/intro/getting-started/agent.html.markdown
@@ -2,8 +2,11 @@
layout: "intro"
page_title: "Run the Agent"
sidebar_current: "gettingstarted-agent"
-description: |-
- After Consul is installed, the agent must be run. The agent can either run in server or client mode. Each datacenter must have at least one server, though a cluster of 3 or 5 servers is recommended. A single server deployment is highly discouraged as data loss is inevitable in a failure scenario.
+description: >
+ The Consul agent can run in either server or client mode. Each datacenter
+ must have at least one server, though a cluster of 3 or 5 servers is
+ recommended. A single server deployment is highly discouraged in production
+ as data loss is inevitable in a failure scenario.
---
# Run the Consul Agent
@@ -22,34 +25,45 @@ For more detail on bootstrapping a datacenter, see
## Starting the Agent
-For simplicity, we'll run a single Consul agent in server mode:
+For simplicity, we'll start the Consul agent in development mode for now. This
+mode is useful for bringing up a single-node Consul environment quickly and
+easily. It is **not** intended to be used in production as it does not persist
+any state.
```text
-$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul
-==> WARNING: BootstrapExpect Mode is specified as 1; this is the same as Bootstrap mode.
-==> WARNING: Bootstrap mode enabled! Do not enable unless necessary
+$ consul agent -dev
==> Starting Consul agent...
==> Starting Consul agent RPC...
==> Consul agent running!
- Node name: 'Armons-MacBook-Air'
- Datacenter: 'dc1'
- Server: true (bootstrap: true)
- Client Addr: 127.0.0.1 (HTTP: 8500, DNS: 8600, RPC: 8400)
- Cluster Addr: 10.1.10.38 (LAN: 8301, WAN: 8302)
+ Node name: 'Armons-MacBook-Air'
+ Datacenter: 'dc1'
+ Server: true (bootstrap: false)
+ Client Addr: 127.0.0.1 (HTTP: 8500, HTTPS: -1, DNS: 8600, RPC: 8400)
+ Cluster Addr: 172.20.20.11 (LAN: 8301, WAN: 8302)
+ Gossip encrypt: false, RPC-TLS: false, TLS-Incoming: false
+ Atlas:
==> Log data will now stream in as it occurs:
-[INFO] serf: EventMemberJoin: Armons-MacBook-Air.local 10.1.10.38
-[INFO] raft: Node at 10.1.10.38:8300 [Follower] entering Follower state
-[INFO] consul: adding server for datacenter: dc1, addr: 10.1.10.38:8300
-[ERR] agent: failed to sync remote state: rpc error: No cluster leader
+[INFO] raft: Node at 172.20.20.11:8300 [Follower] entering Follower state
+[INFO] serf: EventMemberJoin: Armons-MacBook-Air 172.20.20.11
+[INFO] consul: adding LAN server Armons-MacBook-Air (Addr: 172.20.20.11:8300) (DC: dc1)
+[INFO] serf: EventMemberJoin: Armons-MacBook-Air.dc1 172.20.20.11
+[INFO] consul: adding WAN server Armons-MacBook-Air.dc1 (Addr: 172.20.20.11:8300) (DC: dc1)
+[ERR] agent: failed to sync remote state: No cluster leader
[WARN] raft: Heartbeat timeout reached, starting election
-[INFO] raft: Node at 10.1.10.38:8300 [Candidate] entering Candidate state
+[INFO] raft: Node at 172.20.20.11:8300 [Candidate] entering Candidate state
+[DEBUG] raft: Votes needed: 1
+[DEBUG] raft: Vote granted. Tally: 1
[INFO] raft: Election won. Tally: 1
-[INFO] raft: Node at 10.1.10.38:8300 [Leader] entering Leader state
+[INFO] raft: Node at 172.20.20.11:8300 [Leader] entering Leader state
+[INFO] raft: Disabling EnableSingleNode (bootstrap)
[INFO] consul: cluster leadership acquired
+[DEBUG] raft: Node 172.20.20.11:8300 updated peer set (2): [172.20.20.11:8300]
+[DEBUG] consul: reset tombstone GC to index 2
[INFO] consul: New leader elected: Armons-MacBook-Air
[INFO] consul: member 'Armons-MacBook-Air' joined, marking health alive
+[INFO] agent: Synced service 'consul'
```
As you can see, the Consul agent has started and has output some log
@@ -70,8 +84,8 @@ section, but for now, you should only see one member (yourself):
```text
$ consul members
-Node Address Status Type Build Protocol
-Armons-MacBook-Air 10.1.10.38:8301 alive server 0.5.1 2
+Node Address Status Type Build Protocol DC
+Armons-MacBook-Air 172.20.20.11:8301 alive server 0.6.1dev 2 dc1
```
The output shows our own node, the address it is running on, its
@@ -87,7 +101,7 @@ request to the Consul servers:
```text
$ curl localhost:8500/v1/catalog/nodes
-[{"Node":"Armons-MacBook-Air","Address":"10.1.10.38"}]
+[{"Node":"Armons-MacBook-Air","Address":"172.20.20.11","CreateIndex":3,"ModifyIndex":4}]
```
In addition to the HTTP API, the [DNS interface](/docs/agent/dns.html) can
@@ -104,7 +118,7 @@ $ dig @127.0.0.1 -p 8600 Armons-MacBook-Air.node.consul
;Armons-MacBook-Air.node.consul. IN A
;; ANSWER SECTION:
-Armons-MacBook-Air.node.consul. 0 IN A 10.1.10.38
+Armons-MacBook-Air.node.consul. 0 IN A 172.20.20.11
```
## Stopping the Agent
diff --git a/website/source/intro/getting-started/consul.d/web.json b/website/source/intro/getting-started/consul.d/web.json
new file mode 100644
index 0000000000..1dc4674f97
--- /dev/null
+++ b/website/source/intro/getting-started/consul.d/web.json
@@ -0,0 +1 @@
+{"service": {"name": "web", "tags": ["rails"], "port": 80}}
diff --git a/website/source/intro/getting-started/join.html.markdown b/website/source/intro/getting-started/join.html.markdown
index a4c3dc368a..d7a68d1cae 100644
--- a/website/source/intro/getting-started/join.html.markdown
+++ b/website/source/intro/getting-started/join.html.markdown
@@ -2,8 +2,11 @@
layout: "intro"
page_title: "Consul Cluster"
sidebar_current: "gettingstarted-join"
-description: |-
- We've started our first agent and registered and queried a service on that agent. This showed how easy it is to use Consul but didn't show how this could be extended to a scalable, production-grade service discovery infrastructure. In this step, we'll create our first real cluster with multiple members.
+description: >
+ When a Consul agent is started, it begins as an isolated cluster of its own.
+ To learn about other cluster members, the agent must join one or more other
+ nodes using a provided join address. In this step, we will set up a two-node
+ cluster and join the nodes together.
---
# Consul Cluster
@@ -40,6 +43,12 @@ of our cluster. We start by logging in to the first node:
$ vagrant ssh n1
```
+In our previous examples, we used the [`-dev`
+flag](/docs/agent/options.html#_dev) to quickly set up a development server.
+However, this is not sufficient for use in a clustered environment. We will
+omit the `-dev` flag from here on, and instead specify our clustering flags as
+outlined below.
+
Each node in a cluster must have a unique name. By default, Consul uses the
hostname of the machine, but we'll manually override it using the [`-node`
command-line option](/docs/agent/options.html#_node).
@@ -53,9 +62,17 @@ multiple interfaces, so specifying a `bind` address assures that you will
never bind Consul to the wrong interface.
The first node will act as our sole server in this cluster, and we indicate
-this with the [`server` switch](/docs/agent/options.html#_server). Finally, we
-add the [`config-dir` flag](/docs/agent/options.html#_config_dir), marking
-where service and check definitions can be found.
+this with the [`server` switch](/docs/agent/options.html#_server).
+
+The [`-bootstrap-expect` flag](/docs/agent/options.html#_bootstrap_expect)
+hints to the Consul server how many additional server nodes we expect to
+join. The purpose of this flag is to delay bootstrapping of the replicated
+log until the expected number of servers has successfully joined.
+You can read more about this in the [bootstrapping
+guide](/docs/guides/bootstrapping.html).
+
+Finally, we add the [`config-dir` flag](/docs/agent/options.html#_config_dir),
+marking where service and check definitions can be found.
All together, these settings yield a
[`consul agent`](/docs/commands/agent.html) command like this:
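A rough illustration (the node name, bind address, and data directory below are placeholders, not necessarily the values used in the guide):

```text
$ consul agent -server -bootstrap-expect=1 \
    -data-dir=/tmp/consul -node=agent-one -bind=172.20.20.10 \
    -config-dir /etc/consul.d
```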
diff --git a/website/source/intro/getting-started/services.html.markdown b/website/source/intro/getting-started/services.html.markdown
index 38924a9abd..70f77c695b 100644
--- a/website/source/intro/getting-started/services.html.markdown
+++ b/website/source/intro/getting-started/services.html.markdown
@@ -2,8 +2,11 @@
layout: "intro"
page_title: "Registering Services"
sidebar_current: "gettingstarted-services"
-description: |-
- In the previous step, we ran our first agent, saw the cluster members (well, our cluster member), and queried that node. Now, we'll register our first service and query that service.
+description: >
+ A service can be registered either by providing a service definition or by
+ making the appropriate calls to the HTTP API. A service definition file is
+ the most common approach, so we will use it to register a service and then
+ query that service using the REST API and DNS interfaces.
---
# Registering Services
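The description above also mentions registering services through the HTTP API rather than a definition file. For reference, a hedged sketch of that path using the agent's `/v1/agent/service/register` endpoint (field names follow the API's capitalized form; the payload mirrors the `web.json` definition used below):

```text
$ curl -X PUT -d '{"Name": "web", "Tags": ["rails"], "Port": 80}' \
    http://localhost:8500/v1/agent/service/register
```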
@@ -44,8 +47,7 @@ $ echo '{"service": {"name": "web", "tags": ["rails"], "port": 80}}' \
Now, restart the agent, providing the configuration directory:
```text
-$ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul \
- -config-dir /etc/consul.d
+$ consul agent -dev -config-dir /etc/consul.d
==> Starting Consul agent...
...
[INFO] agent: Synced service 'web'
@@ -53,7 +55,8 @@ $ consul agent -server -bootstrap-expect 1 -data-dir /tmp/consul \
```
You'll notice in the output that it "synced" the web service. This means
-that it loaded the information from the configuration.
+that the agent loaded the service definition from the configuration file
+and successfully registered it in the service catalog.
If you wanted to register multiple services, you could create multiple
service definition files in the Consul configuration directory.
@@ -97,13 +100,13 @@ $ dig @127.0.0.1 -p 8600 web.service.consul SRV
...
;; QUESTION SECTION:
-;web.service.consul. IN SRV
+;web.service.consul. IN SRV
;; ANSWER SECTION:
-web.service.consul. 0 IN SRV 1 1 80 agent-one.node.dc1.consul.
+web.service.consul. 0 IN SRV 1 1 80 Armons-MacBook-Air.node.dc1.consul.
;; ADDITIONAL SECTION:
-agent-one.node.dc1.consul. 0 IN A 172.20.20.11
+Armons-MacBook-Air.node.dc1.consul. 0 IN A 172.20.20.11
```
The `SRV` record says that the web service is running on port 80 and exists on