2018-04-03 18:10:59 +00:00
|
|
|
package proxy
|
|
|
|
|
|
|
|
import (
|
2018-04-26 13:01:20 +00:00
|
|
|
"crypto/x509"
|
2018-04-03 18:10:59 +00:00
|
|
|
|
|
|
|
"github.com/hashicorp/consul/api"
|
|
|
|
"github.com/hashicorp/consul/connect"
|
2018-06-08 15:18:58 +00:00
|
|
|
"github.com/hashicorp/consul/lib"
|
2020-01-28 23:50:41 +00:00
|
|
|
"github.com/hashicorp/go-hclog"
|
2018-04-03 18:10:59 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Proxy implements the built-in connect proxy.
type Proxy struct {
	// client is the Consul API client; it is handed to the Service constructor
	// and to each upstream listener.
	client *api.Client

	// cfgWatcher supplies proxy configuration; Serve blocks on its Watch()
	// channel and reconfigures whenever a new *Config arrives.
	cfgWatcher ConfigWatcher

	// stopChan is closed by Close to terminate Serve and every listener
	// goroutine tied to it.
	stopChan chan struct{}

	// logger receives all proxy log output.
	logger hclog.Logger

	// service is the connect Service created from the first config received in
	// Serve; it is closed (if set) by Close.
	service *connect.Service
}
|
|
|
|
|
2018-05-19 07:11:51 +00:00
|
|
|
// New returns a proxy with the given configuration source.
|
|
|
|
//
|
|
|
|
// The ConfigWatcher can be used to update the configuration of the proxy.
|
|
|
|
// Whenever a new configuration is detected, the proxy will reconfigure itself.
|
2020-01-28 23:50:41 +00:00
|
|
|
func New(client *api.Client, cw ConfigWatcher, logger hclog.Logger) (*Proxy, error) {
|
2018-05-19 07:11:51 +00:00
|
|
|
return &Proxy{
|
2018-04-26 13:01:20 +00:00
|
|
|
client: client,
|
|
|
|
cfgWatcher: cw,
|
|
|
|
stopChan: make(chan struct{}),
|
|
|
|
logger: logger,
|
2018-05-19 07:11:51 +00:00
|
|
|
}, nil
|
2018-04-03 18:10:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Serve the proxy instance until a fatal error occurs or proxy is closed.
|
|
|
|
func (p *Proxy) Serve() error {
|
|
|
|
var cfg *Config
|
|
|
|
|
2018-06-15 20:04:04 +00:00
|
|
|
// failCh is used to stop Serve and return an error from another goroutine we
|
|
|
|
// spawn.
|
|
|
|
failCh := make(chan error, 1)
|
|
|
|
|
2018-04-03 18:10:59 +00:00
|
|
|
// Watch for config changes (initial setup happens on first "change")
|
|
|
|
for {
|
|
|
|
select {
|
2018-06-15 20:04:04 +00:00
|
|
|
case err := <-failCh:
|
|
|
|
// don't log here, we can log with better context at the point where we
|
|
|
|
// write the err to the chan
|
|
|
|
return err
|
|
|
|
|
2018-04-03 18:10:59 +00:00
|
|
|
case newCfg := <-p.cfgWatcher.Watch():
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Debug("got new config")
|
2018-04-26 13:01:20 +00:00
|
|
|
|
2018-04-03 18:10:59 +00:00
|
|
|
if cfg == nil {
|
|
|
|
// Initial setup
|
|
|
|
|
2018-06-08 15:18:58 +00:00
|
|
|
// Setup telemetry if configured
|
2020-11-16 23:54:50 +00:00
|
|
|
// NOTE(kit): As far as I can tell, all of the metrics in the proxy are generated at runtime, so we
|
|
|
|
// don't have any static metrics we initialize at start.
|
2018-06-14 12:52:48 +00:00
|
|
|
_, err := lib.InitTelemetry(newCfg.Telemetry)
|
|
|
|
if err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Error("proxy telemetry config error", "error", err)
|
2018-06-08 15:18:58 +00:00
|
|
|
}
|
|
|
|
|
2018-04-26 13:01:20 +00:00
|
|
|
// Setup Service instance now we know target ID etc
|
2018-05-19 07:20:43 +00:00
|
|
|
service, err := newCfg.Service(p.client, p.logger)
|
2018-04-26 13:01:20 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
p.service = service
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
<-service.ReadyWait()
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Info("Proxy loaded config and ready to serve")
|
2018-04-26 13:01:20 +00:00
|
|
|
tcfg := service.ServerTLSConfig()
|
|
|
|
cert, _ := tcfg.GetCertificate(nil)
|
|
|
|
leaf, _ := x509.ParseCertificate(cert.Certificate[0])
|
2018-09-27 14:00:51 +00:00
|
|
|
roots, err := connect.CommonNamesFromCertPool(tcfg.RootCAs)
|
|
|
|
if err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Error("Failed to parse root subjects", "error", err)
|
2018-09-27 14:00:51 +00:00
|
|
|
} else {
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Info("Parsed TLS identity", "uri", leaf.URIs[0], "roots", roots)
|
2018-09-27 14:00:51 +00:00
|
|
|
}
|
2018-04-26 13:01:20 +00:00
|
|
|
|
2018-06-15 20:04:04 +00:00
|
|
|
// Only start a listener if we have a port set. This allows
|
|
|
|
// the configuration to disable our public listener.
|
|
|
|
if newCfg.PublicListener.BindPort != 0 {
|
|
|
|
newCfg.PublicListener.applyDefaults()
|
|
|
|
l := NewPublicListener(p.service, newCfg.PublicListener, p.logger)
|
|
|
|
err = p.startListener("public listener", l)
|
|
|
|
if err != nil {
|
|
|
|
// This should probably be fatal.
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Error("failed to start public listener", "error", err)
|
2018-06-15 20:04:04 +00:00
|
|
|
failCh <- err
|
|
|
|
}
|
2018-09-27 14:00:51 +00:00
|
|
|
|
2018-05-19 07:46:06 +00:00
|
|
|
}
|
2018-06-15 20:04:04 +00:00
|
|
|
}()
|
2018-04-03 18:10:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(banks) update/remove upstreams properly based on a diff with current. Can
|
|
|
|
// store a map of uc.String() to Listener here and then use it to only
|
|
|
|
// start one of each and stop/modify if changes occur.
|
|
|
|
for _, uc := range newCfg.Upstreams {
|
|
|
|
uc.applyDefaults()
|
|
|
|
|
2018-04-26 13:01:20 +00:00
|
|
|
if uc.LocalBindPort < 1 {
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Error("upstream has no local_bind_port. "+
|
|
|
|
"Can't start upstream.", "upstream", uc.String())
|
2018-04-26 13:01:20 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-09-12 16:07:47 +00:00
|
|
|
l := NewUpstreamListener(p.service, p.client, uc, p.logger)
|
2018-04-03 18:10:59 +00:00
|
|
|
err := p.startListener(uc.String(), l)
|
|
|
|
if err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Error("failed to start upstream",
|
|
|
|
"upstream", uc.String(),
|
|
|
|
"error", err,
|
|
|
|
)
|
2018-04-03 18:10:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
cfg = newCfg
|
|
|
|
|
|
|
|
case <-p.stopChan:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// startPublicListener is run from the internal state machine loop
|
|
|
|
func (p *Proxy) startListener(name string, l *Listener) error {
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Info("Starting listener", "listener", name, "bind_addr", l.BindAddr())
|
2018-04-03 18:10:59 +00:00
|
|
|
go func() {
|
|
|
|
err := l.Serve()
|
|
|
|
if err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Error("listener stopped with error", "listener", name, "error", err)
|
2018-04-03 18:10:59 +00:00
|
|
|
return
|
|
|
|
}
|
2020-01-28 23:50:41 +00:00
|
|
|
p.logger.Info("listener stopped", "listener", name)
|
2018-04-03 18:10:59 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
<-p.stopChan
|
|
|
|
l.Close()
|
2018-04-26 13:01:20 +00:00
|
|
|
|
2018-04-03 18:10:59 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Close stops the proxy and terminates all active connections. It must be
|
|
|
|
// called only once.
|
|
|
|
func (p *Proxy) Close() {
|
|
|
|
close(p.stopChan)
|
2018-04-26 13:01:20 +00:00
|
|
|
if p.service != nil {
|
|
|
|
p.service.Close()
|
|
|
|
}
|
2018-04-03 18:10:59 +00:00
|
|
|
}
|