// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package agent

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"math"
	"net"
	"regexp"
	"strings"
	"sync/atomic"
	"time"

	"github.com/armon/go-metrics"
	"github.com/armon/go-radix"
	"github.com/hashicorp/go-hclog"
	"github.com/miekg/dns"

	"github.com/hashicorp/consul/acl"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	dnsutil "github.com/hashicorp/consul/internal/dnsutil"
	libdns "github.com/hashicorp/consul/internal/dnsutil"
	"github.com/hashicorp/consul/ipaddr"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/logging"
)

const (
	// UDP can fit ~25 A records in a 512B response, and ~14 AAAA
	// records. Limit further to prevent unintentional configuration
	// abuse that would have a negative effect on application response
	// times.
	maxUDPAnswerLimit        = 8
	maxRecurseRecords        = 5
	maxRecursionLevelDefault = 3

	// Increment a counter when requests staler than this are served
	staleCounterThreshold = 5 * time.Second

	defaultMaxUDPSize = 512

	// If a consumer sets a buffer size greater than this amount we will default it down
	// to this amount to ensure that consul does respond. Previously, if a consumer had a
	// buffer size larger than 65535 - 60 bytes (maximum of 60 bytes for the IP header; the
	// UDP header will be offset in the trimUDP call) consul would fail to respond and the
	// consumer would time out the request.
	maxUDPDatagramSize = math.MaxUint16 - 68
)

type dnsSOAConfig struct {
	Refresh uint32 // 3600 by default
	Retry   uint32 // 600
	Expire  uint32 // 86400
	Minttl  uint32 // 0
}
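
// These defaults can be overridden via the agent's dns_config.soa settings, e.g.:
//
//	consul agent -dev -hcl 'dns_config={soa={min_ttl=60,retry=300,expire=43200,refresh=1800}}'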

type dnsConfig struct {
	AllowStale       bool
	Datacenter       string
	EnableTruncate   bool
	MaxStale         time.Duration
	UseCache         bool
	CacheMaxAge      time.Duration
	NodeName         string
	NodeTTL          time.Duration
	OnlyPassing      bool
	RecursorStrategy structs.RecursorStrategy
	RecursorTimeout  time.Duration
	Recursors        []string
	SegmentName      string
	UDPAnswerLimit   int
	ARecordLimit     int
	NodeMetaTXT      bool
	SOAConfig        dnsSOAConfig
	// TTLRadix sets service TTLs by prefix, eg: "database-*"
	TTLRadix *radix.Tree
	// TTLStrict sets TTLs for services by exact name match. It has higher priority than TTLRadix.
	TTLStrict          map[string]time.Duration
	DisableCompression bool

	enterpriseDNSConfig
}

type serviceLookup struct {
	PeerName          string
	Datacenter        string
	Service           string
	Tag               string
	MaxRecursionLevel int
	Connect           bool
	Ingress           bool
	acl.EnterpriseMeta
}

type nodeLookup struct {
	Datacenter        string
	PeerName          string
	Node              string
	Tag               string
	MaxRecursionLevel int
	acl.EnterpriseMeta
}

// DNSServer is used to wrap an Agent and expose various
// service discovery endpoints using a DNS interface.
type DNSServer struct {
	*dns.Server
	agent     *Agent
	mux       *dns.ServeMux
	domain    string
	altDomain string
	logger    hclog.Logger

	// config stores the config as an atomic value (for hot-reloading). It is always of type *dnsConfig
	config atomic.Value

	// recursorEnabled stores whether the recursor handler is enabled as an atomic flag.
	// The recursor handler is only enabled if recursors are configured. This flag is used during config hot-reloading.
	recursorEnabled uint32

	defaultEnterpriseMeta acl.EnterpriseMeta
}

func NewDNSServer(a *Agent) (*DNSServer, error) {
	// Make sure domains are FQDNs and lowercase them so ServeMux matching is case-insensitive
	domain := dns.Fqdn(strings.ToLower(a.config.DNSDomain))
	altDomain := dns.Fqdn(strings.ToLower(a.config.DNSAltDomain))
	srv := &DNSServer{
		agent:                 a,
		domain:                domain,
		altDomain:             altDomain,
		logger:                a.logger.Named(logging.DNS),
		defaultEnterpriseMeta: *a.AgentEnterpriseMeta(),
		mux:                   dns.NewServeMux(),
	}
	cfg, err := GetDNSConfig(a.config)
	if err != nil {
		return nil, err
	}
	srv.config.Store(cfg)

	srv.mux.HandleFunc("arpa.", srv.handlePtr)
	srv.mux.HandleFunc(srv.domain, srv.handleQuery)
	// This compares against "." rather than the empty string because NewDNSServer
	// will have converted the configured alt domain into an FQDN, which ensures the
	// value ends with a ".". Therefore "." is the equivalent of originally having no
	// alternate domain set. If there is a reason why consul should be configured to
	// handle the root zone I have yet to think of it.
	if srv.altDomain != "." {
		srv.mux.HandleFunc(srv.altDomain, srv.handleQuery)
	}
	srv.toggleRecursorHandlerFromConfig(cfg)

	return srv, nil
}

// GetDNSConfig takes the global config and creates the config used by the DNS server
|
2019-04-24 18:11:54 +00:00
|
|
|
func GetDNSConfig(conf *config.RuntimeConfig) (*dnsConfig, error) {
|
|
|
|
cfg := &dnsConfig{
|
|
|
|
AllowStale: conf.DNSAllowStale,
|
|
|
|
ARecordLimit: conf.DNSARecordLimit,
|
|
|
|
Datacenter: conf.Datacenter,
|
|
|
|
EnableTruncate: conf.DNSEnableTruncate,
|
|
|
|
MaxStale: conf.DNSMaxStale,
|
|
|
|
NodeName: conf.NodeName,
|
|
|
|
NodeTTL: conf.DNSNodeTTL,
|
|
|
|
OnlyPassing: conf.DNSOnlyPassing,
|
2021-07-19 22:22:51 +00:00
|
|
|
RecursorStrategy: conf.DNSRecursorStrategy,
|
2019-04-24 18:11:54 +00:00
|
|
|
RecursorTimeout: conf.DNSRecursorTimeout,
|
|
|
|
SegmentName: conf.SegmentName,
|
|
|
|
UDPAnswerLimit: conf.DNSUDPAnswerLimit,
|
|
|
|
NodeMetaTXT: conf.DNSNodeMetaTXT,
|
|
|
|
DisableCompression: conf.DNSDisableCompression,
|
|
|
|
UseCache: conf.DNSUseCache,
|
|
|
|
CacheMaxAge: conf.DNSCacheMaxAge,
|
|
|
|
SOAConfig: dnsSOAConfig{
|
|
|
|
Expire: conf.DNSSOA.Expire,
|
|
|
|
Minttl: conf.DNSSOA.Minttl,
|
|
|
|
Refresh: conf.DNSSOA.Refresh,
|
|
|
|
Retry: conf.DNSSOA.Retry,
|
|
|
|
},
|
2019-12-10 02:26:41 +00:00
|
|
|
enterpriseDNSConfig: getEnterpriseDNSConfig(conf),
|
2017-09-25 18:40:42 +00:00
|
|
|
}
|
2019-04-24 18:11:54 +00:00
|
|
|
if conf.DNSServiceTTL != nil {
|
|
|
|
cfg.TTLRadix = radix.New()
|
|
|
|
cfg.TTLStrict = make(map[string]time.Duration)
|
|
|
|
|
|
|
|
for key, ttl := range conf.DNSServiceTTL {
|
|
|
|
// All suffixes ending with '*' are put in the radix tree.
// This includes a bare '*', which will match anything.
|
|
|
|
if strings.HasSuffix(key, "*") {
|
|
|
|
cfg.TTLRadix.Insert(key[:len(key)-1], ttl)
|
|
|
|
} else {
|
|
|
|
cfg.TTLStrict[key] = ttl
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, r := range conf.DNSRecursors {
|
|
|
|
ra, err := recursorAddr(r)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("Invalid recursor address: %v", err)
|
|
|
|
}
|
|
|
|
cfg.Recursors = append(cfg.Recursors, ra)
|
|
|
|
}
|
|
|
|
|
|
|
|
return cfg, nil
|
2017-09-25 18:40:42 +00:00
|
|
|
}
|
|
|
|
|
2018-10-19 08:53:19 +00:00
|
|
|
// GetTTLForService finds the TTL for a given service.
// It returns (ttl, true) if found, and (0, false) otherwise.
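//
// For example (illustrative): with service TTLs of {"db": 10s, "db-*": 30s, "*": 5s},
// "db" returns 10s via the exact match, "db-replica" returns 30s via the longest
// matching prefix, and any other service falls back to 5s via the catch-all "*".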
|
2019-04-24 18:11:54 +00:00
|
|
|
func (cfg *dnsConfig) GetTTLForService(service string) (time.Duration, bool) {
|
|
|
|
if cfg.TTLStrict != nil {
|
|
|
|
ttl, ok := cfg.TTLStrict[service]
|
2018-10-19 08:53:19 +00:00
|
|
|
if ok {
|
|
|
|
return ttl, true
|
|
|
|
}
|
2019-04-24 18:11:54 +00:00
|
|
|
}
|
|
|
|
if cfg.TTLRadix != nil {
|
|
|
|
_, ttlRaw, ok := cfg.TTLRadix.LongestPrefix(service)
|
2018-10-19 08:53:19 +00:00
|
|
|
if ok {
|
|
|
|
return ttlRaw.(time.Duration), true
|
|
|
|
}
|
|
|
|
}
|
2019-04-24 18:11:54 +00:00
|
|
|
return 0, false
|
2018-10-19 08:53:19 +00:00
|
|
|
}
|
|
|
|
|
2018-01-28 18:40:13 +00:00
|
|
|
func (d *DNSServer) ListenAndServe(network, addr string, notif func()) error {
|
2018-01-28 18:53:30 +00:00
|
|
|
d.Server = &dns.Server{
|
2017-05-24 13:22:56 +00:00
|
|
|
Addr: addr,
|
|
|
|
Net: network,
|
2019-04-24 18:11:54 +00:00
|
|
|
Handler: d.mux,
|
2017-05-24 13:22:56 +00:00
|
|
|
NotifyStartedFunc: notif,
|
|
|
|
}
|
|
|
|
if network == "udp" {
|
2018-01-28 18:40:13 +00:00
|
|
|
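// Accept the largest possible UDP message on read; response sizing is handled
// separately when replies are trimmed.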
d.UDPSize = 65535
|
2014-01-02 21:12:05 +00:00
|
|
|
}
|
2018-01-28 18:40:13 +00:00
|
|
|
return d.Server.ListenAndServe()
|
2014-01-02 21:12:05 +00:00
|
|
|
}
|
|
|
|
|
2024-01-10 16:19:20 +00:00
|
|
|
func (d *DNSServer) Shutdown() {
|
|
|
|
if d.Server != nil {
|
|
|
|
d.logger.Info("Stopping server",
|
|
|
|
"protocol", "DNS",
|
|
|
|
"address", d.Server.Addr,
|
|
|
|
"network", d.Server.Net,
|
|
|
|
)
|
|
|
|
err := d.Server.Shutdown()
|
|
|
|
if err != nil {
|
|
|
|
d.logger.Error("Error stopping DNS server", "error", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetAddr returns the server address if the server is not nil, or an empty string otherwise.
|
|
|
|
func (d *DNSServer) GetAddr() string {
|
|
|
|
if d.Server != nil {
|
|
|
|
return d.Server.Addr
|
|
|
|
}
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2019-04-24 18:11:54 +00:00
|
|
|
// toggleRecursorHandlerFromConfig enables or disables the recursor handler based on config idempotently
|
|
|
|
func (d *DNSServer) toggleRecursorHandlerFromConfig(cfg *dnsConfig) {
|
|
|
|
shouldEnable := len(cfg.Recursors) > 0
|
|
|
|
|
|
|
|
if shouldEnable && atomic.CompareAndSwapUint32(&d.recursorEnabled, 0, 1) {
|
|
|
|
d.mux.HandleFunc(".", d.handleRecurse)
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("recursor enabled")
|
2019-04-24 18:11:54 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if !shouldEnable && atomic.CompareAndSwapUint32(&d.recursorEnabled, 1, 0) {
|
|
|
|
d.mux.HandleRemove(".")
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("recursor disabled")
|
2019-04-24 18:11:54 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReloadConfig hot-reloads the server config with new parameters under config.RuntimeConfig.DNS*
|
|
|
|
func (d *DNSServer) ReloadConfig(newCfg *config.RuntimeConfig) error {
|
|
|
|
cfg, err := GetDNSConfig(newCfg)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
d.config.Store(cfg)
|
|
|
|
d.toggleRecursorHandlerFromConfig(cfg)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-09-11 13:37:46 +00:00
|
|
|
// setEDNS is used to set the response's EDNS size headers and
// possibly the ECS headers as well if they were present in the
// original request
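//
// For example (illustrative): a request carrying an ECS option for 10.0.0.0/24 is
// answered with SCOPE PREFIX-LENGTH 0 when the reply is globally cacheable, and with
// the original source netmask (24) when the reply is only valid for that subnet.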
|
|
|
|
func setEDNS(request *dns.Msg, response *dns.Msg, ecsGlobal bool) {
|
2021-04-13 18:46:59 +00:00
|
|
|
edns := request.IsEdns0()
|
|
|
|
if edns == nil {
|
|
|
|
return
|
|
|
|
}
|
2018-09-11 13:37:46 +00:00
|
|
|
|
2021-04-13 18:46:59 +00:00
|
|
|
// cannot just use the SetEdns0 function as we need to embed
|
|
|
|
// the ECS option as well
|
|
|
|
ednsResp := new(dns.OPT)
|
|
|
|
ednsResp.Hdr.Name = "."
|
|
|
|
ednsResp.Hdr.Rrtype = dns.TypeOPT
|
|
|
|
ednsResp.SetUDPSize(edns.UDPSize())
|
|
|
|
|
|
|
|
// Setup the ECS option if present
|
|
|
|
if subnet := ednsSubnetForRequest(request); subnet != nil {
|
|
|
|
subOp := new(dns.EDNS0_SUBNET)
|
|
|
|
subOp.Code = dns.EDNS0SUBNET
|
|
|
|
subOp.Family = subnet.Family
|
|
|
|
subOp.Address = subnet.Address
|
|
|
|
subOp.SourceNetmask = subnet.SourceNetmask
|
|
|
|
if c := response.Rcode; ecsGlobal || c == dns.RcodeNameError || c == dns.RcodeServerFailure || c == dns.RcodeRefused || c == dns.RcodeNotImplemented {
|
|
|
|
// reply is globally valid and should be cached accordingly
|
|
|
|
subOp.SourceScope = 0
|
|
|
|
} else {
|
|
|
|
// reply is only valid for the subnet it was queried with
|
|
|
|
subOp.SourceScope = subnet.SourceNetmask
|
|
|
|
}
|
|
|
|
ednsResp.Option = append(ednsResp.Option, subOp)
|
2018-09-11 13:37:46 +00:00
|
|
|
}
|
2021-04-13 18:46:59 +00:00
|
|
|
|
|
|
|
response.Extra = append(response.Extra, ednsResp)
|
2018-09-11 13:37:46 +00:00
|
|
|
}
|
|
|
|
|
2014-02-23 01:31:11 +00:00
|
|
|
// recursorAddr is used to add a port to the recursor if omitted.
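// For example (illustrative): "8.8.8.8" becomes "8.8.8.8:53", a bare IPv6 address such
// as "2001:db8::1" becomes "[2001:db8::1]:53", and an address that already carries a
// port keeps it.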
|
|
|
|
func recursorAddr(recursor string) (string, error) {
|
|
|
|
// Add the port if none
|
|
|
|
START:
|
|
|
|
_, _, err := net.SplitHostPort(recursor)
|
2020-02-18 16:09:11 +00:00
|
|
|
if ae, ok := err.(*net.AddrError); ok {
|
|
|
|
if ae.Err == "missing port in address" {
|
|
|
|
recursor = ipaddr.FormatAddressPort(recursor, 53)
|
|
|
|
goto START
|
|
|
|
} else if ae.Err == "too many colons in address" {
|
|
|
|
if ip := net.ParseIP(recursor); ip != nil && ip.To4() == nil {
|
|
|
|
recursor = ipaddr.FormatAddressPort(recursor, 53)
|
|
|
|
goto START
|
|
|
|
}
|
|
|
|
}
|
2014-02-23 01:31:11 +00:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the address
|
|
|
|
addr, err := net.ResolveTCPAddr("tcp", recursor)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return string
|
|
|
|
return addr.String(), nil
|
|
|
|
}
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
func serviceNodeCanonicalDNSName(sn *structs.ServiceNode, domain string) string {
|
2020-06-22 19:14:12 +00:00
|
|
|
return serviceCanonicalDNSName(sn.ServiceName, "service", sn.Datacenter, domain, &sn.EnterpriseMeta)
|
|
|
|
}
|
|
|
|
|
2022-04-05 21:10:06 +00:00
|
|
|
func serviceIngressDNSName(service, datacenter, domain string, entMeta *acl.EnterpriseMeta) string {
|
2020-06-22 19:14:12 +00:00
|
|
|
return serviceCanonicalDNSName(service, "ingress", datacenter, domain, entMeta)
|
2019-12-10 02:26:41 +00:00
|
|
|
}
|
|
|
|
|
2021-10-16 20:56:18 +00:00
|
|
|
// getResponseDomain returns the alt domain if it is configured and the request was made
// using the alt domain; matching is DNS case-insensitive
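//
// For example (illustrative): with domain "consul." and alt domain "my-consul.example.",
// a question for "web.service.my-consul.example." returns "my-consul.example.", while
// "web.service.consul." returns "consul.".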
|
|
|
|
func (d *DNSServer) getResponseDomain(questionName string) string {
|
|
|
|
labels := dns.SplitDomainName(questionName)
|
|
|
|
domain := d.domain
|
|
|
|
for i := len(labels) - 1; i >= 0; i-- {
|
|
|
|
currentSuffix := strings.Join(labels[i:], ".") + "."
|
|
|
|
if strings.EqualFold(currentSuffix, d.domain) || strings.EqualFold(currentSuffix, d.altDomain) {
|
|
|
|
domain = currentSuffix
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return domain
|
|
|
|
}
|
|
|
|
|
2014-11-23 08:16:37 +00:00
|
|
|
// handlePtr is used to handle "reverse" DNS queries
|
|
|
|
func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
|
|
|
|
q := req.Question[0]
|
|
|
|
defer func(s time.Time) {
|
2024-02-13 17:08:01 +00:00
|
|
|
// V1 DNS-style metrics
|
2017-10-04 23:43:27 +00:00
|
|
|
metrics.MeasureSinceWithLabels([]string{"dns", "ptr_query"}, s,
|
|
|
|
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
|
2024-02-13 17:08:01 +00:00
|
|
|
|
|
|
|
// V2 DNS-style metrics for forward compatibility
|
|
|
|
metrics.MeasureSinceWithLabels([]string{"dns", "query"}, s,
|
|
|
|
[]metrics.Label{
|
|
|
|
{Name: "node", Value: d.agent.config.NodeName},
|
|
|
|
{Name: "type", Value: dns.Type(dns.TypePTR).String()},
|
|
|
|
})
|
|
|
|
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("request served from client",
|
|
|
|
"question", q,
|
|
|
|
"latency", time.Since(s).String(),
|
|
|
|
"client", resp.RemoteAddr().String(),
|
|
|
|
"client_network", resp.RemoteAddr().Network(),
|
|
|
|
)
|
2014-11-23 08:16:37 +00:00
|
|
|
}(time.Now())
|
|
|
|
|
2019-04-24 18:11:54 +00:00
|
|
|
cfg := d.config.Load().(*dnsConfig)
|
|
|
|
|
2014-11-23 08:16:37 +00:00
|
|
|
// Setup the message response
|
|
|
|
m := new(dns.Msg)
|
|
|
|
m.SetReply(req)
|
2019-04-24 18:11:54 +00:00
|
|
|
m.Compress = !cfg.DisableCompression
|
2014-11-23 08:16:37 +00:00
|
|
|
m.Authoritative = true
|
2024-02-21 17:44:04 +00:00
|
|
|
recursionAvailable := atomic.LoadUint32(&(d.recursorEnabled)) == 1
|
|
|
|
m.RecursionAvailable = recursionAvailable
|
2014-11-23 08:16:37 +00:00
|
|
|
|
|
|
|
// Only add the SOA if requested
|
|
|
|
if req.Question[0].Qtype == dns.TypeSOA {
|
2024-01-12 22:07:42 +00:00
|
|
|
d.addSOAToMessage(cfg, m, q.Name)
|
2014-11-23 08:16:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
datacenter := d.agent.config.Datacenter
|
|
|
|
|
|
|
|
// Get the QName without the domain suffix
|
|
|
|
qName := strings.ToLower(dns.Fqdn(req.Question[0].Name))
|
|
|
|
|
|
|
|
args := structs.DCSpecificRequest{
|
2015-06-12 22:58:53 +00:00
|
|
|
Datacenter: datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
2023-09-20 21:50:06 +00:00
|
|
|
Token: d.coalesceDNSToken(),
|
2019-04-24 18:11:54 +00:00
|
|
|
AllowStale: cfg.AllowStale,
|
2015-06-12 22:58:53 +00:00
|
|
|
},
|
2014-11-23 08:16:37 +00:00
|
|
|
}
|
|
|
|
var out structs.IndexedNodes
|
|
|
|
|
2014-11-24 19:09:04 +00:00
|
|
|
// TODO: Replace ListNodes with an internal RPC that can do the filtering
// server side to avoid transferring the entire node list.
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := d.agent.RPC(context.Background(), "Catalog.ListNodes", &args, &out); err == nil {
|
2014-11-23 08:16:37 +00:00
|
|
|
for _, n := range out.Nodes {
|
2022-11-29 18:23:18 +00:00
|
|
|
lookup := serviceLookup{
|
|
|
|
// Peering PTR lookups are currently not supported, so we don't
|
|
|
|
// need to populate that field for creating the node FQDN.
|
|
|
|
// PeerName: n.PeerName,
|
|
|
|
Datacenter: n.Datacenter,
|
|
|
|
EnterpriseMeta: *n.GetEnterpriseMeta(),
|
|
|
|
}
|
2014-11-23 08:16:37 +00:00
|
|
|
arpa, _ := dns.ReverseAddr(n.Address)
|
|
|
|
if arpa == qName {
|
|
|
|
ptr := &dns.PTR{
|
|
|
|
Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: 0},
|
2022-11-29 18:23:18 +00:00
|
|
|
Ptr: nodeCanonicalDNSName(lookup, n.Node, d.domain),
|
2014-11-23 08:16:37 +00:00
|
|
|
}
|
|
|
|
m.Answer = append(m.Answer, ptr)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-21 20:27:41 +00:00
|
|
|
// only look into the services if we didn't find a node
|
|
|
|
if len(m.Answer) == 0 {
|
|
|
|
// lookup the service address
|
2024-01-29 16:40:10 +00:00
|
|
|
ip := libdns.IPFromARPA(qName)
|
|
|
|
var serviceAddress string
|
|
|
|
if ip != nil {
|
|
|
|
serviceAddress = ip.String()
|
|
|
|
}
|
2018-05-21 20:27:41 +00:00
|
|
|
sargs := structs.ServiceSpecificRequest{
|
|
|
|
Datacenter: datacenter,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
2023-09-20 21:50:06 +00:00
|
|
|
Token: d.coalesceDNSToken(),
|
2019-04-24 18:11:54 +00:00
|
|
|
AllowStale: cfg.AllowStale,
|
2018-05-21 20:27:41 +00:00
|
|
|
},
|
|
|
|
ServiceAddress: serviceAddress,
|
2021-09-17 23:36:20 +00:00
|
|
|
EnterpriseMeta: *d.defaultEnterpriseMeta.WithWildcardNamespace(),
|
2018-05-21 20:27:41 +00:00
|
|
|
}
|
2018-05-03 20:54:14 +00:00
|
|
|
|
2018-05-21 20:27:41 +00:00
|
|
|
var sout structs.IndexedServiceNodes
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := d.agent.RPC(context.Background(), "Catalog.ServiceNodes", &sargs, &sout); err == nil {
|
2018-05-21 20:27:41 +00:00
|
|
|
for _, n := range sout.ServiceNodes {
|
|
|
|
if n.ServiceAddress == serviceAddress {
|
|
|
|
ptr := &dns.PTR{
|
|
|
|
Hdr: dns.RR_Header{Name: q.Name, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: 0},
|
2019-12-10 02:26:41 +00:00
|
|
|
Ptr: serviceNodeCanonicalDNSName(n, d.domain),
|
2018-05-21 20:27:41 +00:00
|
|
|
}
|
|
|
|
m.Answer = append(m.Answer, ptr)
|
|
|
|
break
|
2018-05-03 20:54:14 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-27 21:22:36 +00:00
|
|
|
// nothing found locally, recurse
|
|
|
|
if len(m.Answer) == 0 {
|
2024-02-21 17:44:04 +00:00
|
|
|
if recursionAvailable {
|
|
|
|
d.handleRecurse(resp, req)
|
|
|
|
return
|
|
|
|
} else {
|
|
|
|
m.SetRcode(req, dns.RcodeNameError)
|
|
|
|
d.addSOAToMessage(cfg, m, q.Name)
|
|
|
|
}
|
2015-07-27 21:22:36 +00:00
|
|
|
}
|
|
|
|
|
2018-09-11 13:37:46 +00:00
|
|
|
// ptr record responses are globally valid
|
|
|
|
setEDNS(req, m, true)
|
2017-06-14 23:22:54 +00:00
|
|
|
|
2014-11-23 08:16:37 +00:00
|
|
|
// Write out the complete response
|
|
|
|
if err := resp.WriteMsg(m); err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("failed to respond", "error", err)
|
2014-11-23 08:16:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-15 12:22:08 +00:00
|
|
|
// handleQuery is used to handle DNS queries in the configured domain
|
2014-01-03 01:58:58 +00:00
|
|
|
func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
|
|
|
|
q := req.Question[0]
|
|
|
|
defer func(s time.Time) {
|
2024-02-13 17:08:01 +00:00
|
|
|
// V1 DNS-style metrics
|
2017-10-04 23:43:27 +00:00
|
|
|
metrics.MeasureSinceWithLabels([]string{"dns", "domain_query"}, s,
|
|
|
|
[]metrics.Label{{Name: "node", Value: d.agent.config.NodeName}})
|
2024-02-13 17:08:01 +00:00
|
|
|
|
|
|
|
// V2 DNS-style metrics for forward compatibility
|
|
|
|
metrics.MeasureSinceWithLabels([]string{"dns", "query"}, s,
|
|
|
|
[]metrics.Label{
|
|
|
|
{Name: "node", Value: d.agent.config.NodeName},
|
|
|
|
{Name: "type", Value: dns.Type(q.Qtype).String()},
|
|
|
|
})
|
|
|
|
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("request served from client",
|
|
|
|
"name", q.Name,
|
2024-02-13 17:08:01 +00:00
|
|
|
"type", dns.Type(q.Qtype).String(),
|
|
|
|
"class", dns.Class(q.Qclass).String(),
|
2020-01-28 23:50:41 +00:00
|
|
|
"latency", time.Since(s).String(),
|
|
|
|
"client", resp.RemoteAddr().String(),
|
|
|
|
"client_network", resp.RemoteAddr().Network(),
|
|
|
|
)
|
2014-01-03 01:58:58 +00:00
|
|
|
}(time.Now())
|
|
|
|
|
2014-02-14 22:22:49 +00:00
|
|
|
// Switch to TCP if the client is using TCP
|
|
|
|
network := "udp"
|
|
|
|
if _, ok := resp.RemoteAddr().(*net.TCPAddr); ok {
|
|
|
|
network = "tcp"
|
|
|
|
}
|
|
|
|
|
2019-04-24 18:11:54 +00:00
|
|
|
cfg := d.config.Load().(*dnsConfig)
|
|
|
|
|
2023-09-20 21:50:06 +00:00
|
|
|
// Set up the message response
|
2014-01-03 01:58:58 +00:00
|
|
|
m := new(dns.Msg)
|
|
|
|
m.SetReply(req)
|
2019-04-24 18:11:54 +00:00
|
|
|
m.Compress = !cfg.DisableCompression
|
2014-01-03 01:58:58 +00:00
|
|
|
m.Authoritative = true
|
2019-04-24 18:11:54 +00:00
|
|
|
m.RecursionAvailable = (len(cfg.Recursors) > 0)
|
2014-02-25 20:46:11 +00:00
|
|
|
|
2021-04-13 20:07:10 +00:00
|
|
|
var err error
|
2018-09-11 13:37:46 +00:00
|
|
|
|
2017-08-04 11:24:04 +00:00
|
|
|
switch req.Question[0].Qtype {
|
|
|
|
case dns.TypeSOA:
|
2024-01-12 22:07:42 +00:00
|
|
|
ns, glue := d.getNameserversAndNodeRecord(req.Question[0].Name, cfg, maxRecursionLevelDefault)
|
|
|
|
m.Answer = append(m.Answer, d.makeSOARecord(cfg, q.Name))
|
2017-08-04 11:24:04 +00:00
|
|
|
m.Ns = append(m.Ns, ns...)
|
|
|
|
m.Extra = append(m.Extra, glue...)
|
|
|
|
m.SetRcode(req, dns.RcodeSuccess)
|
|
|
|
|
|
|
|
case dns.TypeNS:
|
2024-01-12 22:07:42 +00:00
|
|
|
ns, glue := d.getNameserversAndNodeRecord(req.Question[0].Name, cfg, maxRecursionLevelDefault)
|
2017-08-04 11:24:04 +00:00
|
|
|
m.Answer = ns
|
2017-08-04 21:53:42 +00:00
|
|
|
m.Extra = glue
|
2017-08-04 11:24:04 +00:00
|
|
|
m.SetRcode(req, dns.RcodeSuccess)
|
|
|
|
|
2017-08-07 09:09:41 +00:00
|
|
|
case dns.TypeAXFR:
|
|
|
|
m.SetRcode(req, dns.RcodeNotImplemented)
|
|
|
|
|
2017-08-04 11:24:04 +00:00
|
|
|
default:
|
2021-04-13 22:15:48 +00:00
|
|
|
err = d.dispatch(resp.RemoteAddr(), req, m, maxRecursionLevelDefault)
|
2021-04-13 20:43:23 +00:00
|
|
|
rCode := rCodeFromError(err)
|
2021-07-16 16:34:27 +00:00
|
|
|
if rCode == dns.RcodeNameError || errors.Is(err, errNoData) {
|
2024-01-12 22:07:42 +00:00
|
|
|
d.addSOAToMessage(cfg, m, q.Name)
|
2021-04-13 20:43:23 +00:00
|
|
|
}
|
|
|
|
m.SetRcode(req, rCode)
|
2017-08-04 11:24:04 +00:00
|
|
|
}
|
2014-01-03 23:43:35 +00:00
|
|
|
|
2021-04-13 20:07:10 +00:00
|
|
|
setEDNS(req, m, !errors.Is(err, errECSNotGlobal))
|
2017-06-14 23:22:54 +00:00
|
|
|
|
2021-04-13 21:48:29 +00:00
|
|
|
d.trimDNSResponse(cfg, network, req, m)
|
2021-04-13 20:43:23 +00:00
|
|
|
|
2014-01-03 23:43:35 +00:00
|
|
|
if err := resp.WriteMsg(m); err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("failed to respond", "error", err)
|
2014-01-03 23:43:35 +00:00
|
|
|
}
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// makeSOARecord crafts the DNS SOA record for the domain matching the question name
|
|
|
|
func (d *DNSServer) makeSOARecord(cfg *dnsConfig, questionName string) *dns.SOA {
|
2021-10-05 14:47:27 +00:00
|
|
|
domain := d.domain
|
|
|
|
if d.altDomain != "" && strings.HasSuffix(questionName, "."+d.altDomain) {
|
|
|
|
domain = d.altDomain
|
|
|
|
}
|
|
|
|
|
2017-08-04 11:24:04 +00:00
|
|
|
return &dns.SOA{
|
2014-01-02 23:10:13 +00:00
|
|
|
Hdr: dns.RR_Header{
|
2021-10-05 14:47:27 +00:00
|
|
|
Name: domain,
|
2014-01-02 23:10:13 +00:00
|
|
|
Rrtype: dns.TypeSOA,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
// Has to be consistent with MinTTL to avoid invalidation
|
2019-04-24 18:11:54 +00:00
|
|
|
Ttl: cfg.SOAConfig.Minttl,
|
2014-01-02 23:10:13 +00:00
|
|
|
},
|
2021-10-05 14:47:27 +00:00
|
|
|
Ns: "ns." + domain,
|
|
|
|
Serial: uint32(time.Now().Unix()),
|
2021-10-05 14:47:27 +00:00
|
|
|
Mbox: "hostmaster." + domain,
|
2019-04-24 18:11:54 +00:00
|
|
|
Refresh: cfg.SOAConfig.Refresh,
|
|
|
|
Retry: cfg.SOAConfig.Retry,
|
|
|
|
Expire: cfg.SOAConfig.Expire,
|
|
|
|
Minttl: cfg.SOAConfig.Minttl,
|
2014-01-02 23:10:13 +00:00
|
|
|
}
|
2017-08-04 11:24:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// addSOAToMessage is used to add an SOA record to a message for the given domain
|
2024-01-12 22:07:42 +00:00
|
|
|
func (d *DNSServer) addSOAToMessage(cfg *dnsConfig, msg *dns.Msg, questionName string) {
|
|
|
|
msg.Ns = append(msg.Ns, d.makeSOARecord(cfg, questionName))
|
2017-08-04 11:24:04 +00:00
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// getNameserversAndNodeRecord returns the names and IP addresses of up to three random servers
// in the current cluster which serve as authoritative name servers for the zone.
|
2024-01-12 22:07:42 +00:00
|
|
|
func (d *DNSServer) getNameserversAndNodeRecord(questionName string, cfg *dnsConfig, maxRecursionLevel int) (ns []dns.RR, extra []dns.RR) {
|
2020-04-16 21:00:48 +00:00
|
|
|
out, err := d.lookupServiceNodes(cfg, serviceLookup{
|
|
|
|
Datacenter: d.agent.config.Datacenter,
|
|
|
|
Service: structs.ConsulServiceName,
|
|
|
|
Connect: false,
|
|
|
|
Ingress: false,
|
2021-08-19 20:09:42 +00:00
|
|
|
EnterpriseMeta: d.defaultEnterpriseMeta,
|
2020-04-16 21:00:48 +00:00
|
|
|
})
|
2017-08-21 12:16:41 +00:00
|
|
|
if err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("Unable to get list of servers", "error", err)
|
2017-08-21 12:16:41 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
2017-08-08 11:55:58 +00:00
|
|
|
|
2017-08-21 12:16:41 +00:00
|
|
|
if len(out.Nodes) == 0 {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("no servers found")
|
2017-08-21 12:16:41 +00:00
|
|
|
return
|
|
|
|
}
|
2017-08-04 11:24:04 +00:00
|
|
|
|
2017-08-21 12:16:41 +00:00
|
|
|
// shuffle the nodes to randomize the output
|
|
|
|
out.Nodes.Shuffle()
|
|
|
|
|
|
|
|
for _, o := range out.Nodes {
|
2019-08-05 15:19:18 +00:00
|
|
|
name, dc := o.Node.Node, o.Node.Datacenter
|
2017-08-21 12:16:41 +00:00
|
|
|
|
2024-01-10 16:19:20 +00:00
|
|
|
if libdns.InvalidNameRe.MatchString(name) {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("Skipping invalid node for NS records", "node", name)
|
2017-08-07 21:02:33 +00:00
|
|
|
continue
|
|
|
|
}
|
2021-10-16 22:18:03 +00:00
|
|
|
respDomain := d.getResponseDomain(questionName)
|
|
|
|
fqdn := name + ".node." + dc + "." + respDomain
|
2017-08-21 12:16:41 +00:00
|
|
|
fqdn = dns.Fqdn(strings.ToLower(fqdn))
|
2017-08-04 11:24:04 +00:00
|
|
|
|
2017-08-08 11:55:58 +00:00
|
|
|
// NS record
|
2017-08-04 11:24:04 +00:00
|
|
|
nsrr := &dns.NS{
|
|
|
|
Hdr: dns.RR_Header{
|
2021-10-16 22:18:03 +00:00
|
|
|
Name: respDomain,
|
2017-08-04 11:24:04 +00:00
|
|
|
Rrtype: dns.TypeNS,
|
|
|
|
Class: dns.ClassINET,
|
2019-04-24 18:11:54 +00:00
|
|
|
Ttl: uint32(cfg.NodeTTL / time.Second),
|
2017-08-04 11:24:04 +00:00
|
|
|
},
|
2017-08-21 12:16:41 +00:00
|
|
|
Ns: fqdn,
|
2017-08-04 11:24:04 +00:00
|
|
|
}
|
|
|
|
ns = append(ns, nsrr)
|
2017-08-08 11:55:58 +00:00
|
|
|
|
2020-03-18 23:28:36 +00:00
|
|
|
extra = append(extra, d.makeRecordFromNode(o.Node, dns.TypeANY, fqdn, cfg.NodeTTL, maxRecursionLevel)...)
|
2017-08-04 11:24:04 +00:00
|
|
|
|
|
|
|
// don't provide more than 3 servers
|
|
|
|
if len(ns) >= 3 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
2014-01-02 23:10:13 +00:00
|
|
|
}
|
2014-01-03 01:58:58 +00:00
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// parseDatacenter will do the following:
|
|
|
|
// - if zero labels are passed, return true without modifying the datacenter parameter
|
|
|
|
// - if one label is passed, set the datacenter parameter to the label and return true
|
|
|
|
// - Otherwise it will return false without modifying the datacenter parameter
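//
// For example (illustrative): no labels leaves the datacenter at its default and returns
// true, ["dc2"] sets it to "dc2" and returns true, and ["foo", "dc2"] returns false.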
|
2019-12-10 02:26:41 +00:00
|
|
|
func (d *DNSServer) parseDatacenter(labels []string, datacenter *string) bool {
|
|
|
|
switch len(labels) {
|
|
|
|
case 1:
|
|
|
|
*datacenter = labels[0]
|
|
|
|
return true
|
|
|
|
case 0:
|
|
|
|
return true
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-13 20:07:10 +00:00
|
|
|
var errECSNotGlobal = fmt.Errorf("ECS response is not global")
|
|
|
|
var errNameNotFound = fmt.Errorf("DNS name not found")
|
2021-04-13 20:43:23 +00:00
|
|
|
|
2021-07-16 16:34:27 +00:00
|
|
|
// errNoData is used to indicate no resource records exist for the specified query type.
|
|
|
|
// Per the recommendation from Section 2.2 of RFC 2308, the server will return a TYPE 2
|
|
|
|
// NODATA response in which the RCODE is set to NOERROR (RcodeSuccess), the Answer
|
|
|
|
// section is empty, and the Authority section contains the SOA record.
|
|
|
|
var errNoData = fmt.Errorf("no DNS Answer")
|
2021-04-13 20:43:23 +00:00
|
|
|
|
|
|
|
// ecsNotGlobalError may be used to wrap an error or nil, to indicate that the
|
|
|
|
// EDNS client subnet source scope is not global.
|
|
|
|
type ecsNotGlobalError struct {
|
|
|
|
error
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e ecsNotGlobalError) Error() string {
|
|
|
|
if e.error == nil {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
return e.error.Error()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e ecsNotGlobalError) Is(other error) bool {
|
|
|
|
return other == errECSNotGlobal
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e ecsNotGlobalError) Unwrap() error {
|
|
|
|
return e.error
|
|
|
|
}
|
2021-04-13 20:07:10 +00:00
|
|
|
|
2022-07-06 16:30:04 +00:00
|
|
|
type queryLocality struct {
|
|
|
|
// datacenter is the datacenter parsed from a label that has an explicit datacenter part.
|
|
|
|
// Example query: <service>.virtual.<namespace>.ns.<partition>.ap.<datacenter>.dc.consul
|
|
|
|
datacenter string
|
|
|
|
|
2022-11-29 18:23:18 +00:00
|
|
|
// peer is the peer name parsed from a label that has explicit parts.
|
|
|
|
// Example query: <service>.virtual.<namespace>.ns.<peer>.peer.<partition>.ap.consul
|
|
|
|
peer string
|
|
|
|
|
|
|
|
// peerOrDatacenter is parsed from DNS queries where the datacenter and peer name are
|
|
|
|
// specified in the same query part.
|
2022-07-06 16:30:04 +00:00
|
|
|
// Example query: <service>.virtual.<peerOrDatacenter>.consul
|
2022-11-29 18:23:18 +00:00
|
|
|
//
|
|
|
|
// Note that this field should only be a "peer" for virtual queries, since virtual IPs should
|
|
|
|
// not be shared between datacenters. In all other cases, it should be considered a DC.
|
2022-07-06 16:30:04 +00:00
|
|
|
peerOrDatacenter string
|
|
|
|
|
|
|
|
acl.EnterpriseMeta
|
|
|
|
}
|
|
|
|
|
|
|
|
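// effectiveDatacenter resolves which datacenter a query locality refers to.
// For example (illustrative): "db.virtual.dc2.consul" is parsed with peerOrDatacenter
// "dc2", which is returned here, while "db.virtual.ns1.ns.ap1.ap.dc2.dc.consul" sets
// datacenter explicitly and that value takes precedence.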
func (l queryLocality) effectiveDatacenter(defaultDC string) string {
|
|
|
|
// Prefer the value parsed from a query with explicit parts: <namespace>.ns.<partition>.ap.<datacenter>.dc
|
|
|
|
if l.datacenter != "" {
|
|
|
|
return l.datacenter
|
|
|
|
}
|
|
|
|
// Fall back to the ambiguously parsed DC or Peer.
|
|
|
|
if l.peerOrDatacenter != "" {
|
|
|
|
return l.peerOrDatacenter
|
|
|
|
}
|
|
|
|
// If all are empty, use a default value.
|
|
|
|
return defaultDC
|
|
|
|
}
|
|
|
|
|
2021-04-13 18:05:42 +00:00
|
|
|
// dispatch is used to parse a request and invoke the correct handler.
// The maxRecursionLevel parameter controls whether further recursive lookups may be performed.
|
2021-04-13 22:15:48 +00:00
|
|
|
func (d *DNSServer) dispatch(remoteAddr net.Addr, req, resp *dns.Msg, maxRecursionLevel int) error {
|
2021-10-17 12:02:13 +00:00
|
|
|
// Choose correct response domain
|
|
|
|
respDomain := d.getResponseDomain(req.Question[0].Name)
|
|
|
|
|
2014-01-03 01:58:58 +00:00
|
|
|
// Get the QName without the domain suffix
|
2014-07-23 08:28:54 +00:00
|
|
|
qName := strings.ToLower(dns.Fqdn(req.Question[0].Name))
|
2019-06-27 10:00:37 +00:00
|
|
|
qName = d.trimDomain(qName)
|
2014-01-03 01:58:58 +00:00
|
|
|
|
|
|
|
// Split into the label parts
|
|
|
|
labels := dns.SplitDomainName(qName)
|
|
|
|
|
2019-04-24 18:11:54 +00:00
|
|
|
cfg := d.config.Load().(*dnsConfig)
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
var queryKind string
|
|
|
|
var queryParts []string
|
|
|
|
var querySuffixes []string
|
|
|
|
|
|
|
|
done := false
|
|
|
|
for i := len(labels) - 1; i >= 0 && !done; i-- {
|
|
|
|
switch labels[i] {
|
2021-12-03 00:29:50 +00:00
|
|
|
case "service", "connect", "virtual", "ingress", "node", "query", "addr":
|
2019-12-10 02:26:41 +00:00
|
|
|
queryParts = labels[:i]
|
|
|
|
querySuffixes = labels[i+1:]
|
|
|
|
queryKind = labels[i]
|
|
|
|
done = true
|
|
|
|
default:
|
|
|
|
// If this is an SRV query the "service" label is optional; we add it back to use the
// existing code-path.
|
|
|
|
if req.Question[0].Qtype == dns.TypeSRV && strings.HasPrefix(labels[i], "_") {
|
|
|
|
queryKind = "service"
|
|
|
|
queryParts = labels[:i+1]
|
|
|
|
querySuffixes = labels[i+1:]
|
|
|
|
done = true
|
|
|
|
}
|
|
|
|
}
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
2017-01-30 18:36:48 +00:00
|
|
|
|
2021-04-13 20:07:10 +00:00
|
|
|
invalid := func() error {
|
2020-10-21 19:16:03 +00:00
|
|
|
d.logger.Warn("QName invalid", "qname", qName)
|
2021-04-13 20:07:10 +00:00
|
|
|
return errNameNotFound
|
2020-10-21 19:16:03 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
switch queryKind {
|
2014-01-03 01:58:58 +00:00
|
|
|
case "service":
|
2019-12-10 02:26:41 +00:00
|
|
|
n := len(queryParts)
|
|
|
|
if n < 1 {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2019-12-10 02:26:41 +00:00
|
|
|
}
|
|
|
|
|
2023-04-14 16:24:46 +00:00
|
|
|
localities, err := d.parseSamenessGroupLocality(cfg, querySuffixes, invalid)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2022-11-29 18:23:18 +00:00
|
|
|
}
|
|
|
|
|
2023-04-14 16:24:46 +00:00
|
|
|
// Loop over the localities and return as soon as a lookup is successful
|
|
|
|
for _, locality := range localities {
|
|
|
|
d.logger.Debug("labels", "querySuffixes", querySuffixes)
|
2014-08-18 19:45:56 +00:00
|
|
|
|
2023-04-14 16:24:46 +00:00
|
|
|
lookup := serviceLookup{
|
|
|
|
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
|
|
|
|
PeerName: locality.peer,
|
|
|
|
Connect: false,
|
|
|
|
Ingress: false,
|
|
|
|
MaxRecursionLevel: maxRecursionLevel,
|
|
|
|
EnterpriseMeta: locality.EnterpriseMeta,
|
|
|
|
}
|
|
|
|
// Only one of dc or peer can be used.
|
|
|
|
if lookup.PeerName != "" {
|
|
|
|
lookup.Datacenter = ""
|
2014-08-18 19:45:56 +00:00
|
|
|
}
|
|
|
|
|
2023-04-14 16:24:46 +00:00
|
|
|
// Support RFC 2782 style syntax
|
|
|
|
if n == 2 && strings.HasPrefix(queryParts[1], "_") && strings.HasPrefix(queryParts[0], "_") {
|
|
|
|
// Grab the tag since we may nuke it if it's tcp
|
|
|
|
tag := queryParts[1][1:]
|
2014-04-21 22:33:01 +00:00
|
|
|
|
2023-04-14 16:24:46 +00:00
|
|
|
// Treat _name._tcp.service.consul as a default, no need to filter on that tag
|
|
|
|
if tag == "tcp" {
|
|
|
|
tag = ""
|
|
|
|
}
|
|
|
|
|
|
|
|
lookup.Tag = tag
|
|
|
|
lookup.Service = queryParts[0][1:]
|
|
|
|
// _name._tag.service.consul
|
|
|
|
} else {
|
|
|
|
// Consul 0.3 and prior format for SRV queries
|
|
|
|
// Support "." in the label, re-join all the parts
|
|
|
|
tag := ""
|
|
|
|
if n >= 2 {
|
|
|
|
tag = strings.Join(queryParts[:n-1], ".")
|
|
|
|
}
|
2014-08-18 19:45:56 +00:00
|
|
|
|
2023-04-14 16:24:46 +00:00
|
|
|
lookup.Tag = tag
|
|
|
|
lookup.Service = queryParts[n-1]
|
|
|
|
// tag[.tag].name.service.consul
|
|
|
|
}
|
2014-08-18 19:45:56 +00:00
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
err = d.handleServiceQuery(cfg, lookup, req, resp)
|
2023-04-14 16:24:46 +00:00
|
|
|
// Return if we are error free right away, otherwise loop again if we can
|
|
|
|
if err == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
2020-04-16 21:00:48 +00:00
|
|
|
|
2023-04-14 16:24:46 +00:00
|
|
|
// We've exhausted all DNS possibilities so return here
|
|
|
|
return err
|
2018-03-09 17:09:21 +00:00
|
|
|
case "connect":
|
2019-12-10 02:26:41 +00:00
|
|
|
if len(queryParts) < 1 {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2018-03-09 17:09:21 +00:00
|
|
|
}
|
|
|
|
|
2022-07-06 16:30:04 +00:00
|
|
|
locality, ok := d.parseLocality(querySuffixes, cfg)
|
|
|
|
if !ok {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2019-12-10 02:26:41 +00:00
|
|
|
}
|
2018-03-09 17:09:21 +00:00
|
|
|
|
2022-11-29 18:23:18 +00:00
|
|
|
// Peering is not currently supported for connect queries.
|
|
|
|
// Exposing this likely would not provide much value, since users would
|
|
|
|
// need to be very familiar with our TLS / SNI / mesh gateways to leverage it.
|
2020-04-16 21:00:48 +00:00
|
|
|
lookup := serviceLookup{
|
2022-07-06 16:30:04 +00:00
|
|
|
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
|
2020-04-16 21:00:48 +00:00
|
|
|
Service: queryParts[len(queryParts)-1],
|
|
|
|
Connect: true,
|
|
|
|
Ingress: false,
|
|
|
|
MaxRecursionLevel: maxRecursionLevel,
|
2022-07-06 16:30:04 +00:00
|
|
|
EnterpriseMeta: locality.EnterpriseMeta,
|
2020-04-16 21:00:48 +00:00
|
|
|
}
|
2019-12-10 02:26:41 +00:00
|
|
|
// name.connect.consul
|
2024-01-12 22:07:42 +00:00
|
|
|
return d.handleServiceQuery(cfg, lookup, req, resp)
|
2021-04-13 18:48:24 +00:00
|
|
|
|
2021-12-03 00:29:50 +00:00
|
|
|
case "virtual":
|
|
|
|
if len(queryParts) < 1 {
|
|
|
|
return invalid()
|
|
|
|
}
|
|
|
|
|
2022-07-06 16:30:04 +00:00
|
|
|
locality, ok := d.parseLocality(querySuffixes, cfg)
|
|
|
|
if !ok {
|
2021-12-03 00:29:50 +00:00
|
|
|
return invalid()
|
|
|
|
}
|
|
|
|
|
|
|
|
args := structs.ServiceSpecificRequest{
|
2022-07-06 16:30:04 +00:00
|
|
|
// The datacenter of the request is not specified because cross-datacenter virtual IP
|
|
|
|
// queries are not supported. This guard rail is in place because virtual IPs are allocated
|
|
|
|
// within a DC, therefore their uniqueness is not guaranteed globally.
|
2022-11-29 18:23:18 +00:00
|
|
|
PeerName: locality.peer,
|
2021-12-03 00:29:50 +00:00
|
|
|
ServiceName: queryParts[len(queryParts)-1],
|
2022-07-06 16:30:04 +00:00
|
|
|
EnterpriseMeta: locality.EnterpriseMeta,
|
2021-12-03 00:29:50 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
2023-09-20 21:50:06 +00:00
|
|
|
Token: d.coalesceDNSToken(),
|
2021-12-03 00:29:50 +00:00
|
|
|
},
|
|
|
|
}
|
2022-11-29 18:23:18 +00:00
|
|
|
if args.PeerName == "" {
|
|
|
|
// If the peer name was not explicitly defined, fall back to the ambiguously-parsed version.
|
|
|
|
args.PeerName = locality.peerOrDatacenter
|
|
|
|
}
|
|
|
|
|
2021-12-03 00:29:50 +00:00
|
|
|
var out string
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := d.agent.RPC(context.Background(), "Catalog.VirtualIPForService", &args, &out); err != nil {
|
2021-12-03 00:29:50 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
if out != "" {
|
|
|
|
resp.Answer = append(resp.Answer, &dns.A{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: qName + respDomain,
|
|
|
|
Rrtype: dns.TypeA,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(cfg.NodeTTL / time.Second),
|
|
|
|
},
|
|
|
|
A: net.ParseIP(out),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
|
2020-04-16 21:00:48 +00:00
|
|
|
case "ingress":
|
|
|
|
if len(queryParts) < 1 {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2020-04-16 21:00:48 +00:00
|
|
|
}
|
|
|
|
|
2022-07-06 16:30:04 +00:00
|
|
|
locality, ok := d.parseLocality(querySuffixes, cfg)
|
|
|
|
if !ok {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2020-04-16 21:00:48 +00:00
|
|
|
}
|
|
|
|
|
2022-11-29 18:23:18 +00:00
|
|
|
// Peering is not currently supported for ingress queries.
|
|
|
|
// We probably should not be encouraging chained calls from ingress to peers anyway.
|
2020-04-16 21:00:48 +00:00
|
|
|
lookup := serviceLookup{
|
2022-07-06 16:30:04 +00:00
|
|
|
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
|
2020-04-16 21:00:48 +00:00
|
|
|
Service: queryParts[len(queryParts)-1],
|
|
|
|
Connect: false,
|
|
|
|
Ingress: true,
|
|
|
|
MaxRecursionLevel: maxRecursionLevel,
|
2022-07-06 16:30:04 +00:00
|
|
|
EnterpriseMeta: locality.EnterpriseMeta,
|
2020-04-16 21:00:48 +00:00
|
|
|
}
|
|
|
|
// name.ingress.consul
|
2024-01-12 22:07:42 +00:00
|
|
|
return d.handleServiceQuery(cfg, lookup, req, resp)
|
2021-04-13 18:48:24 +00:00
|
|
|
|
2014-01-03 01:58:58 +00:00
|
|
|
case "node":
|
2019-12-10 02:26:41 +00:00
|
|
|
if len(queryParts) < 1 {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2019-12-10 02:26:41 +00:00
|
|
|
}
|
|
|
|
|
2022-07-06 16:30:04 +00:00
|
|
|
locality, ok := d.parseLocality(querySuffixes, cfg)
|
|
|
|
if !ok {
|
2022-06-10 18:23:51 +00:00
|
|
|
return invalid()
|
|
|
|
}
|
|
|
|
|
2022-07-06 16:30:04 +00:00
|
|
|
// Nodes are only registered in the default namespace so queries
|
|
|
|
// must not specify a non-default namespace.
|
|
|
|
if !locality.InDefaultNamespace() {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
2015-11-17 16:40:47 +00:00
|
|
|
|
2014-04-21 22:33:01 +00:00
|
|
|
// Allow a "." in the node name, just join all the parts
|
2019-12-10 02:26:41 +00:00
|
|
|
node := strings.Join(queryParts, ".")
|
2022-06-10 18:23:51 +00:00
|
|
|
|
|
|
|
lookup := nodeLookup{
|
2022-07-06 16:30:04 +00:00
|
|
|
Datacenter: locality.effectiveDatacenter(d.agent.config.Datacenter),
|
2022-11-29 18:23:18 +00:00
|
|
|
PeerName: locality.peer,
|
2022-06-10 18:23:51 +00:00
|
|
|
Node: node,
|
|
|
|
MaxRecursionLevel: maxRecursionLevel,
|
2022-07-06 16:30:04 +00:00
|
|
|
EnterpriseMeta: locality.EnterpriseMeta,
|
2022-06-10 18:23:51 +00:00
|
|
|
}
|
2022-11-29 18:23:18 +00:00
|
|
|
// Only one of dc or peer can be used.
|
|
|
|
if lookup.PeerName != "" {
|
|
|
|
lookup.Datacenter = ""
|
|
|
|
}
|
2022-06-10 18:23:51 +00:00
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
return d.handleNodeQuery(cfg, lookup, req, resp)
|
2021-04-13 18:48:24 +00:00
|
|
|
|
2015-11-12 17:28:05 +00:00
|
|
|
case "query":
|
2022-11-20 22:21:24 +00:00
|
|
|
n := len(queryParts)
|
2022-07-06 16:30:04 +00:00
|
|
|
datacenter := d.agent.config.Datacenter
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
// ensure we have a query name
|
2022-11-20 22:21:24 +00:00
|
|
|
if n < 1 {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2019-12-10 02:26:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if !d.parseDatacenter(querySuffixes, &datacenter) {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
2015-11-17 16:40:47 +00:00
|
|
|
|
2022-11-20 22:21:24 +00:00
|
|
|
query := ""
|
|
|
|
|
|
|
|
// If the first and last DNS query parts begin with _, this is an RFC 2782 style SRV lookup.
|
|
|
|
// This allows for prepared query names to include "." (for backwards compatibility).
|
|
|
|
// Otherwise, this is a standard prepared query lookup.
|
|
|
|
if n >= 2 && strings.HasPrefix(queryParts[0], "_") && strings.HasPrefix(queryParts[n-1], "_") {
|
|
|
|
// The last DNS query part is the protocol field (ignored).
|
|
|
|
// All prior parts are the prepared query name or ID.
|
|
|
|
query = strings.Join(queryParts[:n-1], ".")
|
|
|
|
|
|
|
|
// Strip leading underscore
|
|
|
|
query = query[1:]
|
|
|
|
} else {
|
|
|
|
// Allow a "." in the query name, just join all the parts.
|
|
|
|
query = strings.Join(queryParts, ".")
|
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
err := d.handlePreparedQuery(cfg, datacenter, query, remoteAddr, req, resp, maxRecursionLevel)
|
2021-04-13 20:43:23 +00:00
|
|
|
return ecsNotGlobalError{error: err}
|
2015-11-12 17:28:05 +00:00
|
|
|
|
2016-10-28 02:01:32 +00:00
|
|
|
case "addr":
|
2019-12-10 02:26:41 +00:00
|
|
|
// <address>.addr.<suffixes>.<domain> - addr must be the second label, datacenter is optional
|
2021-06-25 00:44:44 +00:00
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
if len(queryParts) != 1 {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2016-10-28 02:01:32 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 02:26:41 +00:00
|
|
|
switch len(queryParts[0]) / 2 {
|
2016-10-28 02:01:32 +00:00
|
|
|
// IPv4
|
|
|
|
case 4:
|
2019-12-10 02:26:41 +00:00
|
|
|
ip, err := hex.DecodeString(queryParts[0])
|
2016-10-28 02:01:32 +00:00
|
|
|
if err != nil {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2016-10-28 02:01:32 +00:00
|
|
|
}
|
2022-07-06 16:30:04 +00:00
|
|
|
// check if the query type is A for IPv4 or ANY
|
2021-06-25 00:44:44 +00:00
|
|
|
aRecord := &dns.A{
|
2016-10-28 02:01:32 +00:00
|
|
|
Hdr: dns.RR_Header{
|
2021-10-17 12:02:13 +00:00
|
|
|
Name: qName + respDomain,
|
2016-10-28 02:01:32 +00:00
|
|
|
Rrtype: dns.TypeA,
|
|
|
|
Class: dns.ClassINET,
|
2019-04-24 18:11:54 +00:00
|
|
|
Ttl: uint32(cfg.NodeTTL / time.Second),
|
2016-10-28 02:01:32 +00:00
|
|
|
},
|
|
|
|
A: ip,
|
2021-06-25 00:44:44 +00:00
|
|
|
}
|
|
|
|
if req.Question[0].Qtype != dns.TypeA && req.Question[0].Qtype != dns.TypeANY {
|
|
|
|
resp.Extra = append(resp.Extra, aRecord)
|
|
|
|
} else {
|
|
|
|
resp.Answer = append(resp.Answer, aRecord)
|
|
|
|
}
|
2016-10-28 02:01:32 +00:00
|
|
|
// IPv6
|
|
|
|
case 16:
|
2019-12-10 02:26:41 +00:00
|
|
|
ip, err := hex.DecodeString(queryParts[0])
|
2016-10-28 02:01:32 +00:00
|
|
|
if err != nil {
|
2020-10-21 19:16:03 +00:00
|
|
|
return invalid()
|
2016-10-28 02:01:32 +00:00
|
|
|
}
|
2022-07-06 16:30:04 +00:00
|
|
|
// check if the query type is AAAA for IPv6 or ANY
|
2021-06-25 00:44:44 +00:00
|
|
|
aaaaRecord := &dns.AAAA{
|
2016-10-28 02:01:32 +00:00
|
|
|
Hdr: dns.RR_Header{
|
2021-10-17 12:02:13 +00:00
|
|
|
Name: qName + respDomain,
|
2016-10-28 02:01:32 +00:00
|
|
|
Rrtype: dns.TypeAAAA,
|
|
|
|
Class: dns.ClassINET,
|
2019-04-24 18:11:54 +00:00
|
|
|
Ttl: uint32(cfg.NodeTTL / time.Second),
|
2016-10-28 02:01:32 +00:00
|
|
|
},
|
|
|
|
AAAA: ip,
|
2021-06-25 00:44:44 +00:00
|
|
|
}
|
|
|
|
if req.Question[0].Qtype != dns.TypeAAAA && req.Question[0].Qtype != dns.TypeANY {
|
|
|
|
resp.Extra = append(resp.Extra, aaaaRecord)
|
|
|
|
} else {
|
|
|
|
resp.Answer = append(resp.Answer, aaaaRecord)
|
|
|
|
}
|
2024-01-17 03:36:02 +00:00
|
|
|
default:
|
|
|
|
return invalid()
|
2016-10-28 02:01:32 +00:00
|
|
|
}
|
2021-04-13 20:07:10 +00:00
|
|
|
return nil
|
2021-04-13 18:48:24 +00:00
|
|
|
default:
|
|
|
|
return invalid()
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-27 10:00:37 +00:00
|
|
|
func (d *DNSServer) trimDomain(query string) string {
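// Strip whichever of d.domain / d.altDomain suffixes the query, preferring the
// longer suffix first so that, for example (hypothetical domains), a name under
// "alt.consul." is not prematurely trimmed by the shorter "consul." suffix.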
|
|
|
|
longer := d.domain
|
|
|
|
shorter := d.altDomain
|
|
|
|
|
|
|
|
if len(shorter) > len(longer) {
|
|
|
|
longer, shorter = shorter, longer
|
|
|
|
}
|
|
|
|
|
2023-06-26 14:57:11 +00:00
|
|
|
if strings.HasSuffix(query, "."+strings.TrimLeft(longer, ".")) {
|
2019-06-27 10:00:37 +00:00
|
|
|
return strings.TrimSuffix(query, longer)
|
|
|
|
}
|
|
|
|
return strings.TrimSuffix(query, shorter)
|
|
|
|
}
|
|
|
|
|
2021-07-16 16:34:27 +00:00
|
|
|
// rCodeFromError returns the appropriate DNS response code for a given error
|
2021-04-13 20:43:23 +00:00
|
|
|
func rCodeFromError(err error) int {
|
|
|
|
switch {
|
|
|
|
case err == nil:
|
2020-06-22 13:01:48 +00:00
|
|
|
return dns.RcodeSuccess
|
2021-07-16 16:34:27 +00:00
|
|
|
case errors.Is(err, errNoData):
|
2021-04-13 20:43:23 +00:00
|
|
|
return dns.RcodeSuccess
|
|
|
|
case errors.Is(err, errECSNotGlobal):
|
|
|
|
return rCodeFromError(errors.Unwrap(err))
|
|
|
|
case errors.Is(err, errNameNotFound):
|
2020-06-22 13:01:48 +00:00
|
|
|
return dns.RcodeNameError
|
2021-04-13 20:43:23 +00:00
|
|
|
case structs.IsErrNoDCPath(err) || structs.IsErrQueryNotFound(err):
|
|
|
|
return dns.RcodeNameError
|
|
|
|
default:
|
|
|
|
return dns.RcodeServerFailure
|
2020-06-22 13:01:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// handleNodeQuery is used to handle a node query
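// Node lookups take the form "<node>.node[.<datacenter>].<domain>", e.g.
// (illustrative) "db-1.node.dc1.consul" for A, AAAA, TXT, or ANY queries.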
|
|
|
|
func (d *DNSServer) handleNodeQuery(cfg *dnsConfig, lookup nodeLookup, req, resp *dns.Msg) error {
|
2017-08-10 04:43:24 +00:00
|
|
|
// Only handle ANY, A, AAAA, and TXT type requests
|
2014-01-03 01:58:58 +00:00
|
|
|
qType := req.Question[0].Qtype
|
2017-08-01 07:01:49 +00:00
|
|
|
if qType != dns.TypeANY && qType != dns.TypeA && qType != dns.TypeAAAA && qType != dns.TypeTXT {
|
2021-06-18 21:57:20 +00:00
|
|
|
return nil
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Make an RPC request
|
2019-02-25 19:06:01 +00:00
|
|
|
args := &structs.NodeSpecificRequest{
|
2022-06-10 18:23:51 +00:00
|
|
|
Datacenter: lookup.Datacenter,
|
2022-11-29 18:23:18 +00:00
|
|
|
PeerName: lookup.PeerName,
|
2022-06-10 18:23:51 +00:00
|
|
|
Node: lookup.Node,
|
2015-06-12 22:58:53 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
2023-09-20 21:50:06 +00:00
|
|
|
Token: d.coalesceDNSToken(),
|
2019-04-24 18:11:54 +00:00
|
|
|
AllowStale: cfg.AllowStale,
|
2015-06-12 22:58:53 +00:00
|
|
|
},
|
2022-06-10 18:23:51 +00:00
|
|
|
EnterpriseMeta: lookup.EnterpriseMeta,
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
2019-04-24 18:11:54 +00:00
|
|
|
out, err := d.lookupNode(cfg, args)
|
2019-02-25 19:06:01 +00:00
|
|
|
if err != nil {
|
2021-04-13 20:43:23 +00:00
|
|
|
return fmt.Errorf("failed rpc request: %w", err)
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
|
|
|
|
2019-08-05 15:19:18 +00:00
|
|
|
// If we have no NodeServices in the response, return not found!
|
2014-03-05 23:03:23 +00:00
|
|
|
if out.NodeServices == nil {
|
2021-04-13 20:43:23 +00:00
|
|
|
return errNameNotFound
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
|
|
|
|
2014-02-25 20:07:20 +00:00
|
|
|
// Add the node record
|
2016-06-15 18:02:51 +00:00
|
|
|
n := out.NodeServices.Node
|
2019-08-05 15:19:18 +00:00
|
|
|
|
|
|
|
metaTarget := &resp.Extra
|
|
|
|
if qType == dns.TypeTXT || qType == dns.TypeANY {
|
|
|
|
metaTarget = &resp.Answer
|
|
|
|
}
|
|
|
|
|
|
|
|
q := req.Question[0]
|
|
|
|
// Only compute A and CNAME records if the query is not of TXT type
|
|
|
|
if qType != dns.TypeTXT {
|
2022-06-10 18:23:51 +00:00
|
|
|
records := d.makeRecordFromNode(n, q.Qtype, q.Name, cfg.NodeTTL, lookup.MaxRecursionLevel)
|
2014-02-26 01:41:48 +00:00
|
|
|
resp.Answer = append(resp.Answer, records...)
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
2019-08-05 15:19:18 +00:00
|
|
|
|
|
|
|
if cfg.NodeMetaTXT || qType == dns.TypeTXT || qType == dns.TypeANY {
|
2024-01-12 22:07:42 +00:00
|
|
|
metas := d.makeTXTRecordFromNodeMeta(q.Name, n, cfg.NodeTTL)
|
2019-08-05 15:19:18 +00:00
|
|
|
*metaTarget = append(*metaTarget, metas...)
|
2018-07-09 15:41:58 +00:00
|
|
|
}
|
2021-04-13 20:43:23 +00:00
|
|
|
return nil
|
2014-02-25 20:07:20 +00:00
|
|
|
}
|
2014-01-03 01:58:58 +00:00
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// lookupNode is used to look up a node in the Consul catalog within NodeServices.
|
|
|
|
// If the config is set to UseCache, it will get the record from the agent cache.
|
2019-04-24 18:11:54 +00:00
|
|
|
func (d *DNSServer) lookupNode(cfg *dnsConfig, args *structs.NodeSpecificRequest) (*structs.IndexedNodeServices, error) {
|
2019-02-25 19:06:01 +00:00
|
|
|
var out structs.IndexedNodeServices
|
|
|
|
|
2019-04-24 18:11:54 +00:00
|
|
|
useCache := cfg.UseCache
|
2019-02-25 19:06:01 +00:00
|
|
|
RPC:
|
|
|
|
if useCache {
|
2020-06-15 15:01:25 +00:00
|
|
|
raw, _, err := d.agent.cache.Get(context.TODO(), cachetype.NodeServicesName, args)
|
2019-02-25 19:06:01 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
reply, ok := raw.(*structs.IndexedNodeServices)
|
|
|
|
if !ok {
|
|
|
|
// This should never happen, but we want to protect against panics
|
|
|
|
return nil, fmt.Errorf("internal error: response type not correct")
|
|
|
|
}
|
|
|
|
out = *reply
|
|
|
|
} else {
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := d.agent.RPC(context.Background(), "Catalog.NodeServices", &args, &out); err != nil {
|
2019-02-25 19:06:01 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify that the request is not too stale; if it is, redo the request
|
|
|
|
if args.AllowStale {
|
2019-04-24 18:11:54 +00:00
|
|
|
if out.LastContact > cfg.MaxStale {
|
2019-02-25 19:06:01 +00:00
|
|
|
args.AllowStale = false
|
|
|
|
useCache = false
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("Query results too stale, re-requesting")
|
2019-02-25 19:06:01 +00:00
|
|
|
goto RPC
|
|
|
|
} else if out.LastContact > staleCounterThreshold {
|
|
|
|
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &out, nil
|
|
|
|
}
|
|
|
|
|
2017-08-10 05:00:06 +00:00
|
|
|
// encodeKVasRFC1464 encodes a key-value pair according to RFC1464
|
2017-08-10 21:55:50 +00:00
|
|
|
func encodeKVasRFC1464(key, value string) (txt string) {
|
2017-08-11 01:37:17 +00:00
|
|
|
// For details on these replacements c.f. https://www.ietf.org/rfc/rfc1464.txt
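// For example (illustrative pair): key "build=meta" with value "1 2" is
// emitted as "build`=meta=1 2", i.e. the embedded "=" in the key is backquoted.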
|
|
|
|
key = strings.Replace(key, "`", "``", -1)
|
|
|
|
key = strings.Replace(key, "=", "`=", -1)
|
|
|
|
|
|
|
|
// Backquote the leading spaces
|
|
|
|
leadingSpacesRE := regexp.MustCompile("^ +")
|
|
|
|
numLeadingSpaces := len(leadingSpacesRE.FindString(key))
|
|
|
|
key = leadingSpacesRE.ReplaceAllString(key, strings.Repeat("` ", numLeadingSpaces))
|
|
|
|
|
|
|
|
// Backquote the trailing spaces
|
|
|
|
trailingSpacesRE := regexp.MustCompile(" +$")
|
|
|
|
numTrailingSpaces := len(trailingSpacesRE.FindString(key))
|
|
|
|
key = trailingSpacesRE.ReplaceAllString(key, strings.Repeat("` ", numTrailingSpaces))
|
|
|
|
|
|
|
|
value = strings.Replace(value, "`", "``", -1)
|
|
|
|
|
|
|
|
return key + "=" + value
|
2017-08-01 07:01:49 +00:00
|
|
|
}
|
|
|
|
|
2016-08-12 21:51:50 +00:00
|
|
|
// indexRRs populates a map which indexes a given list of RRs by name. NOTE that
|
2016-08-12 19:16:21 +00:00
|
|
|
// the names are all squashed to lower case so we can perform case-insensitive
|
|
|
|
// lookups; the RRs are not modified.
|
2016-08-12 21:51:50 +00:00
|
|
|
func indexRRs(rrs []dns.RR, index map[string]dns.RR) {
|
2016-08-12 04:46:14 +00:00
|
|
|
for _, rr := range rrs {
|
2016-08-12 19:16:21 +00:00
|
|
|
name := strings.ToLower(rr.Header().Name)
|
2016-08-12 04:46:14 +00:00
|
|
|
if _, ok := index[name]; !ok {
|
|
|
|
index[name] = rr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// syncExtra takes a DNS response message and sets the extra data to the most
|
|
|
|
// minimal set needed to cover the answer data. A pre-made index of RRs is given
|
|
|
|
// so that can be re-used between calls. This assumes that the extra data is
|
|
|
|
// only used to provide info for SRV records. If that's not the case, then this
|
|
|
|
// will wipe out any additional data.
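// Note that the RESOLVE loop below also follows CNAME chains, so for every SRV
// target the whole chain of CNAME and address records is carried into Extra.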
|
|
|
|
func syncExtra(index map[string]dns.RR, resp *dns.Msg) {
|
|
|
|
extra := make([]dns.RR, 0, len(resp.Answer))
|
2016-08-12 05:01:23 +00:00
|
|
|
resolved := make(map[string]struct{}, len(resp.Answer))
|
2016-08-12 04:46:14 +00:00
|
|
|
for _, ansRR := range resp.Answer {
|
|
|
|
srv, ok := ansRR.(*dns.SRV)
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
2016-08-12 19:16:21 +00:00
|
|
|
|
|
|
|
// Note that we always use lower case when using the index so
|
|
|
|
// that compares are not case-sensitive. We don't alter the actual
|
|
|
|
// RRs we add into the extra section, however.
|
|
|
|
target := strings.ToLower(srv.Target)
|
2016-08-12 04:46:14 +00:00
|
|
|
|
|
|
|
RESOLVE:
|
2016-08-12 05:01:23 +00:00
|
|
|
if _, ok := resolved[target]; ok {
|
2016-08-12 04:46:14 +00:00
|
|
|
continue
|
|
|
|
}
|
2016-08-12 05:01:23 +00:00
|
|
|
resolved[target] = struct{}{}
|
2016-08-12 04:46:14 +00:00
|
|
|
|
|
|
|
extraRR, ok := index[target]
|
|
|
|
if ok {
|
|
|
|
extra = append(extra, extraRR)
|
|
|
|
if cname, ok := extraRR.(*dns.CNAME); ok {
|
2016-08-12 19:16:21 +00:00
|
|
|
target = strings.ToLower(cname.Target)
|
2016-08-12 04:46:14 +00:00
|
|
|
goto RESOLVE
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
resp.Extra = extra
|
|
|
|
}
|
|
|
|
|
2018-04-16 23:10:52 +00:00
|
|
|
// dnsBinaryTruncate finds the optimal number of records using a fast binary search and returns
|
|
|
|
// it so that the DNS answer stays below the maxSize parameter.
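// The search below maintains the invariant that resp.Answer[:startIndex] fits
// within maxSize while resp.Answer[:endIndex] does not (or endIndex is past the end).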
|
2018-04-16 22:50:00 +00:00
|
|
|
func dnsBinaryTruncate(resp *dns.Msg, maxSize int, index map[string]dns.RR, hasExtra bool) int {
|
|
|
|
originalAnswer := resp.Answer
|
|
|
|
startIndex := 0
|
2018-04-17 07:31:30 +00:00
|
|
|
endIndex := len(resp.Answer) + 1
|
2018-04-16 22:50:00 +00:00
|
|
|
for endIndex-startIndex > 1 {
|
|
|
|
median := startIndex + (endIndex-startIndex)/2
|
|
|
|
|
|
|
|
resp.Answer = originalAnswer[:median]
|
|
|
|
if hasExtra {
|
|
|
|
syncExtra(index, resp)
|
|
|
|
}
|
|
|
|
aLen := resp.Len()
|
|
|
|
if aLen <= maxSize {
|
|
|
|
if maxSize-aLen < 10 {
|
|
|
|
// Close enough to maxSize; adding more records would go over the limit
|
|
|
|
return median
|
|
|
|
}
|
|
|
|
startIndex = median
|
|
|
|
} else {
|
|
|
|
endIndex = median
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return startIndex
|
|
|
|
}
|
|
|
|
|
2018-03-07 09:01:12 +00:00
|
|
|
// trimTCPResponse limits the maximum size of messages to 64k, as that is the limit
|
|
|
|
// of DNS responses
|
2021-07-15 20:49:23 +00:00
|
|
|
func trimTCPResponse(req, resp *dns.Msg) (trimmed bool) {
|
2018-03-07 09:01:12 +00:00
|
|
|
hasExtra := len(resp.Extra) > 0
|
2018-03-07 15:14:36 +00:00
|
|
|
// There is some overhead, 65535 does not work
|
2018-05-16 10:47:35 +00:00
|
|
|
maxSize := 65523 // 64k - 12 bytes DNS raw overhead
|
2018-03-07 09:01:12 +00:00
|
|
|
|
|
|
|
// We avoid some function calls and allocations by only handling the
|
|
|
|
// extra data when necessary.
|
|
|
|
var index map[string]dns.RR
|
2018-03-07 23:26:41 +00:00
|
|
|
|
2018-05-31 16:15:52 +00:00
|
|
|
// It is not possible to return more than 4k records even with compression
|
2018-06-11 15:49:04 +00:00
|
|
|
// Since we are performing a binary search it is not a big deal, but it
|
|
|
|
// still improves performance a bit
|
2018-05-16 10:11:49 +00:00
|
|
|
truncateAt := 4096
|
2018-03-09 17:25:29 +00:00
|
|
|
if req.Question[0].Qtype == dns.TypeSRV {
|
2018-05-31 16:15:52 +00:00
|
|
|
// More than 1024 SRV records do not fit in 64k
|
2018-05-16 10:11:49 +00:00
|
|
|
truncateAt = 1024
|
2018-03-09 17:25:29 +00:00
|
|
|
}
|
|
|
|
if len(resp.Answer) > truncateAt {
|
|
|
|
resp.Answer = resp.Answer[:truncateAt]
|
2018-03-07 23:26:41 +00:00
|
|
|
}
|
2018-03-07 09:01:12 +00:00
|
|
|
if hasExtra {
|
|
|
|
index = make(map[string]dns.RR, len(resp.Extra))
|
|
|
|
indexRRs(resp.Extra, index)
|
|
|
|
}
|
|
|
|
truncated := false
|
|
|
|
|
|
|
|
// This enforces the 64k limit, the maximum size of a DNS message
|
2018-05-16 10:47:35 +00:00
|
|
|
for len(resp.Answer) > 1 && resp.Len() > maxSize {
|
2018-03-07 09:01:12 +00:00
|
|
|
truncated = true
|
2021-07-15 20:49:23 +00:00
|
|
|
// First try to remove the NS section; maybe that will truncate enough
|
|
|
|
if len(resp.Ns) != 0 {
|
|
|
|
resp.Ns = []dns.RR{}
|
|
|
|
}
|
2018-04-16 22:50:00 +00:00
|
|
|
// More than 100 bytes over the limit, find the cut-off with a binary search
|
|
|
|
if resp.Len()-maxSize > 100 {
|
|
|
|
bestIndex := dnsBinaryTruncate(resp, maxSize, index, hasExtra)
|
|
|
|
resp.Answer = resp.Answer[:bestIndex]
|
|
|
|
} else {
|
|
|
|
resp.Answer = resp.Answer[:len(resp.Answer)-1]
|
|
|
|
}
|
2018-03-07 09:01:12 +00:00
|
|
|
if hasExtra {
|
|
|
|
syncExtra(index, resp)
|
|
|
|
}
|
|
|
|
}
|
2021-07-15 20:49:23 +00:00
|
|
|
|
2018-03-07 09:01:12 +00:00
|
|
|
return truncated
|
|
|
|
}
|
|
|
|
|
2016-08-12 04:46:14 +00:00
|
|
|
// trimUDPResponse makes sure a UDP response is not longer than allowed by RFC
|
2016-08-11 23:24:44 +00:00
|
|
|
// 1035. Enforce an arbitrary limit that can be further ratcheted down by
|
2016-08-12 04:46:14 +00:00
|
|
|
// config, and then make sure the response doesn't exceed 512 bytes. Any extra
|
|
|
|
// records will be trimmed along with answers.
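// For example, a client advertising a 4096-byte EDNS0 buffer is trimmed until
// the payload fits in 4096-8 bytes (the UDP-header allowance below), while a
// plain 512-byte client is first capped to the configured UDP answer limit.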
|
2017-09-25 18:40:42 +00:00
|
|
|
func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
|
2016-02-18 00:54:28 +00:00
|
|
|
numAnswers := len(resp.Answer)
|
2016-08-12 17:29:57 +00:00
|
|
|
hasExtra := len(resp.Extra) > 0
|
2017-06-14 23:22:54 +00:00
|
|
|
maxSize := defaultMaxUDPSize
|
|
|
|
|
|
|
|
// Update to the maximum edns size
|
|
|
|
if edns := req.IsEdns0(); edns != nil {
|
|
|
|
if size := edns.UDPSize(); size > uint16(maxSize) {
|
|
|
|
maxSize = int(size)
|
|
|
|
}
|
|
|
|
}
|
2022-09-30 04:44:45 +00:00
|
|
|
// Overriding maxSize as the maxSize cannot be larger than the
|
|
|
|
// maxUDPDatagramSize; reliability guarantees disappear above that amount.
|
|
|
|
if maxSize > maxUDPDatagramSize {
|
|
|
|
maxSize = maxUDPDatagramSize
|
|
|
|
}
|
2016-08-12 17:29:57 +00:00
|
|
|
|
|
|
|
// We avoid some function calls and allocations by only handling the
|
|
|
|
// extra data when necessary.
|
|
|
|
var index map[string]dns.RR
|
|
|
|
if hasExtra {
|
2016-08-12 21:51:50 +00:00
|
|
|
index = make(map[string]dns.RR, len(resp.Extra))
|
|
|
|
indexRRs(resp.Extra, index)
|
2016-08-12 17:29:57 +00:00
|
|
|
}
|
2016-02-18 00:54:28 +00:00
|
|
|
|
2016-03-07 18:37:54 +00:00
|
|
|
// This cuts UDP responses down to a useful but limited number of records.
|
2017-09-25 18:40:42 +00:00
|
|
|
maxAnswers := lib.MinInt(maxUDPAnswerLimit, udpAnswerLimit)
|
2018-05-16 09:00:51 +00:00
|
|
|
compress := resp.Compress
|
2017-06-14 23:22:54 +00:00
|
|
|
if maxSize == defaultMaxUDPSize && numAnswers > maxAnswers {
|
2018-05-16 09:00:51 +00:00
|
|
|
// We disable compression for the Len computation ONLY for non-EDNS requests (512 bytes)
|
|
|
|
resp.Compress = false
|
2016-03-30 02:27:02 +00:00
|
|
|
resp.Answer = resp.Answer[:maxAnswers]
|
2016-08-12 17:29:57 +00:00
|
|
|
if hasExtra {
|
|
|
|
syncExtra(index, resp)
|
|
|
|
}
|
2016-02-18 00:54:28 +00:00
|
|
|
}
|
|
|
|
|
2017-06-14 23:22:54 +00:00
|
|
|
// This enforces the given limit on the number of bytes. The default is 512 as
|
|
|
|
// per the RFC, but EDNS0 allows for the user to specify larger sizes. Note
|
|
|
|
// that we temporarily switch to uncompressed so that we limit to a response
|
|
|
|
// that will not exceed 512 bytes uncompressed, which is more conservative and
|
|
|
|
// will allow our responses to be compliant even if some downstream server
|
|
|
|
// uncompresses them.
|
2018-05-16 09:00:51 +00:00
|
|
|
// Even when size is too big for one single record, try to send it anyway
|
2022-09-30 04:44:45 +00:00
|
|
|
// (useful for 512 bytes messages). 8 is removed from maxSize to ensure that we account
|
|
|
|
// for the udp header (8 bytes).
|
|
|
|
for len(resp.Answer) > 1 && resp.Len() > maxSize-8 {
|
2021-07-15 20:49:23 +00:00
|
|
|
// First try to remove the NS section; maybe that will truncate enough
|
|
|
|
if len(resp.Ns) != 0 {
|
|
|
|
resp.Ns = []dns.RR{}
|
|
|
|
}
|
2018-04-16 22:50:00 +00:00
|
|
|
// More than 100 bytes over the limit, find the cut-off with a binary search
|
|
|
|
if resp.Len()-maxSize > 100 {
|
|
|
|
bestIndex := dnsBinaryTruncate(resp, maxSize, index, hasExtra)
|
|
|
|
resp.Answer = resp.Answer[:bestIndex]
|
|
|
|
} else {
|
|
|
|
resp.Answer = resp.Answer[:len(resp.Answer)-1]
|
|
|
|
}
|
2016-08-12 17:29:57 +00:00
|
|
|
if hasExtra {
|
|
|
|
syncExtra(index, resp)
|
|
|
|
}
|
2016-02-18 00:54:28 +00:00
|
|
|
}
|
2018-05-16 09:00:51 +00:00
|
|
|
// For 512 non-eDNS responses, while we compute size non-compressed,
|
|
|
|
// we send result compressed
|
2016-08-11 23:24:44 +00:00
|
|
|
resp.Compress = compress
|
2016-03-30 02:52:31 +00:00
|
|
|
return len(resp.Answer) < numAnswers
|
2016-02-18 00:54:28 +00:00
|
|
|
}
|
|
|
|
|
2018-03-07 09:01:12 +00:00
|
|
|
// trimDNSResponse will trim the response for UDP and TCP
|
2020-06-23 17:43:24 +00:00
|
|
|
func (d *DNSServer) trimDNSResponse(cfg *dnsConfig, network string, req, resp *dns.Msg) {
|
|
|
|
var trimmed bool
|
2021-07-15 20:49:23 +00:00
|
|
|
originalSize := resp.Len()
|
|
|
|
originalNumRecords := len(resp.Answer)
|
2018-03-07 09:01:12 +00:00
|
|
|
if network != "tcp" {
|
2019-04-24 18:11:54 +00:00
|
|
|
trimmed = trimUDPResponse(req, resp, cfg.UDPAnswerLimit)
|
2018-03-07 09:01:12 +00:00
|
|
|
} else {
|
2021-07-15 20:49:23 +00:00
|
|
|
trimmed = trimTCPResponse(req, resp)
|
2018-03-07 09:01:12 +00:00
|
|
|
}
|
|
|
|
// Flag that there are more records to return in the response
|
2021-07-15 20:49:23 +00:00
|
|
|
if trimmed {
|
|
|
|
if cfg.EnableTruncate {
|
|
|
|
resp.Truncated = true
|
|
|
|
}
|
|
|
|
d.logger.Debug("DNS response too large, truncated",
|
|
|
|
"protocol", network,
|
|
|
|
"question", req.Question,
|
|
|
|
"records", fmt.Sprintf("%d/%d", len(resp.Answer), originalNumRecords),
|
|
|
|
"size", fmt.Sprintf("%d/%d", resp.Len(), originalSize),
|
|
|
|
)
|
2018-03-07 09:01:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// lookupServiceNodes is used to look up a node in the Consul health catalog within ServiceNodes.
|
|
|
|
// If the config is set to UseCache, it will get the record from the agent cache.
|
2020-04-16 21:00:48 +00:00
|
|
|
func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, lookup serviceLookup) (structs.IndexedCheckServiceNodes, error) {
|
2021-02-10 09:34:09 +00:00
|
|
|
serviceTags := []string{}
|
|
|
|
if lookup.Tag != "" {
|
|
|
|
serviceTags = []string{lookup.Tag}
|
|
|
|
}
|
2014-01-08 23:13:27 +00:00
|
|
|
args := structs.ServiceSpecificRequest{
|
2022-11-29 18:23:18 +00:00
|
|
|
PeerName: lookup.PeerName,
|
2020-04-16 21:00:48 +00:00
|
|
|
Connect: lookup.Connect,
|
|
|
|
Ingress: lookup.Ingress,
|
|
|
|
Datacenter: lookup.Datacenter,
|
|
|
|
ServiceName: lookup.Service,
|
2021-02-10 09:34:09 +00:00
|
|
|
ServiceTags: serviceTags,
|
2020-04-16 21:00:48 +00:00
|
|
|
TagFilter: lookup.Tag != "",
|
2015-06-12 22:58:53 +00:00
|
|
|
QueryOptions: structs.QueryOptions{
|
2023-09-20 21:50:06 +00:00
|
|
|
Token: d.coalesceDNSToken(),
|
2020-09-30 21:38:13 +00:00
|
|
|
AllowStale: cfg.AllowStale,
|
|
|
|
MaxAge: cfg.CacheMaxAge,
|
|
|
|
UseCache: cfg.UseCache,
|
|
|
|
MaxStaleDuration: cfg.MaxStale,
|
2015-06-12 22:58:53 +00:00
|
|
|
},
|
2020-04-16 21:00:48 +00:00
|
|
|
EnterpriseMeta: lookup.EnterpriseMeta,
|
2019-12-10 02:26:41 +00:00
|
|
|
}
|
|
|
|
|
2020-09-30 21:38:13 +00:00
|
|
|
out, _, err := d.agent.rpcClientHealth.ServiceNodes(context.TODO(), args)
|
|
|
|
if err != nil {
|
|
|
|
return out, err
|
2014-06-08 22:49:24 +00:00
|
|
|
}
|
|
|
|
|
2014-01-15 21:30:04 +00:00
|
|
|
// Filter out any service nodes that fail health checks
|
2019-03-04 14:22:01 +00:00
|
|
|
// We copy the slice to avoid modifying the result if it comes from the cache
|
|
|
|
nodes := make(structs.CheckServiceNodes, len(out.Nodes))
|
|
|
|
copy(nodes, out.Nodes)
|
2019-04-24 18:11:54 +00:00
|
|
|
out.Nodes = nodes.Filter(cfg.OnlyPassing)
|
2017-08-21 12:05:39 +00:00
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// handleServiceQuery is used to handle a service query
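// Service lookups arrive as "[<tag>.]<service>.service[.<datacenter>].<domain>"
// or, in RFC 2782 form, "_<service>._<tag>.service...", e.g. (illustrative)
// "web.service.consul" or "_web._tcp.service.consul".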
|
|
|
|
func (d *DNSServer) handleServiceQuery(cfg *dnsConfig, lookup serviceLookup, req, resp *dns.Msg) error {
|
2020-04-16 21:00:48 +00:00
|
|
|
out, err := d.lookupServiceNodes(cfg, lookup)
|
2017-08-21 12:05:39 +00:00
|
|
|
if err != nil {
|
2021-04-13 20:43:23 +00:00
|
|
|
return fmt.Errorf("rpc request failed: %w", err)
|
2017-08-21 12:05:39 +00:00
|
|
|
}
|
|
|
|
|
2015-07-29 21:16:48 +00:00
|
|
|
// If we have no nodes, return not found!
|
|
|
|
if len(out.Nodes) == 0 {
|
2021-04-13 20:43:23 +00:00
|
|
|
return errNameNotFound
|
2015-07-29 21:16:48 +00:00
|
|
|
}
|
|
|
|
|
2014-02-14 20:26:51 +00:00
|
|
|
// Perform a random shuffle
|
2015-11-07 01:02:05 +00:00
|
|
|
out.Nodes.Shuffle()
|
2014-02-14 20:26:51 +00:00
|
|
|
|
2017-08-21 08:48:01 +00:00
|
|
|
// Determine the TTL
|
2020-04-16 21:00:48 +00:00
|
|
|
ttl, _ := cfg.GetTTLForService(lookup.Service)
|
2017-08-21 08:48:01 +00:00
|
|
|
|
2014-01-03 21:00:03 +00:00
|
|
|
// Add various responses depending on the request
|
|
|
|
qType := req.Question[0].Qtype
|
2014-02-26 01:41:48 +00:00
|
|
|
if qType == dns.TypeSRV {
|
2024-01-12 22:07:42 +00:00
|
|
|
d.addServiceSRVRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, lookup.MaxRecursionLevel)
|
2016-08-12 04:46:14 +00:00
|
|
|
} else {
|
2024-01-12 22:07:42 +00:00
|
|
|
d.addServiceNodeRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, lookup.MaxRecursionLevel)
|
2014-01-03 21:00:03 +00:00
|
|
|
}
|
2015-08-25 20:37:33 +00:00
|
|
|
|
2021-04-13 21:48:29 +00:00
|
|
|
if len(resp.Answer) == 0 {
|
2021-07-16 16:34:27 +00:00
|
|
|
return errNoData
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
2021-04-13 20:43:23 +00:00
|
|
|
return nil
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
|
|
|
|
2018-04-13 16:57:25 +00:00
|
|
|
func ednsSubnetForRequest(req *dns.Msg) *dns.EDNS0_SUBNET {
|
2018-04-12 14:40:46 +00:00
|
|
|
// IsEdns0 returns the EDNS RR if present or nil otherwise
|
2018-04-10 18:50:50 +00:00
|
|
|
edns := req.IsEdns0()
|
2018-04-13 16:57:25 +00:00
|
|
|
|
2018-04-10 18:50:50 +00:00
|
|
|
if edns == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2018-04-13 16:57:25 +00:00
|
|
|
|
2018-04-10 18:50:50 +00:00
|
|
|
for _, o := range edns.Option {
|
|
|
|
if subnet, ok := o.(*dns.EDNS0_SUBNET); ok {
|
|
|
|
return subnet
|
|
|
|
}
|
|
|
|
}
|
2018-04-13 16:57:25 +00:00
|
|
|
|
|
|
|
return nil
|
2018-04-10 18:50:50 +00:00
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// handlePreparedQuery is used to handle a prepared query.
|
|
|
|
func (d *DNSServer) handlePreparedQuery(cfg *dnsConfig, datacenter, query string, remoteAddr net.Addr, req, resp *dns.Msg, maxRecursionLevel int) error {
|
2015-11-12 17:28:05 +00:00
|
|
|
// Execute the prepared query.
|
|
|
|
args := structs.PreparedQueryExecuteRequest{
|
|
|
|
Datacenter: datacenter,
|
|
|
|
QueryIDOrName: query,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
2023-09-20 21:50:06 +00:00
|
|
|
Token: d.coalesceDNSToken(),
|
2019-04-24 18:11:54 +00:00
|
|
|
AllowStale: cfg.AllowStale,
|
|
|
|
MaxAge: cfg.CacheMaxAge,
|
2015-11-12 17:28:05 +00:00
|
|
|
},
|
2016-06-30 19:11:48 +00:00
|
|
|
|
2016-06-30 23:51:18 +00:00
|
|
|
// Always pass the local agent through. In the DNS interface, there
|
|
|
|
// is no provision for passing additional query parameters, so we
|
|
|
|
// send the local agent's data through to allow distance sorting
|
|
|
|
// relative to ourself on the server side.
|
|
|
|
Agent: structs.QuerySource{
|
2021-08-19 20:09:42 +00:00
|
|
|
Datacenter: d.agent.config.Datacenter,
|
|
|
|
Segment: d.agent.config.SegmentName,
|
|
|
|
Node: d.agent.config.NodeName,
|
|
|
|
NodePartition: d.agent.config.PartitionOrEmpty(),
|
2016-06-30 19:11:48 +00:00
|
|
|
},
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
2018-04-13 16:57:25 +00:00
|
|
|
|
2018-04-10 18:50:50 +00:00
|
|
|
subnet := ednsSubnetForRequest(req)
|
2018-04-13 16:57:25 +00:00
|
|
|
|
2018-04-10 18:50:50 +00:00
|
|
|
if subnet != nil {
|
|
|
|
args.Source.Ip = subnet.Address.String()
|
2018-04-11 21:02:04 +00:00
|
|
|
} else {
|
|
|
|
switch v := remoteAddr.(type) {
|
2018-04-13 16:57:25 +00:00
|
|
|
case *net.UDPAddr:
|
|
|
|
args.Source.Ip = v.IP.String()
|
|
|
|
case *net.TCPAddr:
|
|
|
|
args.Source.Ip = v.IP.String()
|
|
|
|
case *net.IPAddr:
|
|
|
|
args.Source.Ip = v.IP.String()
|
2018-04-11 21:02:04 +00:00
|
|
|
}
|
2018-04-10 18:50:50 +00:00
|
|
|
}
|
2015-11-12 17:28:05 +00:00
|
|
|
|
2019-04-24 18:11:54 +00:00
|
|
|
out, err := d.lookupPreparedQuery(cfg, args)
|
2020-06-22 13:01:48 +00:00
|
|
|
if err != nil {
|
2021-04-13 20:43:23 +00:00
|
|
|
return err
|
2019-02-25 19:06:01 +00:00
|
|
|
}
|
|
|
|
|
2015-11-13 11:39:07 +00:00
|
|
|
// TODO (slackpad) - What's a safe limit we can set here? It seems like
|
|
|
|
// with dup filtering done at this level we need to get everything to
|
|
|
|
// match the previous behavior. We can optimize by pushing more filtering
|
|
|
|
// into the query execution, but for now I think we need to get the full
|
2015-11-14 01:18:15 +00:00
|
|
|
// response. We could also choose a large arbitrary number that will
|
2016-02-12 07:58:48 +00:00
|
|
|
// likely work in practice, like 10*maxUDPAnswerLimit which should help
|
2015-11-14 01:18:15 +00:00
|
|
|
// reduce bandwidth if there are thousands of nodes available.
|
2015-11-12 17:28:05 +00:00
|
|
|
|
|
|
|
// Determine the TTL. The parse should never fail since we vet it when
|
2015-11-13 18:38:44 +00:00
|
|
|
// the query is created, but we check anyway. If the query didn't
|
|
|
|
// specify a TTL then we will try to use the agent's service-specific
|
|
|
|
// TTL configs.
|
2015-11-12 17:28:05 +00:00
|
|
|
var ttl time.Duration
|
|
|
|
if out.DNS.TTL != "" {
|
|
|
|
var err error
|
|
|
|
ttl, err = time.ParseDuration(out.DNS.TTL)
|
|
|
|
if err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("Failed to parse TTL for prepared query , ignoring",
|
|
|
|
"ttl", out.DNS.TTL,
|
|
|
|
"prepared_query", query,
|
|
|
|
)
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
2019-04-24 18:11:54 +00:00
|
|
|
} else {
|
|
|
|
ttl, _ = cfg.GetTTLForService(out.Service)
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// If we have no nodes, return not found!
|
|
|
|
if len(out.Nodes) == 0 {
|
2021-04-13 20:43:23 +00:00
|
|
|
return errNameNotFound
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Add various responses depending on the request.
|
|
|
|
qType := req.Question[0].Qtype
|
2022-11-29 18:23:18 +00:00
|
|
|
|
|
|
|
// This serviceLookup only needs the datacenter field populated,
|
|
|
|
// because peering is not supported with prepared queries.
|
|
|
|
lookup := serviceLookup{Datacenter: out.Datacenter}
|
2016-08-13 00:26:23 +00:00
|
|
|
if qType == dns.TypeSRV {
|
2024-01-12 22:07:42 +00:00
|
|
|
d.addServiceSRVRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, maxRecursionLevel)
|
2016-08-12 04:46:14 +00:00
|
|
|
} else {
|
2024-01-12 22:07:42 +00:00
|
|
|
d.addServiceNodeRecordsToMessage(cfg, lookup, out.Nodes, req, resp, ttl, maxRecursionLevel)
|
2015-11-12 17:28:05 +00:00
|
|
|
}
|
|
|
|
|
2021-04-13 21:48:29 +00:00
|
|
|
if len(resp.Answer) == 0 {
|
2021-07-16 16:34:27 +00:00
|
|
|
return errNoData
|
2015-08-25 20:37:33 +00:00
|
|
|
}
|
2021-04-13 20:43:23 +00:00
|
|
|
return nil
|
2014-01-03 21:00:03 +00:00
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// lookupPreparedQuery is used to execute a PreparedQuery against the Consul catalog.
|
|
|
|
// If the config is set to UseCache, it will use the agent cache.
|
2019-04-24 18:11:54 +00:00
|
|
|
func (d *DNSServer) lookupPreparedQuery(cfg *dnsConfig, args structs.PreparedQueryExecuteRequest) (*structs.PreparedQueryExecuteResponse, error) {
|
2019-02-25 19:06:01 +00:00
|
|
|
var out structs.PreparedQueryExecuteResponse
|
|
|
|
|
|
|
|
RPC:
|
2019-04-24 18:11:54 +00:00
|
|
|
if cfg.UseCache {
|
2020-06-15 15:01:25 +00:00
|
|
|
raw, m, err := d.agent.cache.Get(context.TODO(), cachetype.PreparedQueryName, &args)
|
2019-02-25 19:06:01 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
reply, ok := raw.(*structs.PreparedQueryExecuteResponse)
|
|
|
|
if !ok {
|
|
|
|
// This should never happen, but we want to protect against panics
|
|
|
|
return nil, fmt.Errorf("internal error: response type not correct")
|
|
|
|
}
|
|
|
|
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Trace("cache results for prepared query",
|
|
|
|
"cache_hit", m.Hit,
|
|
|
|
"prepared_query", args.QueryIDOrName,
|
|
|
|
)
|
2019-02-25 19:06:01 +00:00
|
|
|
|
|
|
|
out = *reply
|
|
|
|
} else {
|
2022-12-14 15:24:22 +00:00
|
|
|
if err := d.agent.RPC(context.Background(), "PreparedQuery.Execute", &args, &out); err != nil {
|
2019-02-25 19:06:01 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify that the request is not too stale; if it is, redo the request.
|
|
|
|
if args.AllowStale {
|
2019-04-24 18:11:54 +00:00
|
|
|
if out.LastContact > cfg.MaxStale {
|
2019-02-25 19:06:01 +00:00
|
|
|
args.AllowStale = false
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("Query results too stale, re-requesting")
|
2019-02-25 19:06:01 +00:00
|
|
|
goto RPC
|
|
|
|
} else if out.LastContact > staleCounterThreshold {
|
|
|
|
metrics.IncrCounter([]string{"dns", "stale_queries"}, 1)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &out, nil
|
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// addServiceNodeRecordsToMessage is used to add the node records for a service lookup
|
|
|
|
func (d *DNSServer) addServiceNodeRecordsToMessage(cfg *dnsConfig, lookup serviceLookup, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration, maxRecursionLevel int) {
|
2014-01-06 22:56:41 +00:00
|
|
|
handled := make(map[string]struct{})
|
2018-07-03 15:04:19 +00:00
|
|
|
var answerCNAME []dns.RR = nil
|
2016-02-12 07:58:48 +00:00
|
|
|
|
2018-03-06 01:07:42 +00:00
|
|
|
count := 0
|
2014-01-03 21:00:03 +00:00
|
|
|
for _, node := range nodes {
|
2020-01-17 14:54:17 +00:00
|
|
|
// Add the node record
|
|
|
|
hadAnswer := false
|
2024-01-12 22:07:42 +00:00
|
|
|
records, _ := d.makeNodeServiceRecords(lookup, node, req, ttl, cfg, maxRecursionLevel)
|
2020-01-17 14:54:17 +00:00
|
|
|
if len(records) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
2017-05-29 14:08:54 +00:00
|
|
|
|
2015-12-22 11:31:40 +00:00
|
|
|
// Avoid duplicate entries, possible if a node has
|
|
|
|
// the same service on multiple ports, etc.
|
2020-01-17 14:54:17 +00:00
|
|
|
if _, ok := handled[records[0].String()]; ok {
|
2014-01-06 22:56:41 +00:00
|
|
|
continue
|
|
|
|
}
|
2020-01-17 14:54:17 +00:00
|
|
|
handled[records[0].String()] = struct{}{}
|
2014-01-06 22:56:41 +00:00
|
|
|
|
2021-05-25 02:22:37 +00:00
|
|
|
switch records[0].(type) {
|
|
|
|
case *dns.CNAME:
|
|
|
|
// keep track of the first CNAME + associated RRs but don't add to the resp.Answer yet
|
|
|
|
// this will only be added if no non-CNAME RRs are found
|
|
|
|
if len(answerCNAME) == 0 {
|
|
|
|
answerCNAME = records
|
2018-07-10 14:26:45 +00:00
|
|
|
}
|
2021-05-25 02:22:37 +00:00
|
|
|
default:
|
|
|
|
resp.Answer = append(resp.Answer, records...)
|
|
|
|
hadAnswer = true
|
2018-07-09 15:41:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if hadAnswer {
|
2018-03-06 01:07:42 +00:00
|
|
|
count++
|
2019-04-24 18:11:54 +00:00
|
|
|
if count == cfg.ARecordLimit {
|
2018-03-06 01:07:42 +00:00
|
|
|
// Stop once the configured A-record limit is reached (a limit of 0 is never hit, i.e. unlimited)
|
|
|
|
return
|
|
|
|
}
|
2014-01-03 21:00:03 +00:00
|
|
|
}
|
|
|
|
}
|
2018-07-03 15:04:19 +00:00
|
|
|
|
|
|
|
if len(resp.Answer) == 0 && len(answerCNAME) > 0 {
|
|
|
|
resp.Answer = answerCNAME
|
|
|
|
}
|
2014-01-03 21:00:03 +00:00
|
|
|
}
|
|
|
|
|
2018-09-07 14:30:47 +00:00
|
|
|
func findWeight(node structs.CheckServiceNode) int {
|
|
|
|
// By default, when only_passing is false, warning and passing nodes are returned
|
|
|
|
// Those defaults are used when this agent supports weights but the server has no
|
|
|
|
// support for weights
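// For example, a service registered with Weights{Passing: 10, Warning: 1}
// whose aggregated check status is "warning" yields an SRV weight of 1.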
|
|
|
|
weightPassing := 1
|
|
|
|
weightWarning := 1
|
|
|
|
if node.Service.Weights != nil {
|
|
|
|
weightPassing = node.Service.Weights.Passing
|
|
|
|
weightWarning = node.Service.Weights.Warning
|
|
|
|
}
|
|
|
|
serviceChecks := make(api.HealthChecks, 0)
|
|
|
|
for _, c := range node.Checks {
|
|
|
|
if c.ServiceName == node.Service.Service || c.ServiceName == "" {
|
|
|
|
healthCheck := &api.HealthCheck{
|
|
|
|
Node: c.Node,
|
|
|
|
CheckID: string(c.CheckID),
|
|
|
|
Name: c.Name,
|
|
|
|
Status: c.Status,
|
|
|
|
Notes: c.Notes,
|
|
|
|
Output: c.Output,
|
|
|
|
ServiceID: c.ServiceID,
|
|
|
|
ServiceName: c.ServiceName,
|
|
|
|
ServiceTags: c.ServiceTags,
|
|
|
|
}
|
|
|
|
serviceChecks = append(serviceChecks, healthCheck)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
status := serviceChecks.AggregatedStatus()
|
|
|
|
switch status {
|
|
|
|
case api.HealthWarning:
|
|
|
|
return weightWarning
|
|
|
|
case api.HealthPassing:
|
|
|
|
return weightPassing
|
|
|
|
case api.HealthMaint:
|
|
|
|
// Not used in theory
|
|
|
|
return 0
|
|
|
|
case api.HealthCritical:
|
|
|
|
// Should not happen since already filtered
|
|
|
|
return 0
|
|
|
|
default:
|
|
|
|
// When non-standard status, return 1
|
|
|
|
return 1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-29 18:23:18 +00:00
|
|
|
func (d *DNSServer) encodeIPAsFqdn(questionName string, lookup serviceLookup, ip net.IP) string {
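// Produces names such as (illustrative) "7f000001.addr.dc1.consul" for
// 127.0.0.1 in datacenter dc1, or "7f000001.addr.consul" when the lookup
// targets a peer, since the datacenter label is omitted for peers.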
|
2019-08-05 15:19:18 +00:00
|
|
|
ipv4 := ip.To4()
|
2021-10-17 00:15:57 +00:00
|
|
|
respDomain := d.getResponseDomain(questionName)
|
2022-11-29 18:23:18 +00:00
|
|
|
ipStr := hex.EncodeToString(ip)
|
2019-08-05 15:19:18 +00:00
|
|
|
if ipv4 != nil {
|
2022-11-29 18:23:18 +00:00
|
|
|
ipStr = ipStr[len(ipStr)-(net.IPv4len*2):]
|
|
|
|
}
|
|
|
|
if lookup.PeerName != "" {
|
|
|
|
// Exclude the datacenter from the FQDN on the addr for peers.
|
|
|
|
// This technically makes no difference, since the addr endpoint ignores the DC
|
|
|
|
// component of the request, but do it anyway for a less confusing experience.
|
|
|
|
return fmt.Sprintf("%s.addr.%s", ipStr, respDomain)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
2022-11-29 18:23:18 +00:00
|
|
|
return fmt.Sprintf("%s.addr.%s.%s", ipStr, lookup.Datacenter, respDomain)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// Craft a DNS A or AAAA record for an IP address
|
2019-08-05 15:19:18 +00:00
|
|
|
func makeARecord(qType uint16, ip net.IP, ttl time.Duration) dns.RR {
|
|
|
|
|
|
|
|
var ipRecord dns.RR
|
|
|
|
ipv4 := ip.To4()
|
|
|
|
if ipv4 != nil {
|
|
|
|
if qType == dns.TypeSRV || qType == dns.TypeA || qType == dns.TypeANY || qType == dns.TypeNS || qType == dns.TypeTXT {
|
|
|
|
ipRecord = &dns.A{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Rrtype: dns.TypeA,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
A: ipv4,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else if qType == dns.TypeSRV || qType == dns.TypeAAAA || qType == dns.TypeANY || qType == dns.TypeNS || qType == dns.TypeTXT {
|
|
|
|
ipRecord = &dns.AAAA{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Rrtype: dns.TypeAAAA,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
AAAA: ip,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ipRecord
|
|
|
|
}
|
|
|
|
|
|
|
|
// Craft dns records for a node
|
|
|
|
// In case of an SRV query the answer will be an IN SRV record and the additional data will hold an IN A record for the node IP
|
|
|
|
// Otherwise it will return an IN A record
|
2020-03-18 23:28:36 +00:00
|
|
|
func (d *DNSServer) makeRecordFromNode(node *structs.Node, qType uint16, qName string, ttl time.Duration, maxRecursionLevel int) []dns.RR {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate := dnsutil.TranslateAddressAcceptDomain
|
2020-01-17 14:54:17 +00:00
|
|
|
if qType == dns.TypeA {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate |= dnsutil.TranslateAddressAcceptIPv4
|
2020-01-17 14:54:17 +00:00
|
|
|
} else if qType == dns.TypeAAAA {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate |= dnsutil.TranslateAddressAcceptIPv6
|
2020-01-17 14:54:17 +00:00
|
|
|
} else {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate |= dnsutil.TranslateAddressAcceptAny
|
2020-01-17 14:54:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
addr := d.agent.TranslateAddress(node.Datacenter, node.Address, node.TaggedAddresses, addrTranslate)
|
2019-08-05 15:19:18 +00:00
|
|
|
ip := net.ParseIP(addr)
|
|
|
|
|
|
|
|
var res []dns.RR
|
|
|
|
|
|
|
|
if ip == nil {
|
|
|
|
res = append(res, &dns.CNAME{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: qName,
|
|
|
|
Rrtype: dns.TypeCNAME,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
Target: dns.Fqdn(node.Address),
|
|
|
|
})
|
|
|
|
|
|
|
|
res = append(res,
|
|
|
|
d.resolveCNAME(d.config.Load().(*dnsConfig), dns.Fqdn(node.Address), maxRecursionLevel)...,
|
|
|
|
)
|
|
|
|
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
|
|
|
ipRecord := makeARecord(qType, ip, ttl)
|
|
|
|
if ipRecord == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
ipRecord.Header().Name = qName
|
|
|
|
return []dns.RR{ipRecord}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Craft dns records for a service
|
|
|
|
// In case of an SRV query the answer will be an IN SRV record and the additional data will hold an IN A record for the node IP
|
|
|
|
// Otherwise it will return an IN A record
|
2022-11-29 18:23:18 +00:00
|
|
|
func (d *DNSServer) makeRecordFromServiceNode(lookup serviceLookup, serviceNode structs.CheckServiceNode, addr net.IP, req *dns.Msg, ttl time.Duration) ([]dns.RR, []dns.RR) {
|
2019-08-05 15:19:18 +00:00
|
|
|
q := req.Question[0]
|
|
|
|
ipRecord := makeARecord(q.Qtype, addr, ttl)
|
|
|
|
if ipRecord == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
2022-11-29 18:23:18 +00:00
|
|
|
|
2019-08-05 15:19:18 +00:00
|
|
|
if q.Qtype == dns.TypeSRV {
|
2022-11-29 18:23:18 +00:00
|
|
|
respDomain := d.getResponseDomain(q.Name)
|
|
|
|
nodeFQDN := nodeCanonicalDNSName(lookup, serviceNode.Node.Node, respDomain)
|
2019-08-05 15:19:18 +00:00
|
|
|
answers := []dns.RR{
|
|
|
|
&dns.SRV{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: q.Name,
|
|
|
|
Rrtype: dns.TypeSRV,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
Priority: 1,
|
|
|
|
Weight: uint16(findWeight(serviceNode)),
|
2022-11-29 18:23:18 +00:00
|
|
|
Port: uint16(d.agent.TranslateServicePort(lookup.Datacenter, serviceNode.Service.Port, serviceNode.Service.TaggedAddresses)),
|
2019-08-05 15:19:18 +00:00
|
|
|
Target: nodeFQDN,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
ipRecord.Header().Name = nodeFQDN
|
|
|
|
return answers, []dns.RR{ipRecord}
|
|
|
|
}
|
|
|
|
|
|
|
|
ipRecord.Header().Name = q.Name
|
|
|
|
return []dns.RR{ipRecord}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Craft dns records for an IP
|
|
|
|
// In case of an SRV query the answer will be an IN SRV record and the additional data will hold an IN A record for the IP
|
|
|
|
// Otherwise it will return an IN A record
|
2022-11-29 18:23:18 +00:00
|
|
|
func (d *DNSServer) makeRecordFromIP(lookup serviceLookup, addr net.IP, serviceNode structs.CheckServiceNode, req *dns.Msg, ttl time.Duration) ([]dns.RR, []dns.RR) {
|
2019-08-05 15:19:18 +00:00
|
|
|
q := req.Question[0]
|
|
|
|
ipRecord := makeARecord(q.Qtype, addr, ttl)
|
|
|
|
if ipRecord == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if q.Qtype == dns.TypeSRV {
|
2022-11-29 18:23:18 +00:00
|
|
|
ipFQDN := d.encodeIPAsFqdn(q.Name, lookup, addr)
|
2019-08-05 15:19:18 +00:00
|
|
|
answers := []dns.RR{
|
|
|
|
&dns.SRV{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: q.Name,
|
|
|
|
Rrtype: dns.TypeSRV,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
Priority: 1,
|
|
|
|
Weight: uint16(findWeight(serviceNode)),
|
2022-11-29 18:23:18 +00:00
|
|
|
Port: uint16(d.agent.TranslateServicePort(lookup.Datacenter, serviceNode.Service.Port, serviceNode.Service.TaggedAddresses)),
|
2019-08-05 15:19:18 +00:00
|
|
|
Target: ipFQDN,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
ipRecord.Header().Name = ipFQDN
|
|
|
|
return answers, []dns.RR{ipRecord}
|
|
|
|
}
|
|
|
|
|
|
|
|
ipRecord.Header().Name = q.Name
|
|
|
|
return []dns.RR{ipRecord}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Craft dns records for an FQDN
|
|
|
|
// In case of an SRV query the answer will be an IN SRV record and the additional data will hold an IN A record for the IP
|
|
|
|
// Otherwise it will return a CNAME and an IN A record
|
2022-11-29 18:23:18 +00:00
|
|
|
func (d *DNSServer) makeRecordFromFQDN(lookup serviceLookup, fqdn string, serviceNode structs.CheckServiceNode, req *dns.Msg, ttl time.Duration, cfg *dnsConfig, maxRecursionLevel int) ([]dns.RR, []dns.RR) {
|
2019-08-05 15:19:18 +00:00
|
|
|
edns := req.IsEdns0() != nil
|
|
|
|
q := req.Question[0]
|
|
|
|
|
|
|
|
more := d.resolveCNAME(cfg, dns.Fqdn(fqdn), maxRecursionLevel)
|
|
|
|
var additional []dns.RR
|
|
|
|
extra := 0
|
|
|
|
MORE_REC:
|
|
|
|
for _, rr := range more {
|
|
|
|
switch rr.Header().Rrtype {
|
|
|
|
case dns.TypeCNAME, dns.TypeA, dns.TypeAAAA:
|
|
|
|
// set the TTL manually
|
|
|
|
rr.Header().Ttl = uint32(ttl / time.Second)
|
|
|
|
additional = append(additional, rr)
|
|
|
|
|
|
|
|
extra++
|
|
|
|
if extra == maxRecurseRecords && !edns {
|
|
|
|
break MORE_REC
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if q.Qtype == dns.TypeSRV {
|
|
|
|
answers := []dns.RR{
|
|
|
|
&dns.SRV{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: q.Name,
|
|
|
|
Rrtype: dns.TypeSRV,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
Priority: 1,
|
|
|
|
Weight: uint16(findWeight(serviceNode)),
|
2022-11-29 18:23:18 +00:00
|
|
|
Port: uint16(d.agent.TranslateServicePort(lookup.Datacenter, serviceNode.Service.Port, serviceNode.Service.TaggedAddresses)),
|
2019-08-05 15:19:18 +00:00
|
|
|
Target: dns.Fqdn(fqdn),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
return answers, additional
|
|
|
|
}
|
|
|
|
|
|
|
|
answers := []dns.RR{
|
|
|
|
&dns.CNAME{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: q.Name,
|
|
|
|
Rrtype: dns.TypeCNAME,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
Target: dns.Fqdn(fqdn),
|
|
|
|
}}
|
|
|
|
answers = append(answers, additional...)
|
|
|
|
|
|
|
|
return answers, nil
|
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// Craft dns records from a CheckServiceNode struct
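// The service address, when set, takes precedence over the node address;
// either may be an IP or an FQDN (external service), and each case is handled
// by a dedicated helper below.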
|
|
|
|
func (d *DNSServer) makeNodeServiceRecords(lookup serviceLookup, node structs.CheckServiceNode, req *dns.Msg, ttl time.Duration, cfg *dnsConfig, maxRecursionLevel int) ([]dns.RR, []dns.RR) {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate := dnsutil.TranslateAddressAcceptDomain
|
2020-01-17 14:54:17 +00:00
|
|
|
if req.Question[0].Qtype == dns.TypeA {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate |= dnsutil.TranslateAddressAcceptIPv4
|
2020-01-17 14:54:17 +00:00
|
|
|
} else if req.Question[0].Qtype == dns.TypeAAAA {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate |= dnsutil.TranslateAddressAcceptIPv6
|
2020-01-17 14:54:17 +00:00
|
|
|
} else {
|
2024-02-12 19:27:25 +00:00
|
|
|
addrTranslate |= dnsutil.TranslateAddressAcceptAny
|
2020-01-17 14:54:17 +00:00
|
|
|
}
|
|
|
|
|
2022-11-29 18:23:18 +00:00
|
|
|
// The datacenter should be empty during translation if it is a peering lookup.
|
|
|
|
// This should be fine because we should always prefer the WAN address.
|
|
|
|
serviceAddr := d.agent.TranslateServiceAddress(lookup.Datacenter, node.Service.Address, node.Service.TaggedAddresses, addrTranslate)
|
2020-01-17 14:54:17 +00:00
|
|
|
nodeAddr := d.agent.TranslateAddress(node.Node.Datacenter, node.Node.Address, node.Node.TaggedAddresses, addrTranslate)
|
|
|
|
if serviceAddr == "" && nodeAddr == "" {
|
|
|
|
return nil, nil
|
|
|
|
}
|
2019-08-05 15:19:18 +00:00
|
|
|
|
|
|
|
nodeIPAddr := net.ParseIP(nodeAddr)
|
|
|
|
serviceIPAddr := net.ParseIP(serviceAddr)
|
|
|
|
|
|
|
|
// There is no service address and the node address is an IP
|
|
|
|
if serviceAddr == "" && nodeIPAddr != nil {
|
|
|
|
if node.Node.Address != nodeAddr {
|
|
|
|
// Do not return a CNAME for the node address when a WAN address is in use
|
2022-11-29 18:23:18 +00:00
|
|
|
return d.makeRecordFromIP(lookup, nodeIPAddr, node, req, ttl)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
|
|
|
|
2022-11-29 18:23:18 +00:00
|
|
|
return d.makeRecordFromServiceNode(lookup, node, nodeIPAddr, req, ttl)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// There is no service address and the node address is a FQDN (external service)
|
|
|
|
if serviceAddr == "" {
|
2022-11-29 18:23:18 +00:00
|
|
|
return d.makeRecordFromFQDN(lookup, nodeAddr, node, req, ttl, cfg, maxRecursionLevel)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// The service address is an IP
|
|
|
|
if serviceIPAddr != nil {
|
2022-11-29 18:23:18 +00:00
|
|
|
return d.makeRecordFromIP(lookup, serviceIPAddr, node, req, ttl)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// If the service address is a CNAME for the service we are looking
|
|
|
|
// for then use the node address.
|
|
|
|
if dns.Fqdn(serviceAddr) == req.Question[0].Name && nodeIPAddr != nil {
|
2022-11-29 18:23:18 +00:00
|
|
|
return d.makeRecordFromServiceNode(lookup, node, nodeIPAddr, req, ttl)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// The service address is a FQDN (external service)
|
2022-11-29 18:23:18 +00:00
|
|
|
return d.makeRecordFromFQDN(lookup, serviceAddr, node, req, ttl, cfg, maxRecursionLevel)
|
2019-08-05 15:19:18 +00:00
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// Craft dns records for TXT from a node's metadata
|
|
|
|
func (d *DNSServer) makeTXTRecordFromNodeMeta(qName string, node *structs.Node, ttl time.Duration) []dns.RR {
|
2020-04-05 09:12:41 +00:00
|
|
|
extra := make([]dns.RR, 0, len(node.Meta))
|
2019-08-05 15:19:18 +00:00
|
|
|
for key, value := range node.Meta {
|
|
|
|
txt := value
|
|
|
|
if !strings.HasPrefix(strings.ToLower(key), "rfc1035-") {
|
|
|
|
txt = encodeKVasRFC1464(key, value)
|
|
|
|
}
|
|
|
|
|
|
|
|
extra = append(extra, &dns.TXT{
|
|
|
|
Hdr: dns.RR_Header{
|
|
|
|
Name: qName,
|
|
|
|
Rrtype: dns.TypeTXT,
|
|
|
|
Class: dns.ClassINET,
|
|
|
|
Ttl: uint32(ttl / time.Second),
|
|
|
|
},
|
|
|
|
Txt: []string{txt},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
return extra
|
|
|
|
}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
// addServiceSRVRecordsToMessage is used to add the SRV records for a service lookup
|
|
|
|
func (d *DNSServer) addServiceSRVRecordsToMessage(cfg *dnsConfig, lookup serviceLookup, nodes structs.CheckServiceNodes, req, resp *dns.Msg, ttl time.Duration, maxRecursionLevel int) {
|
2014-01-06 22:56:41 +00:00
|
|
|
handled := make(map[string]struct{})
|
2017-06-14 23:22:54 +00:00
|
|
|
|
2014-01-03 21:00:03 +00:00
|
|
|
for _, node := range nodes {
|
2014-01-06 22:56:41 +00:00
|
|
|
// Avoid duplicate entries, possible if a node has
|
|
|
|
// the same service on the same port, etc.
|
2022-11-29 18:23:18 +00:00
|
|
|
|
|
|
|
// The datacenter should be empty during translation if it is a peering lookup.
|
|
|
|
// This should be fine because we should always prefer the WAN address.
|
2024-02-12 19:27:25 +00:00
|
|
|
serviceAddress := d.agent.TranslateServiceAddress(lookup.Datacenter, node.Service.Address, node.Service.TaggedAddresses, dnsutil.TranslateAddressAcceptAny)
|
2022-11-29 18:23:18 +00:00
|
|
|
servicePort := d.agent.TranslateServicePort(lookup.Datacenter, node.Service.Port, node.Service.TaggedAddresses)
|
2019-08-05 15:19:18 +00:00
|
|
|
tuple := fmt.Sprintf("%s:%s:%d", node.Node.Node, serviceAddress, servicePort)
|
2014-01-06 22:56:41 +00:00
|
|
|
if _, ok := handled[tuple]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
handled[tuple] = struct{}{}
|
|
|
|
|
2024-01-12 22:07:42 +00:00
|
|
|
answers, extra := d.makeNodeServiceRecords(lookup, node, req, ttl, cfg, maxRecursionLevel)
|
2015-01-05 22:48:30 +00:00
|
|
|
|
2021-10-16 20:56:18 +00:00
|
|
|
respDomain := d.getResponseDomain(req.Question[0].Name)
|
2019-08-05 15:19:18 +00:00
|
|
|
resp.Answer = append(resp.Answer, answers...)
|
|
|
|
resp.Extra = append(resp.Extra, extra...)
|
2018-07-09 15:41:58 +00:00
|
|
|
|
2019-08-05 15:19:18 +00:00
|
|
|
if cfg.NodeMetaTXT {
|
2024-01-12 22:07:42 +00:00
|
|
|
resp.Extra = append(resp.Extra, d.makeTXTRecordFromNodeMeta(nodeCanonicalDNSName(lookup, node.Node.Node, respDomain), node.Node, ttl)...)
|
2014-01-03 21:00:03 +00:00
|
|
|
}
|
|
|
|
}
|
2014-01-03 01:58:58 +00:00
|
|
|
}
|
2014-01-03 23:43:35 +00:00
|
|
|
|
|
|
|
// handleRecurse is used to handle recursive DNS queries
|
|
|
|
func (d *DNSServer) handleRecurse(resp dns.ResponseWriter, req *dns.Msg) {
|
2019-04-24 18:11:54 +00:00
|
|
|
cfg := d.config.Load().(*dnsConfig)
|
|
|
|
|
2014-01-03 23:43:35 +00:00
|
|
|
q := req.Question[0]
|
|
|
|
network := "udp"
|
|
|
|
defer func(s time.Time) {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("request served from client",
|
|
|
|
"question", q,
|
|
|
|
"network", network,
|
|
|
|
"latency", time.Since(s).String(),
|
|
|
|
"client", resp.RemoteAddr().String(),
|
|
|
|
"client_network", resp.RemoteAddr().Network(),
|
|
|
|
)
|
2014-01-03 23:43:35 +00:00
|
|
|
}(time.Now())
|
|
|
|
|
|
|
|
// Switch to TCP if the client is using TCP
|
|
|
|
if _, ok := resp.RemoteAddr().(*net.TCPAddr); ok {
|
|
|
|
network = "tcp"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recursively resolve
|
2019-04-24 18:11:54 +00:00
|
|
|
c := &dns.Client{Net: network, Timeout: cfg.RecursorTimeout}
|
2014-11-03 19:40:55 +00:00
|
|
|
var r *dns.Msg
|
|
|
|
var rtt time.Duration
|
|
|
|
var err error
|
2021-07-19 22:22:51 +00:00
|
|
|
for _, idx := range cfg.RecursorStrategy.Indexes(len(cfg.Recursors)) {
|
|
|
|
recursor := cfg.Recursors[idx]
|
2014-11-03 19:40:55 +00:00
|
|
|
r, rtt, err = c.Exchange(req, recursor)
|
2018-08-02 14:12:52 +00:00
|
|
|
// Check if the response is valid and has the desired Response code
|
|
|
|
if r != nil && (r.Rcode != dns.RcodeSuccess && r.Rcode != dns.RcodeNameError) {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("recurse failed for question",
|
|
|
|
"question", q,
|
|
|
|
"rtt", rtt,
|
|
|
|
"recursor", recursor,
|
|
|
|
"rcode", dns.RcodeToString[r.Rcode],
|
|
|
|
)
|
2018-08-02 14:12:52 +00:00
|
|
|
// If we still have recursors to forward the query to,
|
|
|
|
// we move on to the next one; otherwise the loop ends
|
|
|
|
continue
|
2019-12-16 21:31:27 +00:00
|
|
|
} else if err == nil || (r != nil && r.Truncated) {
|
2016-08-11 23:24:44 +00:00
|
|
|
// Compress the response; we don't know if the incoming
|
|
|
|
// response was compressed or not, so by not compressing
|
|
|
|
// we might generate an invalid packet on the way out.
|
2019-04-24 18:11:54 +00:00
|
|
|
r.Compress = !cfg.DisableCompression
|
2016-08-11 23:24:44 +00:00
|
|
|
|
2014-11-03 19:40:55 +00:00
|
|
|
// Forward the response
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("recurse succeeded for question",
|
|
|
|
"question", q,
|
|
|
|
"rtt", rtt,
|
|
|
|
"recursor", recursor,
|
|
|
|
)
|
2014-11-03 19:40:55 +00:00
|
|
|
if err := resp.WriteMsg(r); err != nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Warn("failed to respond", "error", err)
|
2014-11-03 19:40:55 +00:00
|
|
|
}
|
2014-10-31 19:19:41 +00:00
|
|
|
return
|
|
|
|
}
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Error("recurse failed", "error", err)
|
2014-01-03 23:43:35 +00:00
|
|
|
}
|
2014-11-03 19:40:55 +00:00
|
|
|
|
|
|
|
// If all resolvers fail, return a SERVFAIL message
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Error("all resolvers failed for question from client",
|
|
|
|
"question", q,
|
|
|
|
"client", resp.RemoteAddr().String(),
|
|
|
|
"client_network", resp.RemoteAddr().Network(),
|
|
|
|
)
|
2014-11-03 19:40:55 +00:00
|
|
|
m := &dns.Msg{}
|
|
|
|
m.SetReply(req)
|
2019-04-24 18:11:54 +00:00
|
|
|
m.Compress = !cfg.DisableCompression
|
2014-11-03 19:40:55 +00:00
|
|
|
m.RecursionAvailable = true
|
|
|
|
m.SetRcode(req, dns.RcodeServerFailure)
|
2017-06-14 23:22:54 +00:00
|
|
|
if edns := req.IsEdns0(); edns != nil {
|
2018-09-11 13:37:46 +00:00
|
|
|
setEDNS(req, m, true)
|
2017-06-14 23:22:54 +00:00
|
|
|
}
|
2014-11-03 19:40:55 +00:00
|
|
|
resp.WriteMsg(m)
|
2014-01-03 23:43:35 +00:00
|
|
|
}
|
2014-02-25 20:46:11 +00:00
|
|
|
|
|
|
|
// resolveCNAME is used to recursively resolve CNAME records
|
2019-04-24 18:11:54 +00:00
|
|
|
func (d *DNSServer) resolveCNAME(cfg *dnsConfig, name string, maxRecursionLevel int) []dns.RR {
|
2016-10-27 02:23:51 +00:00
|
|
|
// If the CNAME record points to a Consul address, resolve it internally
|
2019-06-27 10:00:37 +00:00
|
|
|
// Convert query to lowercase because DNS is case insensitive; d.domain and
|
|
|
|
// d.altDomain are already converted
|
2019-01-07 21:53:54 +00:00
|
|
|
|
2019-06-27 10:00:37 +00:00
|
|
|
if ln := strings.ToLower(name); strings.HasSuffix(ln, "."+d.domain) || strings.HasSuffix(ln, "."+d.altDomain) {
|
2019-01-07 21:53:54 +00:00
|
|
|
if maxRecursionLevel < 1 {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Error("Infinite recursion detected for name, won't perform any CNAME resolution.", "name", name)
|
2019-01-07 21:53:54 +00:00
|
|
|
return nil
|
|
|
|
}
|
2016-10-27 02:23:51 +00:00
|
|
|
req := &dns.Msg{}
|
|
|
|
resp := &dns.Msg{}
|
|
|
|
|
|
|
|
req.SetQuestion(name, dns.TypeANY)
|
2021-04-13 20:07:10 +00:00
|
|
|
// TODO: handle error response
|
2021-04-13 22:15:48 +00:00
|
|
|
d.dispatch(nil, req, resp, maxRecursionLevel-1)
|
2016-10-27 02:23:51 +00:00
|
|
|
|
|
|
|
return resp.Answer
|
|
|
|
}
|
|
|
|
|
2014-02-25 20:46:11 +00:00
|
|
|
// Do nothing if we don't have a recursor
|
2019-04-24 18:11:54 +00:00
|
|
|
if len(cfg.Recursors) == 0 {
|
2014-02-25 20:46:11 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ask for any A records
|
|
|
|
m := new(dns.Msg)
|
|
|
|
m.SetQuestion(name, dns.TypeA)
|
|
|
|
|
|
|
|
// Make a DNS lookup request
|
2019-04-24 18:11:54 +00:00
|
|
|
c := &dns.Client{Net: "udp", Timeout: cfg.RecursorTimeout}
|
2014-11-03 19:40:55 +00:00
|
|
|
var r *dns.Msg
|
|
|
|
var rtt time.Duration
|
|
|
|
var err error
|
2021-07-19 22:22:51 +00:00
|
|
|
for _, idx := range cfg.RecursorStrategy.Indexes(len(cfg.Recursors)) {
|
|
|
|
recursor := cfg.Recursors[idx]
|
2014-11-03 19:40:55 +00:00
|
|
|
r, rtt, err = c.Exchange(m, recursor)
|
|
|
|
if err == nil {
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Debug("cname recurse RTT for name",
|
|
|
|
"name", name,
|
|
|
|
"rtt", rtt,
|
|
|
|
)
|
2014-11-03 19:40:55 +00:00
|
|
|
return r.Answer
|
2014-10-31 19:19:41 +00:00
|
|
|
}
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Error("cname recurse failed for name",
|
|
|
|
"name", name,
|
|
|
|
"error", err,
|
|
|
|
)
|
2014-02-25 20:46:11 +00:00
|
|
|
}
|
2020-01-28 23:50:41 +00:00
|
|
|
d.logger.Error("all resolvers failed for name", "name", name)
|
2014-10-31 19:19:41 +00:00
|
|
|
return nil
|
2014-02-25 20:46:11 +00:00
|
|
|
}
|
2023-09-20 21:50:06 +00:00
|
|
|
|
|
|
|
func (d *DNSServer) coalesceDNSToken() string {
|
|
|
|
if d.agent.tokens.DNSToken() != "" {
|
|
|
|
return d.agent.tokens.DNSToken()
|
|
|
|
} else {
|
|
|
|
return d.agent.tokens.UserToken()
|
|
|
|
}
|
|
|
|
}
|