Implement data filtering of some endpoints (#5579)

Fixes: #4222

# Data Filtering

This PR implements filtering for the following endpoints:

## Supported HTTP Endpoints

- `/agent/checks`
- `/agent/services`
- `/catalog/nodes`
- `/catalog/service/:service`
- `/catalog/connect/:service`
- `/catalog/node/:node`
- `/health/node/:node`
- `/health/checks/:service`
- `/health/service/:service`
- `/health/connect/:service`
- `/health/state/:state`
- `/internal/ui/nodes`
- `/internal/ui/services`

More can be added going forward; any endpoint used to list data is a good candidate.

## Usage

When using the HTTP API, a `filter` query parameter can be used to pass a filter expression to Consul. Filter expressions take the general form of:

```
<selector> == <value>
<selector> != <value>
<value> in <selector>
<value> not in <selector>
<selector> contains <value>
<selector> not contains <value>
<selector> is empty
<selector> is not empty
not <other expression>
<expression 1> and <expression 2>
<expression 1> or <expression 2>
```

Normal boolean logic and precedence are supported. All of the actual filtering and evaluation logic comes from the [go-bexpr](https://github.com/hashicorp/go-bexpr) library.

## Other changes

Adding the `Internal.ServiceDump` RPC endpoint. This will allow the UI to filter services better.
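All of the endpoint changes in the diff below follow the same two-step go-bexpr pattern: compile the expression once with `bexpr.CreateFilter`, then run `filter.Execute` over the result set and assert it back to its concrete type. Here is a minimal, self-contained sketch of that pattern; the `Service` struct and sample data are illustrative, not part of this change:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Service is a hypothetical stand-in for the structs the endpoints filter.
type Service struct {
	Service string
	Tags    []string
	Meta    map[string]string
}

func main() {
	services := []Service{
		{Service: "redis", Tags: []string{"v1"}, Meta: map[string]string{"env": "qa"}},
		{Service: "web", Tags: []string{"v2"}, Meta: map[string]string{"env": "prod"}},
	}

	// Passing the data type as the third argument lets go-bexpr validate
	// the expression's selectors against the concrete type up front.
	filter, err := bexpr.CreateFilter("Meta.env == qa and v1 in Tags", nil, services)
	if err != nil {
		panic(err)
	}

	// Execute returns the filtered data as interface{}; callers assert it
	// back to the input type, exactly as the endpoint handlers below do.
	raw, err := filter.Execute(services)
	if err != nil {
		panic(err)
	}
	fmt.Println(raw.([]Service))
}
```

Over HTTP the same expression is simply URL-escaped into the `filter` query parameter, e.g. `/v1/agent/services?filter=Meta.env%20%3D%3D%20qa`.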
This commit is contained in: parent ffc5c33550, commit afa1cc98d1
@@ -31,6 +31,7 @@ import (
 	"github.com/hashicorp/consul/lib/file"
 	"github.com/hashicorp/consul/logger"
 	"github.com/hashicorp/consul/types"
+	bexpr "github.com/hashicorp/go-bexpr"
 	"github.com/hashicorp/logutils"
 	"github.com/hashicorp/serf/coordinate"
 	"github.com/hashicorp/serf/serf"
@@ -219,6 +220,9 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request)
 	var token string
 	s.parseToken(req, &token)
+
+	var filterExpression string
+	s.parseFilter(req, &filterExpression)
 
 	services := s.agent.State.Services()
 	if err := s.agent.filterServices(token, &services); err != nil {
 		return nil, err
@@ -238,7 +242,12 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request)
 		agentSvcs[id] = &agentService
 	}
 
-	return agentSvcs, nil
+	filter, err := bexpr.CreateFilter(filterExpression, nil, agentSvcs)
+	if err != nil {
+		return nil, err
+	}
+
+	return filter.Execute(agentSvcs)
 }
 
 // GET /v1/agent/service/:service_id
@@ -403,6 +412,13 @@ func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (i
 	var token string
 	s.parseToken(req, &token)
 
+	var filterExpression string
+	s.parseFilter(req, &filterExpression)
+	filter, err := bexpr.CreateFilter(filterExpression, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
 	checks := s.agent.State.Checks()
 	if err := s.agent.filterChecks(token, &checks); err != nil {
 		return nil, err
@@ -417,7 +433,7 @@ func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (i
 		}
 	}
 
-	return checks, nil
+	return filter.Execute(checks)
 }
 
 func (s *HTTPServer) AgentMembers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {

@@ -10,6 +10,7 @@ import (
 	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
+	"net/url"
 	"os"
 	"reflect"
 	"strings"
@@ -27,8 +28,8 @@ import (
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/logger"
-	"github.com/hashicorp/consul/testrpc"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
+	"github.com/hashicorp/consul/testrpc"
 	"github.com/hashicorp/consul/types"
 	"github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/serf/serf"
@@ -101,6 +102,48 @@ func TestAgent_Services(t *testing.T) {
 	assert.Equal(t, prxy1.Upstreams.ToAPI(), val["mysql"].Connect.Proxy.Upstreams)
 }
 
+func TestAgent_ServicesFiltered(t *testing.T) {
+	t.Parallel()
+	a := NewTestAgent(t, t.Name(), "")
+	defer a.Shutdown()
+
+	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+	srv1 := &structs.NodeService{
+		ID:      "mysql",
+		Service: "mysql",
+		Tags:    []string{"master"},
+		Meta: map[string]string{
+			"foo": "bar",
+		},
+		Port: 5000,
+	}
+	require.NoError(t, a.State.AddService(srv1, ""))
+
+	// Add another service
+	srv2 := &structs.NodeService{
+		ID:      "redis",
+		Service: "redis",
+		Tags:    []string{"kv"},
+		Meta: map[string]string{
+			"foo": "bar",
+		},
+		Port: 1234,
+	}
+	require.NoError(t, a.State.AddService(srv2, ""))
+
+	req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("foo in Meta"), nil)
+	obj, err := a.srv.AgentServices(nil, req)
+	require.NoError(t, err)
+	val := obj.(map[string]*api.AgentService)
+	require.Len(t, val, 2)
+
+	req, _ = http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("kv in Tags"), nil)
+	obj, err = a.srv.AgentServices(nil, req)
+	require.NoError(t, err)
+	val = obj.(map[string]*api.AgentService)
+	require.Len(t, val, 1)
+}
+
 // This tests that the agent services endpoint (/v1/agent/services) returns
 // Connect proxies.
 func TestAgent_Services_ExternalConnectProxy(t *testing.T) {
@@ -629,6 +672,37 @@ func TestAgent_Checks(t *testing.T) {
 	}
 }
 
+func TestAgent_ChecksWithFilter(t *testing.T) {
+	t.Parallel()
+	a := NewTestAgent(t, t.Name(), "")
+	defer a.Shutdown()
+
+	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+	chk1 := &structs.HealthCheck{
+		Node:    a.Config.NodeName,
+		CheckID: "mysql",
+		Name:    "mysql",
+		Status:  api.HealthPassing,
+	}
+	a.State.AddCheck(chk1, "")
+
+	chk2 := &structs.HealthCheck{
+		Node:    a.Config.NodeName,
+		CheckID: "redis",
+		Name:    "redis",
+		Status:  api.HealthPassing,
+	}
+	a.State.AddCheck(chk2, "")
+
+	req, _ := http.NewRequest("GET", "/v1/agent/checks?filter="+url.QueryEscape("Name == `redis`"), nil)
+	obj, err := a.srv.AgentChecks(nil, req)
+	require.NoError(t, err)
+	val := obj.(map[types.CheckID]*structs.HealthCheck)
+	require.Len(t, val, 1)
+	_, ok := val["redis"]
+	require.True(t, ok)
+}
+
 func TestAgent_HealthServiceByID(t *testing.T) {
 	t.Parallel()
 	a := NewTestAgent(t, t.Name(), "")

@@ -4,12 +4,13 @@ import (
 	"fmt"
 	"net/http"
 	"net/http/httptest"
+	"net/url"
 	"testing"
 	"time"
 
 	"github.com/hashicorp/consul/agent/structs"
-	"github.com/hashicorp/consul/testrpc"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
+	"github.com/hashicorp/consul/testrpc"
 	"github.com/hashicorp/serf/coordinate"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -153,6 +154,42 @@ func TestCatalogNodes_MetaFilter(t *testing.T) {
 	}
 }
 
+func TestCatalogNodes_Filter(t *testing.T) {
+	t.Parallel()
+	a := NewTestAgent(t, t.Name(), "")
+	defer a.Shutdown()
+	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+
+	// Register a node with a meta field
+	args := &structs.RegisterRequest{
+		Datacenter: "dc1",
+		Node:       "foo",
+		Address:    "127.0.0.1",
+		NodeMeta: map[string]string{
+			"somekey": "somevalue",
+		},
+	}
+
+	var out struct{}
+	require.NoError(t, a.RPC("Catalog.Register", args, &out))
+
+	req, _ := http.NewRequest("GET", "/v1/catalog/nodes?filter="+url.QueryEscape("Meta.somekey == somevalue"), nil)
+	resp := httptest.NewRecorder()
+	obj, err := a.srv.CatalogNodes(resp, req)
+	require.NoError(t, err)
+
+	// Verify an index is set
+	assertIndex(t, resp)
+
+	// Verify we only get the node with the correct meta field back
+	nodes := obj.(structs.Nodes)
+	require.Len(t, nodes, 1)
+
+	v, ok := nodes[0].Meta["somekey"]
+	require.True(t, ok)
+	require.Equal(t, v, "somevalue")
+}
+
 func TestCatalogNodes_WanTranslation(t *testing.T) {
 	t.Parallel()
 	a1 := NewTestAgent(t, t.Name(), `
@@ -651,6 +688,69 @@ func TestCatalogServiceNodes_NodeMetaFilter(t *testing.T) {
 	}
 }
 
+func TestCatalogServiceNodes_Filter(t *testing.T) {
+	t.Parallel()
+	a := NewTestAgent(t, t.Name(), "")
+	defer a.Shutdown()
+
+	queryPath := "/v1/catalog/service/api?filter=" + url.QueryEscape("ServiceMeta.somekey == somevalue")
+
+	// Make sure an empty list is returned, not a nil
+	{
+		req, _ := http.NewRequest("GET", queryPath, nil)
+		resp := httptest.NewRecorder()
+		obj, err := a.srv.CatalogServiceNodes(resp, req)
+		require.NoError(t, err)
+
+		assertIndex(t, resp)
+
+		nodes := obj.(structs.ServiceNodes)
+		require.Empty(t, nodes)
+	}
+
+	// Register node
+	args := &structs.RegisterRequest{
+		Datacenter: "dc1",
+		Node:       "foo",
+		Address:    "127.0.0.1",
+		Service: &structs.NodeService{
+			Service: "api",
+			Meta: map[string]string{
+				"somekey": "somevalue",
+			},
+		},
+	}
+
+	var out struct{}
+	require.NoError(t, a.RPC("Catalog.Register", args, &out))
+
+	// Register a second service for the node
+	args = &structs.RegisterRequest{
+		Datacenter: "dc1",
+		Node:       "foo",
+		Address:    "127.0.0.1",
+		Service: &structs.NodeService{
+			ID:      "api2",
+			Service: "api",
+			Meta: map[string]string{
+				"somekey": "notvalue",
+			},
+		},
+		SkipNodeUpdate: true,
+	}
+
+	require.NoError(t, a.RPC("Catalog.Register", args, &out))
+
+	req, _ := http.NewRequest("GET", queryPath, nil)
+	resp := httptest.NewRecorder()
+	obj, err := a.srv.CatalogServiceNodes(resp, req)
+	require.NoError(t, err)
+	assertIndex(t, resp)
+
+	nodes := obj.(structs.ServiceNodes)
+	require.Len(t, nodes, 1)
+}
+
 func TestCatalogServiceNodes_WanTranslation(t *testing.T) {
 	t.Parallel()
 	a1 := NewTestAgent(t, t.Name(), `
@@ -884,6 +984,44 @@ func TestCatalogConnectServiceNodes_good(t *testing.T) {
 	assert.Equal(args.Service.Proxy, nodes[0].ServiceProxy)
 }
 
+func TestCatalogConnectServiceNodes_Filter(t *testing.T) {
+	t.Parallel()
+
+	a := NewTestAgent(t, t.Name(), "")
+	defer a.Shutdown()
+	testrpc.WaitForLeader(t, a.RPC, "dc1")
+
+	// Register
+	args := structs.TestRegisterRequestProxy(t)
+	args.Service.Address = "127.0.0.55"
+	var out struct{}
+	require.NoError(t, a.RPC("Catalog.Register", args, &out))
+
+	args = structs.TestRegisterRequestProxy(t)
+	args.Service.Address = "127.0.0.55"
+	args.Service.Meta = map[string]string{
+		"version": "2",
+	}
+	args.Service.ID = "web-proxy2"
+	args.SkipNodeUpdate = true
+	require.NoError(t, a.RPC("Catalog.Register", args, &out))
+
+	req, _ := http.NewRequest("GET", fmt.Sprintf(
+		"/v1/catalog/connect/%s?filter=%s",
+		args.Service.Proxy.DestinationServiceName,
+		url.QueryEscape("ServiceMeta.version == 2")), nil)
+	resp := httptest.NewRecorder()
+	obj, err := a.srv.CatalogConnectServiceNodes(resp, req)
+	require.NoError(t, err)
+	assertIndex(t, resp)
+
+	nodes := obj.(structs.ServiceNodes)
+	require.Len(t, nodes, 1)
+	require.Equal(t, structs.ServiceKindConnectProxy, nodes[0].ServiceKind)
+	require.Equal(t, args.Service.Address, nodes[0].ServiceAddress)
+	require.Equal(t, args.Service.Proxy, nodes[0].ServiceProxy)
+}
+
 func TestCatalogNodeServices(t *testing.T) {
 	t.Parallel()
 	a := NewTestAgent(t, t.Name(), "")
@@ -927,6 +1065,43 @@ func TestCatalogNodeServices(t *testing.T) {
 	require.Equal(t, args.Service.Proxy, services.Services["web-proxy"].Proxy)
 }
 
+func TestCatalogNodeServices_Filter(t *testing.T) {
+	t.Parallel()
+	a := NewTestAgent(t, t.Name(), "")
+	defer a.Shutdown()
+	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+
+	// Register node with a regular service and connect proxy
+	args := &structs.RegisterRequest{
+		Datacenter: "dc1",
+		Node:       "foo",
+		Address:    "127.0.0.1",
+		Service: &structs.NodeService{
+			Service: "api",
+			Tags:    []string{"a"},
+		},
+	}
+
+	var out struct{}
+	require.NoError(t, a.RPC("Catalog.Register", args, &out))
+
+	// Register a connect proxy
+	args.Service = structs.TestNodeServiceProxy(t)
+	require.NoError(t, a.RPC("Catalog.Register", args, &out))
+
+	req, _ := http.NewRequest("GET", "/v1/catalog/node/foo?dc=dc1&filter="+url.QueryEscape("Kind == `connect-proxy`"), nil)
+	resp := httptest.NewRecorder()
+	obj, err := a.srv.CatalogNodeServices(resp, req)
+	require.NoError(t, err)
+	assertIndex(t, resp)
+
+	services := obj.(*structs.NodeServices)
+	require.Len(t, services.Services, 1)
+
+	// Proxy service should have its config intact
+	require.Equal(t, args.Service.Proxy, services.Services["web-proxy"].Proxy)
+}
+
 // Test that the services on a node contain all the Connect proxies on
 // the node as well with their fields properly populated.
 func TestCatalogNodeServices_ConnectProxy(t *testing.T) {

@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/ipaddr"
 	"github.com/hashicorp/consul/types"
+	bexpr "github.com/hashicorp/go-bexpr"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-uuid"
 )
@@ -219,6 +220,11 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde
 		return err
 	}
 
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.Nodes)
+	if err != nil {
+		return err
+	}
+
 	return c.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
@@ -239,6 +245,13 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde
 			if err := c.srv.filterACL(args.Token, reply); err != nil {
 				return err
 			}
+
+			raw, err := filter.Execute(reply.Nodes)
+			if err != nil {
+				return err
+			}
+			reply.Nodes = raw.(structs.Nodes)
+
 			return c.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes)
 		})
 }
@@ -327,7 +340,12 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
 		}
 	}
 
-	err := c.srv.blockingQuery(
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.ServiceNodes)
+	if err != nil {
+		return err
+	}
+
+	err = c.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
@@ -346,9 +364,19 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
 			}
 			reply.ServiceNodes = filtered
 		}
+
 			if err := c.srv.filterACL(args.Token, reply); err != nil {
 				return err
 			}
+
+			// This is safe to do even when the filter is nil - it's just a no-op then
+			raw, err := filter.Execute(reply.ServiceNodes)
+			if err != nil {
+				return err
+			}
+
+			reply.ServiceNodes = raw.(structs.ServiceNodes)
+
 			return c.srv.sortNodesByDistanceFrom(args.Source, reply.ServiceNodes)
 		})
@@ -400,6 +428,12 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs
 		return fmt.Errorf("Must provide node")
 	}
 
+	var filterType map[string]*structs.NodeService
+	filter, err := bexpr.CreateFilter(args.Filter, nil, filterType)
+	if err != nil {
+		return err
+	}
+
 	return c.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
@@ -410,6 +444,18 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs
 			}
 
 			reply.Index, reply.NodeServices = index, services
-			return c.srv.filterACL(args.Token, reply)
+			if err := c.srv.filterACL(args.Token, reply); err != nil {
+				return err
+			}
+
+			if reply.NodeServices != nil {
+				raw, err := filter.Execute(reply.NodeServices.Services)
+				if err != nil {
+					return err
+				}
+				reply.NodeServices.Services = raw.(map[string]*structs.NodeService)
+			}
+
+			return nil
 		})
 }

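Every RPC endpoint above applies the same shape: the filter is compiled from `args.Filter` before entering `blockingQuery`, and `Execute` runs inside the query function after `filterACL`, so each wake-up of a blocking query re-applies the filter to fresh, ACL-filtered results. A condensed sketch of that shape (the endpoint name and state call are illustrative, not from this diff):

```go
func (c *Catalog) exampleList(args *structs.DCSpecificRequest, reply *structs.IndexedNodes) error {
	// Compile once, up front, so a bad expression fails fast with an error
	// to the caller instead of failing on every blocking-query iteration.
	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.Nodes)
	if err != nil {
		return err
	}

	return c.srv.blockingQuery(&args.QueryOptions, &reply.QueryMeta,
		func(ws memdb.WatchSet, state *state.Store) error {
			index, nodes, err := state.Nodes(ws)
			if err != nil {
				return err
			}
			reply.Index, reply.Nodes = index, nodes

			// ACL filtering first, bexpr filtering second.
			if err := c.srv.filterACL(args.Token, reply); err != nil {
				return err
			}
			raw, err := filter.Execute(reply.Nodes)
			if err != nil {
				return err
			}
			reply.Nodes = raw.(structs.Nodes)
			return nil
		})
}
```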
@@ -12,8 +12,8 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
-	"github.com/hashicorp/consul/testrpc"
+	"github.com/hashicorp/consul/sdk/testutil/retry"
+	"github.com/hashicorp/consul/testrpc"
 	"github.com/hashicorp/consul/types"
 	"github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/stretchr/testify/assert"
@@ -926,6 +926,82 @@ func TestCatalog_ListNodes_NodeMetaFilter(t *testing.T) {
 	})
 }
 
+func TestCatalog_RPC_Filter(t *testing.T) {
+	t.Parallel()
+	dir1, s1 := testServer(t)
+	defer os.RemoveAll(dir1)
+	defer s1.Shutdown()
+	codec := rpcClient(t, s1)
+	defer codec.Close()
+
+	testrpc.WaitForLeader(t, s1.RPC, "dc1")
+
+	// prep the cluster with some data we can use in our filters
+	registerTestCatalogEntries(t, codec)
+
+	// Run the tests against the test server
+
+	t.Run("ListNodes", func(t *testing.T) {
+		args := structs.DCSpecificRequest{
+			Datacenter:   "dc1",
+			QueryOptions: structs.QueryOptions{Filter: "Meta.os == linux"},
+		}
+
+		out := new(structs.IndexedNodes)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, out))
+		require.Len(t, out.Nodes, 2)
+		require.Condition(t, func() bool {
+			return (out.Nodes[0].Node == "foo" && out.Nodes[1].Node == "baz") ||
+				(out.Nodes[0].Node == "baz" && out.Nodes[1].Node == "foo")
+		})
+
+		args.Filter = "Meta.os == linux and Meta.env == qa"
+		out = new(structs.IndexedNodes)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, out))
+		require.Len(t, out.Nodes, 1)
+		require.Equal(t, "baz", out.Nodes[0].Node)
+	})
+
+	t.Run("ServiceNodes", func(t *testing.T) {
+		args := structs.ServiceSpecificRequest{
+			Datacenter:   "dc1",
+			ServiceName:  "redis",
+			QueryOptions: structs.QueryOptions{Filter: "ServiceMeta.version == 1"},
+		}
+
+		out := new(structs.IndexedServiceNodes)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &args, &out))
+		require.Len(t, out.ServiceNodes, 2)
+		require.Condition(t, func() bool {
+			return (out.ServiceNodes[0].Node == "foo" && out.ServiceNodes[1].Node == "bar") ||
+				(out.ServiceNodes[0].Node == "bar" && out.ServiceNodes[1].Node == "foo")
+		})
+
+		args.Filter = "ServiceMeta.version == 2"
+		out = new(structs.IndexedServiceNodes)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &args, &out))
+		require.Len(t, out.ServiceNodes, 1)
+		require.Equal(t, "foo", out.ServiceNodes[0].Node)
+	})
+
+	t.Run("NodeServices", func(t *testing.T) {
+		args := structs.NodeSpecificRequest{
+			Datacenter:   "dc1",
+			Node:         "baz",
+			QueryOptions: structs.QueryOptions{Filter: "Service == web"},
+		}
+
+		out := new(structs.IndexedNodeServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out))
+		require.Len(t, out.NodeServices.Services, 2)
+
+		args.Filter = "Service == web and Meta.version == 2"
+		out = new(structs.IndexedNodeServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &out))
+		require.Len(t, out.NodeServices.Services, 1)
+	})
+}
+
 func TestCatalog_ListNodes_StaleRead(t *testing.T) {
 	t.Parallel()
 	dir1, s1 := testServer(t)

@@ -7,6 +7,7 @@ import (
 	"github.com/armon/go-metrics"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
+	bexpr "github.com/hashicorp/go-bexpr"
 	"github.com/hashicorp/go-memdb"
 )
 
@@ -22,6 +23,11 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest,
 		return err
 	}
 
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.HealthChecks)
+	if err != nil {
+		return err
+	}
+
 	return h.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
@@ -41,6 +47,13 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest,
 			if err := h.srv.filterACL(args.Token, reply); err != nil {
 				return err
 			}
+
+			raw, err := filter.Execute(reply.HealthChecks)
+			if err != nil {
+				return err
+			}
+			reply.HealthChecks = raw.(structs.HealthChecks)
+
 			return h.srv.sortNodesByDistanceFrom(args.Source, reply.HealthChecks)
 		})
 }
@@ -52,6 +65,11 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest,
 		return err
 	}
 
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.HealthChecks)
+	if err != nil {
+		return err
+	}
+
 	return h.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
@@ -61,7 +79,16 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest,
 				return err
 			}
 			reply.Index, reply.HealthChecks = index, checks
-			return h.srv.filterACL(args.Token, reply)
+			if err := h.srv.filterACL(args.Token, reply); err != nil {
+				return err
+			}
+
+			raw, err := filter.Execute(reply.HealthChecks)
+			if err != nil {
+				return err
+			}
+			reply.HealthChecks = raw.(structs.HealthChecks)
+			return nil
 		})
 }
 
@@ -78,6 +105,11 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest,
 		return err
 	}
 
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.HealthChecks)
+	if err != nil {
+		return err
+	}
+
 	return h.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
@@ -97,6 +129,13 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest,
 			if err := h.srv.filterACL(args.Token, reply); err != nil {
 				return err
 			}
+
+			raw, err := filter.Execute(reply.HealthChecks)
+			if err != nil {
+				return err
+			}
+			reply.HealthChecks = raw.(structs.HealthChecks)
+
 			return h.srv.sortNodesByDistanceFrom(args.Source, reply.HealthChecks)
 		})
 }
@@ -138,7 +177,12 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
 		}
 	}
 
-	err := h.srv.blockingQuery(
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.Nodes)
+	if err != nil {
+		return err
+	}
+
+	err = h.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
@@ -151,9 +195,17 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
 			if len(args.NodeMetaFilters) > 0 {
 				reply.Nodes = nodeMetaFilter(args.NodeMetaFilters, reply.Nodes)
 			}
+
 			if err := h.srv.filterACL(args.Token, reply); err != nil {
 				return err
 			}
+
+			raw, err := filter.Execute(reply.Nodes)
+			if err != nil {
+				return err
+			}
+			reply.Nodes = raw.(structs.CheckServiceNodes)
+
 			return h.srv.sortNodesByDistanceFrom(args.Source, reply.Nodes)
 		})

@@ -9,6 +9,7 @@ import (
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/testrpc"
+	"github.com/hashicorp/consul/types"
 	"github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -1176,3 +1177,115 @@ func TestHealth_ChecksInState_FilterACL(t *testing.T) {
 	// that to false (the regression value of *not* changing this is better
 	// for now until we change the sense of the version 8 ACL flag).
 }
 
+func TestHealth_RPC_Filter(t *testing.T) {
+	t.Parallel()
+	dir1, s1 := testServer(t)
+	defer os.RemoveAll(dir1)
+	defer s1.Shutdown()
+	codec := rpcClient(t, s1)
+	defer codec.Close()
+
+	testrpc.WaitForLeader(t, s1.RPC, "dc1")
+
+	// prep the cluster with some data we can use in our filters
+	registerTestCatalogEntries(t, codec)
+
+	// Run the tests against the test server
+
+	t.Run("NodeChecks", func(t *testing.T) {
+		args := structs.NodeSpecificRequest{
+			Datacenter:   "dc1",
+			Node:         "foo",
+			QueryOptions: structs.QueryOptions{Filter: "ServiceName == redis and v1 in ServiceTags"},
+		}
+
+		out := new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &args, out))
+		require.Len(t, out.HealthChecks, 1)
+		require.Equal(t, types.CheckID("foo:redisV1"), out.HealthChecks[0].CheckID)
+
+		args.Filter = "ServiceID == ``"
+		out = new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.NodeChecks", &args, out))
+		require.Len(t, out.HealthChecks, 2)
+	})
+
+	t.Run("ServiceChecks", func(t *testing.T) {
+		args := structs.ServiceSpecificRequest{
+			Datacenter:   "dc1",
+			ServiceName:  "redis",
+			QueryOptions: structs.QueryOptions{Filter: "Node == foo"},
+		}
+
+		out := new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, out))
+		// 1 service check for each instance
+		require.Len(t, out.HealthChecks, 2)
+
+		args.Filter = "Node == bar"
+		out = new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, out))
+		// 1 service check for each instance
+		require.Len(t, out.HealthChecks, 1)
+
+		args.Filter = "Node == foo and v1 in ServiceTags"
+		out = new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceChecks", &args, out))
+		// 1 service check for the matching instance
+		require.Len(t, out.HealthChecks, 1)
+	})
+
+	t.Run("ServiceNodes", func(t *testing.T) {
+		args := structs.ServiceSpecificRequest{
+			Datacenter:   "dc1",
+			ServiceName:  "redis",
+			QueryOptions: structs.QueryOptions{Filter: "Service.Meta.version == 2"},
+		}
+
+		out := new(structs.IndexedCheckServiceNodes)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &args, out))
+		require.Len(t, out.Nodes, 1)
+
+		args.ServiceName = "web"
+		args.Filter = "Node.Meta.os == linux"
+		out = new(structs.IndexedCheckServiceNodes)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &args, out))
+		require.Len(t, out.Nodes, 2)
+		require.Equal(t, "baz", out.Nodes[0].Node.Node)
+		require.Equal(t, "baz", out.Nodes[1].Node.Node)
+
+		args.Filter = "Node.Meta.os == linux and Service.Meta.version == 1"
+		out = new(structs.IndexedCheckServiceNodes)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &args, out))
+		require.Len(t, out.Nodes, 1)
+	})
+
+	t.Run("ChecksInState", func(t *testing.T) {
+		args := structs.ChecksInStateRequest{
+			Datacenter:   "dc1",
+			State:        api.HealthAny,
+			QueryOptions: structs.QueryOptions{Filter: "Node == baz"},
+		}
+
+		out := new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
+		require.Len(t, out.HealthChecks, 6)
+
+		args.Filter = "Status == warning or Status == critical"
+		out = new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
+		require.Len(t, out.HealthChecks, 2)
+
+		args.State = api.HealthCritical
+		args.Filter = "Node == baz"
+		out = new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
+		require.Len(t, out.HealthChecks, 1)
+
+		args.State = api.HealthWarning
+		out = new(structs.IndexedHealthChecks)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ChecksInState", &args, out))
+		require.Len(t, out.HealthChecks, 1)
+	})
+}

@@ -4,9 +4,14 @@ import (
 	"errors"
 	"fmt"
 	"net"
+	"net/rpc"
 	"testing"
 
+	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
+	"github.com/hashicorp/consul/types"
+	"github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/raft"
 	"github.com/hashicorp/serf/serf"
 	"github.com/stretchr/testify/require"
@@ -180,3 +185,301 @@ func serfMembersContains(members []serf.Member, addr string) bool {
 	}
 	return false
 }
 
+func registerTestCatalogEntries(t *testing.T, codec rpc.ClientCodec) {
+	t.Helper()
+
+	// prep the cluster with some data we can use in our filters
+	registrations := map[string]*structs.RegisterRequest{
+		"Node foo": &structs.RegisterRequest{
+			Datacenter: "dc1",
+			Node:       "foo",
+			ID:         types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"),
+			Address:    "127.0.0.2",
+			TaggedAddresses: map[string]string{
+				"lan": "127.0.0.2",
+				"wan": "198.18.0.2",
+			},
+			NodeMeta: map[string]string{
+				"env": "production",
+				"os":  "linux",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:    "foo",
+					CheckID: "foo:alive",
+					Name:    "foo-liveness",
+					Status:  api.HealthPassing,
+					Notes:   "foo is alive and well",
+				},
+				&structs.HealthCheck{
+					Node:    "foo",
+					CheckID: "foo:ssh",
+					Name:    "foo-remote-ssh",
+					Status:  api.HealthPassing,
+					Notes:   "foo has ssh access",
+				},
+			},
+		},
+		"Service redis v1 on foo": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "foo",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "redisV1",
+				Service: "redis",
+				Tags:    []string{"v1"},
+				Meta:    map[string]string{"version": "1"},
+				Port:    1234,
+				Address: "198.18.1.2",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "foo",
+					CheckID:     "foo:redisV1",
+					Name:        "redis-liveness",
+					Status:      api.HealthPassing,
+					Notes:       "redis v1 is alive and well",
+					ServiceID:   "redisV1",
+					ServiceName: "redis",
+				},
+			},
+		},
+		"Service redis v2 on foo": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "foo",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "redisV2",
+				Service: "redis",
+				Tags:    []string{"v2"},
+				Meta:    map[string]string{"version": "2"},
+				Port:    1235,
+				Address: "198.18.1.2",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "foo",
+					CheckID:     "foo:redisV2",
+					Name:        "redis-v2-liveness",
+					Status:      api.HealthPassing,
+					Notes:       "redis v2 is alive and well",
+					ServiceID:   "redisV2",
+					ServiceName: "redis",
+				},
+			},
+		},
+		"Node bar": &structs.RegisterRequest{
+			Datacenter: "dc1",
+			Node:       "bar",
+			ID:         types.NodeID("c6e7a976-8f4f-44b5-bdd3-631be7e8ecac"),
+			Address:    "127.0.0.3",
+			TaggedAddresses: map[string]string{
+				"lan": "127.0.0.3",
+				"wan": "198.18.0.3",
+			},
+			NodeMeta: map[string]string{
+				"env": "production",
+				"os":  "windows",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:    "bar",
+					CheckID: "bar:alive",
+					Name:    "bar-liveness",
+					Status:  api.HealthPassing,
+					Notes:   "bar is alive and well",
+				},
+			},
+		},
+		"Service redis v1 on bar": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "bar",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "redisV1",
+				Service: "redis",
+				Tags:    []string{"v1"},
+				Meta:    map[string]string{"version": "1"},
+				Port:    1234,
+				Address: "198.18.1.3",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "bar",
+					CheckID:     "bar:redisV1",
+					Name:        "redis-liveness",
+					Status:      api.HealthPassing,
+					Notes:       "redis v1 is alive and well",
+					ServiceID:   "redisV1",
+					ServiceName: "redis",
+				},
+			},
+		},
+		"Service web v1 on bar": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "bar",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "webV1",
+				Service: "web",
+				Tags:    []string{"v1", "connect"},
+				Meta:    map[string]string{"version": "1", "connect": "enabled"},
+				Port:    443,
+				Address: "198.18.1.4",
+				Connect: structs.ServiceConnect{Native: true},
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "bar",
+					CheckID:     "bar:web:v1",
+					Name:        "web-v1-liveness",
+					Status:      api.HealthPassing,
+					Notes:       "web connect v1 is alive and well",
+					ServiceID:   "webV1",
+					ServiceName: "web",
+				},
+			},
+		},
+		"Node baz": &structs.RegisterRequest{
+			Datacenter: "dc1",
+			Node:       "baz",
+			ID:         types.NodeID("12f96b27-a7b0-47bd-add7-044a2bfc7bfb"),
+			Address:    "127.0.0.4",
+			TaggedAddresses: map[string]string{
+				"lan": "127.0.0.4",
+			},
+			NodeMeta: map[string]string{
+				"env": "qa",
+				"os":  "linux",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:    "baz",
+					CheckID: "baz:alive",
+					Name:    "baz-liveness",
+					Status:  api.HealthPassing,
+					Notes:   "baz is alive and well",
+				},
+				&structs.HealthCheck{
+					Node:    "baz",
+					CheckID: "baz:ssh",
+					Name:    "baz-remote-ssh",
+					Status:  api.HealthPassing,
+					Notes:   "baz has ssh access",
+				},
+			},
+		},
+		"Service web v1 on baz": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "baz",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "webV1",
+				Service: "web",
+				Tags:    []string{"v1", "connect"},
+				Meta:    map[string]string{"version": "1", "connect": "enabled"},
+				Port:    443,
+				Address: "198.18.1.4",
+				Connect: structs.ServiceConnect{Native: true},
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "baz",
+					CheckID:     "baz:web:v1",
+					Name:        "web-v1-liveness",
+					Status:      api.HealthPassing,
+					Notes:       "web connect v1 is alive and well",
+					ServiceID:   "webV1",
+					ServiceName: "web",
+				},
+			},
+		},
+		"Service web v2 on baz": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "baz",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "webV2",
+				Service: "web",
+				Tags:    []string{"v2", "connect"},
+				Meta:    map[string]string{"version": "2", "connect": "enabled"},
+				Port:    8443,
+				Address: "198.18.1.4",
+				Connect: structs.ServiceConnect{Native: true},
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "baz",
+					CheckID:     "baz:web:v2",
+					Name:        "web-v2-liveness",
+					Status:      api.HealthPassing,
+					Notes:       "web connect v2 is alive and well",
+					ServiceID:   "webV2",
+					ServiceName: "web",
+				},
+			},
+		},
+		"Service critical on baz": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "baz",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "criticalV2",
+				Service: "critical",
+				Tags:    []string{"v2"},
+				Meta:    map[string]string{"version": "2"},
+				Port:    8080,
+				Address: "198.18.1.4",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "baz",
+					CheckID:     "baz:critical:v2",
+					Name:        "critical-v2-liveness",
+					Status:      api.HealthCritical,
+					Notes:       "critical v2 is in the critical state",
+					ServiceID:   "criticalV2",
+					ServiceName: "critical",
+				},
+			},
+		},
+		"Service warning on baz": &structs.RegisterRequest{
+			Datacenter:     "dc1",
+			Node:           "baz",
+			SkipNodeUpdate: true,
+			Service: &structs.NodeService{
+				Kind:    structs.ServiceKindTypical,
+				ID:      "warningV2",
+				Service: "warning",
+				Tags:    []string{"v2"},
+				Meta:    map[string]string{"version": "2"},
+				Port:    8081,
+				Address: "198.18.1.4",
+			},
+			Checks: structs.HealthChecks{
+				&structs.HealthCheck{
+					Node:        "baz",
+					CheckID:     "baz:warning:v2",
+					Name:        "warning-v2-liveness",
+					Status:      api.HealthWarning,
+					Notes:       "warning v2 is in the warning state",
+					ServiceID:   "warningV2",
+					ServiceName: "warning",
+				},
+			},
+		},
+	}
+
+	for name, reg := range registrations {
+		err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil)
+		require.NoError(t, err, "Failed catalog registration %q: %v", name, err)
+	}
+}

@@ -6,6 +6,7 @@ import (
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
+	bexpr "github.com/hashicorp/go-bexpr"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/serf/serf"
@@ -46,6 +47,11 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
 		return err
 	}
 
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.Dump)
+	if err != nil {
+		return err
+	}
+
 	return m.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
@@ -56,7 +62,51 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
 			}
 
 			reply.Index, reply.Dump = index, dump
-			return m.srv.filterACL(args.Token, reply)
+			if err := m.srv.filterACL(args.Token, reply); err != nil {
+				return err
+			}
+
+			raw, err := filter.Execute(reply.Dump)
+			if err != nil {
+				return err
+			}
+
+			reply.Dump = raw.(structs.NodeDump)
+			return nil
 		})
 }
 
+func (m *Internal) ServiceDump(args *structs.DCSpecificRequest, reply *structs.IndexedCheckServiceNodes) error {
+	if done, err := m.srv.forward("Internal.ServiceDump", args, args, reply); done {
+		return err
+	}
+
+	filter, err := bexpr.CreateFilter(args.Filter, nil, reply.Nodes)
+	if err != nil {
+		return err
+	}
+
+	return m.srv.blockingQuery(
+		&args.QueryOptions,
+		&reply.QueryMeta,
+		func(ws memdb.WatchSet, state *state.Store) error {
+			index, nodes, err := state.ServiceDump(ws)
+			if err != nil {
+				return err
+			}
+
+			reply.Index, reply.Nodes = index, nodes
+			if err := m.srv.filterACL(args.Token, reply); err != nil {
+				return err
+			}
+
+			raw, err := filter.Execute(reply.Nodes)
+			if err != nil {
+				return err
+			}
+
+			reply.Nodes = raw.(structs.CheckServiceNodes)
+			return nil
+		})
+}

@@ -11,6 +11,8 @@ import (
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/testrpc"
+	"github.com/hashicorp/net-rpc-msgpackrpc"
+
 	"github.com/stretchr/testify/require"
 )
 
 func TestInternal_NodeInfo(t *testing.T) {
@@ -159,6 +161,64 @@ func TestInternal_NodeDump(t *testing.T) {
 	}
 }
 
+func TestInternal_NodeDump_Filter(t *testing.T) {
+	t.Parallel()
+	dir1, s1 := testServer(t)
+	defer os.RemoveAll(dir1)
+	defer s1.Shutdown()
+	codec := rpcClient(t, s1)
+	defer codec.Close()
+
+	testrpc.WaitForLeader(t, s1.RPC, "dc1")
+
+	arg := structs.RegisterRequest{
+		Datacenter: "dc1",
+		Node:       "foo",
+		Address:    "127.0.0.1",
+		Service: &structs.NodeService{
+			ID:      "db",
+			Service: "db",
+			Tags:    []string{"master"},
+		},
+		Check: &structs.HealthCheck{
+			Name:      "db connect",
+			Status:    api.HealthPassing,
+			ServiceID: "db",
+		},
+	}
+	var out struct{}
+	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
+
+	arg = structs.RegisterRequest{
+		Datacenter: "dc1",
+		Node:       "bar",
+		Address:    "127.0.0.2",
+		Service: &structs.NodeService{
+			ID:      "db",
+			Service: "db",
+			Tags:    []string{"slave"},
+		},
+		Check: &structs.HealthCheck{
+			Name:      "db connect",
+			Status:    api.HealthWarning,
+			ServiceID: "db",
+		},
+	}
+
+	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
+
+	var out2 structs.IndexedNodeDump
+	req := structs.DCSpecificRequest{
+		Datacenter:   "dc1",
+		QueryOptions: structs.QueryOptions{Filter: "master in Services.Tags"},
+	}
+	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2))
+
+	nodes := out2.Dump
+	require.Len(t, nodes, 1)
+	require.Equal(t, "foo", nodes[0].Node)
+}
+
 func TestInternal_KeyringOperation(t *testing.T) {
 	t.Parallel()
 	key1 := "H1dfkSZOVnP/JUnaBfTzXg=="
@@ -378,3 +438,48 @@ func TestInternal_EventFire_Token(t *testing.T) {
 		t.Fatalf("err: %s", err)
 	}
 }
 
+func TestInternal_ServiceDump(t *testing.T) {
+	t.Parallel()
+	dir1, s1 := testServer(t)
+	defer os.RemoveAll(dir1)
+	defer s1.Shutdown()
+	codec := rpcClient(t, s1)
+	defer codec.Close()
+
+	testrpc.WaitForLeader(t, s1.RPC, "dc1")
+
+	// prep the cluster with some data we can use in our filters
+	registerTestCatalogEntries(t, codec)
+
+	doRequest := func(t *testing.T, filter string) structs.CheckServiceNodes {
+		t.Helper()
+		args := structs.DCSpecificRequest{
+			Datacenter:   "dc1",
+			QueryOptions: structs.QueryOptions{Filter: filter},
+		}
+
+		var out structs.IndexedCheckServiceNodes
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out))
+		return out.Nodes
+	}
+
+	// Run the tests against the test server
+	t.Run("No Filter", func(t *testing.T) {
+		nodes := doRequest(t, "")
+		// redis (3), web (3), critical (1), warning (1) and consul (1)
+		require.Len(t, nodes, 9)
+	})
+
+	t.Run("Filter Node foo and service version 1", func(t *testing.T) {
+		nodes := doRequest(t, "Node.Node == foo and Service.Meta.version == 1")
+		require.Len(t, nodes, 1)
+		require.Equal(t, "redis", nodes[0].Service.Service)
+		require.Equal(t, "redisV1", nodes[0].Service.ID)
+	})
+
+	t.Run("Filter service web", func(t *testing.T) {
+		nodes := doRequest(t, "Service.Service == web")
+		require.Len(t, nodes, 3)
+	})
+}

@@ -2131,6 +2131,27 @@ func (s *Store) NodeDump(ws memdb.WatchSet) (uint64, structs.NodeDump, error) {
 	return s.parseNodes(tx, ws, idx, nodes)
 }
 
+func (s *Store) ServiceDump(ws memdb.WatchSet) (uint64, structs.CheckServiceNodes, error) {
+	tx := s.db.Txn(false)
+	defer tx.Abort()
+
+	// Get the table index
+	idx := maxIndexWatchTxn(tx, ws, "nodes", "services", "checks")
+
+	services, err := tx.Get("services", "id")
+	if err != nil {
+		return 0, nil, fmt.Errorf("failed service lookup: %s", err)
+	}
+
+	var results structs.ServiceNodes
+	for service := services.Next(); service != nil; service = services.Next() {
+		sn := service.(*structs.ServiceNode)
+		results = append(results, sn)
+	}
+
+	return s.parseCheckServiceNodes(tx, nil, idx, "", results, err)
+}
+
 // parseNodes takes an iterator over a set of nodes and returns a struct
 // containing the nodes along with all of their associated services
 // and/or health checks.

@@ -214,14 +214,19 @@ func (s *Store) maxIndex(tables ...string) uint64 {
 // maxIndexTxn is a helper used to retrieve the highest known index
 // amongst a set of tables in the db.
 func maxIndexTxn(tx *memdb.Txn, tables ...string) uint64 {
+	return maxIndexWatchTxn(tx, nil, tables...)
+}
+
+func maxIndexWatchTxn(tx *memdb.Txn, ws memdb.WatchSet, tables ...string) uint64 {
 	var lindex uint64
 	for _, table := range tables {
-		ti, err := tx.First("index", "id", table)
+		ch, ti, err := tx.FirstWatch("index", "id", table)
 		if err != nil {
 			panic(fmt.Sprintf("unknown index: %s err: %s", table, err))
 		}
 		if idx, ok := ti.(*IndexEntry); ok && idx.Value > lindex {
 			lindex = idx.Value
 		}
+		ws.Add(ch)
 	}
 	return lindex

@ -6,13 +6,14 @@ import (
|
|||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -107,6 +108,52 @@ func TestHealthChecksInState_NodeMetaFilter(t *testing.T) {
|
|||
})
|
||||
}
|
||||
|
||||
func TestHealthChecksInState_Filter(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
defer a.Shutdown()
|
||||
|
||||
args := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "bar",
|
||||
Address: "127.0.0.1",
|
||||
NodeMeta: map[string]string{"somekey": "somevalue"},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "bar",
|
||||
Name: "node check",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
args = &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "bar",
|
||||
Address: "127.0.0.1",
|
||||
NodeMeta: map[string]string{"somekey": "somevalue"},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "bar",
|
||||
Name: "node check 2",
|
||||
Status: api.HealthCritical,
|
||||
},
|
||||
SkipNodeUpdate: true,
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
req, _ := http.NewRequest("GET", "/v1/health/state/critical?filter="+url.QueryEscape("Name == `node check 2`"), nil)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.HealthChecksInState(resp, req)
|
||||
require.NoError(r, err)
|
||||
require.NoError(r, checkIndex(resp))
|
||||
|
||||
// Should be 1 health check for the server
|
||||
nodes := obj.(structs.HealthChecks)
|
||||
require.Len(r, nodes, 1)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHealthChecksInState_DistanceSort(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
|
@ -216,6 +263,50 @@ func TestHealthNodeChecks(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHealthNodeChecks_Filtering(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||
|
||||
// Create a node check
|
||||
args := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "test-health-node",
|
||||
Address: "127.0.0.2",
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "test-health-node",
|
||||
Name: "check1",
|
||||
},
|
||||
}
|
||||
|
||||
var out struct{}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
// Create a second check
|
||||
args = &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "test-health-node",
|
||||
Address: "127.0.0.2",
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "test-health-node",
|
||||
Name: "check2",
|
||||
},
|
||||
SkipNodeUpdate: true,
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
req, _ := http.NewRequest("GET", "/v1/health/node/test-health-node?filter="+url.QueryEscape("Name == check2"), nil)
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.HealthNodeChecks(resp, req)
|
||||
require.NoError(t, err)
|
||||
assertIndex(t, resp)
|
||||
|
||||
// Should be 1 health check for the server
|
||||
nodes := obj.(structs.HealthChecks)
|
||||
require.Len(t, nodes, 1)
|
||||
}
|
||||
|
||||
func TestHealthServiceChecks(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
|
@ -321,6 +412,67 @@ func TestHealthServiceChecks_NodeMetaFilter(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHealthServiceChecks_Filtering(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||
|
||||
req, _ := http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&node-meta=somekey:somevalue", nil)
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.HealthServiceChecks(resp, req)
|
||||
require.NoError(t, err)
|
||||
assertIndex(t, resp)
|
||||
|
||||
// Should be a non-nil empty list
|
||||
nodes := obj.(structs.HealthChecks)
|
||||
require.Empty(t, nodes)
|
||||
|
||||
args := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: a.Config.NodeName,
|
||||
Address: "127.0.0.1",
|
||||
NodeMeta: map[string]string{"somekey": "somevalue"},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: a.Config.NodeName,
|
||||
Name: "consul check",
|
||||
ServiceID: "consul",
|
||||
},
|
||||
SkipNodeUpdate: true,
|
||||
}
|
||||
|
||||
var out struct{}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
// Create a new node, service and check
|
||||
args = &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "test-health-node",
|
||||
Address: "127.0.0.2",
|
||||
NodeMeta: map[string]string{"somekey": "somevalue"},
|
||||
Service: &structs.NodeService{
|
||||
ID: "consul",
|
||||
Service: "consul",
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "test-health-node",
|
||||
Name: "consul check",
|
||||
ServiceID: "consul",
|
||||
},
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
req, _ = http.NewRequest("GET", "/v1/health/checks/consul?dc=dc1&filter="+url.QueryEscape("Node == `test-health-node`"), nil)
|
||||
resp = httptest.NewRecorder()
|
||||
obj, err = a.srv.HealthServiceChecks(resp, req)
|
||||
require.NoError(t, err)
|
||||
assertIndex(t, resp)
|
||||
|
||||
// Should be 1 health check for consul
|
||||
nodes = obj.(structs.HealthChecks)
|
||||
require.Len(t, nodes, 1)
|
||||
}
|
||||
|
||||
func TestHealthServiceChecks_DistanceSort(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
|
@ -580,6 +732,68 @@ func TestHealthServiceNodes_NodeMetaFilter(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestHealthServiceNodes_Filter(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||
|
||||
req, _ := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1&filter="+url.QueryEscape("Node.Node == `test-health-node`"), nil)
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.HealthServiceNodes(resp, req)
|
||||
require.NoError(t, err)
|
||||
assertIndex(t, resp)
|
||||
|
||||
// Should be a non-nil empty list
|
||||
nodes := obj.(structs.CheckServiceNodes)
|
||||
require.Empty(t, nodes)
|
||||
|
||||
args := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: a.Config.NodeName,
|
||||
Address: "127.0.0.1",
|
||||
NodeMeta: map[string]string{"somekey": "somevalue"},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: a.Config.NodeName,
|
||||
Name: "consul check",
|
||||
ServiceID: "consul",
|
||||
},
|
||||
}
|
||||
|
||||
var out struct{}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
// Create a new node, service and check
|
||||
args = &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "test-health-node",
|
||||
Address: "127.0.0.2",
|
||||
NodeMeta: map[string]string{"somekey": "somevalue"},
|
||||
Service: &structs.NodeService{
|
||||
ID: "consul",
|
||||
Service: "consul",
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Node: "test-health-node",
|
||||
Name: "consul check",
|
||||
ServiceID: "consul",
|
||||
},
|
||||
}
|
||||
require.NoError(t, a.RPC("Catalog.Register", args, &out))
|
||||
|
||||
req, _ = http.NewRequest("GET", "/v1/health/service/consul?dc=dc1&filter="+url.QueryEscape("Node.Node == `test-health-node`"), nil)
|
||||
resp = httptest.NewRecorder()
|
||||
obj, err = a.srv.HealthServiceNodes(resp, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
assertIndex(t, resp)
|
||||
|
||||
// Should be a non-nil empty list for checks
|
||||
nodes = obj.(structs.CheckServiceNodes)
|
||||
require.Len(t, nodes, 1)
|
||||
require.Len(t, nodes[0].Checks, 1)
|
||||
}
|
||||
|
||||
func TestHealthServiceNodes_DistanceSort(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
|
@@ -868,6 +1082,44 @@ func TestHealthConnectServiceNodes(t *testing.T) {
	assert.Len(nodes[0].Checks, 0)
}

func TestHealthConnectServiceNodes_Filter(t *testing.T) {
	t.Parallel()

	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register
	args := structs.TestRegisterRequestProxy(t)
	args.Service.Address = "127.0.0.55"
	var out struct{}
	require.NoError(t, a.RPC("Catalog.Register", args, &out))

	args = structs.TestRegisterRequestProxy(t)
	args.Service.Address = "127.0.0.55"
	args.Service.Meta = map[string]string{
		"version": "2",
	}
	args.Service.ID = "web-proxy2"
	args.SkipNodeUpdate = true
	require.NoError(t, a.RPC("Catalog.Register", args, &out))

	req, _ := http.NewRequest("GET", fmt.Sprintf(
		"/v1/health/connect/%s?filter=%s",
		args.Service.Proxy.DestinationServiceName,
		url.QueryEscape("Service.Meta.version == 2")), nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.HealthConnectServiceNodes(resp, req)
	require.NoError(t, err)
	assertIndex(t, resp)

	nodes := obj.(structs.CheckServiceNodes)
	require.Len(t, nodes, 1)
	require.Equal(t, structs.ServiceKindConnectProxy, nodes[0].Service.Kind)
	require.Equal(t, args.Service.Address, nodes[0].Service.Address)
	require.Equal(t, args.Service.Proxy, nodes[0].Service.Proxy)
}
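Note that `Service.Meta.version == 2` matches even though meta values are strings: go-bexpr coerces the right-hand value with the field's configured CoerceFn (CoerceString for map values). A minimal standalone sketch of the same evaluation outside Consul, using the same CreateFilter/Execute pair the endpoints use (the `Svc` type here is hypothetical):

```
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Svc is a stand-in for a service instance; only exported fields are filterable.
type Svc struct {
	Service string
	Meta    map[string]string
}

func main() {
	svcs := []Svc{
		{Service: "web-proxy", Meta: map[string]string{"version": "1"}},
		{Service: "web-proxy2", Meta: map[string]string{"version": "2"}},
	}
	filter, err := bexpr.CreateFilter("Meta.version == 2", nil, svcs)
	if err != nil {
		panic(err)
	}
	raw, err := filter.Execute(svcs)
	if err != nil {
		panic(err)
	}
	fmt.Println(raw.([]Svc)) // only the version 2 instance remains
}
```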

func TestHealthConnectServiceNodes_PassingFilter(t *testing.T) {
	t.Parallel()

@@ -878,6 +878,7 @@ func (s *HTTPServer) parseMetaFilter(req *http.Request) map[string]string {
func (s *HTTPServer) parseInternal(resp http.ResponseWriter, req *http.Request, dc *string, b *structs.QueryOptions, resolveProxyToken bool) bool {
	s.parseDC(req, dc)
	s.parseTokenInternal(req, &b.Token, resolveProxyToken)
	s.parseFilter(req, &b.Filter)
	if s.parseConsistency(resp, req, b) {
		return true
	}

@@ -923,3 +924,9 @@ func (s *HTTPServer) checkWriteAccess(req *http.Request) error {

	return ForbiddenError{}
}

func (s *HTTPServer) parseFilter(req *http.Request, filter *string) {
	if other := req.URL.Query().Get("filter"); other != "" {
		*filter = other
	}
}

@@ -38,7 +38,7 @@ type ConnectProxyConfig struct {

	// Config is the arbitrary configuration data provided with the proxy
	// registration.
	Config map[string]interface{} `json:",omitempty"`
	Config map[string]interface{} `json:",omitempty" bexpr:"-"`

	// Upstreams describes any upstream dependencies the proxy instance should
	// setup.

@@ -121,7 +121,7 @@ type Upstream struct {
	// Config is an opaque config that is specific to the proxy process being run.
	// It can be used to pass arbitrary configuration for this specific upstream
	// to the proxy.
	Config map[string]interface{}
	Config map[string]interface{} `bexpr:"-"`
}

// Validate sanity checks the struct is valid

@@ -26,8 +26,8 @@ type MessageType uint8
// RaftIndex is used to track the index used while creating
// or modifying a given struct type.
type RaftIndex struct {
	CreateIndex uint64
	ModifyIndex uint64
	CreateIndex uint64 `bexpr:"-"`
	ModifyIndex uint64 `bexpr:"-"`
}
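The `bexpr:"-"` tag works like `json:"-"`: go-bexpr's field-configuration generator skips the field entirely, so raft bookkeeping can never appear in a filter selector. A small sketch (the `Example` type is hypothetical):

```
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

type Example struct {
	Name        string
	CreateIndex uint64 `bexpr:"-"`
}

func main() {
	fields, err := bexpr.GenerateFieldConfigurations((*Example)(nil))
	if err != nil {
		panic(err)
	}
	_, hasName := fields["Name"]
	_, hasIndex := fields["CreateIndex"]
	fmt.Println(hasName, hasIndex) // true false
}
```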

// These are serialized between Consul servers and stored in Consul snapshots,

@@ -164,6 +164,10 @@ type QueryOptions struct {
	// ignored if the endpoint supports background refresh caching. See
	// https://www.consul.io/api/index.html#agent-caching for more details.
	StaleIfError time.Duration

	// Filter specifies the go-bexpr filter expression to be used for
	// filtering the data prior to returning a response
	Filter string
}
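A hedged sketch of how an RPC endpoint could honor the new QueryOptions.Filter field; the helper name is made up and the real wiring lives in the server-side endpoints, but the CreateFilter/Execute shape matches the HTTP handlers in this diff:

```
// filterNodes is illustrative only; it assumes the structs types and the
// go-bexpr import from this package's context.
func filterNodes(opts QueryOptions, nodes Nodes) (Nodes, error) {
	if opts.Filter == "" {
		return nodes, nil
	}
	filter, err := bexpr.CreateFilter(opts.Filter, nil, nodes)
	if err != nil {
		return nil, err
	}
	raw, err := filter.Execute(nodes)
	if err != nil {
		return nil, err
	}
	return raw.(Nodes), nil
}
```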

// IsRead is always true for QueryOption.

@@ -332,10 +336,13 @@ func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo {
		MustRevalidate: r.MustRevalidate,
	}

	// To calculate the cache key we only hash the node filters. The
	// datacenter is handled by the cache framework. The other fields are
	// To calculate the cache key we only hash the node meta filters and the bexpr filter.
	// The datacenter is handled by the cache framework. The other fields are
	// not, but should not be used in any cache types.
	v, err := hashstructure.Hash(r.NodeMetaFilters, nil)
	v, err := hashstructure.Hash([]interface{}{
		r.NodeMetaFilters,
		r.Filter,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces
		// no cache for this request so the request is forwarded directly

@@ -406,6 +413,7 @@ func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo {
		r.ServiceAddress,
		r.TagFilter,
		r.Connect,
		r.Filter,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces

@@ -444,6 +452,7 @@ func (r *NodeSpecificRequest) CacheInfo() cache.RequestInfo {

	v, err := hashstructure.Hash([]interface{}{
		r.Node,
		r.Filter,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces

@@ -477,7 +486,7 @@ type Node struct {
	TaggedAddresses map[string]string
	Meta            map[string]string

	RaftIndex
	RaftIndex `bexpr:"-"`
}
type Nodes []*Node

@@ -580,11 +589,11 @@ type ServiceNode struct {
	ServicePort              int
	ServiceEnableTagOverride bool
	// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
	ServiceProxyDestination string
	ServiceProxyDestination string `bexpr:"-"`
	ServiceProxy            ConnectProxyConfig
	ServiceConnect          ServiceConnect

	RaftIndex
	RaftIndex `bexpr:"-"`
}

// PartialClone() returns a clone of the given service node, minus the node-

@@ -695,7 +704,7 @@ type NodeService struct {
	// may be a service that isn't present in the catalog. This is expected and
	// allowed to allow for proxies to come up earlier than their target services.
	// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
	ProxyDestination string
	ProxyDestination string `bexpr:"-"`

	// Proxy is the configuration set for Kind = connect-proxy. It is mandatory in
	// that case and an error to be set for any other kind. This config is part of

@@ -730,9 +739,9 @@ type NodeService struct {
	// internal only. Right now our agent endpoints return api structs which don't
	// include it but this is a safety net incase we change that or there is
	// somewhere this is used in API output.
	LocallyRegisteredAsSidecar bool `json:"-"`
	LocallyRegisteredAsSidecar bool `json:"-" bexpr:"-"`

	RaftIndex
	RaftIndex `bexpr:"-"`
}

// ServiceConnect are the shared Connect settings between all service

@@ -744,7 +753,7 @@ type ServiceConnect struct {
	// Proxy configures a connect proxy instance for the service. This is
	// only used for agent service definitions and is invalid for non-agent
	// (catalog API) definitions.
	Proxy *ServiceDefinitionConnectProxy `json:",omitempty"`
	Proxy *ServiceDefinitionConnectProxy `json:",omitempty" bexpr:"-"`

	// SidecarService is a nested Service Definition to register at the same time.
	// It's purely a convenience mechanism to allow specifying a sidecar service

@@ -753,7 +762,7 @@ type ServiceConnect struct {
	// boilerplate needed to register a sidecar service separately, but the end
	// result is identical to just making a second service registration via any
	// other means.
	SidecarService *ServiceDefinition `json:",omitempty"`
	SidecarService *ServiceDefinition `json:",omitempty" bexpr:"-"`
}

// Validate validates the node service configuration.

@@ -925,9 +934,9 @@ type HealthCheck struct {
	ServiceName string   // optional service name
	ServiceTags []string // optional service tags

	Definition HealthCheckDefinition
	Definition HealthCheckDefinition `bexpr:"-"`

	RaftIndex
	RaftIndex `bexpr:"-"`
}

type HealthCheckDefinition struct {

@@ -0,0 +1,518 @@
package structs

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/api"
	bexpr "github.com/hashicorp/go-bexpr"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const dumpFieldConfig bool = false

///////////////////////////////////////////////////////////////////////////////
//
// NOTE: The tests within this file are designed to validate that the fields
//       that will be available for filtering for various data types in the
//       structs package have the correct field configurations. If you need
//       to update this file to get the tests passing again then you definitely
//       should update the documentation as well.
//
///////////////////////////////////////////////////////////////////////////////

type fieldConfigTest struct {
	dataType interface{}
	expected bexpr.FieldConfigurations
}

var expectedFieldConfigUpstreams bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"DestinationType": &bexpr.FieldConfiguration{
		StructFieldName:     "DestinationType",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"DestinationNamespace": &bexpr.FieldConfiguration{
		StructFieldName:     "DestinationNamespace",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"DestinationName": &bexpr.FieldConfiguration{
		StructFieldName:     "DestinationName",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Datacenter": &bexpr.FieldConfiguration{
		StructFieldName:     "Datacenter",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"LocalBindAddress": &bexpr.FieldConfiguration{
		StructFieldName:     "LocalBindAddress",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"LocalBindPort": &bexpr.FieldConfiguration{
		StructFieldName:     "LocalBindPort",
		CoerceFn:            bexpr.CoerceInt,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
}

var expectedFieldConfigConnectProxyConfig bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"DestinationServiceName": &bexpr.FieldConfiguration{
		StructFieldName:     "DestinationServiceName",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"DestinationServiceID": &bexpr.FieldConfiguration{
		StructFieldName:     "DestinationServiceID",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"LocalServiceAddress": &bexpr.FieldConfiguration{
		StructFieldName:     "LocalServiceAddress",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"LocalServicePort": &bexpr.FieldConfiguration{
		StructFieldName:     "LocalServicePort",
		CoerceFn:            bexpr.CoerceInt,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Upstreams": &bexpr.FieldConfiguration{
		StructFieldName:     "Upstreams",
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty},
		SubFields:           expectedFieldConfigUpstreams,
	},
}

var expectedFieldConfigServiceConnect bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"Native": &bexpr.FieldConfiguration{
		StructFieldName:     "Native",
		CoerceFn:            bexpr.CoerceBool,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
}

var expectedFieldConfigWeights bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"Passing": &bexpr.FieldConfiguration{
		StructFieldName:     "Passing",
		CoerceFn:            bexpr.CoerceInt,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Warning": &bexpr.FieldConfiguration{
		StructFieldName:     "Warning",
		CoerceFn:            bexpr.CoerceInt,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
}

var expectedFieldConfigMapStringValue bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	bexpr.FieldNameAny: &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
}

// these are not all in a table because some of them reference each other
var expectedFieldConfigNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"ID": &bexpr.FieldConfiguration{
		StructFieldName:     "ID",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Node": &bexpr.FieldConfiguration{
		StructFieldName:     "Node",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Address": &bexpr.FieldConfiguration{
		StructFieldName:     "Address",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Datacenter": &bexpr.FieldConfiguration{
		StructFieldName:     "Datacenter",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"TaggedAddresses": &bexpr.FieldConfiguration{
		StructFieldName:     "TaggedAddresses",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields: bexpr.FieldConfigurations{
			bexpr.FieldNameAny: &bexpr.FieldConfiguration{
				CoerceFn:            bexpr.CoerceString,
				SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
			},
		},
	},
	"Meta": &bexpr.FieldConfiguration{
		StructFieldName:     "Meta",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields: bexpr.FieldConfigurations{
			bexpr.FieldNameAny: &bexpr.FieldConfiguration{
				CoerceFn:            bexpr.CoerceString,
				SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
			},
		},
	},
}

var expectedFieldConfigNodeService bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"Kind": &bexpr.FieldConfiguration{
		StructFieldName:     "Kind",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"ID": &bexpr.FieldConfiguration{
		StructFieldName:     "ID",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Service": &bexpr.FieldConfiguration{
		StructFieldName:     "Service",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Tags": &bexpr.FieldConfiguration{
		StructFieldName:     "Tags",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
	},
	"Address": &bexpr.FieldConfiguration{
		StructFieldName:     "Address",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Meta": &bexpr.FieldConfiguration{
		StructFieldName:     "Meta",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields:           expectedFieldConfigMapStringValue,
	},
	"Port": &bexpr.FieldConfiguration{
		StructFieldName:     "Port",
		CoerceFn:            bexpr.CoerceInt,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Weights": &bexpr.FieldConfiguration{
		StructFieldName: "Weights",
		SubFields:       expectedFieldConfigWeights,
	},
	"EnableTagOverride": &bexpr.FieldConfiguration{
		StructFieldName:     "EnableTagOverride",
		CoerceFn:            bexpr.CoerceBool,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Proxy": &bexpr.FieldConfiguration{
		StructFieldName: "Proxy",
		SubFields:       expectedFieldConfigConnectProxyConfig,
	},
	"ServiceConnect": &bexpr.FieldConfiguration{
		StructFieldName: "ServiceConnect",
		SubFields:       expectedFieldConfigServiceConnect,
	},
}

var expectedFieldConfigServiceNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"ID": &bexpr.FieldConfiguration{
		StructFieldName:     "ID",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Node": &bexpr.FieldConfiguration{
		StructFieldName:     "Node",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Address": &bexpr.FieldConfiguration{
		StructFieldName:     "Address",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Datacenter": &bexpr.FieldConfiguration{
		StructFieldName:     "Datacenter",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"TaggedAddresses": &bexpr.FieldConfiguration{
		StructFieldName:     "TaggedAddresses",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields:           expectedFieldConfigMapStringValue,
	},
	"NodeMeta": &bexpr.FieldConfiguration{
		StructFieldName:     "NodeMeta",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields:           expectedFieldConfigMapStringValue,
	},
	"ServiceKind": &bexpr.FieldConfiguration{
		StructFieldName:     "ServiceKind",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"ServiceID": &bexpr.FieldConfiguration{
		StructFieldName:     "ServiceID",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"ServiceName": &bexpr.FieldConfiguration{
		StructFieldName:     "ServiceName",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"ServiceTags": &bexpr.FieldConfiguration{
		StructFieldName:     "ServiceTags",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
	},
	"ServiceAddress": &bexpr.FieldConfiguration{
		StructFieldName:     "ServiceAddress",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"ServiceMeta": &bexpr.FieldConfiguration{
		StructFieldName:     "ServiceMeta",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields:           expectedFieldConfigMapStringValue,
	},
	"ServicePort": &bexpr.FieldConfiguration{
		StructFieldName:     "ServicePort",
		CoerceFn:            bexpr.CoerceInt,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"ServiceWeights": &bexpr.FieldConfiguration{
		StructFieldName: "ServiceWeights",
		SubFields:       expectedFieldConfigWeights,
	},
	"ServiceEnableTagOverride": &bexpr.FieldConfiguration{
		StructFieldName:     "ServiceEnableTagOverride",
		CoerceFn:            bexpr.CoerceBool,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"ServiceProxy": &bexpr.FieldConfiguration{
		StructFieldName: "ServiceProxy",
		SubFields:       expectedFieldConfigConnectProxyConfig,
	},
	"ServiceConnect": &bexpr.FieldConfiguration{
		StructFieldName: "ServiceConnect",
		SubFields:       expectedFieldConfigServiceConnect,
	},
}

var expectedFieldConfigHealthCheck bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"Node": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "Node",
	},
	"CheckId": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "CheckId",
	},
	"Name": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "Name",
	},
	"Status": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "Status",
	},
	"Notes": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "Notes",
	},
	"Output": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "Output",
	},
	"ServiceID": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "ServiceID",
	},
	"ServiceName": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
		StructFieldName:     "ServiceName",
	},
	"ServiceTags": &bexpr.FieldConfiguration{
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		StructFieldName:     "ServiceTags",
	},
}

var expectedFieldConfigCheckServiceNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"Node": &bexpr.FieldConfiguration{
		StructFieldName: "Node",
		SubFields:       expectedFieldConfigNode,
	},
	"Service": &bexpr.FieldConfiguration{
		StructFieldName: "Service",
		SubFields:       expectedFieldConfigNodeService,
	},
	"Checks": &bexpr.FieldConfiguration{
		StructFieldName:     "Checks",
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty},
		SubFields:           expectedFieldConfigHealthCheck,
	},
}
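Taken together, these configurations are what make nested selectors and collection operators legal against health results (e.g. `Node.Meta.env == production`, `Checks is empty`). A minimal standalone sketch using hypothetical miniature types of the same shape, just to show nested selection working:

```
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Miniature stand-ins for the real structs; fields mirror the shapes above.
type Node struct {
	Datacenter string
	Meta       map[string]string
}

type CheckServiceNode struct {
	Node   Node
	Checks []string
}

func main() {
	data := []CheckServiceNode{
		{Node: Node{Datacenter: "dc1", Meta: map[string]string{"env": "production"}}, Checks: []string{"serfHealth"}},
		{Node: Node{Datacenter: "dc2"}},
	}
	filter, err := bexpr.CreateFilter("Node.Meta.env == production and Checks is not empty", nil, data)
	if err != nil {
		panic(err)
	}
	raw, err := filter.Execute(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw.([]CheckServiceNode))) // 1
}
```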

var expectedFieldConfigNodeInfo bexpr.FieldConfigurations = bexpr.FieldConfigurations{
	"ID": &bexpr.FieldConfiguration{
		StructFieldName:     "ID",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Node": &bexpr.FieldConfiguration{
		StructFieldName:     "Node",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"Address": &bexpr.FieldConfiguration{
		StructFieldName:     "Address",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
	},
	"TaggedAddresses": &bexpr.FieldConfiguration{
		StructFieldName:     "TaggedAddresses",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields: bexpr.FieldConfigurations{
			bexpr.FieldNameAny: &bexpr.FieldConfiguration{
				CoerceFn:            bexpr.CoerceString,
				SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
			},
		},
	},
	"Meta": &bexpr.FieldConfiguration{
		StructFieldName:     "Meta",
		CoerceFn:            bexpr.CoerceString,
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty, bexpr.MatchIn, bexpr.MatchNotIn},
		SubFields: bexpr.FieldConfigurations{
			bexpr.FieldNameAny: &bexpr.FieldConfiguration{
				CoerceFn:            bexpr.CoerceString,
				SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
			},
		},
	},
	"Services": &bexpr.FieldConfiguration{
		StructFieldName:     "Services",
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty},
		SubFields:           expectedFieldConfigNodeService,
	},
	"Checks": &bexpr.FieldConfiguration{
		StructFieldName:     "Checks",
		SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty},
		SubFields:           expectedFieldConfigHealthCheck,
	},
}

// Only need to generate the field configurations for the top level filtered types
// The internal types will be checked within these.
var fieldConfigTests map[string]fieldConfigTest = map[string]fieldConfigTest{
	"Node": fieldConfigTest{
		dataType: (*Node)(nil),
		expected: expectedFieldConfigNode,
	},
	"NodeService": fieldConfigTest{
		dataType: (*NodeService)(nil),
		expected: expectedFieldConfigNodeService,
	},
	"ServiceNode": fieldConfigTest{
		dataType: (*ServiceNode)(nil),
		expected: expectedFieldConfigServiceNode,
	},
	"HealthCheck": fieldConfigTest{
		dataType: (*HealthCheck)(nil),
		expected: expectedFieldConfigHealthCheck,
	},
	"CheckServiceNode": fieldConfigTest{
		dataType: (*CheckServiceNode)(nil),
		expected: expectedFieldConfigCheckServiceNode,
	},
	"NodeInfo": fieldConfigTest{
		dataType: (*NodeInfo)(nil),
		expected: expectedFieldConfigNodeInfo,
	},
	"api.AgentService": fieldConfigTest{
		dataType: (*api.AgentService)(nil),
		// this also happens to ensure that our API representation of a service that can be
		// registered with an agent stays in sync with our internal NodeService structure
		expected: expectedFieldConfigNodeService,
	},
}

func validateFieldConfigurationsRecurse(t *testing.T, expected, actual bexpr.FieldConfigurations, path string) bool {
	t.Helper()

	ok := assert.Len(t, actual, len(expected), "Actual FieldConfigurations length of %d != expected length of %d for path %q", len(actual), len(expected), path)

	for fieldName, expectedConfig := range expected {
		actualConfig, ok := actual[fieldName]
		ok = ok && assert.True(t, ok, "Actual configuration is missing field %q", fieldName)
		ok = ok && assert.Equal(t, expectedConfig.StructFieldName, actualConfig.StructFieldName, "Field %q on path %q have different StructFieldNames - Expected: %q, Actual: %q", fieldName, path, expectedConfig.StructFieldName, actualConfig.StructFieldName)
		ok = ok && assert.ElementsMatch(t, expectedConfig.SupportedOperations, actualConfig.SupportedOperations, "Fields %q on path %q have different SupportedOperations - Expected: %v, Actual: %v", fieldName, path, expectedConfig.SupportedOperations, actualConfig.SupportedOperations)

		newPath := string(fieldName)
		if newPath == "" {
			newPath = "*"
		}
		if path != "" {
			newPath = fmt.Sprintf("%s.%s", path, newPath)
		}
		ok = ok && validateFieldConfigurationsRecurse(t, expectedConfig.SubFields, actualConfig.SubFields, newPath)

		if !ok {
			break
		}
	}

	return ok
}

func validateFieldConfigurations(t *testing.T, expected, actual bexpr.FieldConfigurations) {
	t.Helper()
	require.True(t, validateFieldConfigurationsRecurse(t, expected, actual, ""))
}

func TestStructs_FilterFieldConfigurations(t *testing.T) {
	t.Parallel()
	for name, tcase := range fieldConfigTests {
		// capture these values in the closure
		name := name
		tcase := tcase
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			fields, err := bexpr.GenerateFieldConfigurations(tcase.dataType)
			if dumpFieldConfig {
				fmt.Printf("===== %s =====\n%s\n", name, fields)
			}
			require.NoError(t, err)
			validateFieldConfigurations(t, tcase.expected, fields)
		})
	}
}

func BenchmarkStructs_FilterFieldConfigurations(b *testing.B) {
	for name, tcase := range fieldConfigTests {
		b.Run(name, func(b *testing.B) {
			for n := 0; n < b.N; n++ {
				_, err := bexpr.GenerateFieldConfigurations(tcase.dataType)
				require.NoError(b, err)
			}
		})
	}
}

@@ -36,6 +36,7 @@ func (s *HTTPServer) UINodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
		return nil, nil
	}
	s.parseFilter(req, &args.Filter)

	// Make the RPC request
	var out structs.IndexedNodeDump

@@ -120,11 +121,13 @@ func (s *HTTPServer) UIServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
		return nil, nil
	}

	s.parseFilter(req, &args.Filter)

	// Make the RPC request
	var out structs.IndexedNodeDump
	var out structs.IndexedCheckServiceNodes
	defer setMeta(resp, &out.QueryMeta)
RPC:
	if err := s.agent.RPC("Internal.NodeDump", &args, &out); err != nil {
	if err := s.agent.RPC("Internal.ServiceDump", &args, &out); err != nil {
		// Retry the request allowing stale data if no leader
		if strings.Contains(err.Error(), structs.ErrNoLeader.Error()) && !args.AllowStale {
			args.AllowStale = true

@@ -134,10 +137,10 @@ RPC:
	}

	// Generate the summary
	return summarizeServices(out.Dump), nil
	return summarizeServices(out.Nodes), nil
}

func summarizeServices(dump structs.NodeDump) []*ServiceSummary {
func summarizeServices(dump structs.CheckServiceNodes) []*ServiceSummary {
	// Collect the summary information
	var services []string
	summary := make(map[string]*ServiceSummary)

@@ -151,48 +154,48 @@ func summarizeServices(dump structs.NodeDump) []*ServiceSummary {
		return serv
	}

	// Aggregate all the node information
	for _, node := range dump {
		nodeServices := make([]*ServiceSummary, len(node.Services))
		for idx, service := range node.Services {
			sum := getService(service.Service)
			sum.Tags = service.Tags
			sum.Nodes = append(sum.Nodes, node.Node)
			sum.Kind = service.Kind

			// If there is an external source, add it to the list of external
			// sources. We only want to add unique sources so there is extra
			// accounting here with an unexported field to maintain the set
			// of sources.
			if len(service.Meta) > 0 && service.Meta[metaExternalSource] != "" {
				source := service.Meta[metaExternalSource]
				if sum.externalSourceSet == nil {
					sum.externalSourceSet = make(map[string]struct{})
				}
				if _, ok := sum.externalSourceSet[source]; !ok {
					sum.externalSourceSet[source] = struct{}{}
					sum.ExternalSources = append(sum.ExternalSources, source)
	for _, csn := range dump {
		svc := csn.Service
		sum := getService(svc.Service)
		sum.Nodes = append(sum.Nodes, csn.Node.Node)
		sum.Kind = svc.Kind
		for _, tag := range svc.Tags {
			found := false
			for _, existing := range sum.Tags {
				if existing == tag {
					found = true
					break
				}
			}

			nodeServices[idx] = sum
			if !found {
				sum.Tags = append(sum.Tags, tag)
			}
		}
		for _, check := range node.Checks {
			var services []*ServiceSummary
			if check.ServiceName == "" {
				services = nodeServices
			} else {
				services = []*ServiceSummary{getService(check.ServiceName)}

		// If there is an external source, add it to the list of external
		// sources. We only want to add unique sources so there is extra
		// accounting here with an unexported field to maintain the set
		// of sources.
		if len(svc.Meta) > 0 && svc.Meta[metaExternalSource] != "" {
			source := svc.Meta[metaExternalSource]
			if sum.externalSourceSet == nil {
				sum.externalSourceSet = make(map[string]struct{})
			}
			for _, sum := range services {
				switch check.Status {
				case api.HealthPassing:
					sum.ChecksPassing++
				case api.HealthWarning:
					sum.ChecksWarning++
				case api.HealthCritical:
					sum.ChecksCritical++
				}
			if _, ok := sum.externalSourceSet[source]; !ok {
				sum.externalSourceSet[source] = struct{}{}
				sum.ExternalSources = append(sum.ExternalSources, source)
			}
		}

		for _, check := range csn.Checks {
			switch check.Status {
			case api.HealthPassing:
				sum.ChecksPassing++
			case api.HealthWarning:
				sum.ChecksWarning++
			case api.HealthCritical:
				sum.ChecksCritical++
			}
		}
	}
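This hunk interleaves the removed NodeDump-based aggregation with its replacement, so it reads doubled. The shape of the new loop, distilled (a hedged reconstruction from the lines above, not extra code in the PR): each CheckServiceNode is a single service instance, so tags must be deduplicated across instances and the check tally no longer needs a per-node services slice.

```
for _, csn := range dump {
	sum := getService(csn.Service.Service)
	sum.Nodes = append(sum.Nodes, csn.Node.Node)
	// merge csn.Service.Tags into sum.Tags without duplicates,
	// then tally each of csn.Checks into sum by check.Status
}
```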
@@ -7,9 +7,9 @@ import (
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"net/url"
	"os"
	"path/filepath"
	"reflect"
	"testing"

	"github.com/hashicorp/consul/testrpc"

@@ -18,6 +18,7 @@ import (
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil"
	cleanhttp "github.com/hashicorp/go-cleanhttp"
	"github.com/stretchr/testify/require"
)

func TestUiIndex(t *testing.T) {

@@ -102,6 +103,48 @@ func TestUiNodes(t *testing.T) {
	}
}

func TestUiNodes_Filter(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	args := &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"os": "linux",
		},
	}

	var out struct{}
	require.NoError(t, a.RPC("Catalog.Register", args, &out))

	args = &structs.RegisterRequest{
		Datacenter: "dc1",
		Node:       "test2",
		Address:    "127.0.0.1",
		NodeMeta: map[string]string{
			"os": "macos",
		},
	}
	require.NoError(t, a.RPC("Catalog.Register", args, &out))

	req, _ := http.NewRequest("GET", "/v1/internal/ui/nodes/dc1?filter="+url.QueryEscape("Meta.os == linux"), nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.UINodes(resp, req)
	require.NoError(t, err)
	assertIndex(t, resp)

	// Only the node matching the filter should come back, and its empty
	// service/check lists should be non-nil
	nodes := obj.(structs.NodeDump)
	require.Len(t, nodes, 1)
	require.Equal(t, nodes[0].Node, "test")
	require.Empty(t, nodes[0].Services)
	require.Empty(t, nodes[0].Checks)
}

func TestUiNodeInfo(t *testing.T) {
	t.Parallel()
	a := NewTestAgent(t, t.Name(), "")
@@ -152,113 +195,207 @@ func TestUiNodeInfo(t *testing.T) {
	}
}

func TestSummarizeServices(t *testing.T) {
func TestUiServices(t *testing.T) {
	t.Parallel()
	dump := structs.NodeDump{
		&structs.NodeInfo{
			Node:    "foo",
			Address: "127.0.0.1",
			Services: []*structs.NodeService{
				&structs.NodeService{
					Kind:    structs.ServiceKindTypical,
					Service: "api",
					Tags:    []string{"tag1", "tag2"},
				},
				&structs.NodeService{
					Kind:    structs.ServiceKindConnectProxy,
					Service: "web",
					Tags:    []string{},
					Meta:    map[string]string{metaExternalSource: "k8s"},
				},
			},
			Checks: []*structs.HealthCheck{
	a := NewTestAgent(t, t.Name(), "")
	defer a.Shutdown()
	testrpc.WaitForTestAgent(t, a.RPC, "dc1")

	requests := []*structs.RegisterRequest{
		// register foo node
		&structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "foo",
			Address:    "127.0.0.1",
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Status:      api.HealthPassing,
					ServiceName: "",
				},
				&structs.HealthCheck{
					Status:      api.HealthPassing,
					ServiceName: "web",
				},
				&structs.HealthCheck{
					Status:      api.HealthWarning,
					ServiceName: "api",
					Node:   "foo",
					Name:   "node check",
					Status: api.HealthPassing,
				},
			},
		},
		&structs.NodeInfo{
			Node:    "bar",
			Address: "127.0.0.2",
			Services: []*structs.NodeService{
				&structs.NodeService{
					Kind:    structs.ServiceKindConnectProxy,
					Service: "web",
					Tags:    []string{},
					Meta:    map[string]string{metaExternalSource: "k8s"},
		//register api service on node foo
		&structs.RegisterRequest{
			Datacenter:     "dc1",
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindTypical,
				Service: "api",
				Tags:    []string{"tag1", "tag2"},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "foo",
					Name:        "api svc check",
					ServiceName: "api",
					Status:      api.HealthWarning,
				},
			},
		},
		// register web svc on node foo
		&structs.RegisterRequest{
			Datacenter:     "dc1",
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "web",
				Tags:    []string{},
				Meta:    map[string]string{metaExternalSource: "k8s"},
				Port:    1234,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "api",
				},
			},
			Checks: structs.HealthChecks{
				&structs.HealthCheck{
					Node:        "foo",
					Name:        "web svc check",
					ServiceName: "web",
					Status:      api.HealthPassing,
				},
			},
		},
		// register bar node with service web
		&structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "bar",
			Address:    "127.0.0.2",
			Service: &structs.NodeService{
				Kind:    structs.ServiceKindConnectProxy,
				Service: "web",
				Tags:    []string{},
				Meta:    map[string]string{metaExternalSource: "k8s"},
				Port:    1234,
				Proxy: structs.ConnectProxyConfig{
					DestinationServiceName: "api",
				},
			},
			Checks: []*structs.HealthCheck{
				&structs.HealthCheck{
					Node:        "bar",
					Name:        "web svc check",
					Status:      api.HealthCritical,
					ServiceName: "web",
				},
			},
		},
		&structs.NodeInfo{
			Node:    "zip",
			Address: "127.0.0.3",
			Services: []*structs.NodeService{
				&structs.NodeService{
					Service: "cache",
					Tags:    []string{},
				},
		// register zip node with service cache
		&structs.RegisterRequest{
			Datacenter: "dc1",
			Node:       "zip",
			Address:    "127.0.0.3",
			Service: &structs.NodeService{
				Service: "cache",
				Tags:    []string{},
			},
		},
	}

	summary := summarizeServices(dump)
	if len(summary) != 3 {
		t.Fatalf("bad: %v", summary)
	for _, args := range requests {
		var out struct{}
		require.NoError(t, a.RPC("Catalog.Register", args, &out))
	}

	expectAPI := &ServiceSummary{
		Kind:           structs.ServiceKindTypical,
		Name:           "api",
		Tags:           []string{"tag1", "tag2"},
		Nodes:          []string{"foo"},
		ChecksPassing:  1,
		ChecksWarning:  1,
		ChecksCritical: 0,
	}
	if !reflect.DeepEqual(summary[0], expectAPI) {
		t.Fatalf("bad: %v", summary[0])
	}
	t.Run("No Filter", func(t *testing.T) {
		t.Parallel()
		req, _ := http.NewRequest("GET", "/v1/internal/ui/services/dc1", nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIServices(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

	expectCache := &ServiceSummary{
		Kind:           structs.ServiceKindTypical,
		Name:           "cache",
		Tags:           []string{},
		Nodes:          []string{"zip"},
		ChecksPassing:  0,
		ChecksWarning:  0,
		ChecksCritical: 0,
	}
	if !reflect.DeepEqual(summary[1], expectCache) {
		t.Fatalf("bad: %v", summary[1])
	}
		// Should be a summary for each of the four services
		summary := obj.([]*ServiceSummary)
		require.Len(t, summary, 4)

	expectWeb := &ServiceSummary{
		Kind:            structs.ServiceKindConnectProxy,
		Name:            "web",
		Tags:            []string{},
		Nodes:           []string{"bar", "foo"},
		ChecksPassing:   2,
		ChecksWarning:   0,
		ChecksCritical:  1,
		ExternalSources: []string{"k8s"},
	}
	summary[2].externalSourceSet = nil
	if !reflect.DeepEqual(summary[2], expectWeb) {
		t.Fatalf("bad: %v", summary[2])
	}
		// internal accounting that users don't see can be blown away
		for _, sum := range summary {
			sum.externalSourceSet = nil
		}

		expected := []*ServiceSummary{
			&ServiceSummary{
				Kind:           structs.ServiceKindTypical,
				Name:           "api",
				Tags:           []string{"tag1", "tag2"},
				Nodes:          []string{"foo"},
				ChecksPassing:  2,
				ChecksWarning:  1,
				ChecksCritical: 0,
			},
			&ServiceSummary{
				Kind:           structs.ServiceKindTypical,
				Name:           "cache",
				Tags:           nil,
				Nodes:          []string{"zip"},
				ChecksPassing:  0,
				ChecksWarning:  0,
				ChecksCritical: 0,
			},
			&ServiceSummary{
				Kind:            structs.ServiceKindConnectProxy,
				Name:            "web",
				Tags:            nil,
				Nodes:           []string{"bar", "foo"},
				ChecksPassing:   2,
				ChecksWarning:   1,
				ChecksCritical:  1,
				ExternalSources: []string{"k8s"},
			},
			&ServiceSummary{
				Kind:           structs.ServiceKindTypical,
				Name:           "consul",
				Tags:           nil,
				Nodes:          []string{a.Config.NodeName},
				ChecksPassing:  1,
				ChecksWarning:  0,
				ChecksCritical: 0,
			},
		}
		require.ElementsMatch(t, expected, summary)
	})

	t.Run("Filtered", func(t *testing.T) {
		filterQuery := url.QueryEscape("Service.Service == web or Service.Service == api")
		req, _ := http.NewRequest("GET", "/v1/internal/ui/services?filter="+filterQuery, nil)
		resp := httptest.NewRecorder()
		obj, err := a.srv.UIServices(resp, req)
		require.NoError(t, err)
		assertIndex(t, resp)

		// Only the two services matching the filter should be summarized
		summary := obj.([]*ServiceSummary)
		require.Len(t, summary, 2)

		// internal accounting that users don't see can be blown away
		for _, sum := range summary {
			sum.externalSourceSet = nil
		}

		expected := []*ServiceSummary{
			&ServiceSummary{
				Kind:           structs.ServiceKindTypical,
				Name:           "api",
				Tags:           []string{"tag1", "tag2"},
				Nodes:          []string{"foo"},
				ChecksPassing:  2,
				ChecksWarning:  1,
				ChecksCritical: 0,
			},
			&ServiceSummary{
				Kind:            structs.ServiceKindConnectProxy,
				Name:            "web",
				Tags:            nil,
				Nodes:           []string{"bar", "foo"},
				ChecksPassing:   2,
				ChecksWarning:   1,
				ChecksCritical:  1,
				ExternalSources: []string{"k8s"},
			},
		}
		require.ElementsMatch(t, expected, summary)
	})
}

api/agent.go
@@ -84,11 +84,11 @@ type AgentService struct {
	Address           string
	Weights           AgentWeights
	EnableTagOverride bool
	CreateIndex       uint64 `json:",omitempty"`
	ModifyIndex       uint64 `json:",omitempty"`
	ContentHash       string `json:",omitempty"`
	CreateIndex       uint64 `json:",omitempty" bexpr:"-"`
	ModifyIndex       uint64 `json:",omitempty" bexpr:"-"`
	ContentHash       string `json:",omitempty" bexpr:"-"`
	// DEPRECATED (ProxyDestination) - remove this field
	ProxyDestination string `json:",omitempty"`
	ProxyDestination string `json:",omitempty" bexpr:"-"`
	Proxy   *AgentServiceConnectProxyConfig `json:",omitempty"`
	Connect *AgentServiceConnect            `json:",omitempty"`
}

@@ -103,8 +103,8 @@ type AgentServiceChecksInfo struct {
// AgentServiceConnect represents the Connect configuration of a service.
type AgentServiceConnect struct {
	Native         bool                      `json:",omitempty"`
	Proxy          *AgentServiceConnectProxy `json:",omitempty"`
	SidecarService *AgentServiceRegistration `json:",omitempty"`
	Proxy          *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"`
	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
}

// AgentServiceConnectProxy represents the Connect Proxy configuration of a

@@ -112,7 +112,7 @@ type AgentServiceConnect struct {
type AgentServiceConnectProxy struct {
	ExecMode  ProxyExecMode          `json:",omitempty"`
	Command   []string               `json:",omitempty"`
	Config    map[string]interface{} `json:",omitempty"`
	Config    map[string]interface{} `json:",omitempty" bexpr:"-"`
	Upstreams []Upstream             `json:",omitempty"`
}

@@ -123,7 +123,7 @@ type AgentServiceConnectProxyConfig struct {
	DestinationServiceID string                 `json:",omitempty"`
	LocalServiceAddress  string                 `json:",omitempty"`
	LocalServicePort     int                    `json:",omitempty"`
	Config               map[string]interface{} `json:",omitempty"`
	Config               map[string]interface{} `json:",omitempty" bexpr:"-"`
	Upstreams            []Upstream
}

@@ -278,9 +278,9 @@ type ConnectProxyConfig struct {
	ContentHash string
	// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
	// but they don't need ExecMode or Command
	ExecMode  ProxyExecMode `json:",omitempty"`
	Command   []string      `json:",omitempty"`
	Config    map[string]interface{}
	ExecMode  ProxyExecMode          `json:",omitempty"`
	Command   []string               `json:",omitempty"`
	Config    map[string]interface{} `bexpr:"-"`
	Upstreams []Upstream
}

@@ -292,7 +292,7 @@ type Upstream struct {
	Datacenter       string                 `json:",omitempty"`
	LocalBindAddress string                 `json:",omitempty"`
	LocalBindPort    int                    `json:",omitempty"`
	Config           map[string]interface{} `json:",omitempty"`
	Config           map[string]interface{} `json:",omitempty" bexpr:"-"`
}

// Agent can be used to query the Agent endpoints

@@ -387,7 +387,14 @@ func (a *Agent) NodeName() (string, error) {

// Checks returns the locally registered checks
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
	return a.ChecksWithFilter("")
}

// ChecksWithFilter returns a subset of the locally registered checks that match
// the given filter expression
func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
	r := a.c.newRequest("GET", "/v1/agent/checks")
	r.filterQuery(filter)
	_, resp, err := requireOK(a.c.doRequest(r))
	if err != nil {
		return nil, err

@@ -403,7 +410,14 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) {

// Services returns the locally registered services
func (a *Agent) Services() (map[string]*AgentService, error) {
	return a.ServicesWithFilter("")
}

// ServicesWithFilter returns a subset of the locally registered services that match
// the given filter expression
func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
	r := a.c.newRequest("GET", "/v1/agent/services")
	r.filterQuery(filter)
	_, resp, err := requireOK(a.c.doRequest(r))
	if err != nil {
		return nil, err
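A hedged usage sketch for the two new helpers; the expressions are illustrative (AgentService exposes Service/Port/Tags, AgentCheck exposes Status):

```
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	agent := client.Agent()

	// Filtering happens inside the agent, not client-side.
	services, err := agent.ServicesWithFilter("Service == redis and Port != 8000")
	if err != nil {
		panic(err)
	}
	checks, err := agent.ChecksWithFilter("Status == critical")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(services), len(checks))
}
```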
@@ -231,6 +231,42 @@ func TestAPI_AgentServices(t *testing.T) {
	}
}

func TestAPI_AgentServicesWithFilter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	agent := c.Agent()

	reg := &AgentServiceRegistration{
		Name: "foo",
		ID:   "foo",
		Tags: []string{"bar", "baz"},
		Port: 8000,
		Check: &AgentServiceCheck{
			TTL: "15s",
		},
	}
	require.NoError(t, agent.ServiceRegister(reg))

	reg = &AgentServiceRegistration{
		Name: "foo",
		ID:   "foo2",
		Tags: []string{"foo", "baz"},
		Port: 8001,
		Check: &AgentServiceCheck{
			TTL: "15s",
		},
	}
	require.NoError(t, agent.ServiceRegister(reg))

	services, err := agent.ServicesWithFilter("foo in Tags")
	require.NoError(t, err)
	require.Len(t, services, 1)
	_, ok := services["foo2"]
	require.True(t, ok)
}

func TestAPI_AgentServices_ManagedConnectProxy(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)

@@ -860,6 +896,31 @@ func TestAPI_AgentChecks(t *testing.T) {
	}
}

func TestAPI_AgentChecksWithFilter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	agent := c.Agent()

	reg := &AgentCheckRegistration{
		Name: "foo",
	}
	reg.TTL = "15s"
	require.NoError(t, agent.CheckRegister(reg))
	reg = &AgentCheckRegistration{
		Name: "bar",
	}
	reg.TTL = "15s"
	require.NoError(t, agent.CheckRegister(reg))

	checks, err := agent.ChecksWithFilter("Name == foo")
	require.NoError(t, err)
	require.Len(t, checks, 1)
	_, ok := checks["foo"]
	require.True(t, ok)
}

func TestAPI_AgentScriptCheck(t *testing.T) {
	t.Parallel()
	c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) {

api/api.go
@@ -146,6 +146,10 @@ type QueryOptions struct {
	// ctx is an optional context pass through to the underlying HTTP
	// request layer. Use Context() and WithContext() to manage this.
	ctx context.Context

	// Filter requests filtering data prior to it being returned. The string
	// is a go-bexpr compatible expression.
	Filter string
}

func (o *QueryOptions) Context() context.Context {

@@ -614,6 +618,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
	if q.Near != "" {
		r.params.Set("near", q.Near)
	}
	if q.Filter != "" {
		r.params.Set("filter", q.Filter)
	}
	if len(q.NodeMeta) > 0 {
		for key, value := range q.NodeMeta {
			r.params.Add("node-meta", key+":"+value)

@@ -897,3 +904,11 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
	}
	return d, resp, nil
}

func (req *request) filterQuery(filter string) {
	if filter == "" {
		return
	}

	req.params.Set("filter", filter)
}
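Because setQueryOptions now forwards Filter, any list endpoint that accepts QueryOptions can filter server-side without a dedicated *WithFilter wrapper; filterQuery exists for the agent endpoints that don't take QueryOptions. A hedged example against the catalog:

```
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	// Filter rides along in the ?filter= query parameter.
	nodes, _, err := client.Catalog().Nodes(&api.QueryOptions{
		Filter: "Meta.os == linux",
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d linux nodes\n", len(nodes))
}
```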
api/api_test.go
@@ -83,6 +83,304 @@ func testKey() string {
		buf[10:16])
}

func testNodeServiceCheckRegistrations(t *testing.T, client *Client, datacenter string) {
	t.Helper()

	registrations := map[string]*CatalogRegistration{
		"Node foo": &CatalogRegistration{
			Datacenter: datacenter,
			Node:       "foo",
			ID:         "e0155642-135d-4739-9853-a1ee6c9f945b",
			Address:    "127.0.0.2",
			TaggedAddresses: map[string]string{
				"lan": "127.0.0.2",
				"wan": "198.18.0.2",
			},
			NodeMeta: map[string]string{
				"env": "production",
				"os":  "linux",
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:    "foo",
					CheckID: "foo:alive",
					Name:    "foo-liveness",
					Status:  HealthPassing,
					Notes:   "foo is alive and well",
				},
				&HealthCheck{
					Node:    "foo",
					CheckID: "foo:ssh",
					Name:    "foo-remote-ssh",
					Status:  HealthPassing,
					Notes:   "foo has ssh access",
				},
			},
		},
		"Service redis v1 on foo": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "redisV1",
				Service: "redis",
				Tags:    []string{"v1"},
				Meta:    map[string]string{"version": "1"},
				Port:    1234,
				Address: "198.18.1.2",
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:        "foo",
					CheckID:     "foo:redisV1",
					Name:        "redis-liveness",
					Status:      HealthPassing,
					Notes:       "redis v1 is alive and well",
					ServiceID:   "redisV1",
					ServiceName: "redis",
				},
			},
		},
		"Service redis v2 on foo": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "foo",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "redisV2",
				Service: "redis",
				Tags:    []string{"v2"},
				Meta:    map[string]string{"version": "2"},
				Port:    1235,
				Address: "198.18.1.2",
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:        "foo",
					CheckID:     "foo:redisV2",
					Name:        "redis-v2-liveness",
					Status:      HealthPassing,
					Notes:       "redis v2 is alive and well",
					ServiceID:   "redisV2",
					ServiceName: "redis",
				},
			},
		},
		"Node bar": &CatalogRegistration{
			Datacenter: datacenter,
			Node:       "bar",
			ID:         "c6e7a976-8f4f-44b5-bdd3-631be7e8ecac",
			Address:    "127.0.0.3",
			TaggedAddresses: map[string]string{
				"lan": "127.0.0.3",
				"wan": "198.18.0.3",
			},
			NodeMeta: map[string]string{
				"env": "production",
				"os":  "windows",
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:    "bar",
					CheckID: "bar:alive",
					Name:    "bar-liveness",
					Status:  HealthPassing,
					Notes:   "bar is alive and well",
				},
			},
		},
		"Service redis v1 on bar": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "bar",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "redisV1",
				Service: "redis",
				Tags:    []string{"v1"},
				Meta:    map[string]string{"version": "1"},
				Port:    1234,
				Address: "198.18.1.3",
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:        "bar",
					CheckID:     "bar:redisV1",
					Name:        "redis-liveness",
					Status:      HealthPassing,
					Notes:       "redis v1 is alive and well",
					ServiceID:   "redisV1",
					ServiceName: "redis",
				},
			},
		},
		"Service web v1 on bar": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "bar",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "webV1",
				Service: "web",
				Tags:    []string{"v1", "connect"},
				Meta:    map[string]string{"version": "1", "connect": "enabled"},
				Port:    443,
				Address: "198.18.1.4",
				Connect: &AgentServiceConnect{Native: true},
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:        "bar",
					CheckID:     "bar:web:v1",
					Name:        "web-v1-liveness",
					Status:      HealthPassing,
					Notes:       "web connect v1 is alive and well",
					ServiceID:   "webV1",
					ServiceName: "web",
				},
			},
		},
		"Node baz": &CatalogRegistration{
			Datacenter: datacenter,
			Node:       "baz",
			ID:         "12f96b27-a7b0-47bd-add7-044a2bfc7bfb",
			Address:    "127.0.0.4",
			TaggedAddresses: map[string]string{
				"lan": "127.0.0.4",
			},
			NodeMeta: map[string]string{
				"env": "qa",
				"os":  "linux",
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:    "baz",
					CheckID: "baz:alive",
					Name:    "baz-liveness",
					Status:  HealthPassing,
					Notes:   "baz is alive and well",
				},
				&HealthCheck{
					Node:    "baz",
					CheckID: "baz:ssh",
					Name:    "baz-remote-ssh",
					Status:  HealthPassing,
					Notes:   "baz has ssh access",
				},
			},
		},
		"Service web v1 on baz": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "baz",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "webV1",
				Service: "web",
				Tags:    []string{"v1", "connect"},
				Meta:    map[string]string{"version": "1", "connect": "enabled"},
				Port:    443,
				Address: "198.18.1.4",
				Connect: &AgentServiceConnect{Native: true},
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:        "baz",
					CheckID:     "baz:web:v1",
					Name:        "web-v1-liveness",
					Status:      HealthPassing,
					Notes:       "web connect v1 is alive and well",
					ServiceID:   "webV1",
					ServiceName: "web",
				},
			},
		},
		"Service web v2 on baz": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "baz",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "webV2",
				Service: "web",
				Tags:    []string{"v2", "connect"},
				Meta:    map[string]string{"version": "2", "connect": "enabled"},
				Port:    8443,
				Address: "198.18.1.4",
				Connect: &AgentServiceConnect{Native: true},
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:        "baz",
					CheckID:     "baz:web:v2",
					Name:        "web-v2-liveness",
					Status:      HealthPassing,
					Notes:       "web connect v2 is alive and well",
					ServiceID:   "webV2",
					ServiceName: "web",
				},
			},
		},
		"Service critical on baz": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "baz",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "criticalV2",
				Service: "critical",
				Tags:    []string{"v2"},
				Meta:    map[string]string{"version": "2"},
				Port:    8080,
				Address: "198.18.1.4",
			},
			Checks: HealthChecks{
				&HealthCheck{
					Node:        "baz",
					CheckID:     "baz:critical:v2",
					Name:        "critical-v2-liveness",
					Status:      HealthCritical,
					Notes:       "critical v2 is in the critical state",
					ServiceID:   "criticalV2",
					ServiceName: "critical",
				},
			},
		},
		"Service warning on baz": &CatalogRegistration{
			Datacenter:     datacenter,
			Node:           "baz",
			SkipNodeUpdate: true,
			Service: &AgentService{
				Kind:    ServiceKindTypical,
				ID:      "warningV2",
				Service: "warning",
				Tags:    []string{"v2"},
				Meta:    map[string]string{"version": "2"},
				Port:    8081,
				Address: "198.18.1.4",
			},
Checks: HealthChecks{
|
||||
&HealthCheck{
|
||||
Node: "baz",
|
||||
CheckID: "baz:warning:v2",
|
||||
Name: "warning-v2-liveness",
|
||||
Status: HealthWarning,
|
||||
Notes: "warning v2 is in the warning state",
|
||||
ServiceID: "warningV2",
|
||||
ServiceName: "warning",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
catalog := client.Catalog()
|
||||
for name, reg := range registrations {
|
||||
_, err := catalog.Register(reg, nil)
|
||||
require.NoError(t, err, "Failed catalog registration for %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAPI_DefaultConfig_env(t *testing.T) {
|
||||
// t.Parallel() // DO NOT ENABLE !!!
|
||||
// do not enable t.Parallel for this test since it modifies global state
|
||||
|
|
|
@@ -125,6 +125,36 @@ func TestAPI_CatalogNodes_MetaFilter(t *testing.T) {
	})
}

func TestAPI_CatalogNodes_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	catalog := c.Catalog()
	nodes, _, err := catalog.Nodes(nil)
	require.NoError(t, err)
	// 3 nodes inserted by the setup func above plus the agent itself
	require.Len(t, nodes, 4)

	// now filter down to just a couple nodes with a specific meta entry
	nodes, _, err = catalog.Nodes(&QueryOptions{Filter: "Meta.env == production"})
	require.NoError(t, err)
	require.Len(t, nodes, 2)

	// filter out everything that isn't bar or baz
	nodes, _, err = catalog.Nodes(&QueryOptions{Filter: "Node == bar or Node == baz"})
	require.NoError(t, err)
	require.Len(t, nodes, 2)

	// check for a non-existent IP for the node address
	nodes, _, err = catalog.Nodes(&QueryOptions{Filter: "Address == `10.0.0.1`"})
	require.NoError(t, err)
	require.Empty(t, nodes)
}

func TestAPI_CatalogServices(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
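The new `Filter` option accepts full boolean expressions, so the single-predicate queries above compose freely with `and`, `or`, and `not`. A minimal standalone sketch of a compound query (illustrative only; it assumes a local Consul agent holding the fixture data registered above, and that parenthesized grouping follows the grammar's usual precedence rules):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// compound expression: production linux nodes, or the node named "foo"
	nodes, _, err := client.Catalog().Nodes(&api.QueryOptions{
		Filter: "(Meta.env == production and Meta.os == linux) or Node == foo",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Node, n.Address)
	}
}
```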
@@ -399,6 +429,39 @@ func TestAPI_CatalogService_NodeMetaFilter(t *testing.T) {
	})
}

func TestAPI_CatalogService_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	catalog := c.Catalog()

	services, _, err := catalog.Service("redis", "", &QueryOptions{Filter: "ServiceMeta.version == 1"})
	require.NoError(t, err)
	// finds it on both foo and bar nodes
	require.Len(t, services, 2)

	require.Condition(t, func() bool {
		return (services[0].Node == "foo" && services[1].Node == "bar") ||
			(services[0].Node == "bar" && services[1].Node == "foo")
	})

	services, _, err = catalog.Service("redis", "", &QueryOptions{Filter: "NodeMeta.os != windows"})
	require.NoError(t, err)
	// finds both service instances on foo
	require.Len(t, services, 2)
	require.Equal(t, "foo", services[0].Node)
	require.Equal(t, "foo", services[1].Node)

	services, _, err = catalog.Service("redis", "", &QueryOptions{Filter: "Address == `10.0.0.1`"})
	require.NoError(t, err)
	require.Empty(t, services)
}

func testUpstreams(t *testing.T) []Upstream {
	return []Upstream{
		{
@@ -595,6 +658,30 @@ func TestAPI_CatalogConnectNative(t *testing.T) {
	})
}

func TestAPI_CatalogConnect_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	catalog := c.Catalog()

	services, _, err := catalog.Connect("web", "", &QueryOptions{Filter: "ServicePort == 443"})
	require.NoError(t, err)
	require.Len(t, services, 2)
	require.Condition(t, func() bool {
		return (services[0].Node == "bar" && services[1].Node == "baz") ||
			(services[0].Node == "baz" && services[1].Node == "bar")
	})

	// All the web-connect services are native
	services, _, err = catalog.Connect("web", "", &QueryOptions{Filter: "ServiceConnect.Native != true"})
	require.NoError(t, err)
	require.Empty(t, services)
}

func TestAPI_CatalogNode(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
@@ -646,6 +733,29 @@ func TestAPI_CatalogNode(t *testing.T) {
	})
}

func TestAPI_CatalogNode_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	catalog := c.Catalog()

	// should have only 1 matching service
	info, _, err := catalog.Node("bar", &QueryOptions{Filter: "connect in Tags"})
	require.NoError(t, err)
	require.Len(t, info.Services, 1)
	require.Contains(t, info.Services, "webV1")
	require.Equal(t, "web", info.Services["webV1"].Service)

	// should get two services for the node
	info, _, err = catalog.Node("baz", &QueryOptions{Filter: "connect in Tags"})
	require.NoError(t, err)
	require.Len(t, info.Services, 2)
}

func TestAPI_CatalogRegistration(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
@@ -36,6 +36,27 @@ func TestAPI_HealthNode(t *testing.T) {
	})
}

func TestAPI_HealthNode_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	health := c.Health()

	// filter for just the redis service checks
	checks, _, err := health.Node("foo", &QueryOptions{Filter: "ServiceName == redis"})
	require.NoError(t, err)
	require.Len(t, checks, 2)

	// filter out service checks
	checks, _, err = health.Node("foo", &QueryOptions{Filter: "ServiceID == ``"})
	require.NoError(t, err)
	require.Len(t, checks, 2)
}

func TestAPI_HealthChecks_AggregatedStatus(t *testing.T) {
	t.Parallel()
@@ -257,6 +278,32 @@ func TestAPI_HealthChecks_NodeMetaFilter(t *testing.T) {
	})
}

func TestAPI_HealthChecks_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	health := c.Health()

	checks, _, err := health.Checks("redis", &QueryOptions{Filter: "Node == foo"})
	require.NoError(t, err)
	// 1 service check for each instance
	require.Len(t, checks, 2)

	checks, _, err = health.Checks("redis", &QueryOptions{Filter: "Node == bar"})
	require.NoError(t, err)
	// 1 service check for each instance
	require.Len(t, checks, 1)

	checks, _, err = health.Checks("redis", &QueryOptions{Filter: "Node == foo and v1 in ServiceTags"})
	require.NoError(t, err)
	// 1 service check for the matching instance
	require.Len(t, checks, 1)
}

func TestAPI_HealthService(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
@@ -386,6 +433,31 @@ func TestAPI_HealthService_NodeMetaFilter(t *testing.T) {
	})
}

func TestAPI_HealthService_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	health := c.Health()

	services, _, err := health.Service("redis", "", false, &QueryOptions{Filter: "Service.Meta.version == 2"})
	require.NoError(t, err)
	require.Len(t, services, 1)

	services, _, err = health.Service("web", "", false, &QueryOptions{Filter: "Node.Meta.os == linux"})
	require.NoError(t, err)
	require.Len(t, services, 2)
	require.Equal(t, "baz", services[0].Node.Node)
	require.Equal(t, "baz", services[1].Node.Node)

	services, _, err = health.Service("web", "", false, &QueryOptions{Filter: "Node.Meta.os == linux and Service.Meta.version == 1"})
	require.NoError(t, err)
	require.Len(t, services, 1)
}

func TestAPI_HealthConnect(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
@@ -440,6 +512,27 @@ func TestAPI_HealthConnect(t *testing.T) {
	})
}

func TestAPI_HealthConnect_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	health := c.Health()

	services, _, err := health.Connect("web", "", false, &QueryOptions{Filter: "Node.Meta.os == linux"})
	require.NoError(t, err)
	require.Len(t, services, 2)
	require.Equal(t, "baz", services[0].Node.Node)
	require.Equal(t, "baz", services[1].Node.Node)

	services, _, err = health.Connect("web", "", false, &QueryOptions{Filter: "Node.Meta.os == linux and Service.Meta.version == 1"})
	require.NoError(t, err)
	require.Len(t, services, 1)
}

func TestAPI_HealthState(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
@@ -482,3 +575,30 @@ func TestAPI_HealthState_NodeMetaFilter(t *testing.T) {
		}
	})
}

func TestAPI_HealthState_Filter(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	// this sets up the catalog entries with things we can filter on
	testNodeServiceCheckRegistrations(t, c, "dc1")

	health := c.Health()

	checks, _, err := health.State(HealthAny, &QueryOptions{Filter: "Node == baz"})
	require.NoError(t, err)
	require.Len(t, checks, 6)

	checks, _, err = health.State(HealthAny, &QueryOptions{Filter: "Status == warning or Status == critical"})
	require.NoError(t, err)
	require.Len(t, checks, 2)

	checks, _, err = health.State(HealthCritical, &QueryOptions{Filter: "Node == baz"})
	require.NoError(t, err)
	require.Len(t, checks, 1)

	checks, _, err = health.State(HealthWarning, &QueryOptions{Filter: "Node == baz"})
	require.NoError(t, err)
	require.Len(t, checks, 1)
}
@@ -3,11 +3,13 @@ package nodes

import (
	"flag"
	"fmt"
	"io"
	"sort"
	"strings"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/command/flags"
	"github.com/hashicorp/consul/command/helpers"
	"github.com/mitchellh/cli"
	"github.com/ryanuber/columnize"
)
@@ -29,11 +31,15 @@ type cmd struct {
	near     string
	nodeMeta map[string]string
	service  string
	filter   string

	testStdin io.Reader
}

// init sets up command flags and help text
func (c *cmd) init() {
	c.flags = flag.NewFlagSet("", flag.ContinueOnError)
	c.flags.StringVar(&c.filter, "filter", "", "Filter to use with the request")
	c.flags.BoolVar(&c.detailed, "detailed", false, "Output detailed information about "+
		"the nodes including their addresses and metadata.")
	c.flags.StringVar(&c.near, "near", "", "Node name to sort the node list in ascending "+
@@ -68,11 +74,21 @@ func (c *cmd) Run(args []string) int {
		return 1
	}

	if c.filter != "" {
		data, err := helpers.LoadDataSource(c.filter, c.testStdin)
		if err != nil {
			c.UI.Error(fmt.Sprintf("Could not process filter argument: %v", err))
			return 1
		}
		c.filter = data
	}

	var nodes []*api.Node
	if c.service != "" {
		services, _, err := client.Catalog().Service(c.service, "", &api.QueryOptions{
			Near:     c.near,
			NodeMeta: c.nodeMeta,
			Filter:   c.filter,
		})
		if err != nil {
			c.UI.Error(fmt.Sprintf("Error listing nodes for service: %s", err))
@@ -96,6 +112,7 @@ func (c *cmd) Run(args []string) int {
	nodes, _, err = client.Catalog().Nodes(&api.QueryOptions{
		Near:     c.near,
		NodeMeta: c.nodeMeta,
		Filter:   c.filter,
	})
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error listing nodes: %s", err))
@@ -95,6 +95,23 @@ func TestCatalogListNodesCommand(t *testing.T) {
		}
	})

	t.Run("filter", func(t *testing.T) {
		ui := cli.NewMockUi()
		c := New(ui)
		args := []string{
			"-http-addr=" + a.HTTPAddr(),
			"-filter", "Meta.foo == bar",
		}
		code := c.Run(args)
		if code != 0 {
			t.Fatalf("bad exit code %d: %s", code, ui.ErrorWriter.String())
		}
		output := ui.ErrorWriter.String()
		if expected := "No nodes match the given query"; !strings.Contains(output, expected) {
			t.Errorf("expected %q to contain %q", output, expected)
		}
	})

	t.Run("near", func(t *testing.T) {
		ui := cli.NewMockUi()
		c := New(ui)
go.mod

@@ -53,6 +53,7 @@ require (
	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
	github.com/hashicorp/consul/api v1.0.1
	github.com/hashicorp/consul/sdk v0.1.0
	github.com/hashicorp/go-bexpr v0.1.0
	github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
	github.com/hashicorp/go-cleanhttp v0.5.1
	github.com/hashicorp/go-discover v0.0.0-20190403160810-22221edb15cd

@@ -111,6 +112,7 @@ require (
	github.com/shirou/gopsutil v0.0.0-20181107111621-48177ef5f880
	github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
	github.com/spf13/pflag v1.0.3 // indirect
	github.com/stretchr/objx v0.1.1 // indirect
	github.com/stretchr/testify v1.3.0
	golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3
	golang.org/x/net v0.0.0-20181201002055-351d144fa1fc

go.sum

@@ -136,6 +136,8 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-bexpr v0.1.0 h1:hA/9CWGPsQ6YZXvPvizD+VEEjBG4V6Un0Qcyav5ghK4=
github.com/hashicorp/go-bexpr v0.1.0/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de h1:XDCSythtg8aWSRSO29uwhgh7b127fWr+m5SemqjSUL8=
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de/go.mod h1:xIwEieBHERyEvaeKF/TcHh1Hu+lxPM+n2vT1+g9I4m4=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=

@@ -322,6 +324,8 @@ github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
@@ -84,11 +84,11 @@ type AgentService struct {
	Address           string
	Weights           AgentWeights
	EnableTagOverride bool
-	CreateIndex       uint64 `json:",omitempty"`
-	ModifyIndex       uint64 `json:",omitempty"`
-	ContentHash       string `json:",omitempty"`
+	CreateIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ModifyIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ContentHash       string `json:",omitempty" bexpr:"-"`
	// DEPRECATED (ProxyDestination) - remove this field
-	ProxyDestination string `json:",omitempty"`
+	ProxyDestination string `json:",omitempty" bexpr:"-"`
	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
	Connect          *AgentServiceConnect            `json:",omitempty"`
}

@@ -103,8 +103,8 @@ type AgentServiceChecksInfo struct {
// AgentServiceConnect represents the Connect configuration of a service.
type AgentServiceConnect struct {
	Native         bool `json:",omitempty"`
-	Proxy          *AgentServiceConnectProxy `json:",omitempty"`
-	SidecarService *AgentServiceRegistration `json:",omitempty"`
+	Proxy          *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"`
+	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
}

// AgentServiceConnectProxy represents the Connect Proxy configuration of a

@@ -112,7 +112,7 @@ type AgentServiceConnect struct {
type AgentServiceConnectProxy struct {
	ExecMode  ProxyExecMode          `json:",omitempty"`
	Command   []string               `json:",omitempty"`
-	Config    map[string]interface{} `json:",omitempty"`
+	Config    map[string]interface{} `json:",omitempty" bexpr:"-"`
	Upstreams []Upstream             `json:",omitempty"`
}

@@ -123,7 +123,7 @@ type AgentServiceConnectProxyConfig struct {
	DestinationServiceID string `json:",omitempty"`
	LocalServiceAddress  string `json:",omitempty"`
	LocalServicePort     int    `json:",omitempty"`
-	Config               map[string]interface{} `json:",omitempty"`
+	Config               map[string]interface{} `json:",omitempty" bexpr:"-"`
	Upstreams            []Upstream
}

@@ -278,9 +278,9 @@ type ConnectProxyConfig struct {
	ContentHash string
	// DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs
	// but they don't need ExecMode or Command
-	ExecMode  ProxyExecMode `json:",omitempty"`
-	Command   []string      `json:",omitempty"`
-	Config    map[string]interface{}
+	ExecMode  ProxyExecMode `json:",omitempty"`
+	Command   []string      `json:",omitempty"`
+	Config    map[string]interface{} `bexpr:"-"`
	Upstreams []Upstream
}

@@ -292,7 +292,7 @@ type Upstream struct {
	Datacenter       string `json:",omitempty"`
	LocalBindAddress string `json:",omitempty"`
	LocalBindPort    int    `json:",omitempty"`
-	Config           map[string]interface{} `json:",omitempty"`
+	Config           map[string]interface{} `json:",omitempty" bexpr:"-"`
}

// Agent can be used to query the Agent endpoints

@@ -387,7 +387,14 @@ func (a *Agent) NodeName() (string, error) {

// Checks returns the locally registered checks
func (a *Agent) Checks() (map[string]*AgentCheck, error) {
	return a.ChecksWithFilter("")
}

// ChecksWithFilter returns a subset of the locally registered checks that match
// the given filter expression
func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
	r := a.c.newRequest("GET", "/v1/agent/checks")
	r.filterQuery(filter)
	_, resp, err := requireOK(a.c.doRequest(r))
	if err != nil {
		return nil, err

@@ -403,7 +410,14 @@ func (a *Agent) Checks() (map[string]*AgentCheck, error) {

// Services returns the locally registered services
func (a *Agent) Services() (map[string]*AgentService, error) {
	return a.ServicesWithFilter("")
}

// ServicesWithFilter returns a subset of the locally registered services that match
// the given filter expression
func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
	r := a.c.newRequest("GET", "/v1/agent/services")
	r.filterQuery(filter)
	_, resp, err := requireOK(a.c.doRequest(r))
	if err != nil {
		return nil, err
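A short usage sketch of the two new agent helpers above (illustrative only; it assumes a configured `api.Client` reachable at the default address, with a locally registered service carrying a `v1` tag):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	agent := client.Agent()

	// only the locally registered services carrying the "v1" tag
	svcs, err := agent.ServicesWithFilter("v1 in Tags")
	if err != nil {
		log.Fatal(err)
	}
	for id, svc := range svcs {
		fmt.Println(id, svc.Service, svc.Port)
	}

	// only the local checks that are currently passing
	checks, err := agent.ChecksWithFilter("Status == passing")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(checks), "passing checks")
}
```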
@@ -146,6 +146,10 @@ type QueryOptions struct {
	// ctx is an optional context pass through to the underlying HTTP
	// request layer. Use Context() and WithContext() to manage this.
	ctx context.Context

	// Filter requests filtering data prior to it being returned. The string
	// is a go-bexpr compatible expression.
	Filter string
}

func (o *QueryOptions) Context() context.Context {

@@ -614,6 +618,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
	if q.Near != "" {
		r.params.Set("near", q.Near)
	}
	if q.Filter != "" {
		r.params.Set("filter", q.Filter)
	}
	if len(q.NodeMeta) > 0 {
		for key, value := range q.NodeMeta {
			r.params.Add("node-meta", key+":"+value)

@@ -897,3 +904,11 @@ func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *h
	}
	return d, resp, nil
}

func (req *request) filterQuery(filter string) {
	if filter == "" {
		return
	}

	req.params.Set("filter", filter)
}
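Since `setQueryOptions` simply forwards `Filter` as the `filter` URL parameter, the same query can be issued without the client library at all. A sketch of the raw-HTTP equivalent (not part of this diff; it assumes an agent on the default local port 8500):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// equivalent to catalog.Nodes(&QueryOptions{Filter: "Meta.env == production"})
	params := url.Values{}
	params.Set("filter", "Meta.env == production")

	resp, err := http.Get("http://127.0.0.1:8500/v1/catalog/nodes?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // JSON array of matching nodes
}
```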
@@ -0,0 +1,4 @@
/expr-parse
/expr-eval
/filter
/simple
@@ -0,0 +1,373 @@
Mozilla Public License Version 2.0
==================================

1. Definitions
--------------

1.1. "Contributor"
    means each individual or legal entity that creates, contributes to
    the creation of, or owns Covered Software.

1.2. "Contributor Version"
    means the combination of the Contributions of others (if any) used
    by a Contributor and that particular Contributor's Contribution.

1.3. "Contribution"
    means Covered Software of a particular Contributor.

1.4. "Covered Software"
    means Source Code Form to which the initial Contributor has attached
    the notice in Exhibit A, the Executable Form of such Source Code
    Form, and Modifications of such Source Code Form, in each case
    including portions thereof.

1.5. "Incompatible With Secondary Licenses"
    means

    (a) that the initial Contributor has attached the notice described
        in Exhibit B to the Covered Software; or

    (b) that the Covered Software was made available under the terms of
        version 1.1 or earlier of the License, but not also under the
        terms of a Secondary License.

1.6. "Executable Form"
    means any form of the work other than Source Code Form.

1.7. "Larger Work"
    means a work that combines Covered Software with other material, in
    a separate file or files, that is not Covered Software.

1.8. "License"
    means this document.

1.9. "Licensable"
    means having the right to grant, to the maximum extent possible,
    whether at the time of the initial grant or subsequently, any and
    all of the rights conveyed by this License.

1.10. "Modifications"
    means any of the following:

    (a) any file in Source Code Form that results from an addition to,
        deletion from, or modification of the contents of Covered
        Software; or

    (b) any new file in Source Code Form that contains any Covered
        Software.

1.11. "Patent Claims" of a Contributor
    means any patent claim(s), including without limitation, method,
    process, and apparatus claims, in any patent Licensable by such
    Contributor that would be infringed, but for the grant of the
    License, by the making, using, selling, offering for sale, having
    made, import, or transfer of either its Contributions or its
    Contributor Version.

1.12. "Secondary License"
    means either the GNU General Public License, Version 2.0, the GNU
    Lesser General Public License, Version 2.1, the GNU Affero General
    Public License, Version 3.0, or any later versions of those
    licenses.

1.13. "Source Code Form"
    means the form of the work preferred for making modifications.

1.14. "You" (or "Your")
    means an individual or a legal entity exercising rights under this
    License. For legal entities, "You" includes any entity that
    controls, is controlled by, or is under common control with You. For
    purposes of this definition, "control" means (a) the power, direct
    or indirect, to cause the direction or management of such entity,
    whether by contract or otherwise, or (b) ownership of more than
    fifty percent (50%) of the outstanding shares or beneficial
    ownership of such entity.

2. License Grants and Conditions
--------------------------------

2.1. Grants

Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:

(a) under intellectual property rights (other than patent or trademark)
    Licensable by such Contributor to use, reproduce, make available,
    modify, display, perform, distribute, and otherwise exploit its
    Contributions, either on an unmodified basis, with Modifications, or
    as part of a Larger Work; and

(b) under Patent Claims of such Contributor to make, use, sell, offer
    for sale, have made, import, and otherwise transfer either its
    Contributions or its Contributor Version.

2.2. Effective Date

The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.

2.3. Limitations on Grant Scope

The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:

(a) for any code that a Contributor has removed from Covered Software;
    or

(b) for infringements caused by: (i) Your and any other third party's
    modifications of Covered Software, or (ii) the combination of its
    Contributions with other software (except as part of its Contributor
    Version); or

(c) under Patent Claims infringed by Covered Software in the absence of
    its Contributions.

This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).

2.4. Subsequent Licenses

No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).

2.5. Representation

Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.

2.6. Fair Use

This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.

2.7. Conditions

Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.

3. Responsibilities
-------------------

3.1. Distribution of Source Form

All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.

3.2. Distribution of Executable Form

If You distribute Covered Software in Executable Form then:

(a) such Covered Software must also be made available in Source Code
    Form, as described in Section 3.1, and You must inform recipients of
    the Executable Form how they can obtain a copy of such Source Code
    Form by reasonable means in a timely manner, at a charge no more
    than the cost of distribution to the recipient; and

(b) You may distribute such Executable Form under the terms of this
    License, or sublicense it under different terms, provided that the
    license for the Executable Form does not attempt to limit or alter
    the recipients' rights in the Source Code Form under this License.

3.3. Distribution of a Larger Work

You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).

3.4. Notices

You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.

3.5. Application of Additional Terms

You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.

4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------

If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.

5. Termination
--------------

5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.

************************************************************************
*                                                                      *
*  6. Disclaimer of Warranty                                           *
*  -------------------------                                           *
*                                                                      *
*  Covered Software is provided under this License on an "as is"       *
*  basis, without warranty of any kind, either expressed, implied, or  *
*  statutory, including, without limitation, warranties that the       *
*  Covered Software is free of defects, merchantable, fit for a        *
*  particular purpose or non-infringing. The entire risk as to the     *
*  quality and performance of the Covered Software is with You.        *
*  Should any Covered Software prove defective in any respect, You     *
*  (not any Contributor) assume the cost of any necessary servicing,   *
*  repair, or correction. This disclaimer of warranty constitutes an   *
*  essential part of this License. No use of any Covered Software is   *
*  authorized under this License except under this disclaimer.         *
*                                                                      *
************************************************************************

************************************************************************
*                                                                      *
*  7. Limitation of Liability                                          *
*  --------------------------                                          *
*                                                                      *
*  Under no circumstances and under no legal theory, whether tort      *
*  (including negligence), contract, or otherwise, shall any           *
*  Contributor, or anyone who distributes Covered Software as          *
*  permitted above, be liable to You for any direct, indirect,         *
*  special, incidental, or consequential damages of any character      *
*  including, without limitation, damages for lost profits, loss of    *
*  goodwill, work stoppage, computer failure or malfunction, or any    *
*  and all other commercial damages or losses, even if such party      *
*  shall have been informed of the possibility of such damages. This   *
*  limitation of liability shall not apply to liability for death or   *
*  personal injury resulting from such party's negligence to the       *
*  extent applicable law prohibits such limitation. Some               *
*  jurisdictions do not allow the exclusion or limitation of           *
*  incidental or consequential damages, so this exclusion and          *
*  limitation may not apply to You.                                    *
*                                                                      *
************************************************************************

8. Litigation
-------------

Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.

9. Miscellaneous
----------------

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.

10. Versions of the License
---------------------------

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
@@ -0,0 +1,64 @@
GOTEST_PKGS=$(shell go list ./... | grep -v examples)

BENCHTIME ?= 2s
BENCHTESTS ?= .

BENCHFULL?=0
ifeq (${BENCHFULL},1)
BENCHFULL_ARG=-bench-full -timeout 60m
else
BENCHFULL_ARG=
endif

TEST_VERBOSE?=0
ifeq (${TEST_VERBOSE},1)
TEST_VERBOSE_ARG=-v
else
TEST_VERBOSE_ARG=
endif

TEST_RESULTS?="/tmp/test-results"

grammar.go: grammar.peg
	@echo "Regenerating Parser"
	@go generate ./

generate: grammar.go

test: generate
	@go test $(TEST_VERBOSE_ARG) $(GOTEST_PKGS)

test-ci: generate
	@gotestsum --junitfile $(TEST_RESULTS)/gotestsum-report.xml -- $(GOTEST_PKGS)

bench: generate
	@go test $(TEST_VERBOSE_ARG) -run DONTRUNTESTS -bench $(BENCHTESTS) $(BENCHFULL_ARG) -benchtime=$(BENCHTIME) $(GOTEST_PKGS)

coverage: generate
	@go test -coverprofile /tmp/coverage.out $(GOTEST_PKGS)
	@go tool cover -html /tmp/coverage.out

fmt: generate
	@gofmt -w -s .

examples: simple expr-parse expr-eval filter

simple:
	@go build ./examples/simple

expr-parse:
	@go build ./examples/expr-parse

expr-eval:
	@go build ./examples/expr-eval

filter:
	@go build ./examples/filter

deps:
	@go get github.com/mna/pigeon@master
	@go get golang.org/x/tools/cmd/goimports
	@go get golang.org/x/tools/cmd/cover
	@go mod tidy

.PHONY: generate test coverage fmt deps bench examples expr-parse expr-eval filter
@@ -0,0 +1,115 @@
# bexpr - Boolean Expression Evaluator [![GoDoc](https://godoc.org/github.com/hashicorp/go-bexpr?status.svg)](https://godoc.org/github.com/hashicorp/go-bexpr) [![CircleCI](https://circleci.com/gh/hashicorp/go-bexpr.svg?style=svg)](https://circleci.com/gh/hashicorp/go-bexpr)

`bexpr` is a Go (golang) library to provide generic boolean expression evaluation and filtering for Go data structures.

## Limitations

Currently `bexpr` does not support operating on types with cyclical structures; attempting to generate the fields
of these types will cause a stack overflow. There are, however, two ways around this. First, if you do not
need the nested type to be available during evaluation, you can simply add the `bexpr:"-"` struct tag to the
fields where that type is referenced and `bexpr` will not delve further into that type (a sketch follows below). Second, you can implement
the `MatchExpressionEvaluator` interface and provide the necessary field configurations yourself.

Eventually this lib will support handling these cycles automatically.
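A minimal sketch of the first approach. The `TreeNode` type here is hypothetical, not part of the library; the `bexpr:"-"` tag stops field generation before it recurses into the cycle:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/go-bexpr"
)

// TreeNode is self-referential; without the tag, generating field
// configurations for it would recurse forever.
type TreeNode struct {
	Name string

	// Parent is excluded from evaluation, which breaks the cycle.
	Parent *TreeNode `bexpr:"-"`
}

func main() {
	eval, err := bexpr.CreateEvaluatorForType("Name == root", nil, reflect.TypeOf(TreeNode{}))
	if err != nil {
		panic(err)
	}

	match, err := eval.Evaluate(TreeNode{Name: "root"})
	if err != nil {
		panic(err)
	}
	fmt.Println(match) // true
}
```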
## Stability

Currently there is a `MatchExpressionEvaluator` interface that can be used to implement custom behavior. This interface should be considered *experimental* and is likely to change in the future. One need for the change is to make it easier for custom implementations to re-invoke the main bexpr logic on subfields so that they do not have to implement custom logic for themselves and every sub field they contain. With the current interface it's not really possible.

## Usage (Reflection)

This example program is available in [examples/simple](examples/simple)

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-bexpr"
)

type Example struct {
	X int

	// A field can be renamed with the struct tag
	Y string `bexpr:"y"`

	// Fields can use multiple names for accessing
	Z bool `bexpr:"Z,z,foo"`

	// Tag with "-" to prevent this field from being used
	Hidden string `bexpr:"-"`

	// Unexported fields are not available for evaluation
	unexported string
}

func main() {
	value := map[string]Example{
		"foo": Example{X: 5, Y: "foo", Z: true, Hidden: "yes", unexported: "no"},
		"bar": Example{X: 42, Y: "bar", Z: false, Hidden: "no", unexported: "yes"},
	}

	expressions := []string{
		"foo.X == 5",
		"bar.y == bar",
		"foo.foo != false",
		"foo.z == true",
		"foo.Z == true",

		// will error in evaluator creation
		"bar.Hidden != yes",

		// will error in evaluator creation
		"foo.unexported == no",
	}

	for _, expression := range expressions {
		eval, err := bexpr.CreateEvaluatorForType(expression, nil, (*map[string]Example)(nil))
		if err != nil {
			fmt.Printf("Failed to create evaluator for expression %q: %v\n", expression, err)
			continue
		}

		result, err := eval.Evaluate(value)
		if err != nil {
			fmt.Printf("Failed to run evaluation of expression %q: %v\n", expression, err)
			continue
		}

		fmt.Printf("Result of expression %q evaluation: %t\n", expression, result)
	}
}
```

This will output:

```
Result of expression "foo.X == 5" evaluation: true
Result of expression "bar.y == bar" evaluation: true
Result of expression "foo.foo != false" evaluation: true
Result of expression "foo.z == true" evaluation: true
Result of expression "foo.Z == true" evaluation: true
Failed to create evaluator for expression "bar.Hidden != yes": Selector "bar.Hidden" is not valid
Failed to create evaluator for expression "foo.unexported == no": Selector "foo.unexported" is not valid
```

## Testing

The [Makefile](Makefile) contains 3 main targets to aid with testing:

1. `make test` - runs the standard test suite
2. `make coverage` - runs the test suite, gathering coverage information
3. `make bench` - runs the benchmarks. You can use the [`benchcmp`](https://godoc.org/golang.org/x/tools/cmd/benchcmp) tool to compare
   subsequent runs against each other and gauge performance changes. There are a few arguments you can
   provide to the make invocation to alter the behavior a bit:
   * `BENCHFULL=1` - This will enable running all the benchmarks. Some could be fairly redundant but
     could be useful when modifying specific sections of the code.
   * `BENCHTIME=5s` - By default the `-benchtime` parameter used for the `go test` invocation is `2s`.
     `1s` seemed like too little to get results consistent enough for comparison between two runs.
     For the highest degree of confidence that performance has remained steady, increase this value
     even further. The time it takes to run the bench testing suite grows linearly with this value.
   * `BENCHTESTS=BenchmarkEvaluate` - This is used to run a particular benchmark including all of its
     sub-benchmarks. This is just an example and "BenchmarkEvaluate" can be replaced with any
     benchmark function's name.
@@ -0,0 +1,131 @@
package bexpr

import (
	"fmt"
	"io"
	"strings"
)

// TODO - Probably should make most of what is in here un-exported

//go:generate pigeon -o grammar.go -optimize-parser grammar.peg
//go:generate goimports -w grammar.go

type Expression interface {
	ExpressionDump(w io.Writer, indent string, level int)
}

type UnaryOperator int

const (
	UnaryOpNot UnaryOperator = iota
)

func (op UnaryOperator) String() string {
	switch op {
	case UnaryOpNot:
		return "Not"
	default:
		return "UNKNOWN"
	}
}

type BinaryOperator int

const (
	BinaryOpAnd BinaryOperator = iota
	BinaryOpOr
)

func (op BinaryOperator) String() string {
	switch op {
	case BinaryOpAnd:
		return "And"
	case BinaryOpOr:
		return "Or"
	default:
		return "UNKNOWN"
	}
}

type MatchOperator int

const (
	MatchEqual MatchOperator = iota
	MatchNotEqual
	MatchIn
	MatchNotIn
	MatchIsEmpty
	MatchIsNotEmpty
)

func (op MatchOperator) String() string {
	switch op {
	case MatchEqual:
		return "Equal"
	case MatchNotEqual:
		return "Not Equal"
	case MatchIn:
		return "In"
	case MatchNotIn:
		return "Not In"
	case MatchIsEmpty:
		return "Is Empty"
	case MatchIsNotEmpty:
		return "Is Not Empty"
	default:
		return "UNKNOWN"
	}
}

type MatchValue struct {
	Raw       string
	Converted interface{}
}

type UnaryExpression struct {
	Operator UnaryOperator
	Operand  Expression
}

type BinaryExpression struct {
	Left     Expression
	Operator BinaryOperator
	Right    Expression
}

type Selector []string

func (sel Selector) String() string {
	return strings.Join([]string(sel), ".")
}

type MatchExpression struct {
	Selector Selector
	Operator MatchOperator
	Value    *MatchValue
}

func (expr *UnaryExpression) ExpressionDump(w io.Writer, indent string, level int) {
	localIndent := strings.Repeat(indent, level)
	fmt.Fprintf(w, "%s%s {\n", localIndent, expr.Operator.String())
	expr.Operand.ExpressionDump(w, indent, level+1)
	fmt.Fprintf(w, "%s}\n", localIndent)
}

func (expr *BinaryExpression) ExpressionDump(w io.Writer, indent string, level int) {
	localIndent := strings.Repeat(indent, level)
	fmt.Fprintf(w, "%s%s {\n", localIndent, expr.Operator.String())
	expr.Left.ExpressionDump(w, indent, level+1)
	expr.Right.ExpressionDump(w, indent, level+1)
	fmt.Fprintf(w, "%s}\n", localIndent)
}

func (expr *MatchExpression) ExpressionDump(w io.Writer, indent string, level int) {
	switch expr.Operator {
	case MatchEqual, MatchNotEqual, MatchIn, MatchNotIn:
		fmt.Fprintf(w, "%[1]s%[3]s {\n%[2]sSelector: %[4]v\n%[2]sValue: %[5]q\n%[1]s}\n", strings.Repeat(indent, level), strings.Repeat(indent, level+1), expr.Operator.String(), expr.Selector, expr.Value.Raw)
	default:
		fmt.Fprintf(w, "%[1]s%[3]s {\n%[2]sSelector: %[4]v\n%[1]s}\n", strings.Repeat(indent, level), strings.Repeat(indent, level+1), expr.Operator.String(), expr.Selector)
	}
}
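The `ExpressionDump` methods exist mainly for debugging the parsed syntax tree. A hypothetical sketch of inspecting an expression (illustrative, not an example shipped with the library; it assumes the pigeon-generated `Parse` entry point, which is the same one `CreateEvaluatorForType` below calls, is usable from outside the package):

```go
package main

import (
	"os"

	"github.com/hashicorp/go-bexpr"
)

func main() {
	// Parse returns the root of the AST as an interface{}.
	raw, err := bexpr.Parse("", []byte("foo == 3 and not bar in baz"))
	if err != nil {
		panic(err)
	}

	ast := raw.(bexpr.Expression)
	// Dump the tree with two-space indentation, starting at nesting level 0.
	ast.ExpressionDump(os.Stdout, "  ", 0)
}
```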
@ -0,0 +1,162 @@
// bexpr is an implementation of a generic boolean expression evaluator.
// The general goal is to be able to evaluate some expression against some
// arbitrary data and get back a boolean indicating whether or not the data
// was matched by the expression.
package bexpr

import (
	"fmt"
	"reflect"
)

const (
	defaultMaxMatches        = 32
	defaultMaxRawValueLength = 512
)

// MatchExpressionEvaluator is the interface to implement to provide custom evaluation
// logic for a selector. This could be used to enable synthetic fields or other
// more complex logic that the default behavior does not support.
type MatchExpressionEvaluator interface {
	// FieldConfigurations returns the configuration for this field and any subfields
	// it may have. It must be valid to call this method on nil.
	FieldConfigurations() FieldConfigurations

	// EvaluateMatch returns whether there was a match or not. We do not expect
	// errors here because all of the validation is handled while parsing and
	// cross-checking against the output of FieldConfigurations.
	EvaluateMatch(sel Selector, op MatchOperator, value interface{}) (bool, error)
}

type Evaluator struct {
	// The syntax tree
	ast Expression

	// A few configurations for extra validation of the AST
	config EvaluatorConfig

	// Once an expression has been run against a particular data type it cannot be executed
	// against a different data type. Some coerced value memoization occurs which would
	// be invalid against other data types.
	boundType reflect.Type

	// The field configuration of the boundType
	fields FieldConfigurations
}

// EvaluatorConfig is extra configuration used to perform further validation on
// a parsed expression and to aid in the evaluation process.
type EvaluatorConfig struct {
	// Maximum number of match expressions allowed. 0 means unlimited.
	// This does not include the and, or and not expressions within the AST.
	MaxMatches int
	// Maximum length of raw values. 0 means unlimited.
	MaxRawValueLength int
	// The Registry to use for validating expressions against a data type.
	// If nil the `DefaultRegistry` will be used. To disable using a
	// registry altogether you can set this to `NilRegistry`.
	Registry Registry
}

func CreateEvaluator(expression string, config *EvaluatorConfig) (*Evaluator, error) {
	return CreateEvaluatorForType(expression, config, nil)
}

func CreateEvaluatorForType(expression string, config *EvaluatorConfig, dataType interface{}) (*Evaluator, error) {
	ast, err := Parse("", []byte(expression))
	if err != nil {
		return nil, err
	}

	eval := &Evaluator{ast: ast.(Expression)}

	if config == nil {
		config = &eval.config
	}
	err = eval.validate(config, dataType, true)
	if err != nil {
		return nil, err
	}

	return eval, nil
}

func (eval *Evaluator) Evaluate(datum interface{}) (bool, error) {
	if eval.fields == nil {
		err := eval.validate(&eval.config, datum, true)
		if err != nil {
			return false, err
		}
	} else if reflect.TypeOf(datum) != eval.boundType {
		return false, fmt.Errorf("This evaluator can only be used to evaluate matches against %s", eval.boundType)
	}

	return evaluate(eval.ast, datum, eval.fields)
}

func (eval *Evaluator) validate(config *EvaluatorConfig, dataType interface{}, updateEvaluator bool) error {
	if config == nil {
		return fmt.Errorf("Invalid config")
	}

	var fields FieldConfigurations
	var err error
	var rtype reflect.Type
	if dataType != nil {
		registry := DefaultRegistry
		if config.Registry != nil {
			registry = config.Registry
		}

		switch t := dataType.(type) {
		case reflect.Type:
			rtype = t
		case *reflect.Type:
			rtype = *t
		case reflect.Value:
			rtype = t.Type()
		case *reflect.Value:
			rtype = t.Type()
		default:
			rtype = reflect.TypeOf(dataType)
		}

		fields, err = registry.GetFieldConfigurations(rtype)
		if err != nil {
			return err
		}

		if len(fields) < 1 {
			return fmt.Errorf("Data type %s has no evaluatable fields", rtype.String())
		}
	}

	maxMatches := config.MaxMatches
	if maxMatches == 0 {
		maxMatches = defaultMaxMatches
	}

	maxRawValueLength := config.MaxRawValueLength
	if maxRawValueLength == 0 {
		maxRawValueLength = defaultMaxRawValueLength
	}

	// use the effective limits with the defaults applied, not the raw config values
	err = validate(eval.ast, fields, maxMatches, maxRawValueLength)
	if err != nil {
		return err
	}

	if updateEvaluator {
		eval.config = *config
		eval.fields = fields
		eval.boundType = rtype
	}

	return nil
}

// Validate validates an existing expression against a possibly different configuration.
func (eval *Evaluator) Validate(config *EvaluatorConfig, dataType interface{}) error {
	return eval.validate(config, dataType, false)
}
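A minimal usage sketch of the evaluator API above. The `Item` type, its fields, and the expression are illustrative only, not part of the library:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Item is a hypothetical data type to evaluate expressions against.
type Item struct {
	Name string
	Port int
}

func main() {
	// Binding the evaluator to the Item type up front validates the
	// expression's selectors and coerces its values before any evaluation.
	eval, err := bexpr.CreateEvaluatorForType(`Name == "web" and Port == 8080`, nil, Item{})
	if err != nil {
		panic(err)
	}

	match, err := eval.Evaluate(Item{Name: "web", Port: 8080})
	fmt.Println(match, err) // true <nil>
}
```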
@ -0,0 +1,135 @@
package bexpr

import (
	"reflect"
	"strconv"
)

// CoerceInt conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into an `int`
func CoerceInt(value string) (interface{}, error) {
	i, err := strconv.ParseInt(value, 0, 0)
	return int(i), err
}

// CoerceInt8 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into an `int8`
func CoerceInt8(value string) (interface{}, error) {
	i, err := strconv.ParseInt(value, 0, 8)
	return int8(i), err
}

// CoerceInt16 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into an `int16`
func CoerceInt16(value string) (interface{}, error) {
	i, err := strconv.ParseInt(value, 0, 16)
	return int16(i), err
}

// CoerceInt32 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into an `int32`
func CoerceInt32(value string) (interface{}, error) {
	i, err := strconv.ParseInt(value, 0, 32)
	return int32(i), err
}

// CoerceInt64 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into an `int64`
func CoerceInt64(value string) (interface{}, error) {
	i, err := strconv.ParseInt(value, 0, 64)
	return int64(i), err
}

// CoerceUint conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `uint`
func CoerceUint(value string) (interface{}, error) {
	i, err := strconv.ParseUint(value, 0, 0)
	return uint(i), err
}

// CoerceUint8 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `uint8`
func CoerceUint8(value string) (interface{}, error) {
	i, err := strconv.ParseUint(value, 0, 8)
	return uint8(i), err
}

// CoerceUint16 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `uint16`
func CoerceUint16(value string) (interface{}, error) {
	i, err := strconv.ParseUint(value, 0, 16)
	return uint16(i), err
}

// CoerceUint32 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `uint32`
func CoerceUint32(value string) (interface{}, error) {
	i, err := strconv.ParseUint(value, 0, 32)
	return uint32(i), err
}

// CoerceUint64 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `uint64`
func CoerceUint64(value string) (interface{}, error) {
	i, err := strconv.ParseUint(value, 0, 64)
	return uint64(i), err
}

// CoerceBool conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `bool`
func CoerceBool(value string) (interface{}, error) {
	return strconv.ParseBool(value)
}

// CoerceFloat32 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `float32`
func CoerceFloat32(value string) (interface{}, error) {
	// ParseFloat always returns a float64 but ensures
	// it can be converted to a float32 without changing
	// its value
	f, err := strconv.ParseFloat(value, 32)
	return float32(f), err
}

// CoerceFloat64 conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `float64`
func CoerceFloat64(value string) (interface{}, error) {
	return strconv.ParseFloat(value, 64)
}

// CoerceString conforms to the FieldValueCoercionFn signature
// and can be used to convert the raw string value of
// an expression into a `string`
func CoerceString(value string) (interface{}, error) {
	return value, nil
}

var primitiveCoercionFns = map[reflect.Kind]FieldValueCoercionFn{
	reflect.Bool:    CoerceBool,
	reflect.Int:     CoerceInt,
	reflect.Int8:    CoerceInt8,
	reflect.Int16:   CoerceInt16,
	reflect.Int32:   CoerceInt32,
	reflect.Int64:   CoerceInt64,
	reflect.Uint:    CoerceUint,
	reflect.Uint8:   CoerceUint8,
	reflect.Uint16:  CoerceUint16,
	reflect.Uint32:  CoerceUint32,
	reflect.Uint64:  CoerceUint64,
	reflect.Float32: CoerceFloat32,
	reflect.Float64: CoerceFloat64,
	reflect.String:  CoerceString,
}
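Because the integer coercions above call `strconv.ParseInt`/`ParseUint` with base 0, the helpers accept decimal, hex, or octal input (note that bare number literals in the expression grammar itself are decimal, so prefixed values would arrive via quoted strings). A quick illustration of calling the exported helpers directly:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

func main() {
	// base 0 lets ParseInt infer the base from the "0x" prefix
	v, err := bexpr.CoerceInt("0x20")
	fmt.Println(v, err) // 32 <nil>

	b, err := bexpr.CoerceBool("true")
	fmt.Println(b, err) // true <nil>
}
```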
@ -0,0 +1,300 @@
package bexpr

import (
	"fmt"
	"reflect"
	"strings"
)

var primitiveEqualityFns = map[reflect.Kind]func(first interface{}, second reflect.Value) bool{
	reflect.Bool:    doEqualBool,
	reflect.Int:     doEqualInt,
	reflect.Int8:    doEqualInt8,
	reflect.Int16:   doEqualInt16,
	reflect.Int32:   doEqualInt32,
	reflect.Int64:   doEqualInt64,
	reflect.Uint:    doEqualUint,
	reflect.Uint8:   doEqualUint8,
	reflect.Uint16:  doEqualUint16,
	reflect.Uint32:  doEqualUint32,
	reflect.Uint64:  doEqualUint64,
	reflect.Float32: doEqualFloat32,
	reflect.Float64: doEqualFloat64,
	reflect.String:  doEqualString,
}

func doEqualBool(first interface{}, second reflect.Value) bool {
	return first.(bool) == second.Bool()
}

func doEqualInt(first interface{}, second reflect.Value) bool {
	return first.(int) == int(second.Int())
}

func doEqualInt8(first interface{}, second reflect.Value) bool {
	return first.(int8) == int8(second.Int())
}

func doEqualInt16(first interface{}, second reflect.Value) bool {
	return first.(int16) == int16(second.Int())
}

func doEqualInt32(first interface{}, second reflect.Value) bool {
	return first.(int32) == int32(second.Int())
}

func doEqualInt64(first interface{}, second reflect.Value) bool {
	return first.(int64) == second.Int()
}

func doEqualUint(first interface{}, second reflect.Value) bool {
	return first.(uint) == uint(second.Uint())
}

func doEqualUint8(first interface{}, second reflect.Value) bool {
	return first.(uint8) == uint8(second.Uint())
}

func doEqualUint16(first interface{}, second reflect.Value) bool {
	return first.(uint16) == uint16(second.Uint())
}

func doEqualUint32(first interface{}, second reflect.Value) bool {
	return first.(uint32) == uint32(second.Uint())
}

func doEqualUint64(first interface{}, second reflect.Value) bool {
	return first.(uint64) == second.Uint()
}

func doEqualFloat32(first interface{}, second reflect.Value) bool {
	return first.(float32) == float32(second.Float())
}

func doEqualFloat64(first interface{}, second reflect.Value) bool {
	return first.(float64) == second.Float()
}

func doEqualString(first interface{}, second reflect.Value) bool {
	return first.(string) == second.String()
}

// Strip away zero or more levels of pointers to get at the real type
func derefType(rtype reflect.Type) reflect.Type {
	for rtype.Kind() == reflect.Ptr {
		rtype = rtype.Elem()
	}
	return rtype
}

func doMatchEqual(expression *MatchExpression, value reflect.Value) (bool, error) {
	// NOTE: see preconditions in evaluateMatchExpressionRecurse
	eqFn := primitiveEqualityFns[value.Kind()]
	matchValue := getMatchExprValue(expression)
	return eqFn(matchValue, value), nil
}

func doMatchIn(expression *MatchExpression, value reflect.Value) (bool, error) {
	// NOTE: see preconditions in evaluateMatchExpressionRecurse
	matchValue := getMatchExprValue(expression)

	switch kind := value.Kind(); kind {
	case reflect.Map:
		found := value.MapIndex(reflect.ValueOf(matchValue))
		return found.IsValid(), nil
	case reflect.Slice, reflect.Array:
		itemType := derefType(value.Type().Elem())
		eqFn := primitiveEqualityFns[itemType.Kind()]

		for i := 0; i < value.Len(); i++ {
			item := value.Index(i)

			// the value will be the correct type as we verified the itemType
			if eqFn(matchValue, reflect.Indirect(item)) {
				return true, nil
			}
		}

		return false, nil
	case reflect.String:
		return strings.Contains(value.String(), matchValue.(string)), nil
	default:
		// this shouldn't be possible but we have to return something to keep the compiler happy
		return false, fmt.Errorf("Cannot perform in/contains operations on type %s for selector: %q", kind, expression.Selector)
	}
}

func doMatchIsEmpty(matcher *MatchExpression, value reflect.Value) (bool, error) {
	// NOTE: see preconditions in evaluateMatchExpressionRecurse
	return value.Len() == 0, nil
}

func getMatchExprValue(expression *MatchExpression) interface{} {
	// NOTE: see preconditions in evaluateMatchExpressionRecurse
	if expression.Value == nil {
		return nil
	}

	if expression.Value.Converted != nil {
		return expression.Value.Converted
	}

	return expression.Value.Raw
}

func evaluateMatchExpressionRecurse(expression *MatchExpression, depth int, rvalue reflect.Value, fields FieldConfigurations) (bool, error) {
	// NOTE: Some information about preconditions is good to have here. Parsing,
	// as well as the extra validation pass that MUST occur before executing the
	// expression evaluation, allows us to make some assumptions here.
	//
	// 1. Selectors MUST be valid. Therefore we don't need to test whether they
	//    are valid. This means that we can index into the FieldConfigurations map
	//    and a configuration MUST be present.
	// 2. If expression.Value could be converted it already has been. No need to try
	//    and convert again. There is also no need to check that the types match, as they MUST
	//    in order to have passed validation.
	// 3. If we are presented with a map and we have more selectors to go through, then its key
	//    type MUST be a string.
	// 4. We have already validated that the operations can be performed on the target data.
	//    So calls to the doMatch* functions don't need to do any checking to ensure that
	//    calling various fns on them will work and not panic - because they won't.

	if depth >= len(expression.Selector) {
		// we have reached the end of the selector - execute the match operations
		switch expression.Operator {
		case MatchEqual:
			return doMatchEqual(expression, rvalue)
		case MatchNotEqual:
			result, err := doMatchEqual(expression, rvalue)
			if err == nil {
				return !result, nil
			}
			return false, err
		case MatchIn:
			return doMatchIn(expression, rvalue)
		case MatchNotIn:
			result, err := doMatchIn(expression, rvalue)
			if err == nil {
				return !result, nil
			}
			return false, err
		case MatchIsEmpty:
			return doMatchIsEmpty(expression, rvalue)
		case MatchIsNotEmpty:
			result, err := doMatchIsEmpty(expression, rvalue)
			if err == nil {
				return !result, nil
			}
			return false, err
		default:
			return false, fmt.Errorf("Invalid match operation: %d", expression.Operator)
		}
	}

	switch rvalue.Kind() {
	case reflect.Struct:
		fieldName := expression.Selector[depth]
		fieldConfig := fields[FieldName(fieldName)]

		if fieldConfig.StructFieldName != "" {
			fieldName = fieldConfig.StructFieldName
		}

		value := reflect.Indirect(rvalue.FieldByName(fieldName))

		if matcher, ok := value.Interface().(MatchExpressionEvaluator); ok {
			return matcher.EvaluateMatch(expression.Selector[depth+1:], expression.Operator, getMatchExprValue(expression))
		}

		return evaluateMatchExpressionRecurse(expression, depth+1, value, fieldConfig.SubFields)

	case reflect.Slice, reflect.Array:
		// TODO (mkeeler) - Should we support implementing the MatchExpressionEvaluator interface for slice/array types?
		// Punting on that for now.
		for i := 0; i < rvalue.Len(); i++ {
			item := reflect.Indirect(rvalue.Index(i))
			// we use the same depth because right now we are not allowing
			// selection of individual slice/array elements
			result, err := evaluateMatchExpressionRecurse(expression, depth, item, fields)
			if err != nil {
				return false, err
			}

			// operations on slices are implicitly ANY operations currently, so we
			// can stop at the first truthy evaluation we find
			if result {
				return true, nil
			}
		}

		return false, nil
	case reflect.Map:
		// TODO (mkeeler) - Should we support implementing the MatchExpressionEvaluator interface for map types
		// such as the FieldConfigurations type? Maybe later.
		value := reflect.Indirect(rvalue.MapIndex(reflect.ValueOf(expression.Selector[depth])))

		if !value.IsValid() {
			// when the key doesn't exist in the map
			switch expression.Operator {
			case MatchEqual, MatchIsNotEmpty, MatchIn:
				return false, nil
			default:
				// MatchNotEqual, MatchIsEmpty, MatchNotIn
				// Whatever you were looking for cannot be equal because it doesn't exist.
				// Similarly it cannot be in some other container, and every other container
				// is always empty.
				return true, nil
			}
		}

		if matcher, ok := value.Interface().(MatchExpressionEvaluator); ok {
			return matcher.EvaluateMatch(expression.Selector[depth+1:], expression.Operator, getMatchExprValue(expression))
		}

		return evaluateMatchExpressionRecurse(expression, depth+1, value, fields[FieldNameAny].SubFields)
	default:
		return false, fmt.Errorf("Value at selector %q with type %s does not support nested field selection", expression.Selector[:depth], rvalue.Kind())
	}
}

func evaluateMatchExpression(expression *MatchExpression, datum interface{}, fields FieldConfigurations) (bool, error) {
	if matcher, ok := datum.(MatchExpressionEvaluator); ok {
		return matcher.EvaluateMatch(expression.Selector, expression.Operator, getMatchExprValue(expression))
	}

	rvalue := reflect.Indirect(reflect.ValueOf(datum))

	return evaluateMatchExpressionRecurse(expression, 0, rvalue, fields)
}

func evaluate(ast Expression, datum interface{}, fields FieldConfigurations) (bool, error) {
	switch node := ast.(type) {
	case *UnaryExpression:
		switch node.Operator {
		case UnaryOpNot:
			result, err := evaluate(node.Operand, datum, fields)
			return !result, err
		}
	case *BinaryExpression:
		switch node.Operator {
		case BinaryOpAnd:
			result, err := evaluate(node.Left, datum, fields)
			if err != nil || !result {
				return result, err
			}

			return evaluate(node.Right, datum, fields)

		case BinaryOpOr:
			result, err := evaluate(node.Left, datum, fields)
			if err != nil || result {
				return result, err
			}

			return evaluate(node.Right, datum, fields)
		}
	case *MatchExpression:
		return evaluateMatchExpression(node, datum, fields)
	}
	return false, fmt.Errorf("Invalid AST node")
}
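The implicit-ANY behavior on slices described in the code above means a match succeeds if any element matches. A sketch, with an illustrative `Service` type:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Service is a hypothetical type; Tags demonstrates slice matching.
type Service struct {
	Tags []string
}

func main() {
	// true if ANY element of Tags equals "primary"
	eval, err := bexpr.CreateEvaluatorForType(`"primary" in Tags`, nil, Service{})
	if err != nil {
		panic(err)
	}
	match, _ := eval.Evaluate(Service{Tags: []string{"primary", "v1"}})
	fmt.Println(match) // true
}
```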
@ -0,0 +1,308 @@
package bexpr

import (
	"fmt"
	"reflect"
	"strings"
)

// FieldValueCoercionFn is the function type used with a FieldConfiguration's CoerceFn
type FieldValueCoercionFn func(value string) (interface{}, error)

// FieldName is the strongly typed name of a field
type FieldName string

// FieldNameAny is used to represent an arbitrary field name
const FieldNameAny FieldName = ""

// The FieldConfiguration struct represents how boolean expression
// validation and preparation should work for the given field. A field
// in this case is a single element of a selector.
//
// Example: foo.bar.baz has 3 fields separated by '.' characters.
type FieldConfiguration struct {
	// Name to use when looking up fields within a struct. This is useful when
	// the name(s) you want to expose to users writing the expressions does not
	// exactly match the Field name of the structure. If this is empty then the
	// user provided name will be used.
	StructFieldName string

	// Nested field configurations
	SubFields FieldConfigurations

	// Function to run on the raw string value present in the expression
	// syntax to coerce it into whatever form the MatchExpressionEvaluator wants.
	// The coercion happens only once and the result will then be passed as the `value`
	// parameter to all EvaluateMatch invocations on the MatchExpressionEvaluator.
	CoerceFn FieldValueCoercionFn

	// List of MatchOperators supported for this field. This configuration
	// is used to pre-validate an expression's fields before execution.
	SupportedOperations []MatchOperator
}

// FieldConfigurations represents all the valid fields and their corresponding configuration
type FieldConfigurations map[FieldName]*FieldConfiguration

func generateFieldConfigurationInterface(rtype reflect.Type) (FieldConfigurations, bool) {
	// Handle those types that implement our interface
	if rtype.Implements(reflect.TypeOf((*MatchExpressionEvaluator)(nil)).Elem()) {
		// TODO (mkeeler) Do we need to new a value just to call the function? Potentially we can
		// look up the func and invoke it with a nil pointer?
		value := reflect.New(rtype)
		// have to take the Elem() of the new value because New gives us a ptr to the type that
		// we checked if it implements the interface
		configs := value.Elem().Interface().(MatchExpressionEvaluator).FieldConfigurations()
		return configs, true
	}

	return nil, false
}

func generateFieldConfigurationInternal(rtype reflect.Type) (*FieldConfiguration, error) {
	if fields, ok := generateFieldConfigurationInterface(rtype); ok {
		return &FieldConfiguration{
			SubFields: fields,
		}, nil
	}

	// must be done after checking for the interface implementation
	rtype = derefType(rtype)

	// Handle primitive types
	if coerceFn, ok := primitiveCoercionFns[rtype.Kind()]; ok {
		return &FieldConfiguration{
			CoerceFn:            coerceFn,
			SupportedOperations: []MatchOperator{MatchEqual, MatchNotEqual},
		}, nil
	}

	// Handle compound types
	switch rtype.Kind() {
	case reflect.Map:
		return generateMapFieldConfiguration(derefType(rtype.Key()), rtype.Elem())
	case reflect.Array, reflect.Slice:
		return generateSliceFieldConfiguration(rtype.Elem())
	case reflect.Struct:
		subfields, err := generateStructFieldConfigurations(rtype)
		if err != nil {
			return nil, err
		}

		return &FieldConfiguration{
			SubFields: subfields,
		}, nil

	default: // unsupported types are just not filterable
		return nil, nil
	}
}

func generateSliceFieldConfiguration(elemType reflect.Type) (*FieldConfiguration, error) {
	if coerceFn, ok := primitiveCoercionFns[elemType.Kind()]; ok {
		// slices of primitives have somewhat different supported operations
		return &FieldConfiguration{
			CoerceFn:            coerceFn,
			SupportedOperations: []MatchOperator{MatchIn, MatchNotIn, MatchIsEmpty, MatchIsNotEmpty},
		}, nil
	}

	subfield, err := generateFieldConfigurationInternal(elemType)
	if err != nil {
		return nil, err
	}

	cfg := &FieldConfiguration{
		SupportedOperations: []MatchOperator{MatchIsEmpty, MatchIsNotEmpty},
	}

	if subfield != nil && len(subfield.SubFields) > 0 {
		cfg.SubFields = subfield.SubFields
	}

	return cfg, nil
}

func generateMapFieldConfiguration(keyType, valueType reflect.Type) (*FieldConfiguration, error) {
	switch keyType.Kind() {
	case reflect.String:
		subfield, err := generateFieldConfigurationInternal(valueType)
		if err != nil {
			return nil, err
		}

		cfg := &FieldConfiguration{
			CoerceFn:            CoerceString,
			SupportedOperations: []MatchOperator{MatchIsEmpty, MatchIsNotEmpty, MatchIn, MatchNotIn},
		}

		if subfield != nil {
			cfg.SubFields = FieldConfigurations{
				FieldNameAny: subfield,
			}
		}

		return cfg, nil

	default:
		// For maps with non-string keys we can really only do emptiness checks
		// and cannot index into them at all
		return &FieldConfiguration{
			SupportedOperations: []MatchOperator{MatchIsEmpty, MatchIsNotEmpty},
		}, nil
	}
}

func generateStructFieldConfigurations(rtype reflect.Type) (FieldConfigurations, error) {
	fieldConfigs := make(FieldConfigurations)

	for i := 0; i < rtype.NumField(); i++ {
		field := rtype.Field(i)

		fieldTag := field.Tag.Get("bexpr")

		var fieldNames []string

		if field.PkgPath != "" {
			// we can't handle unexported fields using reflection
			continue
		}

		if fieldTag != "" {
			parts := strings.Split(fieldTag, ",")

			if len(parts) > 0 {
				if parts[0] == "-" {
					continue
				}

				fieldNames = parts
			} else {
				fieldNames = append(fieldNames, field.Name)
			}
		} else {
			fieldNames = append(fieldNames, field.Name)
		}

		cfg, err := generateFieldConfigurationInternal(field.Type)
		if err != nil {
			return nil, err
		}
		if cfg == nil {
			// unsupported field types are simply not filterable
			continue
		}
		cfg.StructFieldName = field.Name

		// link the config to all the correct names
		for _, name := range fieldNames {
			fieldConfigs[FieldName(name)] = cfg
		}
	}

	return fieldConfigs, nil
}

// GenerateFieldConfigurations can be used to generate the `FieldConfigurations` map.
// It supports generating configurations for either a `map[string]*` or a `struct` as the `topLevelType`.
//
// Internally within the top level type the following is supported:
//
// Primitive Types:
//    strings
//    integers (all widths and signedness)
//    floats (32 and 64 bit)
//    bool
//
// Compound Types:
//    `map[*]*`
//       - Supports emptiness checking. Does not support further selector nesting.
//    `map[string]*`
//       - Supports in/contains operations on the keys.
//    `map[string]<supported type>`
//       - Will have a single subfield with name `FieldNameAny` (wildcard) and the rest of
//         the field configuration will come from the `<supported type>`.
//    `[]*`
//       - Supports emptiness checking only. Does not support further selector nesting.
//    `[]<supported primitive type>`
//       - Supports in/contains operations against the primitive values.
//    `[]<supported compound type>`
//       - Will have subfields with the configuration of whatever the supported
//         compound type is.
//       - Does not support indexing of individual values like a map does currently,
//         and with the current evaluation logic slices of slices will mostly be
//         handled as if they were flattened. One thing that cannot be done is
//         to perform emptiness/contains checking against the internal slice.
//    structs
//       - No operations are supported on the struct itself.
//       - Will have subfield configurations generated for the fields of the struct.
//       - A struct tag like `bexpr:"<name>"` changes the name used to select the subfield.
//       - Unexported struct fields are never selectable; the reflection-based walk skips them.
//       - Exported fields can be made unselectable by adding a tag to the field like `bexpr:"-"`.
func GenerateFieldConfigurations(topLevelType interface{}) (FieldConfigurations, error) {
	return generateFieldConfigurations(reflect.TypeOf(topLevelType))
}

func generateFieldConfigurations(rtype reflect.Type) (FieldConfigurations, error) {
	if fields, ok := generateFieldConfigurationInterface(rtype); ok {
		return fields, nil
	}

	// Do this after we check for interface implementation
	rtype = derefType(rtype)

	switch rtype.Kind() {
	case reflect.Struct:
		fields, err := generateStructFieldConfigurations(rtype)
		return fields, err
	case reflect.Map:
		if rtype.Key().Kind() != reflect.String {
			return nil, fmt.Errorf("Cannot generate FieldConfigurations for maps with keys that are not strings")
		}

		elemType := rtype.Elem()

		field, err := generateFieldConfigurationInternal(elemType)
		if err != nil {
			return nil, err
		}

		if field == nil {
			return nil, nil
		}

		return FieldConfigurations{
			FieldNameAny: field,
		}, nil
	}

	return nil, fmt.Errorf("Invalid top level type - can only use structs, map[string]* or a MatchExpressionEvaluator")
}

func (config *FieldConfiguration) stringInternal(builder *strings.Builder, level int, path string) {
	fmt.Fprintf(builder, "%sPath: %s, StructFieldName: %s, CoerceFn: %p, SupportedOperations: %v\n", strings.Repeat("   ", level), path, config.StructFieldName, config.CoerceFn, config.SupportedOperations)
	if len(config.SubFields) > 0 {
		config.SubFields.stringInternal(builder, level+1, path)
	}
}

func (config *FieldConfiguration) String() string {
	var builder strings.Builder
	config.stringInternal(&builder, 0, "")
	return builder.String()
}

func (configs FieldConfigurations) stringInternal(builder *strings.Builder, level int, path string) {
	for fieldName, cfg := range configs {
		newPath := string(fieldName)
		if level > 0 {
			newPath = fmt.Sprintf("%s.%s", path, fieldName)
		}
		cfg.stringInternal(builder, level, newPath)
	}
}

func (configs FieldConfigurations) String() string {
	var builder strings.Builder
	configs.stringInternal(&builder, 0, "")
	return builder.String()
}
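A sketch of the struct-tag behavior documented above; the `Node` type and its tags are illustrative:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Node shows the three tag cases described in the doc comment above.
type Node struct {
	Name    string `bexpr:"name,Name"` // selectable as "name" or "Name"
	Secret  string `bexpr:"-"`         // never selectable
	Address string                     // untagged: selectable as "Address"
}

func main() {
	fields, err := bexpr.GenerateFieldConfigurations(Node{})
	if err != nil {
		panic(err)
	}
	// prints configurations for "name", "Name" and "Address" only
	fmt.Println(fields)
}
```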
@ -0,0 +1,106 @@
package bexpr

import (
	"fmt"
	"reflect"
)

type Filter struct {
	// The underlying boolean expression evaluator
	evaluator *Evaluator
}

func getElementType(dataType interface{}) reflect.Type {
	rtype := reflect.TypeOf(dataType)
	if rtype == nil {
		return nil
	}
	switch rtype.Kind() {
	case reflect.Map, reflect.Slice, reflect.Array:
		return rtype.Elem()
	default:
		return rtype
	}
}

// CreateFilter creates a filter to operate on the given data type.
// The data type passed can either be a container type (map, slice or array) or the element type.
// For example, if you want to filter a []Foo then the data type to pass here is either []Foo or just Foo.
// If no expression is provided the nil filter will be returned; this is not an error. This is done
// to allow executing the nil filter, which is just a no-op.
func CreateFilter(expression string, config *EvaluatorConfig, dataType interface{}) (*Filter, error) {
	if expression == "" {
		// nil filter
		return nil, nil
	}
	exp, err := CreateEvaluatorForType(expression, config, getElementType(dataType))
	if err != nil {
		return nil, fmt.Errorf("Failed to create boolean expression evaluator: %v", err)
	}

	return &Filter{
		evaluator: exp,
	}, nil
}

// Execute executes the filter. When called on a nil filter this is a no-op and
// will return the original data.
func (f *Filter) Execute(data interface{}) (interface{}, error) {
	if f == nil {
		return data, nil
	}

	rvalue := reflect.ValueOf(data)
	rtype := rvalue.Type()

	switch rvalue.Kind() {
	case reflect.Array:
		// For arrays we return slices instead of fixed sized arrays
		rtype = reflect.SliceOf(rtype.Elem())
		fallthrough
	case reflect.Slice:
		newSlice := reflect.MakeSlice(rtype, 0, rvalue.Len())

		for i := 0; i < rvalue.Len(); i++ {
			item := rvalue.Index(i)
			if !item.CanInterface() {
				return nil, fmt.Errorf("Slice/Array value cannot be used")
			}
			result, err := f.evaluator.Evaluate(item.Interface())
			if err != nil {
				return nil, err
			}

			if result {
				newSlice = reflect.Append(newSlice, item)
			}
		}

		return newSlice.Interface(), nil
	case reflect.Map:
		newMap := reflect.MakeMap(rtype)

		// TODO (mkeeler) - Update to use a MapRange iterator once Go 1.12 is usable
		// for all of our products
		for _, mapKey := range rvalue.MapKeys() {
			item := rvalue.MapIndex(mapKey)

			if !item.CanInterface() {
				return nil, fmt.Errorf("Map value cannot be used")
			}

			result, err := f.evaluator.Evaluate(item.Interface())
			if err != nil {
				return nil, err
			}

			if result {
				newMap.SetMapIndex(mapKey, item)
			}
		}

		return newMap.Interface(), nil
	default:
		return nil, fmt.Errorf("Only slices, arrays and maps are filterable")
	}
}
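A usage sketch for the filter API above, filtering a slice; the `Check` type and values are illustrative:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

type Check struct {
	Status string
}

func main() {
	checks := []Check{{Status: "passing"}, {Status: "critical"}}

	// An empty expression would return the nil filter, whose Execute is a no-op.
	filter, err := bexpr.CreateFilter(`Status == "passing"`, nil, checks)
	if err != nil {
		panic(err)
	}

	result, err := filter.Execute(checks)
	if err != nil {
		panic(err)
	}
	fmt.Println(result.([]Check)) // [{passing}]
}
```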
@ -0,0 +1,3 @@
module github.com/hashicorp/go-bexpr

require github.com/stretchr/testify v1.3.0
@ -0,0 +1,8 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
File diff suppressed because it is too large
@ -0,0 +1,157 @@
{
package bexpr

import (
    "strconv"
    "strings"
)
}

Input <- _? "(" _? expr:OrExpression _? ")" _? EOF {
    return expr, nil
} / _? expr:OrExpression _? EOF {
    return expr, nil
}

OrExpression <- left:AndExpression _ "or" _ right:OrExpression {
    return &BinaryExpression{
        Operator: BinaryOpOr,
        Left:     left.(Expression),
        Right:    right.(Expression),
    }, nil
} / expr:AndExpression {
    return expr, nil
}

AndExpression <- left:NotExpression _ "and" _ right:AndExpression {
    return &BinaryExpression{
        Operator: BinaryOpAnd,
        Left:     left.(Expression),
        Right:    right.(Expression),
    }, nil
} / expr:NotExpression {
    return expr, nil
}

NotExpression <- "not" _ expr:NotExpression {
    if unary, ok := expr.(*UnaryExpression); ok && unary.Operator == UnaryOpNot {
        // small optimization to get rid of unnecessary levels of AST nodes
        // for things like: not not foo == 3 which is equivalent to foo == 3
        return unary.Operand, nil
    }

    return &UnaryExpression{
        Operator: UnaryOpNot,
        Operand:  expr.(Expression),
    }, nil
} / expr:ParenthesizedExpression {
    return expr, nil
}

ParenthesizedExpression "grouping" <- "(" _? expr:OrExpression _? ")" {
    return expr, nil
} / expr:MatchExpression {
    return expr, nil
} / "(" _? OrExpression _? !")" &{
    return false, errors.New("Unmatched parentheses")
}

MatchExpression "match" <- MatchSelectorOpValue / MatchSelectorOp / MatchValueOpSelector

MatchSelectorOpValue "match" <- selector:Selector operator:(MatchEqual / MatchNotEqual / MatchContains / MatchNotContains) value:Value {
    return &MatchExpression{Selector: selector.(Selector), Operator: operator.(MatchOperator), Value: value.(*MatchValue)}, nil
}

MatchSelectorOp "match" <- selector:Selector operator:(MatchIsEmpty / MatchIsNotEmpty) {
    return &MatchExpression{Selector: selector.(Selector), Operator: operator.(MatchOperator), Value: nil}, nil
}

MatchValueOpSelector "match" <- value:Value operator:(MatchIn / MatchNotIn) selector:Selector {
    return &MatchExpression{Selector: selector.(Selector), Operator: operator.(MatchOperator), Value: value.(*MatchValue)}, nil
} / Value operator:(MatchIn / MatchNotIn) !Selector &{
    return false, errors.New("Invalid selector")
}

MatchEqual <- _? "==" _? {
    return MatchEqual, nil
}
MatchNotEqual <- _? "!=" _? {
    return MatchNotEqual, nil
}
MatchIsEmpty <- _ "is" _ "empty" {
    return MatchIsEmpty, nil
}
MatchIsNotEmpty <- _ "is" _ "not" _ "empty" {
    return MatchIsNotEmpty, nil
}
MatchIn <- _ "in" _ {
    return MatchIn, nil
}
MatchNotIn <- _ "not" _ "in" _ {
    return MatchNotIn, nil
}
MatchContains <- _ "contains" _ {
    return MatchIn, nil
}
MatchNotContains <- _ "not" _ "contains" _ {
    return MatchNotIn, nil
}

Selector "selector" <- first:Identifier rest:SelectorOrIndex* {
    sel := Selector{
        first.(string),
    }

    if rest != nil {
        for _, v := range rest.([]interface{}) {
            sel = append(sel, v.(string))
        }
    }
    return sel, nil
}

Identifier <- [a-zA-Z] [a-zA-Z0-9_]* {
    return string(c.text), nil
}

SelectorOrIndex <- "." ident:Identifier {
    return ident, nil
} / expr:IndexExpression {
    return expr, nil
}

IndexExpression "index" <- "[" _? lit:StringLiteral _? "]" {
    return lit, nil
} / "[" _? !StringLiteral &{
    return false, errors.New("Invalid index")
} / "[" _? StringLiteral _? !"]" &{
    return false, errors.New("Unclosed index expression")
}

Value "value" <- selector:Selector { return &MatchValue{Raw: strings.Join(selector.(Selector), ".")}, nil }
    / n:NumberLiteral { return &MatchValue{Raw: n.(string)}, nil }
    / s:StringLiteral { return &MatchValue{Raw: s.(string)}, nil }

NumberLiteral "number" <- "-"? IntegerOrFloat &AfterNumbers {
    return string(c.text), nil
} / "-"? IntegerOrFloat !AfterNumbers &{
    return false, errors.New("Invalid number literal")
}

AfterNumbers <- &(_ / EOF / ")")

IntegerOrFloat <- ("0" / [1-9][0-9]*) ("." [0-9]+)?

StringLiteral "string" <- ('`' RawStringChar* '`' / '"' DoubleStringChar* '"') {
    return strconv.Unquote(string(c.text))
} / ('`' RawStringChar* / '"' DoubleStringChar*) EOF &{
    return false, errors.New("Unterminated string literal")
}

RawStringChar <- !'`' .
DoubleStringChar <- !'"' .

_ "whitespace" <- [ \t\r\n]+

EOF <- !.
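For reference, a few expressions the grammar above accepts (selectors and values are illustrative):

```text
foo.bar == 3
port != 8500
"primary" in service.tags
meta["env"] is not empty
not (a == 1 or b contains "x")
```

Note that `contains` parses to the same `MatchIn` operator as `in`; only the operand order in the source expression differs.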
@ -0,0 +1,59 @@
package bexpr

import (
	"reflect"
	"sync"
)

var DefaultRegistry Registry = NewSyncRegistry()

type Registry interface {
	GetFieldConfigurations(reflect.Type) (FieldConfigurations, error)
}

type SyncRegistry struct {
	configurations map[reflect.Type]FieldConfigurations
	lock           sync.RWMutex
}

func NewSyncRegistry() *SyncRegistry {
	return &SyncRegistry{
		configurations: make(map[reflect.Type]FieldConfigurations),
	}
}

func (r *SyncRegistry) GetFieldConfigurations(rtype reflect.Type) (FieldConfigurations, error) {
	if r != nil {
		r.lock.RLock()
		configurations, ok := r.configurations[rtype]
		r.lock.RUnlock()

		if ok {
			return configurations, nil
		}
	}

	fields, err := generateFieldConfigurations(rtype)
	if err != nil {
		return nil, err
	}

	if r != nil {
		r.lock.Lock()
		r.configurations[rtype] = fields
		r.lock.Unlock()
	}

	return fields, nil
}

type nilRegistry struct{}

// NilRegistry is a pass-through registry that can be used to prevent using the
// default registry and thus storing any field configurations.
var NilRegistry = (*nilRegistry)(nil)

func (r *nilRegistry) GetFieldConfigurations(rtype reflect.Type) (FieldConfigurations, error) {
	fields, err := generateFieldConfigurations(rtype)
	return fields, err
}
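A sketch of plugging a non-default registry into an evaluator via the `EvaluatorConfig`; the `Item` type is illustrative:

```go
package main

import (
	bexpr "github.com/hashicorp/go-bexpr"
)

type Item struct {
	Name string
}

func main() {
	// A private SyncRegistry keeps cached FieldConfigurations out of the
	// process-wide DefaultRegistry.
	cfg := &bexpr.EvaluatorConfig{Registry: bexpr.NewSyncRegistry()}
	if _, err := bexpr.CreateEvaluatorForType(`Name == "web"`, cfg, Item{}); err != nil {
		panic(err)
	}

	// NilRegistry regenerates configurations on every use instead of caching them.
	cfg = &bexpr.EvaluatorConfig{Registry: bexpr.NilRegistry}
	if _, err := bexpr.CreateEvaluatorForType(`Name == "web"`, cfg, Item{}); err != nil {
		panic(err)
	}
}
```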
@ -0,0 +1,123 @@
package bexpr

import (
	"fmt"
)

func validateRecurse(ast Expression, fields FieldConfigurations, maxRawValueLength int) (int, error) {
	switch node := ast.(type) {
	case *UnaryExpression:
		switch node.Operator {
		case UnaryOpNot:
			// this is fine
		default:
			return 0, fmt.Errorf("Invalid unary expression operator: %d", node.Operator)
		}

		if node.Operand == nil {
			return 0, fmt.Errorf("Invalid unary expression operand: nil")
		}
		return validateRecurse(node.Operand, fields, maxRawValueLength)
	case *BinaryExpression:
		switch node.Operator {
		case BinaryOpAnd, BinaryOpOr:
			// this is fine
		default:
			return 0, fmt.Errorf("Invalid binary expression operator: %d", node.Operator)
		}

		if node.Left == nil {
			return 0, fmt.Errorf("Invalid left hand side of binary expression: nil")
		} else if node.Right == nil {
			return 0, fmt.Errorf("Invalid right hand side of binary expression: nil")
		}

		leftMatches, err := validateRecurse(node.Left, fields, maxRawValueLength)
		if err != nil {
			return leftMatches, err
		}

		rightMatches, err := validateRecurse(node.Right, fields, maxRawValueLength)
		return leftMatches + rightMatches, err
	case *MatchExpression:
		if len(node.Selector) < 1 {
			return 1, fmt.Errorf("Invalid selector: %q", node.Selector)
		}

		if node.Value != nil && maxRawValueLength != 0 && len(node.Value.Raw) > maxRawValueLength {
			return 1, fmt.Errorf("Value in expression with length %d for selector %q exceeds maximum length of %d", len(node.Value.Raw), node.Selector, maxRawValueLength)
		}

		// exit early if we have no fields to check against
		if len(fields) < 1 {
			return 1, nil
		}

		configs := fields
		var lastConfig *FieldConfiguration
		// validate the selector
		for idx, field := range node.Selector {
			if fcfg, ok := configs[FieldName(field)]; ok {
				lastConfig = fcfg
				configs = fcfg.SubFields
			} else if fcfg, ok := configs[FieldNameAny]; ok {
				lastConfig = fcfg
				configs = fcfg.SubFields
			} else {
				return 1, fmt.Errorf("Selector %q is not valid", node.Selector[:idx+1])
			}

			// this just verifies that the FieldConfigurations we are using were created properly
			if lastConfig == nil {
				return 1, fmt.Errorf("FieldConfiguration for selector %q is nil", node.Selector[:idx])
			}
		}

		// check the operator
		found := false
		for _, op := range lastConfig.SupportedOperations {
			if op == node.Operator {
				found = true
				break
			}
		}

		if !found {
			return 1, fmt.Errorf("Invalid match operator %q for selector %q", node.Operator, node.Selector)
		}

		// coerce/validate the value
		if node.Value != nil {
			if lastConfig.CoerceFn != nil {
				coerced, err := lastConfig.CoerceFn(node.Value.Raw)
				if err != nil {
					return 1, fmt.Errorf("Failed to coerce value %q for selector %q: %v", node.Value.Raw, node.Selector, err)
				}

				node.Value.Converted = coerced
			}
		} else {
			switch node.Operator {
			case MatchIsEmpty, MatchIsNotEmpty:
				// these don't require values
			default:
				return 1, fmt.Errorf("Match operator %q requires a non-nil value", node.Operator)
			}
		}
		return 1, nil
	}
	return 0, fmt.Errorf("Cannot validate: Invalid AST")
}

func validate(ast Expression, fields FieldConfigurations, maxMatches, maxRawValueLength int) error {
	matches, err := validateRecurse(ast, fields, maxRawValueLength)
	if err != nil {
		return err
	}

	if maxMatches != 0 && matches > maxMatches {
		return fmt.Errorf("Number of match expressions (%d) exceeds the limit (%d)", matches, maxMatches)
	}

	return nil
}
@ -0,0 +1,13 @@
engines:
  gofmt:
    enabled: true
  golint:
    enabled: true
  govet:
    enabled: true

exclude_patterns:
- ".github/"
- "vendor/"
- "codegen/"
- "doc.go"
@ -1,4 +1,11 @@
/dep
/testdep
/profile.out
/coverage.txt
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
@ -4,10 +4,22 @@ go:
- 1.9
- tip

env:
  global:
  - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00

before_script:
- curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
- chmod +x ./cc-test-reporter
- ./cc-test-reporter before-build

install:
- go get github.com/go-task/task/cmd/task

script:
- task dl-deps
- task lint
- task test
- task test-coverage

after_script:
- ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT
@ -15,13 +15,16 @@

[[projects]]
  name = "github.com/stretchr/testify"
  packages = ["assert"]
  packages = [
    "assert",
    "require"
  ]
  revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
  version = "v1.2.0"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "50e2495ec1af6e2f7ffb2f3551e4300d30357d7c7fe38ff6056469fa9cfb3673"
  inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876"
  solver-name = "gps-cdcl"
  solver-version = 1
@ -1,3 +1,8 @@
[prune]
  unused-packages = true
  non-go = true
  go-tests = true

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "~1.2.0"
@ -1,6 +1,8 @@
# Objx
[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx)
[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx)
[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability)
[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage)
[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx)
[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx)
@ -12,11 +12,12 @@ update-deps:
  cmds:
  - dep ensure
  - dep ensure -update
  - dep prune

lint:
  desc: Runs golint
  cmds:
  - go fmt $(go list ./... | grep -v /vendor/)
  - go vet $(go list ./... | grep -v /vendor/)
  - golint $(ls *.go | grep -v "doc.go")
  silent: true

@ -24,3 +25,8 @@ test:
  desc: Runs go tests
  cmds:
  - go test -race .

test-coverage:
  desc: Runs go tests and calculates test coverage
  cmds:
  - go test -coverprofile=c.out .
@ -1,7 +1,6 @@
package objx

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"

@ -28,7 +27,7 @@ var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
//
//   o.Get("books[1].chapters[2].title")
func (m Map) Get(selector string) *Value {
	rawObj := access(m, selector, nil, false, false)
	rawObj := access(m, selector, nil, false)
	return &Value{data: rawObj}
}

@ -43,34 +42,25 @@ func (m Map) Get(selector string) *Value {
//
//   o.Set("books[1].chapters[2].title","Time to Go")
func (m Map) Set(selector string, value interface{}) Map {
	access(m, selector, value, true, false)
	access(m, selector, value, true)
	return m
}

// access accesses the object using the selector and performs the
// appropriate action.
func access(current, selector, value interface{}, isSet, panics bool) interface{} {

func access(current, selector, value interface{}, isSet bool) interface{} {
	switch selector.(type) {
	case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:

		if array, ok := current.([]interface{}); ok {
			index := intFromInterface(selector)

			if index >= len(array) {
				if panics {
					panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
				}
				return nil
			}

			return array[index]
		}

		return nil

	case string:

		selStr := selector.(string)
		selSegs := strings.SplitN(selStr, PathSeparator, 2)
		thisSel := selSegs[0]

@ -79,7 +69,6 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{

		if strings.Contains(thisSel, "[") {
			arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel)

			if len(arrayMatches) > 0 {
				// Get the key into the map
				thisSel = arrayMatches[1]

@ -94,11 +83,9 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{
			}
		}
	}

	if curMap, ok := current.(Map); ok {
		current = map[string]interface{}(curMap)
	}

	// get the object in question
	switch current.(type) {
	case map[string]interface{}:

@ -111,29 +98,19 @@ func access(current, selector, value interface{}, isSet, panics bool) interface{
	default:
		current = nil
	}

	if current == nil && panics {
		panic(fmt.Sprintf("objx: '%v' invalid on object.", selector))
	}

	// do we need to access the item of an array?
	if index > -1 {
		if array, ok := current.([]interface{}); ok {
			if index < len(array) {
				current = array[index]
			} else {
				if panics {
					panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
				}
				current = nil
			}
		}
	}

	if len(selSegs) > 1 {
		current = access(current, selSegs[1], value, isSet, panics)
		current = access(current, selSegs[1], value, isSet)
	}

	}
	return current
}

@ -165,7 +142,7 @@ func intFromInterface(selector interface{}) int {
	case uint64:
		value = int(selector.(uint64))
	default:
		panic("objx: array access argument is not an integer type (this should never happen)")
		return 0
	}
	return value
}
@ -47,9 +47,8 @@ func New(data interface{}) Map {
//
// The arguments follow a key, value pattern.
//
// Panics
//
// Panics if any key argument is non-string or if there are an odd number of arguments.
// Returns nil if any key argument is non-string or if there are an odd number of arguments.
//
// Example
//

@ -58,14 +57,13 @@ func New(data interface{}) Map {
//   m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
//
//   // creates an Map equivalent to
//   m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}})
//   m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}}
func MSI(keyAndValuePairs ...interface{}) Map {
	newMap := make(map[string]interface{})
	newMap := Map{}
	keyAndValuePairsLen := len(keyAndValuePairs)
	if keyAndValuePairsLen%2 != 0 {
		panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.")
		return nil
	}

	for i := 0; i < keyAndValuePairsLen; i = i + 2 {
		key := keyAndValuePairs[i]
		value := keyAndValuePairs[i+1]

@ -73,11 +71,11 @@ func MSI(keyAndValuePairs ...interface{}) Map {
		// make sure the key is a string
		keyString, keyStringOK := key.(string)
		if !keyStringOK {
			panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.")
			return nil
		}
		newMap[keyString] = value
	}
	return New(newMap)
	return newMap
}

// ****** Conversion Constructors

@ -170,12 +168,11 @@ func FromURLQuery(query string) (Map, error) {
	if err != nil {
		return nil, err
	}

	m := make(map[string]interface{})
	m := Map{}
	for k, vals := range vals {
		m[k] = vals[0]
	}
	return New(m), nil
	return m, nil
}

// MustFromURLQuery generates a new Obj by parsing the specified
@ -5,14 +5,7 @@ package objx
|
|||
func (m Map) Exclude(exclude []string) Map {
|
||||
excluded := make(Map)
|
||||
for k, v := range m {
|
||||
var shouldInclude = true
|
||||
for _, toExclude := range exclude {
|
||||
if k == toExclude {
|
||||
shouldInclude = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if shouldInclude {
|
||||
if !contains(exclude, k) {
|
||||
excluded[k] = v
|
||||
}
|
||||
}
|
||||
|
@ -21,11 +14,11 @@ func (m Map) Exclude(exclude []string) Map {
|
|||
|
||||
// Copy creates a shallow copy of the Obj.
|
||||
func (m Map) Copy() Map {
|
||||
copied := make(map[string]interface{})
|
||||
copied := Map{}
|
||||
for k, v := range m {
|
||||
copied[k] = v
|
||||
}
|
||||
return New(copied)
|
||||
return copied
|
||||
}
|
||||
|
||||
// Merge blends the specified map with a copy of this map and returns the result.
|
||||
|
@ -52,12 +45,12 @@ func (m Map) MergeHere(merge Map) Map {
|
|||
// to change the keys and values as it goes. This method requires that
|
||||
// the wrapped object be a map[string]interface{}
|
||||
func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map {
|
||||
newMap := make(map[string]interface{})
|
||||
newMap := Map{}
|
||||
for k, v := range m {
|
||||
modifiedKey, modifiedVal := transformer(k, v)
|
||||
newMap[modifiedKey] = modifiedVal
|
||||
}
|
||||
return New(newMap)
|
||||
return newMap
|
||||
}
|
||||
|
||||
// TransformKeys builds a new map using the specified key mapping.
|
||||
|
@@ -72,3 +65,13 @@ func (m Map) TransformKeys(mapping map[string]string) Map {
|
|||
return key, value
|
||||
})
|
||||
}
|
||||
|
||||
// Checks if a string slice contains a string
|
||||
func contains(s []string, e string) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
|
@@ -5,13 +5,8 @@ import (
|
|||
"encoding/hex"
|
||||
)
|
||||
|
||||
// HashWithKey hashes the specified string using the security
|
||||
// key.
|
||||
// HashWithKey hashes the specified string using the security key
|
||||
func HashWithKey(data, key string) string {
|
||||
hash := sha1.New()
|
||||
_, err := hash.Write([]byte(data + ":" + key))
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return hex.EncodeToString(hash.Sum(nil))
|
||||
d := sha1.Sum([]byte(data + ":" + key))
|
||||
return hex.EncodeToString(d[:])
|
||||
}
|
||||
|
|
|
@@ -30,8 +30,6 @@ func (v *Value) String() string {
|
|||
return strconv.FormatFloat(v.Float64(), 'f', -1, 64)
|
||||
case v.IsInt():
|
||||
return strconv.FormatInt(int64(v.Int()), 10)
|
||||
|
||||
case v.IsInt8():
|
||||
return strconv.FormatInt(int64(v.Int8()), 10)
|
||||
case v.IsInt16():
|
||||
|
@@ -51,6 +49,5 @@ func (v *Value) String() string {
|
|||
case v.IsUint64():
|
||||
return strconv.FormatUint(v.Uint64(), 10)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%#v", v.Data())
|
||||
}
|
||||
|
|
|
@@ -195,6 +195,8 @@ github.com/hashicorp/consul/sdk/testutil/retry
|
|||
github.com/hashicorp/consul/sdk/testutil
|
||||
# github.com/hashicorp/errwrap v1.0.0
|
||||
github.com/hashicorp/errwrap
|
||||
# github.com/hashicorp/go-bexpr v0.1.0
|
||||
github.com/hashicorp/go-bexpr
|
||||
# github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
|
||||
github.com/hashicorp/go-checkpoint
|
||||
# github.com/hashicorp/go-cleanhttp v0.5.1
|
||||
|
@@ -450,7 +452,7 @@ github.com/softlayer/softlayer-go/sl
|
|||
github.com/softlayer/softlayer-go/config
|
||||
# github.com/spf13/pflag v1.0.3
|
||||
github.com/spf13/pflag
|
||||
# github.com/stretchr/objx v0.1.0
|
||||
# github.com/stretchr/objx v0.1.1
|
||||
github.com/stretchr/objx
|
||||
# github.com/stretchr/testify v1.3.0
|
||||
github.com/stretchr/testify/require
|
||||
|
|
|
@@ -37,6 +37,11 @@ The table below shows this endpoint's support for
|
|||
| ---------------- | ----------------- | ------------- | ------------------------ |
|
||||
| `NO` | `none` | `none` | `node:read,service:read` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -62,6 +67,24 @@ $ curl \
|
|||
}
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter will be executed against each health check value in the results map with
|
||||
the following selectors and filter operations being supported:
|
||||
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| ------------- | ---------------------------------- |
|
||||
| `CheckID` | Equal, Not Equal |
|
||||
| `Name` | Equal, Not Equal |
|
||||
| `Node` | Equal, Not Equal |
|
||||
| `Notes` | Equal, Not Equal |
|
||||
| `Output` | Equal, Not Equal |
|
||||
| `ServiceID` | Equal, Not Equal |
|
||||
| `ServiceName` | Equal, Not Equal |
|
||||
| `ServiceTags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Status` | Equal, Not Equal |
|
||||
|
||||
## Register Check
|
||||
|
||||
This endpoint adds a new check to the local agent. Checks may be of script,
|
||||
|
|
|
@@ -38,6 +38,11 @@ The table below shows this endpoint's support for
|
|||
| ---------------- | ----------------- | ------------- | -------------- |
|
||||
| `NO` | `none` | `none` | `service:read` |
|
||||
|
||||
### Parameters
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -67,6 +72,38 @@ $ curl \
|
|||
}
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter is executed against each value in the service mapping with the
|
||||
following selectors and filter operations being supported:
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| -------------------------------------- | ---------------------------------- |
|
||||
| `Address` | Equal, Not Equal |
|
||||
| `Connect.Native` | Equal, Not Equal |
|
||||
| `EnableTagOverride` | Equal, Not Equal |
|
||||
| `ID` | Equal, Not Equal |
|
||||
| `Kind` | Equal, Not Equal |
|
||||
| `Meta` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Meta.<any>` | Equal, Not Equal |
|
||||
| `Port` | Equal, Not Equal |
|
||||
| `Proxy.DestinationServiceID` | Equal, Not Equal |
|
||||
| `Proxy.DestinationServiceName` | Equal, Not Equal |
|
||||
| `Proxy.LocalServiceAddress` | Equal, Not Equal |
|
||||
| `Proxy.LocalServicePort` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams` | Is Empty, Is Not Empty |
|
||||
| `Proxy.Upstreams.Datacenter` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.DestinationName` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.DestinationNamespace` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.DestinationType` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.LocalBindAddress` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.LocalBindPort` | Equal, Not Equal |
|
||||
| `Service` | Equal, Not Equal |
|
||||
| `Tags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Weights.Passing` | Equal, Not Equal |
|
||||
| `Weights.Warning` | Equal, Not Equal |
|
||||
|
||||
|
||||
## Get Service Configuration
|
||||
|
||||
This endpoint was added in Consul 1.3.0 and returns the full service definition
|
||||
|
|
|
@@ -55,7 +55,7 @@ The table below shows this endpoint's support for
|
|||
provided, it will be defaulted to the value of the `Service.Service` property.
|
||||
Only one service with a given `ID` may be present per node. The service
|
||||
`Tags`, `Address`, `Meta`, and `Port` fields are all optional. For more
|
||||
information about these fields and the implications of setting them,
|
||||
see the [Service - Agent API](https://www.consul.io/api/agent/service.html) page
|
||||
as registering services differs between using this or the Services Agent endpoint.
|
||||
|
||||
|
@@ -79,11 +79,11 @@ The table below shows this endpoint's support for
|
|||
sending an array of `Check` objects.
|
||||
|
||||
- `SkipNodeUpdate` `(bool: false)` - Specifies whether to skip updating the
node's information in the registration. This is useful in the case where
only a health check or service entry on a node needs to be updated or when
a register request is intended to update a service entry or health check.
In both use cases, node information will not be overwritten if the node is
already registered. Note that if the parameter is enabled for a node that
doesn't exist, it will still be created.
|
||||
|
||||
It is important to note that `Check` does not have to be provided with `Service`
|
||||
|
@ -286,6 +286,9 @@ The table below shows this endpoint's support for
|
|||
will filter the results to nodes with the specified key/value pairs. This is
|
||||
specified as part of the URL as a query parameter.
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -326,6 +329,23 @@ $ curl \
|
|||
]
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter will be executed against each Node in the result list with
|
||||
the following selectors and filter operations being supported:
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| ----------------------- | ---------------------------------- |
|
||||
| `Address` | Equal, Not Equal |
|
||||
| `Datacenter` | Equal, Not Equal |
|
||||
| `ID` | Equal, Not Equal |
|
||||
| `Meta` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Meta.<any>` | Equal, Not Equal |
|
||||
| `Node` | Equal, Not Equal |
|
||||
| `TaggedAddresses` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `TaggedAddresses.<any>` | Equal, Not Equal |
|
||||
|
||||
|
||||
## List Services
|
||||
|
||||
This endpoint returns the services registered in a given datacenter.
|
||||
|
@@ -405,7 +425,7 @@ The table below shows this endpoint's support for
|
|||
the datacenter of the agent being queried. This is specified as part of the
|
||||
URL as a query parameter.
|
||||
|
||||
- `tag` `(string: "")` - Specifies the tag to filter on. This is specified as part of
|
||||
the URL as a query parameter. Can be used multiple times for additional filtering,
|
||||
returning only the results that include all of the tag values provided.
|
||||
|
||||
|
@@ -419,6 +439,9 @@ The table below shows this endpoint's support for
|
|||
will filter the results to nodes with the specified key/value pairs. This is
|
||||
specified as part of the URL as a query parameter.
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -520,6 +543,45 @@ $ curl \
|
|||
value of this struct is equivalent to the `Connect` field for service
|
||||
registration.
|
||||
|
||||
### Filtering
|
||||
|
||||
Filtering is executed against each entry in the top level result list with the
|
||||
following selectors and filter operations being supported:
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| --------------------------------------------- | ---------------------------------- |
|
||||
| `Address` | Equal, Not Equal |
|
||||
| `Datacenter` | Equal, Not Equal |
|
||||
| `ID` | Equal, Not Equal |
|
||||
| `Node` | Equal, Not Equal |
|
||||
| `NodeMeta` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `NodeMeta.<any>` | Equal, Not Equal |
|
||||
| `ServiceAddress` | Equal, Not Equal |
|
||||
| `ServiceConnect.Native` | Equal, Not Equal |
|
||||
| `ServiceEnableTagOverride` | Equal, Not Equal |
|
||||
| `ServiceID` | Equal, Not Equal |
|
||||
| `ServiceKind` | Equal, Not Equal |
|
||||
| `ServiceMeta` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `ServiceMeta.<any>` | Equal, Not Equal |
|
||||
| `ServiceName` | Equal, Not Equal |
|
||||
| `ServicePort` | Equal, Not Equal |
|
||||
| `ServiceProxy.DestinationServiceID` | Equal, Not Equal |
|
||||
| `ServiceProxy.DestinationServiceName` | Equal, Not Equal |
|
||||
| `ServiceProxy.LocalServiceAddress` | Equal, Not Equal |
|
||||
| `ServiceProxy.LocalServicePort` | Equal, Not Equal |
|
||||
| `ServiceProxy.Upstreams` | Is Empty, Is Not Empty |
|
||||
| `ServiceProxy.Upstreams.Datacenter` | Equal, Not Equal |
|
||||
| `ServiceProxy.Upstreams.DestinationName` | Equal, Not Equal |
|
||||
| `ServiceProxy.Upstreams.DestinationNamespace` | Equal, Not Equal |
|
||||
| `ServiceProxy.Upstreams.DestinationType` | Equal, Not Equal |
|
||||
| `ServiceProxy.Upstreams.LocalBindAddress` | Equal, Not Equal |
|
||||
| `ServiceProxy.Upstreams.LocalBindPort` | Equal, Not Equal |
|
||||
| `ServiceTags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `ServiceWeights.Passing` | Equal, Not Equal |
|
||||
| `ServiceWeights.Warning` | Equal, Not Equal |
|
||||
| `TaggedAddresses` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `TaggedAddresses.<any>` | Equal, Not Equal |
|
||||
|
||||
## List Nodes for Connect-capable Service
|
||||
|
||||
This endpoint returns the nodes providing a
|
||||
|
@@ -562,6 +624,9 @@ The table below shows this endpoint's support for
|
|||
the datacenter of the agent being queried. This is specified as part of the
|
||||
URL as a query parameter.
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -608,3 +673,34 @@ $ curl \
|
|||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter will be executed against each value in the `Services` mapping within the
|
||||
top level Node object. The following selectors and filter operations are supported:
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| -------------------------------------- | ---------------------------------- |
|
||||
| `Address` | Equal, Not Equal |
|
||||
| `Connect.Native` | Equal, Not Equal |
|
||||
| `EnableTagOverride` | Equal, Not Equal |
|
||||
| `ID` | Equal, Not Equal |
|
||||
| `Kind` | Equal, Not Equal |
|
||||
| `Meta` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Meta.<any>` | Equal, Not Equal |
|
||||
| `Port` | Equal, Not Equal |
|
||||
| `Proxy.DestinationServiceID` | Equal, Not Equal |
|
||||
| `Proxy.DestinationServiceName` | Equal, Not Equal |
|
||||
| `Proxy.LocalServiceAddress` | Equal, Not Equal |
|
||||
| `Proxy.LocalServicePort` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams` | Is Empty, Is Not Empty |
|
||||
| `Proxy.Upstreams.Datacenter` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.DestinationName` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.DestinationNamespace` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.DestinationType` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.LocalBindAddress` | Equal, Not Equal |
|
||||
| `Proxy.Upstreams.LocalBindPort` | Equal, Not Equal |
|
||||
| `Service` | Equal, Not Equal |
|
||||
| `Tags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Weights.Passing` | Equal, Not Equal |
|
||||
| `Weights.Warning` | Equal, Not Equal |
|
||||
|
|
|
@@ -0,0 +1,107 @@
|
|||
---
|
||||
layout: api
|
||||
page_title: Blocking Queries
|
||||
sidebar_current: api-features-blocking
|
||||
description: |-
|
||||
Many endpoints in Consul support a feature known as "blocking queries". A
|
||||
blocking query is used to wait for a potential change using long polling.
|
||||
---
|
||||
|
||||
|
||||
# Blocking Queries
|
||||
|
||||
Many endpoints in Consul support a feature known as "blocking queries". A
|
||||
blocking query is used to wait for a potential change using long polling. Not
|
||||
all endpoints support blocking, but each endpoint uniquely documents its support
|
||||
for blocking queries in the documentation.
|
||||
|
||||
Endpoints that support blocking queries return an HTTP header named
|
||||
`X-Consul-Index`. This is a unique identifier representing the current state of
|
||||
the requested resource.
|
||||
|
||||
On subsequent requests for this resource, the client can set the `index` query
|
||||
string parameter to the value of `X-Consul-Index`, indicating that the client
|
||||
wishes to wait for any changes subsequent to that index.
|
||||
|
||||
When this is provided, the HTTP request will "hang" until a change in the system
|
||||
occurs, or the maximum timeout is reached. A critical note is that the return of
|
||||
a blocking request is **no guarantee** of a change. It is possible that the
|
||||
timeout was reached or that there was an idempotent write that does not affect
|
||||
the result of the query.
|
||||
|
||||
In addition to `index`, endpoints that support blocking will also honor a `wait`
|
||||
parameter specifying a maximum duration for the blocking request. This is
|
||||
limited to 10 minutes. If not set, the wait time defaults to 5 minutes. This
|
||||
value can be specified in the form of "10s" or "5m" (i.e., 10 seconds or 5
|
||||
minutes, respectively). A small random amount of additional wait time is added
|
||||
to the supplied maximum `wait` time to spread out the wake up time of any
|
||||
concurrent requests. This adds up to `wait / 16` additional time to the maximum
|
||||
duration.
|
||||
|
||||
## Implementation Details
|
||||
|
||||
While the mechanism is relatively simple to work with, there are a few edge
|
||||
cases that must be handled correctly.
|
||||
|
||||
* **Reset the index if it goes backwards**. While indexes in general are
monotonically increasing (i.e. they should only ever increase as time passes),
there are several real-world scenarios in
which they can go backwards for a given query. Implementations must check
to see if a returned index is lower than the previous value,
and if it is, should reset the index to `0` - effectively restarting their blocking loop.
Failure to do so may cause the client to miss future updates for an unbounded
time, or to use an invalid index value that causes no blocking and increases
load on the servers. Cases where this can occur include:
* If a raft snapshot is restored on the servers with an older version of the data.
|
||||
* KV list operations where an item with the highest index is removed.
|
||||
* A Consul upgrade changes the way watches work to optimize them with more
|
||||
granular indexes.
|
||||
|
||||
* **Sanity check index is greater than zero**. After the initial request (or a
reset as above) the `X-Consul-Index` returned _should_ always be greater than zero. It
is a bug in Consul if it is not; however, this has happened a few times and can
still be triggered on some older Consul versions. It's especially bad because it
causes blocking clients that are not aware of it to enter a busy loop, using excessive
client CPU and causing high load on servers. It is _always_ safe to use an
index of `1` to wait for updates when the data being requested doesn't exist
yet, so clients _should_ sanity check that their index is at least 1 after
each blocking response is handled to be sure they actually block on the next
request.
|
||||
|
||||
* **Rate limit**. The blocking query mechanism is reasonably efficient when updates
are relatively rare (on the order of tens of seconds to minutes between updates). In cases
where a result gets updated very fast, however - possibly during an outage or incident
with a badly behaved client - blocking query loops degrade into busy loops that
consume excessive client CPU and cause high server load. While it's possible to just add a sleep
to every iteration of the loop, this is **not** recommended since it causes update
delivery to be delayed in the happy case, and it can exacerbate the problem since
it increases the chance that the index has changed on the next request. Clients
_should_ instead rate limit the loop so that in the happy case they proceed without
waiting, but when values start to churn quickly they degrade into polling at a
reasonable rate (say every 15 seconds). Ideally this is done with an algorithm that
allows a couple of quick successive deliveries before it starts to limit rate - a
[token bucket](https://en.wikipedia.org/wiki/Token_bucket) with a burst of 2 is a simple
way to achieve this; a sketch combining all three rules follows this list.
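
Putting the three rules together, a client watch loop might look like the
following minimal sketch. It is illustrative only: the endpoint URL, the use of
`golang.org/x/time/rate` for the token bucket, and the omitted response
handling are assumptions rather than Consul client library code.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
	"time"

	"golang.org/x/time/rate"
)

// watch long-polls a blocking endpoint while applying all three rules above:
// reset the index if it goes backwards, never block with an index below 1,
// and rate limit the loop with a token bucket allowing a burst of 2.
func watch(ctx context.Context, base string) error {
	limiter := rate.NewLimiter(rate.Every(15*time.Second), 2)
	index := uint64(0)

	for {
		if err := limiter.Wait(ctx); err != nil {
			return err
		}

		resp, err := http.Get(fmt.Sprintf("%s?index=%d&wait=5m", base, index))
		if err != nil {
			continue // transient error; the limiter paces the retry
		}
		// ... decode and act on resp.Body here ...
		resp.Body.Close()

		newIndex, err := strconv.ParseUint(resp.Header.Get("X-Consul-Index"), 10, 64)
		if err != nil {
			continue // missing or malformed index header
		}
		switch {
		case newIndex < index:
			index = 0 // index went backwards: reset, restarting the loop
		case newIndex < 1:
			index = 1 // an index of 1 is always safe to block on
		default:
			index = newIndex
		}
	}
}

func main() {
	_ = watch(context.Background(), "http://localhost:8500/v1/catalog/nodes")
}
```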
|
||||
|
||||
## Hash-based Blocking Queries
|
||||
|
||||
A limited number of agent endpoints also support blocking; however, because the
state is local to the agent and not managed with a consistent raft index, their
blocking mechanism is different.
|
||||
|
||||
Since there is no monotonically increasing index, each response instead contains
|
||||
a header `X-Consul-ContentHash` which is an opaque hash digest generated by
|
||||
hashing over all fields in the response that are relevant.
|
||||
|
||||
Subsequent requests may be sent with a query parameter `hash=<value>` where
|
||||
`value` is the last hash header value seen, and this will block until the `wait`
|
||||
timeout is passed or until the local agent's state changes in such a way that
|
||||
the hash would be different.
|
||||
|
||||
Other than the different header and query parameter names, the biggest
|
||||
difference is that hash values are opaque and can't be compared to see if one
|
||||
result is older or newer than another. In general, hash-based blocking will not
return too early due to an idempotent update since the hash will remain the same
unless the result actually changes; however, as with index-based blocking, there
|
||||
is no strict guarantee that clients will never observe the same result delivered
|
||||
before the full timeout has elapsed.
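
As an illustration, a hash-based watch differs from the index-based loop shown
earlier only in the header and query parameter it round-trips. This is a hedged
sketch: the `web` service ID is a placeholder, and the endpoint chosen must be
one of the agent endpoints that documents support for hash-based blocking.

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	hash := ""
	for i := 0; i < 3; i++ { // bounded here for the example; real watchers loop forever
		url := "http://localhost:8500/v1/agent/service/web?wait=1m"
		if hash != "" {
			url += "&hash=" + hash
		}
		resp, err := http.Get(url)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()

		// The hash is opaque: only equality is meaningful, never ordering.
		hash = resp.Header.Get("X-Consul-ContentHash")
		fmt.Println("current hash:", hash)
	}
}
```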
|
|
@@ -0,0 +1,97 @@
|
|||
---
|
||||
layout: api
|
||||
page_title: Agent Caching
|
||||
sidebar_current: api-features-caching
|
||||
description: |-
|
||||
Some read endpoints support agent caching. They are clearly marked in the
|
||||
documentation.
|
||||
---
|
||||
|
||||
# Agent Caching
|
||||
|
||||
Some read endpoints support agent caching. They are clearly marked in the
|
||||
documentation. Agent caching can take two forms, [`simple`](#simple-caching) or
[`background refresh`](#background-refresh-caching) depending on the endpoint's
semantics. The documentation for each endpoint clearly identifies which, if any,
form of caching is supported. The details for each are described below.

Where supported, caching can be enabled through the `?cached` parameter.
Combining `?cached` with `?consistent` is an error.
|
||||
|
||||
## Simple Caching
|
||||
|
||||
Endpoints supporting simple caching may return a result directly from the local
|
||||
agent's cache without a round trip to the servers. By default the agent caches
|
||||
results for a relatively long time (3 days) such that it can still return a
|
||||
result even if the servers are unavailable for an extended period to enable
|
||||
"fail static" semantics.
|
||||
|
||||
That means that with no other arguments, `?cached` queries might receive a
|
||||
response which is days old. To request better freshness, the HTTP
|
||||
`Cache-Control` header may be set with a directive like `max-age=<seconds>`. In
|
||||
this case the agent will attempt to re-fetch the result from the servers if the
|
||||
cached value is older than the given `max-age`. If the servers can't be reached
|
||||
a 500 is returned as normal.
|
||||
|
||||
To allow clients to maintain fresh results in normal operation but allow stale
|
||||
ones if the servers are unavailable, the `stale-if-error=<seconds>` directive
|
||||
may be additionally provided in the `Cache-Control` header. This will return the
cached value anyway even if it's older than `max-age` (provided it's not older
than `stale-if-error`) rather than a 500. It must be provided along with a
`max-age` or `must-revalidate`. The `Age` response header, if larger than
`max-age`, can be used to determine if the server was unreachable and a cached
version returned instead.
|
||||
|
||||
For example, assuming there is a cached response that is 65 seconds old, and
|
||||
that the servers are currently unavailable, `Cache-Control: max-age=30` will
|
||||
result in a 500 error, while `Cache-Control: max-age=30, stale-if-error=259200`
|
||||
will result in the cached response being returned.
|
||||
|
||||
A request setting either `max-age=0` or `must-revalidate` directives will cause
|
||||
the agent to always re-fetch the response from servers. Either can be combined
|
||||
with `stale-if-error=<seconds>` to ensure fresh results when the servers are
|
||||
available, but falling back to cached results if the request to the servers
|
||||
fails.
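
As a concrete sketch (the service name and the freshness numbers are arbitrary),
a client can combine these directives and inspect the response headers as
follows. The directives are written comma-separated, per HTTP convention:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// The endpoint must support agent caching and be queried with ?cached.
	req, err := http.NewRequest("GET",
		"http://localhost:8500/v1/catalog/service/redis?cached", nil)
	if err != nil {
		panic(err)
	}
	// Prefer results no older than 30s, but accept a cached copy of up to
	// three days old if the servers cannot be reached.
	req.Header.Set("Cache-Control", "max-age=30, stale-if-error=259200")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// X-Cache reports HIT or MISS; Age (in seconds) reveals how stale a
	// hit is and therefore whether the servers were reachable.
	fmt.Println("X-Cache:", resp.Header.Get("X-Cache"),
		"Age:", resp.Header.Get("Age"))
}
```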
|
||||
|
||||
Requests that do not use `?cached` currently bypass the cache entirely so the
|
||||
cached response returned might be more stale than the last uncached response
|
||||
returned on the same agent. If this causes problems, it is possible to make
|
||||
requests using `?cached` and setting `Cache-Control: must-revalidate` to have
|
||||
always-fresh results yet keeping the cache populated with the most recent
|
||||
result.
|
||||
|
||||
In all cases the HTTP `X-Cache` header is always set in the response to either
|
||||
`HIT` or `MISS` indicating whether the response was served from cache or not.
|
||||
|
||||
For cache hits, the HTTP `Age` header is always set in the response to indicate
|
||||
how many seconds since that response was fetched from the servers.
|
||||
|
||||
## Background Refresh Caching
|
||||
|
||||
Endpoints supporting background refresh caching may return a result directly
|
||||
from the local agent's cache without a round trip to the servers. The first fetch
|
||||
that is a miss will cause an initial fetch from the servers, but will also
|
||||
trigger the agent to begin a background blocking query that watches for any
|
||||
changes to that result and updates the cached value if changes occur.
|
||||
|
||||
Subsequent requests will _always_ be a cache hit until there has been no request
|
||||
for the resource for the TTL (which is typically 3 days).
|
||||
|
||||
Clients can perform blocking queries against the local agent which will be
|
||||
served from the cache. This allows multiple clients to watch the same resource
|
||||
locally while only a single blocking watch for that resource will be made to the
|
||||
servers from a given client agent.
|
||||
|
||||
HTTP `Cache-Control` headers are ignored in this mode since the cache is being
|
||||
actively updated and has different semantics to a typical passive cache.
|
||||
|
||||
In all cases the HTTP `X-Cache` header is always set in the response to either
|
||||
`HIT` or `MISS` indicating whether the response was served from cache or not.
|
||||
|
||||
For cache hits, the HTTP `Age` header is always set in the response to indicate
|
||||
how many seconds since that response was fetched from the servers. As long as
|
||||
the local agent has an active connection to the servers, the age will always be
|
||||
`0` since the value is up-to-date. If the agent gets disconnected, the cached
|
||||
result is still returned but with an `Age` that indicates how many seconds have
|
||||
elapsed since the local agent got disconnected from the servers, during which
|
||||
time updates to the result might have been missed.
|
|
@@ -0,0 +1,49 @@
|
|||
---
|
||||
layout: api
|
||||
page_title: Consistency Modes
|
||||
sidebar_current: api-features-consistency
|
||||
description: |-
|
||||
Most of the read query endpoints support multiple levels of consistency. Since no policy will suit all clients' needs, these consistency modes allow the user to have the ultimate say in how to balance the trade-offs inherent in a distributed system.
|
||||
---
|
||||
|
||||
# Consistency Modes
|
||||
|
||||
Most of the read query endpoints support multiple levels of consistency. Since
|
||||
no policy will suit all clients' needs, these consistency modes allow the user
|
||||
to have the ultimate say in how to balance the trade-offs inherent in a
|
||||
distributed system.
|
||||
|
||||
The three read modes are:
|
||||
|
||||
- `default` - If not specified, the default is strongly consistent in almost all
|
||||
cases. However, there is a small window in which a new leader may be elected
|
||||
during which the old leader may service stale values. The trade-off is fast
|
||||
reads but potentially stale values. The condition resulting in stale reads is
|
||||
hard to trigger, and most clients should not need to worry about this case.
|
||||
Also, note that this race condition only applies to reads, not writes.
|
||||
|
||||
- `consistent` - This mode is strongly consistent without caveats. It requires
|
||||
that a leader verify with a quorum of peers that it is still leader. This
|
||||
introduces an additional round-trip to all server nodes. The trade-off is
|
||||
increased latency due to an extra round trip. Most clients should not use this
|
||||
unless they cannot tolerate a stale read.
|
||||
|
||||
- `stale` - This mode allows any server to service the read regardless of
|
||||
whether it is the leader. This means reads can be arbitrarily stale; however,
|
||||
results are generally consistent to within 50 milliseconds of the leader. The
|
||||
trade-off is very fast and scalable reads with a higher likelihood of stale
|
||||
values. Since this mode allows reads without a leader, a cluster that is
|
||||
unavailable will still be able to respond to queries.
|
||||
|
||||
To switch these modes, either the `stale` or `consistent` query parameters
|
||||
should be provided on requests. It is an error to provide both.
|
||||
|
||||
Note that some endpoints support a `cached` parameter which has some of the same
|
||||
semantics as `stale` but different trade-offs. This behavior is described in
|
||||
[Agent Caching](#agent-caching).
|
||||
|
||||
To support bounding the acceptable staleness of data, responses provide the
|
||||
`X-Consul-LastContact` header containing the time in milliseconds that a server
|
||||
was last contacted by the leader node. The `X-Consul-KnownLeader` header also
|
||||
indicates if there is a known leader. These can be used by clients to gauge the
|
||||
staleness of a result and take appropriate action.
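
For illustration, a minimal stale read that applies such a staleness bound might
look like the following sketch; the five-second threshold is an arbitrary
choice:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

func main() {
	// A stale read: any server may answer, trading freshness for scale.
	resp, err := http.Get("http://localhost:8500/v1/catalog/nodes?stale")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Bound the acceptable staleness using the response headers.
	lastContactMs, _ := strconv.Atoi(resp.Header.Get("X-Consul-LastContact"))
	knownLeader := resp.Header.Get("X-Consul-KnownLeader") == "true"

	if !knownLeader || time.Duration(lastContactMs)*time.Millisecond > 5*time.Second {
		fmt.Println("result may be too stale; consider retrying with ?consistent")
	}
}
```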
|
|
@@ -0,0 +1,458 @@
|
|||
---
|
||||
layout: api
|
||||
page_title: Filtering
|
||||
sidebar_current: api-features-filtering
|
||||
description: |-
|
||||
Consul exposes a RESTful HTTP API to control almost every aspect of the
|
||||
Consul agent.
|
||||
---
|
||||
|
||||
# Filtering
|
||||
|
||||
A filter expression is used to refine a data query for some API listing endpoints, as noted in the individual API documentation.
Filtering is executed on the Consul server before the data is returned, reducing the network load. To pass a
filter expression to Consul with a data query, use the `filter` parameter.
|
||||
|
||||
```sh
|
||||
curl -G <path> --data-urlencode 'filter=<filter expression>'
|
||||
```
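
The same kind of request can be issued from code. In this minimal Go sketch the
endpoint and the expression are placeholders; the only requirement is
URL-encoding the expression into the `filter` query parameter:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// url.Values URL-encodes the expression, mirroring --data-urlencode.
	params := url.Values{}
	params.Set("filter", `Meta.env == "qa"`)

	// Assumes a local agent listening on the default HTTP port.
	resp, err := http.Get("http://localhost:8500/v1/agent/services?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```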
|
||||
|
||||
To create a filter expression, you will write one or more expressions using matching operators, selectors, and values.
|
||||
|
||||
## Expression Syntax
|
||||
|
||||
Expressions are written in plain text format. Boolean logic and parenthesization are
|
||||
supported. In general, whitespace is ignored, except within literal
|
||||
strings.
|
||||
|
||||
### Expressions
|
||||
|
||||
There are several methods for connecting expressions, including
|
||||
|
||||
- logical `or`
|
||||
- logical `and`
|
||||
- logical `not`
|
||||
- grouping with parentheses
|
||||
- matching expressions
|
||||
|
||||
```text
|
||||
// Logical Or - evaluates to true if either sub-expression does
|
||||
<Expression 1> or <Expression 2>
|
||||
|
||||
// Logical And - evaluates to true if both sub-expressions do
|
||||
<Expression 1> and <Expression 2>
|
||||
|
||||
// Logical Not - evaluates to true if the sub-expression does not
|
||||
not <Expression 1>
|
||||
|
||||
// Grouping - Overrides normal precedence rules
|
||||
( <Expression 1> )
|
||||
|
||||
// Inspects data to check for a match
|
||||
<Matching Expression 1>
|
||||
```
|
||||
|
||||
Standard operator precedence can be expected for the various forms. For
|
||||
example, the following two expressions would be equivalent.
|
||||
|
||||
```text
|
||||
<Expression 1> and not <Expression 2> or <Expression 3>
|
||||
|
||||
( <Expression 1> and (not <Expression 2> )) or <Expression 3>
|
||||
```
|
||||
|
||||
### Matching Operators
|
||||
|
||||
Matching operators are used to create an expression. All matching operators use a selector or value to choose what data should be
|
||||
matched. Each endpoint that supports filtering accepts a potentially
different list of selectors, which are detailed in the API documentation for
those endpoints.
|
||||
|
||||
|
||||
```text
|
||||
// Equality & Inequality checks
|
||||
<Selector> == <Value>
|
||||
<Selector> != <Value>
|
||||
|
||||
// Emptiness checks
|
||||
<Selector> is empty
|
||||
<Selector> is not empty
|
||||
|
||||
// Contains checks
|
||||
<Value> in <Selector>
|
||||
<Value> not in <Selector>
|
||||
<Selector> contains <Value>
|
||||
<Selector> not contains <Value>
|
||||
```
|
||||
|
||||
### Selectors
|
||||
|
||||
Selectors are used by matching operators to create an expression. They are
|
||||
defined by a `.` separated list of names. Each name must start with
an ASCII letter and can contain ASCII letters, numbers, and underscores. When
part of the selector references a map value, it may be expressed using the form
`["<map key name>"]` instead of `.<map key name>`. This allows the possibility
of using map keys that are not valid selectors in and of themselves.
|
||||
|
||||
```text
|
||||
// selects the foo key within the ServiceMeta mapping for the
|
||||
// /catalog/service/:service endpoint
|
||||
ServiceMeta.foo
|
||||
|
||||
// Also selects the foo key for the same endpoint
|
||||
ServiceMeta["foo"]
|
||||
```
|
||||
|
||||
### Values
|
||||
|
||||
Values are used by matching operators to create an expression. Values can be any valid selector, a number, or a quoted string. For numbers, any
base 10 integer or floating point number is valid. Quoted strings
may be enclosed in either double quotes or backticks. When enclosed in
backticks they are treated as raw strings, so escape sequences such as `\n`
will not be expanded.
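
Because all of the evaluation logic comes from the
[go-bexpr](https://github.com/hashicorp/go-bexpr) library, the same expressions
can be exercised directly in Go. A minimal sketch, with an illustrative
`Service` struct standing in for the real API types:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

// Service is a stand-in for the structures the filters actually run against.
type Service struct {
	Service string
	Tags    []string
	Meta    map[string]string
	Port    int
}

func main() {
	services := map[string]*Service{
		"redis1": {Service: "redis", Tags: []string{"primary"}, Meta: map[string]string{"env": "qa"}, Port: 1234},
		"redis2": {Service: "redis", Tags: []string{"secondary"}, Meta: map[string]string{"env": "production"}, Port: 1235},
	}

	// CreateFilter parses the expression; the sample value lets the library
	// validate the selectors against the concrete type up front.
	filter, err := bexpr.CreateFilter(`Meta.env == qa and primary in Tags`, nil, services)
	if err != nil {
		panic(err)
	}

	filtered, err := filter.Execute(services)
	if err != nil {
		panic(err)
	}
	// Only the redis1 entry survives the filter.
	fmt.Println(filtered.(map[string]*Service))
}
```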
|
||||
|
||||
## Filter Utilization
|
||||
|
||||
Generally, only the main object is filtered. When filtering for
an item within an array that is not at the top level, the entire array that contains the item
will be returned. The main object is usually the outermost object of a response,
but in some cases, such as the [`/catalog/node/:node`](api/catalog.html#list-services-for-node)
endpoint, the filtering is performed on an object embedded within the results.
|
||||
|
||||
### Performance
|
||||
|
||||
Filters are executed on the servers and therefore consume some amount
of CPU time on the server. For non-stale queries this means that the filter
is executed on the leader.
|
||||
|
||||
### Filtering Examples
|
||||
|
||||
#### Agent API
|
||||
|
||||
**Command - Unfiltered**
|
||||
|
||||
```sh
|
||||
curl -X GET localhost:8500/v1/agent/services
|
||||
```
|
||||
|
||||
**Response - Unfiltered**
|
||||
|
||||
```json
|
||||
{
|
||||
"redis1": {
|
||||
"ID": "redis1",
|
||||
"Service": "redis",
|
||||
"Tags": [
|
||||
"primary",
|
||||
"production"
|
||||
],
|
||||
"Meta": {
|
||||
"env": "production",
|
||||
"foo": "bar"
|
||||
},
|
||||
"Port": 1234,
|
||||
"Address": "",
|
||||
"Weights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"EnableTagOverride": false
|
||||
},
|
||||
"redis2": {
|
||||
"ID": "redis2",
|
||||
"Service": "redis",
|
||||
"Tags": [
|
||||
"secondary",
|
||||
"production"
|
||||
],
|
||||
"Meta": {
|
||||
"env": "production",
|
||||
"foo": "bar"
|
||||
},
|
||||
"Port": 1235,
|
||||
"Address": "",
|
||||
"Weights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"EnableTagOverride": false
|
||||
},
|
||||
"redis3": {
|
||||
"ID": "redis3",
|
||||
"Service": "redis",
|
||||
"Tags": [
|
||||
"primary",
|
||||
"qa"
|
||||
],
|
||||
"Meta": {
|
||||
"env": "qa"
|
||||
},
|
||||
"Port": 1234,
|
||||
"Address": "",
|
||||
"Weights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"EnableTagOverride": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Command - Filtered**
|
||||
|
||||
```sh
|
||||
curl -G localhost:8500/v1/agent/services --data-urlencode 'filter=Meta.env == qa'
|
||||
```
|
||||
|
||||
**Response - Filtered**
|
||||
|
||||
```json
|
||||
{
|
||||
"redis3": {
|
||||
"ID": "redis3",
|
||||
"Service": "redis",
|
||||
"Tags": [
|
||||
"primary",
|
||||
"qa"
|
||||
],
|
||||
"Meta": {
|
||||
"env": "qa"
|
||||
},
|
||||
"Port": 1234,
|
||||
"Address": "",
|
||||
"Weights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"EnableTagOverride": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Catalog API
|
||||
|
||||
**Command - Unfiltered**
|
||||
|
||||
```sh
|
||||
curl -X GET localhost:8500/v1/catalog/service/api-internal
|
||||
```
|
||||
|
||||
**Response - Unfiltered**
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"ID": "b4f64e8c-5c7d-11e9-bf68-8c8590bd0966",
|
||||
"Node": "node-1",
|
||||
"Address": "198.18.0.1",
|
||||
"Datacenter": "dc1",
|
||||
"TaggedAddresses": null,
|
||||
"NodeMeta": {
|
||||
"agent": "true",
|
||||
"arch": "i386",
|
||||
"os": "darwin"
|
||||
},
|
||||
"ServiceKind": "",
|
||||
"ServiceID": "api-internal",
|
||||
"ServiceName": "api-internal",
|
||||
"ServiceTags": [
|
||||
"tag"
|
||||
],
|
||||
"ServiceAddress": "",
|
||||
"ServiceWeights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"ServiceMeta": {
|
||||
"environment": "qa"
|
||||
},
|
||||
"ServicePort": 9090,
|
||||
"ServiceEnableTagOverride": false,
|
||||
"ServiceProxyDestination": "",
|
||||
"ServiceProxy": {},
|
||||
"ServiceConnect": {},
|
||||
"CreateIndex": 30,
|
||||
"ModifyIndex": 30
|
||||
},
|
||||
{
|
||||
"ID": "b4faf93a-5c7d-11e9-840d-8c8590bd0966",
|
||||
"Node": "node-2",
|
||||
"Address": "198.18.0.2",
|
||||
"Datacenter": "dc1",
|
||||
"TaggedAddresses": null,
|
||||
"NodeMeta": {
|
||||
"arch": "arm",
|
||||
"os": "linux"
|
||||
},
|
||||
"ServiceKind": "",
|
||||
"ServiceID": "api-internal",
|
||||
"ServiceName": "api-internal",
|
||||
"ServiceTags": [
|
||||
"test",
|
||||
"tag"
|
||||
],
|
||||
"ServiceAddress": "",
|
||||
"ServiceWeights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"ServiceMeta": {
|
||||
"environment": "production"
|
||||
},
|
||||
"ServicePort": 9090,
|
||||
"ServiceEnableTagOverride": false,
|
||||
"ServiceProxyDestination": "",
|
||||
"ServiceProxy": {},
|
||||
"ServiceConnect": {},
|
||||
"CreateIndex": 29,
|
||||
"ModifyIndex": 29
|
||||
},
|
||||
{
|
||||
"ID": "b4fbe7f4-5c7d-11e9-ac82-8c8590bd0966",
|
||||
"Node": "node-4",
|
||||
"Address": "198.18.0.4",
|
||||
"Datacenter": "dc1",
|
||||
"TaggedAddresses": null,
|
||||
"NodeMeta": {
|
||||
"arch": "i386",
|
||||
"os": "freebsd"
|
||||
},
|
||||
"ServiceKind": "",
|
||||
"ServiceID": "api-internal",
|
||||
"ServiceName": "api-internal",
|
||||
"ServiceTags": [],
|
||||
"ServiceAddress": "",
|
||||
"ServiceWeights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"ServiceMeta": {
|
||||
"environment": "qa"
|
||||
},
|
||||
"ServicePort": 9090,
|
||||
"ServiceEnableTagOverride": false,
|
||||
"ServiceProxyDestination": "",
|
||||
"ServiceProxy": {},
|
||||
"ServiceConnect": {},
|
||||
"CreateIndex": 28,
|
||||
"ModifyIndex": 28
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
**Command - Filtered**
|
||||
|
||||
```sh
|
||||
curl -G localhost:8500/v1/catalog/service/api-internal --data-urlencode 'filter=NodeMeta.os == linux'
|
||||
```
|
||||
|
||||
**Response - Filtered**
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"ID": "b4faf93a-5c7d-11e9-840d-8c8590bd0966",
|
||||
"Node": "node-2",
|
||||
"Address": "198.18.0.2",
|
||||
"Datacenter": "dc1",
|
||||
"TaggedAddresses": null,
|
||||
"NodeMeta": {
|
||||
"arch": "arm",
|
||||
"os": "linux"
|
||||
},
|
||||
"ServiceKind": "",
|
||||
"ServiceID": "api-internal",
|
||||
"ServiceName": "api-internal",
|
||||
"ServiceTags": [
|
||||
"test",
|
||||
"tag"
|
||||
],
|
||||
"ServiceAddress": "",
|
||||
"ServiceWeights": {
|
||||
"Passing": 1,
|
||||
"Warning": 1
|
||||
},
|
||||
"ServiceMeta": {
|
||||
"environment": "production"
|
||||
},
|
||||
"ServicePort": 9090,
|
||||
"ServiceEnableTagOverride": false,
|
||||
"ServiceProxyDestination": "",
|
||||
"ServiceProxy": {},
|
||||
"ServiceConnect": {},
|
||||
"CreateIndex": 29,
|
||||
"ModifyIndex": 29
|
||||
}
|
||||
]
|
||||
|
||||
```
|
||||
|
||||
#### Health API
|
||||
|
||||
**Command - Unfiltered**
|
||||
|
||||
```sh
|
||||
curl -X GET localhost:8500/v1/health/node/node-1
|
||||
```
|
||||
|
||||
**Response - Unfiltered**
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"Node": "node-1",
|
||||
"CheckID": "node-health",
|
||||
"Name": "Node level check",
|
||||
"Status": "critical",
|
||||
"Notes": "",
|
||||
"Output": "",
|
||||
"ServiceID": "",
|
||||
"ServiceName": "",
|
||||
"ServiceTags": [],
|
||||
"Definition": {},
|
||||
"CreateIndex": 13,
|
||||
"ModifyIndex": 13
|
||||
},
|
||||
{
|
||||
"Node": "node-1",
|
||||
"CheckID": "svc-web-health",
|
||||
"Name": "Service level check - web",
|
||||
"Status": "warning",
|
||||
"Notes": "",
|
||||
"Output": "",
|
||||
"ServiceID": "",
|
||||
"ServiceName": "web",
|
||||
"ServiceTags": [],
|
||||
"Definition": {},
|
||||
"CreateIndex": 18,
|
||||
"ModifyIndex": 18
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
**Command - Filtered**
|
||||
|
||||
```sh
|
||||
curl -G localhost:8500/v1/health/node/node-1 --data-urlencode 'filter=ServiceName != ""'
|
||||
```
|
||||
|
||||
**Response - Filtered**
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"Node": "node-1",
|
||||
"CheckID": "svc-web-health",
|
||||
"Name": "Service level check - web",
|
||||
"Status": "warning",
|
||||
"Notes": "",
|
||||
"Output": "",
|
||||
"ServiceID": "",
|
||||
"ServiceName": "web",
|
||||
"ServiceTags": [],
|
||||
"Definition": {},
|
||||
"CreateIndex": 18,
|
||||
"ModifyIndex": 18
|
||||
}
|
||||
]
|
||||
```
|
|
@@ -42,6 +42,9 @@ The table below shows this endpoint's support for
|
|||
the datacenter of the agent being queried. This is specified as part of the
|
||||
URL as a query parameter.
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -80,6 +83,23 @@ $ curl \
|
|||
]
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter will be executed against each health check in the results list with
|
||||
the following selectors and filter operations being supported:
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| ------------- | ---------------------------------- |
|
||||
| `CheckID` | Equal, Not Equal |
|
||||
| `Name` | Equal, Not Equal |
|
||||
| `Node` | Equal, Not Equal |
|
||||
| `Notes` | Equal, Not Equal |
|
||||
| `Output` | Equal, Not Equal |
|
||||
| `ServiceID` | Equal, Not Equal |
|
||||
| `ServiceName` | Equal, Not Equal |
|
||||
| `ServiceTags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Status` | Equal, Not Equal |
|
||||
|
||||
## List Checks for Service
|
||||
|
||||
This endpoint returns the checks associated with the service provided on the
|
||||
|
@@ -118,6 +138,9 @@ The table below shows this endpoint's support for
|
|||
will filter the results to nodes with the specified key/value pairs. This is
|
||||
specified as part of the URL as a query parameter.
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -138,11 +161,29 @@ $ curl \
|
|||
"Output": "",
|
||||
"ServiceID": "redis",
|
||||
"ServiceName": "redis",
|
||||
"ServiceTags": ["primary"]
|
||||
"ServiceTags": ["primary"]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter will be executed against each health check in the results list with
|
||||
the following selectors and filter operations being supported:
|
||||
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| ------------- | ---------------------------------- |
|
||||
| `CheckID` | Equal, Not Equal |
|
||||
| `Name` | Equal, Not Equal |
|
||||
| `Node` | Equal, Not Equal |
|
||||
| `Notes` | Equal, Not Equal |
|
||||
| `Output` | Equal, Not Equal |
|
||||
| `ServiceID` | Equal, Not Equal |
|
||||
| `ServiceName` | Equal, Not Equal |
|
||||
| `ServiceTags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Status` | Equal, Not Equal |
|
||||
|
||||
## List Nodes for Service
|
||||
|
||||
This endpoint returns the nodes providing the service indicated on the path.
|
||||
|
@@ -178,8 +219,8 @@ The table below shows this endpoint's support for
|
|||
part of the URL as a query parameter.
|
||||
|
||||
- `tag` `(string: "")` - Specifies the tag to filter the list. This is
|
||||
specified as part of the URL as a query parameter. Can be used multiple times
for additional filtering, returning only the results that include all of the tag
|
||||
values provided.
|
||||
|
||||
- `node-meta` `(string: "")` - Specifies a desired node metadata key/value pair
|
||||
|
@@ -191,6 +232,9 @@ The table below shows this endpoint's support for
|
|||
with all checks in the `passing` state. This can be used to avoid additional
|
||||
filtering on the client side.
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -240,7 +284,7 @@ $ curl \
|
|||
"Output": "",
|
||||
"ServiceID": "redis",
|
||||
"ServiceName": "redis",
|
||||
"ServiceTags": ["primary"]
|
||||
"ServiceTags": ["primary"]
|
||||
},
|
||||
{
|
||||
"Node": "foobar",
|
||||
|
@@ -258,6 +302,55 @@ $ curl \
|
|||
]
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter will be executed against each entry in the top level results list with the
|
||||
following selectors and filter operations being supported:
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| ---------------------------------------------- | ---------------------------------- |
|
||||
| `Checks` | Is Empty, Is Not Empty |
|
||||
| `Checks.CheckID` | Equal, Not Equal |
|
||||
| `Checks.Name` | Equal, Not Equal |
|
||||
| `Checks.Node` | Equal, Not Equal |
|
||||
| `Checks.Notes` | Equal, Not Equal |
|
||||
| `Checks.Output` | Equal, Not Equal |
|
||||
| `Checks.ServiceID` | Equal, Not Equal |
|
||||
| `Checks.ServiceName` | Equal, Not Equal |
|
||||
| `Checks.ServiceTags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Checks.Status` | Equal, Not Equal |
|
||||
| `Node.Address` | Equal, Not Equal |
|
||||
| `Node.Datacenter` | Equal, Not Equal |
|
||||
| `Node.ID` | Equal, Not Equal |
|
||||
| `Node.Meta` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Node.Meta.<any>` | Equal, Not Equal |
|
||||
| `Node.Node` | Equal, Not Equal |
|
||||
| `Node.TaggedAddresses` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Node.TaggedAddresses.<any>` | Equal, Not Equal |
|
||||
| `Service.Address` | Equal, Not Equal |
|
||||
| `Service.Connect.Native` | Equal, Not Equal |
|
||||
| `Service.EnableTagOverride` | Equal, Not Equal |
|
||||
| `Service.ID` | Equal, Not Equal |
|
||||
| `Service.Kind` | Equal, Not Equal |
|
||||
| `Service.Meta` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Service.Meta.<any>` | Equal, Not Equal |
|
||||
| `Service.Port` | Equal, Not Equal |
|
||||
| `Service.Proxy.DestinationServiceID` | Equal, Not Equal |
|
||||
| `Service.Proxy.DestinationServiceName` | Equal, Not Equal |
|
||||
| `Service.Proxy.LocalServiceAddress` | Equal, Not Equal |
|
||||
| `Service.Proxy.LocalServicePort` | Equal, Not Equal |
|
||||
| `Service.Proxy.Upstreams` | Is Empty, Is Not Empty |
|
||||
| `Service.Proxy.Upstreams.Datacenter` | Equal, Not Equal |
|
||||
| `Service.Proxy.Upstreams.DestinationName` | Equal, Not Equal |
|
||||
| `Service.Proxy.Upstreams.DestinationNamespace` | Equal, Not Equal |
|
||||
| `Service.Proxy.Upstreams.DestinationType` | Equal, Not Equal |
|
||||
| `Service.Proxy.Upstreams.LocalBindAddress` | Equal, Not Equal |
|
||||
| `Service.Proxy.Upstreams.LocalBindPort` | Equal, Not Equal |
|
||||
| `Service.Service` | Equal, Not Equal |
|
||||
| `Service.Tags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Service.Weights.Passing` | Equal, Not Equal |
|
||||
| `Service.Weights.Warning` | Equal, Not Equal |
|
||||
|
||||
## List Nodes for Connect-capable Service
|
||||
|
||||
This endpoint returns the nodes providing a
|
||||
|
@@ -311,6 +404,9 @@ The table below shows this endpoint's support for
|
|||
will filter the results to nodes with the specified key/value pairs. This is
|
||||
specified as part of the URL as a query parameter.
|
||||
|
||||
- `filter` `(string: "")` - Specifies the expression used to filter the
query's results prior to returning the data.
|
||||
|
||||
### Sample Request
|
||||
|
||||
```text
|
||||
|
@@ -346,3 +442,21 @@ $ curl \
|
|||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Filtering
|
||||
|
||||
The filter will be executed against each health check in the results list with
|
||||
the following selectors and filter operations being supported:
|
||||
|
||||
|
||||
| Selector | Supported Operations |
|
||||
| ------------- | ---------------------------------- |
|
||||
| `CheckID` | Equal, Not Equal |
|
||||
| `Name` | Equal, Not Equal |
|
||||
| `Node` | Equal, Not Equal |
|
||||
| `Notes` | Equal, Not Equal |
|
||||
| `Output` | Equal, Not Equal |
|
||||
| `ServiceID` | Equal, Not Equal |
|
||||
| `ServiceName` | Equal, Not Equal |
|
||||
| `ServiceTags` | In, Not In, Is Empty, Is Not Empty |
|
||||
| `Status` | Equal, Not Equal |
|
||||
|
|
|
@@ -1,33 +1,17 @@
|
|||
---
|
||||
layout: api
|
||||
page_title: HTTP API
|
||||
sidebar_current: api-overview
|
||||
sidebar_current: api-introduction
|
||||
description: |-
|
||||
Consul exposes a RESTful HTTP API to control almost every aspect of the
|
||||
Consul agent.
|
||||
---
|
||||
|
||||
# HTTP API
|
||||
# HTTP API Structure
|
||||
|
||||
The main interface to Consul is a RESTful HTTP API. The API can perform basic
|
||||
CRUD operations on nodes, services, checks, configuration, and more.
|
||||
|
||||
## Version Prefix
|
||||
|
||||
All API routes are prefixed with `/v1/`. This documentation is only for the v1 API.
|
||||
|
||||
## ACLs
|
||||
|
||||
Several endpoints in Consul use or require ACL tokens to operate. An agent
|
||||
can be configured to use a default token in requests using the `acl_token`
|
||||
configuration option. However, the token can also be specified per-request
using the `X-Consul-Token` request header, a Bearer token in the
`Authorization` header, or the `token` query string parameter. The request
header takes precedence over the default token, and the query string parameter
takes precedence over everything.
|
||||
|
||||
For more details about ACLs, please see the [ACL Guide](/docs/guides/acl.html).
|
||||
|
||||
## Authentication
|
||||
|
||||
When authentication is enabled, a Consul token should be provided to API
|
||||
|
@@ -47,234 +31,9 @@ Previously this was provided via a `?token=` query parameter. This functionality
|
|||
exists on many endpoints for backwards compatibility, but its use is **highly
|
||||
discouraged**, since it can show up in access logs as part of the URL.
|
||||
|
||||
## Blocking Queries
|
||||
## Version Prefix
|
||||
|
||||
Many endpoints in Consul support a feature known as "blocking queries". A
|
||||
blocking query is used to wait for a potential change using long polling. Not
|
||||
all endpoints support blocking, but each endpoint uniquely documents its support
|
||||
for blocking queries in the documentation.
|
||||
|
||||
Endpoints that support blocking queries return an HTTP header named
|
||||
`X-Consul-Index`. This is a unique identifier representing the current state of
|
||||
the requested resource.
|
||||
|
||||
On subsequent requests for this resource, the client can set the `index` query
|
||||
string parameter to the value of `X-Consul-Index`, indicating that the client
|
||||
wishes to wait for any changes subsequent to that index.
|
||||
|
||||
When this is provided, the HTTP request will "hang" until a change in the system
|
||||
occurs, or the maximum timeout is reached. A critical note is that the return of
|
||||
a blocking request is **no guarantee** of a change. It is possible that the
|
||||
timeout was reached or that there was an idempotent write that does not affect
|
||||
the result of the query.
|
||||
|
||||
In addition to `index`, endpoints that support blocking will also honor a `wait`
|
||||
parameter specifying a maximum duration for the blocking request. This is
|
||||
limited to 10 minutes. If not set, the wait time defaults to 5 minutes. This
|
||||
value can be specified in the form of "10s" or "5m" (i.e., 10 seconds or 5
|
||||
minutes, respectively). A small random amount of additional wait time is added
|
||||
to the supplied maximum `wait` time to spread out the wake up time of any
|
||||
concurrent requests. This adds up to `wait / 16` additional time to the maximum
|
||||
duration.
|
||||
|
||||
### Implementation Details
|
||||
|
||||
While the mechanism is relatively simple to work with, there are a few edge
|
||||
cases that must be handled correctly.
|
||||
|
||||
* **Reset the index if it goes backwards**. While indexes in general are
|
||||
monotonically increasing(i.e. they should only ever increase as time passes),
|
||||
there are several real-world scenarios in
|
||||
which they can go backwards for a given query. Implementations must check
|
||||
to see if a returned index is lower than the previous value,
|
||||
and if it is, should reset index to `0` - effectively restarting their blocking loop.
|
||||
Failure to do so may cause the client to miss future updates for an unbounded
|
||||
time, or to use an invalid index value that causes no blocking and increases
|
||||
load on the servers. Cases where this can occur include:
|
||||
* If a raft snapshot is restored on the servers with older version of the data.
|
||||
* KV list operations where an item with the highest index is removed.
|
||||
* A Consul upgrade changes the way watches work to optimize them with more
|
||||
granular indexes.
|
||||
|
||||
* **Sanity check index is greater than zero**. After the initial request (or a
|
||||
reset as above) the `X-Consul-Index` returned _should_ always be greater than zero. It
|
||||
is a bug in Consul if it is not, however this has happened a few times and can
|
||||
still be triggered on some older Consul versions. It's especially bad because it
|
||||
causes blocking clients that are not aware to enter a busy loop, using excessive
|
||||
client CPU and causing high load on servers. It is _always_ safe to use an
|
||||
index of `1` to wait for updates when the data being requested doesn't exist
|
||||
yet, so clients _should_ sanity check that their index is at least 1 after
|
||||
each blocking response is handled to be sure they actually block on the next
|
||||
request.
|
||||
|
||||
* **Rate limit**. The blocking query mechanism is reasonably efficient when updates
|
||||
are relatively rare (order of tens of seconds to minutes between updates). In cases
|
||||
where a result gets updated very fast however - possibly during an outage or incident
|
||||
with a badly behaved client - blocking query loops degrade into busy loops that
|
||||
consume excessive client CPU and cause high server load. While it's possible to just add a sleep
|
||||
to every iteration of the loop, this is **not** recommended since it causes update
|
||||
delivery to be delayed in the happy case, and it can exacerbate the problem since
|
||||
it increases the chance that the index has changed on the next request. Clients
|
||||
_should_ instead rate limit the loop so that in the happy case they proceed without
|
||||
waiting, but when values start to churn quickly they degrade into polling at a
|
||||
reasonable rate (say every 15 seconds). Ideally this is done with an algorithm that
|
||||
allows a couple of quick successive deliveries before it starts to limit rate - a
|
||||
[token bucket](https://en.wikipedia.org/wiki/Token_bucket) with burst of 2 is a simple
|
||||
way to achieve this.
|
||||
|
||||
### Hash-based Blocking Queries

A limited number of agent endpoints also support blocking; however, because the
state is local to the agent and not managed with a consistent raft index, their
blocking mechanism is different.

Since there is no monotonically increasing index, each response instead contains
a header, `X-Consul-ContentHash`, which is an opaque hash digest generated by
hashing over all of the relevant fields in the response.

Subsequent requests may be sent with a query parameter `hash=<value>`, where
`value` is the last hash header value seen. Such a request will block until the
`wait` timeout has passed or until the local agent's state changes in such a way
that the hash would be different.

Other than the different header and query parameter names, the biggest
difference is that hash values are opaque and can't be compared to see if one
result is older or newer than another. In general, hash-based blocking will not
return too early due to an idempotent update, since the hash will remain the
same unless the result actually changes; however, as with index-based blocking,
there is no strict guarantee that clients will never observe the same result
delivered before the full timeout has elapsed.

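As an illustration of how a client might consume this, the loop below long-polls
an agent endpoint with plain `net/http`. The local agent address and the
`/v1/agent/service/web` path are assumptions for the sketch - the same pattern
applies to any endpoint that supports hash-based blocking - and a real client
should rate limit the loop as described above.

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	hash := ""
	for {
		// An empty hash returns the current state immediately; afterwards each
		// request blocks until the state's hash changes or `wait` elapses.
		url := "http://127.0.0.1:8500/v1/agent/service/web?wait=5m&hash=" + hash
		resp, err := http.Get(url)
		if err != nil {
			log.Fatal(err)
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			log.Fatal(err)
		}

		// Hashes are opaque: equality is the only meaningful comparison.
		if newHash := resp.Header.Get("X-Consul-ContentHash"); newHash != hash {
			hash = newHash
			fmt.Printf("state changed (hash %s): %s\n", hash, body)
		}
	}
}
```
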
## Consistency Modes

Most of the read query endpoints support multiple levels of consistency. Since
no policy will suit all clients' needs, these consistency modes allow the user
to have the ultimate say in how to balance the trade-offs inherent in a
distributed system.

The three read modes are:

- `default` - If not specified, the default is strongly consistent in almost all
  cases. However, there is a small window in which a new leader may be elected
  during which the old leader may service stale values. The trade-off is fast
  reads but potentially stale values. The condition resulting in stale reads is
  hard to trigger, and most clients should not need to worry about this case.
  Also, note that this race condition only applies to reads, not writes.

- `consistent` - This mode is strongly consistent without caveats. It requires
  that a leader verify with a quorum of peers that it is still leader. This
  introduces an additional round trip to all server nodes. The trade-off is
  increased latency due to the extra round trip. Most clients should not use this
  unless they cannot tolerate a stale read.

- `stale` - This mode allows any server to service the read regardless of
  whether it is the leader. This means reads can be arbitrarily stale; however,
  results are generally consistent to within 50 milliseconds of the leader. The
  trade-off is very fast and scalable reads with a higher likelihood of stale
  values. Since this mode allows reads without a leader, a cluster that is
  unavailable will still be able to respond to queries.

To switch between these modes, either the `stale` or the `consistent` query
parameter should be provided on requests. It is an error to provide both.

Note that some endpoints support a `cached` parameter, which has some of the
same semantics as `stale` but different trade-offs. This behavior is described
in [Agent Caching](#agent-caching).

To support bounding the acceptable staleness of data, responses provide the
`X-Consul-LastContact` header containing the time in milliseconds that a server
was last contacted by the leader node. The `X-Consul-KnownLeader` header also
indicates if there is a known leader. These can be used by clients to gauge the
staleness of a result and take appropriate action.

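For example, a client using the official Go API client can request a stale read
and then bound the staleness using the response metadata, which is populated
from these headers. The 5-second threshold below is an arbitrary illustration:

```
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// AllowStale lets any server answer, trading freshness for scalability.
	nodes, meta, err := client.Catalog().Nodes(&api.QueryOptions{AllowStale: true})
	if err != nil {
		log.Fatal(err)
	}

	// LastContact and KnownLeader mirror the X-Consul-LastContact and
	// X-Consul-KnownLeader response headers.
	if !meta.KnownLeader || meta.LastContact > 5*time.Second {
		log.Printf("result may be too stale (last contact: %s)", meta.LastContact)
	}
	fmt.Printf("%d nodes returned\n", len(nodes))
}
```
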
## Agent Caching

Some read endpoints support agent caching and are clearly marked in the
documentation. Agent caching can take two forms, [`simple`](#simple-caching) or
[`background refresh`](#blocking-refresh-caching), depending on the endpoint's
semantics. The documentation for each endpoint identifies which form of caching,
if any, is supported. The details for each are described below.

Where supported, caching can be enabled through the `?cached` parameter.
Combining `?cached` with `?consistent` is an error.

### Simple Caching

Endpoints supporting simple caching may return a result directly from the local
agent's cache without a round trip to the servers. By default the agent caches
results for a relatively long time (3 days) such that it can still return a
result even if the servers are unavailable for an extended period, to enable
"fail static" semantics.

That means that with no other arguments, `?cached` queries might receive a
response which is days old. To request better freshness, the HTTP
`Cache-Control` header may be set with a directive like `max-age=<seconds>`. In
this case the agent will attempt to re-fetch the result from the servers if the
cached value is older than the given `max-age`. If the servers can't be reached,
a 500 is returned as normal.

To allow clients to maintain fresh results in normal operation but accept stale
ones if the servers are unavailable, the `stale-if-error=<seconds>` directive
may additionally be provided in the `Cache-Control` header. This will return the
cached value even if it's older than `max-age` (provided it's not older than
`stale-if-error`) rather than a 500. It must be provided along with `max-age` or
`must-revalidate`. If the `Age` response header is larger than `max-age`, it
indicates that the servers were unreachable and a cached version was returned
instead.

For example, assuming there is a cached response that is 65 seconds old, and
that the servers are currently unavailable, `Cache-Control: max-age=30` will
result in a 500 error, while `Cache-Control: max-age=30, stale-if-error=259200`
will result in the cached response being returned.

A request setting either the `max-age=0` or the `must-revalidate` directive will
cause the agent to always re-fetch the response from the servers. Either can be
combined with `stale-if-error=<seconds>` to ensure fresh results when the
servers are available, while falling back to cached results if the request to
the servers fails.

Requests that do not use `?cached` currently bypass the cache entirely, so the
cached response returned might be more stale than the last uncached response
returned by the same agent. If this causes problems, it is possible to make
requests using `?cached` with `Cache-Control: must-revalidate` set, which yields
always-fresh results while keeping the cache populated with the most recent
result.

In all cases the HTTP `X-Cache` header is always set in the response to either
`HIT` or `MISS`, indicating whether the response was served from cache or not.

For cache hits, the HTTP `Age` header is always set in the response to indicate
how many seconds have passed since that response was fetched from the servers.

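As a sketch with plain `net/http`, the request below combines `?cached` with the
directives described above and then inspects the response headers. The agent
address and endpoint path are placeholders - check each endpoint's documentation
for whether it supports simple caching:

```
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET",
		"http://127.0.0.1:8500/v1/health/service/web?cached", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Re-fetch if the cached result is older than 30 seconds, but accept a
	// cached result up to 3 days old if the servers can't be reached.
	req.Header.Set("Cache-Control", "max-age=30, stale-if-error=259200")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// X-Cache is HIT or MISS; Age is only meaningful for cache hits.
	fmt.Printf("X-Cache: %s, Age: %s\n",
		resp.Header.Get("X-Cache"), resp.Header.Get("Age"))
}
```
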
### Background Refresh Caching

Endpoints supporting background refresh caching may return a result directly
from the local agent's cache without a round trip to the servers. The first
fetch that is a miss will cause an initial fetch from the servers, but will also
trigger the agent to begin a background blocking query that watches for any
changes to that result and updates the cached value if changes occur.

Subsequent requests will _always_ be a cache hit until there has been no request
for the resource for the TTL (which is typically 3 days).

Clients can perform blocking queries against the local agent, and these will be
served from the cache. This allows multiple clients to watch the same resource
locally while only a single blocking watch for that resource is made to the
servers from a given client agent.

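Below is a sketch of that pattern with the official Go client. Setting
`UseCache` together with `WaitIndex` blocks against the agent's actively
refreshed cache entry, so many local watchers share one upstream watch. The
`web` service name is illustrative and this assumes the endpoint supports
background refresh caching; a production client would also rate limit the loop
and handle index resets as described under blocking queries.

```
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	var index uint64
	for {
		// UseCache sends `?cached`; with WaitIndex set, the blocking query is
		// answered from the agent's locally maintained cache entry.
		entries, meta, err := client.Health().Service("web", "", true,
			&api.QueryOptions{UseCache: true, WaitIndex: index})
		if err != nil {
			log.Fatal(err)
		}
		index = meta.LastIndex

		// CacheHit and CacheAge mirror the X-Cache and Age response headers.
		fmt.Printf("%d healthy instances (cache hit: %v, age: %s)\n",
			len(entries), meta.CacheHit, meta.CacheAge)
	}
}
```
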
HTTP `Cache-Control` headers are ignored in this mode, since the cache is being
actively updated and has different semantics to a typical passive cache.

In all cases the HTTP `X-Cache` header is always set in the response to either
`HIT` or `MISS`, indicating whether the response was served from cache or not.

For cache hits, the HTTP `Age` header is always set in the response to indicate
how many seconds have passed since that response was fetched from the servers.
As long as the local agent has an active connection to the servers, the age will
always be `0` since the value is up-to-date. If the agent gets disconnected, the
cached result is still returned, but with an `Age` that indicates how many
seconds have elapsed since the local agent was disconnected from the servers,
during which time updates to the result might have been missed.

All API routes are prefixed with `/v1/`. This documentation is only for the v1 API.

## Formatted JSON Output

@@ -323,3 +82,6 @@ UUID-format identifiers generated by the Consul API use the

These UUID-format strings are generated using high-quality, purely random bytes.
The format is not intended to be RFC compliant, merely to use a well-understood
string representation of a 128-bit value.

@@ -71,3 +71,8 @@ Usage: `consul catalog nodes [options]`

- `-service=<id or name>` - Service id or name to filter nodes. Only nodes
  which are providing the given service will be returned.

- `-filter=<filter>` - Expression to use for filtering the results. Can be passed
  via stdin by using `-` for the value or from a file by passing `@<file path>`.
  See the [`/catalog/nodes` API documentation](api/catalog.html#filtering) for a
  description of what is filterable.

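The same expressions drive the HTTP API's `filter` query parameter. As a small
sketch in Go (the `Meta.env` selector assumes nodes registered with an `env`
node-meta key, shown for illustration):

```
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// Pass a bexpr filter expression via the `filter` query parameter.
	params := url.Values{}
	params.Set("filter", `Meta.env == "production"`)

	resp, err := http.Get("http://127.0.0.1:8500/v1/catalog/nodes?" + params.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```
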
@@ -1,11 +1,25 @@
<% wrap_layout :inner do %>
  <% content_for :sidebar do %>
    <ul class="nav docs-sidenav">
      <li<%= sidebar_current("api-overview") %>>
        <a href="/api/index.html">API Overview</a>
      <li<%= sidebar_current("api-introduction") %>>
        <a href="/api/index.html">API Introduction</a>
      </li>
      <li<%= sidebar_current("api-libraries-and-sdks") %>>
        <a href="/api/libraries-and-sdks.html">Libraries & SDKs</a>
      <li<%= sidebar_current("api-features") %>>
        <a href="/api/features/consistency.html">API Features</a>
        <ul class="nav">
          <li<%= sidebar_current("api-consistency-modes") %>>
            <a href="/api/features/consistency.html">Consistency Modes</a>
          </li>
          <li<%= sidebar_current("api-blocking-queries") %>>
            <a href="/api/features/blocking.html">Blocking Queries</a>
          </li>
          <li<%= sidebar_current("api-filtering") %>>
            <a href="/api/features/filtering.html">Filtering</a>
          </li>
          <li<%= sidebar_current("api-agent-caching") %>>
            <a href="/api/features/caching.html">Agent Caching</a>
          </li>
        </ul>
      </li>

      <hr>

@@ -102,7 +116,14 @@
      <li<%= sidebar_current("api-txn") %>>
        <a href="/api/txn.html">Transactions</a>
      </li>
    </ul>

    <hr>

    <li<%= sidebar_current("api-libraries-and-sdks") %>>
      <a href="/api/libraries-and-sdks.html">Libraries & SDKs</a>
    </li>
  </ul>

  <% end %>

  <%= yield %>