ui: Replace NaN and undefined metrics values with `-` (#9200)

* ui: Add functionality to metrics mocks:

1. More randomness during blocking queries
2. NaN and undefined values that can come from Prometheus
3. General trivial amendments to bring things closer to the project's style
* Provider should always provide data as a string or undefined

* Use a placeholder `-` if the metrics endpoint responds with undefined data
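
A minimal sketch of the guard this change boils down to on the provider side (the `displayValue`/`formatter` names here are stand-ins, not the provider's real API):

// Sketch only: `formatter` stands in for whichever unit formatter the
// provider applies; `v` is the raw Prometheus sample value, which is now
// always passed through as a string or undefined.
function displayValue(v, formatter) {
  // isNaN coerces its argument, so isNaN(undefined), isNaN('NaN') and
  // isNaN('undefined') are all true, and every non-numeric sample
  // collapses to the '-' placeholder.
  return isNaN(v) ? '-' : formatter(v);
}

const rps = v => `${Math.round(v)} rps`;
displayValue('42.4', rps);    // "42 rps"
displayValue('NaN', rps);     // "-"
displayValue(undefined, rps); // "-"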
John Cowen 2020-11-16 15:22:24 +00:00 committed by GitHub
parent 11db2b37c3
commit 959974e960
4 changed files with 171 additions and 164 deletions


@@ -4,23 +4,50 @@
"resultType": "vector",
"result": [
${
[1].map(function(_){
var type = "service";
var proto = "tcp";
[1].map(item => {
const dc = 'dc1';
const generateTargets = function(num) {
// Seed faker by the number of results we want to make it deterministic
// here and in other correlated endpoints.
fake.seed(num);
return range(num).map(i => {
const nspace = i === 0 ? `default` : `${fake.hacker.noun()}-ns-${i}`;
return {
Name: `service-${fake.random.number({min:0, max:99})}`,
Datacenter: `${dc}`,
Namespace: `${nspace}`
}
})
};
var q = location.search.query;
// little helper to get a deterministic number from the target service
// name string. NOTE: this should be the same as in metrics-proxy/.../query
// endpoint so metrics match what is requested.
const hashStr = function(s) {
for(var i = 0, h = 0xdeadbeef; i < s.length; i++)
h = Math.imul(h ^ s.charCodeAt(i), 2654435761);
return (h ^ h >>> 16) >>> 0;
};
const randExp = function(max, lambda) {
return (-Math.log(1-(1-Math.exp(-lambda))*Math.random())/lambda) * max;
};
const q = location.search.query;
let type = 'service';
let proto = 'tcp';
// Match the relabel arguments since "downstream" appears in both
// "service" and "upstream" type queries' metric names while
// "upstream" appears in downstream query metric names (confusingly).
if (q.match('"upstream"')) {
type = "upstream";
type = 'upstream';
} else if (q.match('"downstream"')) {
type = "downstream";
type = 'downstream';
}
if (q.match('envoy_http_')) {
proto = "http";
proto = 'http';
}
// NOTE!!! The logic below to pick the upstream/downstream service
@@ -30,151 +57,136 @@
// Pick a number of down/upstreams to return based on the cookie variable.
// If you change anything about this variable or its default, you'll need
// to change the topology endpoint to match.
var numResults = 1;
if (type === "upstream") {
numResults = env("CONSUL_UPSTREAM_COUNT", 3);
let numResults = 1;
if (type === 'upstream') {
numResults = parseInt(env('CONSUL_UPSTREAM_COUNT', 3));
}
if (type === "downstream") {
numResults = env("CONSUL_DOWNSTREAM_COUNT", 5);
if (type === 'downstream') {
numResults = parseInt(env('CONSUL_DOWNSTREAM_COUNT', 5));
}
var genFakeServiceNames = function(num) {
// Seed faker by the number of results we want to make it deterministic
// here and in other correlated endpoints.
fake.seed(num);
var serviceNames = [];
for (var i = 0; i < num; i++) {
serviceNames.push(`service-${fake.random.number({min:0, max:99})}`)
}
return serviceNames
};
// Figure out the actual name for the target service
var targetService = "invalid-local-cluster";
var m = q.match(/local_cluster="([^"]*)"/);
if (m && m.length >= 2 && m[1] != "") {
let targetService = 'invalid-local-cluster';
let m = q.match(/local_cluster="([^"]*)"/);
if (m && m.length >= 2 && m[1] !== '') {
targetService = m[1];
}
m = q.match(/consul_service="([^"]*)"/);
if (type == "downstream" && m && m.length >= 2 && m[1] != "") {
if (type === 'downstream' && m && m.length >= 2 && m[1] !== '') {
// downstreams don't have the same selector for the main service
// name.
targetService = m[1];
}
var serviceNames = [];
let targets = [];
switch(type) {
case "downstream": // fallthrough
case "upstream":
serviceNames = genFakeServiceNames(numResults);
case 'downstream': // fallthrough
case 'upstream':
targets = generateTargets(numResults);
break;
default:
// fallthrough
case "service":
serviceNames = [targetService];
case 'service':
targets = [targetService];
break;
}
// little helper to get a deterministic number from the target service
// name string. NOTE: this should be the same as in service-topology
// endpoint so metrics match what is requested.
var hashStr = function(s) {
for(var i = 0, h = 0xdeadbeef; i < s.length; i++)
h = Math.imul(h ^ s.charCodeAt(i), 2654435761);
return (h ^ h >>> 16) >>> 0;
};
var serviceProto = "tcp"
let serviceProto = 'tcp';
// Randomly pick the serviceProtocol which will affect which types of
// stats we return for downstream clusters. But we need it to be
// deterministic for a given service name so that all the downstream
// stats are consistently typed.
fake.seed(hashStr(targetService))
if (fake.random.number(1) > 0.5) {
serviceProto = "http";
serviceProto = 'http';
}
// For up/downstreams only return HTTP metrics half of the time.
// For upstreams it's based on the upstream's protocol which might be
// mixed so alternate protocols for upstreams.
if (type == "upstream") {
if (type === "upstream") {
// Pretend all odd service indexes are tcp and even are http
var wantMod = 0;
if (proto == "tcp") {
wantMod = 1;
}
serviceNames = serviceNames.filter(function(x, i){ return i%2 == wantMod })
const wantMod = proto === 'tcp' ? 1 : 0;
targets = targets.filter((item, i) => i % 2 === wantMod);
}
// For downstreams it's based on the target's protocol which we
// don't really know but all downstreams should be the same type
// so only return metrics for that protocol.
if (type == "downstream" && proto == "http" && serviceProto != "http") {
serviceNames = [];
if (type === 'downstream' && proto === 'http' && serviceProto !== 'http') {
targets = [];
}
// Work out which metric is being queried to make them more realistic.
var range = 100;
let max = 100;
switch(proto) {
case "http":
case 'http':
if (q.match('envoy_response_code_class="5"')) {
// It's the error rate, so make it a percentage
range = 30;
max = 30;
} else if (q.match("rq_completed")) {
// Requests per second
range = 1000;
max = 1000;
} else if (q.match("quantile\\(0.99")) {
// 99 percentile time in ms make it longer than 50 percentile
range = 5000;
max = 5000;
} else if (q.match("quantile\\(0.5")) {
// 50th percentile
range = 500;
max = 500;
}
break;
case "tcp":
case 'tcp':
if (q.match('cx_total')) {
// New conns per second
range = 100;
max = 100;
} else if (q.match('cx_rx_bytes')) {
// inbound data rate tends to be lower than outbound
range = 0.5 * 1e9;
max = 0.5 * 1e9;
} else if (q.match('cx_tx_bytes')) {
// outbound data rate
range = 1e9;
max = 1e9;
}
// no route/connect faile are OK with default 0-100
// no route/connect failed are OK with default 0-100
break;
}
var randExp = function(max, lambda) {
return (-Math.log(1-(1-Math.exp(-lambda))*Math.random())/lambda) * max;
}
// Now generate the data points
return serviceNames.map(function(name, i){
var metric = `{}`;
return targets.map((item, i) => {
let metric = `{}`;
switch(type) {
default:
break;
case "upstream":
case 'upstream':
// TODO: this should really return tcp proxy label for tcp
// metrics but we don't look at that for now.
metric = `{"upstream": "${name}", "envoy_http_conn_manager_prefix": "${name}"}`;
metric = `{"upstream": "${item.Name}", "envoy_http_conn_manager_prefix": "${item.Name}"}`;
break;
case "downstream":
metric = `{"downstream": "${name}", "local_cluster": "${name}"}`;
case 'downstream':
metric = `{"downstream": "${item.Name}", "local_cluster": "${item.Name}"}`;
break;
}
const timestamp = Date.now() / 1000;
let value = randExp(max, 20);
// prometheus can sometimes generate NaN and undefined strings, so
// replicate that randomly
const num = fake.random.number({min: 0, max: 10});
switch(true) {
case num > 8:
value = 'NaN';
break;
case num > 5:
value = 'undefined';
break;
}
return `{
"metric": ${metric},
"value": [
${Date.now()/1000},
"${randExp(range, 20)}"
${timestamp},
"${value}"
]
}`;
}).join(",")
})[0]
})
})
}
]
}
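
For reference, the value generation in the query mock above reduces to a sketch like this (the thresholds mirror the hunk; `fake` is the faker-style helper the mocks already expose and `randExp` is the helper defined above):

// Sketch of the sample-value logic above: roughly two draws in eleven
// become the literal string 'NaN', roughly three become 'undefined', and
// the rest stay numeric, mimicking what Prometheus can hand back.
const mockSampleValue = (fake, randExp, max) => {
  let value = randExp(max, 20);
  const num = fake.random.number({ min: 0, max: 10 });
  switch (true) {
    case num > 8:
      value = 'NaN';
      break;
    case num > 5:
      value = 'undefined';
      break;
  }
  return `"${value}"`;
};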


@@ -6,73 +6,74 @@
${
// We need 15 minutes worth of data at 10 second resolution. Currently we
// always query for two series together so loop twice over this.
[0, 1].map(function(i){
var timePeriodMins = 15;
var resolutionSecs = 10;
var numPoints = (timePeriodMins*60)/resolutionSecs;
var time = (Date.now()/1000) - (timePeriodMins*60);
[0, 1].map((i) => {
const timePeriodMins = 15;
const resolutionSecs = 10;
const numPoints = (timePeriodMins * 60) / resolutionSecs;
let time = (Date.now() / 1000) - (timePeriodMins * 60);
var q = location.search.query;
var proto = "tcp";
var range = 1000;
var riseBias = 10;
var fallBias = 10;
var volatility = 0.2;
var label = "";
const q = location.search.query;
let proto = 'tcp';
let max = 1000;
let riseBias = 10;
let fallBias = 10;
let volatility = 0.2;
let label = '';
if (q.match('envoy_listener_http_downstream_rq_xx')) {
proto = "http"
proto = 'http'
// Switch random value ranges for total vs error rates
switch(i) {
case 0:
range = 1000; // up to 1000 rps for success
label = "Successes";
max = 1000; // up to 1000 rps for success
label = 'Successes';
break;
case 1:
range = 500; // up to 500 errors per second
max = 500; // up to 500 errors per second
fallBias = 1; // fall quicker than we rise
riseBias = 30; // start low generally
volatility = 1;
label = "Errors";
label = 'Errors';
break;
}
} else {
// Type tcp
switch(i) {
case 0:
range = 0.5 * 1e9; // up to 500 mbps received
label = "Inbound";
max = 0.5 * 1e9; // up to 500 mbps received
label = 'Inbound';
break;
case 1:
range = 1e9; // up to 1 gbps
label = "Outbound"
max = 1e9; // up to 1 gbps
label = 'Outbound';
break;
}
}
var randExp = function(max, lambda) {
const randExp = function(max, lambda) {
return (-Math.log(1-(1-Math.exp(-lambda))*Math.random())/lambda) * max;
}
// Starting value
var value = randExp(range, riseBias);
if (value > range) {
value = range;
let value = randExp(max, riseBias);
if (value > max) {
value = max;
}
var points = [];
const points = [];
let rising;
for (var i = 0; i < numPoints; i++) {
points.push(`[${time}, "${value}"]`);
time = time + resolutionSecs;
var rising = (Math.random() > 0.5);
delta = volatility * randExp(range, rising ? riseBias : fallBias);
rising = (Math.random() > 0.5);
delta = volatility * randExp(max, rising ? riseBias : fallBias);
if (!rising) {
// Make it a negative change
delta = 0 - delta;
}
value = value + delta
if (value > range) {
value = range;
if (value > max) {
value = max;
}
if (value < 0) {
value = 0;
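
Pulled out of the template literal, the series generation above is a capped random walk with exponentially biased steps, roughly like this sketch (the function name and defaults are illustrative only):

// Sketch of the random-walk generator in the hunk above: each step adds or
// subtracts an exponentially biased delta and the running value is clamped
// to [0, max], giving bursty but bounded series.
function generateSeries({ max = 1000, riseBias = 10, fallBias = 10, volatility = 0.2, points = 90, stepSecs = 10 } = {}) {
  const randExp = (m, lambda) =>
    (-Math.log(1 - (1 - Math.exp(-lambda)) * Math.random()) / lambda) * m;
  let time = Date.now() / 1000 - points * stepSecs;
  let value = Math.min(randExp(max, riseBias), max);
  const series = [];
  for (let i = 0; i < points; i++) {
    series.push([time, `${value}`]);
    time += stepSecs;
    const rising = Math.random() > 0.5;
    let delta = volatility * randExp(max, rising ? riseBias : fallBias);
    if (!rising) delta = -delta;
    value = Math.min(Math.max(value + delta, 0), max);
  }
  return series;
}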


@@ -1,6 +1,29 @@
${
[1].map(() => {
const dc = location.search.dc;
const generateTargets = function(num) {
// Seed faker by the number of results we want to make it deterministic
// here and in other correlated endpoints.
fake.seed(num);
return range(num).map(i => {
const nspace = i === 0 ? `default` : `${fake.hacker.noun()}-ns-${i}`;
return {
Name: `service-${fake.random.number({min:0, max:99})}`,
Datacenter: `${dc}`,
Namespace: `${nspace}`
}
})
};
// little helper to get a deterministic number from the target service
// name string. NOTE: this should be the same as in metrics-proxy/.../query
// endpoint so metrics match what is requested.
const hashStr = function(s) {
for(var i = 0, h = 0xdeadbeef; i < s.length; i++)
h = Math.imul(h ^ s.charCodeAt(i), 2654435761);
return (h ^ h >>> 16) >>> 0;
};
// NOTE!!! The logic below to pick the upstream/downstream service
// names must exactly match the logic in internal/ui/metrics-proxy/.../query
@@ -9,64 +32,41 @@ ${
// Pick a number of down/upstreams to return based on the cookie variable.
// If you change anything about this variable or its default, you'll need
// to change the topology endpoint to match.
var numUp = env("CONSUL_UPSTREAM_COUNT", 3);
var numDown = env("CONSUL_DOWNSTREAM_COUNT", 5);
const numUp = parseInt(env('CONSUL_UPSTREAM_COUNT', 3));
const numDown = parseInt(env('CONSUL_DOWNSTREAM_COUNT', 5));
var genFakeServiceNames = function(num) {
// Seed faker by the number of results we want to make it deterministic
// here and in other correlated endpoints.
fake.seed(num);
var serviceNames = [];
for (var i = 0; i < num; i++) {
serviceNames.push(`service-${fake.random.number({min:0, max:99})}`)
}
return serviceNames
};
const index = parseInt(location.search.index || 0);
const targetService = location.pathname.get(4)
var upstreams = genFakeServiceNames(numUp);
var downstreams = genFakeServiceNames(numDown);
const upstreams = generateTargets(numUp);
const downstreams = generateTargets(numDown);
const targetService = location.pathname.toString().replace('/v1/internal/ui/service-topology/', '')
// little helper to get a deterministic number from the target service
// name string. NOTE: this should be the same as in metrics-proxy/.../query
// endpoint so metrics match what is requested.
var hashStr = function(s) {
for(var i = 0, h = 0xdeadbeef; i < s.length; i++)
h = Math.imul(h ^ s.charCodeAt(i), 2654435761);
return (h ^ h >>> 16) >>> 0;
};
var serviceProto = "tcp"
// Randomly pick the serviceProtocol which will affect which types of
// stats we return for downstream clusters. But we need it to be
// deterministic for a given service name so that all the downstream
// stats are consistently typed.
let serviceProto = 'tcp';
fake.seed(hashStr(targetService))
if (fake.random.number(1) > 0.5) {
serviceProto = "http";
serviceProto = 'http';
}
fake.seed(index);
return `
{
"Protocol": "${serviceProto}",
"FilteredByACLs": ${fake.random.boolean()},
"Upstreams":
[
"Upstreams": [
${
upstreams.map((item, i) => {
let hasPerms = fake.random.boolean();
const hasPerms = fake.random.boolean();
// if hasPerms is true allowed is always false as some restrictions apply
let allowed = hasPerms ? false : fake.random.boolean();
const allowed = hasPerms ? false : fake.random.boolean();
return `
{
"Name": "${item}",
"Datacenter": "${dc}",
${i === 1 ? `
"Namespace": "default",
` : `
"Namespace": "${fake.hacker.noun()}-ns-${i}",
`}
"Name": "${item.Name}",
"Datacenter": "${item.Datacenter}",
"Namespace": "${item.Namespace}",
"ChecksPassing":${fake.random.number({min: 1, max: env('CONSUL_CHECK_COUNT', fake.random.number(10))})},
"ChecksWarning":${fake.random.number({min: 0, max: env('CONSUL_CHECK_COUNT', fake.random.number(10))})},
"ChecksCritical":${fake.random.number({min: 0, max: env('CONSUL_CHECK_COUNT', fake.random.number(10))})},
@@ -79,22 +79,17 @@ ${
}
`})}
],
"Downstreams":
[
"Downstreams": [
${
downstreams.map((item, i) => {
let hasPerms = fake.random.boolean();
const hasPerms = fake.random.boolean();
// if hasPerms is true allowed is always false as some restrictions apply
let allowed = hasPerms ? false : fake.random.boolean();
const allowed = hasPerms ? false : fake.random.boolean();
return `
{
"Name": "${item}",
"Datacenter": "${dc}",
${i === 1 ? `
"Namespace": "default",
` : `
"Namespace": "${fake.hacker.noun()}-ns-${i}",
`}
"Name": "${item.Name}",
"Datacenter": "${item.Datacenter}",
"Namespace": "${item.Namespace}",
"ChecksPassing":${fake.random.number({min: 1, max: env('CONSUL_CHECK_COUNT', fake.random.number(10))})},
"ChecksWarning":${fake.random.number({min: 0, max: env('CONSUL_CHECK_COUNT', fake.random.number(10))})},
"ChecksCritical":${fake.random.number({min: 0, max: env('CONSUL_CHECK_COUNT', fake.random.number(10))})},


@@ -659,7 +659,6 @@
// no result as a zero not a missing stat.
promql += ' OR on() vector(0)';
}
//console.log(promql)
var params = {
query: promql,
time: new Date().getTime() / 1000,
@@ -671,7 +670,7 @@
return {
label: label,
desc: desc,
value: formatter(v),
value: isNaN(v) ? '-' : formatter(v),
};
}
@@ -683,7 +682,7 @@
data[groupName] = {
label: label,
desc: desc.replace('{{GROUP}}', groupName),
value: formatter(v),
value: isNaN(v) ? '-' : formatter(v),
};
}
return data;