{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Pubsub Test Analysis Notebook\n",
"\n",
"## Usage\n",
"\n",
"This notebook analyzes the output of a single pubsub test execution.\n",
"\n",
"You must run all the cells (`Cell > Run All` menu) to load the data and generate the charts.\n",
"\n",
"This may take quite some time and require considerable RAM on the first\n",
"run, but the results will be cached to `ANALYSIS_DIR/pandas` for future runs.\n",
"\n",
"<p/>\n",
"\n",
"<details>\n",
" <summary>Expand to show example data</summary>\n",
"\n",
"### `scores`\n",
"\n",
"The `scores` `DataFrame` contains peer score events, indexed by timestamp.\n",
"\n",
"**Example**:\n",
"\n",
"| timestamp | observer | peer | score |\n",
"|---|---|---|---|\n",
"| 2020-03-31 16:46:22.675526100 | 12D3KooWM1Q8EazdTBaYidtmAnaEqjtAzCGkYypzjgUmDU3bNpAS | 12D3KooWGHEfmKenMpsGDu1xEuVw7itsEMMT6oBVbYx642627Etw | 0.0027 |\n",
"| 2020-03-31 16:46:22.675526100 | 12D3KooWM1Q8EazdTBaYidtmAnaEqjtAzCGkYypzjgUmDU3bNpAS | 12D3KooWN5fa46NhuVP9as8ANmZ54gAM5cPuhTuu1bMAY5wvgoPG | 16.5617 |\n",
"\n",
"\n",
"- `observer` is the peer assigning the score\n",
"- `peer` is the peer receiving the score\n",
"\n",
"### `metrics`\n",
"\n",
"The `metrics` `DataFrame` contains aggregated tracer metrics.\n",
"\n",
"**Example**:\n",
"\n",
"| | published | rejected | delivered | duplicates | droppedrpc | peersadded | peersremoved | topicsjoined | topicsleft | peer | sent_rpcs | sent_messages | sent_grafts | sent_prunes | sent_iwants | sent_ihaves | recv_rpcs | recv_messages | recv_grafts | recv_prunes | recv_iwants | recv_ihaves |\n",
"|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n",
"| 0 | 0 | 0 | 721 | 0 | 0 | 2 | 1 | 1 | 0 | 12D3KooWM1Q8EazdTBaYidtmAnaEqjtAzCGkYypzjgUmDU3bNpAS | 722 | 721 | 0 | 0 | 0 | 0 | 727 | 721 | 2 | 0 | 0 | 0 |\n",
"| 1 | 0 | 0 | 721 | 0 | 0 | 2 | 1 | 1 | 0 | 12D3KooWN5fa46NhuVP9as8ANmZ54gAM5cPuhTuu1bMAY5wvgoPG | 724 | 721 | 1 | 0 | 0 | 0 | 726 | 721 | 1 | 0 | 0 | 0 |\n",
"\n",
"\n",
"</details>"
]
},
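{
"cell_type": "markdown",
"metadata": {},
"source": [
"Once the setup cells below have run, these tables can be explored directly with ordinary pandas operations. A minimal sketch (it assumes `scores` is non-empty and uses only the columns shown in the examples above):\n",
"\n",
"```python\n",
"# mean score each peer received, averaged over every observer and the whole run\n",
"mean_scores = scores.groupby('peer')['score'].mean().rename('mean_score')\n",
"\n",
"# attach the per-peer delivery counts from the metrics table\n",
"mean_scores.reset_index().merge(metrics[['peer', 'delivered', 'duplicates']], on='peer').head()\n",
"```"
]
},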
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"tags": [
"parameters"
]
},
"outputs": [],
"source": [
"# Parameters in this cell can be overridden using papermill\n",
"\n",
"# path to directory containing output from the extract_test_outputs method in analyze.py\n",
"ANALYSIS_DIR=\".\"\n",
"\n",
"# dir to save figure images\n",
"FIGURE_OUT=\"./figures\"\n",
"\n",
"# path to zip file containing all figures\n",
"FIGURE_ZIP_OUT=\"./figures.zip\"\n",
"\n",
"# font sizes\n",
"SMALL_SIZE = 8\n",
"MEDIUM_SIZE = 10\n",
"BIGGER_SIZE = 18"
]
},
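{
"cell_type": "markdown",
"metadata": {},
"source": [
"The cell above is tagged `parameters`, so papermill can inject values when the notebook is executed non-interactively. A minimal sketch of doing that from Python (the notebook filenames here are placeholders, not files shipped with this analysis):\n",
"\n",
"```python\n",
"import papermill as pm\n",
"\n",
"pm.execute_notebook(\n",
"    'analysis.ipynb',         # this notebook (placeholder name)\n",
"    'analysis-output.ipynb',  # executed copy with outputs (placeholder name)\n",
"    parameters=dict(ANALYSIS_DIR='/path/to/extracted/test/outputs'),\n",
")\n",
"```"
]
},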
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import pandas as pd\n",
"import numpy as np\n",
"import matplotlib.pyplot as plt\n",
"import toml\n",
"import ipywidgets as widgets\n",
"from pprint import pprint\n",
"import pathlib\n",
"import seaborn as sns\n",
"from durations import Duration\n",
"\n",
"# display() is used to render DataFrames from inside if/else blocks in later cells\n",
"from IPython.display import display\n",
"\n",
"import notebook_helper\n",
"from notebook_helper import no_scores_message, load_pandas, archive_figures, p25, p50, p75, p95, p99\n",
"\n",
"# load a helper to save figures to FIGURE_OUT\n",
"save_fig = notebook_helper.save_fig_fn(FIGURE_OUT)\n",
"\n",
"# render charts in a larger, zoomable style\n",
"%matplotlib notebook\n",
"\n",
"# turn off autosaving for the notebook\n",
"%autosave 0\n",
"\n",
"# prettify the colors\n",
"sns.set(color_codes=True)\n",
"\n",
"# helper to set font sizes for charts\n",
"def set_chart_fontsize(size):\n",
"    plt.rc('font', size=size)    # controls default text sizes\n",
"    plt.rc('axes', titlesize=size)   # fontsize of the axes title\n",
"    plt.rc('axes', labelsize=size)   # fontsize of the x and y labels\n",
"    plt.rc('xtick', labelsize=size)  # fontsize of the tick labels\n",
"    plt.rc('ytick', labelsize=size)  # fontsize of the tick labels\n",
"    plt.rc('legend', fontsize=size)  # legend fontsize\n",
"    plt.rc('figure', titlesize=size) # fontsize of the figure title\n",
"\n",
"\n",
"# set chart fonts to BIGGER_SIZE by default\n",
"set_chart_fontsize(BIGGER_SIZE)\n",
"\n",
"# load data\n",
"print('loading test data from ' + ANALYSIS_DIR)\n",
"\n",
"tables = load_pandas(ANALYSIS_DIR)\n",
"scores = tables['scores']\n",
"metrics = tables['metrics']\n",
"cdf = tables['cdf']\n",
"pdf = tables['pdf']\n",
"peers = tables['peers']\n",
"\n",
"# resample score index into 5s windows for plotting later\n",
"if not scores.empty:\n",
"    print('resampling peer scores')\n",
"    resample_interval = '5s'\n",
"    sampled = scores.resample(resample_interval)\n",
"\n",
"t_warm = peers['t_warm'].max()\n",
"t_run = peers['t_run'].max()\n",
"t_cool = peers['t_cool'].max()\n",
"t_complete = peers['t_complete'].max()\n",
"\n",
"params_panel, test_params = notebook_helper.test_params_panel(ANALYSIS_DIR)"
]
},
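{
"cell_type": "markdown",
"metadata": {},
"source": [
"`sampled` is a pandas resampler that buckets the score events into 5-second windows; the time-series charts below aggregate it with functions such as `np.mean` and `np.max`. A quick way to inspect it (a sketch, assuming the cell above ran with a non-empty `scores` table):\n",
"\n",
"```python\n",
"# per-window mean and max peer score for the first few 5s windows\n",
"sampled['score'].agg([np.mean, np.max]).head()\n",
"```"
]
},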
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test Parameters\n",
"\n",
"The cell below shows the parameters that were used to create the composition file for this test run.\n",
"\n",
"You can access the parameter values from other cells via the `test_params` dict, e.g.:\n",
"\n",
"```python\n",
"from durations import Duration\n",
"warmup = Duration(test_params['T_WARM'])\n",
"print('warmup seconds: {}'.format(warmup.to_seconds()))\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"params_panel"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Aggregations"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Latency Distribution"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### CDF"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig = notebook_helper.plot_latency_cdf(cdf)\n",
"save_fig(fig, 'latency-cdf')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### PDF"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig = notebook_helper.plot_latency_pdf(pdf)\n",
"save_fig(fig, 'latency-pdf')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### PDF (above p99)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fig = notebook_helper.plot_latency_pdf_above_quantile(pdf, quantile=0.99)\n",
"save_fig(fig, 'latency-pdf-over-p99')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Tracestat summary\n",
"Only the Publish and Deliver counts are accurate; the rest are filtered."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(notebook_helper.tracestat_summary(ANALYSIS_DIR))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Aggregated tracer metrics (per-peer)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"metrics[['published', 'delivered', 'rejected', 'duplicates', 'droppedrpc']].agg([np.min, np.max, np.median, np.mean]).rename(index={'amax': 'max', 'amin': 'min', 'amedian': 'median', 'amean': 'mean'})\n"
]
},
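{
"cell_type": "markdown",
"metadata": {},
"source": [
"The per-peer metrics table also supports quick derived health checks. For example, a sketch (using only the columns documented at the top of the notebook) that ranks peers by their rough share of duplicate deliveries:\n",
"\n",
"```python\n",
"# share of received messages that were duplicates, per peer (rough: delivered + duplicates as the denominator)\n",
"dup_ratio = metrics['duplicates'] / (metrics['delivered'] + metrics['duplicates'])\n",
"metrics.assign(duplicate_ratio=dup_ratio)[['peer', 'delivered', 'duplicates', 'duplicate_ratio']] \\\n",
"    .sort_values('duplicate_ratio', ascending=False).head()\n",
"```"
]
},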
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### All peer scores, aggregated across the test runtime"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if not scores.empty:\n",
"    # display() is needed because this expression sits inside an if block\n",
"    display(scores['score'].agg({'min': np.min, 'max': np.max, 'median': np.median, 'mean': np.mean}))\n",
"else:\n",
"    no_scores_message()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Aggregated score values for peers with negative scores\n",
"\n",
"- `observer` is the peer assigning the score.\n",
"- `peer` is the peer receiving the score."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if not scores.empty:\n",
"    neg = scores.where(scores['score'] < 0).groupby(['peer', 'observer'])\n",
"    n = neg.agg({'score': [np.min, np.max, np.median, np.mean]})\n",
"    # n is reused by the 'honest peers with negative scores' cell below\n",
"    display(n)\n",
"else:\n",
"    no_scores_message()"
]
},
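{
"cell_type": "markdown",
"metadata": {},
"source": [
"A related quick check, sketched here with plain pandas on the `scores` columns described at the top of the notebook: how many distinct peers ever received a negative score, and from how many observers.\n",
"\n",
"```python\n",
"if not scores.empty:\n",
"    neg_events = scores[scores['score'] < 0]\n",
"    print('peers with a negative score:', neg_events['peer'].nunique())\n",
"    print('observers assigning a negative score:', neg_events['observer'].nunique())\n",
"```"
]
},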
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Show honest peers with negative scores, joined with tracer metrics"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"if not scores.empty:\n",
"    # select columns from metrics table\n",
"    m = metrics[['peer', 'published', 'delivered', 'rejected']]\n",
"\n",
"    # join the negative-score aggregates (n, from the cell above) with the tracer metrics\n",
"    display(pd.DataFrame(n).merge(m, on='peer').groupby('peer').head())\n",
"else:\n",
"    no_scores_message()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### global min/max score over time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"time_annotations = [\n",
"    {'label': 'warmup complete', 'time': t_run},\n",
"    {'label': 'cooldown begin', 'time': t_cool},\n",
"]\n",
"\n",
"def annotate_score_plot(plot, label, legend_anchor=None):\n",
"    notebook_helper.annotate_score_plot(plot, label, legend_anchor=legend_anchor, time_annotations=time_annotations)\n",
"\n",
"if not scores.empty:\n",
"    fig, ax = plt.subplots(2, figsize=(11, 8))\n",
"    plt.subplots_adjust(hspace=0.5)\n",
"    fig.suptitle(\"Min / Max Peer Scores\")\n",
"    plot = sampled['score'].agg([np.min, np.max]).plot(ax=ax[0])\n",
"    annotate_score_plot(plot, 'All Peers min/max', legend_anchor=(0, -0.2))\n",
"\n",
"    # hide bottom plot so legend is visible\n",
"    ax[1].set_visible(False)\n",
"\n",
"    # write png\n",
"    save_fig(fig, 'score-min-max')\n",
"else:\n",
"    no_scores_message()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### global mean / median score over time"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"if not scores.empty:\n",
"    # create 2 subplots - one for the chart and one that we'll hide to make blank space\n",
"    # for the legend\n",
"    fig, ax = plt.subplots(2, figsize=(11, 8))\n",
"    plt.subplots_adjust(hspace=0.5)\n",
"    fig.suptitle('Mean / Median Peer Scores')\n",
"    plot = sampled['score'].agg([np.mean, np.median]).plot(ax=ax[0])\n",
"    annotate_score_plot(plot, 'All Peers mean/median', legend_anchor=(0, -0.2))\n",
"\n",
"    # hide bottom plot so legend is visible\n",
"    ax[1].set_visible(False)\n",
"\n",
"    save_fig(fig, 'score-mean-median')\n",
"else:\n",
"    no_scores_message()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### mean score distribution (all peers)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"if not scores.empty:\n",
"    plot = scores[['peer', 'score']].groupby('peer').mean().plot.hist(bins=20)\n",
"    fig = plot.get_figure()\n",
"    fig.suptitle('Mean score distribution (all peers)')\n",
"    save_fig(fig, 'score-global-mean-distribution')\n",
"else:\n",
"    no_scores_message()"
]
},
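{
"cell_type": "markdown",
"metadata": {},
"source": [
"To see which peers sit in the left tail of that histogram, the same per-peer means can be ranked directly (a sketch using only the `scores` columns described at the top of the notebook):\n",
"\n",
"```python\n",
"if not scores.empty:\n",
"    # ten peers with the lowest mean score over the whole run\n",
"    display(scores.groupby('peer')['score'].mean().nsmallest(10))\n",
"```"
]
},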
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### score distributions (honest vs attacker)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"aggregations = [np.min, np.max, p25, p75, p95]\n",
"kwargs = {'kind': 'hist', 'subplots': True, 'sharex': True, 'sharey': True, 'figsize': (8, 10)}\n",
"\n",
"if not scores.empty:\n",
"    scores_by_peer = scores.groupby('peer')\n",
"\n",
"    plots = scores_by_peer['score'].agg(aggregations).plot(title='Score distributions (all peers)', **kwargs)\n",
"    for p in plots:\n",
"        p.set_ylabel('freq')\n",
"    fig = plots[0].get_figure()\n",
"    save_fig(fig, 'score-distributions-all-peers')\n",
"\n",
"else:\n",
"    no_scores_message()"
]
},
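{
"cell_type": "markdown",
"metadata": {},
"source": [
"`save_fig` writes images to `FIGURE_OUT` (see the setup cell) and the next cell zips that directory, so any extra chart saved at this point should be included in the bundle as well. A sketch of saving one more figure (the chart and its filename are arbitrary examples, not part of the standard analysis):\n",
"\n",
"```python\n",
"if not scores.empty:\n",
"    # mean score per peer as a simple bar chart, saved alongside the built-in figures\n",
"    fig, ax = plt.subplots(figsize=(11, 4))\n",
"    scores.groupby('peer')['score'].mean().plot.bar(ax=ax, title='Mean score per peer')\n",
"    ax.set_xticklabels([])  # peer IDs are too long to read on the x axis\n",
"    save_fig(fig, 'score-mean-per-peer')\n",
"```"
]
},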
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# zip all figure images into a bundle\n",
"archive_figures(FIGURE_OUT, FIGURE_ZIP_OUT)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
},
"pycharm": {
"stem_cell": {
"cell_type": "raw",
"metadata": {
"collapsed": false
},
"source": []
}
}
},
"nbformat": 4,
"nbformat_minor": 4
}