remove unwanted software (#79)

* remove unwanted software

* log fix

* log fix
fbarbu15 2024-10-24 12:43:51 +03:00 committed by GitHub
parent 684d14c050
commit aca216e95a
5 changed files with 125 additions and 6 deletions

.github/actions/prune-vm/action.yml (new file)

@@ -0,0 +1,27 @@
# Inspired by https://github.com/AdityaGarg8/remove-unwanted-software
# to free up disk space. Currently removes Dotnet, Android and Haskell.
name: Remove unwanted software
description: Default GitHub runners come with a lot of unnecessary software
runs:
using: "composite"
steps:
- name: Disk space report before modification
shell: bash
run: |
echo "==> Available space before cleanup"
echo
df -h
- name: Maximize build disk space
shell: bash
run: |
set -euo pipefail
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/local/lib/android
sudo rm -rf /opt/ghc
sudo rm -rf /usr/local/.ghcup
- name: Disk space report after modification
shell: bash
run: |
echo "==> Available space after cleanup"
echo
df -h
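
The report steps above only print df -h before and after the cleanup. As a rough, hedged illustration of what they measure (not part of this commit; the mount point is an assumption), the same check can be done from Python with the standard library:

# Illustrative sketch, not part of this commit: print free space on the
# runner's root filesystem, mirroring the "df -h" report steps above.
import shutil

def report_free_space(path="/"):
    usage = shutil.disk_usage(path)  # named tuple: total, used, free (in bytes)
    free_gb = usage.free / 1024 ** 3
    print(f"==> Available space on {path}: {free_gb:.1f} GiB")
    return free_gb

if __name__ == "__main__":
    report_free_space()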


@@ -36,11 +36,14 @@ jobs:
   tests:
     name: tests
     runs-on: ubuntu-latest
-    timeout-minutes: 90
+    timeout-minutes: 120
     steps:
       - uses: actions/checkout@v4
+      - name: Remove unwanted software
+        uses: ./.github/actions/prune-vm
       - uses: actions/setup-python@v4
         with:
           python-version: '3.12'


@@ -6,5 +6,6 @@
   "reportInvalidStringEscapeSequence": false,
   "reportWildcardImportFromLibrary": false,
   "venvPath": ".",
-  "venv": ".venv"
+  "venv": ".venv",
+  "typeCheckingMode": "off"
 }

scripts/waku_network.sh (new file, executable)

@@ -0,0 +1,61 @@
#!/bin/bash
printf "\nAssuming you already have a docker network called waku\n"
# if not something like this should create it: docker network create --driver bridge --subnet 172.18.0.0/16 --gateway 172.18.0.1 waku
cluster_id=4
pubsub_topic="/waku/2/rs/$cluster_id/0"
encoded_pubsub_topic=$(echo "$pubsub_topic" | sed 's:/:%2F:g')
node_1=wakuorg/go-waku:latest
node_2=wakuorg/go-waku:latest
node_1_ip=172.18.45.95
node_1_tcp=12298 # node 1's --tcp-port below; referenced later when building the multiaddress
printf "\nStarting Bootstrap Node\n"
container_id1=$(docker run -d -i -t -p 12297:12297 -p 12298:12298 -p 12299:12299 -p 12300:12300 -p 12301:12301 $node_1 --listen-address=0.0.0.0 --rest=true --rest-admin=true --websocket-support=true --log-level=DEBUG --rest-relay-cache-capacity=100 --websocket-port=12299 --rest-port=12297 --tcp-port=12298 --discv5-udp-port=12300 --rest-address=0.0.0.0 --nat=extip:$node_1_ip --peer-exchange=true --discv5-discovery=true --cluster-id=$cluster_id --relay=true --store=true --nodekey=30348dd51465150e04a5d9d932c72864c8967f806cce60b5d26afeca1e77eb68)
docker network connect --ip $node_1_ip waku $container_id1
sleep 1
info1=$(curl -X GET "http://127.0.0.1:12297/debug/v1/info" -H "accept: application/json")
enrUri=$(echo $info1 | jq -r '.enrUri')
printf "\nStarting Second Node\n"
container_id2=$(docker run -d -i -t -p 8158:8158 -p 8159:8159 -p 8160:8160 -p 8161:8161 -p 8162:8162 $node_2 --listen-address=0.0.0.0 --rest=true --rest-admin=true --websocket-support=true --log-level=DEBUG --rest-relay-cache-capacity=100 --websocket-port=8160 --rest-port=8158 --tcp-port=8159 --discv5-udp-port=8161 --rest-address=0.0.0.0 --nat=extip:172.18.207.159 --peer-exchange=true --discv5-discovery=true --cluster-id=$cluster_id --relay=true --discv5-bootstrap-node=$enrUri)
docker network connect --ip 172.18.207.159 waku $container_id2
sleep 1
printf "\nSubscribe\n"
curl -X POST "http://127.0.0.1:12297/relay/v1/subscriptions" -H "Content-Type: application/json" -d "[\"$pubsub_topic\"]"
curl -X POST "http://127.0.0.1:8158/relay/v1/subscriptions" -H "Content-Type: application/json" -d "[\"$pubsub_topic\"]"
sleep 0.1
printf "\nRelay from NODE1\n"
curl -X POST "http://127.0.0.1:12297/relay/v1/messages/$encoded_pubsub_topic" -H "Content-Type: application/json" -d '{"payload": "UmVsYXkgd29ya3MhIQ==", "contentTopic": "/test/1/waku-relay/proto", "timestamp": '$(date +%s%N)'}'
sleep 0.1
printf "\nCheck message in NODE2\n"
response=$(curl -X GET "http://127.0.0.1:8158/relay/v1/messages/$encoded_pubsub_topic" -H "Content-Type: application/json")
printf "\nResponse: $response\n\n"
printf "NODE1 INFO: $info1\n\n"
printf "NODE1 ENR: $enrUri\n\n"
# Extract the first non-WebSocket address
ws_address=$(echo $info1 | jq -r '.listenAddresses[] | select(contains("/ws") | not)')
# Check if we got an address, and construct the new address with it
if [[ $ws_address != "" ]]; then
    identifier=$(echo $ws_address | awk -F'/p2p/' '{print $2}')
    if [[ $identifier != "" ]]; then
        multiaddr_with_id="/ip4/${node_1_ip}/tcp/${node_1_tcp}/p2p/${identifier}"
        printf "NODE1 MULTIADDRESS: $multiaddr_with_id\n\n"
        enrUri=$(echo $info1 | jq -r '.enrUri')
    else
        echo "No identifier found in the address."
        exit 1
    fi
else
    echo "No non-WebSocket address found."
    exit 1
fi
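
The script assumes the waku bridge network already exists (see the comment near the top). Since the test harness in this repository drives Docker through the Python SDK (note the docker, IPAMConfig and IPAMPool imports in the diff below), an equivalent setup step could look like the following hedged sketch; the function name ensure_waku_network is illustrative, and the subnet/gateway values are taken from the script's own comment:

# Illustrative sketch, not part of this commit: create the "waku" bridge
# network that scripts/waku_network.sh expects, via the Docker SDK for Python.
import docker
from docker.types import IPAMConfig, IPAMPool

def ensure_waku_network(name="waku", subnet="172.18.0.0/16", gateway="172.18.0.1"):
    client = docker.from_env()
    existing = client.networks.list(names=[name])
    if existing:
        return existing[0]  # reuse the network if it is already there
    ipam = IPAMConfig(pool_configs=[IPAMPool(subnet=subnet, gateway=gateway)])
    return client.networks.create(name, driver="bridge", ipam=ipam)

For reference, the relayed payload UmVsYXkgd29ya3MhIQ== is simply the base64 encoding of "Relay works!!".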


@@ -1,12 +1,13 @@
 import os
 import re
+import time
 from src.libs.custom_logger import get_custom_logger
 import random
 import threading
 import docker
 from src.env_vars import NETWORK_NAME, SUBNET, IP_RANGE, GATEWAY
 from docker.types import IPAMConfig, IPAMPool
-from docker.errors import NotFound
+from docker.errors import NotFound, APIError

 logger = get_custom_logger(__name__)
@@ -63,9 +64,35 @@ class DockerManager:
     def _log_container_output(self, container, log_path):
         os.makedirs(os.path.dirname(log_path), exist_ok=True)
-        with open(log_path, "wb+") as log_file:
-            for chunk in container.logs(stream=True):
-                log_file.write(chunk)
+        retry_count = 0
+        start_time = time.time()
+        try:
+            with open(log_path, "wb+") as log_file:
+                while True:
+                    if container.status in ["exited", "dead"]:
+                        logger.info(f"Container {container.short_id} has stopped. Exiting log stream.")
+                        return
+                    try:
+                        for chunk in container.logs(stream=True):
+                            if chunk:
+                                log_file.write(chunk)
+                                log_file.flush()
+                                start_time = time.time()
+                                retry_count = 0
+                            else:
+                                if time.time() - start_time > 5:
+                                    logger.warning(f"Log stream timeout for container {container.short_id}")
+                                    return
+                    except (APIError, IOError) as e:
+                        retry_count += 1
+                        if retry_count >= 5:
+                            logger.error(f"Max retries reached for container {container.short_id}. Exiting log stream.")
+                            return
+                        time.sleep(0.2)
+                    except Exception as e:
+                        return
+        except Exception as e:
+            logger.error(f"Failed to set up logging for container {container.short_id}: {e}")

     def generate_ports(self, base_port=None, count=5):
         if base_port is None:
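
Because the rewritten _log_container_output now blocks in a polling loop until the container exits, the stream times out, or retries are exhausted, callers would normally run it off the main thread. The module already imports threading; a hedged usage sketch (the start_logging wrapper is illustrative, not part of this commit) might look like:

# Illustrative sketch, not part of this commit: stream a container's logs in
# the background so the calling test flow is not blocked by the polling loop.
import threading

def start_logging(manager, container, log_path):
    thread = threading.Thread(
        target=manager._log_container_output,
        args=(container, log_path),
        daemon=True,  # do not keep the test process alive just for log streaming
    )
    thread.start()
    return thread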