commit 0a6da210d9
6 changed files with 3176 additions and 91 deletions

Dockerfile (32 lines changed)
@@ -1,28 +1,24 @@
 FROM python:3.8-slim-buster
-LABEL maintainer="Josh Smith" \
+LABEL maintainer="Team QLUSTOR <team@qlustor.com>" \
      description="Original by Aiden Gilmartin. Speedtest to InfluxDB data bridge"
 
 # Install dependencies
 ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get update
-RUN apt-get -q -y install --no-install-recommends apt-utils gnupg1 apt-transport-https dirmngr
-
-# Install speedtest-cli
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 379CE192D401AB61
-RUN echo "deb https://ookla.bintray.com/debian buster main" | tee /etc/apt/sources.list.d/speedtest.list
-RUN apt-get update && apt-get -q -y install speedtest
-
+RUN true &&\
+    \
+    # Install dependencies
+    apt-get update && \
+    apt-get -q -y install --no-install-recommends apt-utils gnupg1 apt-transport-https dirmngr && \
+    \
+    # Install Python packages
-COPY requirements.txt /
-RUN pip install -r /requirements.txt
-
+    pip3 install pythonping influxdb && \
+    \
+    # Clean up
-RUN apt-get -q -y autoremove
-RUN apt-get -q -y clean
-RUN rm -rf /var/lib/apt/lists/*
+    apt-get -q -y autoremove && apt-get -q -y clean && \
+    rm -rf /var/lib/apt/lists/*
 
 # Final setup & execution
-COPY . /app
+ADD . /app
 WORKDIR /app
-CMD ["python3", "-u", "main.py"]
+ENTRYPOINT ["/bin/sh", "/app/entrypoint.sh"]
+CMD ["main.py"]
GrafanaDash-SpeedTests.json (new file, 3002 lines; diff suppressed because it is too large)
README.md (63 lines changed)
@@ -4,9 +4,9 @@ This is a Python script that will continuously run the official Speedtest CLI ap
 
 This script will allow you to measure your internet connection's speed and consistency over time. It uses env variables as configuration. It's as easy as giving your Docker server a one-line command and you'll be set. Using Grafana you can start exploring this data easily.
 
-I built a grafana dashboard for this data that can be found at https://grafana.com/grafana/dashboards/13053
+I built a Grafana dashboard which has been exported into this repo as `GrafanaDash-SpeedTests.json` to import into Grafana for your convenience.
 
-![Grafana Dashboard](https://grafana.com/api/dashboards/13053/images/8976/image)
+![GrafanaDashboard](https://user-images.githubusercontent.com/945191/105287048-46f52a80-5b6c-11eb-9e57-038d63b67efb.png)
 
 There are some added features to allow some additional details that Ookla provides as tags on your data. Some examples are your current ISP, the interface being used, and the server that hosted the test. Over time, you could identify if some servers are performing better than others.
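When tagging is enabled, each point written to InfluxDB carries a small set of string tags alongside the numeric fields. The keys below are the kind of tags the script can attach (they match its tag list); the values are purely illustrative:

```python
# Purely illustrative values for the tags that can ride along with each point.
example_tags = {
    'namespace': 'home',            # from the NAMESPACE variable
    'isp': 'ExampleISP',
    'interface': 'eth0',
    'server_name': 'Example Server',
    'speedtest_url': 'https://www.speedtest.net/result/c/example',
}
print(example_tags)
```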
@@ -15,19 +15,23 @@ There are some added features to allow some additional details that Ookla provid
 
 The InfluxDB connection settings are controlled by environment variables.
 
 The variables available are:
-- INFLUX_DB_ADDRESS = 192.168.1.xxx
-- INFLUX_DB_PORT = 8086
-- INFLUX_DB_USER = user
-- INFLUX_DB_PASSWORD = pass
-- INFLUX_DB_DATABASE = speedtest
-- INFLUX_DB_TAGS = *comma seperated list of tags. See below for options*
-- SPEEDTEST_INTERVAL = 60
-- SPEEDTEST_FAIL_INTERVAL = 5
+- NAMESPACE = default - None
+- INFLUX_DB_ADDRESS = default - influxdb
+- INFLUX_DB_PORT = default - 8086
+- INFLUX_DB_USER = default - {blank}
+- INFLUX_DB_PASSWORD = default - {blank}
+- INFLUX_DB_DATABASE = default - speedtests
+- INFLUX_DB_TAGS = default - None * See below for options, '*' wildcard for all *
+- SPEEDTEST_INTERVAL = default - 5 (minutes)
+- SPEEDTEST_SERVER_ID = default - {blank} * id from https://c.speedtest.net/speedtest-servers-static.php *
+- PING_INTERVAL = default - 5 (seconds)
+- PING_TARGETS = default - 1.1.1.1, 8.8.8.8 (csv of hosts to ping)
 
 ### Variable Notes
 - Intervals are in minutes. *Script will convert it to seconds.*
 - If any variables are not needed, don't declare them. Functions will operate with or without most variables.
 - Tags should be input without quotes. *INFLUX_DB_TAGS = isp, interface, external_ip, server_name, speedtest_url*
+- NAMESPACE is used to collect data from multiple instances of the container into one database and select which you wish to view in Grafana. For example, I have one monitoring my Starlink connection and the other my TELUS connection.
 
 ### Tag Options
 The Ookla speedtest app provides a nice set of data beyond the upload and download speed. The list is below.
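These settings are read straight from the environment with the defaults shown above. A minimal sketch of how they resolve, mirroring the `os.getenv` calls in `main.py` (the splitting of `PING_TARGETS` into a list is added here only for illustration):

```python
import os

# Defaults mirror the list above; values passed with `docker run -e ...` override them.
NAMESPACE = os.getenv('NAMESPACE', 'None')
DB_ADDRESS = os.getenv('INFLUX_DB_ADDRESS', 'influxdb')
DB_PORT = int(os.getenv('INFLUX_DB_PORT', '8086'))
DB_DATABASE = os.getenv('INFLUX_DB_DATABASE', 'speedtests')
TEST_INTERVAL = int(os.getenv('SPEEDTEST_INTERVAL', '5')) * 60   # minutes -> seconds
PING_INTERVAL = int(os.getenv('PING_INTERVAL', '5'))             # seconds
PING_TARGETS = [t.strip() for t in os.getenv('PING_TARGETS', '1.1.1.1, 8.8.8.8').split(',')]

print(NAMESPACE, DB_ADDRESS, DB_PORT, DB_DATABASE, TEST_INTERVAL, PING_INTERVAL, PING_TARGETS)
```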
@@ -59,40 +63,21 @@ Be aware that this script will automatically accept the license and GDPR statem
 
 1. Build the container.
 
-`docker build -t breadlysm/speedtest-to-influxdb ./`
+`docker build -t qlustor/speedtest_ookla-to-influxdb ./`
 
 2. Run the container.
 ```
-docker run -d --name speedtest-influx \
-    -e 'INFLUX_DB_ADDRESS'='_influxdb_host_' \
+docker run -d -t --name speedflux \
+    -e 'NAMESPACE'='None' \
+    -e 'INFLUX_DB_ADDRESS'='influxdb' \
     -e 'INFLUX_DB_PORT'='8086' \
    -e 'INFLUX_DB_USER'='_influx_user_' \
    -e 'INFLUX_DB_PASSWORD'='_influx_pass_' \
-    -e 'INFLUX_DB_DATABASE'='speedtest' \
-    -e 'SPEEDTEST_INTERVAL'='1800' \
-    -e 'SPEEDTEST_FAIL_INTERVAL'='60' \
-    breadlysm/speedtest-to-influxdb
+    -e 'INFLUX_DB_DATABASE'='speedtests' \
+    -e 'SPEEDTEST_INTERVAL'='5' \
+    -e 'SPEEDTEST_FAIL_INTERVAL'='5' \
+    -e 'SPEEDTEST_SERVER_ID'='12746' \
+    qlustor/speedtest_ookla-to-influxdb
 ```
-### No Container
-
-1. Clone the repo
-
-`git clone https://github.com/breadlysm/speedtest-to-influxdb.git`
-
-2. Configure the .env file in the repo or set the environment variables on your device.
-
-3. [Install the Speedtest CLI application by Ookla.](https://www.speedtest.net/apps/cli)
-
-NOTE: The `speedtest-cli` package in distro repositories is an unofficial client. It will need to be uninstalled before installing the Ookla Speedtest CLI application with the directions on their website.
-
-4. Install the InfluxDB client for library from Python.
-
-`pip install influxdb`
-
-5. Run the script.
-
-`python3 ./main.py`
 
 
-This script looks to have been originally written by https://github.com/aidengilmartin/speedtest-to-influxdb/blob/master/main.py and I forked it from https://github.com/martinfrancois/speedtest-to-influxdb. They did the hard work, I've continued to modify it though to fit my needs.
+This script looks to have been originally written by https://github.com/aidengilmartin/speedtest-to-influxdb/blob/master/main.py and I forked it from https://github.com/breadlysm/speedtest-to-influxdb. They did the hard work; I've continued to modify it to fit my needs.
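Once the container is running with the defaults above (InfluxDB reachable as `influxdb:8086`, database `speedtests`) and the `influxdb` Python package installed locally, a quick sanity check is to query the most recent point per measurement. `speeds` and `pings` are the measurement names `main.py` writes; the connection details here are only an example and should match your own setup:

```python
from influxdb import InfluxDBClient

# Connection details follow the documented defaults; adjust host/credentials as needed.
client = InfluxDBClient('influxdb', 8086, '', '', 'speedtests')

# 'speeds' and 'pings' are measurements written by main.py.
for measurement in ('speeds', 'pings'):
    result = client.query(f'SELECT * FROM "{measurement}" ORDER BY time DESC LIMIT 1')
    print(measurement, list(result.get_points()))
```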
entrypoint.sh (new file, 16 lines)
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+printenv >> /etc/environment
+ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
+# Install speedtest-cli
+if [ ! -e /usr/bin/speedtest ]
+then
+  apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 379CE192D401AB61
+  echo "deb https://ookla.bintray.com/debian buster main" | tee /etc/apt/sources.list.d/speedtest.list
+  apt-get update && apt-get -q -y install speedtest
+  apt-get -q -y autoremove && apt-get -q -y clean
+  rm -rf /var/lib/apt/lists/*
+fi
+
+exec /usr/local/bin/python3 $@
main.py (141 lines changed)
@@ -1,22 +1,31 @@
+import os
 import time
 import json
+import datetime
 import subprocess
-import os
+from pythonping import ping
 from influxdb import InfluxDBClient
+from multiprocessing import Process
 
 # InfluxDB Settings
-DB_ADDRESS = os.environ.get('INFLUX_DB_ADDRESS')
-DB_PORT = int(os.environ.get('INFLUX_DB_PORT'))
-DB_USER = os.environ.get('INFLUX_DB_USER')
-DB_PASSWORD = os.environ.get('INFLUX_DB_PASSWORD')
-DB_DATABASE = os.environ.get('INFLUX_DB_DATABASE')
-DB_TAGS = os.environ.get('INFLUX_DB_TAGS')
+NAMESPACE = os.getenv('NAMESPACE', 'None')
+DB_ADDRESS = os.getenv('INFLUX_DB_ADDRESS', 'influxdb')
+DB_PORT = int(os.getenv('INFLUX_DB_PORT', '8086'))
+DB_USER = os.getenv('INFLUX_DB_USER', '')
+DB_PASSWORD = os.getenv('INFLUX_DB_PASSWORD', '')
+DB_DATABASE = os.getenv('INFLUX_DB_DATABASE', 'speedtests')
+DB_TAGS = os.getenv('INFLUX_DB_TAGS', None)
+PING_TARGETS = os.getenv('PING_TARGETS', '1.1.1.1, 8.8.8.8')
 
 # Speedtest Settings
 # Time between tests (in minutes, converts to seconds).
-TEST_INTERVAL = int(os.environ.get('SPEEDTEST_INTERVAL')) * 60
+TEST_INTERVAL = int(os.getenv('SPEEDTEST_INTERVAL', '5')) * 60
 # Time before retrying a failed Speedtest (in minutes, converts to seconds).
-TEST_FAIL_INTERVAL = int(os.environ.get('SPEEDTEST_FAIL_INTERVAL')) * 60
+TEST_FAIL_INTERVAL = int(os.getenv('SPEEDTEST_FAIL_INTERVAL', '5')) * 60
+# Specific server ID
+SERVER_ID = os.getenv('SPEEDTEST_SERVER_ID', '')
+# Time between ping tests (in seconds).
+PING_INTERVAL = int(os.getenv('PING_INTERVAL', '5'))
 
 influxdb_client = InfluxDBClient(
     DB_ADDRESS, DB_PORT, DB_USER, DB_PASSWORD, None)
@@ -42,10 +51,11 @@ def pkt_loss(data):
 def tag_selection(data):
     tags = DB_TAGS
-    if tags is None:
-        return None
+    options = {}
 
     # tag_switch takes in _data and attaches CLIoutput to more readable ids
     tag_switch = {
+        'namespace': NAMESPACE,
         'isp': data['isp'],
         'interface': data['interface']['name'],
         'internal_ip': data['interface']['internalIp'],
@@ -63,17 +73,24 @@ def tag_selection(data):
         'speedtest_url': data['result']['url']
     }
 
-    options = {}
+    if tags is None:
+        tags = 'namespace'
+    elif '*' in tags:
+        return tag_switch
+    else:
+        tags = 'namespace, ' + tags
 
     tags = tags.split(',')
     for tag in tags:
         # split the tag string, strip and add selected tags to {options} with corresponding tag_switch data
         tag = tag.strip()
         options[tag] = tag_switch[tag]
 
     return options
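As a usage sketch of the selection logic above (a standalone re-implementation for illustration, not the module itself): `None` falls back to the `namespace` tag alone, `'*'` returns every tag, and anything else is treated as a comma-separated list with `namespace` prepended. The sample values are hypothetical:

```python
def select_tags(tags, tag_switch):
    """Mirrors the tag_selection() behaviour above, for illustration only."""
    if tags is None:
        tags = 'namespace'
    elif '*' in tags:
        return dict(tag_switch)
    else:
        tags = 'namespace, ' + tags
    return {t.strip(): tag_switch[t.strip()] for t in tags.split(',')}

# Hypothetical CLI-derived values, keyed like tag_switch in main.py.
tag_switch = {'namespace': 'home', 'isp': 'ExampleISP',
              'interface': 'eth0', 'server_name': 'Example Server'}

print(select_tags(None, tag_switch))              # {'namespace': 'home'}
print(select_tags('isp, interface', tag_switch))  # namespace + isp + interface
print(select_tags('*', tag_switch))               # every available tag
```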
 
 
-def format_for_influx(cliout):
-    data = json.loads(cliout)
+def format_for_influx(data):
 
     # There is additional data in the speedtest-cli output but it is likely not necessary to store.
     influx_data = [
         {
@@ -110,37 +127,107 @@
             'fields': {
                 'packetLoss': pkt_loss(data)
             }
         },
+        {
+            'measurement': 'speeds',
+            'time': data['timestamp'],
+            'fields': {
+                'jitter': data['ping']['jitter'],
+                'latency': data['ping']['latency'],
+                'packetLoss': pkt_loss(data),
+                # Byte to Megabit
+                'bandwidth_down': data['download']['bandwidth'] / 125000,
+                'bytes_down': data['download']['bytes'],
+                'elapsed_down': data['download']['elapsed'],
+                # Byte to Megabit
+                'bandwidth_up': data['upload']['bandwidth'] / 125000,
+                'bytes_up': data['upload']['bytes'],
+                'elapsed_up': data['upload']['elapsed']
+            }
+        }
     ]
     tags = tag_selection(data)
-    if tags is None:
-        return influx_data
-    else:
+    if tags is not None:
         for measurement in influx_data:
             measurement['tags'] = tags
 
     return influx_data
 
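The `# Byte to Megabit` division is the only unit conversion in the payload: the Speedtest CLI reports `bandwidth` in bytes per second, and 125,000 bytes/s equals 1 Mbit/s. A short worked example (the sample value is made up):

```python
BYTES_PER_SEC_PER_MBPS = 125_000  # 1 Mbit/s = 125,000 bytes/s

bandwidth_bytes_per_sec = 11_750_000  # example CLI 'bandwidth' value
mbps = bandwidth_bytes_per_sec / BYTES_PER_SEC_PER_MBPS
print(f"{mbps:.1f} Mb/s")             # 94.0 Mb/s
```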
-def main():
-    init_db()  # Setup the database if it does not already exist.
-
-    while (1):  # Run a Speedtest and send the results to influxDB indefinitely.
+def speedtest():
+    if not SERVER_ID:
         speedtest = subprocess.run(
             ["speedtest", "--accept-license", "--accept-gdpr", "-f", "json"], capture_output=True)
+        print("Automatic server choice")
+    else:
+        speedtest = subprocess.run(
+            ["speedtest", "--accept-license", "--accept-gdpr", "-f", "json", "--server-id=" + SERVER_ID], capture_output=True)
+        print("Manual server choice : ID = " + SERVER_ID)
 
     if speedtest.returncode == 0:  # Speedtest was successful.
-        data = format_for_influx(speedtest.stdout)
-        print("Speedtest Successful:")
+        print("Speedtest Successful :")
+        data_json = json.loads(speedtest.stdout)
+        print("time: " + str(data_json['timestamp']) + " - ping: " + str(data_json['ping']['latency']) + " ms - download: " + str(data_json['download']['bandwidth']/125000) + " Mb/s - upload: " + str(data_json['upload']['bandwidth'] / 125000) + " Mb/s - isp: " + data_json['isp'] + " - ext. IP: " + data_json['interface']['externalIp'] + " - server id: " + str(data_json['server']['id']) + " (" + data_json['server']['name'] + " @ " + data_json['server']['location'] + ")")
+        data = format_for_influx(data_json)
         if influxdb_client.write_points(data) == True:
             print("Data written to DB successfully")
-            time.sleep(TEST_INTERVAL)
     else:  # Speedtest failed.
-        print("Speedtest Failed:")
+        print("Speedtest Failed :")
         print(speedtest.stderr)
         print(speedtest.stdout)
-        time.sleep(TEST_FAIL_INTERVAL)
+        # time.sleep(TEST_FAIL_INTERVAL)
 
 
+def pingtest():
+    timestamp = datetime.datetime.utcnow()
+    for target in PING_TARGETS.split(','):
+        target = target.strip()
+        pingtest = ping(target, verbose=False, timeout=1, count=1, size=128)
+        data = [
+            {
+                'measurement': 'pings',
+                'time': timestamp,
+                'tags': {
+                    'namespace': NAMESPACE,
+                    'target' : target
+                },
+                'fields': {
+                    'success' : int(pingtest._responses[0].error_message is None),
+                    'rtt': float(0 if pingtest._responses[0].error_message is not None else pingtest.rtt_avg_ms)
+                }
+            }
+        ]
+        if influxdb_client.write_points(data) == True:
+            print("Ping data written to DB successfully")
+        else:  # Speedtest failed.
+            print("Ping Failed.")
+
+
+def main():
+    pPing = Process(target=pingtest)
+    pSpeed = Process(target=speedtest)
+
+    init_db()  # Setup the database if it does not already exist.
+
+    loopcount = 0
+    while (1):  # Run a Speedtest and send the results to influxDB indefinitely.
+        if loopcount == 0 or loopcount % PING_INTERVAL == 0:
+            if pPing.is_alive():
+                pPing.terminate()
+            pPing = Process(target=pingtest)
+            pPing.start()
+
+        if loopcount == 0 or loopcount % TEST_INTERVAL == 0:
+            if pSpeed.is_alive():
+                pSpeed.terminate()
+            pSpeed = Process(target=speedtest)
+            pSpeed.start()
+
+        if loopcount % ( PING_INTERVAL * TEST_INTERVAL ) == 0:
+            loopcount = 0
+
+        time.sleep(1)
+        loopcount += 1
 
 if __name__ == '__main__':
-    print('Speedtest CLI Data Logger to InfluxDB')
+    print('Speedtest CLI data logger to InfluxDB started...')
     main()
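The loop above ticks once per second: `loopcount` increments on every pass, a ping run fires whenever it hits a multiple of `PING_INTERVAL`, and a speedtest fires whenever it hits a multiple of `TEST_INTERVAL` (already converted to seconds). A dry-run sketch of that schedule, with intervals shortened here so the pattern is visible:

```python
# Dry run of the scheduling pattern in main(); intervals shortened for illustration.
PING_INTERVAL = 5    # seconds
TEST_INTERVAL = 15   # seconds (in the real script this is minutes * 60)

for loopcount in range(31):
    fire_ping = loopcount == 0 or loopcount % PING_INTERVAL == 0
    fire_speedtest = loopcount == 0 or loopcount % TEST_INTERVAL == 0
    if fire_ping or fire_speedtest:
        print(f"t={loopcount:>2}s  ping={fire_ping}  speedtest={fire_speedtest}")
```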
requirements.txt (deleted, 1 line)
@@ -1 +0,0 @@
-influxdb