Compare commits
24 Commits
a871c2a681 ... main

SHA1
---
a1bad4139e
7150957574
8bf481e6fc
510e790155
7cf2f66783
84afbe61b8
c5aff5fd3b
ae6b8f443c
3051db8a7a
57c316974b
c1b379e922
85d6715854
b3d46abb91
79d3270a8d
d24657a1b8
70cd381a91
c62774de1f
8034aec25a
050588cb73
10d301d784
29ef5cba55
86e4536404
45e6d2fb3e
3ab429628a
198 .gitignore vendored
@@ -1,166 +1,34 @@
|
||||
# ---> Python
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.diff
|
||||
.pylint.*
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
|
||||
.pylint.err
|
||||
.pylint.log
|
||||
.pylint.out
|
||||
*.pyc
|
||||
*.pyo
|
||||
|
||||
libs/
|
||||
*.egg-info
|
||||
*.log
|
||||
*.out
|
||||
*.bak
|
||||
|
||||
.idea
|
||||
*~
|
||||
#*
|
||||
*.iml
|
||||
*.junk
|
||||
|
||||
*.so
|
||||
*.log
|
||||
toxygen/build
|
||||
toxygen/dist
|
||||
*.spec
|
||||
dist
|
||||
toxygen/avatars
|
||||
toxygen/__pycache__
|
||||
/*.egg-info
|
||||
/*.egg
|
||||
html
|
||||
Toxygen.egg-info
|
||||
*.tox
|
||||
.cache
|
||||
*.db
|
||||
*~
|
||||
Makefile
|
||||
|
7 .rsync.sh Normal file
@@ -0,0 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
find * -name \*.py | xargs grep -l '[ ]*$' | xargs sed -i -e 's/[ ]*$//'
|
||||
rsync "$@" -vax --include \*.py --exclude \*.log --exclude \*.out \
|
||||
--exclude \*.egg-info --exclude libs --exclude dist --exclude build \
|
||||
--exclude \*.pyc --exclude .pyl\* --exclude \*~ --exclude \*.so \
|
||||
./ ../tox_profile.git/|grep -v /$
|
93 README.md
@@ -3,10 +3,13 @@
|
||||
Read and manipulate tox profile files. It started as a simple script from
|
||||
<https://stackoverflow.com/questions/30901873/what-format-are-tox-files-stored-in>
|
||||
|
||||
```tox_savefile.py``` reads a Tox profile and prints to stderr various
|
||||
```tox_profile.py``` reads a Tox profile and prints to stderr various
|
||||
things that it finds. It can then write what it found in JSON/YAML/REPR/PPRINT
|
||||
to a file. It can also test the nodes in a profile using ```nmap```.
|
||||
|
||||
(There are sometimes problems with the JSON info dump of bytes keys:
|
||||
```TypeError: Object of type bytes is not JSON serializable```)
|
||||
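A minimal workaround sketch for that ```TypeError```, assuming you dump the
parsed profile dict yourself (the ```_default``` helper below is illustrative,
not part of the script):

```
import json

def _default(o):
    # json.dumps() refuses bytes; render them as upper-case hex strings instead.
    if isinstance(o, (bytes, bytearray)):
        return o.hex().upper()
    raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")

# usage: json.dumps(profile_dict, indent=2, default=_default)
```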
|
||||
It can also download, select, or test nodes in a ```DHTnode.json``` file.
|
||||
|
||||
It can also decrypt a profile, saving the output to a file.
|
||||
@@ -30,11 +33,11 @@ to stdout
|
||||
to a file.
|
||||
|
||||
```
|
||||
usage: tox_savefile.py [-h]
|
||||
[--command info|decrypt|nodes|edit]
|
||||
[--info info|repr|yaml|json|pprint|nmap_udp|nmap_tcp]
|
||||
usage: tox_profile.py [-h]
|
||||
[--command info|decrypt|nodes|edit|onions]
|
||||
[--info info|repr|yaml|json|pprint|nmap_dht|nmap_relay]
|
||||
[--indent INDENT]
|
||||
[--nodes select_tcp|select_udp|select_version|nmap_tcp|nmap_udp,download]
|
||||
[--nodes select_tcp|select_udp|select_version|nmap_tcp|nmap_udp|download|check|clean]
|
||||
[--download_nodes_url DOWNLOAD_NODES_URL]
|
||||
[--edit help|section,num,key,val]
|
||||
[--output OUTPUT]
|
||||
@@ -50,7 +53,7 @@ Optional arguments:
|
||||
--command {info,decrypt,nodes,edit}
|
||||
Action command - default: info
|
||||
--output OUTPUT Destination for info/decrypt/nodes - can be the same as input
|
||||
--info info|repr|yaml|json|pprint|nmap_udp|nmap_tcp (may require nmap)
|
||||
--info info|repr|yaml|json|pprint|nmap_dht|nmap_relay (may require nmap)
|
||||
Format for info command
|
||||
--indent INDENT Indent for yaml/json/pprint
|
||||
--nodes select_tcp|select_udp|select_version|nmap_tcp|nmap_udp|download
|
||||
@@ -66,10 +69,23 @@ Optional arguments:
|
||||
Choose one of ```{info,repr,yaml,json,pprint,save}```
|
||||
for the format of the info command.
|
||||
|
||||
Choose one of ```{nmap_udp,nmap_tcp}```
|
||||
Choose one of ```{nmap_dht,nmap_relay,nmap_path}```
|
||||
to run tests using ```nmap``` for the ```DHT``` and ```TCP_RELAY```
|
||||
sections of the profile. Requires ```nmap``` and uses ```sudo```.
|
||||
|
||||
```
|
||||
--info default='info',
|
||||
choices=[info, save, repr, yaml,json, pprint]
|
||||
with --info=info prints info about the profile to stderr
|
||||
yaml,json, pprint, repr - output format
|
||||
nmap_dht - test DHT nodes with nmap
|
||||
nmap_relay - test TCP_RELAY nodes with nmap
|
||||
nmap_path - test PATH_NODE nodes with nmap
|
||||
--indent for pprint/yaml/json default=2
|
||||
|
||||
|
||||
```
|
||||
|
||||
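The same commands can also be driven from Python; a minimal sketch, assuming
the packaged ```iMain``` entry point (see ```src/tox_profile/__main__.py```),
which takes the same argument list as the command line:

```
from tox_profile.tox_profile import iMain

# mirrors: tox_profile.py --command info --info yaml --output /tmp/profile.yaml profile.tox
rc = iMain(['--command', 'info', '--info', 'yaml',
            '--output', '/tmp/profile.yaml', 'profile.tox'])
```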
#### Saving a copy
|
||||
|
||||
The code can now generate a saved copy of the profile as it parses it.
|
||||
@@ -83,6 +99,7 @@ decryption).
|
||||
|
||||
### --command nodes
|
||||
|
||||
|
||||
Takes a DHTnodes.json file as an argument.
|
||||
Choose one of ```{select_tcp,select_udp,select_version}```
|
||||
for ```--nodes``` to select TCP nodes, UDP nodes,
|
||||
@@ -94,6 +111,29 @@ Requires ```nmap``` and uses ```sudo```.
|
||||
|
||||
Choose ```download``` to download the nodes from ```--download_nodes_url```
|
||||
|
||||
Choose ```check``` to check the downloaded nodes; the error return
is the number of nodes with errors.
|
||||
|
||||
Choose ```clean``` to clean the downloaded nodes, and give
```--output``` as the file for the nodes cleaned of errors.
|
||||
|
||||
Check and clean will also try to ping the nodes on the relevant ports,
|
||||
and clean will update the ```status_tcp```, ```status_udp```, and
|
||||
```last_ping``` fields of the nodes.
|
||||
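As a rough sketch of the idea behind ```check```/```clean``` (TCP side only;
the ```clean_nodes``` helper is an assumption for illustration, but the field
names match the nodes JSON):

```
import json
import socket
import time

def clean_nodes(path_in, path_out):
    # Probe each downloaded node's TCP ports and record the result.
    with open(path_in) as f:
        data = json.load(f)
    for node in data.get("nodes", []):
        ok = False
        for port in node.get("tcp_ports", []):
            try:
                with socket.create_connection((node["ipv4"], int(port)), timeout=5):
                    ok = True
                    break
            except OSError:
                pass
        node["status_tcp"] = ok              # reachability over TCP
        node["last_ping"] = int(time.time())
    with open(path_out, "w") as f:           # the --output file
        json.dump(data, f, indent=2)
```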
|
||||
--nodes
|
||||
choices=[select_tcp, select_udp, nmap_tcp, select_version, nmap_udp, check, download]
|
||||
select_udp - select udp nodes
|
||||
select_tcp - select tcp nodes
|
||||
nmap_udp - test UDP nodes with nmap
|
||||
nmap_tcp - test TCP nodes with nmap
|
||||
select_version - select nodes that are the latest version
|
||||
download - download nodes from --download_nodes_url
|
||||
check - check nodes from --download_nodes_url
|
||||
clean - check nodes and save them as --output
|
||||
--download_nodes_url https://nodes.tox.chat/json
|
||||
```
|
||||
|
||||
### --command decrypt
|
||||
|
||||
Decrypt a profile, with ```--output``` to a filename.
|
||||
@@ -124,6 +164,12 @@ The ```num``` field is to accommodate sections that have lists:
|
||||
The ```--output``` can be the same as the input, since the input file is read
and closed before processing starts.
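A minimal sketch of why in-place output is safe, assuming the
read-everything-then-close pattern described above (the helper name is
illustrative):

```
def rewrite_profile(path_in, path_out):
    # The whole profile is read into memory and the file closed first,
    # so path_out may safely be the same file as path_in.
    with open(path_in, 'rb') as f:
        data = f.read()
    # ... decrypt / edit `data` here ...
    with open(path_out, 'wb') as f:
        f.write(data)
```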
|
||||
|
||||
```
|
||||
--edit
|
||||
help - print a summary of what fields can be edited
|
||||
section,num,key,val - edit the field section,num,key with val
|
||||
```
|
||||
|
||||
You can use the ```--edit``` command to synchronize profiles between
different clients while keeping each client's keypair:
e.g. you could keep your profile from toxic as master, and copy it over
|
||||
@@ -132,7 +178,8 @@ your qtox/toxygen/TriFa profile while preserving their keypair and NOSPAM:
|
||||
1. Use ```--command info --info info``` on the target profile to get the
|
||||
```Nospam```, ```Public_key``` and ```Private_key``` of the target.
|
||||
2. Backup the target and copy the source profile to the target.
|
||||
3. Edit the target with the values from 1) with:```
|
||||
3. Edit the target with the values from 1) with:
|
||||
```
|
||||
--command edit --edit NOSPAMKEYS,.,Nospam,hexstr --output target target
|
||||
--command edit --edit NOSPAMKEYS,.,Public_key,hexstr --output target target
|
||||
--command edit --edit NOSPAMKEYS,.,Private_key,hexstr --output target target
|
||||
@@ -163,6 +210,10 @@ required. It's available in most distros, or <https://stedolan.github.io/jq/>
|
||||
For the ```nmap``` commands, the ```nmap``` utility is
|
||||
required. It's available in most distros, or <https://nmap.org/>
|
||||
|
||||
## Issues
|
||||
|
||||
https://git.plastiras.org/emdee/tox_profile/issues
|
||||
|
||||
## Future Directions
|
||||
|
||||
This has not been tested on Windows, but it should be simple to fix.
|
||||
@@ -171,7 +222,33 @@ Because it's written in Python it is easy to extend to, for example,
|
||||
supporting multidevices:
|
||||
<https://git.plastiras.org/emdee/tox_profile/wiki/MultiDevice-Announcements-POC>
|
||||
|
||||
There are a couple of bash scripts to show usage:
|
||||
* tox_profile_examples.bash - simple example usage
|
||||
* tox_profile_test.bash - a real test runner that still needs documenting.
|
||||
|
||||
## Specification
|
||||
|
||||
There is a copy of the Tox [spec](https://toktok.ltd/spec.html)
|
||||
in the repo - it is missing any description of the groups section.
|
||||
|
||||
## Updates
|
||||
|
||||
Although Tox works over Tor, we do not recommend using it for
anonymity, as it leaks DNS requests due to a 6-year-old known security
issue (https://github.com/TokTok/c-toxcore/issues/469), unless your Tox
client does hostname lookups before calling Tox (as
[toxygen](https://git.plastiras.org/emdee/toxygen) does).
Otherwise, do not use it for anonymous communication unless you have a
TCP and UDP firewall in place.
|
||||
|
||||
The Tox project does not follow semantic versioning, so the project may
break the underlying ctypes wrapper at any time; it's not possible to
use Tox version numbers to tell what the API will be. The last git version
this code was tested with is ```1623e3ee5c3a5837a92f959f289fcef18bfa9c959```
of Feb 12 10:06:37 2024. If it breaks, you'll have to go into the tox.py
file in https://git.plastiras.org/emdee/toxygen_wrapper to fix it yourself.
|
||||
|
||||
The up-to-date version of this code is at https://git.plastiras.org/emdee/tox_profile
|
||||
|
||||
Work on this project is suspended until the
|
||||
[MultiDevice](https://git.plastiras.org/emdee/tox_profile/wiki/MultiDevice-Announcements-POC) problem is solved. Fork me!
|
||||
|
0 __init__.py Normal file
48 pyproject.toml Normal file
@@ -0,0 +1,48 @@
|
||||
[project]
|
||||
name = 'tox_profile'
|
||||
requires-python = ">= 3.7"
|
||||
description = "Read and manipulate tox profile files"
|
||||
keywords = ["tox", "tox_profile"]
|
||||
classifiers = [
|
||||
# How mature is this project? Common values are
|
||||
# 3 - Alpha
|
||||
# 4 - Beta
|
||||
# 5 - Production/Stable
|
||||
"Development Status :: 4 - Beta",
|
||||
|
||||
# Indicate who your project is intended for
|
||||
"Intended Audience :: Developers",
|
||||
|
||||
# Specify the Python versions you support here.
|
||||
"Programming Language :: Python :: 3",
|
||||
"License :: OSI Approved",
|
||||
"Operating System :: POSIX :: BSD :: FreeBSD",
|
||||
"Operating System :: POSIX :: Linux",
|
||||
"Programming Language :: Python :: 3 :: Only",
|
||||
"Programming Language :: Python :: 3.7",
|
||||
"Programming Language :: Python :: 3.8",
|
||||
"Programming Language :: Python :: 3.9",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: Implementation :: CPython",
|
||||
]
|
||||
dynamic = ["version", "readme", "dependencies"] # cannot be dynamic ['license']
|
||||
|
||||
[project.scripts]
|
||||
tox_profile = "tox_profile:iMain"
|
||||
|
||||
[project.urls]
|
||||
repository = "https://git.plastiras.org/emdee/tox_profile"
|
||||
homepage = "https://git.plastiras.org/emdee/tox_profile"
|
||||
|
||||
[build-system]
|
||||
requires = ["setuptools >= 61.0"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[tool.setuptools.dynamic]
|
||||
version = {attr = "tox_profile.__version__"}
|
||||
readme = {file = ["README.md"]}
|
||||
dependencies = {file = ["requirements.txt"]}
|
||||
|
||||
#[tool.setuptools.packages.find]
|
||||
#where = "src"
|
9 requirements.txt Normal file
@@ -0,0 +1,9 @@
|
||||
# the versions are the current ones tested - may work with earlier versions
|
||||
ruamel.yaml >= 0.18.5
|
||||
xmsgpack >= 1.0.7
|
||||
coloredlogs >= 15.0.1
|
||||
# optional
|
||||
# nmap
|
||||
# this is not on pypi yet - get it from
|
||||
# https://git.plastiras.org/emdee/toxygen_wrapper
|
||||
# toxygen_wrapper >= 1.0.0
|
55 setup.cfg Normal file
@@ -0,0 +1,55 @@
|
||||
[metadata]
|
||||
classifiers =
|
||||
License :: OSI Approved
|
||||
Intended Audience :: Web Developers
|
||||
Operating System :: POSIX :: BSD :: FreeBSD
|
||||
Operating System :: POSIX :: Linux
|
||||
Programming Language :: Python :: 3 :: Only
|
||||
Programming Language :: Python :: 3.7
|
||||
Programming Language :: Python :: 3.8
|
||||
Programming Language :: Python :: 3.9
|
||||
Programming Language :: Python :: 3.10
|
||||
Programming Language :: Python :: 3.11
|
||||
Programming Language :: Python :: Implementation :: CPython
|
||||
description='Read and manipulate tox profile files'
|
||||
long_description='Read and manipulate tox profile files'
|
||||
url='https://git.plastiras.org/emdee/tox_profile/'
|
||||
keywords='ctypes Tox'
|
||||
|
||||
[options]
|
||||
zip_safe = false
|
||||
python_requires = >=3.7
|
||||
include_package_data =
|
||||
"*" = ["*.txt", "*.bash"]
|
||||
|
||||
[options.entry_points]
|
||||
console_scripts =
|
||||
tox_profile = tox_profile.tox_profile:iMain
|
||||
|
||||
[easy_install]
|
||||
zip_ok = false
|
||||
|
||||
[flake8]
|
||||
jobs = 1
|
||||
max-line-length = 88
|
||||
ignore =
|
||||
E111
|
||||
E114
|
||||
E128
|
||||
E225
|
||||
E261
|
||||
E302
|
||||
E305
|
||||
E402
|
||||
E501
|
||||
E502
|
||||
E541
|
||||
E701
|
||||
E702
|
||||
E704
|
||||
E722
|
||||
E741
|
||||
F508
|
||||
F541
|
||||
W503
|
||||
W601
|
0 src/__init__.py Normal file
2 src/tox_profile/__init__.py Normal file
@@ -0,0 +1,2 @@
|
||||
|
||||
__version__ = "1.0.0"
|
5 src/tox_profile/__main__.py Normal file
@@ -0,0 +1,5 @@
|
||||
import sys
|
||||
from tox_profile.tox_profile import iMain
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(iMain(sys.argv[1:]))
|
1477 src/tox_profile/tox_profile.py Normal file
File diff suppressed because it is too large
24 tox_profile_examples.bash Normal file
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh -e
|
||||
# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*-
|
||||
|
||||
# some examples of tox-profile usage
|
||||
|
||||
export PYTHONPATH=/mnt/o/var/local/src/toxygen_wrapper.git
|
||||
TOX_HOME=$HOME/.config/tox
|
||||
NMAP_CMD='sudo -u debian-tor nmap'
|
||||
|
||||
echo INFO: check the download json file
|
||||
python3 tox_profile.py --command nodes --nodes check \
|
||||
$TOX_HOME/DHTnodes.json.new \
|
||||
2>&1 | tee /tmp/DHTnodes.json.log
|
||||
|
||||
echo INFO: get the tcp nodes/ports from the downloaded json file
|
||||
python3 tox_profile.py --command nodes --nodes select_tcp \
|
||||
--output /tmp/DHTnodes.json.tcp \
|
||||
$TOX_HOME/DHTnodes.json.new
|
||||
|
||||
echo INFO: run ping/nmap on the tcp nodes/ports from the downloaded json file
|
||||
python3 tox_profile.py --command nodes --nodes nmap_tcp \
|
||||
--nmap_cmd $NMAP_CMD \
|
||||
--output /tmp/DHTnodes.json.tcp.out \
|
||||
/tmp/DHTnodes.json.tcp
|
tox_profile_test.bash
@@ -1,26 +1,55 @@
|
||||
#!/bin/sh
|
||||
# -*- mode: sh; fill-column: 75; tab-width: 8; coding: utf-8-unix -*-
|
||||
|
||||
# tox_savefile.py has a lot of features so it needs test coverage
|
||||
# tox_profile.py has a lot of features so it needs test coverage
|
||||
|
||||
PREFIX=/o/var/local/src
|
||||
EXE=python3.sh
|
||||
WRAPPER=$PREFIX/toxygen_wrapper
|
||||
PREFIX=/mnt/o/var/local
|
||||
ROLE=text
|
||||
DEBUG=1
|
||||
EXE=/var/local/bin/python3.bash
|
||||
WRAPPER=$PREFIX/src/toxygen_wrapper.git
|
||||
tox=$HOME/.config/tox/toxic_profile.tox
|
||||
[ -s $tox ] || exit 2
|
||||
target=$PREFIX/src/tox_profile/tox_profile.py
|
||||
|
||||
OUT=/tmp/toxic_profile
|
||||
|
||||
ps ax | grep -q tor && netstat -n4le | grep -q :9050
|
||||
[ $? -eq 0 ] && HAVE_TOR=1 || HAVE_TOR=0
|
||||
|
||||
[ -f /usr/local/bin/usr_local_tput.bash ] && \
|
||||
. /usr/local/bin/usr_local_tput.bash || {
|
||||
DEBUG() { echo DEBUG $* ; }
|
||||
DBUG() { echo DEBUG $* ; }
|
||||
INFO() { echo INFO $* ; }
|
||||
WARN() { echo WARN $* ; }
|
||||
ERROR() { echo ERROR $* ; }
|
||||
}
|
||||
|
||||
# set -- -e
|
||||
target=$PREFIX/tox_profile/tox_savefile.py
|
||||
[ -s $target ] || exit 1
|
||||
if [ -z "$TOXCORE_LIBS" ] && [ ! -d libs ] ; then
|
||||
mkdir libs
|
||||
cd libs
|
||||
# /lib/x86_64-linux-gnu/libtoxcore.so.2
|
||||
for pro in qtox toxic ; do
|
||||
if which $pro 2> /dev/null ; then
|
||||
DBUG linking to $pro libtoxcore
|
||||
lib=$( ldd `which $pro` | grep libtoxcore|sed -e 's/.* => //' -e 's/ .*//')
|
||||
[ -n "$lib" -a -f "$lib" ] || { WARN $Lib ; continue ; }
|
||||
INFO linking to $lib
|
||||
for elt in libtoxcore.so libtoxav.so libtoxencryptsave.so ; do
|
||||
ln -s "$lib" "$elt"
|
||||
done
|
||||
export TOXCORE_LIBS=$PWD
|
||||
break
|
||||
fi
|
||||
done
|
||||
cd ..
|
||||
elif [ -z "$TOXCORE_LIBS" ] && [ -d libs ] ; then
|
||||
export TOXCORE_LIBS=$PWD/libs
|
||||
fi
|
||||
|
||||
tox=$HOME/.config/tox/toxic_profile.tox
|
||||
[ -s $tox ] || exit 2
|
||||
|
||||
# set -- -e
|
||||
[ -s $target ] || exit 1
|
||||
|
||||
[ -d $WRAPPER ] || {
|
||||
ERROR wrapper is required https://git.plastiras.org/emdee/toxygen_wrapper
|
||||
@@ -34,7 +63,7 @@ json=$HOME/.config/tox/DHTnodes.json
|
||||
which jq > /dev/null && HAVE_JQ=1 || HAVE_JQ=0
|
||||
which nmap > /dev/null && HAVE_NMAP=1 || HAVE_NMAP=0
|
||||
|
||||
sudo rm -f /tmp/toxic_profile.* /tmp/toxic_nodes.*
|
||||
sudo rm -f $OUT.* /tmp/toxic_nodes.*
|
||||
|
||||
test_jq () {
|
||||
[ $# -eq 3 ] || {
|
||||
@@ -66,42 +95,66 @@ test_jq () {
|
||||
|
||||
i=0
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
test_jq $json /tmp/toxic_nodes.json /tmp/toxic_nodes.err || exit ${i}$?
|
||||
test_jq $json /tmp/toxic_nodes.json /tmp/toxic_nodes.err || {
|
||||
ERROR test_jq failed on $json
|
||||
exit ${i}$?
|
||||
}
|
||||
[ -f /tmp/toxic_nodes.json ] || cp -p $json /tmp/toxic_nodes.json
|
||||
json=/tmp/toxic_nodes.json
|
||||
|
||||
i=1
|
||||
# required password
|
||||
INFO $i decrypt /tmp/toxic_profile.bin
|
||||
$EXE $target --command decrypt --output /tmp/toxic_profile.bin $tox || exit ${i}1
|
||||
[ -s /tmp/toxic_profile.bin ] || exit ${i}2
|
||||
INFO $i decrypt $OUT.bin
|
||||
$EXE $target --command decrypt --output $OUT.bin $tox || exit ${i}1
|
||||
[ -s $OUT.bin ] || exit ${i}2
|
||||
|
||||
tox=/tmp/toxic_profile.bin
|
||||
tox=$OUT.bin
|
||||
INFO $i info $tox
|
||||
$EXE $target --command info --info info $tox 2>/tmp/toxic_profile.info || {
|
||||
$EXE $target --command info --info info $tox 2>$OUT.info || {
|
||||
ERROR $i $EXE $target --command info --info info $tox
|
||||
exit ${i}3
|
||||
}
|
||||
[ -s /tmp/toxic_profile.info ] || exit ${i}4
|
||||
[ -s $OUT.info ] || exit ${i}4
|
||||
|
||||
INFO $i /tmp/toxic_profile.save
|
||||
$EXE $target --command info --info save --output /tmp/toxic_profile.save $tox 2>/dev/null || exit ${i}5
|
||||
[ -s /tmp/toxic_profile.save ] || exit ${i}6
|
||||
INFO $i $EXE $target --command info --info save --output $OUT.save $tox
|
||||
$EXE $target --command info --info save --output $OUT.save $tox 2>/dev/null || {
|
||||
ERROR $?
|
||||
exit ${i}5
|
||||
}
|
||||
|
||||
[ -s $OUT.save ] || exit ${i}6
|
||||
|
||||
i=2
|
||||
for the_tox in $tox /tmp/toxic_profile.save ; do
|
||||
[ $# -ne 0 -a $1 -ne $i ] || \
|
||||
! INFO $i Info and editing || \
|
||||
for the_tox in $tox $OUT.save ; do
|
||||
DBUG $i $the_tox
|
||||
the_base=`echo $the_tox | sed -e 's/.save$//' -e 's/.tox$//'`
|
||||
for elt in json yaml pprint repr ; do
|
||||
if [ $elt = yaml -o $elt = json ] ; then
|
||||
# ModuleNotFoundError
|
||||
python3 -c "import $elt" 2>/dev/null || continue
|
||||
fi
|
||||
INFO $i $the_base.$elt
|
||||
DBUG $EXE $target \
|
||||
--command info --info $elt \
|
||||
--output $the_base.$elt $the_tox '2>'$the_base.$elt.err
|
||||
$EXE $target --command info --info $elt \
|
||||
--output $the_base.$elt $the_tox 2>$the_base.$nmap.err || exit ${i}0
|
||||
[ -s $the_base.$elt ] || exit ${i}1
|
||||
--output $the_base.$elt $the_tox 2>$the_base.$elt.err || {
|
||||
tail $the_base.$elt.err
|
||||
if [ $elt != yaml -a $elt != json ] ; then
|
||||
exit ${i}0
|
||||
else
|
||||
WARN $elt
|
||||
fi
|
||||
}
|
||||
[ -s $the_base.$elt ] || {
|
||||
WARN no output $the_base.$elt
|
||||
# exit ${i}1
|
||||
}
|
||||
done
|
||||
|
||||
DBUG $EXE $target --command edit --edit help $the_tox
|
||||
$EXE $target --command edit --edit help $the_tox 2>/dev/null || exit ${i}2
|
||||
|
||||
# edit the status message
|
||||
@@ -123,16 +176,18 @@ for the_tox in $tox /tmp/toxic_profile.save ; do
|
||||
$EXE $target --command edit --edit 'DHT,.,DHTnode,' \
|
||||
--output $the_base.noDHT.tox $the_tox 2>&1|grep EDIT || exit ${i}7
|
||||
[ -s $the_base.noDHT.tox ] || exit ${i}7
|
||||
$EXE $target --command info $the_base.noDHT.tox 2>&1|grep 'NO DHT' || exit ${i}8
|
||||
$EXE $target --command info $the_base.noDHT.tox 2>&1 | grep 'NO DHT' || exit ${i}8
|
||||
|
||||
done
|
||||
|
||||
i=3
|
||||
[ "$#" -ne 0 -a "$1" != "$i" ] || \
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
! INFO $i Nodes || \
|
||||
for the_json in $json ; do
|
||||
DBUG $i $the_json
|
||||
the_base=`echo $the_json | sed -e 's/.json$//' -e 's/.tox$//'`
|
||||
for nmap in select_tcp select_udp select_version ; do
|
||||
for nmap in clean check select_tcp select_udp select_version; do
|
||||
$EXE $target --command nodes --nodes $nmap \
|
||||
--output $the_base.$nmap.json $the_json || {
|
||||
WARN $i $the_json $nmap ${i}1
|
||||
@@ -160,43 +215,89 @@ for the_json in $json ; do
|
||||
done
|
||||
done
|
||||
|
||||
ls -l /tmp/toxic_profile.* /tmp/toxic_nodes.*
|
||||
i=4
|
||||
##
|
||||
[ $# -ne 0 -a "$1" -ne $i ] || \
|
||||
[ "$HAVE_TOR" = 0 ] || \
|
||||
[ ! -f /etc/tor/torrc ] || \
|
||||
! INFO $i Onions || \
|
||||
for the_tox in /etc/tor/torrc ; do
|
||||
DBUG $i $the_tox
|
||||
the_base=`echo $OUT.save | sed -e 's/.save$//' -e 's/.tox$//'`
|
||||
# exits
|
||||
for slot in config test; do
|
||||
if [ $slot = exits ] && ! netstat -nle4 | grep -q :9050 ; then
|
||||
WARN Tor not running
|
||||
continue
|
||||
fi
|
||||
INFO $target --command onions --onions $slot \
|
||||
--output $the_base.$slot.out $the_tox
|
||||
DBUG=1 $EXE $target --command onions --onions $slot \
|
||||
--log_level 10 \
|
||||
--output $the_base.$slot.out $the_tox|| {
|
||||
WARN $i $?
|
||||
continue
|
||||
}
|
||||
[ true -o -s $the_base.$slot.out ] || {
|
||||
WARN $i empty $the_base.$slot.out
|
||||
continue
|
||||
}
|
||||
done
|
||||
done
|
||||
|
||||
# ls -l $OUT.* /tmp/toxic_nodes.*
|
||||
|
||||
# DEBUG=0 /usr/local/bin/proxy_ping_test.bash tor || exit 0
|
||||
ip route | grep ^def || exit 0
|
||||
|
||||
i=4
|
||||
i=5
|
||||
##
|
||||
the_tox=$tox
|
||||
[ $# -ne 0 -a "$1" != "$i" ] || \
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
[ "$HAVE_NMAP" = 0 ] || \
|
||||
for the_tox in $tox /tmp/toxic_profile.save ; do
|
||||
! INFO $i Making dogfood || \
|
||||
for the_tox in $tox $OUT.save ; do
|
||||
DBUG $i $the_tox
|
||||
the_base=`echo $the_tox | sed -e 's/.save$//' -e 's/.tox$//'`
|
||||
for nmap in nmap_tcp nmap_udp nmap_onion ; do
|
||||
for nmap in nmap_relay nmap_dht nmap_path ; do
|
||||
# [ $nmap = select_tcp ] && continue
|
||||
# [ $nmap = select_udp ] && continue
|
||||
if [ $nmap = nmap_dht ] && [ $HAVE_TOR = 1 ] ; then
|
||||
INFO skipping $nmap because HAVE_TOR
|
||||
continue
|
||||
fi
|
||||
INFO $i $the_base.$nmap
|
||||
DBUG $target --command info --info $nmap \
|
||||
--output $the_base.$nmap.out $the_tox
|
||||
$EXE $target --command info --info $nmap \
|
||||
--output $the_base.$nmap.out $the_tox 2>$the_base.$nmap.err || {
|
||||
# select_tcp may be empty and jq errors
|
||||
# exit ${i}1
|
||||
WARN $i $the_base.$nmap.err
|
||||
WARN $i $? $the_base.$nmap.err
|
||||
tail $the_base.$nmap.err
|
||||
continue
|
||||
}
|
||||
[ -s $the_base.$nmap.out ] || {
|
||||
ERROR $i $the_base.$nmap.out
|
||||
WARN $i empty $the_base.$nmap.out
|
||||
continue
|
||||
}
|
||||
done
|
||||
done
|
||||
|
||||
i=5
|
||||
i=6
|
||||
##
|
||||
[ $# -ne 0 -a "$1" != "$i" ] || \
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
! INFO $i Eating dogfood || \
|
||||
for the_json in $json ; do
|
||||
DBUG $i $the_json
|
||||
the_base=`echo $the_json | sed -e 's/.save$//' -e 's/.json$//'`
|
||||
for nmap in nmap_tcp nmap_udp ; do
|
||||
INFO $i $the_base.$nmap
|
||||
if [ $nmap = nmap_udp ] && [ $HAVE_TOR = 1 ] ; then
|
||||
INFO skipping $nmap because HAVE_TOR
|
||||
continue
|
||||
fi
|
||||
INFO $i $target --command nodes --nodes $nmap --output $the_base.$nmap
|
||||
$EXE $target --command nodes --nodes $nmap \
|
||||
--output $the_base.$nmap $the_json 2>$the_base.$nmap.err || {
|
||||
WARN $i $the_json $nmap ${i}1
|
||||
@@ -209,7 +310,7 @@ for the_json in $json ; do
|
||||
done
|
||||
done
|
||||
|
||||
i=6
|
||||
i=7
|
||||
DBUG $i
|
||||
$EXE $target --command nodes --nodes download \
|
||||
--output /tmp/toxic_nodes.new $json || {
|
||||
@@ -217,16 +318,23 @@ $EXE $target --command nodes --nodes download \
|
||||
exit ${i}1
|
||||
}
|
||||
[ -s /tmp/toxic_nodes.new ] || exit ${i}4
|
||||
INFO $i downloaded /tmp/toxic_nodes.new
|
||||
json=/tmp/toxic_nodes.new
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
jq . < $json >/tmp/toxic_nodes.new.json 2>>/tmp/toxic_nodes.new.err || {
|
||||
[ $# -ne 0 -a "$1" != "$i" ] || \
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
jq . < $json >/tmp/toxic_nodes.new.json 2>>/tmp/toxic_nodes.new.json.err || {
|
||||
ERROR $i jq $json
|
||||
exit ${i}2
|
||||
}
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
grep error: /tmp/toxic_nodes.new.err && {
|
||||
}
|
||||
INFO $i jq from /tmp/toxic_nodes.new.json
|
||||
|
||||
[ $# -ne 0 -a "$1" != "$i" ] || \
|
||||
[ "$HAVE_JQ" = 0 ] || \
|
||||
grep error: /tmp/toxic_nodes.new.json.err && {
|
||||
ERROR $i jq $json
|
||||
exit ${i}3
|
||||
}
|
||||
}
|
||||
INFO $i no errors in /tmp/toxic_nodes.new.err
|
||||
|
||||
|
||||
exit 0
|
938 tox_savefile.py
@@ -1,938 +0,0 @@
|
||||
# -*- mode: python; indent-tabs-mode: nil; py-indent-offset: 4; coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Reads a tox profile and prints out information on what's in there to stderr.
|
||||
|
||||
Call it with one argument, the filename of the profile for the decrypt or info
|
||||
commands, or the filename of the nodes file for the nodes command.
|
||||
|
||||
4 commands are supported:
|
||||
--command decrypt
|
||||
decrypts the profile and writes to the result to stdout
|
||||
|
||||
--command info
|
||||
prints info about what's in the Tox profile to stderr
|
||||
|
||||
--command nodes
|
||||
assumes you are reading a json nodes file instead of a profile
|
||||
|
||||
--command edits
|
||||
edits fields in a Tox profile with --output to a file
|
||||
|
||||
"""
|
||||
|
||||
"""
|
||||
--output Destination for info/decrypt/edit/nodes/download
|
||||
--info default='info',
|
||||
choices=[info, save, repr, yaml,json, pprint]
|
||||
with --info=info prints info about the profile to stderr
|
||||
yaml,json, pprint, repr - output format
|
||||
nmap_udp - test DHT nodes with nmap
|
||||
nmap_tcp - test TCP_RELAY nodes with nmap
|
||||
nmap_onion - test PATH_NODE nodes with nmap
|
||||
--indent for pprint/yaml/json default=2
|
||||
|
||||
--nodes
|
||||
choices=[select_tcp, select_udp, nmap_tcp, select_version, nmap_udp]
|
||||
select_udp - select udp nodes
|
||||
select_tcp - select tcp nodes
|
||||
nmap_udp - test UDP nodes with nmap
|
||||
nmap_tcp - test TCP nodes with nmap
|
||||
select_version - select nodes that are the latest version
|
||||
download - download nodes from --download_nodes_url
|
||||
--download_nodes_url https://nodes.tox.chat/json
|
||||
|
||||
--edit
|
||||
help - print a summary of what fields can be edited
|
||||
section,num,key,val - edit the field section,num,key with val
|
||||
|
||||
|
||||
"""
|
||||
|
||||
# originally from:
|
||||
# https://stackoverflow.com/questions/30901873/what-format-are-tox-files-stored-in
|
||||
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import struct
|
||||
from socket import inet_ntop, AF_INET6, AF_INET
|
||||
import logging
|
||||
import argparse
|
||||
from pprint import pprint
|
||||
import shutil
|
||||
|
||||
try:
|
||||
# https://pypi.org/project/msgpack/
|
||||
import msgpack
|
||||
except ImportError as e:
|
||||
msgpack = None
|
||||
try:
|
||||
import yaml
|
||||
except ImportError as e:
|
||||
yaml = None
|
||||
try:
|
||||
import json
|
||||
except ImportError as e:
|
||||
json = None
|
||||
try:
|
||||
# https://pypi.org/project/coloredlogs/
|
||||
import coloredlogs
|
||||
if 'COLOREDLOGS_LEVEL_STYLES' not in os.environ:
|
||||
os.environ['COLOREDLOGS_LEVEL_STYLES'] = 'spam=22;debug=28;verbose=34;notice=220;warning=202;success=118,bold;error=124;critical=background=red'
|
||||
except ImportError as e:
|
||||
coloredlogs = False
|
||||
try:
|
||||
# https://git.plastiras.org/emdee/toxygen_wrapper
|
||||
from wrapper.toxencryptsave import ToxEncryptSave
|
||||
from wrapper_tests.support_http import download_url, bAreWeConnected
|
||||
except ImportError as e:
|
||||
print(f"Import Error {e}")
|
||||
print("Download toxygen_wrapper to deal with encrypted tox files, from:")
|
||||
print("https://git.plastiras.org/emdee/toxygen_wrapper")
|
||||
print("Just put the parent of the wrapper directory on your PYTHONPATH")
|
||||
print("You also need to link your libtoxcore.so and libtoxav.so")
|
||||
print("and libtoxencryptsave.so into wrapper/../libs/")
|
||||
print("Link all 3 from libtoxcore.so if you have only libtoxcore.so")
|
||||
ToxEncryptSave = None
|
||||
download_url = None
|
||||
|
||||
LOG = logging.getLogger('TSF')
|
||||
|
||||
# Fix for Windows
|
||||
sDIR = os.environ.get('TMPDIR', '/tmp')
|
||||
sTOX_VERSION = "1000002018"
|
||||
bHAVE_NMAP = shutil.which('nmap')
|
||||
bHAVE_JQ = shutil.which('jq')
|
||||
bHAVE_BASH = shutil.which('bash')
|
||||
bMARK = b'\x00\x00\x00\x00\x1f\x1b\xed\x15'
|
||||
bDEBUG = 'DEBUG' in os.environ and os.environ['DEBUG'] != 0
|
||||
def trace(s): LOG.log(LOG.level, '+ ' +s)
|
||||
LOG.trace = trace
|
||||
|
||||
global bOUT, aOUT, sENC
|
||||
aOUT = {}
|
||||
bOUT = b''
|
||||
sENC = sys.getdefaultencoding() # 'utf-8'
|
||||
lNULLS = ['', '[]', 'null']
|
||||
# grep '#''#' logging_tox_savefile.py|sed -e 's/.* //'
|
||||
sEDIT_HELP = """
|
||||
NAME,.,Nick_name,str
|
||||
STATUSMESSAGE,.,Status_message,str
|
||||
STATUS,.,Online_status,int
|
||||
NOSPAMKEYS,.,Nospam,hexstr
|
||||
NOSPAMKEYS,.,Public_key,hexstr
|
||||
NOSPAMKEYS,.,Private_key,hexstr
|
||||
DHT,.,DHTnode,
|
||||
TCP_RELAY,.,TCPnode,
|
||||
PATH_NODE,.,PATHnode,
|
||||
"""
|
||||
|
||||
#messenger.c
|
||||
MESSENGER_STATE_TYPE_NOSPAMKEYS = 1
|
||||
MESSENGER_STATE_TYPE_DHT = 2
|
||||
MESSENGER_STATE_TYPE_FRIENDS = 3
|
||||
MESSENGER_STATE_TYPE_NAME = 4
|
||||
MESSENGER_STATE_TYPE_STATUSMESSAGE = 5
|
||||
MESSENGER_STATE_TYPE_STATUS = 6
|
||||
MESSENGER_STATE_TYPE_GROUPS = 7
|
||||
MESSENGER_STATE_TYPE_TCP_RELAY = 10
|
||||
MESSENGER_STATE_TYPE_PATH_NODE = 11
|
||||
MESSENGER_STATE_TYPE_CONFERENCES = 20
|
||||
MESSENGER_STATE_TYPE_END = 255
|
||||
dSTATE_TYPE = {
|
||||
MESSENGER_STATE_TYPE_NOSPAMKEYS: "NOSPAMKEYS",
|
||||
MESSENGER_STATE_TYPE_DHT: "DHT",
|
||||
MESSENGER_STATE_TYPE_FRIENDS: "FRIENDS",
|
||||
MESSENGER_STATE_TYPE_NAME: "NAME",
|
||||
MESSENGER_STATE_TYPE_STATUSMESSAGE: "STATUSMESSAGE",
|
||||
MESSENGER_STATE_TYPE_STATUS: "STATUS",
|
||||
MESSENGER_STATE_TYPE_GROUPS: "GROUPS",
|
||||
MESSENGER_STATE_TYPE_TCP_RELAY: "TCP_RELAY",
|
||||
MESSENGER_STATE_TYPE_PATH_NODE: "PATH_NODE",
|
||||
MESSENGER_STATE_TYPE_CONFERENCES: "CONFERENCES",
|
||||
MESSENGER_STATE_TYPE_END: "END",
|
||||
}
|
||||
|
||||
def decrypt_data(data):
|
||||
from getpass import getpass
|
||||
|
||||
if not ToxEncryptSave: return data
|
||||
|
||||
oToxES = ToxEncryptSave()
|
||||
if not oToxES.is_data_encrypted(data):
|
||||
LOG.debug('Not encrypted')
|
||||
return data
|
||||
assert data[:8] == b'toxEsave', data[:8]
|
||||
|
||||
sys.stdout.flush()
|
||||
password = getpass('Password: ')
|
||||
assert password
|
||||
newData = oToxES.pass_decrypt(data, password)
|
||||
LOG.debug('Decrypted: ' +str(len(newData)) +' bytes')
|
||||
return newData
|
||||
|
||||
def str_to_hex(raw_id, length=None):
|
||||
if length is None: length = len(raw_id)
|
||||
res = ''.join('{:02x}'.format(ord(raw_id[i])) for i in range(length))
|
||||
return res.upper()
|
||||
|
||||
def bin_to_hex(raw_id, length=None):
|
||||
if length is None: length = len(raw_id)
|
||||
res = ''.join('{:02x}'.format(raw_id[i]) for i in range(length))
|
||||
return res.upper()
|
||||
|
||||
def lProcessFriends(state, index, length, result):
|
||||
"""Friend:
|
||||
|
||||
The integers in this structure are stored in Big Endian format.
|
||||
|
||||
Length Contents
|
||||
1 uint8_t Status
|
||||
32 Long term public key
|
||||
1024 Friend request message as a byte string
|
||||
1 PADDING
|
||||
2 uint16_t Size of the friend request message
|
||||
128 Name as a byte string
|
||||
2 uint16_t Size of the name
|
||||
1007 Status message as a byte string
|
||||
1 PADDING
|
||||
2 uint16_t Size of the status message
|
||||
1 uint8_t User status (see also: USERSTATUS)
|
||||
3 PADDING
|
||||
4 uint32_t Nospam (only used for sending a friend request)
|
||||
8 uint64_t Last seen time
|
||||
|
||||
"""
|
||||
dStatus = { # Status Meaning
|
||||
0: 'Not a friend',
|
||||
1: 'Friend added',
|
||||
2: 'Friend request sent',
|
||||
3: 'Confirmed friend',
|
||||
4: 'Friend online'
|
||||
}
|
||||
slen = 1+32+1024+1+2+128+2+1007+1+2+1+3+4+8 # 2216
|
||||
assert length % slen == 0
|
||||
lIN = []
|
||||
for i in range(length // slen):
|
||||
delta = i*slen
|
||||
status = struct.unpack_from(">b", result, delta)[0]
|
||||
o = delta+1; l = 32
|
||||
pk = bin_to_hex(result[o:o+l], l)
|
||||
|
||||
o = delta+1+32+1024+1+2+128; l = 2
|
||||
nsize = struct.unpack_from(">H", result, o)[0]
|
||||
o = delta+1+32+1024+1+2; l = 128
|
||||
name = str(result[o:o+nsize], sENC)
|
||||
|
||||
o = delta+1+32+1024+1+2+128+2+1007; l = 2
|
||||
msize = struct.unpack_from(">H", result, o)[0]
|
||||
o = delta+1+32+1024+1+2+128+2; l = 1007
|
||||
mame = str(result[o:o+msize], sENC)
|
||||
LOG.info(f"Friend #{i} {dStatus[status]} {name} {pk}")
|
||||
lIN += [{"Status": dStatus[status],
|
||||
"Name": name,
|
||||
"Pk": pk}]
|
||||
return lIN
|
||||
|
||||
def lProcessGroups(state, index, length, result, label="GROUPS"):
|
||||
"""
|
||||
No GROUPS description in spec.html
|
||||
"""
|
||||
global sENC
|
||||
lIN = []
|
||||
i = 0
|
||||
if not msgpack:
|
||||
LOG.warn(f"process_chunk Groups = NO msgpack bytes={length}")
|
||||
return []
|
||||
try:
|
||||
groups = msgpack.loads(result, raw=True)
|
||||
LOG.info(f"{label} {len(groups)} groups")
|
||||
for group in groups:
|
||||
assert len(group) == 7, group
|
||||
i += 1
|
||||
|
||||
state_values, \
|
||||
state_bin, \
|
||||
topic_info, \
|
||||
mod_list, \
|
||||
keys, \
|
||||
self_info, \
|
||||
saved_peers, = group
|
||||
|
||||
assert len(state_values) == 8, state_values
|
||||
manually_disconnected, \
|
||||
group_name_len, \
|
||||
privacy_state, \
|
||||
maxpeers, \
|
||||
password_length, \
|
||||
version, \
|
||||
topic_lock, \
|
||||
voice_state = state_values
|
||||
LOG.info(f"lProcessGroups #{i} version={version}")
|
||||
dBINS = {"Version": version,
|
||||
"Privacy_state": privacy_state}
|
||||
lIN += [{"State_values": dBINS}]
|
||||
|
||||
assert len(state_bin) == 5, state_bin
|
||||
shared_state_sig, \
|
||||
founder_public_key, \
|
||||
group_name_len, \
|
||||
password_length, \
|
||||
mod_list_hash = state_bin
|
||||
LOG.info(f"lProcessGroups #{i} founder_public_key={bin_to_hex(founder_public_key)}")
|
||||
dBINS = {"Founder_public_key": bin_to_hex(founder_public_key)}
|
||||
lIN += [{"State_bin": dBINS}]
|
||||
|
||||
assert len(topic_info) == 6, topic_info
|
||||
topic_info_topic = str(topic_info[3], sENC)
|
||||
LOG.info(f"lProcessGroups #{i} topic_info_topic={topic_info_topic}")
|
||||
dBINS = {"topic_info_topic": topic_info_topic}
|
||||
lIN += [{"Topic_info": dBINS}]
|
||||
|
||||
assert len(mod_list) == 2, mod_list
|
||||
num_moderators = mod_list[0]
|
||||
LOG.info(f"lProcessGroups #{i} num moderators={mod_list[0]}")
|
||||
#define CRYPTO_SIGN_PUBLIC_KEY_SIZE 32
|
||||
mods = mod_list[1]
|
||||
assert len(mods) % 32 == 0, len(mods)
|
||||
assert len(mods) == num_moderators * 32, len(mods)
|
||||
lMODS = []
|
||||
for j in range(num_moderators):
|
||||
mod = mods[j*32:j*32 + 32]
|
||||
LOG.info(f"lProcessGroups group#{i} mod#{j} sig_pk={bin_to_hex(mod)}")
|
||||
lMODS += [{"Sig_pk": bin_to_hex(mod)}]
|
||||
lIN += [{"Moderators": lMODS}]
|
||||
|
||||
assert len(keys) == 4, keys
|
||||
LOG.debug(f"lProcessGroups #{i} {repr(list(map(len, keys)))}")
|
||||
chat_public_key, \
|
||||
chat_secret_key, \
|
||||
self_public_key, \
|
||||
self_secret_key = keys
|
||||
LOG.info(f"lProcessGroups #{i} chat_public_key={bin_to_hex(chat_public_key)}")
|
||||
lIN[0].update({"Chat_public_key": bin_to_hex(chat_public_key)})
|
||||
if int(bin_to_hex(chat_secret_key), 16) != 0:
|
||||
# 192 * b'0'
|
||||
LOG.info(f"lProcessGroups #{i} chat_secret_key={bin_to_hex(chat_secret_key)}")
|
||||
lIN[0].update({"Chat_secret_key": bin_to_hex(chat_secret_key)})
|
||||
|
||||
LOG.info(f"lProcessGroups #{i} self_public_key={bin_to_hex(self_public_key)}")
|
||||
lIN[0].update({"Self_public_key": bin_to_hex(self_public_key)})
|
||||
LOG.info(f"lProcessGroups #{i} self_secret_key={bin_to_hex(self_secret_key)}")
|
||||
lIN[0].update({"Self_secret_key": bin_to_hex(self_secret_key)})
|
||||
|
||||
assert len(self_info) == 4, self_info
|
||||
self_nick_len, self_role, self_status, self_nick = self_info
|
||||
self_nick = str(self_nick, sENC)
|
||||
LOG.info(f"lProcessGroups #{i} self_nick={self_nick}")
|
||||
dBINS = {"Self_nick": self_nick}
|
||||
lIN += [{"Self_info": dBINS}]
|
||||
|
||||
assert len(saved_peers) == 2, saved_peers
|
||||
|
||||
except Exception as e:
|
||||
LOG.warn(f"process_chunk Groups #{i} error={e}")
|
||||
return lIN
|
||||
|
||||
def lProcessNodeInfo(state, index, length, result, label="DHTnode"):
|
||||
"""Node Info (packed node format)
|
||||
|
||||
The Node Info data structure contains a Transport Protocol, a Socket
|
||||
Address, and a Public Key. This is sufficient information to start
|
||||
communicating with that node. The binary representation of a Node Info is
|
||||
called the “packed node format”.
|
||||
|
||||
Length Type Contents
|
||||
1 bit Transport Protocol UDP = 0, TCP = 1
|
||||
7 bit Address Family 2 = IPv4, 10 = IPv6
|
||||
4 | 16 IP address 4 bytes for IPv4, 16 bytes for IPv6
|
||||
2 Port Number Port number
|
||||
32 Public Key Node ID
|
||||
|
||||
"""
|
||||
delta = 0
|
||||
relay = 0
|
||||
lIN = []
|
||||
while length > 0:
|
||||
status = struct.unpack_from(">B", result, delta)[0]
|
||||
if status >= 128:
|
||||
prot = 'TCP'
|
||||
af = status - 128
|
||||
else:
|
||||
prot = 'UDP'
|
||||
af = status
|
||||
if af == 2:
|
||||
af = 'IPv4'
|
||||
alen = 4
|
||||
ipaddr = inet_ntop(AF_INET, result[delta+1:delta+1+alen])
|
||||
else:
|
||||
af = 'IPv6'
|
||||
alen = 16
|
||||
ipaddr = inet_ntop(AF_INET6, result[delta+1:delta+1+alen])
|
||||
total = 1 + alen + 2 + 32
|
||||
port = int(struct.unpack_from(">H", result, delta+1+alen)[0])
|
||||
pk = bin_to_hex(result[delta+1+alen+2:delta+1+alen+2+32], 32)
|
||||
LOG.info(f"{label} #{relay} bytes={length} status={status} prot={prot} af={af} ip={ipaddr} port={port} pk={pk}")
|
||||
lIN += [{"Bytes": length,
|
||||
"Status": status,
|
||||
"Prot": prot,
|
||||
"Af": af,
|
||||
"Ip": ipaddr,
|
||||
"Port": port,
|
||||
"Pk": pk}]
|
||||
delta += total
|
||||
length -= total
|
||||
relay += 1
|
||||
return lIN
|
||||
|
||||
def lProcessDHTnodes(state, index, length, result, label="DHTnode"):
|
||||
relay = 0
|
||||
status = struct.unpack_from("<L", result, 0)[0]
|
||||
# 4 uint32_t (0x159000D)
|
||||
assert status == 0x159000D
|
||||
length -= 4
|
||||
delta = 4
|
||||
lIN = []
|
||||
while length > 0:
|
||||
slen = struct.unpack_from("<L", result, delta)[0]
|
||||
stype = struct.unpack_from("<H", result, delta+4)[0]
|
||||
smark = struct.unpack_from("<H", result, delta+6)[0]
|
||||
assert smark == 0x11CE
|
||||
total = slen + 4 + 2 + 2
|
||||
subtotal = 0
|
||||
offset = delta
|
||||
while offset < slen: #loop over nodes
|
||||
status = struct.unpack_from(">B", result, offset+8)[0]
|
||||
assert status < 12
|
||||
prot = 'UDP'
|
||||
if status == 2:
|
||||
af = 'IPv4'
|
||||
alen = 4
|
||||
ipaddr = inet_ntop(AF_INET, result[offset+8+1:offset+8+1+alen])
|
||||
else:
|
||||
af = 'IPv6'
|
||||
alen = 16
|
||||
ipaddr = inet_ntop(AF_INET6, result[offset+8+1:offset+8+1+alen])
|
||||
subtotal = 1 + alen + 2 + 32
|
||||
port = int(struct.unpack_from(">H", result, offset+8+1+alen)[0])
|
||||
pk = bin_to_hex(result[offset+8+1+alen+2:offset+8+1+alen+2+32], 32)
|
||||
|
||||
LOG.info(f"{label} #{relay} status={status} ipaddr={ipaddr} port={port} {pk}")
|
||||
lIN += [{
|
||||
"Status": status,
|
||||
"Prot": prot,
|
||||
"Af": af,
|
||||
"Ip": ipaddr,
|
||||
"Port": port,
|
||||
"Pk": pk}]
|
||||
offset += subtotal
|
||||
delta += total
|
||||
length -= total
|
||||
relay += 1
|
||||
return lIN
|
||||
|
||||
def process_chunk(index, state, oArgs=None):
|
||||
global bOUT, aOUT
|
||||
global sENC
|
||||
|
||||
length = struct.unpack_from("<I", state, index)[0]
|
||||
data_type = struct.unpack_from("<H", state, index + 4)[0]
|
||||
check = struct.unpack_from("<H", state, index + 6)[0]
|
||||
assert check == 0x01CE, check
|
||||
new_index = index + length + 8
|
||||
result = state[index + 8:index + 8 + length]
|
||||
|
||||
label = dSTATE_TYPE[data_type]
|
||||
if oArgs.command == 'edit' and oArgs.edit:
|
||||
section,num,key,val = oArgs.edit.split(',', 3)
|
||||
|
||||
diff = index - len(bOUT)
|
||||
if bDEBUG and diff > 0:
|
||||
LOG.warn(f"PROCESS_CHUNK {label} index={index} bOUT={len(bOUT)} delta={diff} length={length}")
|
||||
elif bDEBUG:
|
||||
LOG.trace(f"PROCESS_CHUNK {label} index={index} bOUT={len(bOUT)} delta={diff} length={length}")
|
||||
|
||||
if data_type == MESSENGER_STATE_TYPE_NOSPAMKEYS:
|
||||
nospam = bin_to_hex(result[0:4])
|
||||
public_key = bin_to_hex(result[4:36])
|
||||
private_key = bin_to_hex(result[36:68])
|
||||
LOG.info(f"nospam = {nospam}")
|
||||
LOG.info(f"public_key = {public_key}")
|
||||
LOG.info(f"private_key = {private_key}")
|
||||
aIN = {"Nospam": f"{nospam}",
|
||||
"Public_key": f"{public_key}",
|
||||
"Private_key": f"{private_key}"}
|
||||
aOUT.update({label: aIN})
|
||||
if oArgs.command == 'edit' and section == label:
|
||||
## NOSPAMKEYS,.,Nospam,hexstr
|
||||
if key == "Nospam":
|
||||
assert len(val) == 4*2, val
|
||||
result = bytes.fromhex (val) +result[4:]
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
## NOSPAMKEYS,.,Public_key,hexstr
|
||||
elif key == "Public_key":
|
||||
assert len(val) == 32 * 2, val
|
||||
result = +result[0:4] +bytes.fromhex(val) +result[36:]
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
## NOSPAMKEYS,.,Private_key,hexstr
|
||||
elif key == "Private_key":
|
||||
assert len(val) == 32 * 2, val
|
||||
result = +result[0:36] +bytes.fromhex(val)
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_DHT:
|
||||
LOG.debug(f"process_chunk {label} length={length}")
|
||||
if length > 4:
|
||||
lIN = lProcessDHTnodes(state, index, length, result, "DHTnode")
|
||||
else:
|
||||
lIN = []
|
||||
LOG.info(f"NO {label}")
|
||||
aOUT.update({label: lIN})
|
||||
if oArgs.command == 'edit' and section == label:
|
||||
## DHT,.,DHTnode,
|
||||
if num == '.' and key == "DHTnode" and val in lNULLS:
|
||||
# 4 uint32_t (0x159000D)
|
||||
status = 0x159000D
|
||||
# FixMe - dunno
|
||||
result = struct.pack("<L", status)
|
||||
length = 4
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_FRIENDS:
|
||||
LOG.info(f"{label} {length // 2216} FRIENDS {length % 2216}")
|
||||
if length > 0:
|
||||
lIN = lProcessFriends(state, index, length, result)
|
||||
else:
|
||||
lIN = []
|
||||
LOG.info(f"NO {label}")
|
||||
aOUT.update({label: lIN})
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_NAME:
|
||||
name = str(result, sENC)
|
||||
LOG.info(f"{label} Nick_name = " +name)
|
||||
aIN = {"Nick_name": name}
|
||||
aOUT.update({label: aIN})
|
||||
if oArgs.command == 'edit' and section == label:
|
||||
## NAME,.,Nick_name,str
|
||||
if key == "Nick_name":
|
||||
result = bytes(val, sENC)
|
||||
length = len(result)
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_STATUSMESSAGE:
|
||||
mess = str(result, sENC)
|
||||
LOG.info(f"{label} StatusMessage = " +mess)
|
||||
aIN = {"Status_message": mess}
|
||||
aOUT.update({label: aIN})
|
||||
if oArgs.command == 'edit' and section == label:
|
||||
## STATUSMESSAGE,.,Status_message,str
|
||||
if key == "Status_message":
|
||||
result = bytes(val, sENC)
|
||||
length = len(result)
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_STATUS:
|
||||
# 1 uint8_t status (0 = online, 1 = away, 2 = busy)
|
||||
dStatus = {0: 'online', 1: 'away', 2: 'busy'}
|
||||
status = struct.unpack_from(">b", state, index)[0]
|
||||
status = dStatus[status]
|
||||
LOG.info(f"{label} = " +status)
|
||||
aIN = {f"Online_status": status}
|
||||
aOUT.update({label: aIN})
|
||||
if oArgs.command == 'edit' and section == label:
|
||||
## STATUS,.,Online_status,int
|
||||
if key == "Online_status":
|
||||
result = struct.pack(">b", int(val))
|
||||
length = len(result)
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_GROUPS:
|
||||
if length > 0:
|
||||
lIN = lProcessGroups(state, index, length, result, label)
|
||||
else:
|
||||
lIN = []
|
||||
LOG.info(f"NO {label}")
|
||||
aOUT.update({label: lIN})
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_TCP_RELAY:
|
||||
if length > 0:
|
||||
lIN = lProcessNodeInfo(state, index, length, result, "TCPnode")
|
||||
else:
|
||||
lIN = []
|
||||
LOG.info(f"NO {label}")
|
||||
aOUT.update({label: lIN})
|
||||
if oArgs.command == 'edit' and section == label:
|
||||
## TCP_RELAY,.,TCPnode,
|
||||
if num == '.' and key == "TCPnode" and val in lNULLS:
|
||||
result = b''
|
||||
length = 0
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_PATH_NODE:
|
||||
#define NUM_SAVED_PATH_NODES 8
|
||||
if not length % 8 == 0:
|
||||
# this should be an assert?
|
||||
LOG.warn(f"process_chunk {label} mod={length % 8}")
|
||||
else:
|
||||
LOG.debug(f"process_chunk {label} bytes={length}")
|
||||
lIN = lProcessNodeInfo(state, index, length, result, "PATHnode")
|
||||
aOUT.update({label: lIN})
|
||||
if oArgs.command == 'edit' and section == label:
|
||||
## PATH_NODE,.,PATHnode,
|
||||
if num == '.' and key == "PATHnode" and val in lNULLS:
|
||||
result = b''
|
||||
length = 0
|
||||
LOG.info(f"{label} {key} EDITED to {val}")
|
||||
|
||||
elif data_type == MESSENGER_STATE_TYPE_CONFERENCES:
|
||||
lIN = []
|
||||
if length > 0:
|
||||
LOG.debug(f"TODO process_chunk {label} bytes={length}")
|
||||
else:
|
||||
LOG.info(f"NO {label}")
|
||||
aOUT.update({label: []})
|
||||
|
||||
elif data_type != MESSENGER_STATE_TYPE_END:
|
||||
LOG.error("UNRECOGNIZED datatype={datatype}")
|
||||
sys.exit(1)
|
||||
|
||||
else:
|
||||
LOG.info("END") # That's all folks...
|
||||
# drop through
|
||||
|
||||
# We repack as we read: or edit as we parse; simply edit result and length.
|
||||
# We'll add the results back to bOUT to see if we get what we started with.
|
||||
# Then will will be able to selectively null sections or selectively edit.
|
||||
assert length == len(result), length
|
||||
bOUT += struct.pack("<I", length) + \
|
||||
struct.pack("<H", data_type) + \
|
||||
struct.pack("<H", check) + \
|
||||
result
|
||||
|
||||
if data_type == MESSENGER_STATE_TYPE_END or index + 8 >= len(state):
|
||||
diff = len(bSAVE) - len(bOUT)
|
||||
if oArgs.command != 'edit' and diff > 0:
|
||||
# if short repacking as we read - tox_profile is padded with nulls
|
||||
LOG.warn(f"PROCESS_CHUNK bSAVE={len(bSAVE)} bOUT={len(bOUT)} delta={diff}")
|
||||
return
|
||||
|
||||
process_chunk(new_index, state, oArgs)
|
||||
|
||||
sNMAP_TCP = """#!/bin/bash
|
||||
ip=""
|
||||
declare -a ports
|
||||
jq '.|with_entries(select(.key|match("nodes"))).nodes[]|select(.status_tcp)|select(.ipv4|match("."))|.ipv4,.tcp_ports' | while read line ; do
|
||||
if [ -z "$ip" ] ; then
|
||||
ip=`echo $line|sed -e 's/"//g'`
|
||||
ports=()
|
||||
continue
|
||||
elif [ "$line" = '[' ] ; then
|
||||
continue
|
||||
elif [ "$line" = ']' ] ; then
|
||||
if ! route | grep -q ^def ; then
|
||||
echo ERROR no route
|
||||
exit 3
|
||||
fi
|
||||
if [ "$ip" = '"NONE"' -o "$ip" = 'NONE' ] ; then
|
||||
:
|
||||
elif ping -c 1 $ip | grep '100% packet loss' ; then
|
||||
echo WARN failed ping $ip
|
||||
else
|
||||
echo INFO $ip "${ports[*]}"
|
||||
cmd="nmap -Pn -n -sT -p T:"`echo "${ports[*]}" |sed -e 's/ /,/g'`
|
||||
echo DBUG $cmd $ip
|
||||
$cmd $ip | grep /tcp
|
||||
fi
|
||||
ip=""
|
||||
continue
|
||||
else
|
||||
port=`echo $line|sed -e 's/,//'`
|
||||
ports+=($port)
|
||||
fi
|
||||
done"""
|
||||
|
||||
def vBashFileNmapTcp():
|
||||
assert bHAVE_JQ, "jq is required for this command"
|
||||
assert bHAVE_NMAP, "nmap is required for this command"
|
||||
assert bHAVE_BASH, "bash is required for this command"
|
||||
f = "NmapTcp.bash"
|
||||
sFile = os.path.join(sDIR, f)
|
||||
if not os.path.exists(sFile):
|
||||
with open(sFile, 'wt') as iFd:
|
||||
iFd.write(sNMAP_TCP)
|
||||
os.chmod(sFile, 0o0775)
|
||||
return sFile
|
||||
|
||||
def vBashFileNmapUdp():
|
||||
assert bHAVE_JQ, "jq is required for this command"
|
||||
assert bHAVE_NMAP, "nmap is required for this command"
|
||||
assert bHAVE_BASH, "bash is required for this command"
|
||||
f = "NmapUdp.bash"
|
||||
sFile = os.path.join(sDIR, f)
|
||||
if not os.path.exists(sFile):
|
||||
with open(sFile, 'wt') as iFd:
|
||||
iFd.write(sNMAP_TCP.
|
||||
replace('nmap -Pn -n -sT -p T',
|
||||
'nmap -Pn -n -sU -p U').
|
||||
replace('tcp_ports','udp_ports').
|
||||
replace('status_tcp','status_udp'))
|
||||
os.chmod(sFile, 0o0775)
|
||||
return sFile
|
||||
|
||||
def vOsSystemNmapUdp(l, oArgs):
|
||||
iErrs = 0
|
||||
for elt in aOUT["DHT"]:
|
||||
cmd = f"sudo nmap -Pn -n -sU -p U:{elt['Port']} {elt['Ip']}"
|
||||
iErrs += os.system(cmd +f" >> {oArgs.output} 2>&1")
|
||||
if iErrs:
|
||||
LOG.warn(f"{oArgs.info} {iErrs} ERRORs to {oArgs.output}")
|
||||
print(f"{oArgs.info} {iErrs} ERRORs to {oArgs.output}")
|
||||
else:
|
||||
LOG.info(f"{oArgs.info} NO errors to {oArgs.output}")
|
||||
print(f"{oArgs.info} NO errors to {oArgs.output}")
|
||||
|
||||
def vOsSystemNmapTcp(l, oArgs):
|
||||
iErrs = 0
|
||||
for elt in l:
|
||||
cmd = f"sudo nmap -Pn -n -sT -p T:{elt['Port']} {elt['Ip']}"
|
||||
print(f"{oArgs.info} NO errors to {oArgs.output}")
|
||||
iErrs += os.system(cmd +f" >> {oArgs.output} 2>&1")
|
||||
if iErrs:
|
||||
LOG.warn(f"{oArgs.info} {iErrs} ERRORs to {oArgs.output}")
|
||||
print(f"{oArgs.info} {iErrs} ERRORs to {oArgs.output}")
|
||||
else:
|
||||
LOG.info(f"{oArgs.info} NO errors to {oArgs.output}")
|
||||
print(f"{oArgs.info} NO errors to {oArgs.output}")
|
||||
|
||||
def vSetupLogging(loglevel=logging.DEBUG):
|
||||
global LOG
|
||||
if coloredlogs:
|
||||
aKw = dict(level=loglevel,
|
||||
logger=LOG,
|
||||
fmt='%(name)s %(levelname)s %(message)s')
|
||||
coloredlogs.install(**aKw)
|
||||
else:
|
||||
aKw = dict(level=loglevel,
|
||||
format='%(name)s %(levelname)-4s %(message)s')
|
||||
logging.basicConfig(**aKw)
|
||||
|
||||
logging._defaultFormatter = logging.Formatter(datefmt='%m-%d %H:%M:%S')
|
||||
logging._defaultFormatter.default_time_format = '%m-%d %H:%M:%S'
|
||||
logging._defaultFormatter.default_msec_format = ''

def oMainArgparser(_=None):
    if not os.path.exists('/proc/sys/net/ipv6'):
        bIpV6 = 'False'
    else:
        bIpV6 = 'True'
    lIpV6Choices = [bIpV6, 'False']

    parser = argparse.ArgumentParser(epilog=__doc__)
    # list(dSTATE_TYPE.values())
    # ['nospamkeys', 'dht', 'friends', 'name', 'statusmessage', 'status', 'groups', 'tcp_relay', 'path_node', 'conferences']

    parser.add_argument('--output', type=str, default='',
                        help='Destination for info/decrypt - defaults to stderr')
    parser.add_argument('--command', type=str, default='info',
                        choices=['info', 'decrypt', 'nodes', 'edit'],
                        required=True,
                        help='Action command - default: info')
    # nargs='+',
    parser.add_argument('--edit', type=str, default='',
                        help="comma separated SECTION,num,key,value - or 'help' for help")
    parser.add_argument('--indent', type=int, default=2,
                        help='Indent for yaml/json/pprint')
    choices = ['info', 'save', 'repr', 'yaml', 'json', 'pprint']
    if bHAVE_NMAP: choices += ['nmap_tcp', 'nmap_udp', 'nmap_onion']
    parser.add_argument('--info', type=str, default='info',
                        choices=choices,
                        help='Format for info command')
    choices = []
    if bHAVE_JQ:
        choices += ['select_tcp', 'select_udp', 'select_version']
        if bHAVE_NMAP: choices += ['nmap_tcp', 'nmap_udp']
    if download_url:
        choices += ['download']
    parser.add_argument('--nodes', type=str, default='',
                        choices=choices,
                        help='Action for nodes command (requires jq)')
    parser.add_argument('--download_nodes_url', type=str,
                        default='https://nodes.tox.chat/json')
    parser.add_argument('--encoding', type=str, default=sENC)
    parser.add_argument('profile', type=str, nargs='?', default=None,
                        help='tox profile file - may be encrypted')
    return parser
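
# Typical invocations (script and file names are illustrative only):
#   python3 tox_profile.py --command info --info yaml --output /tmp/profile.yaml profile.tox
#   python3 tox_profile.py --command nodes --nodes select_tcp --output /tmp/tcp.json DHTnodes.json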

if __name__ == '__main__':
    lArgv = sys.argv[1:]
    parser = oMainArgparser()
    oArgs = parser.parse_args(lArgv)
    if oArgs.command in ['edit'] and oArgs.edit == 'help':
        l = list(dSTATE_TYPE.values())
        l.remove('END')
        print('Available Sections: ' + repr(l))
        print('Supported Quads: section,num,key,type ' + sEDIT_HELP)
        sys.exit(0)

    sFile = oArgs.profile
    assert os.path.isfile(sFile), sFile

    sENC = oArgs.encoding
    vSetupLogging()

    with open(sFile, 'rb') as iFd:
        bSAVE = iFd.read()
    if ToxEncryptSave and bSAVE[:8] == b'toxEsave':
        try:
            bSAVE = decrypt_data(bSAVE)
        except Exception as e:
            LOG.error(f"decrypting {sFile} - {e}")
            sys.exit(1)
    assert bSAVE

    oStream = None
    if oArgs.command == 'decrypt':
        assert oArgs.output, "--output required for this command"
        oStream = open(oArgs.output, 'wb')
        iRet = oStream.write(bSAVE)
        LOG.info(f"Wrote {iRet} to {oArgs.output}")
        iRet = 0

    elif oArgs.command == 'nodes':
        iRet = -1
        ep_sec = str(int(time.time()))
        json_head = '{"last_scan":' + ep_sec \
            + ',"last_refresh":' + ep_sec \
            + ',"nodes":['
        if oArgs.nodes == 'select_tcp':
            assert oArgs.output, "--output required for this command"
            assert bHAVE_JQ, "jq is required for this command"
            with open(oArgs.output, 'wt') as oFd:
                oFd.write(json_head)
            cmd = f"cat '{sFile}' | jq '.|with_entries(select(.key|match(\"nodes\"))).nodes[]|select(.status_tcp)|select(.ipv4|match(\".\"))' "
            iRet = os.system(cmd + "| sed -e '2,$s/^{/,{/'" + f" >>{oArgs.output}")
            with open(oArgs.output, 'at') as oFd:
                oFd.write(']}\n')

        elif oArgs.nodes == 'select_udp':
            assert oArgs.output, "--output required for this command"
            assert bHAVE_JQ, "jq is required for this command"
            with open(oArgs.output, 'wt') as oFd:
                oFd.write(json_head)
            cmd = f"cat '{sFile}' | jq '.|with_entries(select(.key|match(\"nodes\"))).nodes[]|select(.status_udp)|select(.ipv4|match(\".\"))' "
            iRet = os.system(cmd + "| sed -e '2,$s/^{/,{/'" + f" >>{oArgs.output}")
            with open(oArgs.output, 'at') as oFd:
                oFd.write(']}\n')

        elif oArgs.nodes == 'select_version':
            assert bHAVE_JQ, "jq is required for this command"
            assert oArgs.output, "--output required for this command"
            with open(oArgs.output, 'wt') as oFd:
                oFd.write(json_head)
            cmd = f"cat '{sFile}' | jq '.|with_entries(select(.key|match(\"nodes\"))).nodes[]|select(.status_udp)|select(.version|match(\"{sTOX_VERSION}\"))'"
            iRet = os.system(cmd + "| sed -e '2,$s/^{/,{/'" + f" >>{oArgs.output}")
            with open(oArgs.output, 'at') as oFd:
                oFd.write(']}\n')
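
        # Note on the select_* branches above: jq emits one JSON object per
        # matching node; the sed pass '2,$s/^{/,{/' prefixes every object after
        # the first with a comma, so json_head + the appended output + ']}'
        # forms a valid nodes JSON document.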

        elif oArgs.nodes == 'nmap_tcp':
            assert oArgs.output, "--output required for this command"
            if not bAreWeConnected():
                LOG.warning(f"{oArgs.nodes} we are not connected")
            cmd = vBashFileNmapTcp()
            iRet = os.system(f"bash {cmd} < '{sFile}'" + f" >'{oArgs.output}'")

        elif oArgs.nodes == 'nmap_udp':
            assert oArgs.output, "--output required for this command"
            if not bAreWeConnected():
                LOG.warning(f"{oArgs.nodes} we are not connected")
            cmd = vBashFileNmapUdp()
            iRet = os.system(f"bash {cmd} < '{sFile}'" + f" >'{oArgs.output}'")

        elif oArgs.nodes == 'download' and download_url:
            if not bAreWeConnected():
                LOG.warning(f"{oArgs.nodes} we are not connected")
            url = oArgs.download_nodes_url
            b = download_url(url)
            if not b:
                LOG.warning("failed downloading list of nodes")
                iRet = -1
            else:
                if oArgs.output:
                    oStream = open(oArgs.output, 'wb')
                    oStream.write(b)
                else:
                    oStream = sys.stdout
                    oStream.write(str(b, sENC))
                iRet = -1
                LOG.info(f"downloaded list of nodes to {oStream}")

        if iRet > 0:
            LOG.warning(f"{oArgs.nodes} iRet={iRet} to {oArgs.output}")
        elif iRet == 0:
            LOG.info(f"{oArgs.nodes} iRet={iRet} to {oArgs.output}")

    elif oArgs.command in ['info', 'edit']:
        if oArgs.command in ['edit']:
            assert oArgs.output, "--output required for this command"
            assert oArgs.edit != '', "--edit required for this command"
        elif oArgs.command == 'info':
            # assert oArgs.info != '', "--info required for this command"
            if oArgs.info in ['save', 'yaml', 'json', 'repr', 'pprint']:
                assert oArgs.output, "--output required for this command"

        # toxEsave
        assert bSAVE[:8] == bMARK, "Not a Tox profile"
        bOUT = bMARK

        iErrs = 0
        process_chunk(len(bOUT), bSAVE, oArgs)
        if not bOUT:
            LOG.error(f"{oArgs.command} NO bOUT results")
        else:
            oStream = None
            LOG.debug(f"command={oArgs.command} len bOUT={len(bOUT)} results")

            if oArgs.command in ['edit'] or oArgs.info in ['save']:
                LOG.debug(f"{oArgs.command} saving to {oArgs.output}")
                oStream = open(oArgs.output, 'wb')
                if oStream.write(bOUT) > 0: iRet = 0
                LOG.info(f"{oArgs.info}ed iRet={iRet} to {oArgs.output}")
            elif oArgs.info == 'info':
                pass
            elif oArgs.info == 'yaml' and yaml:
                LOG.debug(f"{oArgs.command} saving to {oArgs.output}")
                oStream = open(oArgs.output, 'wt', encoding=sENC)
                yaml.dump(aOUT, stream=oStream, indent=oArgs.indent)
                if oStream.write('\n') > 0: iRet = 0
                LOG.info(f"{oArgs.info}ing iRet={iRet} to {oArgs.output}")
            elif oArgs.info == 'json' and json:
                LOG.debug(f"{oArgs.command} saving to {oArgs.output}")
                oStream = open(oArgs.output, 'wt', encoding=sENC)
                json.dump(aOUT, oStream, indent=oArgs.indent)
                if oStream.write('\n') > 0: iRet = 0
                LOG.info(f"{oArgs.info}ing iRet={iRet} to {oArgs.output}")
            elif oArgs.info == 'repr':
                LOG.debug(f"{oArgs.command} saving to {oArgs.output}")
                oStream = open(oArgs.output, 'wt', encoding=sENC)
                if oStream.write(repr(bOUT)) > 0: iRet = 0
                if oStream.write('\n') > 0: iRet = 0
                LOG.info(f"{oArgs.info}ing iRet={iRet} to {oArgs.output}")
            elif oArgs.info == 'pprint':
                LOG.debug(f"{oArgs.command} saving to {oArgs.output}")
                oStream = open(oArgs.output, 'wt', encoding=sENC)
                pprint(aOUT, stream=oStream, indent=oArgs.indent, width=80)
                iRet = 0
                LOG.info(f"{oArgs.info}ing iRet={iRet} to {oArgs.output}")
            elif oArgs.info == 'nmap_tcp' and bHAVE_NMAP:
                assert oArgs.output, "--output required for this command"
                vOsSystemNmapTcp(aOUT["TCP_RELAY"], oArgs)
            elif oArgs.info == 'nmap_udp' and bHAVE_NMAP:
                assert oArgs.output, "--output required for this command"
                vOsSystemNmapUdp(aOUT["DHT"], oArgs)
            elif oArgs.info == 'nmap_onion' and bHAVE_NMAP:
                assert oArgs.output, "--output required for this command"
                vOsSystemNmapUdp(aOUT["PATH_NODE"], oArgs)

    if oStream and oStream != sys.stdout and oStream != sys.stderr:
        oStream.close()

    sys.exit(0)