Compare commits
No commits in common. "master" and "cca_rework" have entirely different histories.
master
...
cca_rework
22
.gitignore
vendored
22
.gitignore
vendored
@ -1,22 +0,0 @@
|
|||||||
.vs/
|
|
||||||
*.o
|
|
||||||
*.swp
|
|
||||||
~*
|
|
||||||
*~
|
|
||||||
.idea/
|
|
||||||
cmake-build-debug/
|
|
||||||
cmake-build-debugandtest/
|
|
||||||
cmake-build-release/
|
|
||||||
*.stackdump
|
|
||||||
*.coredump
|
|
||||||
compile_commands.json
|
|
||||||
/build*
|
|
||||||
.clangd
|
|
||||||
.cache
|
|
||||||
|
|
||||||
.DS_Store
|
|
||||||
.AppleDouble
|
|
||||||
.LSOverride
|
|
||||||
|
|
||||||
CMakeLists.txt.user*
|
|
||||||
CMakeCache.txt
|
|
114
CMakeLists.txt
114
CMakeLists.txt
@ -1,21 +1,9 @@
|
|||||||
cmake_minimum_required(VERSION 3.24 FATAL_ERROR)
|
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
|
||||||
|
|
||||||
add_subdirectory(./external)
|
add_subdirectory(./external)
|
||||||
|
|
||||||
project(solanaceae)
|
project(solanaceae)
|
||||||
|
|
||||||
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
|
|
||||||
set(SOLANACEAE_NGCFT1_STANDALONE ON)
|
|
||||||
else()
|
|
||||||
set(SOLANACEAE_NGCFT1_STANDALONE OFF)
|
|
||||||
endif()
|
|
||||||
message("II SOLANACEAE_NGCFT1_STANDALONE " ${SOLANACEAE_NGCFT1_STANDALONE})
|
|
||||||
|
|
||||||
option(SOLANACEAE_NGCFT1_BUILD_PLUGINS "Build the solanaceae_ngcft1 plugins" ${SOLANACEAE_NGCFT1_BUILD_PLUGINS})
|
|
||||||
|
|
||||||
# TODO: move this stuff to src
|
|
||||||
########################################
|
|
||||||
|
|
||||||
add_library(solanaceae_ngcext
|
add_library(solanaceae_ngcext
|
||||||
./solanaceae/ngc_ext/ngcext.hpp
|
./solanaceae/ngc_ext/ngcext.hpp
|
||||||
./solanaceae/ngc_ext/ngcext.cpp
|
./solanaceae/ngc_ext/ngcext.cpp
|
||||||
@ -59,46 +47,13 @@ add_library(solanaceae_sha1_ngcft1
|
|||||||
# hacky deps
|
# hacky deps
|
||||||
./solanaceae/ngc_ft1_sha1/mio.hpp
|
./solanaceae/ngc_ft1_sha1/mio.hpp
|
||||||
./solanaceae/ngc_ft1_sha1/file_rw_mapped.hpp
|
./solanaceae/ngc_ft1_sha1/file_rw_mapped.hpp
|
||||||
./solanaceae/ngc_ft1_sha1/file_constructor.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/file_constructor.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/backends/sha1_mapped_filesystem.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/backends/sha1_mapped_filesystem.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/hash_utils.hpp
|
./solanaceae/ngc_ft1_sha1/hash_utils.hpp
|
||||||
./solanaceae/ngc_ft1_sha1/hash_utils.cpp
|
./solanaceae/ngc_ft1_sha1/hash_utils.cpp
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/util.hpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/ft1_sha1_info.hpp
|
./solanaceae/ngc_ft1_sha1/ft1_sha1_info.hpp
|
||||||
./solanaceae/ngc_ft1_sha1/ft1_sha1_info.cpp
|
./solanaceae/ngc_ft1_sha1/ft1_sha1_info.cpp
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/components.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/components.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/contact_components.hpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/chunk_picker.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/chunk_picker.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/participation.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/participation.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/re_announce_systems.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/re_announce_systems.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/chunk_picker_systems.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/chunk_picker_systems.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/transfer_stats_systems.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/transfer_stats_systems.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/sending_transfers.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/sending_transfers.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/receiving_transfers.hpp
|
|
||||||
./solanaceae/ngc_ft1_sha1/receiving_transfers.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp
|
./solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp
|
||||||
./solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp
|
./solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp
|
||||||
)
|
)
|
||||||
@ -110,72 +65,5 @@ target_link_libraries(solanaceae_sha1_ngcft1 PUBLIC
|
|||||||
sha1::sha1
|
sha1::sha1
|
||||||
solanaceae_tox_contacts
|
solanaceae_tox_contacts
|
||||||
solanaceae_message3
|
solanaceae_message3
|
||||||
solanaceae_object_store
|
|
||||||
solanaceae_file2
|
|
||||||
)
|
)
|
||||||
|
|
||||||
option(SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING "Build the solanaceae_ngcft1_sha1 tests" OFF)
|
|
||||||
message("II SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING " ${SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING})
|
|
||||||
|
|
||||||
# TODO: proper options n shit
|
|
||||||
if (SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING)
|
|
||||||
include(CTest)
|
|
||||||
|
|
||||||
#add_executable(bitset_tests
|
|
||||||
# ./solanaceae/ngc_ft1_sha1/bitset_tests.cpp
|
|
||||||
#)
|
|
||||||
|
|
||||||
#target_link_libraries(bitset_tests PUBLIC
|
|
||||||
# solanaceae_sha1_ngcft1
|
|
||||||
#)
|
|
||||||
|
|
||||||
endif()
|
|
||||||
|
|
||||||
########################################
|
|
||||||
|
|
||||||
add_library(solanaceae_ngchs2
|
|
||||||
./solanaceae/ngc_hs2/serl.hpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_hs2/ts_find_start.hpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_hs2/ngc_hs2_sigma.hpp
|
|
||||||
./solanaceae/ngc_hs2/ngc_hs2_sigma.cpp
|
|
||||||
|
|
||||||
./solanaceae/ngc_hs2/ngc_hs2_rizzler.hpp
|
|
||||||
./solanaceae/ngc_hs2/ngc_hs2_rizzler.cpp
|
|
||||||
)
|
|
||||||
target_include_directories(solanaceae_ngchs2 PUBLIC .)
|
|
||||||
target_compile_features(solanaceae_ngchs2 PUBLIC cxx_std_17)
|
|
||||||
target_link_libraries(solanaceae_ngchs2 PUBLIC
|
|
||||||
solanaceae_ngcft1
|
|
||||||
solanaceae_sha1_ngcft1 # HACK: properly abstract filekind/-id
|
|
||||||
solanaceae_tox_contacts
|
|
||||||
solanaceae_message3
|
|
||||||
solanaceae_object_store
|
|
||||||
nlohmann_json::nlohmann_json
|
|
||||||
)
|
|
||||||
|
|
||||||
option(SOLANACEAE_NGCHS2_BUILD_TESTING "Build the solanaceae_ngchs2 tests" OFF)
|
|
||||||
message("II SOLANACEAE_NGCHS2_BUILD_TESTING " ${SOLANACEAE_NGCHS2_BUILD_TESTING})
|
|
||||||
|
|
||||||
if (SOLANACEAE_NGCHS2_BUILD_TESTING)
|
|
||||||
include(CTest)
|
|
||||||
|
|
||||||
add_executable(test_hs2_ts_binarysearch
|
|
||||||
./solanaceae/ngc_hs2/test_ts_binarysearch.cpp
|
|
||||||
)
|
|
||||||
|
|
||||||
target_link_libraries(test_hs2_ts_binarysearch PUBLIC
|
|
||||||
solanaceae_ngchs2
|
|
||||||
)
|
|
||||||
|
|
||||||
add_test(NAME test_hs2_ts_binarysearch COMMAND test_hs2_ts_binarysearch)
|
|
||||||
|
|
||||||
endif()
|
|
||||||
|
|
||||||
########################################
|
|
||||||
|
|
||||||
if (SOLANACEAE_NGCFT1_BUILD_PLUGINS)
|
|
||||||
add_subdirectory(./plugins)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
|
10
external/CMakeLists.txt
vendored
10
external/CMakeLists.txt
vendored
@ -2,13 +2,3 @@ cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
|
|||||||
|
|
||||||
add_subdirectory(./sha1)
|
add_subdirectory(./sha1)
|
||||||
|
|
||||||
# we are running a custom msgpack serialization for hs2
|
|
||||||
if (NOT TARGET nlohmann_json::nlohmann_json)
|
|
||||||
FetchContent_Declare(json
|
|
||||||
URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz
|
|
||||||
URL_HASH SHA256=d6c65aca6b1ed68e7a182f4757257b107ae403032760ed6ef121c9d55e81757d
|
|
||||||
EXCLUDE_FROM_ALL
|
|
||||||
)
|
|
||||||
FetchContent_MakeAvailable(json)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
|
@ -1,33 +0,0 @@
|
|||||||
cmake_minimum_required(VERSION 3.9...3.24 FATAL_ERROR)
|
|
||||||
|
|
||||||
########################################
|
|
||||||
|
|
||||||
add_library(plugin_ngcft1 MODULE
|
|
||||||
./plugin_ngcft1.cpp
|
|
||||||
)
|
|
||||||
target_compile_features(plugin_ngcft1 PUBLIC cxx_std_17)
|
|
||||||
set_target_properties(plugin_ngcft1 PROPERTIES
|
|
||||||
C_VISIBILITY_PRESET hidden
|
|
||||||
)
|
|
||||||
target_compile_definitions(plugin_ngcft1 PUBLIC ENTT_API_IMPORT)
|
|
||||||
target_link_libraries(plugin_ngcft1 PUBLIC
|
|
||||||
solanaceae_plugin
|
|
||||||
solanaceae_ngcext
|
|
||||||
solanaceae_ngcft1
|
|
||||||
solanaceae_sha1_ngcft1
|
|
||||||
)
|
|
||||||
|
|
||||||
########################################
|
|
||||||
|
|
||||||
add_library(plugin_ngchs2 MODULE
|
|
||||||
./plugin_ngchs2.cpp
|
|
||||||
)
|
|
||||||
target_compile_features(plugin_ngchs2 PUBLIC cxx_std_17)
|
|
||||||
set_target_properties(plugin_ngchs2 PROPERTIES
|
|
||||||
C_VISIBILITY_PRESET hidden
|
|
||||||
)
|
|
||||||
target_compile_definitions(plugin_ngchs2 PUBLIC ENTT_API_IMPORT)
|
|
||||||
target_link_libraries(plugin_ngchs2 PUBLIC
|
|
||||||
solanaceae_plugin
|
|
||||||
solanaceae_ngchs2
|
|
||||||
)
|
|
@ -1,81 +0,0 @@
|
|||||||
#include <solanaceae/plugin/solana_plugin_v1.h>
|
|
||||||
|
|
||||||
#include <solanaceae/ngc_ext/ngcext.hpp>
|
|
||||||
#include <solanaceae/ngc_ft1/ngcft1.hpp>
|
|
||||||
#include <solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp>
|
|
||||||
|
|
||||||
#include <entt/entt.hpp>
|
|
||||||
#include <entt/fwd.hpp>
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
static std::unique_ptr<NGCEXTEventProvider> g_ngcextep = nullptr;
|
|
||||||
// TODO: make sep plug
|
|
||||||
static std::unique_ptr<NGCFT1> g_ngcft1 = nullptr;
|
|
||||||
static std::unique_ptr<SHA1_NGCFT1> g_sha1_ngcft1 = nullptr;
|
|
||||||
|
|
||||||
constexpr const char* plugin_name = "NGCEXT";
|
|
||||||
|
|
||||||
extern "C" {
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT const char* solana_plugin_get_name(void) {
|
|
||||||
return plugin_name;
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_get_version(void) {
|
|
||||||
return SOLANA_PLUGIN_VERSION;
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_start(struct SolanaAPI* solana_api) {
|
|
||||||
std::cout << "PLUGIN " << plugin_name << " START()\n";
|
|
||||||
|
|
||||||
if (solana_api == nullptr) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
auto* os = PLUG_RESOLVE_INSTANCE(ObjectStore2);
|
|
||||||
auto* tox_i = PLUG_RESOLVE_INSTANCE(ToxI);
|
|
||||||
auto* tox_event_provider_i = PLUG_RESOLVE_INSTANCE(ToxEventProviderI);
|
|
||||||
auto* cr = PLUG_RESOLVE_INSTANCE_VERSIONED(Contact3Registry, "1");
|
|
||||||
auto* rmm = PLUG_RESOLVE_INSTANCE(RegistryMessageModelI);
|
|
||||||
auto* tcm = PLUG_RESOLVE_INSTANCE(ToxContactModel2);
|
|
||||||
|
|
||||||
// static store, could be anywhere tho
|
|
||||||
// construct with fetched dependencies
|
|
||||||
g_ngcextep = std::make_unique<NGCEXTEventProvider>(*tox_i, *tox_event_provider_i);
|
|
||||||
g_ngcft1 = std::make_unique<NGCFT1>(*tox_i, *tox_event_provider_i, *g_ngcextep.get());
|
|
||||||
g_sha1_ngcft1 = std::make_unique<SHA1_NGCFT1>(*os, *cr, *rmm, *g_ngcft1.get(), *tcm, *tox_event_provider_i, *g_ngcextep.get());
|
|
||||||
|
|
||||||
// register types
|
|
||||||
PLUG_PROVIDE_INSTANCE(NGCEXTEventProviderI, plugin_name, g_ngcextep.get());
|
|
||||||
|
|
||||||
PLUG_PROVIDE_INSTANCE(NGCFT1EventProviderI, plugin_name, g_ngcft1.get());
|
|
||||||
PLUG_PROVIDE_INSTANCE(NGCFT1, plugin_name, g_ngcft1.get());
|
|
||||||
|
|
||||||
PLUG_PROVIDE_INSTANCE(SHA1_NGCFT1, plugin_name, g_sha1_ngcft1.get());
|
|
||||||
} catch (const ResolveException& e) {
|
|
||||||
std::cerr << "PLUGIN " << plugin_name << " " << e.what << "\n";
|
|
||||||
return 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT void solana_plugin_stop(void) {
|
|
||||||
std::cout << "PLUGIN " << plugin_name << " STOP()\n";
|
|
||||||
|
|
||||||
g_sha1_ngcft1.reset();
|
|
||||||
g_ngcft1.reset();
|
|
||||||
g_ngcextep.reset();
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT float solana_plugin_tick(float delta) {
|
|
||||||
const float ft_interval = g_ngcft1->iterate(delta);
|
|
||||||
const float sha_interval = g_sha1_ngcft1->iterate(delta);
|
|
||||||
return std::min<float>(ft_interval, sha_interval);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // extern C
|
|
||||||
|
|
@ -1,79 +0,0 @@
|
|||||||
#include <solanaceae/plugin/solana_plugin_v1.h>
|
|
||||||
|
|
||||||
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/ngc_ft1/ngcft1.hpp>
|
|
||||||
#include <solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp> // this hurts
|
|
||||||
#include <solanaceae/ngc_hs2/ngc_hs2_sigma.hpp>
|
|
||||||
#include <solanaceae/ngc_hs2/ngc_hs2_rizzler.hpp>
|
|
||||||
|
|
||||||
#include <entt/entt.hpp>
|
|
||||||
#include <entt/fwd.hpp>
|
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
// https://youtu.be/OwT83dN82pc
|
|
||||||
|
|
||||||
static std::unique_ptr<NGCHS2Sigma> g_ngchs2s = nullptr;
|
|
||||||
static std::unique_ptr<NGCHS2Rizzler> g_ngchs2r = nullptr;
|
|
||||||
|
|
||||||
constexpr const char* plugin_name = "NGCHS2";
|
|
||||||
|
|
||||||
extern "C" {
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT const char* solana_plugin_get_name(void) {
|
|
||||||
return plugin_name;
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_get_version(void) {
|
|
||||||
return SOLANA_PLUGIN_VERSION;
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_start(struct SolanaAPI* solana_api) {
|
|
||||||
std::cout << "PLUGIN " << plugin_name << " START()\n";
|
|
||||||
|
|
||||||
if (solana_api == nullptr) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
//auto* tox_i = PLUG_RESOLVE_INSTANCE(ToxI);
|
|
||||||
auto* tox_event_provider_i = PLUG_RESOLVE_INSTANCE(ToxEventProviderI);
|
|
||||||
auto* cr = PLUG_RESOLVE_INSTANCE_VERSIONED(Contact3Registry, "1");
|
|
||||||
auto* rmm = PLUG_RESOLVE_INSTANCE(RegistryMessageModelI);
|
|
||||||
auto* tcm = PLUG_RESOLVE_INSTANCE(ToxContactModel2);
|
|
||||||
auto* ngcft1 = PLUG_RESOLVE_INSTANCE(NGCFT1);
|
|
||||||
auto* sha1_ngcft1 = PLUG_RESOLVE_INSTANCE(SHA1_NGCFT1);
|
|
||||||
|
|
||||||
// static store, could be anywhere tho
|
|
||||||
// construct with fetched dependencies
|
|
||||||
g_ngchs2s = std::make_unique<NGCHS2Sigma>(*cr, *rmm, *tcm, *ngcft1);
|
|
||||||
g_ngchs2r = std::make_unique<NGCHS2Rizzler>(*cr, *rmm, *tcm, *ngcft1, *tox_event_provider_i, *sha1_ngcft1);
|
|
||||||
|
|
||||||
// register types
|
|
||||||
PLUG_PROVIDE_INSTANCE(NGCHS2Sigma, plugin_name, g_ngchs2s.get());
|
|
||||||
PLUG_PROVIDE_INSTANCE(NGCHS2Rizzler, plugin_name, g_ngchs2r.get());
|
|
||||||
} catch (const ResolveException& e) {
|
|
||||||
std::cerr << "PLUGIN " << plugin_name << " " << e.what << "\n";
|
|
||||||
return 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT void solana_plugin_stop(void) {
|
|
||||||
std::cout << "PLUGIN " << plugin_name << " STOP()\n";
|
|
||||||
|
|
||||||
g_ngchs2r.reset();
|
|
||||||
g_ngchs2s.reset();
|
|
||||||
}
|
|
||||||
|
|
||||||
SOLANA_PLUGIN_EXPORT float solana_plugin_tick(float delta) {
|
|
||||||
const float sigma_interval = g_ngchs2s->iterate(delta);
|
|
||||||
const float rizzler_interval = g_ngchs2r->iterate(delta);
|
|
||||||
return std::min<float>(sigma_interval, rizzler_interval);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // extern C
|
|
||||||
|
|
@ -1,13 +1,10 @@
|
|||||||
#include "./ngcext.hpp"
|
#include "./ngcext.hpp"
|
||||||
|
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
NGCEXTEventProvider::NGCEXTEventProvider(ToxI& t, ToxEventProviderI& tep) : _t(t), _tep(tep), _tep_sr(_tep.newSubRef(this)) {
|
NGCEXTEventProvider::NGCEXTEventProvider(ToxEventProviderI& tep) : _tep(tep) {
|
||||||
_tep_sr
|
_tep.subscribe(this, Tox_Event::TOX_EVENT_GROUP_CUSTOM_PACKET);
|
||||||
.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PACKET)
|
_tep.subscribe(this, Tox_Event::TOX_EVENT_GROUP_CUSTOM_PRIVATE_PACKET);
|
||||||
.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PRIVATE_PACKET)
|
|
||||||
;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#define _DATA_HAVE(x, error) if ((data_size - curser) < (x)) { error; }
|
#define _DATA_HAVE(x, error) if ((data_size - curser) < (x)) { error; }
|
||||||
@ -80,7 +77,7 @@ bool NGCEXTEventProvider::parse_ft1_init(
|
|||||||
e.file_size = 0u;
|
e.file_size = 0u;
|
||||||
_DATA_HAVE(sizeof(e.file_size), std::cerr << "NGCEXT: packet too small, missing file_size\n"; return false)
|
_DATA_HAVE(sizeof(e.file_size), std::cerr << "NGCEXT: packet too small, missing file_size\n"; return false)
|
||||||
for (size_t i = 0; i < sizeof(e.file_size); i++, curser++) {
|
for (size_t i = 0; i < sizeof(e.file_size); i++, curser++) {
|
||||||
e.file_size |= uint64_t(data[curser]) << (i*8);
|
e.file_size |= size_t(data[curser]) << (i*8);
|
||||||
}
|
}
|
||||||
|
|
||||||
// - 1 byte (temporary_file_tf_id)
|
// - 1 byte (temporary_file_tf_id)
|
||||||
@ -115,85 +112,6 @@ bool NGCEXTEventProvider::parse_ft1_init_ack(
|
|||||||
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
|
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
|
||||||
e.transfer_id = data[curser++];
|
e.transfer_id = data[curser++];
|
||||||
|
|
||||||
e.max_lossy_data_size = 500-4; // -4 and 500 are hardcoded
|
|
||||||
|
|
||||||
return dispatch(
|
|
||||||
NGCEXT_Event::FT1_INIT_ACK,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::parse_ft1_init_ack_v2(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
) {
|
|
||||||
if (!_private) {
|
|
||||||
std::cerr << "NGCEXT: ft1_init_ack_v2 cant be public\n";
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
Events::NGCEXT_ft1_init_ack e;
|
|
||||||
e.group_number = group_number;
|
|
||||||
e.peer_number = peer_number;
|
|
||||||
size_t curser = 0;
|
|
||||||
|
|
||||||
// - 1 byte (temporary_file_tf_id)
|
|
||||||
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
|
|
||||||
e.transfer_id = data[curser++];
|
|
||||||
|
|
||||||
// - 2 byte (max_lossy_data_size)
|
|
||||||
if ((data_size - curser) >= sizeof(e.max_lossy_data_size)) {
|
|
||||||
e.max_lossy_data_size = 0;
|
|
||||||
for (size_t i = 0; i < sizeof(e.max_lossy_data_size); i++, curser++) {
|
|
||||||
e.max_lossy_data_size |= uint16_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
e.max_lossy_data_size = 500-4; // default
|
|
||||||
}
|
|
||||||
|
|
||||||
return dispatch(
|
|
||||||
NGCEXT_Event::FT1_INIT_ACK,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::parse_ft1_init_ack_v3(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
) {
|
|
||||||
if (!_private) {
|
|
||||||
std::cerr << "NGCEXT: ft1_init_ack_v3 cant be public\n";
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
Events::NGCEXT_ft1_init_ack e;
|
|
||||||
e.group_number = group_number;
|
|
||||||
e.peer_number = peer_number;
|
|
||||||
size_t curser = 0;
|
|
||||||
|
|
||||||
// - 1 byte (temporary_file_tf_id)
|
|
||||||
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
|
|
||||||
e.transfer_id = data[curser++];
|
|
||||||
|
|
||||||
// - 2 byte (max_lossy_data_size)
|
|
||||||
if ((data_size - curser) >= sizeof(e.max_lossy_data_size)) {
|
|
||||||
e.max_lossy_data_size = 0;
|
|
||||||
for (size_t i = 0; i < sizeof(e.max_lossy_data_size); i++, curser++) {
|
|
||||||
e.max_lossy_data_size |= uint16_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
e.max_lossy_data_size = 500-4; // default
|
|
||||||
}
|
|
||||||
|
|
||||||
// - 1 byte (feature_flags)
|
|
||||||
if ((data_size - curser) >= sizeof(e.feature_flags)) {
|
|
||||||
e.feature_flags = data[curser++];
|
|
||||||
} else {
|
|
||||||
e.feature_flags = 0x00; // default
|
|
||||||
}
|
|
||||||
|
|
||||||
return dispatch(
|
return dispatch(
|
||||||
NGCEXT_Event::FT1_INIT_ACK,
|
NGCEXT_Event::FT1_INIT_ACK,
|
||||||
e
|
e
|
||||||
@ -255,7 +173,6 @@ bool NGCEXTEventProvider::parse_ft1_data_ack(
|
|||||||
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
|
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
|
||||||
e.transfer_id = data[curser++];
|
e.transfer_id = data[curser++];
|
||||||
|
|
||||||
e.sequence_ids.reserve(std::max<int64_t>(data_size-curser, 1)/sizeof(uint16_t));
|
|
||||||
while (curser < data_size) {
|
while (curser < data_size) {
|
||||||
_DATA_HAVE(sizeof(uint16_t), std::cerr << "NGCEXT: packet too small, missing seq_id\n"; return false)
|
_DATA_HAVE(sizeof(uint16_t), std::cerr << "NGCEXT: packet too small, missing seq_id\n"; return false)
|
||||||
uint16_t seq_id = data[curser++];
|
uint16_t seq_id = data[curser++];
|
||||||
@ -307,209 +224,6 @@ bool NGCEXTEventProvider::parse_ft1_message(
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool NGCEXTEventProvider::parse_ft1_have(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
) {
|
|
||||||
if (!_private) {
|
|
||||||
std::cerr << "NGCEXT: ft1_have cant be public\n";
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
Events::NGCEXT_ft1_have e;
|
|
||||||
e.group_number = group_number;
|
|
||||||
e.peer_number = peer_number;
|
|
||||||
size_t curser = 0;
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
e.file_kind = 0u;
|
|
||||||
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
|
|
||||||
e.file_kind |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
uint16_t file_id_size = 0u;
|
|
||||||
_DATA_HAVE(sizeof(file_id_size), std::cerr << "NGCEXT: packet too small, missing file_id_size\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(file_id_size); i++, curser++) {
|
|
||||||
file_id_size |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
_DATA_HAVE(file_id_size, std::cerr << "NGCEXT: packet too small, missing file_id, or file_id_size too large(" << data_size-curser << ")\n"; return false)
|
|
||||||
|
|
||||||
e.file_id = {data+curser, data+curser+file_id_size};
|
|
||||||
curser += file_id_size;
|
|
||||||
|
|
||||||
// - array [
|
|
||||||
// - 4 bytes (chunk index)
|
|
||||||
// - ]
|
|
||||||
while (curser < data_size) {
|
|
||||||
_DATA_HAVE(sizeof(uint32_t), std::cerr << "NGCEXT: packet too small, broken chunk index\n"; return false)
|
|
||||||
uint32_t chunk_index = 0u;
|
|
||||||
for (size_t i = 0; i < sizeof(chunk_index); i++, curser++) {
|
|
||||||
chunk_index |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
e.chunks.push_back(chunk_index);
|
|
||||||
}
|
|
||||||
|
|
||||||
return dispatch(
|
|
||||||
NGCEXT_Event::FT1_HAVE,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::parse_ft1_bitset(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
) {
|
|
||||||
if (!_private) {
|
|
||||||
std::cerr << "NGCEXT: ft1_bitset cant be public\n";
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
Events::NGCEXT_ft1_bitset e;
|
|
||||||
e.group_number = group_number;
|
|
||||||
e.peer_number = peer_number;
|
|
||||||
size_t curser = 0;
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
e.file_kind = 0u;
|
|
||||||
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
|
|
||||||
e.file_kind |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
uint16_t file_id_size = 0u;
|
|
||||||
_DATA_HAVE(sizeof(file_id_size), std::cerr << "NGCEXT: packet too small, missing file_id_size\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(file_id_size); i++, curser++) {
|
|
||||||
file_id_size |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
_DATA_HAVE(file_id_size, std::cerr << "NGCEXT: packet too small, missing file_id, or file_id_size too large (" << data_size-curser << ")\n"; return false)
|
|
||||||
|
|
||||||
e.file_id = {data+curser, data+curser+file_id_size};
|
|
||||||
curser += file_id_size;
|
|
||||||
|
|
||||||
e.start_chunk = 0u;
|
|
||||||
_DATA_HAVE(sizeof(e.start_chunk), std::cerr << "NGCEXT: packet too small, missing start_chunk\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(e.start_chunk); i++, curser++) {
|
|
||||||
e.start_chunk |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
// - X bytes
|
|
||||||
// - array [
|
|
||||||
// - 1 bit (have chunk)
|
|
||||||
// - ] (filled up with zero)
|
|
||||||
// high to low?
|
|
||||||
// simply rest of file packet
|
|
||||||
e.chunk_bitset = {data+curser, data+curser+(data_size-curser)};
|
|
||||||
|
|
||||||
return dispatch(
|
|
||||||
NGCEXT_Event::FT1_BITSET,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::parse_ft1_have_all(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
) {
|
|
||||||
// can be public
|
|
||||||
// TODO: warn on public?
|
|
||||||
|
|
||||||
Events::NGCEXT_ft1_have_all e;
|
|
||||||
e.group_number = group_number;
|
|
||||||
e.peer_number = peer_number;
|
|
||||||
size_t curser = 0;
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
e.file_kind = 0u;
|
|
||||||
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
|
|
||||||
e.file_kind |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
_DATA_HAVE(1, std::cerr << "NGCEXT: packet too small, missing file_id\n"; return false)
|
|
||||||
|
|
||||||
// - X bytes (file_id, differnt sizes)
|
|
||||||
e.file_id = {data+curser, data+curser+(data_size-curser)};
|
|
||||||
|
|
||||||
return dispatch(
|
|
||||||
NGCEXT_Event::FT1_HAVE_ALL,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::parse_ft1_init2(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
) {
|
|
||||||
if (!_private) {
|
|
||||||
std::cerr << "NGCEXT: ft1_init2 cant be public\n";
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
Events::NGCEXT_ft1_init2 e;
|
|
||||||
e.group_number = group_number;
|
|
||||||
e.peer_number = peer_number;
|
|
||||||
size_t curser = 0;
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
e.file_kind = 0u;
|
|
||||||
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
|
|
||||||
e.file_kind |= uint32_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
// - 8 bytes (data size)
|
|
||||||
e.file_size = 0u;
|
|
||||||
_DATA_HAVE(sizeof(e.file_size), std::cerr << "NGCEXT: packet too small, missing file_size\n"; return false)
|
|
||||||
for (size_t i = 0; i < sizeof(e.file_size); i++, curser++) {
|
|
||||||
e.file_size |= uint64_t(data[curser]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
// - 1 byte (temporary_file_tf_id)
|
|
||||||
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
|
|
||||||
e.transfer_id = data[curser++];
|
|
||||||
|
|
||||||
// - 1 byte feature flags
|
|
||||||
_DATA_HAVE(sizeof(e.feature_flags), std::cerr << "NGCEXT: packet too small, missing feature_flags\n"; return false)
|
|
||||||
e.feature_flags = data[curser++];
|
|
||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
e.file_id = {data+curser, data+curser+(data_size-curser)};
|
|
||||||
|
|
||||||
return dispatch(
|
|
||||||
NGCEXT_Event::FT1_INIT2,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::parse_pc1_announce(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
) {
|
|
||||||
// can be public
|
|
||||||
Events::NGCEXT_pc1_announce e;
|
|
||||||
e.group_number = group_number;
|
|
||||||
e.peer_number = peer_number;
|
|
||||||
size_t curser = 0;
|
|
||||||
|
|
||||||
// - X bytes (id, differnt sizes)
|
|
||||||
e.id = {data+curser, data+curser+(data_size-curser)};
|
|
||||||
|
|
||||||
return dispatch(
|
|
||||||
NGCEXT_Event::PC1_ANNOUNCE,
|
|
||||||
e
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::handlePacket(
|
bool NGCEXTEventProvider::handlePacket(
|
||||||
const uint32_t group_number,
|
const uint32_t group_number,
|
||||||
const uint32_t peer_number,
|
const uint32_t peer_number,
|
||||||
@ -533,25 +247,13 @@ bool NGCEXTEventProvider::handlePacket(
|
|||||||
case NGCEXT_Event::FT1_INIT:
|
case NGCEXT_Event::FT1_INIT:
|
||||||
return parse_ft1_init(group_number, peer_number, data+1, data_size-1, _private);
|
return parse_ft1_init(group_number, peer_number, data+1, data_size-1, _private);
|
||||||
case NGCEXT_Event::FT1_INIT_ACK:
|
case NGCEXT_Event::FT1_INIT_ACK:
|
||||||
//return parse_ft1_init_ack(group_number, peer_number, data+1, data_size-1, _private);
|
return parse_ft1_init_ack(group_number, peer_number, data+1, data_size-1, _private);
|
||||||
//return parse_ft1_init_ack_v2(group_number, peer_number, data+1, data_size-1, _private);
|
|
||||||
return parse_ft1_init_ack_v3(group_number, peer_number, data+1, data_size-1, _private);
|
|
||||||
case NGCEXT_Event::FT1_DATA:
|
case NGCEXT_Event::FT1_DATA:
|
||||||
return parse_ft1_data(group_number, peer_number, data+1, data_size-1, _private);
|
return parse_ft1_data(group_number, peer_number, data+1, data_size-1, _private);
|
||||||
case NGCEXT_Event::FT1_DATA_ACK:
|
case NGCEXT_Event::FT1_DATA_ACK:
|
||||||
return parse_ft1_data_ack(group_number, peer_number, data+1, data_size-1, _private);
|
return parse_ft1_data_ack(group_number, peer_number, data+1, data_size-1, _private);
|
||||||
case NGCEXT_Event::FT1_MESSAGE:
|
case NGCEXT_Event::FT1_MESSAGE:
|
||||||
return parse_ft1_message(group_number, peer_number, data+1, data_size-1, _private);
|
return parse_ft1_message(group_number, peer_number, data+1, data_size-1, _private);
|
||||||
case NGCEXT_Event::FT1_HAVE:
|
|
||||||
return parse_ft1_have(group_number, peer_number, data+1, data_size-1, _private);
|
|
||||||
case NGCEXT_Event::FT1_BITSET:
|
|
||||||
return parse_ft1_bitset(group_number, peer_number, data+1, data_size-1, _private);
|
|
||||||
case NGCEXT_Event::FT1_HAVE_ALL:
|
|
||||||
return parse_ft1_have_all(group_number, peer_number, data+1, data_size-1, _private);
|
|
||||||
case NGCEXT_Event::FT1_INIT2:
|
|
||||||
return parse_ft1_init2(group_number, peer_number, data+1, data_size-1, _private);
|
|
||||||
case NGCEXT_Event::PC1_ANNOUNCE:
|
|
||||||
return parse_pc1_announce(group_number, peer_number, data+1, data_size-1, _private);
|
|
||||||
default:
|
default:
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@ -559,309 +261,6 @@ bool NGCEXTEventProvider::handlePacket(
|
|||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_request(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
) {
|
|
||||||
// - 1 byte packet id
|
|
||||||
// - 4 byte file_kind
|
|
||||||
// - X bytes file_id
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_REQUEST));
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < file_id_size; i++) {
|
|
||||||
pkg.push_back(file_id[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_init(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
uint64_t file_size,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
) {
|
|
||||||
// - 1 byte packet id
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
// - 8 bytes (data size)
|
|
||||||
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT));
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < sizeof(file_size); i++) {
|
|
||||||
pkg.push_back((file_size>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
pkg.push_back(transfer_id);
|
|
||||||
for (size_t i = 0; i < file_id_size; i++) {
|
|
||||||
pkg.push_back(file_id[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_init_ack(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint8_t transfer_id
|
|
||||||
) {
|
|
||||||
// - 1 byte packet id
|
|
||||||
// - 1 byte transfer_id
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT_ACK));
|
|
||||||
pkg.push_back(transfer_id);
|
|
||||||
|
|
||||||
// - 2 bytes max_lossy_data_size
|
|
||||||
const uint16_t max_lossy_data_size = _t.toxGroupMaxCustomLossyPacketLength() - 4;
|
|
||||||
for (size_t i = 0; i < sizeof(uint16_t); i++) {
|
|
||||||
pkg.push_back((max_lossy_data_size>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_data(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
uint16_t sequence_id,
|
|
||||||
const uint8_t* data, size_t data_size
|
|
||||||
) {
|
|
||||||
assert(data_size > 0);
|
|
||||||
|
|
||||||
// TODO
|
|
||||||
// check header_size+data_size <= max pkg size
|
|
||||||
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.reserve(2048); // saves a ton of allocations
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA));
|
|
||||||
pkg.push_back(transfer_id);
|
|
||||||
pkg.push_back(sequence_id & 0xff);
|
|
||||||
pkg.push_back((sequence_id >> (1*8)) & 0xff);
|
|
||||||
|
|
||||||
// TODO: optimize
|
|
||||||
for (size_t i = 0; i < data_size; i++) {
|
|
||||||
pkg.push_back(data[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossy
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_data_ack(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
const uint16_t* seq_ids, size_t seq_ids_size
|
|
||||||
) {
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.reserve(1+1+2*32); // 32acks in a single pkg should be unlikely
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA_ACK));
|
|
||||||
pkg.push_back(transfer_id);
|
|
||||||
|
|
||||||
// TODO: optimize
|
|
||||||
for (size_t i = 0; i < seq_ids_size; i++) {
|
|
||||||
pkg.push_back(seq_ids[i] & 0xff);
|
|
||||||
pkg.push_back((seq_ids[i] >> (1*8)) & 0xff);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossy
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_all_ft1_message(
|
|
||||||
uint32_t group_number,
|
|
||||||
uint32_t message_id,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
) {
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_MESSAGE));
|
|
||||||
|
|
||||||
for (size_t i = 0; i < sizeof(message_id); i++) {
|
|
||||||
pkg.push_back((message_id>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < file_id_size; i++) {
|
|
||||||
pkg.push_back(file_id[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPacket(group_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_have(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size,
|
|
||||||
const uint32_t* chunks_data, size_t chunks_size
|
|
||||||
) {
|
|
||||||
// 16bit file id size
|
|
||||||
assert(file_id_size <= 0xffff);
|
|
||||||
if (file_id_size > 0xffff) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_HAVE));
|
|
||||||
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
|
|
||||||
// file id not last in packet, needs explicit size
|
|
||||||
const uint16_t file_id_size_cast = file_id_size;
|
|
||||||
for (size_t i = 0; i < sizeof(file_id_size_cast); i++) {
|
|
||||||
pkg.push_back((file_id_size_cast>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < file_id_size; i++) {
|
|
||||||
pkg.push_back(file_id[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// rest is chunks
|
|
||||||
for (size_t c_i = 0; c_i < chunks_size; c_i++) {
|
|
||||||
for (size_t i = 0; i < sizeof(chunks_data[c_i]); i++) {
|
|
||||||
pkg.push_back((chunks_data[c_i]>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_bitset(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size,
|
|
||||||
uint32_t start_chunk,
|
|
||||||
const uint8_t* bitset_data, size_t bitset_size // size is bytes
|
|
||||||
) {
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_BITSET));
|
|
||||||
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
|
|
||||||
// file id not last in packet, needs explicit size
|
|
||||||
const uint16_t file_id_size_cast = file_id_size;
|
|
||||||
for (size_t i = 0; i < sizeof(file_id_size_cast); i++) {
|
|
||||||
pkg.push_back((file_id_size_cast>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < file_id_size; i++) {
|
|
||||||
pkg.push_back(file_id[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (size_t i = 0; i < sizeof(start_chunk); i++) {
|
|
||||||
pkg.push_back((start_chunk>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (size_t i = 0; i < bitset_size; i++) {
|
|
||||||
pkg.push_back(bitset_data[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_have_all(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
) {
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_HAVE_ALL));
|
|
||||||
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < file_id_size; i++) {
|
|
||||||
pkg.push_back(file_id[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_ft1_init2(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
uint64_t file_size,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
uint8_t feature_flags,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
) {
|
|
||||||
// - 1 byte packet id
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
// - 8 bytes (data size)
|
|
||||||
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
|
|
||||||
// - 1 byte (feature_flags)
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT2));
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
for (size_t i = 0; i < sizeof(file_size); i++) {
|
|
||||||
pkg.push_back((file_size>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
pkg.push_back(transfer_id);
|
|
||||||
pkg.push_back(feature_flags);
|
|
||||||
for (size_t i = 0; i < file_id_size; i++) {
|
|
||||||
pkg.push_back(file_id[i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
// lossless
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
static std::vector<uint8_t> build_pc1_announce(const uint8_t* id_data, size_t id_size) {
|
|
||||||
// - 1 byte packet id
|
|
||||||
// - X bytes (id, differnt sizes)
|
|
||||||
|
|
||||||
std::vector<uint8_t> pkg;
|
|
||||||
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::PC1_ANNOUNCE));
|
|
||||||
for (size_t i = 0; i < id_size; i++) {
|
|
||||||
pkg.push_back(id_data[i]);
|
|
||||||
}
|
|
||||||
return pkg;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_pc1_announce(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* id_data, size_t id_size
|
|
||||||
) {
|
|
||||||
auto pkg = build_pc1_announce(id_data, id_size);
|
|
||||||
|
|
||||||
std::cout << "NEEP: sending PC1_ANNOUNCE s:" << pkg.size() - sizeof(NGCEXT_Event::PC1_ANNOUNCE) << "\n";
|
|
||||||
|
|
||||||
// lossless?
|
|
||||||
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::send_all_pc1_announce(
|
|
||||||
uint32_t group_number,
|
|
||||||
const uint8_t* id_data, size_t id_size
|
|
||||||
) {
|
|
||||||
auto pkg = build_pc1_announce(id_data, id_size);
|
|
||||||
|
|
||||||
std::cout << "NEEP: sending all PC1_ANNOUNCE s:" << pkg.size() - sizeof(NGCEXT_Event::PC1_ANNOUNCE) << "\n";
|
|
||||||
|
|
||||||
// lossless?
|
|
||||||
return _t.toxGroupSendCustomPacket(group_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCEXTEventProvider::onToxEvent(const Tox_Event_Group_Custom_Packet* e) {
|
bool NGCEXTEventProvider::onToxEvent(const Tox_Event_Group_Custom_Packet* e) {
|
||||||
const auto group_number = tox_event_group_custom_packet_get_group_number(e);
|
const auto group_number = tox_event_group_custom_packet_get_group_number(e);
|
||||||
const auto peer_number = tox_event_group_custom_packet_get_peer_id(e);
|
const auto peer_number = tox_event_group_custom_packet_get_peer_id(e);
|
||||||
|
@ -3,11 +3,11 @@
|
|||||||
// solanaceae port of tox_ngc_ext
|
// solanaceae port of tox_ngc_ext
|
||||||
|
|
||||||
#include <solanaceae/toxcore/tox_event_interface.hpp>
|
#include <solanaceae/toxcore/tox_event_interface.hpp>
|
||||||
#include <solanaceae/toxcore/tox_interface.hpp>
|
|
||||||
#include <solanaceae/util/event_provider.hpp>
|
#include <solanaceae/util/event_provider.hpp>
|
||||||
|
|
||||||
#include <solanaceae/toxcore/tox_key.hpp>
|
#include <solanaceae/toxcore/tox_key.hpp>
|
||||||
|
|
||||||
|
#include <array>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
namespace Events {
|
namespace Events {
|
||||||
@ -30,7 +30,6 @@ namespace Events {
|
|||||||
uint32_t peer_number;
|
uint32_t peer_number;
|
||||||
|
|
||||||
// respond to a request with 0 or more message ids, sorted by newest first
|
// respond to a request with 0 or more message ids, sorted by newest first
|
||||||
|
|
||||||
// - peer_key bytes (the msg_ids are from)
|
// - peer_key bytes (the msg_ids are from)
|
||||||
ToxKey peer_key;
|
ToxKey peer_key;
|
||||||
|
|
||||||
@ -48,7 +47,6 @@ namespace Events {
|
|||||||
uint32_t peer_number;
|
uint32_t peer_number;
|
||||||
|
|
||||||
// request the other side to initiate a FT
|
// request the other side to initiate a FT
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
// - 4 byte (file_kind)
|
||||||
uint32_t file_kind;
|
uint32_t file_kind;
|
||||||
|
|
||||||
@ -56,13 +54,11 @@ namespace Events {
|
|||||||
std::vector<uint8_t> file_id;
|
std::vector<uint8_t> file_id;
|
||||||
};
|
};
|
||||||
|
|
||||||
// DEPRECATED: use FT1_INIT2 instead
|
|
||||||
struct NGCEXT_ft1_init {
|
struct NGCEXT_ft1_init {
|
||||||
uint32_t group_number;
|
uint32_t group_number;
|
||||||
uint32_t peer_number;
|
uint32_t peer_number;
|
||||||
|
|
||||||
// tell the other side you want to start a FT
|
// tell the other side you want to start a FT
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
// - 4 byte (file_kind)
|
||||||
uint32_t file_kind;
|
uint32_t file_kind;
|
||||||
|
|
||||||
@ -74,6 +70,8 @@ namespace Events {
|
|||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
// - X bytes (file_kind dependent id, differnt sizes)
|
||||||
std::vector<uint8_t> file_id;
|
std::vector<uint8_t> file_id;
|
||||||
|
|
||||||
|
// TODO: max supported lossy packet size
|
||||||
};
|
};
|
||||||
|
|
||||||
struct NGCEXT_ft1_init_ack {
|
struct NGCEXT_ft1_init_ack {
|
||||||
@ -83,13 +81,7 @@ namespace Events {
|
|||||||
// - 1 byte (transfer_id)
|
// - 1 byte (transfer_id)
|
||||||
uint8_t transfer_id;
|
uint8_t transfer_id;
|
||||||
|
|
||||||
// - 2 byte (self_max_lossy_data_size)
|
// TODO: max supported lossy packet size
|
||||||
uint16_t max_lossy_data_size;
|
|
||||||
|
|
||||||
// - 1 byte feature flags
|
|
||||||
// - 0x01 advertised zstd compression
|
|
||||||
// - 0x02
|
|
||||||
uint8_t feature_flags;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct NGCEXT_ft1_data {
|
struct NGCEXT_ft1_data {
|
||||||
@ -97,7 +89,6 @@ namespace Events {
|
|||||||
uint32_t peer_number;
|
uint32_t peer_number;
|
||||||
|
|
||||||
// data fragment
|
// data fragment
|
||||||
|
|
||||||
// - 1 byte (temporary_file_tf_id)
|
// - 1 byte (temporary_file_tf_id)
|
||||||
uint8_t transfer_id;
|
uint8_t transfer_id;
|
||||||
|
|
||||||
@ -129,6 +120,7 @@ namespace Events {
|
|||||||
// - 4 byte (message_id)
|
// - 4 byte (message_id)
|
||||||
uint32_t message_id;
|
uint32_t message_id;
|
||||||
|
|
||||||
|
// request the other side to initiate a FT
|
||||||
// - 4 byte (file_kind)
|
// - 4 byte (file_kind)
|
||||||
uint32_t file_kind;
|
uint32_t file_kind;
|
||||||
|
|
||||||
@ -136,84 +128,6 @@ namespace Events {
|
|||||||
std::vector<uint8_t> file_id;
|
std::vector<uint8_t> file_id;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct NGCEXT_ft1_have {
|
|
||||||
uint32_t group_number;
|
|
||||||
uint32_t peer_number;
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
uint32_t file_kind;
|
|
||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
std::vector<uint8_t> file_id;
|
|
||||||
|
|
||||||
// - array [
|
|
||||||
// - 4 bytes (chunk index)
|
|
||||||
// - ]
|
|
||||||
std::vector<uint32_t> chunks;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct NGCEXT_ft1_bitset {
|
|
||||||
uint32_t group_number;
|
|
||||||
uint32_t peer_number;
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
uint32_t file_kind;
|
|
||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
std::vector<uint8_t> file_id;
|
|
||||||
|
|
||||||
uint32_t start_chunk;
|
|
||||||
|
|
||||||
// - array [
|
|
||||||
// - 1 bit (have chunk)
|
|
||||||
// - ] (filled up with zero)
|
|
||||||
// high to low?
|
|
||||||
std::vector<uint8_t> chunk_bitset;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct NGCEXT_ft1_have_all {
|
|
||||||
uint32_t group_number;
|
|
||||||
uint32_t peer_number;
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
uint32_t file_kind;
|
|
||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
std::vector<uint8_t> file_id;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct NGCEXT_ft1_init2 {
|
|
||||||
uint32_t group_number;
|
|
||||||
uint32_t peer_number;
|
|
||||||
|
|
||||||
// tell the other side you want to start a FT
|
|
||||||
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
uint32_t file_kind;
|
|
||||||
|
|
||||||
// - 8 bytes (data size)
|
|
||||||
uint64_t file_size;
|
|
||||||
|
|
||||||
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
|
|
||||||
uint8_t transfer_id;
|
|
||||||
|
|
||||||
// - 1 byte feature flags
|
|
||||||
// - 0x01 advertise zstd compression
|
|
||||||
// - 0x02
|
|
||||||
uint8_t feature_flags;
|
|
||||||
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
std::vector<uint8_t> file_id;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct NGCEXT_pc1_announce {
|
|
||||||
uint32_t group_number;
|
|
||||||
uint32_t peer_number;
|
|
||||||
|
|
||||||
// - X bytes (id, differnt sizes)
|
|
||||||
std::vector<uint8_t> id;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // Events
|
} // Events
|
||||||
|
|
||||||
enum class NGCEXT_Event : uint8_t {
|
enum class NGCEXT_Event : uint8_t {
|
||||||
@ -240,7 +154,6 @@ enum class NGCEXT_Event : uint8_t {
|
|||||||
|
|
||||||
// tell the other side you want to start a FT
|
// tell the other side you want to start a FT
|
||||||
// TODO: might use id layer instead. with it, it would look similar to friends_ft
|
// TODO: might use id layer instead. with it, it would look similar to friends_ft
|
||||||
// DEPRECATED: use FT1_INIT2 instead
|
|
||||||
// - 4 byte (file_kind)
|
// - 4 byte (file_kind)
|
||||||
// - 8 bytes (data size, can be 0 if unknown, BUT files have to be atleast 1 byte)
|
// - 8 bytes (data size, can be 0 if unknown, BUT files have to be atleast 1 byte)
|
||||||
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
|
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
|
||||||
@ -250,8 +163,6 @@ enum class NGCEXT_Event : uint8_t {
|
|||||||
// acknowlage init (like an accept)
|
// acknowlage init (like an accept)
|
||||||
// like tox ft control continue
|
// like tox ft control continue
|
||||||
// - 1 byte (transfer_id)
|
// - 1 byte (transfer_id)
|
||||||
// - 2 byte (self_max_lossy_data_size) (optimal since v2)
|
|
||||||
// - 1 byte feature flags (optimal since v3, requires prev)
|
|
||||||
FT1_INIT_ACK,
|
FT1_INIT_ACK,
|
||||||
|
|
||||||
// TODO: init deny, speed up non acceptance
|
// TODO: init deny, speed up non acceptance
|
||||||
@ -275,63 +186,11 @@ enum class NGCEXT_Event : uint8_t {
|
|||||||
// send file as message
|
// send file as message
|
||||||
// basically the opposite of request
|
// basically the opposite of request
|
||||||
// contains file_kind and file_id (and timestamp?)
|
// contains file_kind and file_id (and timestamp?)
|
||||||
// - 4 bytes (message_id)
|
// - 4 byte (message_id)
|
||||||
// - 4 bytes (file_kind)
|
// - 4 byte (file_kind)
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
// - X bytes (file_kind dependent id, differnt sizes)
|
||||||
FT1_MESSAGE,
|
FT1_MESSAGE,
|
||||||
|
|
||||||
// announce you have specified chunks, for given info
|
|
||||||
// this is info/chunk specific
|
|
||||||
// bundle these together to reduce overhead (like maybe every 16, max 1min)
|
|
||||||
// - 4 bytes (file_kind)
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
// - array [
|
|
||||||
// - 4 bytes (chunk index)
|
|
||||||
// - ]
|
|
||||||
FT1_HAVE,
|
|
||||||
|
|
||||||
// tell the other peer which chunks, for a given info you have
|
|
||||||
// compressed down to a bitset (in parts)
|
|
||||||
// supposed to only be sent once on participation announcement, when mutual interest
|
|
||||||
// it is always assumed by the other side, that you dont have the chunk, until told otherwise,
|
|
||||||
// so you can be smart about what you send.
|
|
||||||
// - 4 bytes (file_kind)
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
// - 4 bytes (first chunk index in bitset)
|
|
||||||
// - array [
|
|
||||||
// - 1 bit (have chunk)
|
|
||||||
// - ] (filled up with zero)
|
|
||||||
FT1_BITSET,
|
|
||||||
|
|
||||||
// announce you have all chunks, for given info
|
|
||||||
// prefer over have and bitset
|
|
||||||
// - 4 bytes (file_kind)
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
FT1_HAVE_ALL,
|
|
||||||
|
|
||||||
// tell the other side you want to start a FT
|
|
||||||
// update: added feature flags (compression)
|
|
||||||
// - 4 byte (file_kind)
|
|
||||||
// - 8 bytes (data size, can be 0 if unknown, BUT files have to be atleast 1 byte)
|
|
||||||
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
|
|
||||||
// - 1 byte feature flags
|
|
||||||
// - X bytes (file_kind dependent id, differnt sizes)
|
|
||||||
FT1_INIT2,
|
|
||||||
|
|
||||||
// TODO: FT1_IDONTHAVE, tell a peer you no longer have said chunk
|
|
||||||
// TODO: FT1_REJECT, tell a peer you wont fulfil the request
|
|
||||||
// TODO: FT1_CANCEL, tell a peer you stop the transfer
|
|
||||||
|
|
||||||
// tell another peer that you are participating in X
|
|
||||||
// you can reply with PC1_ANNOUNCE, to let the other side know, you too are participating in X
|
|
||||||
// you should NOT announce often, since this hits peers that not participate
|
|
||||||
// ft1 uses fk+id
|
|
||||||
// - x bytes (id, different sizes)
|
|
||||||
PC1_ANNOUNCE = 0x80 | 32u,
|
|
||||||
|
|
||||||
// uses sub splitting
|
|
||||||
P2PRNG = 0x80 | 38u,
|
|
||||||
|
|
||||||
MAX
|
MAX
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -345,22 +204,15 @@ struct NGCEXTEventI {
|
|||||||
virtual bool onEvent(const Events::NGCEXT_ft1_data&) { return false; }
|
virtual bool onEvent(const Events::NGCEXT_ft1_data&) { return false; }
|
||||||
virtual bool onEvent(const Events::NGCEXT_ft1_data_ack&) { return false; }
|
virtual bool onEvent(const Events::NGCEXT_ft1_data_ack&) { return false; }
|
||||||
virtual bool onEvent(const Events::NGCEXT_ft1_message&) { return false; }
|
virtual bool onEvent(const Events::NGCEXT_ft1_message&) { return false; }
|
||||||
virtual bool onEvent(const Events::NGCEXT_ft1_have&) { return false; }
|
|
||||||
virtual bool onEvent(const Events::NGCEXT_ft1_bitset&) { return false; }
|
|
||||||
virtual bool onEvent(const Events::NGCEXT_ft1_have_all&) { return false; }
|
|
||||||
virtual bool onEvent(const Events::NGCEXT_ft1_init2&) { return false; }
|
|
||||||
virtual bool onEvent(const Events::NGCEXT_pc1_announce&) { return false; }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
using NGCEXTEventProviderI = EventProviderI<NGCEXTEventI>;
|
using NGCEXTEventProviderI = EventProviderI<NGCEXTEventI>;
|
||||||
|
|
||||||
class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
|
class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
|
||||||
ToxI& _t;
|
|
||||||
ToxEventProviderI& _tep;
|
ToxEventProviderI& _tep;
|
||||||
ToxEventProviderI::SubscriptionReference _tep_sr;
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
NGCEXTEventProvider(ToxI& t, ToxEventProviderI& tep);
|
NGCEXTEventProvider(ToxEventProviderI& tep/*, ToxI& t*/);
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
bool parse_hs1_request_last_ids(
|
bool parse_hs1_request_last_ids(
|
||||||
@ -393,18 +245,6 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
|
|||||||
bool _private
|
bool _private
|
||||||
);
|
);
|
||||||
|
|
||||||
bool parse_ft1_init_ack_v2(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
);
|
|
||||||
|
|
||||||
bool parse_ft1_init_ack_v3(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
);
|
|
||||||
|
|
||||||
bool parse_ft1_data(
|
bool parse_ft1_data(
|
||||||
uint32_t group_number, uint32_t peer_number,
|
uint32_t group_number, uint32_t peer_number,
|
||||||
const uint8_t* data, size_t data_size,
|
const uint8_t* data, size_t data_size,
|
||||||
@ -423,36 +263,6 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
|
|||||||
bool _private
|
bool _private
|
||||||
);
|
);
|
||||||
|
|
||||||
bool parse_ft1_have(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
);
|
|
||||||
|
|
||||||
bool parse_ft1_bitset(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
);
|
|
||||||
|
|
||||||
bool parse_ft1_have_all(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
);
|
|
||||||
|
|
||||||
bool parse_ft1_init2(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
);
|
|
||||||
|
|
||||||
bool parse_pc1_announce(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* data, size_t data_size,
|
|
||||||
bool _private
|
|
||||||
);
|
|
||||||
|
|
||||||
bool handlePacket(
|
bool handlePacket(
|
||||||
const uint32_t group_number,
|
const uint32_t group_number,
|
||||||
const uint32_t peer_number,
|
const uint32_t peer_number,
|
||||||
@ -461,87 +271,6 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
|
|||||||
const bool _private
|
const bool _private
|
||||||
);
|
);
|
||||||
|
|
||||||
public: // send api
|
|
||||||
bool send_ft1_request(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_init(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
uint64_t file_size,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_init_ack(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint8_t transfer_id
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_data(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
uint16_t sequence_id,
|
|
||||||
const uint8_t* data, size_t data_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_data_ack(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
const uint16_t* seq_ids, size_t seq_ids_size
|
|
||||||
);
|
|
||||||
|
|
||||||
// TODO: add private version
|
|
||||||
bool send_all_ft1_message(
|
|
||||||
uint32_t group_number,
|
|
||||||
uint32_t message_id,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_have(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size,
|
|
||||||
const uint32_t* chunks_data, size_t chunks_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_bitset(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size,
|
|
||||||
uint32_t start_chunk,
|
|
||||||
const uint8_t* bitset_data, size_t bitset_size // size is bytes
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_have_all(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_ft1_init2(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint32_t file_kind,
|
|
||||||
uint64_t file_size,
|
|
||||||
uint8_t transfer_id,
|
|
||||||
uint8_t feature_flags,
|
|
||||||
const uint8_t* file_id, size_t file_id_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_pc1_announce(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
const uint8_t* id_data, size_t id_size
|
|
||||||
);
|
|
||||||
|
|
||||||
bool send_all_pc1_announce(
|
|
||||||
uint32_t group_number,
|
|
||||||
const uint8_t* id_data, size_t id_size
|
|
||||||
);
|
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
bool onToxEvent(const Tox_Event_Group_Custom_Packet* e) override;
|
bool onToxEvent(const Tox_Event_Group_Custom_Packet* e) override;
|
||||||
bool onToxEvent(const Tox_Event_Group_Custom_Private_Packet* e) override;
|
bool onToxEvent(const Tox_Event_Group_Custom_Private_Packet* e) override;
|
||||||
|
@ -5,6 +5,15 @@
|
|||||||
#include <cstddef>
|
#include <cstddef>
|
||||||
|
|
||||||
// TODO: refactor, more state tracking in ccai and separate into flow and congestion algos
|
// TODO: refactor, more state tracking in ccai and separate into flow and congestion algos
|
||||||
|
inline bool isSkipSeqID(const std::pair<uint8_t, uint16_t>& a, const std::pair<uint8_t, uint16_t>& b) {
|
||||||
|
// this is not perfect, would need more ft id based history
|
||||||
|
if (a.first != b.first) {
|
||||||
|
return false; // we don't know
|
||||||
|
} else {
|
||||||
|
return a.second+1 != b.second;
|
||||||
|
}
|
||||||
|
}
|
||||||
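Concretely, for the (tf_id, seq_id) pairs this helper compares:

// isSkipSeqID({0, 5}, {0, 6}) -> false (same transfer, consecutive)
// isSkipSeqID({0, 5}, {0, 7}) -> true  (same transfer, seq 6 missing in between: possible loss or reordering)
// isSkipSeqID({0, 5}, {1, 6}) -> false (different transfer id, unknown, so no skip is assumed)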
|
|
||||||
struct CCAI {
|
struct CCAI {
|
||||||
public: // config
|
public: // config
|
||||||
using SeqIDType = std::pair<uint8_t, uint16_t>; // tf_id, seq_id
|
using SeqIDType = std::pair<uint8_t, uint16_t>; // tf_id, seq_id
|
||||||
@ -29,38 +38,22 @@ struct CCAI {
|
|||||||
//static_assert(maximum_segment_size == 574); // measured in wireshark
|
//static_assert(maximum_segment_size == 574); // measured in wireshark
|
||||||
|
|
||||||
// flow control
|
// flow control
|
||||||
//float max_byterate_allowed {100.f*1024*1024}; // 100MiB/s
|
float max_byterate_allowed {10*1024*1024}; // 10MiB/s
|
||||||
float max_byterate_allowed {10.f*1024*1024}; // 10MiB/s
|
|
||||||
//float max_byterate_allowed {1.f*1024*1024}; // 1MiB/s
|
|
||||||
//float max_byterate_allowed {0.6f*1024*1024}; // 600KiB/s
|
|
||||||
//float max_byterate_allowed {0.5f*1024*1024}; // 500KiB/s
|
|
||||||
//float max_byterate_allowed {0.15f*1024*1024}; // 150KiB/s
|
|
||||||
//float max_byterate_allowed {0.05f*1024*1024}; // 50KiB/s
|
|
||||||
|
|
||||||
public: // api
|
public: // api
|
||||||
CCAI(size_t maximum_segment_data_size) : MAXIMUM_SEGMENT_DATA_SIZE(maximum_segment_data_size) {}
|
CCAI(size_t maximum_segment_data_size) : MAXIMUM_SEGMENT_DATA_SIZE(maximum_segment_data_size) {}
|
||||||
virtual ~CCAI(void) {}
|
|
||||||
|
|
||||||
// returns current rtt/delay
|
|
||||||
virtual float getCurrentDelay(void) const = 0;
|
|
||||||
|
|
||||||
// return the current believed window in bytes of how much data can be inflight,
|
// return the current believed window in bytes of how much data can be inflight,
|
||||||
virtual float getWindow(void) const = 0;
|
//virtual float getCWnD(void) const = 0;
|
||||||
|
|
||||||
// TODO: api for how much data we should send
|
// TODO: api for how much data we should send
|
||||||
// take time since last sent into account
|
// take time since last sent into account
|
||||||
// respect max_byterate_allowed
|
// respect max_byterate_allowed
|
||||||
virtual int64_t canSend(float time_delta) = 0;
|
virtual size_t canSend(void) = 0;
|
||||||
|
|
||||||
// get the list of timed out seq_ids
|
// get the list of timed out seq_ids
|
||||||
virtual std::vector<SeqIDType> getTimeouts(void) const = 0;
|
virtual std::vector<SeqIDType> getTimeouts(void) const = 0;
|
||||||
|
|
||||||
// returns -1 if not implemented, can return 0
|
|
||||||
virtual int64_t inFlightCount(void) const { return -1; }
|
|
||||||
|
|
||||||
// returns -1 if not implemented, can return 0
|
|
||||||
virtual int64_t inFlightBytes(void) const { return -1; }
|
|
||||||
|
|
||||||
public: // callbacks
|
public: // callbacks
|
||||||
// data size is without overhead
|
// data size is without overhead
|
||||||
virtual void onSent(SeqIDType seq, size_t data_size) = 0;
|
virtual void onSent(SeqIDType seq, size_t data_size) = 0;
|
||||||
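To make the intended call pattern explicit, here is a minimal sketch of how a sender could drive a CCAI instance each tick; it mirrors what iteratePeer()/updateSendTransfer() in ngcft1.cpp do further down (the real code also charges retransmits against the same byte budget). The include path, queueSegment() and the sequence id handling are placeholders, not part of this API.

#include <cstddef>
#include <cstdint>
// #include "./ccai.hpp" // assumed header location

void tickSender(CCAI& cca, float time_delta, uint8_t transfer_id, uint16_t& next_seq) {
	auto queueSegment = [](CCAI::SeqIDType /*seq*/) { /* build and send the packet here */ };

	// 1. retransmit whatever the algorithm considers timed out
	for (const auto& seq : cca.getTimeouts()) {
		queueSegment(seq);
		cca.onLoss(seq, false); // false: the data is resent, keep it counted as in flight
	}

	// 2. send new data while this tick's byte budget allows it
	int64_t budget = cca.canSend(time_delta);
	while (budget >= static_cast<int64_t>(cca.MAXIMUM_SEGMENT_DATA_SIZE)) {
		const CCAI::SeqIDType seq{transfer_id, next_seq++};
		queueSegment(seq);
		cca.onSent(seq, cca.MAXIMUM_SEGMENT_DATA_SIZE);
		budget -= static_cast<int64_t>(cca.MAXIMUM_SEGMENT_DATA_SIZE);
	}

	// 3. acks arriving elsewhere feed back via cca.onAck({primary_seq, redundant_seqs...}),
	//    data given up on via cca.onLoss(seq, true).
}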
|
@ -3,27 +3,14 @@
|
|||||||
#include <cmath>
|
#include <cmath>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
|
|
||||||
void CUBIC::updateReductionTimer(float time_delta) {
|
|
||||||
const auto now {getTimeNow()};
|
|
||||||
|
|
||||||
// only keep updating while the cca interaction is not too long ago
|
|
||||||
// or simply when there are packets in flight
|
|
||||||
// (you need space to resend timedout, which still use up pipe space)
|
|
||||||
if (!_in_flight.empty() || now - _time_point_last_update <= getCurrentDelay()*4.f) {
|
|
||||||
_time_since_reduction += time_delta;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void CUBIC::resetReductionTimer(void) {
|
|
||||||
_time_since_reduction = 0.f;
|
|
||||||
}
|
|
||||||
|
|
||||||
float CUBIC::getCWnD(void) const {
|
float CUBIC::getCWnD(void) const {
|
||||||
const double K = cbrt(
|
const double K = cbrt(
|
||||||
(_window_max * (1. - BETA)) / SCALING_CONSTANT
|
(_window_max * (1. - BETA)) / SCALING_CONSTANT
|
||||||
);
|
);
|
||||||
|
|
||||||
const double TK = _time_since_reduction - K;
|
const double time_since_reduction = getTimeNow() - _time_point_reduction;
|
||||||
|
|
||||||
|
const double TK = time_since_reduction - K;
|
||||||
|
|
||||||
const double cwnd =
|
const double cwnd =
|
||||||
SCALING_CONSTANT
|
SCALING_CONSTANT
|
||||||
@ -46,69 +33,29 @@ float CUBIC::getCWnD(void) const {
|
|||||||
}
|
}
|
||||||
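For reference, the quantity assembled here is the standard CUBIC window function (RFC 8312); restated compactly with t the time since the last reduction and BETA/SCALING_CONSTANT/_window_max as defined in cubic.hpp (the clamping applied in the rest of getCWnD() is omitted):

#include <cmath>

// W_cubic(t) = C * (t - K)^3 + W_max, with K = cbrt(W_max * (1 - BETA) / C)
double cubicWindow(double t, double w_max, double beta, double c) {
	const double k = std::cbrt(w_max * (1.0 - beta) / c);
	return c * std::pow(t - k, 3.0) + w_max;
}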
|
|
||||||
void CUBIC::onCongestion(void) {
|
void CUBIC::onCongestion(void) {
|
||||||
// 8 is probably too much (800ms for 100ms rtt)
|
if (getTimeNow() - _time_point_reduction >= getCurrentDelay()) {
|
||||||
if (_time_since_reduction >= getCurrentDelay()*4.f) {
|
const auto current_cwnd = getCWnD();
|
||||||
const auto tmp_old_tp = _time_since_reduction;
|
_time_point_reduction = getTimeNow();
|
||||||
|
_window_max = current_cwnd;
|
||||||
|
|
||||||
const auto current_cwnd = getCWnD(); // TODO: remove, only used by logging?
|
std::cout << "CONGESTION! cwnd:" << current_cwnd << "\n";
|
||||||
const auto current_wnd = getWindow(); // respects cwnd and fwnd
|
|
||||||
|
|
||||||
resetReductionTimer();
|
|
||||||
|
|
||||||
if (current_cwnd < _window_max) {
|
|
||||||
// congestion before reaching the inflection point (prev window_max).
|
|
||||||
// reduce to wnd*beta to be fair
|
|
||||||
_window_max = current_wnd * BETA;
|
|
||||||
} else {
|
|
||||||
_window_max = current_wnd;
|
|
||||||
}
|
|
||||||
|
|
||||||
_window_max = std::max(_window_max, 2.0*MAXIMUM_SEGMENT_SIZE);
|
|
||||||
|
|
||||||
#if 1
|
|
||||||
std::cout << "----CONGESTION!"
|
|
||||||
<< " cwnd:" << current_cwnd
|
|
||||||
<< " wnd:" << current_wnd
|
|
||||||
<< " cwnd_max:" << _window_max
|
|
||||||
<< " pts:" << tmp_old_tp
|
|
||||||
<< " rtt:" << getCurrentDelay()
|
|
||||||
<< "\n"
|
|
||||||
;
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
float CUBIC::getWindow(void) const {
|
size_t CUBIC::canSend(void) {
|
||||||
return std::min<float>(getCWnD(), FlowOnly::getWindow());
|
const auto fspace_pkgs = FlowOnly::canSend();
|
||||||
}
|
|
||||||
|
|
||||||
int64_t CUBIC::canSend(float time_delta) {
|
|
||||||
const auto fspace_pkgs = FlowOnly::canSend(time_delta);
|
|
||||||
|
|
||||||
updateReductionTimer(time_delta);
|
|
||||||
|
|
||||||
if (fspace_pkgs == 0u) {
|
if (fspace_pkgs == 0u) {
|
||||||
std::cerr << "CUBIC: flow said 0\n";
|
|
||||||
return 0u;
|
return 0u;
|
||||||
}
|
}
|
||||||
|
|
||||||
const auto window = getCWnD();
|
const int64_t cspace_bytes = getCWnD() - _in_flight_bytes;
|
||||||
int64_t cspace_bytes = window - _in_flight_bytes;
|
|
||||||
if (cspace_bytes < MAXIMUM_SEGMENT_DATA_SIZE) {
|
if (cspace_bytes < MAXIMUM_SEGMENT_DATA_SIZE) {
|
||||||
//std::cerr << "CUBIC: cspace < seg size\n";
|
|
||||||
return 0u;
|
return 0u;
|
||||||
}
|
}
|
||||||
|
|
||||||
// also limit to max sendrate per tick, which is usually smaller than window
|
|
||||||
// this is mostly to prevent spikes on empty windows
|
|
||||||
const auto rate = window / getCurrentDelay();
|
|
||||||
|
|
||||||
// we don't want this limit to fall below 1 segment
|
|
||||||
const int64_t max_bytes_per_tick = std::max<int64_t>(rate * time_delta + 0.5f, MAXIMUM_SEGMENT_SIZE);
|
|
||||||
cspace_bytes = std::min<int64_t>(cspace_bytes, max_bytes_per_tick);
|
|
||||||
|
|
||||||
// limit to whole packets
|
// limit to whole packets
|
||||||
int64_t cspace_pkgs = (cspace_bytes / MAXIMUM_SEGMENT_DATA_SIZE) * MAXIMUM_SEGMENT_DATA_SIZE;
|
size_t cspace_pkgs = std::floor(cspace_bytes / MAXIMUM_SEGMENT_DATA_SIZE) * MAXIMUM_SEGMENT_DATA_SIZE;
|
||||||
|
|
||||||
return std::min(cspace_pkgs, fspace_pkgs);
|
return std::min(cspace_pkgs, fspace_pkgs);
|
||||||
}
|
}
|
||||||
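To put illustrative numbers on the per-tick cap in the master-side canSend(): with a congestion window of 256 KiB and a current delay of 100 ms the allowed rate is 2.5 MiB/s, so a 20 ms tick may emit roughly 51 KiB, and the limit never drops below one full segment; the figures are made up for the example.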
|
@ -2,9 +2,13 @@
|
|||||||
|
|
||||||
#include "./flow_only.hpp"
|
#include "./flow_only.hpp"
|
||||||
|
|
||||||
|
#include <chrono>
|
||||||
|
|
||||||
struct CUBIC : public FlowOnly {
|
struct CUBIC : public FlowOnly {
|
||||||
|
//using clock = std::chrono::steady_clock;
|
||||||
|
|
||||||
public: // config
|
public: // config
|
||||||
static constexpr float BETA {0.8f};
|
static constexpr float BETA {0.7f};
|
||||||
static constexpr float SCALING_CONSTANT {0.4f};
|
static constexpr float SCALING_CONSTANT {0.4f};
|
||||||
static constexpr float RTT_EMA_ALPHA = 0.1f; // 0.1 is very smooth, might need more
|
static constexpr float RTT_EMA_ALPHA = 0.1f; // 0.1 is very smooth, might need more
|
||||||
|
|
||||||
@ -12,26 +16,37 @@ struct CUBIC : public FlowOnly {
|
|||||||
// window size before last reduction
|
// window size before last reduction
|
||||||
double _window_max {2.f * MAXIMUM_SEGMENT_SIZE}; // start with mss*2
|
double _window_max {2.f * MAXIMUM_SEGMENT_SIZE}; // start with mss*2
|
||||||
//double _window_last_max {2.f * MAXIMUM_SEGMENT_SIZE};
|
//double _window_last_max {2.f * MAXIMUM_SEGMENT_SIZE};
|
||||||
|
double _time_point_reduction {getTimeNow()};
|
||||||
double _time_since_reduction {12.f}; // warm start
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
void updateReductionTimer(float time_delta);
|
|
||||||
void resetReductionTimer(void);
|
|
||||||
|
|
||||||
float getCWnD(void) const;
|
float getCWnD(void) const;
|
||||||
|
|
||||||
|
// moving avg over the last few delay samples
|
||||||
|
// VERY sensitive to bundling acks
|
||||||
|
//float getCurrentDelay(void) const;
|
||||||
|
|
||||||
|
//void addRTT(float new_delay);
|
||||||
|
|
||||||
void onCongestion(void) override;
|
void onCongestion(void) override;
|
||||||
|
|
||||||
public: // api
|
public: // api
|
||||||
CUBIC(size_t maximum_segment_data_size) : FlowOnly(maximum_segment_data_size) {}
|
CUBIC(size_t maximum_segment_data_size) : FlowOnly(maximum_segment_data_size) {}
|
||||||
virtual ~CUBIC(void) {}
|
|
||||||
|
|
||||||
float getWindow(void) const override;
|
|
||||||
|
|
||||||
// TODO: api for how much data we should send
|
// TODO: api for how much data we should send
|
||||||
// take time since last sent into account
|
// take time since last sent into account
|
||||||
// respect max_byterate_allowed
|
// respect max_byterate_allowed
|
||||||
int64_t canSend(float time_delta) override;
|
size_t canSend(void) override;
|
||||||
|
|
||||||
|
// get the list of timed out seq_ids
|
||||||
|
//std::vector<SeqIDType> getTimeouts(void) const override;
|
||||||
|
|
||||||
|
public: // callbacks
|
||||||
|
// data size is without overhead
|
||||||
|
//void onSent(SeqIDType seq, size_t data_size) override;
|
||||||
|
|
||||||
|
//void onAck(std::vector<SeqIDType> seqs) override;
|
||||||
|
|
||||||
|
// if discard, not resent, not inflight
|
||||||
|
//void onLoss(SeqIDType seq, bool discard) override;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -6,18 +6,10 @@
|
|||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
|
|
||||||
float FlowOnly::getCurrentDelay(void) const {
|
float FlowOnly::getCurrentDelay(void) const {
|
||||||
// below 1ms is useless
|
return std::min(_rtt_ema, RTT_MAX);
|
||||||
//return std::clamp(_rtt_ema, 0.001f, RTT_MAX);
|
|
||||||
// the current iterate rate min is 5ms
|
|
||||||
return std::clamp(_rtt_ema, 0.005f, RTT_MAX);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void FlowOnly::addRTT(float new_delay) {
|
void FlowOnly::addRTT(float new_delay) {
|
||||||
if (new_delay > _rtt_ema * RTT_UP_MAX) {
|
|
||||||
// too large a jump up to be taken into account
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// lerp(new_delay, rtt_ema, 0.1)
|
// lerp(new_delay, rtt_ema, 0.1)
|
||||||
_rtt_ema = RTT_EMA_ALPHA * new_delay + (1.f - RTT_EMA_ALPHA) * _rtt_ema;
|
_rtt_ema = RTT_EMA_ALPHA * new_delay + (1.f - RTT_EMA_ALPHA) * _rtt_ema;
|
||||||
}
|
}
|
||||||
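A quick sanity check of the smoothing above, taking RTT_EMA_ALPHA = 0.1 for illustration: starting from _rtt_ema = 100 ms, a 140 ms sample moves the estimate to 0.1*140 + 0.9*100 = 104 ms, while a 350 ms sample exceeds the master-side RTT_UP_MAX filter (3x the current estimate) and is discarded as a suspected ack-bundling spike.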
@ -31,69 +23,33 @@ void FlowOnly::updateWindow(void) {
|
|||||||
_fwnd = std::max(_fwnd, 2.f * MAXIMUM_SEGMENT_DATA_SIZE);
|
_fwnd = std::max(_fwnd, 2.f * MAXIMUM_SEGMENT_DATA_SIZE);
|
||||||
}
|
}
|
||||||
|
|
||||||
void FlowOnly::updateCongestion(void) {
|
size_t FlowOnly::canSend(void) {
|
||||||
updateWindow();
|
|
||||||
const auto tmp_window = getWindow();
|
|
||||||
// packet window * 0.3
|
|
||||||
// but at least 4
|
|
||||||
int32_t max_consecutive_events = std::clamp<int32_t>(
|
|
||||||
(tmp_window/MAXIMUM_SEGMENT_DATA_SIZE) * 0.3f,
|
|
||||||
4,
|
|
||||||
50 // limit TODO: fix idle/time starved algo
|
|
||||||
);
|
|
||||||
// TODO: magic number
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
std::cout << "NGC_FT1 Flow: pkg out of order"
|
|
||||||
<< " w:" << tmp_window
|
|
||||||
<< " pw:" << tmp_window/MAXIMUM_SEGMENT_DATA_SIZE
|
|
||||||
<< " coe:" << _consecutive_events
|
|
||||||
<< " mcoe:" << max_consecutive_events
|
|
||||||
<< "\n";
|
|
||||||
#endif
|
|
||||||
|
|
||||||
if (_consecutive_events > max_consecutive_events) {
|
|
||||||
//std::cout << "CONGESTION! NGC_FT1 flow: pkg out of order\n";
|
|
||||||
onCongestion();
|
|
||||||
|
|
||||||
// TODO: set _consecutive_events to zero?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
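For a rough feel of the threshold (numbers illustrative): with a 64 KiB window and a segment data size of about 1.1 KiB the window holds roughly 58 packets, so about 17 consecutive out-of-order acks are tolerated before onCongestion() fires; tiny windows still get the floor of 4 and very large ones are capped at 50.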
|
|
||||||
float FlowOnly::getWindow(void) const {
|
|
||||||
return _fwnd;
|
|
||||||
}
|
|
||||||
|
|
||||||
int64_t FlowOnly::canSend(float time_delta) {
|
|
||||||
if (_in_flight.empty()) {
|
if (_in_flight.empty()) {
|
||||||
assert(_in_flight_bytes == 0);
|
assert(_in_flight_bytes == 0);
|
||||||
// TODO: should we really exit early here??
|
return MAXIMUM_SEGMENT_DATA_SIZE;
|
||||||
return 2*MAXIMUM_SEGMENT_DATA_SIZE;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
updateWindow();
|
updateWindow();
|
||||||
|
|
||||||
int64_t fspace = _fwnd - _in_flight_bytes;
|
const int64_t fspace = _fwnd - _in_flight_bytes;
|
||||||
if (fspace < MAXIMUM_SEGMENT_DATA_SIZE) {
|
if (fspace < MAXIMUM_SEGMENT_DATA_SIZE) {
|
||||||
return 0u;
|
return 0u;
|
||||||
}
|
}
|
||||||
|
|
||||||
// also limit to max sendrate per tick, which is usually smaller than window
|
|
||||||
// this is mostly to prevent spikes on empty windows
|
|
||||||
fspace = std::min<int64_t>(fspace, max_byterate_allowed * time_delta + 0.5f);
|
|
||||||
|
|
||||||
// limit to whole packets
|
// limit to whole packets
|
||||||
return (fspace / MAXIMUM_SEGMENT_DATA_SIZE) * MAXIMUM_SEGMENT_DATA_SIZE;
|
size_t space = std::floor(fspace / MAXIMUM_SEGMENT_DATA_SIZE)
|
||||||
|
* MAXIMUM_SEGMENT_DATA_SIZE;
|
||||||
|
|
||||||
|
return space;
|
||||||
}
|
}
|
||||||
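Worked through with illustrative numbers: _fwnd = 512 KiB with 400 KiB already in flight leaves 112 KiB; on the master side this is further capped by max_byterate_allowed * time_delta (10 MiB/s over a 20 ms tick is about 205 KiB, so no extra cap here) and finally rounded down to a whole number of segments.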
|
|
||||||
std::vector<FlowOnly::SeqIDType> FlowOnly::getTimeouts(void) const {
|
std::vector<FlowOnly::SeqIDType> FlowOnly::getTimeouts(void) const {
|
||||||
std::vector<SeqIDType> list;
|
std::vector<SeqIDType> list;
|
||||||
list.reserve(_in_flight.size()/3); // we don't know, so we just guess
|
|
||||||
|
|
||||||
// after 3 rtt delay, we trigger timeout
|
// after 3 rtt delay, we trigger timeout
|
||||||
const auto now_adjusted = getTimeNow() - getCurrentDelay()*3.f;
|
const auto now_adjusted = getTimeNow() - getCurrentDelay()*3.f;
|
||||||
|
|
||||||
for (const auto& [seq, time_stamp, size, _] : _in_flight) {
|
for (const auto& [seq, time_stamp, size] : _in_flight) {
|
||||||
if (now_adjusted > time_stamp) {
|
if (now_adjusted > time_stamp) {
|
||||||
list.push_back(seq);
|
list.push_back(seq);
|
||||||
}
|
}
|
||||||
@ -102,35 +58,16 @@ std::vector<FlowOnly::SeqIDType> FlowOnly::getTimeouts(void) const {
|
|||||||
return list;
|
return list;
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t FlowOnly::inFlightCount(void) const {
|
|
||||||
return _in_flight.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
int64_t FlowOnly::inFlightBytes(void) const {
|
|
||||||
return _in_flight_bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
void FlowOnly::onSent(SeqIDType seq, size_t data_size) {
|
void FlowOnly::onSent(SeqIDType seq, size_t data_size) {
|
||||||
if constexpr (true) {
|
if constexpr (true) {
|
||||||
size_t sum {0u};
|
|
||||||
for (const auto& it : _in_flight) {
|
for (const auto& it : _in_flight) {
|
||||||
assert(it.id != seq);
|
assert(std::get<0>(it) != seq);
|
||||||
sum += it.bytes;
|
|
||||||
}
|
}
|
||||||
assert(_in_flight_bytes == sum);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const auto& new_entry = _in_flight.emplace_back(
|
_in_flight.push_back({seq, getTimeNow(), data_size + SEGMENT_OVERHEAD});
|
||||||
FlyingBunch{
|
_in_flight_bytes += data_size + SEGMENT_OVERHEAD;
|
||||||
seq,
|
//_recently_sent_bytes += data_size + SEGMENT_OVERHEAD;
|
||||||
static_cast<float>(getTimeNow()),
|
|
||||||
data_size + SEGMENT_OVERHEAD,
|
|
||||||
false
|
|
||||||
}
|
|
||||||
);
|
|
||||||
_in_flight_bytes += new_entry.bytes;
|
|
||||||
|
|
||||||
_time_point_last_update = getTimeNow();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void FlowOnly::onAck(std::vector<SeqIDType> seqs) {
|
void FlowOnly::onAck(std::vector<SeqIDType> seqs) {
|
||||||
@ -141,31 +78,28 @@ void FlowOnly::onAck(std::vector<SeqIDType> seqs) {
|
|||||||
|
|
||||||
const auto now {getTimeNow()};
|
const auto now {getTimeNow()};
|
||||||
|
|
||||||
_time_point_last_update = now;
|
|
||||||
|
|
||||||
// first seq in seqs is the actual value, all extra are for redundancy
|
// first seq in seqs is the actual value, all extra are for redundancy
|
||||||
{ // skip in ack is congestion event
|
{ // skip in ack is congestion event
|
||||||
// 1. look at primary ack of packet
|
// 1. look at primary ack of packet
|
||||||
auto it = std::find_if(_in_flight.begin(), _in_flight.end(), [seq = seqs.front()](const auto& v) -> bool {
|
auto it = std::find_if(_in_flight.begin(), _in_flight.end(), [seq = seqs.front()](const auto& v) -> bool {
|
||||||
return v.id == seq;
|
return std::get<0>(v) == seq;
|
||||||
});
|
});
|
||||||
if (it != _in_flight.end() && !it->ignore) {
|
if (it != _in_flight.end()) {
|
||||||
// find the first non-ignored entry, it should be the expected one
|
if (it != _in_flight.begin()) {
|
||||||
auto first_it = std::find_if_not(_in_flight.cbegin(), _in_flight.cend(), [](const auto& v) -> bool { return v.ignore; });
|
|
||||||
|
|
||||||
if (first_it != _in_flight.cend() && it != first_it) {
|
|
||||||
// not next expected seq -> skip detected
|
// not next expected seq -> skip detected
|
||||||
|
|
||||||
_consecutive_events++;
|
std::cout << "CONGESTION out of order\n";
|
||||||
it->ignore = true; // only handle once
|
onCongestion();
|
||||||
|
//if (getTimeNow() >= _last_congestion_event + _last_congestion_rtt) {
|
||||||
updateCongestion();
|
//_recently_lost_data = true;
|
||||||
|
//_last_congestion_event = getTimeNow();
|
||||||
|
//_last_congestion_rtt = getCurrentDelay();
|
||||||
|
//}
|
||||||
} else {
|
} else {
|
||||||
// only measure delay, if not a congestion
|
// only measure delay, if not a congestion
|
||||||
addRTT(now - it->timestamp);
|
addRTT(now - std::get<1>(*it));
|
||||||
_consecutive_events = 0;
|
|
||||||
}
|
}
|
||||||
} else { // TODO: if ! ignore too
|
} else {
|
||||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||||
#if 0
|
#if 0
|
||||||
// assume we got a duplicated packet
|
// assume we got a duplicated packet
|
||||||
@ -177,14 +111,14 @@ void FlowOnly::onAck(std::vector<SeqIDType> seqs) {
|
|||||||
|
|
||||||
for (const auto& seq : seqs) {
|
for (const auto& seq : seqs) {
|
||||||
auto it = std::find_if(_in_flight.begin(), _in_flight.end(), [seq](const auto& v) -> bool {
|
auto it = std::find_if(_in_flight.begin(), _in_flight.end(), [seq](const auto& v) -> bool {
|
||||||
return v.id == seq;
|
return std::get<0>(v) == seq;
|
||||||
});
|
});
|
||||||
|
|
||||||
if (it == _in_flight.end()) {
|
if (it == _in_flight.end()) {
|
||||||
continue; // not found, ignore
|
continue; // not found, ignore
|
||||||
} else {
|
} else {
|
||||||
//most_recent = std::max(most_recent, std::get<1>(*it));
|
//most_recent = std::max(most_recent, std::get<1>(*it));
|
||||||
_in_flight_bytes -= it->bytes;
|
_in_flight_bytes -= std::get<2>(*it);
|
||||||
assert(_in_flight_bytes >= 0);
|
assert(_in_flight_bytes >= 0);
|
||||||
//_recently_acked_data += std::get<2>(*it);
|
//_recently_acked_data += std::get<2>(*it);
|
||||||
_in_flight.erase(it);
|
_in_flight.erase(it);
|
||||||
@ -194,8 +128,8 @@ void FlowOnly::onAck(std::vector<SeqIDType> seqs) {
|
|||||||
|
|
||||||
void FlowOnly::onLoss(SeqIDType seq, bool discard) {
|
void FlowOnly::onLoss(SeqIDType seq, bool discard) {
|
||||||
auto it = std::find_if(_in_flight.begin(), _in_flight.end(), [seq](const auto& v) -> bool {
|
auto it = std::find_if(_in_flight.begin(), _in_flight.end(), [seq](const auto& v) -> bool {
|
||||||
assert(!std::isnan(v.timestamp));
|
assert(!std::isnan(std::get<1>(v)));
|
||||||
return v.id == seq;
|
return std::get<0>(v) == seq;
|
||||||
});
|
});
|
||||||
|
|
||||||
if (it == _in_flight.end()) {
|
if (it == _in_flight.end()) {
|
||||||
@ -203,27 +137,24 @@ void FlowOnly::onLoss(SeqIDType seq, bool discard) {
|
|||||||
return; // not found, ignore ??
|
return; // not found, ignore ??
|
||||||
}
|
}
|
||||||
|
|
||||||
//std::cerr << "FLOW loss\n";
|
std::cerr << "FLOW loss\n";
|
||||||
|
|
||||||
// "if data lost is not to be retransmitted"
|
// "if data lost is not to be retransmitted"
|
||||||
if (discard) {
|
if (discard) {
|
||||||
_in_flight_bytes -= it->bytes;
|
_in_flight_bytes -= std::get<2>(*it);
|
||||||
assert(_in_flight_bytes >= 0);
|
assert(_in_flight_bytes >= 0);
|
||||||
_in_flight.erase(it);
|
_in_flight.erase(it);
|
||||||
} else {
|
|
||||||
// and not taken into account for the rtt
|
|
||||||
it->timestamp = getTimeNow();
|
|
||||||
it->ignore = true;
|
|
||||||
}
|
}
|
||||||
|
// TODO: reset timestamp?
|
||||||
|
|
||||||
// usually after data arrived out-of-order/duplicate
|
#if 0 // temporarily disable ce for timeout
|
||||||
if (!it->ignore) {
|
// at most once per rtt?
|
||||||
it->ignore = true; // only handle once
|
// TODO: use delay at event instead
|
||||||
//_consecutive_events++;
|
if (getTimeNow() >= _last_congestion_event + _last_congestion_rtt) {
|
||||||
|
_recently_lost_data = true;
|
||||||
//updateCongestion();
|
_last_congestion_event = getTimeNow();
|
||||||
// this is usually a safe indicator for congestion/maxed connection
|
_last_congestion_rtt = getCurrentDelay();
|
||||||
onCongestion();
|
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4,15 +4,23 @@
|
|||||||
|
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
#include <tuple>
|
||||||
|
|
||||||
struct FlowOnly : public CCAI {
|
struct FlowOnly : public CCAI {
|
||||||
protected:
|
protected:
|
||||||
using clock = std::chrono::steady_clock;
|
using clock = std::chrono::steady_clock;
|
||||||
|
|
||||||
public: // config
|
public: // config
|
||||||
static constexpr float RTT_EMA_ALPHA = 0.001f; // might need change over time
|
static constexpr float RTT_EMA_ALPHA = 0.1f; // might need over time
|
||||||
static constexpr float RTT_UP_MAX = 3.0f; // how much larger a delay can be to be taken into account
|
static constexpr float RTT_MAX = 2.f; // 2 sec is probably too much
|
||||||
static constexpr float RTT_MAX = 2.f; // maybe larger for tunneled connections
|
|
||||||
|
//float max_byterate_allowed {100.f*1024*1024}; // 100MiB/s
|
||||||
|
float max_byterate_allowed {10.f*1024*1024}; // 10MiB/s
|
||||||
|
//float max_byterate_allowed {1.f*1024*1024}; // 1MiB/s
|
||||||
|
//float max_byterate_allowed {0.6f*1024*1024}; // 600KiB/s
|
||||||
|
//float max_byterate_allowed {0.5f*1024*1024}; // 500KiB/s
|
||||||
|
//float max_byterate_allowed {0.05f*1024*1024}; // 50KiB/s
|
||||||
|
//float max_byterate_allowed {0.15f*1024*1024}; // 150KiB/s
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
// initialize to low value, will get corrected very fast
|
// initialize to low value, will get corrected very fast
|
||||||
@ -22,24 +30,11 @@ struct FlowOnly : public CCAI {
|
|||||||
float _rtt_ema {0.1f};
|
float _rtt_ema {0.1f};
|
||||||
|
|
||||||
// list of sequence ids and timestamps of when they were sent (and payload size)
|
// list of sequence ids and timestamps of when they were sent (and payload size)
|
||||||
struct FlyingBunch {
|
std::vector<std::tuple<SeqIDType, float, size_t>> _in_flight;
|
||||||
SeqIDType id;
|
|
||||||
float timestamp;
|
|
||||||
size_t bytes;
|
|
||||||
|
|
||||||
// set to true if counted as ce or resent due to timeout
|
|
||||||
bool ignore {false};
|
|
||||||
};
|
|
||||||
std::vector<FlyingBunch> _in_flight;
|
|
||||||
int64_t _in_flight_bytes {0};
|
int64_t _in_flight_bytes {0};
|
||||||
|
|
||||||
int32_t _consecutive_events {0};
|
|
||||||
|
|
||||||
clock::time_point _time_start_offset;
|
clock::time_point _time_start_offset;
|
||||||
|
|
||||||
// used to clamp growth rate in the void
|
|
||||||
double _time_point_last_update {getTimeNow()};
|
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
// make values relative to algo start for readability (and precision)
|
// make values relative to algo start for readability (and precision)
|
||||||
// get timestamp in seconds
|
// get timestamp in seconds
|
||||||
@ -49,10 +44,7 @@ struct FlowOnly : public CCAI {
|
|||||||
|
|
||||||
// moving avg over the last few delay samples
|
// moving avg over the last few delay samples
|
||||||
// VERY sensitive to bundling acks
|
// VERY sensitive to bundling acks
|
||||||
float getCurrentDelay(void) const override;
|
float getCurrentDelay(void) const;
|
||||||
|
|
||||||
// call updateWindow() to update this value
|
|
||||||
float getWindow(void) const override;
|
|
||||||
|
|
||||||
void addRTT(float new_delay);
|
void addRTT(float new_delay);
|
||||||
|
|
||||||
@ -60,24 +52,17 @@ struct FlowOnly : public CCAI {
|
|||||||
|
|
||||||
virtual void onCongestion(void) {};
|
virtual void onCongestion(void) {};
|
||||||
|
|
||||||
// internal logic, calls the onCongestion() event
|
|
||||||
void updateCongestion(void);
|
|
||||||
|
|
||||||
public: // api
|
public: // api
|
||||||
FlowOnly(size_t maximum_segment_data_size) : CCAI(maximum_segment_data_size) {}
|
FlowOnly(size_t maximum_segment_data_size) : CCAI(maximum_segment_data_size) {}
|
||||||
virtual ~FlowOnly(void) {}
|
|
||||||
|
|
||||||
// TODO: api for how much data we should send
|
// TODO: api for how much data we should send
|
||||||
// take time since last sent into account
|
// take time since last sent into account
|
||||||
// respect max_byterate_allowed
|
// respect max_byterate_allowed
|
||||||
int64_t canSend(float time_delta) override;
|
size_t canSend(void) override;
|
||||||
|
|
||||||
// get the list of timed out seq_ids
|
// get the list of timed out seq_ids
|
||||||
std::vector<SeqIDType> getTimeouts(void) const override;
|
std::vector<SeqIDType> getTimeouts(void) const override;
|
||||||
|
|
||||||
int64_t inFlightCount(void) const override;
|
|
||||||
int64_t inFlightBytes(void) const override;
|
|
||||||
|
|
||||||
public: // callbacks
|
public: // callbacks
|
||||||
// data size is without overhead
|
// data size is without overhead
|
||||||
void onSent(SeqIDType seq, size_t data_size) override;
|
void onSent(SeqIDType seq, size_t data_size) override;
|
||||||
|
@ -6,7 +6,6 @@
|
|||||||
#include <deque>
|
#include <deque>
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <tuple>
|
|
||||||
|
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
@ -14,22 +13,13 @@
|
|||||||
|
|
||||||
// https://youtu.be/0HRwNSA-JYM
|
// https://youtu.be/0HRwNSA-JYM
|
||||||
|
|
||||||
static bool isSkipSeqID(const std::pair<uint8_t, uint16_t>& a, const std::pair<uint8_t, uint16_t>& b) {
|
|
||||||
// this is not perfect, would need more ft id based history
|
|
||||||
if (a.first != b.first) {
|
|
||||||
return false; // we don't know
|
|
||||||
} else {
|
|
||||||
return a.second+1 != b.second;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline constexpr bool PLOTTING = false;
|
inline constexpr bool PLOTTING = false;
|
||||||
|
|
||||||
LEDBAT::LEDBAT(size_t maximum_segment_data_size) : CCAI(maximum_segment_data_size) {
|
LEDBAT::LEDBAT(size_t maximum_segment_data_size) : CCAI(maximum_segment_data_size) {
|
||||||
_time_start_offset = clock::now();
|
_time_start_offset = clock::now();
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t LEDBAT::canSend(float time_delta) {
|
size_t LEDBAT::canSend(void) {
|
||||||
if (_in_flight.empty()) {
|
if (_in_flight.empty()) {
|
||||||
return MAXIMUM_SEGMENT_DATA_SIZE;
|
return MAXIMUM_SEGMENT_DATA_SIZE;
|
||||||
}
|
}
|
||||||
@ -44,7 +34,9 @@ int64_t LEDBAT::canSend(float time_delta) {
|
|||||||
return 0u;
|
return 0u;
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::ceil(std::min<float>(cspace, fspace) / MAXIMUM_SEGMENT_DATA_SIZE) * MAXIMUM_SEGMENT_DATA_SIZE;
|
size_t space = std::ceil(std::min<float>(cspace, fspace) / MAXIMUM_SEGMENT_DATA_SIZE) * MAXIMUM_SEGMENT_DATA_SIZE;
|
||||||
|
|
||||||
|
return space;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<LEDBAT::SeqIDType> LEDBAT::getTimeouts(void) const {
|
std::vector<LEDBAT::SeqIDType> LEDBAT::getTimeouts(void) const {
|
||||||
|
@ -11,7 +11,7 @@
|
|||||||
// LEDBAT++: https://www.ietf.org/archive/id/draft-irtf-iccrg-ledbat-plus-plus-01.txt
|
// LEDBAT++: https://www.ietf.org/archive/id/draft-irtf-iccrg-ledbat-plus-plus-01.txt
|
||||||
|
|
||||||
// LEDBAT++ implementation
|
// LEDBAT++ implementation
|
||||||
struct LEDBAT : public CCAI {
|
struct LEDBAT : public CCAI{
|
||||||
public: // config
|
public: // config
|
||||||
#if 0
|
#if 0
|
||||||
using SeqIDType = std::pair<uint8_t, uint16_t>; // tf_id, seq_id
|
using SeqIDType = std::pair<uint8_t, uint16_t>; // tf_id, seq_id
|
||||||
@ -47,20 +47,21 @@ struct LEDBAT : public CCAI {
|
|||||||
|
|
||||||
//static constexpr size_t rtt_buffer_size_max {2000};
|
//static constexpr size_t rtt_buffer_size_max {2000};
|
||||||
|
|
||||||
|
float max_byterate_allowed {10*1024*1024}; // 10MiB/s
|
||||||
|
|
||||||
public:
|
public:
|
||||||
LEDBAT(size_t maximum_segment_data_size);
|
LEDBAT(size_t maximum_segment_data_size);
|
||||||
virtual ~LEDBAT(void) {}
|
|
||||||
|
|
||||||
// return the current believed window in bytes of how much data can be inflight,
|
// return the current believed window in bytes of how much data can be inflight,
|
||||||
// without overstepping the delay requirement
|
// without overstepping the delay requirement
|
||||||
float getWindow(void) const override {
|
float getCWnD(void) const {
|
||||||
return _cwnd;
|
return _cwnd;
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: api for how much data we should send
|
// TODO: api for how much data we should send
|
||||||
// take time since last sent into account
|
// take time since last sent into account
|
||||||
// respect max_byterate_allowed
|
// respect max_byterate_allowed
|
||||||
int64_t canSend(float time_delta) override;
|
size_t canSend(void) override;
|
||||||
|
|
||||||
// get the list of timed out seq_ids
|
// get the list of timed out seq_ids
|
||||||
std::vector<SeqIDType> getTimeouts(void) const override;
|
std::vector<SeqIDType> getTimeouts(void) const override;
|
||||||
@ -85,7 +86,7 @@ struct LEDBAT : public CCAI {
|
|||||||
|
|
||||||
// moving avg over the last few delay samples
|
// moving avg over the last few delay samples
|
||||||
// VERY sensitive to bundling acks
|
// VERY sensitive to bundling acks
|
||||||
float getCurrentDelay(void) const override;
|
float getCurrentDelay(void) const;
|
||||||
|
|
||||||
void addRTT(float new_delay);
|
void addRTT(float new_delay);
|
||||||
|
|
||||||
|
@ -1,21 +1,149 @@
|
|||||||
#include "./ngcft1.hpp"
|
#include "./ngcft1.hpp"
|
||||||
|
|
||||||
#include "./flow_only.hpp"
|
#include <solanaceae/toxcore/utils.hpp>
|
||||||
#include "./cubic.hpp"
|
|
||||||
#include "./ledbat.hpp"
|
|
||||||
|
|
||||||
#include <solanaceae/util/utils.hpp>
|
|
||||||
|
|
||||||
#include <sodium.h>
|
#include <sodium.h>
|
||||||
|
|
||||||
#include <cstdint>
|
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <set>
|
#include <set>
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<CCAI::SeqIDType>& timeouts_set, int64_t& can_packet_size) {
|
bool NGCFT1::sendPKG_FT1_REQUEST(
|
||||||
|
uint32_t group_number, uint32_t peer_number,
|
||||||
|
uint32_t file_kind,
|
||||||
|
const uint8_t* file_id, size_t file_id_size
|
||||||
|
) {
|
||||||
|
// - 1 byte packet id
|
||||||
|
// - 4 byte file_kind
|
||||||
|
// - X bytes file_id
|
||||||
|
std::vector<uint8_t> pkg;
|
||||||
|
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_REQUEST));
|
||||||
|
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
||||||
|
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
||||||
|
}
|
||||||
|
for (size_t i = 0; i < file_id_size; i++) {
|
||||||
|
pkg.push_back(file_id[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// lossless
|
||||||
|
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool NGCFT1::sendPKG_FT1_INIT(
|
||||||
|
uint32_t group_number, uint32_t peer_number,
|
||||||
|
uint32_t file_kind,
|
||||||
|
uint64_t file_size,
|
||||||
|
uint8_t transfer_id,
|
||||||
|
const uint8_t* file_id, size_t file_id_size
|
||||||
|
) {
|
||||||
|
// - 1 byte packet id
|
||||||
|
// - 4 byte (file_kind)
|
||||||
|
// - 8 bytes (data size)
|
||||||
|
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultaneous fts)
|
||||||
|
// - X bytes (file_kind dependent id, different sizes)
|
||||||
|
|
||||||
|
std::vector<uint8_t> pkg;
|
||||||
|
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT));
|
||||||
|
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
||||||
|
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
||||||
|
}
|
||||||
|
for (size_t i = 0; i < sizeof(file_size); i++) {
|
||||||
|
pkg.push_back((file_size>>(i*8)) & 0xff);
|
||||||
|
}
|
||||||
|
pkg.push_back(transfer_id);
|
||||||
|
for (size_t i = 0; i < file_id_size; i++) {
|
||||||
|
pkg.push_back(file_id[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// lossless
|
||||||
|
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool NGCFT1::sendPKG_FT1_INIT_ACK(
|
||||||
|
uint32_t group_number, uint32_t peer_number,
|
||||||
|
uint8_t transfer_id
|
||||||
|
) {
|
||||||
|
// - 1 byte packet id
|
||||||
|
// - 1 byte transfer_id
|
||||||
|
std::vector<uint8_t> pkg;
|
||||||
|
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT_ACK));
|
||||||
|
pkg.push_back(transfer_id);
|
||||||
|
|
||||||
|
// lossless
|
||||||
|
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool NGCFT1::sendPKG_FT1_DATA(
|
||||||
|
uint32_t group_number, uint32_t peer_number,
|
||||||
|
uint8_t transfer_id,
|
||||||
|
uint16_t sequence_id,
|
||||||
|
const uint8_t* data, size_t data_size
|
||||||
|
) {
|
||||||
|
assert(data_size > 0);
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
// check header_size+data_size <= max pkg size
|
||||||
|
|
||||||
|
std::vector<uint8_t> pkg;
|
||||||
|
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA));
|
||||||
|
pkg.push_back(transfer_id);
|
||||||
|
pkg.push_back(sequence_id & 0xff);
|
||||||
|
pkg.push_back((sequence_id >> (1*8)) & 0xff);
|
||||||
|
|
||||||
|
// TODO: optimize
|
||||||
|
for (size_t i = 0; i < data_size; i++) {
|
||||||
|
pkg.push_back(data[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// lossy
|
||||||
|
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool NGCFT1::sendPKG_FT1_DATA_ACK(
|
||||||
|
uint32_t group_number, uint32_t peer_number,
|
||||||
|
uint8_t transfer_id,
|
||||||
|
const uint16_t* seq_ids, size_t seq_ids_size
|
||||||
|
) {
|
||||||
|
std::vector<uint8_t> pkg;
|
||||||
|
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA_ACK));
|
||||||
|
pkg.push_back(transfer_id);
|
||||||
|
|
||||||
|
// TODO: optimize
|
||||||
|
for (size_t i = 0; i < seq_ids_size; i++) {
|
||||||
|
pkg.push_back(seq_ids[i] & 0xff);
|
||||||
|
pkg.push_back((seq_ids[i] >> (1*8)) & 0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
// lossy
|
||||||
|
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool NGCFT1::sendPKG_FT1_MESSAGE(
|
||||||
|
uint32_t group_number,
|
||||||
|
uint32_t message_id,
|
||||||
|
uint32_t file_kind,
|
||||||
|
const uint8_t* file_id, size_t file_id_size
|
||||||
|
) {
|
||||||
|
std::vector<uint8_t> pkg;
|
||||||
|
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_MESSAGE));
|
||||||
|
|
||||||
|
for (size_t i = 0; i < sizeof(message_id); i++) {
|
||||||
|
pkg.push_back((message_id>>(i*8)) & 0xff);
|
||||||
|
}
|
||||||
|
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
||||||
|
pkg.push_back((file_kind>>(i*8)) & 0xff);
|
||||||
|
}
|
||||||
|
for (size_t i = 0; i < file_id_size; i++) {
|
||||||
|
pkg.push_back(file_id[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// lossless
|
||||||
|
return _t.toxGroupSendCustomPacket(group_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<CCAI::SeqIDType>& timeouts_set) {
|
||||||
auto& tf_opt = peer.send_transfers.at(idx);
|
auto& tf_opt = peer.send_transfers.at(idx);
|
||||||
assert(tf_opt.has_value());
|
assert(tf_opt.has_value());
|
||||||
auto& tf = tf_opt.value();
|
auto& tf = tf_opt.value();
|
||||||
@ -40,54 +168,29 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
|
|||||||
} else {
|
} else {
|
||||||
// timed out, resend
|
// timed out, resend
|
||||||
std::cerr << "NGCFT1 warning: ft init timed out, resending\n";
|
std::cerr << "NGCFT1 warning: ft init timed out, resending\n";
|
||||||
_neep.send_ft1_init(group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
|
sendPKG_FT1_INIT(group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
|
||||||
tf.inits_sent++;
|
tf.inits_sent++;
|
||||||
tf.time_since_activity = 0.f;
|
tf.time_since_activity = 0.f;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
break;
|
//break;
|
||||||
case State::FINISHING: // we still have unacked packets
|
return;
|
||||||
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
|
case State::SENDING: {
|
||||||
if (timeouts_set.count({idx, id})) {
|
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
|
||||||
if (can_packet_size >= data.size()) {
|
// no ack after 5 sec -> resend
|
||||||
_neep.send_ft1_data(group_number, peer_number, idx, id, data.data(), data.size());
|
//if (time_since_activity >= ngc_ft1_ctx->options.sending_resend_without_ack_after) {
|
||||||
|
if (timeouts_set.count({idx, id})) {
|
||||||
|
// TODO: can fail
|
||||||
|
sendPKG_FT1_DATA(group_number, peer_number, idx, id, data.data(), data.size());
|
||||||
peer.cca->onLoss({idx, id}, false);
|
peer.cca->onLoss({idx, id}, false);
|
||||||
time_since_activity = 0.f;
|
time_since_activity = 0.f;
|
||||||
timeouts_set.erase({idx, id});
|
timeouts_set.erase({idx, id});
|
||||||
can_packet_size -= data.size();
|
|
||||||
} else {
|
|
||||||
#if 0 // too spammy
|
|
||||||
std::cerr << "NGCFT1 warning: no space to resend timedout\n";
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
});
|
|
||||||
if (tf.time_since_activity >= sending_give_up_after) {
|
|
||||||
// no ack after 30sec, close ft
|
|
||||||
std::cerr << "NGCFT1 warning: sending ft finishing timed out, deleting\n";
|
|
||||||
dispatch(
|
|
||||||
NGCFT1_Event::send_done,
|
|
||||||
Events::NGCFT1_send_done{
|
|
||||||
group_number, peer_number,
|
|
||||||
static_cast<uint8_t>(idx),
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
// clean up cca
|
|
||||||
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
|
|
||||||
peer.cca->onLoss({idx, id}, true);
|
|
||||||
timeouts_set.erase({idx, id});
|
|
||||||
});
|
});
|
||||||
|
|
||||||
tf_opt.reset();
|
if (tf.time_since_activity >= sending_give_up_after) {
|
||||||
}
|
|
||||||
break;
|
|
||||||
case State::SENDING: {
|
|
||||||
// first handle overall timeout (could otherwise do resends directly before, which is useless)
|
|
||||||
// timeout increases with active transfers (otherwise we could starve them)
|
|
||||||
if (tf.time_since_activity >= (sending_give_up_after * peer.active_send_transfers)) {
|
|
||||||
// no ack after 30sec, close ft
|
// no ack after 30sec, close ft
|
||||||
std::cerr << "NGCFT1 warning: sending ft in progress timed out, deleting (ifc:" << peer.cca->inFlightCount() << ")\n";
|
std::cerr << "NGCFT1 warning: sending ft in progress timed out, deleting\n";
|
||||||
dispatch(
|
dispatch(
|
||||||
NGCFT1_Event::send_done,
|
NGCFT1_Event::send_done,
|
||||||
Events::NGCFT1_send_done{
|
Events::NGCFT1_send_done{
|
||||||
@ -107,26 +210,25 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// do resends
|
|
||||||
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
|
|
||||||
if (can_packet_size >= data.size() && time_since_activity >= peer.cca->getCurrentDelay() && timeouts_set.count({idx, id})) {
|
|
||||||
// TODO: can fail
|
|
||||||
_neep.send_ft1_data(group_number, peer_number, idx, id, data.data(), data.size());
|
|
||||||
peer.cca->onLoss({idx, id}, false);
|
|
||||||
time_since_activity = 0.f;
|
|
||||||
timeouts_set.erase({idx, id});
|
|
||||||
can_packet_size -= data.size();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// if chunks in flight < window size (2)
|
// if chunks in flight < window size (2)
|
||||||
|
//while (tf.ssb.size() < ngc_ft1_ctx->options.packet_window_size) {
|
||||||
|
int64_t can_packet_size {static_cast<int64_t>(peer.cca->canSend())};
|
||||||
|
//if (can_packet_size) {
|
||||||
|
//std::cerr << "FT: can_packet_size: " << can_packet_size;
|
||||||
|
//}
|
||||||
|
size_t count {0};
|
||||||
while (can_packet_size > 0 && tf.file_size > 0) {
|
while (can_packet_size > 0 && tf.file_size > 0) {
|
||||||
std::vector<uint8_t> new_data;
|
std::vector<uint8_t> new_data;
|
||||||
|
|
||||||
|
// TODO: parameterize packet size? -> only if JF increases lossy packet size >:)
|
||||||
|
//size_t chunk_size = std::min<size_t>(496u, tf.file_size - tf.file_size_current);
|
||||||
|
//size_t chunk_size = std::min<size_t>(can_packet_size, tf.file_size - tf.file_size_current);
|
||||||
size_t chunk_size = std::min<size_t>({
|
size_t chunk_size = std::min<size_t>({
|
||||||
|
//496u,
|
||||||
|
//996u,
|
||||||
peer.cca->MAXIMUM_SEGMENT_DATA_SIZE,
|
peer.cca->MAXIMUM_SEGMENT_DATA_SIZE,
|
||||||
static_cast<size_t>(can_packet_size),
|
static_cast<size_t>(can_packet_size),
|
||||||
static_cast<size_t>(tf.file_size - tf.file_size_current),
|
tf.file_size - tf.file_size_current
|
||||||
});
|
});
|
||||||
if (chunk_size == 0) {
|
if (chunk_size == 0) {
|
||||||
tf.state = State::FINISHING;
|
tf.state = State::FINISHING;
|
||||||
@ -135,6 +237,14 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
|
|||||||
|
|
||||||
new_data.resize(chunk_size);
|
new_data.resize(chunk_size);
|
||||||
|
|
||||||
|
//ngc_ft1_ctx->cb_send_data[tf.file_kind](
|
||||||
|
//tox,
|
||||||
|
//group_number, peer_number,
|
||||||
|
//idx,
|
||||||
|
//tf.file_size_current,
|
||||||
|
//new_data.data(), new_data.size(),
|
||||||
|
//ngc_ft1_ctx->ud_send_data.count(tf.file_kind) ? ngc_ft1_ctx->ud_send_data.at(tf.file_kind) : nullptr
|
||||||
|
//);
|
||||||
assert(idx <= 0xffu);
|
assert(idx <= 0xffu);
|
||||||
// TODO: check return value
|
// TODO: check return value
|
||||||
dispatch(
|
dispatch(
|
||||||
@ -143,212 +253,112 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
|
|||||||
group_number, peer_number,
|
group_number, peer_number,
|
||||||
static_cast<uint8_t>(idx),
|
static_cast<uint8_t>(idx),
|
||||||
tf.file_size_current,
|
tf.file_size_current,
|
||||||
new_data.data(), static_cast<uint32_t>(new_data.size()),
|
new_data.data(), new_data.size(),
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
uint16_t seq_id = tf.ssb.add(std::move(new_data));
|
uint16_t seq_id = tf.ssb.add(std::move(new_data));
|
||||||
const bool sent = _neep.send_ft1_data(group_number, peer_number, idx, seq_id, tf.ssb.entries.at(seq_id).data.data(), tf.ssb.entries.at(seq_id).data.size());
|
sendPKG_FT1_DATA(group_number, peer_number, idx, seq_id, tf.ssb.entries.at(seq_id).data.data(), tf.ssb.entries.at(seq_id).data.size());
|
||||||
if (sent) {
|
peer.cca->onSent({idx, seq_id}, chunk_size);
|
||||||
peer.cca->onSent({idx, seq_id}, chunk_size);
|
|
||||||
} else {
|
#if defined(EXTRA_LOGGING) && EXTRA_LOGGING == 1
|
||||||
std::cerr << "NGCFT1: failed to send packet (queue full?) --------------\n";
|
fprintf(stderr, "FT: sent data size: %ld (seq %d)\n", chunk_size, seq_id);
|
||||||
peer.cca->onLoss({idx, seq_id}, false); // HACK: fake congestion event
|
#endif
|
||||||
// TODO: onCongestion
|
|
||||||
can_packet_size = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
tf.file_size_current += chunk_size;
|
tf.file_size_current += chunk_size;
|
||||||
can_packet_size -= chunk_size;
|
can_packet_size -= chunk_size;
|
||||||
|
count++;
|
||||||
}
|
}
|
||||||
|
//if (count) {
|
||||||
|
//std::cerr << " split over " << count << "\n";
|
||||||
|
//}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case State::FINISHING: // we still have unacked packets
|
||||||
|
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
|
||||||
|
// no ack after 5 sec -> resend
|
||||||
|
//if (time_since_activity >= ngc_ft1_ctx->options.sending_resend_without_ack_after) {
|
||||||
|
if (timeouts_set.count({idx, id})) {
|
||||||
|
sendPKG_FT1_DATA(group_number, peer_number, idx, id, data.data(), data.size());
|
||||||
|
peer.cca->onLoss({idx, id}, false);
|
||||||
|
time_since_activity = 0.f;
|
||||||
|
timeouts_set.erase({idx, id});
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if (tf.time_since_activity >= sending_give_up_after) {
|
||||||
|
// no ack after 30sec, close ft
|
||||||
|
// TODO: notify app
|
||||||
|
std::cerr << "NGCFT1 warning: sending ft finishing timed out, deleting\n";
|
||||||
|
|
||||||
|
// clean up cca
|
||||||
|
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
|
||||||
|
peer.cca->onLoss({idx, id}, true);
|
||||||
|
timeouts_set.erase({idx, id});
|
||||||
|
});
|
||||||
|
|
||||||
|
tf_opt.reset();
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
default: // invalid state, delete
|
default: // invalid state, delete
|
||||||
std::cerr << "NGCFT1 error: ft in invalid state, deleting\n";
|
std::cerr << "NGCFT1 error: ft in invalid state, deleting\n";
|
||||||
assert(false && "ft in invalid state");
|
|
||||||
tf_opt.reset();
|
tf_opt.reset();
|
||||||
|
//continue;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool NGCFT1::iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer) {
|
void NGCFT1::iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer) {
|
||||||
if (peer.cca) {
|
auto timeouts = peer.cca->getTimeouts();
|
||||||
auto timeouts = peer.cca->getTimeouts();
|
std::set<CCAI::SeqIDType> timeouts_set{timeouts.cbegin(), timeouts.cend()};
|
||||||
std::set<CCAI::SeqIDType> timeouts_set{timeouts.cbegin(), timeouts.cend()};
|
|
||||||
|
|
||||||
int64_t can_packet_size {peer.cca->canSend(time_delta)}; // might get more space while iterating (time)
|
for (size_t idx = 0; idx < peer.send_transfers.size(); idx++) {
|
||||||
|
if (peer.send_transfers.at(idx).has_value()) {
|
||||||
// get number of currently running transfers TODO: improve
|
updateSendTransfer(time_delta, group_number, peer_number, peer, idx, timeouts_set);
|
||||||
peer.active_send_transfers = 0;
|
|
||||||
for (const auto& it : peer.send_transfers) {
|
|
||||||
if (it.has_value()) {
|
|
||||||
peer.active_send_transfers++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// change iterate start position to not starve transfers in the back
|
|
||||||
size_t iterated_count = 0;
|
|
||||||
bool last_send_found = false;
|
|
||||||
for (size_t idx = peer.next_send_transfer_send_idx; iterated_count < peer.send_transfers.size(); idx++, iterated_count++) {
|
|
||||||
idx = idx % peer.send_transfers.size();
|
|
||||||
|
|
||||||
if (peer.send_transfers.at(idx).has_value()) {
|
|
||||||
if (!last_send_found && can_packet_size <= 0) {
|
|
||||||
peer.next_send_transfer_send_idx = idx;
|
|
||||||
last_send_found = true; // only set once
|
|
||||||
}
|
|
||||||
updateSendTransfer(time_delta, group_number, peer_number, peer, idx, timeouts_set, can_packet_size);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool recv_activity {false};
|
// TODO: receiving transfers?
|
||||||
for (size_t idx = 0; idx < peer.recv_transfers.size(); idx++) {
|
|
||||||
if (!peer.recv_transfers.at(idx).has_value()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& transfer = peer.recv_transfers.at(idx).value();
|
|
||||||
|
|
||||||
// proper switch case?
|
|
||||||
if (transfer.state == Group::Peer::RecvTransfer::State::FINISHING) {
|
|
||||||
transfer.timer -= time_delta;
|
|
||||||
if (transfer.timer <= 0.f) {
|
|
||||||
peer.recv_transfers.at(idx).reset();
|
|
||||||
}
|
|
||||||
recv_activity = true; // count as activity, not sure we need this
|
|
||||||
} else {
|
|
||||||
transfer.timer += time_delta;
|
|
||||||
if (transfer.timer < 0.5f) {
|
|
||||||
// back off when no activity
|
|
||||||
recv_activity = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return peer.active_send_transfers > 0 || recv_activity;
|
|
||||||
}
|
|
||||||
|
|
||||||
const CCAI* NGCFT1::getPeerCCA(
|
|
||||||
uint32_t group_number,
|
|
||||||
uint32_t peer_number
|
|
||||||
) const {
|
|
||||||
auto group_it = groups.find(group_number);
|
|
||||||
if (group_it == groups.end()) {
|
|
||||||
return nullptr;;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto peer_it = group_it->second.peers.find(peer_number);
|
|
||||||
if (peer_it == group_it->second.peers.end()) {
|
|
||||||
return nullptr;;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto& cca_ptr = peer_it->second.cca;
|
|
||||||
|
|
||||||
if (!cca_ptr) {
|
|
||||||
return nullptr;;
|
|
||||||
}
|
|
||||||
|
|
||||||
return cca_ptr.get();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
NGCFT1::NGCFT1(
ToxI& t,
ToxEventProviderI& tep,
NGCEXTEventProvider& neep
NGCEXTEventProviderI& neep
) : _t(t), _tep(tep), _tep_sr(_tep.newSubRef(this)), _neep(neep), _neep_sr(_neep.newSubRef(this))
) : _t(t), _tep(tep), _neep(neep)
{
_neep_sr
.subscribe(NGCEXT_Event::FT1_REQUEST)
.subscribe(NGCEXT_Event::FT1_INIT)
.subscribe(NGCEXT_Event::FT1_INIT_ACK)
.subscribe(NGCEXT_Event::FT1_DATA)
.subscribe(NGCEXT_Event::FT1_DATA_ACK)
.subscribe(NGCEXT_Event::FT1_MESSAGE)
;
_neep.subscribe(this, NGCEXT_Event::FT1_REQUEST);
_neep.subscribe(this, NGCEXT_Event::FT1_INIT);
_neep.subscribe(this, NGCEXT_Event::FT1_INIT_ACK);
_neep.subscribe(this, NGCEXT_Event::FT1_DATA);
_neep.subscribe(this, NGCEXT_Event::FT1_DATA_ACK);
_neep.subscribe(this, NGCEXT_Event::FT1_MESSAGE);

_tep_sr.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_PEER_EXIT);
_tep.subscribe(this, Tox_Event::TOX_EVENT_GROUP_PEER_EXIT);
}

float NGCFT1::iterate(float time_delta) {
void NGCFT1::iterate(float time_delta) {
_time_since_activity += time_delta;
bool transfer_activity {false};
for (auto& [group_number, group] : groups) {
for (auto& [peer_number, peer] : group.peers) {
transfer_activity = transfer_activity || iteratePeer(time_delta, group_number, peer_number, peer);
iteratePeer(time_delta, group_number, peer_number, peer);

#if 0
// find any active transfer
if (!transfer_activity) {
for (const auto& t : peer.send_transfers) {
if (t.has_value()) {
transfer_activity = true;
#if 0
std::cout
<< "--- active send transfer "
<< group_number << ":" << peer_number
<< "(" << std::get<0>(_t.toxGroupPeerGetName(group_number, peer_number)).value_or("<unk>") << ")"
<< " fk:" << t.value().file_kind
<< " state:" << (int)t.value().state
<< " tsa:" << t.value().time_since_activity
<< "\n"
;
#endif
break;
}
}
}
if (!transfer_activity) {
for (const auto& t : peer.recv_transfers) {
if (t.has_value()) {
transfer_activity = true;
#if 0
std::cout
<< "--- active recv transfer "
<< group_number << ":" << peer_number
<< "(" << std::get<0>(_t.toxGroupPeerGetName(group_number, peer_number)).value_or("<unk>") << ")"
<< " fk:" << t.value().file_kind
<< " state:" << (int)t.value().state
<< " ft:" << t.value().finishing_timer
<< "\n"
;
#endif
break;
}
}
}
#endif
}
}

if (transfer_activity) {
_time_since_activity = 0.f;
// ~15ms for up to 1mb/s
// ~5ms for up to 4mb/s
return 0.005f; // 5ms
} else if (_time_since_activity < 1.0f) {
// bc of temporality
return 0.025f;
} else {
return 1.f; // once a sec might be too little
}
}

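For orientation, a minimal usage sketch (not part of either branch) of how a host could drive the master-side iterate(), which returns the suggested delay in seconds until the next call; the dedicated-thread loop and clock handling here are assumptions:

#include <chrono>
#include <thread>

// hypothetical host loop; NGCFT1 is the class declared in the header diff further down
void run_ngcft1_loop(NGCFT1& ngcft1) {
	auto last = std::chrono::steady_clock::now();
	for (;;) {
		const auto now = std::chrono::steady_clock::now();
		const float time_delta = std::chrono::duration<float>(now - last).count();
		last = now;
		// returns 0.005f while transfers are active, 0.025f shortly after activity, 1.f when idle
		const float next_interval = ngcft1.iterate(time_delta);
		std::this_thread::sleep_for(std::chrono::duration<float>(next_interval));
	}
}
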
bool NGCFT1::NGC_FT1_send_request_private(
void NGCFT1::NGC_FT1_send_request_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, uint32_t file_id_size
const uint8_t* file_id, size_t file_id_size
) {
return _neep.send_ft1_request(group_number, peer_number, file_kind, file_id, file_id_size);
// TODO: error check
sendPKG_FT1_REQUEST(group_number, peer_number, file_kind, file_id, file_id_size);
}

bool NGCFT1::NGC_FT1_send_init_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, uint32_t file_id_size,
const uint8_t* file_id, size_t file_id_size,
uint64_t file_size,
size_t file_size,
uint8_t* transfer_id,
uint8_t* transfer_id
bool can_compress
) {
if (std::get<0>(_t.toxGroupPeerGetConnectionStatus(group_number, peer_number)).value_or(TOX_CONNECTION_NONE) == TOX_CONNECTION_NONE) {
std::cerr << "NGCFT1 error: cant init ft, peer offline\n";
@ -378,12 +388,10 @@ bool NGCFT1::NGC_FT1_send_init_private(
std::cerr << "NGCFT1 error: cant init ft, no free transfer slot\n";
return false;
}

idx = i;
}

// TODO: check return value
_neep.send_ft1_init(group_number, peer_number, file_kind, file_size, idx, file_id, file_id_size);
sendPKG_FT1_INIT(group_number, peer_number, file_kind, file_size, idx, file_id, file_id_size);

peer.send_transfers[idx] = Group::Peer::SendTransfer{
file_kind,
@ -407,64 +415,18 @@ bool NGCFT1::NGC_FT1_send_message_public(
uint32_t group_number,
uint32_t& message_id,
uint32_t file_kind,
const uint8_t* file_id, uint32_t file_id_size
const uint8_t* file_id, size_t file_id_size
) {
// create msg_id
message_id = randombytes_random();

// TODO: check return value
return _neep.send_all_ft1_message(group_number, message_id, file_kind, file_id, file_id_size);
return sendPKG_FT1_MESSAGE(group_number, message_id, file_kind, file_id, file_id_size);
}

float NGCFT1::getPeerDelay(uint32_t group_number, uint32_t peer_number) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);

if (cca_ptr == nullptr) {
return -1.f;
}

return cca_ptr->getCurrentDelay();
}

float NGCFT1::getPeerWindow(uint32_t group_number, uint32_t peer_number) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);

if (cca_ptr == nullptr) {
return -1.f;
}

return cca_ptr->getWindow();
}

int64_t NGCFT1::getPeerInFlightPackets(
uint32_t group_number,
uint32_t peer_number
) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);

if (cca_ptr == nullptr) {
return -1;
}

return cca_ptr->inFlightCount();
}

int64_t NGCFT1::getPeerInFlightBytes(
uint32_t group_number,
uint32_t peer_number
) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);

if (cca_ptr == nullptr) {
return -1;
}

return cca_ptr->inFlightCount();
}

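As a rough illustration (again not from either branch) of the master-side ft1 api above: a request asks a peer to start a transfer for a given file_id, while an init offers a file and yields a transfer_id on success. The kind value and id bytes below are placeholders:

#include <cstdint>
#include <vector>

// hypothetical helper; 2u stands in for whatever NGCFT1_file_kind the application actually uses
void request_and_offer(NGCFT1& ngcft1, uint32_t group_number, uint32_t peer_number) {
	const std::vector<uint8_t> file_id(20, 0x42); // placeholder id (e.g. a sha1 info hash)

	// ask the peer to send us the file behind file_id
	ngcft1.NGC_FT1_send_request_private(
		group_number, peer_number,
		2u, // placeholder file kind
		file_id.data(), static_cast<uint32_t>(file_id.size())
	);

	// offer a 1 MiB file ourselves; transfer_id is filled in on success,
	// data is then pulled chunk by chunk via the send_data event during iterate()
	uint8_t transfer_id {0};
	ngcft1.NGC_FT1_send_init_private(
		group_number, peer_number,
		2u, // placeholder file kind
		file_id.data(), static_cast<uint32_t>(file_id.size()),
		1024u*1024u,
		&transfer_id
	);
}
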
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_request& e) {
//#if !NDEBUG
std::cout << "NGCFT1: got FT1_REQUEST fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
std::cout << "NGCFT1: FT1_REQUEST fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
//#endif

// .... just rethrow??
@ -474,30 +436,58 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_request& e) {
Events::NGCFT1_recv_request{
e.group_number, e.peer_number,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), static_cast<uint32_t>(e.file_id.size())
e.file_id.data(), e.file_id.size()
}
);
}

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init& e) {
//#if !NDEBUG
std::cout << "NGCFT1: got FT1_INIT fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " [" << bin2hex(e.file_id) << "]\n";
std::cout << "NGCFT1: FT1_INIT fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " [" << bin2hex(e.file_id) << "]\n";
//#endif
// HACK: simply forward to init2 hanlder
return onEvent(Events::NGCEXT_ft1_init2{
e.group_number,
e.peer_number,
e.file_kind,
e.file_size,
e.transfer_id,
0x00, // non set
e.file_id, // sadly a copy, wont matter in the future
});
bool accept = false;
dispatch(
NGCFT1_Event::recv_init,
Events::NGCFT1_recv_init{
e.group_number, e.peer_number,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), e.file_id.size(),
e.transfer_id,
e.file_size,
accept
}
);

if (!accept) {
std::cout << "NGCFT1: rejected init\n";
return true; // return true?
}

sendPKG_FT1_INIT_ACK(e.group_number, e.peer_number, e.transfer_id);

std::cout << "NGCFT1: accepted init\n";

auto& peer = groups[e.group_number].peers[e.peer_number];
if (peer.recv_transfers[e.transfer_id].has_value()) {
std::cerr << "NGCFT1 warning: overwriting existing recv_transfer " << int(e.transfer_id) << "\n";
}

peer.recv_transfers[e.transfer_id] = Group::Peer::RecvTransfer{
e.file_kind,
e.file_id,
Group::Peer::RecvTransfer::State::INITED,
e.file_size,
0u,
{} // rsb
};

return true;
}

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {
//#if !NDEBUG
std::cout << "NGCFT1: got FT1_INIT_ACK " << e.group_number << ":" << e.peer_number << " mds:" << e.max_lossy_data_size << "\n";
std::cout << "NGCFT1: FT1_INIT_ACK\n";
//#endif

// we now should start sending data
@ -517,35 +507,10 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {

using State = Group::Peer::SendTransfer::State;
if (transfer.state != State::INIT_SENT) {
std::cerr << "NGCFT1 error: init_ack but not in INIT_SENT state\n";
std::cerr << "NGCFT1 error: inti_ack but not in INIT_SENT state\n";
return true;
}

if (e.max_lossy_data_size < 16) {
std::cerr << "NGCFT1 error: init_ack max_lossy_data_size is less than 16 bytes\n";
return true;
}

// negotiated packet_data_size
const auto negotiated_packet_data_size = std::min<uint32_t>(e.max_lossy_data_size, _t.toxGroupMaxCustomLossyPacketLength()-4);
// TODO: reset cca with new pkg size
if (!peer.cca) {
// make random max of [1020-1220]
const uint32_t random_max_data_size = (1024-4) + _rng()%201;
const uint32_t randomized_negotiated_packet_data_size = std::min(negotiated_packet_data_size, random_max_data_size);

peer.max_packet_data_size = randomized_negotiated_packet_data_size;

std::cerr << "NGCFT1: creating cca with max:" << peer.max_packet_data_size << "\n";

peer.cca = std::make_unique<CUBIC>(peer.max_packet_data_size);
//peer.cca = std::make_unique<LEDBAT>(peer.max_packet_data_size);
//peer.cca = std::make_unique<FlowOnly>(peer.max_packet_data_size);
//peer.cca->max_byterate_allowed = 1.f *1024*1024;
} else {
std::cerr << "NGCFT1: reusing cca. rtt:" << peer.cca->getCurrentDelay() << " w:" << peer.cca->getWindow() << " ifc:" << peer.cca->inFlightCount() << "\n";
}

// iterate will now call NGC_FT1_send_data_cb
transfer.state = State::SENDING;
transfer.time_since_activity = 0.f;
@ -555,7 +520,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
#if !NDEBUG
//std::cout << "NGCFT1: got FT1_DATA " << e.sequence_id << "\n";
std::cout << "NGCFT1: FT1_DATA\n";
#endif

if (e.data.empty()) {
@ -575,7 +540,6 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
}

auto& transfer = peer.recv_transfers[e.transfer_id].value();
transfer.timer = 0.f;

// do reassembly, ignore dups
transfer.rsb.add(e.sequence_id, std::vector<uint8_t>(e.data)); // TODO: ugly explicit copy for what should just be a move
@ -591,7 +555,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
e.group_number, e.peer_number,
e.transfer_id,
transfer.file_size_current,
data.data(), static_cast<uint32_t>(data.size())
data.data(), data.size()
}
);

@ -604,19 +568,13 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
// TODO: check if this caps at max acks
if (!ack_seq_ids.empty()) {
// TODO: check return value
_neep.send_ft1_data_ack(e.group_number, e.peer_number, e.transfer_id, ack_seq_ids.data(), ack_seq_ids.size());
sendPKG_FT1_DATA_ACK(e.group_number, e.peer_number, e.transfer_id, ack_seq_ids.data(), ack_seq_ids.size());
}

if (transfer.file_size_current == transfer.file_size) {
// all data received
// TODO: set all data received, and clean up
transfer.state = Group::Peer::RecvTransfer::State::FINISHING;
//transfer.state = Group::Peer::RecvTransfer::State::RECV;

// TODO: keep around for remote timeout + delay + offset, so we can be sure all acks where received
// or implement a dedicated finished that needs to be acked
//transfer.timer = 0.75f; // TODO: we are receiving, we dont know delay
transfer.timer = FlowOnly::RTT_MAX;

dispatch(
NGCFT1_Event::recv_done,
Events::NGCFT1_recv_done{
@ -631,7 +589,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
#if !NDEBUG
//std::cout << "NGCFT1: got FT1_DATA_ACK\n";
//std::cout << "NGCFT1: FT1_DATA_ACK\n";
#endif

if (!groups.count(e.group_number)) {
@ -641,8 +599,6 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {

Group::Peer& peer = groups[e.group_number].peers[e.peer_number];
if (!peer.send_transfers[e.transfer_id].has_value()) {
// we delete directly, packets might still be in flight (in practice they are when ce)
// update: we no longer delete directly, but its kinda hacky
std::cerr << "NGCFT1 warning: data_ack for unknown transfer\n";
return true;
}
@ -659,7 +615,6 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {

{
std::vector<CCAI::SeqIDType> seqs;
seqs.reserve(e.sequence_ids.size());
for (const auto it : e.sequence_ids) {
// TODO: improve this o.o
seqs.push_back({e.transfer_id, it});
@ -670,7 +625,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {

// delete if all packets acked
if (transfer.file_size == transfer.file_size_current && transfer.ssb.size() == 0) {
std::cout << "NGCFT1: " << int(e.transfer_id) << " done. wnd:" << peer.cca->getWindow() << "\n";
std::cout << "NGCFT1: " << int(e.transfer_id) << " done\n";
dispatch(
NGCFT1_Event::send_done,
Events::NGCFT1_send_done{
@ -686,7 +641,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
}

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_message& e) {
std::cout << "NGCFT1: got FT1_MESSAGE mid:" << e.message_id << " fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
std::cout << "NGCFT1: FT1_MESSAGE mid:" << e.message_id << " fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";

// .... just rethrow??
// TODO: dont
@ -696,55 +651,11 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_message& e) {
e.group_number, e.peer_number,
e.message_id,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), static_cast<uint32_t>(e.file_id.size())
e.file_id.data(), e.file_id.size()
}
);
}

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init2& e) {
//#if !NDEBUG
std::cout << "NGCFT1: got FT1_INIT2 fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " ff:" << int(e.feature_flags) << " [" << bin2hex(e.file_id) << "]\n";
//#endif

bool accept = false;
dispatch(
NGCFT1_Event::recv_init,
Events::NGCFT1_recv_init{
e.group_number, e.peer_number,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), static_cast<uint32_t>(e.file_id.size()),
e.transfer_id,
e.file_size,
accept
}
);

if (!accept) {
std::cout << "NGCFT1: rejected init2\n";
return true; // return true?
}

_neep.send_ft1_init_ack(e.group_number, e.peer_number, e.transfer_id);

std::cout << "NGCFT1: accepted init2\n";

auto& peer = groups[e.group_number].peers[e.peer_number];
if (peer.recv_transfers[e.transfer_id].has_value()) {
std::cerr << "NGCFT1 warning: overwriting existing recv_transfer " << int(e.transfer_id) << ", other peer started new transfer on preexising\n";
}

peer.recv_transfers[e.transfer_id] = Group::Peer::RecvTransfer{
e.file_kind,
e.file_id,
Group::Peer::RecvTransfer::State::INITED,
e.file_size,
0u,
{} // rsb
};

return true;
}

bool NGCFT1::onToxEvent(const Tox_Event_Group_Peer_Exit* e) {
const auto group_number = tox_event_group_peer_exit_get_group_number(e);
const auto peer_number = tox_event_group_peer_exit_get_peer_id(e);
@ -800,7 +711,7 @@ bool NGCFT1::onToxEvent(const Tox_Event_Group_Peer_Exit* e) {
}

// reset cca
peer.cca.reset(); // dont actually reallocate
peer.cca = std::make_unique<CUBIC>(500-4); // TODO: replace with tox_group_max_custom_lossy_packet_length()-4

return false;
}

@ -2,23 +2,22 @@

// solanaceae port of tox_ngc_ft1

#include <solanaceae/toxcore/tox_event_interface.hpp>
#include <solanaceae/toxcore/tox_interface.hpp>
#include <solanaceae/toxcore/tox_event_interface.hpp>

#include <solanaceae/ngc_ext/ngcext.hpp>
#include "./cubic.hpp"
#include "./cca.hpp"
//#include "./flow_only.hpp"
//#include "./ledbat.hpp"

#include "./rcv_buf.hpp"
#include "./snd_buf.hpp"

#include "./ngcft1_file_kind.hpp"

#include <cstdint>
#include <map>
#include <set>
#include <memory>
#include <random>

namespace Events {

@ -29,7 +28,7 @@ namespace Events {
NGCFT1_file_kind file_kind;

const uint8_t* file_id;
uint32_t file_id_size;
size_t file_id_size;
};

struct NGCFT1_recv_init {
@ -39,10 +38,10 @@ namespace Events {
NGCFT1_file_kind file_kind;

const uint8_t* file_id;
uint32_t file_id_size;
size_t file_id_size;

const uint8_t transfer_id;
const uint64_t file_size;
const size_t file_size;

// return true to accept, false to deny
bool& accept;
@ -54,9 +53,9 @@ namespace Events {

uint8_t transfer_id;

uint64_t data_offset;
size_t data_offset;
const uint8_t* data;
uint32_t data_size;
size_t data_size;
};

// request to fill data_size bytes into data
@ -66,9 +65,9 @@ namespace Events {

uint8_t transfer_id;

uint64_t data_offset;
size_t data_offset;
uint8_t* data;
uint32_t data_size;
size_t data_size;
};

struct NGCFT1_recv_done {
@ -96,7 +95,7 @@ namespace Events {
NGCFT1_file_kind file_kind;

const uint8_t* file_id;
uint32_t file_id_size;
size_t file_id_size;
};

} // Events
@ -132,41 +131,30 @@ using NGCFT1EventProviderI = EventProviderI<NGCFT1EventI>;
class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProviderI {
ToxI& _t;
ToxEventProviderI& _tep;
ToxEventProviderI::SubscriptionReference _tep_sr;
NGCEXTEventProviderI& _neep;
NGCEXTEventProvider& _neep; // not the interface?
NGCEXTEventProvider::SubscriptionReference _neep_sr;

std::default_random_engine _rng{std::random_device{}()};

float _time_since_activity {10.f};

// TODO: config
size_t acks_per_packet {3u}; // 3
float init_retry_timeout_after {4.f};
float init_retry_timeout_after {5.f}; // 10sec
float sending_give_up_after {10.f}; // sec (per active transfer)
float sending_give_up_after {30.f}; // 30sec

struct Group {
struct Peer {
uint32_t max_packet_data_size {500-4};
std::unique_ptr<CCAI> cca = std::make_unique<CUBIC>(500-4); // TODO: replace with tox_group_max_custom_lossy_packet_length()-4
//std::unique_ptr<CCAI> cca = std::make_unique<CUBIC>(max_packet_data_size); // TODO: replace with tox_group_max_custom_lossy_packet_length()-4
std::unique_ptr<CCAI> cca;

struct RecvTransfer {
uint32_t file_kind;
std::vector<uint8_t> file_id;

enum class State {
INITED, // init acked, but no data received yet (might be dropped)
INITED, //init acked, but no data received yet (might be dropped)
RECV, // receiving data
FINISHING, // got all the data, but we wait for 2*delay, since its likely there is data still arriving
} state;

uint64_t file_size {0};
uint64_t file_size_current {0};
// float time_since_last_activity ?
size_t file_size {0};
size_t file_size_current {0};
// if state INITED or RECV, time since last activity
// if state FINISHING and it reaches 0, delete
float timer {0.f};

// sequence id based reassembly
RecvSequenceBuffer rsb;
@ -191,8 +179,8 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
size_t inits_sent {1}; // is sent when creating

float time_since_activity {0.f};
uint64_t file_size {0};
size_t file_size {0};
uint64_t file_size_current {0};
size_t file_size_current {0};

// sequence array
// list of sent but not acked seq_ids
@ -200,44 +188,46 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
};
std::array<std::optional<SendTransfer>, 256> send_transfers;
size_t next_send_transfer_idx {0}; // next id will be 0
size_t next_send_transfer_send_idx {0};

size_t active_send_transfers {0};
};
std::map<uint32_t, Peer> peers;
};
std::map<uint32_t, Group> groups;

protected:
void updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<CCAI::SeqIDType>& timeouts_set, int64_t& can_packet_size);
bool iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer);
const CCAI* getPeerCCA(uint32_t group_number, uint32_t peer_number) const;
bool sendPKG_FT1_REQUEST(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size);
bool sendPKG_FT1_INIT(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, uint64_t file_size, uint8_t transfer_id, const uint8_t* file_id, size_t file_id_size);
bool sendPKG_FT1_INIT_ACK(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
bool sendPKG_FT1_DATA(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, uint16_t sequence_id, const uint8_t* data, size_t data_size);
bool sendPKG_FT1_DATA_ACK(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const uint16_t* seq_ids, size_t seq_ids_size);
bool sendPKG_FT1_MESSAGE(uint32_t group_number, uint32_t message_id, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size);
void updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<CCAI::SeqIDType>& timeouts_set);
void iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer);

public:
NGCFT1(
ToxI& t,
ToxEventProviderI& tep,
NGCEXTEventProvider& neep
NGCEXTEventProviderI& neep
);

float iterate(float delta);
void iterate(float delta);

public: // ft1 api
bool NGC_FT1_send_request_private(
// TODO: public variant?
void NGC_FT1_send_request_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, uint32_t file_id_size
const uint8_t* file_id, size_t file_id_size
);

// public does not make sense here
bool NGC_FT1_send_init_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, uint32_t file_id_size,
const uint8_t* file_id, size_t file_id_size,
uint64_t file_size,
size_t file_size,
uint8_t* transfer_id,
uint8_t* transfer_id
bool can_compress = false // set this if you know the data is compressable (eg text)
);

// sends the message and fills in message_id
@ -245,26 +235,9 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
uint32_t group_number,
uint32_t& message_id,
uint32_t file_kind,
const uint8_t* file_id, uint32_t file_id_size
const uint8_t* file_id, size_t file_id_size
);

public: // cca stuff
// rtt/delay
// negative on error or no cca
float getPeerDelay(uint32_t group_number, uint32_t peer_number) const;

// belived possible current window
// negative on error or no cca
float getPeerWindow(uint32_t group_number, uint32_t peer_number) const;

// packets in flight
// returns -1 if error or no cca
int64_t getPeerInFlightPackets(uint32_t group_number, uint32_t peer_number) const;

// actual bytes in flight (aka window)
// returns -1 if error or no cca
int64_t getPeerInFlightBytes(uint32_t group_number, uint32_t peer_number) const;

protected:
bool onEvent(const Events::NGCEXT_ft1_request&) override;
bool onEvent(const Events::NGCEXT_ft1_init&) override;
@ -272,7 +245,6 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
bool onEvent(const Events::NGCEXT_ft1_data&) override;
bool onEvent(const Events::NGCEXT_ft1_data_ack&) override;
bool onEvent(const Events::NGCEXT_ft1_message&) override;
bool onEvent(const Events::NGCEXT_ft1_init2&) override;

protected:
bool onToxEvent(const Tox_Event_Group_Peer_Exit* e) override;

@ -16,7 +16,7 @@ enum class NGCFT1_file_kind : uint32_t {
// id: TOX_FILE_ID_LENGTH (32) bytes
// this is basically and id and probably not a hash, like the tox friend api
// this id can be unique between 2 peers
ID = 8u, // TODO: this is actually DATA and 0
ID = 8u,

// id: hash of the info, like a torrent infohash (using the same hash as the data)
// TODO: determain internal format
@ -72,10 +72,5 @@ enum class NGCFT1_file_kind : uint32_t {
// id: sha256
// always of size 16KiB, except if last piece in file
TORRENT_V2_PIECE,

// https://gist.github.com/Green-Sky/440cd9817a7114786850eb4c62dc57c3
// id: ts start, ts end
HS2_RANGE_TIME = 0x00000f00, // TODO: remove, did not survive
HS2_RANGE_TIME_MSGPACK = 0x00000f02,
};

@ -1,248 +0,0 @@
#include "./sha1_mapped_filesystem.hpp"

#include <solanaceae/object_store/meta_components.hpp>
#include <solanaceae/object_store/meta_components_file.hpp>

#include "../file_constructor.hpp"
#include "../ft1_sha1_info.hpp"
#include "../hash_utils.hpp"
#include "../components.hpp"

#include <solanaceae/util/utils.hpp>

#include <atomic>
#include <mutex>
#include <list>
#include <thread>

#include <iostream>

namespace Backends {

struct SHA1MappedFilesystem_InfoBuilderState {
std::atomic_bool info_builder_dirty {false};
std::mutex info_builder_queue_mutex;
using InfoBuilderEntry = std::function<void(void)>;
std::list<InfoBuilderEntry> info_builder_queue;
};

SHA1MappedFilesystem::SHA1MappedFilesystem(
ObjectStore2& os
) : StorageBackendI::StorageBackendI(os), _ibs(std::make_unique<SHA1MappedFilesystem_InfoBuilderState>()) {
}

SHA1MappedFilesystem::~SHA1MappedFilesystem(void) {
}

void SHA1MappedFilesystem::tick(void) {
if (_ibs->info_builder_dirty) {
std::lock_guard l{_ibs->info_builder_queue_mutex};
_ibs->info_builder_dirty = false; // set while holding lock

for (auto& it : _ibs->info_builder_queue) {
it();
}
_ibs->info_builder_queue.clear();
}
}

ObjectHandle SHA1MappedFilesystem::newObject(ByteSpan id) {
ObjectHandle o{_os.registry(), _os.registry().create()};

o.emplace<ObjComp::Ephemeral::Backend>(this);
o.emplace<ObjComp::ID>(std::vector<uint8_t>{id});
//o.emplace<ObjComp::Ephemeral::FilePath>(object_file_path.generic_u8string());

_os.throwEventConstruct(o);

return o;
}

void SHA1MappedFilesystem::newFromFile(std::string_view file_name, std::string_view file_path, std::function<void(ObjectHandle o)>&& cb) {
std::thread(std::move([
this,
ibs = _ibs.get(),
cb = std::move(cb),
file_name_ = std::string(file_name),
file_path_ = std::string(file_path)
]() mutable {
// 0. open and fail
std::unique_ptr<File2I> file_impl = construct_file2_rw_mapped(file_path_, -1);
if (!file_impl->isGood()) {
{
std::lock_guard l{ibs->info_builder_queue_mutex};
ibs->info_builder_queue.push_back([file_path_](){
// back on iterate thread

std::cerr << "SHA1MF error: failed opening file '" << file_path_ << "'!\n";
});
ibs->info_builder_dirty = true; // still in scope, set before mutex unlock
}
return;
}

// 1. build info by hashing all chunks
FT1InfoSHA1 sha1_info;
// build info
sha1_info.file_name = file_name_;
sha1_info.file_size = file_impl->_file_size; // TODO: remove the reliance on implementation details
sha1_info.chunk_size = chunkSizeFromFileSize(sha1_info.file_size);
{
// TOOD: remove
const uint32_t cs_low {32*1024};
const uint32_t cs_high {4*1024*1024};

assert(sha1_info.chunk_size >= cs_low);
assert(sha1_info.chunk_size <= cs_high);
}

{ // build chunks
// HACK: load file fully
// ... its only a hack if its not memory mapped, but reading in chunk_sized chunks is probably a good idea anyway
const auto file_data = file_impl->read(file_impl->_file_size, 0);
size_t i = 0;
for (; i + sha1_info.chunk_size < file_data.size; i += sha1_info.chunk_size) {
sha1_info.chunks.push_back(hash_sha1(file_data.ptr+i, sha1_info.chunk_size));
}

if (i < file_data.size) {
sha1_info.chunks.push_back(hash_sha1(file_data.ptr+i, file_data.size-i));
}
}

file_impl.reset();

std::lock_guard l{ibs->info_builder_queue_mutex};
ibs->info_builder_queue.push_back(std::move([
this,
file_name_,
file_path_,
sha1_info = std::move(sha1_info),
cb = std::move(cb)
]() mutable { //
// executed on iterate thread

// reopen, cant move, since std::function needs to be copy consturctable (meh)
std::unique_ptr<File2I> file_impl = construct_file2_rw_mapped(file_path_, sha1_info.file_size);
if (!file_impl->isGood()) {
std::cerr << "SHA1MF error: failed opening file '" << file_path_ << "'!\n";
return;
}

// 2. hash info
std::vector<uint8_t> sha1_info_data;
std::vector<uint8_t> sha1_info_hash;

std::cout << "SHA1MF info is: \n" << sha1_info;
sha1_info_data = sha1_info.toBuffer();
std::cout << "SHA1MF sha1_info size: " << sha1_info_data.size() << "\n";
sha1_info_hash = hash_sha1(sha1_info_data.data(), sha1_info_data.size());
std::cout << "SHA1MF sha1_info_hash: " << bin2hex(sha1_info_hash) << "\n";

ObjectHandle o;
// check if content exists
// TODO: store "info_to_content" in reg/backend, for better lookup speed
// rn ok, bc this is rare
for (const auto& [it_ov, it_ih] : _os.registry().view<Components::FT1InfoSHA1Hash>().each()) {
if (it_ih.hash == sha1_info_hash) {
o = {_os.registry(), it_ov};
}
}
if (static_cast<bool>(o)) {
// TODO: check if content is incomplete and use file instead
if (!o.all_of<Components::FT1InfoSHA1>()) {
o.emplace<Components::FT1InfoSHA1>(sha1_info);
}
if (!o.all_of<Components::FT1InfoSHA1Data>()) {
o.emplace<Components::FT1InfoSHA1Data>(sha1_info_data);
}

// hash has to be set already
// Components::FT1InfoSHA1Hash

// hmmm
// TODO: we need a replacement for this
o.remove<ObjComp::Ephemeral::File::TagTransferPaused>();

// we dont want the info anymore
o.remove<Components::ReRequestInfoTimer>();
} else {
o = newObject(ByteSpan{sha1_info_hash});

o.emplace<Components::FT1InfoSHA1>(sha1_info);
o.emplace<Components::FT1InfoSHA1Data>(sha1_info_data); // keep around? or file?
o.emplace<Components::FT1InfoSHA1Hash>(sha1_info_hash);
}

{ // lookup tables and have
auto& cc = o.get_or_emplace<Components::FT1ChunkSHA1Cache>();
// skip have vec, since all
cc.have_count = sha1_info.chunks.size(); // need?

cc.chunk_hash_to_index.clear(); // for cpy pst
for (size_t i = 0; i < sha1_info.chunks.size(); i++) {
cc.chunk_hash_to_index[sha1_info.chunks[i]].push_back(i);
}
}

o.emplace_or_replace<ObjComp::F::TagLocalHaveAll>();
o.remove<ObjComp::F::LocalHaveBitset>();

{ // file info
// TODO: not overwrite fi? since same?
o.emplace_or_replace<ObjComp::F::SingleInfo>(file_name_, file_impl->_file_size);
o.emplace_or_replace<ObjComp::F::SingleInfoLocal>(file_path_);
o.emplace_or_replace<ObjComp::Ephemeral::FilePath>(file_path_); // ?
}

o.emplace_or_replace<Components::FT1File2>(std::move(file_impl));

if (!o.all_of<ObjComp::Ephemeral::File::TransferStats>()) {
o.emplace<ObjComp::Ephemeral::File::TransferStats>();
}

cb(o);

// TODO: earlier?
_os.throwEventUpdate(o);
}));
ibs->info_builder_dirty = true; // still in scope, set before mutex unlock
})).detach();
}

std::unique_ptr<File2I> SHA1MappedFilesystem::file2(Object ov, FILE2_FLAGS flags) {
if (flags & FILE2_RAW) {
std::cerr << "SHA1MF error: does not support raw modes\n";
return nullptr;
}

ObjectHandle o{_os.registry(), ov};

if (!static_cast<bool>(o)) {
return nullptr;
}

// will this do if we go and support enc?
// use ObjComp::Ephemeral::FilePath instead??
if (!o.all_of<ObjComp::F::SingleInfoLocal>()) {
return nullptr;
}

const auto& file_path = o.get<ObjComp::F::SingleInfoLocal>().file_path;
if (file_path.empty()) {
return nullptr;
}

// TODO: read-only one too
// since they are mapped, is this efficent to have multiple?
auto res = construct_file2_rw_mapped(file_path, -1);
if (!res || !res->isGood()) {
std::cerr << "SHA1MF error: failed constructing mapped file '" << file_path << "'\n";
return nullptr;
}

return res;
}

} // Backends

@ -1,39 +0,0 @@
#pragma once

#include <solanaceae/object_store/object_store.hpp>

#include <string>
#include <string_view>
#include <memory>

namespace Backends {

// fwd to hide the threading headers
struct SHA1MappedFilesystem_InfoBuilderState;

struct SHA1MappedFilesystem : public StorageBackendI {
std::unique_ptr<SHA1MappedFilesystem_InfoBuilderState> _ibs;

SHA1MappedFilesystem(
ObjectStore2& os
);
~SHA1MappedFilesystem(void);

// pull from info builder queue
// call from main thread (os thread?) often
void tick(void);

ObjectHandle newObject(ByteSpan id) override;

// performs async file hashing
// create message in cb
void newFromFile(std::string_view file_name, std::string_view file_path, std::function<void(ObjectHandle o)>&& cb/*, bool merge_preexisting = false*/);

// might return pre-existing?
ObjectHandle newFromInfoHash(ByteSpan info_hash);

std::unique_ptr<File2I> file2(Object o, FILE2_FLAGS flags) override;
};

} // Backends

@ -1,395 +0,0 @@
#include "./chunk_picker.hpp"

#include <solanaceae/tox_contacts/components.hpp>
#include "./contact_components.hpp"

#include <solanaceae/object_store/meta_components_file.hpp>
#include "./components.hpp"

#include <algorithm>

#include <iostream>

// TODO: move ps to own file
// picker strategies are generators
// gen returns true if a valid chunk was picked
// ps should be light weight and no persistant state
// ps produce an index only once

// simply scans from the beginning, requesting chunks in that order
struct PickerStrategySequential {
const BitSet& chunk_candidates;
const size_t total_chunks;

size_t i {0u};

PickerStrategySequential(
const BitSet& chunk_candidates_,
const size_t total_chunks_,
const size_t start_offset_ = 0u
) :
chunk_candidates(chunk_candidates_),
total_chunks(total_chunks_),
i(start_offset_)
{}

bool gen(size_t& out_chunk_idx) {
for (; i < total_chunks && i < chunk_candidates.size_bits(); i++) {
if (chunk_candidates[i]) {
out_chunk_idx = i;
i++;
return true;
}
}

return false;
}
};

// chooses a random start position and then requests linearly from there
struct PickerStrategyRandom {
const BitSet& chunk_candidates;
const size_t total_chunks;
std::minstd_rand& rng;

size_t count {0u};
size_t i {rng()%total_chunks};

PickerStrategyRandom(
const BitSet& chunk_candidates_,
const size_t total_chunks_,
std::minstd_rand& rng_
) :
chunk_candidates(chunk_candidates_),
total_chunks(total_chunks_),
rng(rng_)
{}

bool gen(size_t& out_chunk_idx) {
for (; count < total_chunks; count++, i++) {
// wrap around
if (i >= total_chunks) {
i = i%total_chunks;
}

if (chunk_candidates[i]) {
out_chunk_idx = i;
count++;
i++;
return true;
}
}

return false;
}
};

// switches randomly between random and sequential
struct PickerStrategyRandomSequential {
PickerStrategyRandom psr;
PickerStrategySequential pssf;

// TODO: configurable
std::bernoulli_distribution d{0.5f};

PickerStrategyRandomSequential(
const BitSet& chunk_candidates_,
const size_t total_chunks_,
std::minstd_rand& rng_,
const size_t start_offset_ = 0u
) :
psr(chunk_candidates_, total_chunks_, rng_),
pssf(chunk_candidates_, total_chunks_, start_offset_)
{}

bool gen(size_t& out_chunk_idx) {
if (d(psr.rng)) {
return psr.gen(out_chunk_idx);
} else {
return pssf.gen(out_chunk_idx);
}
}
};

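A small aside on the generator-style picker strategies above (the helper below is illustrative only and not part of the deleted file): gen() hands out each candidate chunk index at most once and returns false when exhausted, so a caller simply loops until it has enough picks:

#include <cstddef>
#include <vector>

// hypothetical driver for any of the PickerStrategy* types above
template <typename Strategy>
std::vector<size_t> pick_up_to(Strategy& strategy, size_t max_picks) {
	std::vector<size_t> picked;
	size_t idx {0};
	while (picked.size() < max_picks && strategy.gen(idx)) {
		picked.push_back(idx);
	}
	return picked;
}
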
// TODO: return bytes instead, so it can be done chunk size independent
static constexpr size_t flowWindowToRequestCount(size_t flow_window) {
// based on 500KiB/s with ~0.05s delay looks fine
// increase to 4 at wnd >= 25*1024
if (flow_window >= 25*1024) {
return 4u;
}
return 3u;
}

void ChunkPicker::updateParticipation(
Contact3Handle c,
ObjectRegistry& objreg
) {
if (!c.all_of<Contact::Components::FT1Participation>()) {
participating_unfinished.clear();
return;
}

entt::dense_set<Object> checked;
for (const Object ov : c.get<Contact::Components::FT1Participation>().participating) {
using Priority = ObjComp::Ephemeral::File::DownloadPriority::Priority;
const ObjectHandle o {objreg, ov};

if (participating_unfinished.contains(o)) {
if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
participating_unfinished.erase(o);
continue;
}

if (o.all_of<ObjComp::Ephemeral::File::TagTransferPaused>()) {
participating_unfinished.erase(o);
continue;
}

if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
participating_unfinished.erase(o);
continue;
}

// TODO: optimize this to only change on dirty, or something
if (o.all_of<ObjComp::Ephemeral::File::DownloadPriority>()) {
Priority prio = o.get<ObjComp::Ephemeral::File::DownloadPriority>().p;

uint16_t pskips =
prio == Priority::HIGHEST ? 0u :
prio == Priority::HIGH ? 1u :
prio == Priority::NORMAL ? 2u :
prio == Priority::LOW ? 4u :
8u // LOWEST
;

participating_unfinished.at(o).should_skip = pskips;
}
} else {
if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
continue;
}

if (o.all_of<ObjComp::Ephemeral::File::TagTransferPaused>()) {
continue;
}

if (!o.all_of<ObjComp::F::TagLocalHaveAll>()) {
Priority prio = Priority::NORMAL;

if (o.all_of<ObjComp::Ephemeral::File::DownloadPriority>()) {
prio = o.get<ObjComp::Ephemeral::File::DownloadPriority>().p;
}

uint16_t pskips =
prio == Priority::HIGHEST ? 0u :
prio == Priority::HIGH ? 1u :
prio == Priority::NORMAL ? 2u :
prio == Priority::LOW ? 4u :
8u // LOWEST
;

participating_unfinished.emplace(o, ParticipationEntry{pskips});
}
}
checked.emplace(o);
}

// now we still need to remove left over unfinished.
// TODO: how did they get left over
entt::dense_set<Object> to_remove;
for (const auto& [o, _] : participating_unfinished) {
if (!checked.contains(o)) {
std::cerr << "unfinished contained non participating\n";
to_remove.emplace(o);
}
}
for (const auto& o : to_remove) {
participating_unfinished.erase(o);
}
}

std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
Contact3Handle c,
ObjectRegistry& objreg,
const ReceivingTransfers& rt,
const size_t open_requests
//const size_t flow_window
//NGCFT1& nft
) {
if (!static_cast<bool>(c)) {
assert(false); return {};
}

if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral>()) {
assert(false); return {};
}
const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();

updateParticipation(c, objreg);

if (participating_unfinished.empty()) {
participating_in_last = entt::null;
return {};
}

std::vector<ContentChunkR> req_ret;

// count running tf and open requests
const size_t num_ongoing_transfers = rt.sizePeer(group_number, peer_number);
// TODO: account for open requests
const int64_t num_total = num_ongoing_transfers + open_requests;

// TODO: base max on rate(chunks per sec), gonna be ass with variable chunk size
//const size_t num_max = std::max(max_tf_chunk_requests, flowWindowToRequestCount(flow_window));
const size_t num_max = max_tf_chunk_requests;

const size_t num_requests = std::max<int64_t>(0, int64_t(num_max)-num_total);
std::cerr << "CP: want " << num_requests << "(rt:" << num_ongoing_transfers << " or:" << open_requests << ") from " << group_number << ":" << peer_number << "\n";

// while n < X

// round robin content (remember last obj)
if (!objreg.valid(participating_in_last) || !participating_unfinished.count(participating_in_last)) {
participating_in_last = participating_unfinished.begin()->first;
}
assert(objreg.valid(participating_in_last));

auto it = participating_unfinished.find(participating_in_last);
// hard limit robin rounds to array size times 20
for (size_t i = 0; req_ret.size() < num_requests && i < participating_unfinished.size()*20; i++, it++) {
if (it == participating_unfinished.end()) {
it = participating_unfinished.begin();
}

if (it->second.skips < it->second.should_skip) {
it->second.skips++;
continue;
}
it->second.skips = 0;

ObjectHandle o {objreg, it->first};

// intersect self have with other have
if (!o.all_of<ObjComp::F::RemoteHaveBitset, Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
// rare case where no one else has anything
continue;
}

if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
std::cerr << "ChunkPicker error: completed content still in participating_unfinished!\n";
continue;
}

//const auto& cc = o.get<Components::FT1ChunkSHA1Cache>();

const auto& others_have = o.get<ObjComp::F::RemoteHaveBitset>().others;
auto other_it = others_have.find(c);
if (other_it == others_have.end()) {
// rare case where the other is participating but has nothing
continue;
}

const auto& other_have = other_it->second;

const auto& info = o.get<Components::FT1InfoSHA1>();
const auto total_chunks = info.chunks.size();

const auto* lhb = o.try_get<ObjComp::F::LocalHaveBitset>();

// if we dont have anything, this might not exist yet
BitSet chunk_candidates = lhb == nullptr ? BitSet{total_chunks} : (lhb->have.size_bits() >= total_chunks ? lhb->have : BitSet{total_chunks});

if (!other_have.have_all) {
// AND is the same as ~(~A | ~B)
// that means we leave chunk_candidates as (have is inverted want)
// merge is or
// invert at the end
chunk_candidates
.merge(other_have.have.invert())
.invert();
// TODO: add intersect for more perf
} else {
chunk_candidates.invert();
}
auto& requested_chunks = o.get_or_emplace<Components::FT1ChunkSHA1Requested>().chunks;

// TODO: trim off round up to 8, since they are now always set

// now select (globaly) unrequested other have
// TODO: how do we prioritize within a file?
// - sequential (walk from start (or readhead?))
// - random (choose random start pos and walk)
// - random/sequential (randomly choose between the 2)
// - rarest (keep track of rarity and sort by that)
// - steaming (use readhead to determain time critical chunks, potentially over requesting, first (relative to stream head) otherwise
// maybe look into libtorrens deadline stuff
// - arbitrary priority maps/functions (and combine with above in rations)

// TODO: configurable
size_t start_offset {0u};
if (o.all_of<ObjComp::Ephemeral::File::ReadHeadHint>()) {
const auto byte_offset = o.get<ObjComp::Ephemeral::File::ReadHeadHint>().offset_into_file;
if (byte_offset <= info.file_size) {
start_offset = byte_offset/info.chunk_size;
} else {
// error?
}
}
//PickerStrategySequential ps(chunk_candidates, total_chunks, start_offset);
//PickerStrategyRandom ps(chunk_candidates, total_chunks, _rng);
PickerStrategyRandomSequential ps(chunk_candidates, total_chunks, _rng, start_offset);
|
|
||||||
size_t out_chunk_idx {0};
|
|
||||||
size_t req_from_this_o {0};
|
|
||||||
while (ps.gen(out_chunk_idx) && req_ret.size() < num_requests && req_from_this_o < std::max<size_t>(total_chunks/3, 1)) {
|
|
||||||
// out_chunk_idx is a potential candidate we can request form peer
|
|
||||||
|
|
||||||
// - check against double requests
|
|
||||||
if (std::find_if(req_ret.cbegin(), req_ret.cend(), [&](const ContentChunkR& x) -> bool {
|
|
||||||
return x.object == o && x.chunk_index == out_chunk_idx;
|
|
||||||
}) != req_ret.cend()) {
|
|
||||||
// already in return array
|
|
||||||
// how did we get here? should we fast exit? if sequential strat, we would want to
|
|
||||||
continue; // skip
|
|
||||||
}
|
|
||||||
|
|
||||||
// - check against global requests (this might differ based on strat)
|
|
||||||
if (requested_chunks.count(out_chunk_idx) != 0) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// - we check against globally running transfers (this might differ based on strat)
|
|
||||||
if (rt.containsChunk(o, out_chunk_idx)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// if nothing else blocks this, add to ret
|
|
||||||
req_ret.push_back(ContentChunkR{o, out_chunk_idx});
|
|
||||||
|
|
||||||
// TODO: move this after packet was sent successfully
|
|
||||||
// (move net in? hmm)
|
|
||||||
requested_chunks[out_chunk_idx] = Components::FT1ChunkSHA1Requested::Entry{0.f, c};
|
|
||||||
|
|
||||||
req_from_this_o++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (it == participating_unfinished.end()) {
|
|
||||||
participating_in_last = entt::null;
|
|
||||||
} else {
|
|
||||||
participating_in_last = it->first;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (req_ret.size() < num_requests) {
|
|
||||||
std::cerr << "CP: could not fulfil, " << group_number << ":" << peer_number << " only has " << req_ret.size() << " candidates\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
// -- no -- (just compat with old code, ignore)
|
|
||||||
// if n < X
|
|
||||||
// optimistically request 1 chunk other does not have
|
|
||||||
// (don't mark es requested? or lower cooldown to re-request?)
|
|
||||||
|
|
||||||
return req_ret;
|
|
||||||
}
|
|
||||||
|
|
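The candidate selection above leans on the identity A AND B = NOT(NOT A OR NOT B), because the BitSet used here exposes merge (OR) and invert but no direct intersect. A minimal sketch of the same computation with std::vector<bool> instead of the project's BitSet (illustration only, not the real API):

// Illustration: candidates are chunks the peer has and we still want.
// want = NOT local_have; candidates = want AND remote_have = NOT(local_have OR NOT remote_have)
#include <cstddef>
#include <vector>
std::vector<bool> chunk_candidates(const std::vector<bool>& local_have, const std::vector<bool>& remote_have) {
	std::vector<bool> out(local_have.size());
	for (size_t i = 0; i < out.size(); i++) {
		out[i] = !(local_have[i] || !remote_have[i]); // equivalent to !local_have[i] && remote_have[i]
	}
	return out;
}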
@ -1,77 +0,0 @@
#pragma once

#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/object_store/object_store.hpp>

#include "./components.hpp"

#include "./receiving_transfers.hpp"

#include <entt/container/dense_map.hpp>
#include <entt/container/dense_set.hpp>

#include <cstddef>
#include <cstdint>
#include <random>

//#include <solanaceae/ngc_ft1/ngcft1.hpp>

// goal is to always keep 2 transfers running and X(6) requests queued up
// per peer

struct ChunkPickerUpdateTag {};

struct ChunkPickerTimer {
	// adds update tag on 0
	float timer {0.f};
};

// contact component?
struct ChunkPicker {
	// max transfers
	static constexpr size_t max_tf_info_requests {1};
	static constexpr size_t max_tf_chunk_requests {4}; // TODO: dynamic, function/factor of (window(delay*speed)/chunksize)

	// TODO: cheaper init? tls rng for deep seeding?
	std::minstd_rand _rng{std::random_device{}()};

	// TODO: handle with hash utils?
	struct ParticipationEntry {
		ParticipationEntry(void) {}
		ParticipationEntry(uint16_t s) : should_skip(s) {}
		// skips in round robin -> lower should_skip => higher priority
		// TODO: replace with enum value
		uint16_t should_skip {2}; // 0 high, 8 low (double each time? 0,1,2,4,8)
		uint16_t skips {0};
	};
	entt::dense_map<Object, ParticipationEntry> participating_unfinished;
	Object participating_in_last {entt::null};

	private: // TODO: properly sort
		// updates participating_unfinished
		void updateParticipation(
			Contact3Handle c,
			ObjectRegistry& objreg
		);
	public:

	// ---------- tick ----------

	//void sendInfoRequests();

	// is this like a system?
	struct ContentChunkR {
		ObjectHandle object;
		size_t chunk_index;
	};
	// returns list of chunks to request
	[[nodiscard]] std::vector<ContentChunkR> updateChunkRequests(
		Contact3Handle c,
		ObjectRegistry& objreg,
		const ReceivingTransfers& rt,
		const size_t open_requests
		//const size_t flow_window
		//NGCFT1& nft
	);
};
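The should_skip weights translate download priority into how often an object is serviced in the round-robin of updateChunkRequests: an entry with should_skip == N is only picked on every (N+1)-th visit. A standalone sketch of that counter, assuming the same semantics as the loop in the .cpp above:

#include <cstdint>
// Hypothetical illustration of the skip counter, separate from the real loop.
struct Entry { uint16_t should_skip {2}; uint16_t skips {0}; };
bool visit(Entry& e) {
	if (e.skips < e.should_skip) { e.skips++; return false; } // skipped this round
	e.skips = 0;
	return true; // serviced this round
}
// HIGHEST(0) -> every visit, HIGH(1) -> every 2nd, NORMAL(2) -> every 3rd,
// LOW(4) -> every 5th, LOWEST(8) -> every 9th visit.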
@ -1,127 +0,0 @@
#include "./chunk_picker_systems.hpp"

#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>

#include "./components.hpp"
#include "./chunk_picker.hpp"
#include "./contact_components.hpp"

#include <cassert>
#include <iostream>

namespace Systems {

void chunk_picker_updates(
	Contact3Registry& cr,
	ObjectRegistry& os_reg,
	const entt::dense_map<Contact3, size_t>& peer_open_requests,
	const ReceivingTransfers& receiving_transfers,
	NGCFT1& nft, // TODO: remove this somehow
	const float delta
) {
	std::vector<Contact3Handle> cp_to_remove;

	// first, update timers
	cr.view<ChunkPickerTimer>().each([&cr, delta](const Contact3 cv, ChunkPickerTimer& cpt) {
		cpt.timer -= delta;
		if (cpt.timer <= 0.f) {
			cr.emplace_or_replace<ChunkPickerUpdateTag>(cv);
		}
	});

	//std::cout << "number of chunkpickers: " << _cr.storage<ChunkPicker>().size() << ", of which " << _cr.storage<ChunkPickerUpdateTag>().size() << " need updating\n";

	// now check for potentially missing cp
	auto cput_view = cr.view<ChunkPickerUpdateTag>();
	cput_view.each([&cr, &cp_to_remove](const Contact3 cv) {
		Contact3Handle c{cr, cv};

		//std::cout << "cput :)\n";

		if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral, Contact::Components::FT1Participation>()) {
			std::cout << "cput uh nuh :(\n";
			cp_to_remove.push_back(c);
			return;
		}

		if (!c.all_of<ChunkPicker>()) {
			std::cout << "creating new cp!!\n";
			c.emplace<ChunkPicker>();
			c.emplace_or_replace<ChunkPickerTimer>();
		}
	});

	// now update all cp that are tagged
	cr.view<ChunkPicker, ChunkPickerUpdateTag>().each([&cr, &os_reg, &peer_open_requests, &receiving_transfers, &nft, &cp_to_remove](const Contact3 cv, ChunkPicker& cp) {
		Contact3Handle c{cr, cv};

		if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral, Contact::Components::FT1Participation>()) {
			cp_to_remove.push_back(c);
			return;
		}

		//std::cout << "cpu :)\n";

		// HACK: expensive, dont do every tick, only on events
		// do verification in debug instead?
		//cp.validateParticipation(c, _os.registry());

		size_t peer_open_request = 0;
		if (peer_open_requests.contains(c)) {
			peer_open_request += peer_open_requests.at(c);
		}

		auto new_requests = cp.updateChunkRequests(
			c,
			os_reg,
			receiving_transfers,
			peer_open_request
		);

		if (new_requests.empty()) {
			// updateChunkRequests updates the unfinished
			// TODO: pull out and check there?
			if (cp.participating_unfinished.empty()) {
				std::cout << "destroying empty useless cp\n";
				cp_to_remove.push_back(c);
			} else {
				// most likely will have something soon
				// TODO: mark dirty on have instead?
				c.get_or_emplace<ChunkPickerTimer>().timer = 10.f;
			}

			return;
		}

		assert(c.all_of<Contact::Components::ToxGroupPeerEphemeral>());
		const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();

		for (const auto [r_o, r_idx] : new_requests) {
			auto& cc = r_o.get<Components::FT1ChunkSHA1Cache>();
			const auto& info = r_o.get<Components::FT1InfoSHA1>();

			// request chunk_idx
			nft.NGC_FT1_send_request_private(
				group_number, peer_number,
				static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_CHUNK),
				info.chunks.at(r_idx).data.data(), info.chunks.at(r_idx).size()
			);
			std::cout << "SHA1_NGCFT1: requesting chunk [" << info.chunks.at(r_idx) << "] from " << group_number << ":" << peer_number << "\n";
		}

		// force update every minute
		// TODO: add small random bias to spread load
		c.get_or_emplace<ChunkPickerTimer>().timer = 60.f;
	});

	// unmark all marked
	cr.clear<ChunkPickerUpdateTag>();
	assert(cr.storage<ChunkPickerUpdateTag>().empty());

	for (const auto& c : cp_to_remove) {
		c.remove<ChunkPicker, ChunkPickerTimer>();
	}
}

} // Systems
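The system above follows a timer-then-tag pattern: timers tick every frame, expiry is converted into a ChunkPickerUpdateTag, only tagged contacts run the expensive picking, and the tag storage is cleared at the end. A stripped-down sketch of the same pattern with plain entt and placeholder components (assumed names, not the project's):

#include <entt/entity/registry.hpp>
struct Timer { float timer {0.f}; };
struct UpdateTag {};
void tick(entt::registry& r, const float delta) {
	r.view<Timer>().each([&r, delta](const entt::entity e, Timer& t) {
		t.timer -= delta;
		if (t.timer <= 0.f) r.emplace_or_replace<UpdateTag>(e); // mark for update
	});
	r.view<UpdateTag>().each([&r](const entt::entity e) {
		// ... expensive per-entity work would go here ...
		r.get<Timer>(e).timer = 60.f; // re-arm
	});
	r.clear<UpdateTag>(); // unmark all marked
}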
@ -1,22 +0,0 @@
#pragma once

#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/tox_contacts/components.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>

#include "./receiving_transfers.hpp"

namespace Systems {

void chunk_picker_updates(
	Contact3Registry& cr,
	ObjectRegistry& os_reg,
	const entt::dense_map<Contact3, size_t>& peer_open_requests,
	const ReceivingTransfers& receiving_transfers,
	NGCFT1& nft, // TODO: remove this somehow
	const float delta
);

} // Systems
@ -1,69 +0,0 @@
#include "./components.hpp"

#include <solanaceae/object_store/meta_components_file.hpp>

namespace Components {

std::vector<size_t> FT1ChunkSHA1Cache::chunkIndices(const SHA1Digest& hash) const {
	const auto it = chunk_hash_to_index.find(hash);
	if (it != chunk_hash_to_index.cend()) {
		return it->second;
	} else {
		return {};
	}
}

bool FT1ChunkSHA1Cache::haveChunk(ObjectHandle o, const SHA1Digest& hash) const {
	if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
		return true;
	}

	const auto* lhb = o.try_get<ObjComp::F::LocalHaveBitset>();
	if (lhb == nullptr) {
		return false; // we dont have anything yet
	}

	if (auto i_vec = chunkIndices(hash); !i_vec.empty()) {
		// TODO: should i test all?
		//return have_chunk[i_vec.front()];
		return lhb->have[i_vec.front()];
	}

	// not part of this file
	return false;
}

void ReAnnounceTimer::set(const float new_timer) {
	timer = new_timer;
	last_max = new_timer;
}

void ReAnnounceTimer::reset(void) {
	if (last_max <= 0.01f) {
		last_max = 1.f;
	}

	last_max *= 2.f;
	timer = last_max;
}

void ReAnnounceTimer::lower(void) {
	timer *= 0.1f;
	//last_max *= 0.1f; // is this a good idea?
	last_max *= 0.9f; // is this a good idea?
}

void TransferStatsTally::Peer::trimSent(const float time_now) {
	while (recently_sent.size() > 4 && time_now - recently_sent.front().time_point > 1.f) {
		recently_sent.pop_front();
	}
}

void TransferStatsTally::Peer::trimReceived(const float time_now) {
	while (recently_received.size() > 4 && time_now - recently_received.front().time_point > 1.f) {
		recently_received.pop_front();
	}
}

} // Components
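reset() is the exponential back-off referenced in the header: each expiry doubles last_max and re-arms the timer with it, while lower() shortens the current wait (e.g. on peer join) and slightly decays the ceiling. Assuming a start via set(60.f), the values evolve roughly like this (illustrative timeline, seconds):

// set(60)  -> timer = 60,  last_max = 60
// reset()  -> last_max = 120, timer = 120
// reset()  -> last_max = 240, timer = 240
// lower()  -> timer = 24 (timer *= 0.1), last_max = 216 (last_max *= 0.9)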
@ -1,118 +0,0 @@
#pragma once

#include <solanaceae/contact/components.hpp>
#include <solanaceae/message3/components.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/object_store/meta_components_file.hpp>

#include <solanaceae/util/bitset.hpp>

#include <entt/container/dense_set.hpp>
#include <entt/container/dense_map.hpp>

#include "./ft1_sha1_info.hpp"
#include "./hash_utils.hpp"

#include <vector>
#include <deque>


// TODO: rename to object components
namespace Components {

	struct Messages {
		// dense set instead?
		std::vector<Message3Handle> messages;
	};

	using FT1InfoSHA1 = FT1InfoSHA1;

	struct FT1InfoSHA1Data {
		std::vector<uint8_t> data;
	};

	struct FT1InfoSHA1Hash {
		std::vector<uint8_t> hash;
	};

	struct FT1ChunkSHA1Cache {
		// TODO: extract have_count to generic comp

		// have_chunk is the size of info.chunks.size(), or empty if have_all
		// keep in mind bitset rounds up to 8s
		//BitSet have_chunk{0};

		//bool have_all {false};
		size_t have_count {0}; // move?
		entt::dense_map<SHA1Digest, std::vector<size_t>> chunk_hash_to_index;

		std::vector<size_t> chunkIndices(const SHA1Digest& hash) const;
		bool haveChunk(ObjectHandle o, const SHA1Digest& hash) const;
	};

	struct FT1File2 {
		// the cached file2 for faster access
		// should be destroyed when no activity and recreated on demand
		std::unique_ptr<File2I> file;
	};

	struct FT1ChunkSHA1Requested {
		// requested chunks with a timer since last request
		struct Entry {
			float timer {0.f};
			Contact3 c {entt::null};
		};
		entt::dense_map<size_t, Entry> chunks;
	};

	// TODO: once announce is shipped, remove the "Suspected"
	struct SuspectedParticipants {
		entt::dense_set<Contact3> participants;
	};

	struct ReRequestInfoTimer {
		float timer {0.f};
	};

	struct AnnounceTargets {
		entt::dense_set<Contact3> targets;
	};

	struct ReAnnounceTimer {
		float timer {0.f};
		float last_max {0.f};

		void set(const float new_timer);

		// exponential back-off
		void reset(void);

		// on peer join to group
		void lower(void);
	};

	struct TransferStatsSeparated {
		entt::dense_map<Contact3, ObjComp::Ephemeral::File::TransferStats> stats;
	};

	// used to populate stats
	struct TransferStatsTally {
		struct Peer {
			struct Entry {
				float time_point {0.f};
				uint64_t bytes {0u};
				bool accounted {false};
			};
			std::deque<Entry> recently_sent;
			std::deque<Entry> recently_received;

			// keep at least 4 entries or 1 sec
			// trim too old front
			void trimSent(const float time_now);
			void trimReceived(const float time_now);
		};
		entt::dense_map<Contact3, Peer> tally;
	};

} // Components
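TransferStatsTally::Peer keeps a short sliding window (at least 4 entries or roughly the last second) of sent/received byte counts; a per-peer rate can be derived by summing bytes over the window span. A hypothetical helper, not part of the header above, assuming "./components.hpp", <algorithm>, <cstdint> and <deque> are included:

// Hypothetical helper: bytes per second over the retained window.
static float rateFromWindow(const std::deque<Components::TransferStatsTally::Peer::Entry>& window, const float time_now) {
	if (window.empty()) return 0.f;
	uint64_t bytes {0};
	for (const auto& e : window) bytes += e.bytes;
	const float span = std::max(time_now - window.front().time_point, 0.001f);
	return float(bytes) / span;
}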
@ -1,13 +0,0 @@
#pragma once

#include <solanaceae/object_store/object_store.hpp>
#include <entt/container/dense_set.hpp>

namespace Contact::Components {

	struct FT1Participation {
		entt::dense_set<Object> participating;
	};

} // Contact::Components
@ -1,8 +0,0 @@
#include "./file_constructor.hpp"

#include "./file_rw_mapped.hpp"

std::unique_ptr<File2I> construct_file2_rw_mapped(std::string_view file_path, int64_t file_size) {
	return std::make_unique<File2RWMapped>(file_path, file_size);
}
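construct_file2_rw_mapped is a thin factory so callers only depend on File2I and not on the mio-backed implementation header. A plausible call site (file name and size are made up for illustration):

// Hypothetical usage; "download.bin" and the 4 MiB size are illustrative.
auto file = construct_file2_rw_mapped("download.bin", 4*1024*1024);
if (!file || !file->isGood()) {
	// mapping failed; bail out or fall back to a non-mapped file
}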
@ -1,9 +0,0 @@
#pragma once

#include <solanaceae/file/file2.hpp>

#include <memory>
#include <string_view>

std::unique_ptr<File2I> construct_file2_rw_mapped(std::string_view file_path, int64_t file_size = -1);
@ -1,83 +1,58 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <solanaceae/file/file2.hpp>
|
#include <solanaceae/message3/file.hpp>
|
||||||
|
|
||||||
#include "./mio.hpp"
|
#include "./mio.hpp"
|
||||||
|
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <iostream>
|
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
struct File2RWMapped : public File2I {
|
struct FileRWMapped : public FileI {
|
||||||
mio::ummap_sink _file_map;
|
mio::ummap_sink _file_map;
|
||||||
|
|
||||||
// TODO: add truncate support?
|
// TODO: add truncate support?
|
||||||
// TODO: rw always true?
|
FileRWMapped(std::string_view file_path, uint64_t file_size) {
|
||||||
File2RWMapped(std::string_view file_path, int64_t file_size = -1) : File2I(true, true) {
|
_file_size = file_size;
|
||||||
std::filesystem::path native_file_path{file_path};
|
|
||||||
|
|
||||||
if (!std::filesystem::exists(native_file_path)) {
|
if (!std::filesystem::exists(file_path)) {
|
||||||
std::ofstream(native_file_path) << '\0'; // force create the file
|
std::ofstream(std::string{file_path}) << '\0'; // force create the file
|
||||||
}
|
|
||||||
|
|
||||||
_file_size = std::filesystem::file_size(native_file_path);
|
|
||||||
if (file_size >= 0 && _file_size != file_size) {
|
|
||||||
_file_size = file_size;
|
|
||||||
std::filesystem::resize_file(native_file_path, file_size); // ensure size, usually sparse
|
|
||||||
}
|
}
|
||||||
|
std::filesystem::resize_file(file_path, file_size); // ensure size, usually sparse
|
||||||
|
|
||||||
std::error_code err;
|
std::error_code err;
|
||||||
// sink, is also read
|
// sink, is also read
|
||||||
_file_map.map(native_file_path.u8string(), 0, _file_size, err);
|
_file_map.map(std::string{file_path}, 0, file_size, err);
|
||||||
|
|
||||||
if (err) {
|
if (err) {
|
||||||
std::cerr << "FileRWMapped error: mapping file failed " << err << "\n";
|
// TODO: error
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual ~File2RWMapped(void) override {}
|
virtual ~FileRWMapped(void) override {}
|
||||||
|
|
||||||
bool isGood(void) override {
|
bool isGood(void) override {
|
||||||
return _file_map.is_mapped();
|
return _file_map.is_mapped();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool write(const ByteSpan data, int64_t pos = -1) override {
|
std::vector<uint8_t> read(uint64_t pos, uint64_t size) override {
|
||||||
// TODO: support streaming write
|
if (pos+size > _file_size) {
|
||||||
if (pos < 0) {
|
//assert(false && "read past end");
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
return {_file_map.data()+pos, _file_map.data()+(pos+size)};
|
||||||
|
}
|
||||||
|
|
||||||
|
bool write(uint64_t pos, const std::vector<uint8_t>& data) override {
|
||||||
|
if (pos+data.size() > _file_size) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (data.empty()) {
|
std::memcpy(_file_map.data()+pos, data.data(), data.size());
|
||||||
return true; // false?
|
|
||||||
}
|
|
||||||
|
|
||||||
// file size is fix for mmaped files
|
|
||||||
if (pos+data.size > _file_size) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::memcpy(_file_map.data()+pos, data.ptr, data.size);
|
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
ByteSpanWithOwnership read(uint64_t size, int64_t pos = -1) override {
|
|
||||||
// TODO: support streaming read
|
|
||||||
if (pos < 0) {
|
|
||||||
assert(false && "streaming not implemented");
|
|
||||||
return ByteSpan{};
|
|
||||||
}
|
|
||||||
|
|
||||||
if (pos+size > _file_size) {
|
|
||||||
assert(false && "read past end");
|
|
||||||
return ByteSpan{};
|
|
||||||
}
|
|
||||||
|
|
||||||
// return non-owning
|
|
||||||
return ByteSpan{_file_map.data()+pos, size};
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1,8 +1,5 @@
|
|||||||
#include "./ft1_sha1_info.hpp"
|
#include "./ft1_sha1_info.hpp"
|
||||||
|
|
||||||
// next power of two
|
|
||||||
#include <entt/core/memory.hpp>
|
|
||||||
|
|
||||||
#include <sodium.h>
|
#include <sodium.h>
|
||||||
|
|
||||||
SHA1Digest::SHA1Digest(const std::vector<uint8_t>& v) {
|
SHA1Digest::SHA1Digest(const std::vector<uint8_t>& v) {
|
||||||
@ -31,27 +28,6 @@ std::ostream& operator<<(std::ostream& out, const SHA1Digest& v) {
|
|||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t chunkSizeFromFileSize(uint64_t file_size) {
|
|
||||||
const uint64_t fs_low {UINT64_C(512)*1024};
|
|
||||||
const uint64_t fs_high {UINT64_C(2)*1024*1024*1024};
|
|
||||||
|
|
||||||
const uint32_t cs_low {32*1024};
|
|
||||||
const uint32_t cs_high {4*1024*1024};
|
|
||||||
|
|
||||||
if (file_size <= fs_low) { // 512kib
|
|
||||||
return cs_low; // 32kib
|
|
||||||
} else if (file_size >= fs_high) { // 2gib
|
|
||||||
return cs_high; // 4mib
|
|
||||||
}
|
|
||||||
|
|
||||||
double t = file_size - fs_low;
|
|
||||||
t /= fs_high;
|
|
||||||
|
|
||||||
double x = (1 - t) * cs_low + t * cs_high;
|
|
||||||
|
|
||||||
return entt::next_power_of_two(uint64_t(x));
|
|
||||||
}
|
|
||||||
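chunkSizeFromFileSize above clamps to 32 KiB below 512 KiB and to 4 MiB above 2 GiB; in between it interpolates (note that t is divided by fs_high, not by the range) and rounds up to the next power of two. A worked example for a 100 MiB file under that formula:

// t = (100 MiB - 512 KiB) / 2 GiB          ~= 0.0486
// x = (1 - t)*32768 + t*4194304            ~= 235 000
// entt::next_power_of_two(x)                = 262144  -> 256 KiB chunks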
|
|
||||||
size_t FT1InfoSHA1::chunkSize(size_t chunk_index) const {
|
size_t FT1InfoSHA1::chunkSize(size_t chunk_index) const {
|
||||||
if (chunk_index+1 == chunks.size()) {
|
if (chunk_index+1 == chunks.size()) {
|
||||||
// last chunk
|
// last chunk
|
||||||
@ -63,7 +39,6 @@ size_t FT1InfoSHA1::chunkSize(size_t chunk_index) const {
|
|||||||
|
|
||||||
std::vector<uint8_t> FT1InfoSHA1::toBuffer(void) const {
|
std::vector<uint8_t> FT1InfoSHA1::toBuffer(void) const {
|
||||||
std::vector<uint8_t> buffer;
|
std::vector<uint8_t> buffer;
|
||||||
buffer.reserve(256+8+4+20*chunks.size());
|
|
||||||
|
|
||||||
assert(!file_name.empty());
|
assert(!file_name.empty());
|
||||||
// TODO: optimize
|
// TODO: optimize
|
||||||
|
@ -18,30 +18,28 @@ struct SHA1Digest {
|
|||||||
bool operator==(const SHA1Digest& other) const { return data == other.data; }
|
bool operator==(const SHA1Digest& other) const { return data == other.data; }
|
||||||
bool operator!=(const SHA1Digest& other) const { return data != other.data; }
|
bool operator!=(const SHA1Digest& other) const { return data != other.data; }
|
||||||
|
|
||||||
constexpr size_t size(void) const { return data.size(); }
|
size_t size(void) const { return data.size(); }
|
||||||
};
|
};
|
||||||
|
|
||||||
std::ostream& operator<<(std::ostream& out, const SHA1Digest& v);
|
std::ostream& operator<<(std::ostream& out, const SHA1Digest& v);
|
||||||
|
|
||||||
namespace std { // inject
|
namespace std { // inject
|
||||||
template<> struct hash<SHA1Digest> {
|
template<> struct hash<SHA1Digest> {
|
||||||
std::uint64_t operator()(const SHA1Digest& h) const noexcept {
|
std::size_t operator()(const SHA1Digest& h) const noexcept {
|
||||||
return
|
return
|
||||||
std::uint64_t(h.data[0]) << (0*8) |
|
size_t(h.data[0]) << (0*8) |
|
||||||
std::uint64_t(h.data[1]) << (1*8) |
|
size_t(h.data[1]) << (1*8) |
|
||||||
std::uint64_t(h.data[2]) << (2*8) |
|
size_t(h.data[2]) << (2*8) |
|
||||||
std::uint64_t(h.data[3]) << (3*8) |
|
size_t(h.data[3]) << (3*8) |
|
||||||
std::uint64_t(h.data[4]) << (4*8) |
|
size_t(h.data[4]) << (4*8) |
|
||||||
std::uint64_t(h.data[5]) << (5*8) |
|
size_t(h.data[5]) << (5*8) |
|
||||||
std::uint64_t(h.data[6]) << (6*8) |
|
size_t(h.data[6]) << (6*8) |
|
||||||
std::uint64_t(h.data[7]) << (7*8)
|
size_t(h.data[7]) << (7*8)
|
||||||
;
|
;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
} // std
|
} // std
|
||||||
|
|
||||||
uint32_t chunkSizeFromFileSize(uint64_t file_size);
|
|
||||||
|
|
||||||
struct FT1InfoSHA1 {
|
struct FT1InfoSHA1 {
|
||||||
std::string file_name;
|
std::string file_name;
|
||||||
uint64_t file_size {0};
|
uint64_t file_size {0};
|
||||||
|
@ -1,48 +0,0 @@
#include "./participation.hpp"

#include "./contact_components.hpp"
#include "./chunk_picker.hpp"

#include <iostream>

bool addParticipation(Contact3Handle c, ObjectHandle o) {
	bool was_new {false};
	assert(static_cast<bool>(o));
	assert(static_cast<bool>(c));

	if (static_cast<bool>(o)) {
		const auto [_, inserted] = o.get_or_emplace<Components::SuspectedParticipants>().participants.emplace(c);
		was_new = inserted;
	}

	if (static_cast<bool>(c)) {
		const auto [_, inserted] = c.get_or_emplace<Contact::Components::FT1Participation>().participating.emplace(o);
		was_new = was_new || inserted;
	}

	//std::cout << "added " << (was_new?"new ":"") << "participant\n";

	return was_new;
}

void removeParticipation(Contact3Handle c, ObjectHandle o) {
	assert(static_cast<bool>(o));
	assert(static_cast<bool>(c));

	if (static_cast<bool>(o) && o.all_of<Components::SuspectedParticipants>()) {
		o.get<Components::SuspectedParticipants>().participants.erase(c);
	}

	if (static_cast<bool>(c)) {
		if (c.all_of<Contact::Components::FT1Participation>()) {
			c.get<Contact::Components::FT1Participation>().participating.erase(o);
		}

		if (c.all_of<ChunkPicker>()) {
			c.get<ChunkPicker>().participating_unfinished.erase(o);
		}
	}

	//std::cout << "removed participant\n";
}
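The two helpers keep the object-side (SuspectedParticipants) and contact-side (FT1Participation) sets in sync, and removal also drops the object from the contact's ChunkPicker. A hypothetical call pattern (the event names are illustrative, not from this codebase):

// on announce/bitset received from contact c for object o:
if (addParticipation(c, o)) {
	// first time this peer showed up for this object
}
// on peer exit:
removeParticipation(c, o); // also erases o from the contact's ChunkPicker, if present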
@ -1,8 +0,0 @@
#pragma once

#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/contact/contact_model3.hpp>

bool addParticipation(Contact3Handle c, ObjectHandle o);
void removeParticipation(Contact3Handle c, ObjectHandle o);
@ -1,91 +0,0 @@
|
|||||||
#include "./re_announce_systems.hpp"
|
|
||||||
|
|
||||||
#include "./components.hpp"
|
|
||||||
#include <solanaceae/object_store/meta_components_file.hpp>
|
|
||||||
#include <solanaceae/tox_contacts/components.hpp>
|
|
||||||
#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>
|
|
||||||
#include <vector>
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
namespace Systems {
|
|
||||||
|
|
||||||
void re_announce(
|
|
||||||
ObjectRegistry& os_reg,
|
|
||||||
Contact3Registry& cr,
|
|
||||||
NGCEXTEventProvider& neep,
|
|
||||||
const float delta
|
|
||||||
) {
|
|
||||||
std::vector<Object> to_remove;
|
|
||||||
os_reg.view<Components::ReAnnounceTimer>().each([&os_reg, &cr, &neep, &to_remove, delta](Object ov, Components::ReAnnounceTimer& rat) {
|
|
||||||
ObjectHandle o{os_reg, ov};
|
|
||||||
// if no known targets, or no hash, remove
|
|
||||||
if (!o.all_of<Components::AnnounceTargets, Components::FT1InfoSHA1Hash>()) {
|
|
||||||
to_remove.push_back(ov);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: pause
|
|
||||||
//// if paused -> remove
|
|
||||||
//if (o.all_of<Message::Components::Transfer::TagPaused>()) {
|
|
||||||
// to_remove.push_back(ov);
|
|
||||||
// return;
|
|
||||||
//}
|
|
||||||
|
|
||||||
// // if not downloading or info incomplete -> remove
|
|
||||||
//if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1Hash, Components::AnnounceTargets>()) {
|
|
||||||
// if not downloading AND info complete -> remove
|
|
||||||
if (!o.all_of<Components::FT1ChunkSHA1Cache>() && o.all_of<Components::FT1InfoSHA1Data>()) {
|
|
||||||
to_remove.push_back(ov);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
|
|
||||||
// transfer done, we stop announcing
|
|
||||||
to_remove.push_back(ov);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// update all timers
|
|
||||||
rat.timer -= delta;
|
|
||||||
|
|
||||||
// send announces
|
|
||||||
if (rat.timer <= 0.f) {
|
|
||||||
rat.reset(); // exponential back-off
|
|
||||||
|
|
||||||
std::vector<uint8_t> announce_id;
|
|
||||||
const uint32_t file_kind = static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_INFO);
|
|
||||||
for (size_t i = 0; i < sizeof(file_kind); i++) {
|
|
||||||
announce_id.push_back((file_kind>>(i*8)) & 0xff);
|
|
||||||
}
|
|
||||||
assert(o.all_of<Components::FT1InfoSHA1Hash>());
|
|
||||||
const auto& info_hash = o.get<Components::FT1InfoSHA1Hash>().hash;
|
|
||||||
announce_id.insert(announce_id.cend(), info_hash.cbegin(), info_hash.cend());
|
|
||||||
|
|
||||||
for (const auto cv : o.get<Components::AnnounceTargets>().targets) {
|
|
||||||
if (cr.all_of<Contact::Components::ToxGroupPeerEphemeral>(cv)) {
|
|
||||||
// private ?
|
|
||||||
const auto [group_number, peer_number] = cr.get<Contact::Components::ToxGroupPeerEphemeral>(cv);
|
|
||||||
neep.send_pc1_announce(group_number, peer_number, announce_id.data(), announce_id.size());
|
|
||||||
} else if (cr.all_of<Contact::Components::ToxGroupEphemeral>(cv)) {
|
|
||||||
// public
|
|
||||||
const auto group_number = cr.get<Contact::Components::ToxGroupEphemeral>(cv).group_number;
|
|
||||||
neep.send_all_pc1_announce(group_number, announce_id.data(), announce_id.size());
|
|
||||||
} else {
|
|
||||||
assert(false && "we dont know how to announce to this target");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
for (const auto ov : to_remove) {
|
|
||||||
os_reg.remove<Components::ReAnnounceTimer>(ov);
|
|
||||||
// we keep the announce target list around (if it exists)
|
|
||||||
// TODO: should we make the target list more generic?
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: how to handle unpause?
|
|
||||||
}
|
|
||||||
|
|
||||||
} // Systems
|
|
||||||
|
|
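The announce payload built above is the 4-byte file kind (HASH_SHA1_INFO, little-endian) followed by the raw SHA1 info hash. A self-contained sketch of the same packing, assuming a 20-byte hash vector:

#include <cstddef>
#include <cstdint>
#include <vector>
// Illustrative packing: 4 bytes little-endian kind + info hash bytes.
std::vector<uint8_t> make_announce_id(const uint32_t file_kind, const std::vector<uint8_t>& info_hash) {
	std::vector<uint8_t> announce_id;
	for (size_t i = 0; i < sizeof(file_kind); i++) {
		announce_id.push_back((file_kind >> (i*8)) & 0xff);
	}
	announce_id.insert(announce_id.cend(), info_hash.cbegin(), info_hash.cend());
	return announce_id;
}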
@ -1,17 +0,0 @@
#pragma once

#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/ngc_ext/ngcext.hpp>

namespace Systems {

void re_announce(
	ObjectRegistry& os_reg,
	Contact3Registry& cr,
	NGCEXTEventProvider& neep,
	const float delta
);

} // Systems
@ -1,131 +0,0 @@
|
|||||||
#include "./receiving_transfers.hpp"
|
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
void ReceivingTransfers::tick(float delta) {
|
|
||||||
for (auto peer_it = _data.begin(); peer_it != _data.end();) {
|
|
||||||
for (auto it = peer_it->second.begin(); it != peer_it->second.end();) {
|
|
||||||
it->second.time_since_activity += delta;
|
|
||||||
|
|
||||||
// if we have not heard for 60sec, timeout
|
|
||||||
if (it->second.time_since_activity >= 60.f) {
|
|
||||||
std::cerr << "SHA1_NGCFT1 warning: receiving tansfer timed out " << "." << int(it->first) << "\n";
|
|
||||||
// TODO: if info, requeue? or just keep the timer comp? - no, timer comp will continue ticking, even if loading
|
|
||||||
//it->second.v
|
|
||||||
it = peer_it->second.erase(it);
|
|
||||||
} else {
|
|
||||||
it++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (peer_it->second.empty()) {
|
|
||||||
// cleanup unused peers too aggressive?
|
|
||||||
peer_it = _data.erase(peer_it);
|
|
||||||
} else {
|
|
||||||
peer_it++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ReceivingTransfers::Entry& ReceivingTransfers::emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info) {
|
|
||||||
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
|
|
||||||
ent.v = info;
|
|
||||||
return ent;
|
|
||||||
}
|
|
||||||
|
|
||||||
ReceivingTransfers::Entry& ReceivingTransfers::emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk) {
|
|
||||||
assert(!chunk.chunk_indices.empty());
|
|
||||||
assert(!containsPeerChunk(group_number, peer_number, chunk.content, chunk.chunk_indices.front()));
|
|
||||||
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
|
|
||||||
ent.v = chunk;
|
|
||||||
return ent;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool ReceivingTransfers::containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return it->second.count(transfer_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool ReceivingTransfers::containsChunk(ObjectHandle o, size_t chunk_idx) const {
|
|
||||||
for (const auto& [_, p] : _data) {
|
|
||||||
for (const auto& [_2, v] : p) {
|
|
||||||
if (!v.isChunk()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto& c = v.getChunk();
|
|
||||||
if (c.content != o) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const auto idx : c.chunk_indices) {
|
|
||||||
if (idx == chunk_idx) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool ReceivingTransfers::containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const auto& [_, v] : it->second) {
|
|
||||||
if (!v.isChunk()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto& c = v.getChunk();
|
|
||||||
if (c.content != o) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const auto idx : c.chunk_indices) {
|
|
||||||
if (idx == chunk_idx) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
void ReceivingTransfers::removePeer(uint32_t group_number, uint32_t peer_number) {
|
|
||||||
_data.erase(combine_ids(group_number, peer_number));
|
|
||||||
}
|
|
||||||
|
|
||||||
void ReceivingTransfers::removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
it->second.erase(transfer_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ReceivingTransfers::size(void) const {
|
|
||||||
size_t count {0};
|
|
||||||
for (const auto& [_, p] : _data) {
|
|
||||||
count += p.size();
|
|
||||||
}
|
|
||||||
return count;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t ReceivingTransfers::sizePeer(uint32_t group_number, uint32_t peer_number) const {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
return it->second.size();
|
|
||||||
}
|
|
||||||
|
|
@ -1,66 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <solanaceae/object_store/object_store.hpp>
|
|
||||||
|
|
||||||
#include <entt/container/dense_map.hpp>
|
|
||||||
|
|
||||||
#include "./util.hpp"
|
|
||||||
|
|
||||||
#include <cstdint>
|
|
||||||
#include <variant>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
struct ReceivingTransfers {
|
|
||||||
struct Entry {
|
|
||||||
struct Info {
|
|
||||||
ObjectHandle content;
|
|
||||||
// copy of info data
|
|
||||||
// too large?
|
|
||||||
std::vector<uint8_t> info_data;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Chunk {
|
|
||||||
ObjectHandle content;
|
|
||||||
std::vector<size_t> chunk_indices;
|
|
||||||
// or data?
|
|
||||||
// if memmapped, this would be just a pointer
|
|
||||||
};
|
|
||||||
|
|
||||||
std::variant<Info, Chunk> v;
|
|
||||||
|
|
||||||
float time_since_activity {0.f};
|
|
||||||
|
|
||||||
bool isInfo(void) const { return std::holds_alternative<Info>(v); }
|
|
||||||
bool isChunk(void) const { return std::holds_alternative<Chunk>(v); }
|
|
||||||
|
|
||||||
Info& getInfo(void) { return std::get<Info>(v); }
|
|
||||||
const Info& getInfo(void) const { return std::get<Info>(v); }
|
|
||||||
Chunk& getChunk(void) { return std::get<Chunk>(v); }
|
|
||||||
const Chunk& getChunk(void) const { return std::get<Chunk>(v); }
|
|
||||||
};
|
|
||||||
|
|
||||||
// key is groupid + peerid
|
|
||||||
// TODO: replace with contact
|
|
||||||
//using ReceivingTransfers = entt::dense_map<uint64_t, entt::dense_map<uint8_t, ReceivingTransferE>>;
|
|
||||||
entt::dense_map<uint64_t, entt::dense_map<uint8_t, Entry>> _data;
|
|
||||||
|
|
||||||
void tick(float delta);
|
|
||||||
|
|
||||||
Entry& emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info);
|
|
||||||
Entry& emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk);
|
|
||||||
|
|
||||||
bool containsPeer(uint32_t group_number, uint32_t peer_number) const { return _data.count(combine_ids(group_number, peer_number)); }
|
|
||||||
bool containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const;
|
|
||||||
bool containsChunk(ObjectHandle o, size_t chunk_idx) const;
|
|
||||||
bool containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const;
|
|
||||||
|
|
||||||
auto& getPeer(uint32_t group_number, uint32_t peer_number) { return _data.at(combine_ids(group_number, peer_number)); }
|
|
||||||
auto& getTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) { return getPeer(group_number, peer_number).at(transfer_id); }
|
|
||||||
|
|
||||||
void removePeer(uint32_t group_number, uint32_t peer_number);
|
|
||||||
void removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
|
|
||||||
|
|
||||||
size_t size(void) const;
|
|
||||||
size_t sizePeer(uint32_t group_number, uint32_t peer_number) const;
|
|
||||||
};
|
|
||||||
|
|
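ReceivingTransfers keys its outer map by combine_ids(group_number, peer_number) from util.hpp, which is not shown in this diff. A plausible shape for that helper, purely an assumption about util.hpp, would pack both 32-bit ids into one 64-bit key:

#include <cstdint>
// Assumed shape of combine_ids; the real util.hpp implementation may differ.
constexpr uint64_t combine_ids(const uint32_t group_number, const uint32_t peer_number) {
	return (uint64_t(group_number) << 32) | uint64_t(peer_number);
}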
@ -1,128 +0,0 @@
|
|||||||
#include "./sending_transfers.hpp"
|
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
void SendingTransfers::tick(float delta) {
|
|
||||||
for (auto peer_it = _data.begin(); peer_it != _data.end();) {
|
|
||||||
for (auto it = peer_it->second.begin(); it != peer_it->second.end();) {
|
|
||||||
it->second.time_since_activity += delta;
|
|
||||||
|
|
||||||
// if we have not heard for 10min, timeout (lower level event on real timeout)
|
|
||||||
// (2min was too little, so it seems)
|
|
||||||
// TODO: do we really need this if we get events?
|
|
||||||
// FIXME: disabled for now, we are trusting ngcft1 for now
|
|
||||||
if (false && it->second.time_since_activity >= 60.f*10.f) {
|
|
||||||
std::cerr << "SHA1_NGCFT1 warning: sending tansfer timed out " << "." << int(it->first) << "\n";
|
|
||||||
assert(false);
|
|
||||||
it = peer_it->second.erase(it);
|
|
||||||
} else {
|
|
||||||
it++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (peer_it->second.empty()) {
|
|
||||||
// cleanup unused peers too aggressive?
|
|
||||||
peer_it = _data.erase(peer_it);
|
|
||||||
} else {
|
|
||||||
peer_it++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
SendingTransfers::Entry& SendingTransfers::emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info) {
|
|
||||||
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
|
|
||||||
ent.v = info;
|
|
||||||
return ent;
|
|
||||||
}
|
|
||||||
|
|
||||||
SendingTransfers::Entry& SendingTransfers::emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk) {
|
|
||||||
assert(!containsPeerChunk(group_number, peer_number, chunk.content, chunk.chunk_index));
|
|
||||||
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
|
|
||||||
ent.v = chunk;
|
|
||||||
return ent;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool SendingTransfers::containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
return it->second.count(transfer_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool SendingTransfers::containsChunk(ObjectHandle o, size_t chunk_idx) const {
|
|
||||||
for (const auto& [_, p] : _data) {
|
|
||||||
for (const auto& [_2, v] : p) {
|
|
||||||
if (!v.isChunk()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto& c = v.getChunk();
|
|
||||||
if (c.content != o) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (c.chunk_index == chunk_idx) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool SendingTransfers::containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const auto& [_, v] : it->second) {
|
|
||||||
if (!v.isChunk()) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto& c = v.getChunk();
|
|
||||||
if (c.content != o) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (c.chunk_index == chunk_idx) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
void SendingTransfers::removePeer(uint32_t group_number, uint32_t peer_number) {
|
|
||||||
_data.erase(combine_ids(group_number, peer_number));
|
|
||||||
}
|
|
||||||
|
|
||||||
void SendingTransfers::removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
it->second.erase(transfer_id);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t SendingTransfers::size(void) const {
|
|
||||||
size_t count {0};
|
|
||||||
for (const auto& [_, p] : _data) {
|
|
||||||
count += p.size();
|
|
||||||
}
|
|
||||||
return count;
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t SendingTransfers::sizePeer(uint32_t group_number, uint32_t peer_number) const {
|
|
||||||
auto it = _data.find(combine_ids(group_number, peer_number));
|
|
||||||
if (it == _data.end()) {
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
return it->second.size();
|
|
||||||
}
|
|
@ -1,67 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <solanaceae/object_store/object_store.hpp>
|
|
||||||
|
|
||||||
#include <entt/container/dense_map.hpp>
|
|
||||||
|
|
||||||
#include "./util.hpp"
|
|
||||||
|
|
||||||
#include <cstdint>
|
|
||||||
#include <variant>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
struct SendingTransfers {
|
|
||||||
struct Entry {
|
|
||||||
struct Info {
|
|
||||||
// copy of info data
|
|
||||||
// too large?
|
|
||||||
std::vector<uint8_t> info_data;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct Chunk {
|
|
||||||
ObjectHandle content;
|
|
||||||
size_t chunk_index; // <.< remove offset_into_file
|
|
||||||
//uint64_t offset_into_file;
|
|
||||||
// or data?
|
|
||||||
// if memmapped, this would be just a pointer
|
|
||||||
};
|
|
||||||
|
|
||||||
std::variant<Info, Chunk> v;
|
|
||||||
|
|
||||||
float time_since_activity {0.f};
|
|
||||||
|
|
||||||
bool isInfo(void) const { return std::holds_alternative<Info>(v); }
|
|
||||||
bool isChunk(void) const { return std::holds_alternative<Chunk>(v); }
|
|
||||||
|
|
||||||
Info& getInfo(void) { return std::get<Info>(v); }
|
|
||||||
const Info& getInfo(void) const { return std::get<Info>(v); }
|
|
||||||
Chunk& getChunk(void) { return std::get<Chunk>(v); }
|
|
||||||
const Chunk& getChunk(void) const { return std::get<Chunk>(v); }
|
|
||||||
|
|
||||||
};
|
|
||||||
|
|
||||||
// key is groupid + peerid
|
|
||||||
// TODO: replace with contact
|
|
||||||
entt::dense_map<uint64_t, entt::dense_map<uint8_t, Entry>> _data;
|
|
||||||
|
|
||||||
void tick(float delta);
|
|
||||||
|
|
||||||
Entry& emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info);
|
|
||||||
Entry& emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk);
|
|
||||||
|
|
||||||
bool containsPeer(uint32_t group_number, uint32_t peer_number) const { return _data.count(combine_ids(group_number, peer_number)); }
|
|
||||||
bool containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const;
|
|
||||||
// less reliable, since we don't keep the list of chunk indices
|
|
||||||
bool containsChunk(ObjectHandle o, size_t chunk_idx) const;
|
|
||||||
bool containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const;
|
|
||||||
|
|
||||||
auto& getPeer(uint32_t group_number, uint32_t peer_number) { return _data.at(combine_ids(group_number, peer_number)); }
|
|
||||||
auto& getTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) { return getPeer(group_number, peer_number).at(transfer_id); }
|
|
||||||
|
|
||||||
void removePeer(uint32_t group_number, uint32_t peer_number);
|
|
||||||
void removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
|
|
||||||
|
|
||||||
size_t size(void) const;
|
|
||||||
size_t sizePeer(uint32_t group_number, uint32_t peer_number) const;
|
|
||||||
};
|
|
||||||
|
|
File diff suppressed because it is too large
@ -2,7 +2,6 @@
|
|||||||
|
|
||||||
// solanaceae port of sha1 fts for NGCFT1
|
// solanaceae port of sha1 fts for NGCFT1
|
||||||
|
|
||||||
#include <solanaceae/object_store/object_store.hpp>
|
|
||||||
#include <solanaceae/contact/contact_model3.hpp>
|
#include <solanaceae/contact/contact_model3.hpp>
|
||||||
#include <solanaceae/message3/registry_message_model.hpp>
|
#include <solanaceae/message3/registry_message_model.hpp>
|
||||||
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
|
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
|
||||||
@ -10,113 +9,129 @@
|
|||||||
#include <solanaceae/ngc_ft1/ngcft1.hpp>
|
#include <solanaceae/ngc_ft1/ngcft1.hpp>
|
||||||
|
|
||||||
#include "./ft1_sha1_info.hpp"
|
#include "./ft1_sha1_info.hpp"
|
||||||
#include "./sending_transfers.hpp"
|
|
||||||
#include "./receiving_transfers.hpp"
|
|
||||||
|
|
||||||
#include "./backends/sha1_mapped_filesystem.hpp"
|
|
||||||
|
|
||||||
|
#include <entt/entity/registry.hpp>
|
||||||
|
#include <entt/entity/handle.hpp>
|
||||||
#include <entt/container/dense_map.hpp>
|
#include <entt/container/dense_map.hpp>
|
||||||
|
|
||||||
|
#include <variant>
|
||||||
#include <random>
|
#include <random>
|
||||||
#include <chrono>
|
#include <atomic>
|
||||||
|
#include <mutex>
|
||||||
|
#include <list>
|
||||||
|
|
||||||
class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public ObjectStoreEventI, public NGCFT1EventI, public NGCEXTEventI {
|
enum class Content : uint32_t {};
|
||||||
ObjectStore2& _os;
|
using ContentRegistry = entt::basic_registry<Content>;
|
||||||
ObjectStore2::SubscriptionReference _os_sr;
|
using ContentHandle = entt::basic_handle<ContentRegistry>;
|
||||||
// TODO: backend abstraction
|
|
||||||
|
class SHA1_NGCFT1 : public RegistryMessageModelEventI, public NGCFT1EventI {
|
||||||
Contact3Registry& _cr;
|
Contact3Registry& _cr;
|
||||||
RegistryMessageModelI& _rmm;
|
RegistryMessageModel& _rmm;
|
||||||
RegistryMessageModelI::SubscriptionReference _rmm_sr;
|
|
||||||
NGCFT1& _nft;
|
NGCFT1& _nft;
|
||||||
NGCFT1::SubscriptionReference _nft_sr;
|
|
||||||
ToxContactModel2& _tcm;
|
ToxContactModel2& _tcm;
|
||||||
ToxEventProviderI& _tep;
|
|
||||||
ToxEventProviderI::SubscriptionReference _tep_sr;
|
|
||||||
NGCEXTEventProvider& _neep;
|
|
||||||
NGCEXTEventProvider::SubscriptionReference _neep_sr;
|
|
||||||
|
|
||||||
Backends::SHA1MappedFilesystem _mfb;
|
|
||||||
|
|
||||||
bool _object_update_lock {false};
|
|
||||||
|
|
||||||
std::minstd_rand _rng {1337*11};
|
std::minstd_rand _rng {1337*11};
|
||||||
|
|
||||||
using clock = std::chrono::steady_clock;
|
// registry per group?
|
||||||
clock::time_point _time_start_offset {clock::now()};
|
ContentRegistry _contentr;
|
||||||
float getTimeNow(void) const {
|
|
||||||
return std::chrono::duration<float>{clock::now() - _time_start_offset}.count();
|
|
||||||
}
|
|
||||||
|
|
||||||
// limit this to each group?
|
// limit this to each group?
|
||||||
entt::dense_map<SHA1Digest, ObjectHandle> _info_to_content;
|
entt::dense_map<SHA1Digest, ContentHandle> _info_to_content;
|
||||||
|
|
||||||
// sha1 chunk index
|
// sha1 chunk index
|
||||||
// TODO: optimize lookup
|
// TODO: optimize lookup
|
||||||
// TODO: multiple contents. hashes might be unique, but data is not
|
// TODO: multiple contents. hashes might be unique, but data is not
|
||||||
entt::dense_map<SHA1Digest, ObjectHandle> _chunks;
|
entt::dense_map<SHA1Digest, ContentHandle> _chunks;
|
||||||
|
|
||||||
// group_number, peer_number, content, chunk_hash, timer
|
// group_number, peer_number, content, chunk_hash, timer
|
||||||
std::deque<std::tuple<uint32_t, uint32_t, ObjectHandle, SHA1Digest, float>> _queue_requested_chunk;
|
std::deque<std::tuple<uint32_t, uint32_t, ContentHandle, SHA1Digest, float>> _queue_requested_chunk;
|
||||||
//void queueUpRequestInfo(uint32_t group_number, uint32_t peer_number, const SHA1Digest& hash);
|
//void queueUpRequestInfo(uint32_t group_number, uint32_t peer_number, const SHA1Digest& hash);
|
||||||
void queueUpRequestChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle content, const SHA1Digest& hash);
|
void queueUpRequestChunk(uint32_t group_number, uint32_t peer_number, ContentHandle content, const SHA1Digest& hash);
|
||||||
|
|
||||||
SendingTransfers _sending_transfers;
|
struct SendingTransfer {
|
||||||
ReceivingTransfers _receiving_transfers;
|
struct Info {
|
||||||
|
// copy of info data
|
||||||
|
// too large?
|
||||||
|
std::vector<uint8_t> info_data;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct Chunk {
|
||||||
|
ContentHandle content;
|
||||||
|
size_t chunk_index; // <.< remove offset_into_file
|
||||||
|
//uint64_t offset_into_file;
|
||||||
|
// or data?
|
||||||
|
// if memmapped, this would be just a pointer
|
||||||
|
};
|
||||||
|
|
||||||
|
std::variant<Info, Chunk> v;
|
||||||
|
|
||||||
|
float time_since_activity {0.f};
|
||||||
|
};
|
||||||
|
// key is groupid + peerid
|
||||||
|
entt::dense_map<uint64_t, entt::dense_map<uint8_t, SendingTransfer>> _sending_transfers;
|
||||||
|
|
||||||
|
struct ReceivingTransfer {
|
||||||
|
struct Info {
|
||||||
|
ContentHandle content;
|
||||||
|
// copy of info data
|
||||||
|
// too large?
|
||||||
|
std::vector<uint8_t> info_data;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct Chunk {
|
||||||
|
ContentHandle content;
|
||||||
|
std::vector<size_t> chunk_indices;
|
||||||
|
// or data?
|
||||||
|
// if memmapped, this would be just a pointer
|
||||||
|
};
|
||||||
|
|
||||||
|
std::variant<Info, Chunk> v;
|
||||||
|
|
||||||
|
float time_since_activity {0.f};
|
||||||
|
};
|
||||||
|
// key is groupid + peerid
|
||||||
|
entt::dense_map<uint64_t, entt::dense_map<uint8_t, ReceivingTransfer>> _receiving_transfers;
|
||||||
|
|
||||||
// makes request rotate around open content
|
// makes request rotate around open content
|
||||||
std::deque<ObjectHandle> _queue_content_want_info;
|
std::deque<ContentHandle> _queue_content_want_info;
|
||||||
|
std::deque<ContentHandle> _queue_content_want_chunk;
|
||||||
|
|
||||||
struct QBitsetEntry {
|
std::atomic_bool _info_builder_dirty {false};
|
||||||
Contact3Handle c;
|
std::mutex _info_builder_queue_mutex;
|
||||||
ObjectHandle o;
|
//struct InfoBuilderEntry {
|
||||||
};
|
//// called on completion on the iterate thread
|
||||||
std::deque<QBitsetEntry> _queue_send_bitset;
|
//// (owning)
|
||||||
|
//std::function<void(void)> fn;
|
||||||
|
//};
|
||||||
|
using InfoBuilderEntry = std::function<void(void)>;
|
||||||
|
std::list<InfoBuilderEntry> _info_builder_queue;
|
||||||
|
|
||||||
// FIXME: workaround missing contact events
|
static uint64_t combineIds(const uint32_t group_number, const uint32_t peer_number);
|
||||||
// only used on peer exit (no, also used to quicken lookups)
|
|
||||||
entt::dense_map<uint64_t, Contact3Handle> _tox_peer_to_contact;
|
|
||||||
|
|
||||||
// reset every iterate; kept here as an allocation optimization
|
void updateMessages(ContentHandle ce);
|
||||||
entt::dense_map<Contact3, size_t> _peer_open_requests;
|
|
||||||
|
|
||||||
void updateMessages(ObjectHandle ce);
|
std::optional<std::pair<uint32_t, uint32_t>> selectPeerForRequest(ContentHandle ce);
|
||||||
|
|
||||||
std::optional<std::pair<uint32_t, uint32_t>> selectPeerForRequest(ObjectHandle ce);
|
|
||||||
|
|
||||||
void queueBitsetSendFull(Contact3Handle c, ObjectHandle o);
|
|
||||||
|
|
||||||
File2I* objGetFile2Write(ObjectHandle o);
|
|
||||||
File2I* objGetFile2Read(ObjectHandle o);
|
|
||||||
|
|
||||||
public: // TODO: config
|
public: // TODO: config
|
||||||
bool _udp_only {false};
|
bool _udp_only {false};
|
||||||
|
|
||||||
size_t _max_concurrent_in {4}; // info only
|
size_t _max_concurrent_in {4};
|
||||||
size_t _max_concurrent_out {4*10}; // HACK: allow "ideal" number for 10 peers
|
size_t _max_concurrent_out {6};
|
||||||
|
// TODO: probably also includes running transfers rn (meh)
|
||||||
|
size_t _max_pending_requests {32}; // per content
|
||||||
|
|
||||||
public:
|
public:
|
||||||
SHA1_NGCFT1(
|
SHA1_NGCFT1(
|
||||||
ObjectStore2& os,
|
|
||||||
Contact3Registry& cr,
|
Contact3Registry& cr,
|
||||||
RegistryMessageModelI& rmm,
|
RegistryMessageModel& rmm,
|
||||||
NGCFT1& nft,
|
NGCFT1& nft,
|
||||||
ToxContactModel2& tcm,
|
ToxContactModel2& tcm
|
||||||
ToxEventProviderI& tep,
|
|
||||||
NGCEXTEventProvider& neep
|
|
||||||
);
|
);
|
||||||
|
|
||||||
float iterate(float delta);
|
void iterate(float delta);
|
||||||
|
|
||||||
void onSendFileHashFinished(ObjectHandle o, Message3Registry* reg_ptr, Contact3 c, uint64_t ts);
|
|
||||||
|
|
||||||
// construct the file part in a partially constructed message
|
|
||||||
ObjectHandle constructFileMessageInPlace(Message3Handle msg, NGCFT1_file_kind file_kind, ByteSpan file_id);
|
|
||||||
|
|
||||||
protected: // rmm events (actions)
|
protected: // rmm events (actions)
|
||||||
bool sendFilePath(const Contact3 c, std::string_view file_name, std::string_view file_path) override;
|
bool onEvent(const Message::Events::MessageUpdated&) override;
|
||||||
|
|
||||||
protected: // os events (actions)
|
|
||||||
bool onEvent(const ObjectStore::Events::ObjectUpdate&) override;
|
|
||||||
|
|
||||||
protected: // events
|
protected: // events
|
||||||
bool onEvent(const Events::NGCFT1_recv_request&) override;
|
bool onEvent(const Events::NGCFT1_recv_request&) override;
|
||||||
@ -127,13 +142,6 @@ class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public
|
|||||||
bool onEvent(const Events::NGCFT1_send_done&) override;
|
bool onEvent(const Events::NGCFT1_send_done&) override;
|
||||||
bool onEvent(const Events::NGCFT1_recv_message&) override;
|
bool onEvent(const Events::NGCFT1_recv_message&) override;
|
||||||
|
|
||||||
bool onToxEvent(const Tox_Event_Group_Peer_Join* e) override;
|
bool sendFilePath(const Contact3 c, std::string_view file_name, std::string_view file_path) override;
|
||||||
bool onToxEvent(const Tox_Event_Group_Peer_Exit* e) override;
|
|
||||||
|
|
||||||
bool onEvent(const Events::NGCEXT_ft1_have&) override;
|
|
||||||
bool onEvent(const Events::NGCEXT_ft1_bitset&) override;
|
|
||||||
bool onEvent(const Events::NGCEXT_ft1_have_all&) override;
|
|
||||||
|
|
||||||
bool onEvent(const Events::NGCEXT_pc1_announce&) override;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1,118 +0,0 @@
|
|||||||
#include "./transfer_stats_systems.hpp"
|
|
||||||
|
|
||||||
#include "./components.hpp"
|
|
||||||
#include <solanaceae/object_store/meta_components_file.hpp>
|
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
namespace Systems {
|
|
||||||
|
|
||||||
void transfer_tally_update(ObjectRegistry& os_reg, const float time_now) {
|
|
||||||
std::vector<Object> tally_to_remove;
|
|
||||||
// for each tally -> stats separated
|
|
||||||
os_reg.view<Components::TransferStatsTally>().each([&os_reg, time_now, &tally_to_remove](const auto ov, Components::TransferStatsTally& tally_comp) {
|
|
||||||
// for each peer
|
|
||||||
std::vector<Contact3> to_remove;
|
|
||||||
for (auto&& [peer_c, peer] : tally_comp.tally) {
|
|
||||||
auto& tss = os_reg.get_or_emplace<Components::TransferStatsSeparated>(ov).stats;
|
|
||||||
|
|
||||||
// special logic
|
|
||||||
// if newest older than 2sec
|
|
||||||
// discard
|
|
||||||
|
|
||||||
if (!peer.recently_sent.empty()) {
|
|
||||||
if (time_now - peer.recently_sent.back().time_point >= 2.f) {
|
|
||||||
// clean up stale
|
|
||||||
auto peer_in_stats_it = tss.find(peer_c);
|
|
||||||
if (peer_in_stats_it != tss.end()) {
|
|
||||||
peer_in_stats_it->second.rate_up = 0.f;
|
|
||||||
}
|
|
||||||
|
|
||||||
peer.recently_sent.clear();
|
|
||||||
if (peer.recently_received.empty()) {
|
|
||||||
to_remove.push_back(peer_c);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// else trim too old front
|
|
||||||
peer.trimSent(time_now);
|
|
||||||
|
|
||||||
size_t tally_bytes {0u};
|
|
||||||
for (auto& [time, bytes, accounted] : peer.recently_sent) {
|
|
||||||
if (!accounted) {
|
|
||||||
tss[peer_c].total_up += bytes;
|
|
||||||
accounted = true;
|
|
||||||
}
|
|
||||||
tally_bytes += bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
tss[peer_c].rate_up = tally_bytes / (time_now - peer.recently_sent.front().time_point + 0.00001f);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!peer.recently_received.empty()) {
|
|
||||||
if (time_now - peer.recently_received.back().time_point >= 2.f) {
|
|
||||||
// clean up stale
|
|
||||||
auto peer_in_stats_it = tss.find(peer_c);
|
|
||||||
if (peer_in_stats_it != tss.end()) {
|
|
||||||
peer_in_stats_it->second.rate_down = 0.f;
|
|
||||||
}
|
|
||||||
|
|
||||||
peer.recently_received.clear();
|
|
||||||
if (peer.recently_sent.empty()) {
|
|
||||||
to_remove.push_back(peer_c);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// else trim too old front
|
|
||||||
peer.trimReceived(time_now);
|
|
||||||
|
|
||||||
size_t tally_bytes {0u};
|
|
||||||
for (auto& [time, bytes, accounted] : peer.recently_received) {
|
|
||||||
if (!accounted) {
|
|
||||||
tss[peer_c].total_down += bytes;
|
|
||||||
accounted = true;
|
|
||||||
}
|
|
||||||
tally_bytes += bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
tss[peer_c].rate_down = tally_bytes / (time_now - peer.recently_received.front().time_point + 0.00001f);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const auto c : to_remove) {
|
|
||||||
tally_comp.tally.erase(c);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (tally_comp.tally.empty()) {
|
|
||||||
tally_to_remove.push_back(ov);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// for each stats separated -> stats (total)
|
|
||||||
os_reg.view<Components::TransferStatsSeparated, Components::TransferStatsTally>().each([&os_reg](const auto ov, Components::TransferStatsSeparated& tss_comp, const auto&) {
|
|
||||||
auto& stats = os_reg.get_or_emplace<ObjComp::Ephemeral::File::TransferStats>(ov);
|
|
||||||
stats = {}; // reset
|
|
||||||
|
|
||||||
for (const auto& [_, peer_stats] : tss_comp.stats) {
|
|
||||||
stats.rate_up += peer_stats.rate_up;
|
|
||||||
stats.rate_down += peer_stats.rate_down;
|
|
||||||
stats.total_up += peer_stats.total_up;
|
|
||||||
stats.total_down += peer_stats.total_down;
|
|
||||||
}
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
std::cout << "updated stats:\n"
|
|
||||||
<< " rate u:" << stats.rate_up/1024 << "KiB/s d:" << stats.rate_down/1024 << "KiB/s\n"
|
|
||||||
<< " total u:" << stats.total_up/1024 << "KiB d:" << stats.total_down/1024 << "KiB\n"
|
|
||||||
;
|
|
||||||
#endif
|
|
||||||
});
|
|
||||||
|
|
||||||
|
|
||||||
for (const auto ov : tally_to_remove) {
|
|
||||||
os_reg.remove<Components::TransferStatsTally>(ov);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // Systems
|
|
||||||
|
|
@ -1,11 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <solanaceae/object_store/object_store.hpp>
|
|
||||||
|
|
||||||
namespace Systems {
|
|
||||||
|
|
||||||
// time only needs to be relative
|
|
||||||
void transfer_tally_update(ObjectRegistry& os_reg, const float time_now);
|
|
||||||
|
|
||||||
} // Systems
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <cstdint>
|
|
||||||
|
|
||||||
inline static uint64_t combine_ids(const uint32_t group_number, const uint32_t peer_number) {
|
|
||||||
return (uint64_t(group_number) << 32) | peer_number;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline static void decompose_ids(const uint64_t combined_id, uint32_t& group_number, uint32_t& peer_number) {
|
|
||||||
group_number = combined_id >> 32;
|
|
||||||
peer_number = combined_id & 0xffffffff;
|
|
||||||
}
|
|
||||||
|
|
@ -1,532 +0,0 @@
|
|||||||
#include "./ngc_hs2_rizzler.hpp"
|
|
||||||
|
|
||||||
#include <solanaceae/contact/components.hpp>
|
|
||||||
#include <solanaceae/tox_contacts/components.hpp>
|
|
||||||
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
|
|
||||||
#include <solanaceae/message3/contact_components.hpp>
|
|
||||||
#include <solanaceae/message3/registry_message_model.hpp>
|
|
||||||
#include <solanaceae/message3/components.hpp>
|
|
||||||
#include <solanaceae/tox_messages/msg_components.hpp>
|
|
||||||
#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>
|
|
||||||
|
|
||||||
// TODO: move somewhere else?
|
|
||||||
#include <solanaceae/ngc_ft1_sha1/util.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/util/span.hpp>
|
|
||||||
|
|
||||||
#include <entt/entity/entity.hpp>
|
|
||||||
|
|
||||||
#include <nlohmann/json.hpp>
|
|
||||||
|
|
||||||
#include "./serl.hpp"
|
|
||||||
|
|
||||||
#include <cstdint>
|
|
||||||
#include <deque>
|
|
||||||
#include <cstring>
|
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
// TODO: move to own file
|
|
||||||
namespace Components {
|
|
||||||
struct RequestedChatLogs {
|
|
||||||
struct Entry {
|
|
||||||
uint64_t ts_start;
|
|
||||||
uint64_t ts_end;
|
|
||||||
//std::vector<uint8_t> fid; // ?
|
|
||||||
};
|
|
||||||
std::deque<Entry> list;
|
|
||||||
bool contains(uint64_t ts_start, uint64_t ts_end);
|
|
||||||
void addRequest(uint64_t ts_start, uint64_t ts_end);
|
|
||||||
};
|
|
||||||
|
|
||||||
struct RunningChatLogs {
|
|
||||||
struct Entry {
|
|
||||||
uint64_t ts_start;
|
|
||||||
uint64_t ts_end;
|
|
||||||
std::vector<uint8_t> data;
|
|
||||||
float last_activity {0.f};
|
|
||||||
};
|
|
||||||
// list of transfers
|
|
||||||
entt::dense_map<uint8_t, Entry> list;
|
|
||||||
};
|
|
||||||
|
|
||||||
bool RequestedChatLogs::contains(uint64_t ts_start, uint64_t ts_end) {
|
|
||||||
auto it = std::find_if(list.cbegin(), list.cend(), [ts_start, ts_end](const auto& value) {
|
|
||||||
return value.ts_start == ts_start && value.ts_end == ts_end;
|
|
||||||
});
|
|
||||||
return it != list.cend();
|
|
||||||
}
|
|
||||||
|
|
||||||
void RequestedChatLogs::addRequest(uint64_t ts_start, uint64_t ts_end) {
|
|
||||||
if (contains(ts_start, ts_end)) {
|
|
||||||
return; // pre existing
|
|
||||||
}
|
|
||||||
list.push_back(Entry{ts_start, ts_end});
|
|
||||||
}
|
|
||||||
|
|
||||||
} // Components
|
|
||||||
|
|
||||||
// TODO: move to contact reg?
|
|
||||||
static Contact3 findContactByID(Contact3Registry& cr, const std::vector<uint8_t>& id) {
|
|
||||||
// TODO: id lookup table, this is very inefficient
|
|
||||||
for (const auto& [c_it, id_it] : cr.view<Contact::Components::ID>().each()) {
|
|
||||||
if (id == id_it.data) {
|
|
||||||
return c_it;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return entt::null;
|
|
||||||
}
|
|
||||||
|
|
||||||
NGCHS2Rizzler::NGCHS2Rizzler(
|
|
||||||
Contact3Registry& cr,
|
|
||||||
RegistryMessageModelI& rmm,
|
|
||||||
ToxContactModel2& tcm,
|
|
||||||
NGCFT1& nft,
|
|
||||||
ToxEventProviderI& tep,
|
|
||||||
SHA1_NGCFT1& sha1_nft
|
|
||||||
) :
|
|
||||||
_cr(cr),
|
|
||||||
_rmm(rmm),
|
|
||||||
_tcm(tcm),
|
|
||||||
_nft(nft),
|
|
||||||
_nftep_sr(_nft.newSubRef(this)),
|
|
||||||
_tep_sr(tep.newSubRef(this)),
|
|
||||||
_sha1_nft(sha1_nft)
|
|
||||||
{
|
|
||||||
_nftep_sr
|
|
||||||
.subscribe(NGCFT1_Event::recv_init)
|
|
||||||
.subscribe(NGCFT1_Event::recv_data)
|
|
||||||
.subscribe(NGCFT1_Event::recv_done)
|
|
||||||
;
|
|
||||||
_tep_sr
|
|
||||||
.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_PEER_JOIN)
|
|
||||||
;
|
|
||||||
}
|
|
||||||
|
|
||||||
NGCHS2Rizzler::~NGCHS2Rizzler(void) {
|
|
||||||
}
|
|
||||||
|
|
||||||
float NGCHS2Rizzler::iterate(float delta) {
|
|
||||||
for (auto it = _request_queue.begin(); it != _request_queue.end();) {
|
|
||||||
it->second.timer += delta;
|
|
||||||
|
|
||||||
if (it->second.timer < it->second.delay) {
|
|
||||||
it++;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const Contact3Handle c {_cr, it->first};
|
|
||||||
|
|
||||||
if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral>()) {
|
|
||||||
// peer no longer online
|
|
||||||
it = _request_queue.erase(it);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();
|
|
||||||
|
|
||||||
// now in sec
|
|
||||||
const uint64_t ts_now = Message::getTimeMS()/1000;
|
|
||||||
|
|
||||||
const uint64_t ts_start = ts_now;
|
|
||||||
const uint64_t ts_end = ts_now-(60*60*48);
|
|
||||||
|
|
||||||
if (sendRequest(group_number, peer_number, ts_start, ts_end)) {
|
|
||||||
// TODO: requeue
|
|
||||||
// TODO: segment
|
|
||||||
// TODO: dont request already received ranges
|
|
||||||
|
|
||||||
//// on success, requeue with longer delay (minutes)
|
|
||||||
|
|
||||||
//it->second.timer = 0.f;
|
|
||||||
//it->second.delay = _delay_next_request_min + _rng_dist(_rng)*_delay_next_request_add;
|
|
||||||
|
|
||||||
//// double the delay for overlap (9m-15m)
|
|
||||||
//// TODO: finetune
|
|
||||||
//it->second.sync_delta = uint8_t((it->second.delay/60.f)*2.f) + 1;
|
|
||||||
|
|
||||||
//std::cout << "ZOX #### requeued request in " << it->second.delay << "s\n";
|
|
||||||
|
|
||||||
auto& rcl = c.get_or_emplace<Components::RequestedChatLogs>();
|
|
||||||
rcl.addRequest(ts_start, ts_end);
|
|
||||||
} else {
|
|
||||||
// on failure, assume disconnected
|
|
||||||
}
|
|
||||||
|
|
||||||
// remove from request queue
|
|
||||||
it = _request_queue.erase(it);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 1000.f;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Rizzler::sendRequest(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint64_t ts_start, uint64_t ts_end
|
|
||||||
) {
|
|
||||||
std::cout << "NGCHS2Rizzler: sending request to " << group_number << ":" << peer_number << " (" << ts_start << "," << ts_end << ")\n";
|
|
||||||
|
|
||||||
// build fid
|
|
||||||
std::vector<uint8_t> fid;
|
|
||||||
fid.reserve(sizeof(uint64_t)+sizeof(uint64_t));
|
|
||||||
|
|
||||||
serlSimpleType(fid, ts_start);
|
|
||||||
serlSimpleType(fid, ts_end);
|
|
||||||
|
|
||||||
assert(fid.size() == sizeof(uint64_t)+sizeof(uint64_t));
|
|
||||||
|
|
||||||
return _nft.NGC_FT1_send_request_private(
|
|
||||||
group_number, peer_number,
|
|
||||||
(uint32_t)NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK,
|
|
||||||
fid.data(), fid.size() // fid
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
void NGCHS2Rizzler::handleMsgPack(Contact3Handle sync_by_c, const std::vector<uint8_t>& data) {
|
|
||||||
assert(sync_by_c);
|
|
||||||
|
|
||||||
auto* reg_ptr = _rmm.get(sync_by_c);
|
|
||||||
if (reg_ptr == nullptr) {
|
|
||||||
std::cerr << "NGCHS2Rizzler error: group without msg reg\n";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
Message3Registry& reg = *reg_ptr;
|
|
||||||
|
|
||||||
uint64_t now_ts = Message::getTimeMS();
|
|
||||||
|
|
||||||
std::cout << "NGCHS2Rizzler: start parsing msgpack chatlog from " << entt::to_integral(sync_by_c.entity()) << "\n";
|
|
||||||
try {
|
|
||||||
const auto j = nlohmann::json::from_msgpack(data);
|
|
||||||
if (!j.is_array()) {
|
|
||||||
std::cerr << "NGCHS2Rizzler error: chatlog not array\n";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << "NGCHS2Rizzler: chatlog has " << j.size() << " entries\n";
|
|
||||||
|
|
||||||
for (const auto j_entry : j) {
|
|
||||||
try {
|
|
||||||
// deci seconds
|
|
||||||
uint64_t ts = j_entry.at("ts");
|
|
||||||
// TODO: check against ts range
|
|
||||||
|
|
||||||
ts *= 100; // convert to ms
|
|
||||||
|
|
||||||
const auto& j_ppk = j_entry.at("ppk");
|
|
||||||
|
|
||||||
uint32_t mid = j_entry.at("mid");
|
|
||||||
|
|
||||||
if (
|
|
||||||
!(j_entry.count("text")) &&
|
|
||||||
!(j_entry.count("fkind") && j_entry.count("fid"))
|
|
||||||
) {
|
|
||||||
std::cerr << "NGCHS2Rizzler error: msg neither contains text nor file fields\n";
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
Contact3 from_c{entt::null};
|
|
||||||
{ // from_c
|
|
||||||
std::vector<uint8_t> id;
|
|
||||||
if (j_ppk.is_binary()) {
|
|
||||||
id = j_ppk.get_binary();
|
|
||||||
} else {
|
|
||||||
j_ppk.at("bytes").get_to(id);
|
|
||||||
}
|
|
||||||
|
|
||||||
from_c = findContactByID(_cr, id);
|
|
||||||
|
|
||||||
if (!_cr.valid(from_c)) {
|
|
||||||
// create sparse contact with id only
|
|
||||||
from_c = _cr.create();
|
|
||||||
_cr.emplace_or_replace<Contact::Components::ID>(from_c, id);
|
|
||||||
|
|
||||||
// TODO: only if public message
|
|
||||||
_cr.emplace_or_replace<Contact::Components::Parent>(from_c, sync_by_c.get<Contact::Components::Parent>().parent);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: from_c perm check
|
|
||||||
// hard to do without numbers
|
|
||||||
|
|
||||||
Message3Handle new_real_msg{reg, reg.create()};
|
|
||||||
|
|
||||||
new_real_msg.emplace<Message::Components::Timestamp>(ts); // reactive?
|
|
||||||
|
|
||||||
new_real_msg.emplace<Message::Components::ContactFrom>(from_c);
|
|
||||||
new_real_msg.emplace<Message::Components::ContactTo>(sync_by_c.get<Contact::Components::Parent>().parent);
|
|
||||||
|
|
||||||
new_real_msg.emplace<Message::Components::ToxGroupMessageID>(mid);
|
|
||||||
|
|
||||||
if (j_entry.contains("action") && static_cast<bool>(j_entry.at("action"))) {
|
|
||||||
new_real_msg.emplace<Message::Components::TagMessageIsAction>();
|
|
||||||
}
|
|
||||||
|
|
||||||
if (j_entry.contains("text")) {
|
|
||||||
const std::string& text = j_entry.at("text");
|
|
||||||
|
|
||||||
new_real_msg.emplace<Message::Components::MessageText>(text);
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
std::cout
|
|
||||||
<< "msg ts:" << ts
|
|
||||||
//<< " ppk:" << j_ppk
|
|
||||||
<< " mid:" << mid
|
|
||||||
<< " type:" << type
|
|
||||||
<< " text:" << text
|
|
||||||
<< "\n"
|
|
||||||
;
|
|
||||||
#endif
|
|
||||||
} else if (j_entry.contains("fkind") && j_entry.contains("fid")) {
|
|
||||||
uint32_t fkind = j_entry.at("fkind");
|
|
||||||
|
|
||||||
const auto& j_fid = j_entry.at("fid");
|
|
||||||
|
|
||||||
std::vector<uint8_t> fid;
|
|
||||||
if (j_fid.is_binary()) {
|
|
||||||
fid = j_fid.get_binary();
|
|
||||||
} else {
|
|
||||||
j_fid.at("bytes").get_to(fid);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (fkind == (uint32_t)NGCFT1_file_kind::HASH_SHA1_INFO) {
|
|
||||||
_sha1_nft.constructFileMessageInPlace(
|
|
||||||
new_real_msg,
|
|
||||||
NGCFT1_file_kind::HASH_SHA1_INFO,
|
|
||||||
ByteSpan{fid}
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
std::cerr << "NGCHS2Rizzler error: unknown file kind " << fkind << "\n";
|
|
||||||
}
|
|
||||||
|
|
||||||
#if 0
|
|
||||||
std::cout
|
|
||||||
<< "msg ts:" << ts
|
|
||||||
//<< " ppk:" << j_ppk
|
|
||||||
<< " mid:" << mid
|
|
||||||
<< " type:" << type
|
|
||||||
<< " fkind:" << fkind
|
|
||||||
<< " fid:" << j_fid
|
|
||||||
<< "\n"
|
|
||||||
;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
// now check against pre existing
|
|
||||||
// TODO: dont do this afterwards
|
|
||||||
Message3Handle dup_msg{};
|
|
||||||
{ // check preexisting
|
|
||||||
// get comparator from contact
|
|
||||||
const Contact3Handle reg_c {_cr, reg.ctx().get<Contact3>()};
|
|
||||||
if (reg_c.all_of<Contact::Components::MessageIsSame>()) {
|
|
||||||
auto& comp = reg_c.get<Contact::Components::MessageIsSame>().comp;
|
|
||||||
// walking EVERY existing message OOF
|
|
||||||
// this needs optimizing
|
|
||||||
for (const Message3 other_msg : reg.view<Message::Components::Timestamp, Message::Components::ContactFrom, Message::Components::ContactTo>()) {
|
|
||||||
if (other_msg == new_real_msg) {
|
|
||||||
continue; // skip self
|
|
||||||
}
|
|
||||||
|
|
||||||
if (comp({reg, other_msg}, new_real_msg)) {
|
|
||||||
// dup
|
|
||||||
dup_msg = {reg, other_msg};
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} // else, default heuristic??
|
|
||||||
}
|
|
||||||
|
|
||||||
Message3Handle new_msg = new_real_msg;
|
|
||||||
|
|
||||||
if (dup_msg) {
|
|
||||||
// we leak objects here (if file)
|
|
||||||
reg.destroy(new_msg);
|
|
||||||
new_msg = dup_msg;
|
|
||||||
}
|
|
||||||
|
|
||||||
{ // by whom
|
|
||||||
auto& synced_by = new_msg.get_or_emplace<Message::Components::SyncedBy>().ts;
|
|
||||||
// dont overwrite
|
|
||||||
synced_by.try_emplace(sync_by_c, now_ts);
|
|
||||||
}
|
|
||||||
|
|
||||||
{ // now we also know they got the message
|
|
||||||
auto& list = new_msg.get_or_emplace<Message::Components::ReceivedBy>().ts;
|
|
||||||
// dont overwrite
|
|
||||||
list.try_emplace(sync_by_c, now_ts);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (new_msg == dup_msg) {
|
|
||||||
// TODO: maybe update a timestamp?
|
|
||||||
_rmm.throwEventUpdate(reg, new_msg);
|
|
||||||
} else {
|
|
||||||
// pure new msg
|
|
||||||
|
|
||||||
new_msg.emplace<Message::Components::TimestampProcessed>(now_ts);
|
|
||||||
new_msg.emplace<Message::Components::TimestampWritten>(ts);
|
|
||||||
|
|
||||||
new_msg.emplace<Message::Components::TagUnread>();
|
|
||||||
_rmm.throwEventConstruct(reg, new_msg);
|
|
||||||
}
|
|
||||||
} catch (...) {
|
|
||||||
std::cerr << "NGCHS2Rizzler error: parsing entry '" << j_entry.dump() << "'\n";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (...) {
|
|
||||||
std::cerr << "NGCHS2Rizzler error: failed parsing data as msgpack\n";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Rizzler::onEvent(const Events::NGCFT1_recv_init& e) {
|
|
||||||
if (e.file_kind != NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK) {
|
|
||||||
return false; // not for us
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << "NGCHS2Rizzler: recv_init " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << "\n";
|
|
||||||
|
|
||||||
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
|
|
||||||
if (!c) {
|
|
||||||
return false; // huh?
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c.all_of<Components::RequestedChatLogs>()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse start end
|
|
||||||
// TODO: extract
|
|
||||||
ByteSpan fid{e.file_id, e.file_id_size};
|
|
||||||
// TODO: better size check
|
|
||||||
if (fid.size != sizeof(uint64_t)+sizeof(uint64_t)) {
|
|
||||||
std::cerr << "NGCHS2S error: range not lange enough\n";
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// seconds
|
|
||||||
uint64_t ts_start{0};
|
|
||||||
uint64_t ts_end{0};
|
|
||||||
|
|
||||||
// parse
|
|
||||||
try {
|
|
||||||
ByteSpan ts_start_bytes{fid.ptr, sizeof(uint64_t)};
|
|
||||||
ts_start = deserlTS(ts_start_bytes);
|
|
||||||
|
|
||||||
ByteSpan ts_end_bytes{ts_start_bytes.ptr+ts_start_bytes.size, sizeof(uint64_t)};
|
|
||||||
ts_end = deserlTS(ts_end_bytes);
|
|
||||||
} catch (...) {
|
|
||||||
std::cerr << "NGCHS2R error: failed to parse range\n";
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ts_end >= ts_start) {
|
|
||||||
std::cerr << "NGCHS2R error: end not < start\n";
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& reqcl = c.get<Components::RequestedChatLogs>();
|
|
||||||
|
|
||||||
if (!reqcl.contains(ts_start, ts_end)) {
|
|
||||||
// warn?
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& rnncl = c.get_or_emplace<Components::RunningChatLogs>();
|
|
||||||
_tox_peer_to_contact[combine_ids(e.group_number, e.peer_number)] = c; // cache
|
|
||||||
|
|
||||||
auto& transfer = rnncl.list[e.transfer_id];
|
|
||||||
transfer.data.reserve(e.file_size); // danger?
|
|
||||||
transfer.last_activity = 0.f;
|
|
||||||
transfer.ts_start = ts_start;
|
|
||||||
transfer.ts_end = ts_end;
|
|
||||||
|
|
||||||
e.accept = true;
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Rizzler::onEvent(const Events::NGCFT1_recv_data& e) {
|
|
||||||
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
|
|
||||||
if (!c) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c.all_of<Components::RunningChatLogs>()) {
|
|
||||||
return false; // not ours
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& rnncl = c.get<Components::RunningChatLogs>();
|
|
||||||
if (!rnncl.list.count(e.transfer_id)) {
|
|
||||||
return false; // not ours
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << "NGCHS2Rizzler: recv_data " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << " " << e.data_size << "@" << e.data_offset << "\n";
|
|
||||||
|
|
||||||
auto& transfer = rnncl.list.at(e.transfer_id);
|
|
||||||
transfer.data.resize(e.data_offset+e.data_size);
|
|
||||||
std::memcpy(&transfer.data[e.data_offset], e.data, e.data_size);
|
|
||||||
|
|
||||||
transfer.last_activity = 0.f;
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Rizzler::onEvent(const Events::NGCFT1_recv_done& e) {
|
|
||||||
// FIXME: this does not work, tcm just deleted the relationship
|
|
||||||
//auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
|
|
||||||
//if (!c) {
|
|
||||||
// return false;
|
|
||||||
//}
|
|
||||||
const auto c_it = _tox_peer_to_contact.find(combine_ids(e.group_number, e.peer_number));
|
|
||||||
if (c_it == _tox_peer_to_contact.end()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
auto c = c_it->second;
|
|
||||||
if (!static_cast<bool>(c)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c.all_of<Components::RunningChatLogs>()) {
|
|
||||||
return false; // not ours
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& rnncl = c.get<Components::RunningChatLogs>();
|
|
||||||
if (!rnncl.list.count(e.transfer_id)) {
|
|
||||||
return false; // not ours
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << "NGCHS2Rizzler: recv_done " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << "\n";
|
|
||||||
{
|
|
||||||
auto& transfer = rnncl.list.at(e.transfer_id);
|
|
||||||
// TODO: done might mean failed, so we might be parsing bs here
|
|
||||||
|
|
||||||
// use data
|
|
||||||
// TODO: move out of packet handler
|
|
||||||
handleMsgPack(c, transfer.data);
|
|
||||||
}
|
|
||||||
rnncl.list.erase(e.transfer_id);
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Rizzler::onToxEvent(const Tox_Event_Group_Peer_Join* e) {
|
|
||||||
const auto group_number = tox_event_group_peer_join_get_group_number(e);
|
|
||||||
const auto peer_number = tox_event_group_peer_join_get_peer_id(e);
|
|
||||||
|
|
||||||
const auto c = _tcm.getContactGroupPeer(group_number, peer_number);
|
|
||||||
|
|
||||||
if (!c) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!_request_queue.count(c)) {
|
|
||||||
_request_queue[c] = {
|
|
||||||
_delay_before_first_request_min + _rng_dist(_rng)*_delay_before_first_request_add,
|
|
||||||
0.f,
|
|
||||||
0,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
@ -1,73 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <solanaceae/contact/contact_model3.hpp>
|
|
||||||
#include <solanaceae/toxcore/tox_event_interface.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/ngc_ft1/ngcft1.hpp>
|
|
||||||
#include <solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp>
|
|
||||||
|
|
||||||
// fwd
|
|
||||||
class ToxContactModel2;
|
|
||||||
class RegistryMessageModelI;
|
|
||||||
|
|
||||||
|
|
||||||
class NGCHS2Rizzler : public ToxEventI, public NGCFT1EventI {
|
|
||||||
Contact3Registry& _cr;
|
|
||||||
RegistryMessageModelI& _rmm;
|
|
||||||
ToxContactModel2& _tcm;
|
|
||||||
NGCFT1& _nft;
|
|
||||||
NGCFT1EventProviderI::SubscriptionReference _nftep_sr;
|
|
||||||
ToxEventProviderI::SubscriptionReference _tep_sr;
|
|
||||||
SHA1_NGCFT1& _sha1_nft;
|
|
||||||
|
|
||||||
// 5s-6s
|
|
||||||
const float _delay_before_first_request_min {5.f};
|
|
||||||
const float _delay_before_first_request_add {1.f};
|
|
||||||
|
|
||||||
std::uniform_real_distribution<float> _rng_dist {0.0f, 1.0f};
|
|
||||||
std::minstd_rand _rng;
|
|
||||||
|
|
||||||
struct RequestQueueInfo {
|
|
||||||
float delay; // const
|
|
||||||
float timer;
|
|
||||||
uint64_t sync_delta; //?
|
|
||||||
};
|
|
||||||
// request queue
|
|
||||||
// c -> delay, timer
|
|
||||||
std::map<Contact3, RequestQueueInfo> _request_queue;
|
|
||||||
|
|
||||||
// FIXME: workaround missing contact events
|
|
||||||
// only used on peer exit (no, also used to quicken lookups)
|
|
||||||
entt::dense_map<uint64_t, Contact3Handle> _tox_peer_to_contact;
|
|
||||||
|
|
||||||
public:
|
|
||||||
NGCHS2Rizzler(
|
|
||||||
Contact3Registry& cr,
|
|
||||||
RegistryMessageModelI& rmm,
|
|
||||||
ToxContactModel2& tcm,
|
|
||||||
NGCFT1& nft,
|
|
||||||
ToxEventProviderI& tep,
|
|
||||||
SHA1_NGCFT1& sha1_nft
|
|
||||||
);
|
|
||||||
|
|
||||||
~NGCHS2Rizzler(void);
|
|
||||||
|
|
||||||
float iterate(float delta);
|
|
||||||
|
|
||||||
protected:
|
|
||||||
bool sendRequest(
|
|
||||||
uint32_t group_number, uint32_t peer_number,
|
|
||||||
uint64_t ts_start, uint64_t ts_end
|
|
||||||
);
|
|
||||||
|
|
||||||
void handleMsgPack(Contact3Handle c, const std::vector<uint8_t>& data);
|
|
||||||
|
|
||||||
protected:
|
|
||||||
bool onEvent(const Events::NGCFT1_recv_init&) override;
|
|
||||||
bool onEvent(const Events::NGCFT1_recv_data&) override;
|
|
||||||
bool onEvent(const Events::NGCFT1_recv_done&) override;
|
|
||||||
|
|
||||||
protected:
|
|
||||||
bool onToxEvent(const Tox_Event_Group_Peer_Join* e) override;
|
|
||||||
};
|
|
||||||
|
|
@ -1,460 +0,0 @@
|
|||||||
#include "./ngc_hs2_sigma.hpp"
|
|
||||||
|
|
||||||
#include <solanaceae/util/span.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/contact/components.hpp>
|
|
||||||
#include <solanaceae/tox_contacts/components.hpp>
|
|
||||||
#include <solanaceae/message3/components.hpp>
|
|
||||||
#include <solanaceae/tox_messages/msg_components.hpp>
|
|
||||||
|
|
||||||
//#include <solanaceae/tox_messages/obj_components.hpp>
|
|
||||||
// TODO: this is kinda bad, needs improvement
|
|
||||||
// use tox fileid/filekind instead !
|
|
||||||
#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>
|
|
||||||
#include <solanaceae/ngc_ft1_sha1/components.hpp>
|
|
||||||
|
|
||||||
// TODO: move somewhere else?
|
|
||||||
#include <solanaceae/ngc_ft1_sha1/util.hpp>
|
|
||||||
|
|
||||||
#include <nlohmann/json.hpp>
|
|
||||||
|
|
||||||
#include "./serl.hpp"
|
|
||||||
|
|
||||||
#include "./ts_find_start.hpp"
|
|
||||||
|
|
||||||
#include <iostream>
|
|
||||||
|
|
||||||
// https://www.youtube.com/watch?v=AdAqsgga3qo
|
|
||||||
|
|
||||||
// TODO: move to own file
|
|
||||||
namespace Components {
|
|
||||||
|
|
||||||
struct IncommingTimeRangeRequestQueue {
|
|
||||||
struct Entry {
|
|
||||||
TimeRangeRequest ir;
|
|
||||||
std::vector<uint8_t> fid;
|
|
||||||
};
|
|
||||||
std::deque<Entry> _queue;
|
|
||||||
|
|
||||||
// we should remove/not add queued requests
|
|
||||||
// that are subsets of same or larger ranges
|
|
||||||
void queueRequest(const TimeRangeRequest& new_request, const ByteSpan fid);
|
|
||||||
};
|
|
||||||
|
|
||||||
struct IncommingTimeRangeRequestRunning {
|
|
||||||
struct Entry {
|
|
||||||
TimeRangeRequest ir;
|
|
||||||
std::vector<uint8_t> data; // transfer data in memory
|
|
||||||
float last_activity {0.f};
|
|
||||||
};
|
|
||||||
entt::dense_map<uint8_t, Entry> _list;
|
|
||||||
};
|
|
||||||
|
|
||||||
void IncommingTimeRangeRequestQueue::queueRequest(const TimeRangeRequest& new_request, const ByteSpan fid) {
|
|
||||||
// TODO: do more than exact dedupe
|
|
||||||
for (const auto& [time_range, _] : _queue) {
|
|
||||||
if (time_range.ts_start == new_request.ts_start && time_range.ts_end == new_request.ts_end) {
|
|
||||||
return; // already enqueued
|
|
||||||
// TODO: what about fid?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_queue.emplace_back(Entry{
|
|
||||||
new_request,
|
|
||||||
std::vector<uint8_t>{fid.cbegin(), fid.cend()}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
} // Components
|
|
||||||
|
|
||||||
|
|
||||||
NGCHS2Sigma::NGCHS2Sigma(
|
|
||||||
Contact3Registry& cr,
|
|
||||||
RegistryMessageModelI& rmm,
|
|
||||||
ToxContactModel2& tcm,
|
|
||||||
NGCFT1& nft
|
|
||||||
) :
|
|
||||||
_cr(cr),
|
|
||||||
_rmm(rmm),
|
|
||||||
_tcm(tcm),
|
|
||||||
_nft(nft),
|
|
||||||
_nftep_sr(_nft.newSubRef(this))
|
|
||||||
{
|
|
||||||
_nftep_sr
|
|
||||||
.subscribe(NGCFT1_Event::recv_request)
|
|
||||||
.subscribe(NGCFT1_Event::send_data)
|
|
||||||
.subscribe(NGCFT1_Event::send_done)
|
|
||||||
;
|
|
||||||
}
|
|
||||||
|
|
||||||
NGCHS2Sigma::~NGCHS2Sigma(void) {
|
|
||||||
}
|
|
||||||
|
|
||||||
float NGCHS2Sigma::iterate(float delta) {
|
|
||||||
// limit how often we update here (new fts usually)
|
|
||||||
if (_iterate_heat > 0.f) {
|
|
||||||
_iterate_heat -= delta;
|
|
||||||
return 1000.f; // return heat?
|
|
||||||
} else {
|
|
||||||
_iterate_heat = _iterate_cooldown;
|
|
||||||
}
|
|
||||||
|
|
||||||
// work request queue
|
|
||||||
// check if already running, discard
|
|
||||||
|
|
||||||
auto fn_iirq = [this](auto&& view) {
|
|
||||||
for (auto&& [cv, iirq] : view.each()) {
|
|
||||||
if (iirq._queue.empty()) {
|
|
||||||
// TODO: remove comp?
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
Contact3Handle c{_cr, cv};
|
|
||||||
auto& iirr = c.get_or_emplace<Components::IncommingTimeRangeRequestRunning>();
|
|
||||||
|
|
||||||
// dedup queued from running
|
|
||||||
|
|
||||||
if (iirr._list.size() >= _max_parallel_per_peer) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// new ft here
|
|
||||||
// TODO: loop? nah just 1 per tick is enough
|
|
||||||
const auto request_entry = iirq._queue.front(); // copy
|
|
||||||
assert(!request_entry.fid.empty());
|
|
||||||
|
|
||||||
if (!c.all_of<Contact::Components::Parent>()) {
|
|
||||||
iirq._queue.pop_front();
|
|
||||||
continue; // how
|
|
||||||
}
|
|
||||||
const Contact3Handle group_c = {*c.registry(), c.get<Contact::Components::Parent>().parent};
|
|
||||||
if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral>()) {
|
|
||||||
iirq._queue.pop_front();
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();
|
|
||||||
_tox_peer_to_contact[combine_ids(group_number, peer_number)] = c; // cache
|
|
||||||
|
|
||||||
// TODO: check allowed range here
|
|
||||||
//_max_time_into_past_default
|
|
||||||
|
|
||||||
// potentially heavy op
|
|
||||||
auto data = buildChatLogFileRange(group_c, request_entry.ir.ts_start, request_entry.ir.ts_end);
|
|
||||||
|
|
||||||
uint8_t transfer_id {0};
|
|
||||||
if (!_nft.NGC_FT1_send_init_private(
|
|
||||||
group_number, peer_number,
|
|
||||||
(uint32_t)NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK,
|
|
||||||
request_entry.fid.data(), request_entry.fid.size(),
|
|
||||||
data.size(),
|
|
||||||
&transfer_id,
|
|
||||||
true // can_compress (does nothing rn)
|
|
||||||
)) {
|
|
||||||
// sending failed, we do not pop but wait for next iterate
|
|
||||||
// TODO: cache data
|
|
||||||
// TODO: fail counter
|
|
||||||
// actually, fail probably means offline, so delete?
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
assert(iirr._list.count(transfer_id) == 0);
|
|
||||||
iirr._list[transfer_id] = {request_entry.ir, data};
|
|
||||||
|
|
||||||
iirq._queue.pop_front();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// first handle range requests on weak self
|
|
||||||
fn_iirq(_cr.view<Components::IncommingTimeRangeRequestQueue, Contact::Components::TagSelfWeak>());
|
|
||||||
|
|
||||||
// we could stop here, if too much is already running
|
|
||||||
|
|
||||||
// then range on others
|
|
||||||
fn_iirq(_cr.view<Components::IncommingTimeRangeRequestQueue>(entt::exclude_t<Contact::Components::TagSelfWeak>{}));
|
|
||||||
|
|
||||||
_cr.view<Components::IncommingTimeRangeRequestRunning>().each(
|
|
||||||
[delta](const auto cv, Components::IncommingTimeRangeRequestRunning& irr) {
|
|
||||||
std::vector<uint8_t> to_remove;
|
|
||||||
for (auto&& [ft_id, entry] : irr._list) {
|
|
||||||
entry.last_activity += delta;
|
|
||||||
if (entry.last_activity >= 60.f) {
|
|
||||||
to_remove.push_back(ft_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (const auto it : to_remove) {
|
|
||||||
std::cout << "NGCHS2Sigma warning: timed out ." << (int)it << "\n";
|
|
||||||
// TODO: need a way to tell ft?
|
|
||||||
irr._list.erase(it);
|
|
||||||
// technically we are not supposed to timeout and instead rely on the done event
|
|
||||||
}
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
return 1000.f;
|
|
||||||
}
|
|
||||||
|
|
||||||
void NGCHS2Sigma::handleTimeRange(Contact3Handle c, const Events::NGCFT1_recv_request& e) {
|
|
||||||
ByteSpan fid{e.file_id, e.file_id_size};
|
|
||||||
// TODO: better size check
|
|
||||||
if (fid.size != sizeof(uint64_t)+sizeof(uint64_t)) {
|
|
||||||
std::cerr << "NGCHS2S error: range not lange enough\n";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// seconds
|
|
||||||
uint64_t ts_start{0};
|
|
||||||
uint64_t ts_end{0};
|
|
||||||
|
|
||||||
// parse
|
|
||||||
try {
|
|
||||||
ByteSpan ts_start_bytes{fid.ptr, sizeof(uint64_t)};
|
|
||||||
ts_start = deserlTS(ts_start_bytes);
|
|
||||||
|
|
||||||
ByteSpan ts_end_bytes{ts_start_bytes.ptr+ts_start_bytes.size, sizeof(uint64_t)};
|
|
||||||
ts_end = deserlTS(ts_end_bytes);
|
|
||||||
} catch (...) {
|
|
||||||
std::cerr << "NGCHS2S error: failed to parse range\n";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ts_end >= ts_start) {
|
|
||||||
std::cerr << "NGCHS2S error: end not < start\n";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// dedupe insert into queue
|
|
||||||
// how much overlap do we allow?
|
|
||||||
c.get_or_emplace<Components::IncommingTimeRangeRequestQueue>().queueRequest(
|
|
||||||
{ts_start, ts_end},
|
|
||||||
fid
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<uint8_t> NGCHS2Sigma::buildChatLogFileRange(Contact3Handle c, uint64_t ts_start, uint64_t ts_end) {
|
|
||||||
const Message3Registry* reg_ptr = static_cast<const RegistryMessageModelI&>(_rmm).get(c);
|
|
||||||
if (reg_ptr == nullptr) {
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
const Message3Registry& msg_reg = *reg_ptr;
|
|
||||||
|
|
||||||
|
|
||||||
if (msg_reg.storage<Message::Components::Timestamp>() == nullptr) {
|
|
||||||
// nothing to do here
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << "NGCHS2Sigma: building chatlog for time range " << ts_start-ts_end << "s\n";
|
|
||||||
|
|
||||||
// convert seconds to milliseconds
|
|
||||||
// TODO: lift out?
|
|
||||||
ts_start *= 1000;
|
|
||||||
ts_end *= 1000;
|
|
||||||
|
|
||||||
//std::cout << "!!!! starting msg ts search, ts_start:" << ts_start << " ts_end:" << ts_end << "\n";
|
|
||||||
|
|
||||||
auto ts_view = msg_reg.view<Message::Components::Timestamp>();
|
|
||||||
|
|
||||||
// we iterate "forward", so from newest to oldest
|
|
||||||
|
|
||||||
// start is the newest ts
|
|
||||||
const auto ts_start_it = find_start_by_ts(ts_view, ts_start);
|
|
||||||
// end is the oldest ts
|
|
||||||
|
|
||||||
// we only search for the start point, because we walk to the end anyway
|
|
||||||
|
|
||||||
auto j_array = nlohmann::json::array_t{};
|
|
||||||
|
|
||||||
// hmm
|
|
||||||
// maybe use other view or something?
|
|
||||||
for (auto it = ts_start_it; it != ts_view.end(); it++) {
|
|
||||||
const auto e = *it;
|
|
||||||
const auto& [ts_comp] = ts_view.get(e);
|
|
||||||
|
|
||||||
if (ts_comp.ts > ts_start) {
|
|
||||||
std::cerr << "!!!! msg ent in view too new\n";
|
|
||||||
continue;
|
|
||||||
} else if (ts_comp.ts < ts_end) {
|
|
||||||
// too old, we hit the end of the range
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!msg_reg.all_of<
|
|
||||||
Message::Components::ContactFrom,
|
|
||||||
Message::Components::ContactTo,
|
|
||||||
Message::Components::ToxGroupMessageID
|
|
||||||
>(e)) {
|
|
||||||
continue; // ??
|
|
||||||
}
|
|
||||||
if (!msg_reg.any_of<Message::Components::MessageText, Message::Components::MessageFileObject>(e)) {
|
|
||||||
continue; // skip
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto& [c_from_c, c_to_c] = msg_reg.get<Message::Components::ContactFrom, Message::Components::ContactTo>(e);
|
|
||||||
|
|
||||||
if (c_to_c.c != c) {
|
|
||||||
// message was not public
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!_cr.valid(c_from_c.c)) {
|
|
||||||
continue; // ???
|
|
||||||
}
|
|
||||||
|
|
||||||
Contact3Handle c_from{_cr, c_from_c.c};
|
|
||||||
|
|
||||||
if (!c_from.all_of<Contact::Components::ToxGroupPeerPersistent>()) {
|
|
||||||
continue; // ???
|
|
||||||
}
|
|
||||||
|
|
||||||
if (_only_send_self_observed && msg_reg.all_of<Message::Components::SyncedBy>(e) && c.all_of<Contact::Components::Self>()) {
|
|
||||||
if (!msg_reg.get<Message::Components::SyncedBy>(e).ts.count(c.get<Contact::Components::Self>().self)) {
|
|
||||||
continue; // did not observe ourselves, skip
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
auto j_entry = nlohmann::json::object_t{};
|
|
||||||
|
|
||||||
j_entry["ts"] = ts_comp.ts/100; // millisec -> decisec
|
|
||||||
{
|
|
||||||
const auto& ppk_ref = c_from.get<Contact::Components::ToxGroupPeerPersistent>().peer_key.data;
|
|
||||||
j_entry["ppk"] = nlohmann::json::binary_t{std::vector<uint8_t>{ppk_ref.cbegin(), ppk_ref.cend()}};
|
|
||||||
}
|
|
||||||
j_entry["mid"] = msg_reg.get<Message::Components::ToxGroupMessageID>(e).id;
|
|
||||||
|
|
||||||
if (msg_reg.all_of<Message::Components::TagMessageIsAction>(e)) {
|
|
||||||
j_entry["action"] = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (msg_reg.all_of<Message::Components::MessageText>(e)) {
|
|
||||||
j_entry["text"] = msg_reg.get<Message::Components::MessageText>(e).text;
|
|
||||||
} else if (msg_reg.any_of<Message::Components::MessageFileObject>(e)) {
|
|
||||||
const auto& o = msg_reg.get<Message::Components::MessageFileObject>(e).o;
|
|
||||||
if (!o) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// HACK: use tox file_id and file_kind instead!!
|
|
||||||
if (o.all_of<Components::FT1InfoSHA1Hash>()) {
|
|
||||||
j_entry["fkind"] = NGCFT1_file_kind::HASH_SHA1_INFO;
|
|
||||||
j_entry["fid"] = nlohmann::json::binary_t{o.get<Components::FT1InfoSHA1Hash>().hash};
|
|
||||||
} else {
|
|
||||||
continue; // unknown file type
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
j_array.push_back(j_entry);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::cout << "NGCHS2Sigma: built chat log with " << j_array.size() << " entries\n";
|
|
||||||
|
|
||||||
return nlohmann::json::to_msgpack(j_array);
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Sigma::onEvent(const Message::Events::MessageConstruct&) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Sigma::onEvent(const Message::Events::MessageUpdated&) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Sigma::onEvent(const Message::Events::MessageDestory&) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Sigma::onEvent(const Events::NGCFT1_recv_request& e) {
|
|
||||||
if (
|
|
||||||
e.file_kind != NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK
|
|
||||||
) {
|
|
||||||
return false; // not for us
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: when is it done from queue?
|
|
||||||
|
|
||||||
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
|
|
||||||
if (!c) {
|
|
||||||
return false; // how
|
|
||||||
}
|
|
||||||
|
|
||||||
// is other peer allowed to make requests
|
|
||||||
//bool quick_allow {false};
|
|
||||||
bool quick_allow {true}; // HACK: disable all restrictions for this early test
|
|
||||||
// TODO: quick deny?
|
|
||||||
{
|
|
||||||
// - tagged as weakself
|
|
||||||
if (!quick_allow && c.all_of<Contact::Components::TagSelfWeak>()) {
|
|
||||||
quick_allow = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// - sub perm level??
|
|
||||||
// - out of max time range (ft specific, not a quick_allow)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (e.file_kind == NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK) {
|
|
||||||
handleTimeRange(c, e);
|
|
||||||
}
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Sigma::onEvent(const Events::NGCFT1_send_data& e) {
|
|
||||||
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
|
|
||||||
if (!c) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c.all_of<Components::IncommingTimeRangeRequestRunning>()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& irr = c.get<Components::IncommingTimeRangeRequestRunning>();
|
|
||||||
if (!irr._list.count(e.transfer_id)) {
|
|
||||||
return false; // not for us (maybe)
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& transfer = irr._list.at(e.transfer_id);
|
|
||||||
if (transfer.data.size() < e.data_offset+e.data_size) {
|
|
||||||
std::cerr << "NGCHS2Sigma error: ft send data larger then file???\n";
|
|
||||||
assert(false && "how");
|
|
||||||
}
|
|
||||||
std::memcpy(e.data, transfer.data.data()+e.data_offset, e.data_size);
|
|
||||||
transfer.last_activity = 0.f;
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool NGCHS2Sigma::onEvent(const Events::NGCFT1_send_done& e) {
|
|
||||||
// TODO: this will return null if the peer just disconnected
|
|
||||||
// FIXME: this does not work, tcm just deleted the relationship
|
|
||||||
//auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
|
|
||||||
//if (!c) {
|
|
||||||
// return false;
|
|
||||||
//}
|
|
||||||
const auto c_it = _tox_peer_to_contact.find(combine_ids(e.group_number, e.peer_number));
|
|
||||||
if (c_it == _tox_peer_to_contact.end()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
auto c = c_it->second;
|
|
||||||
if (!static_cast<bool>(c)) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c.all_of<Components::IncommingTimeRangeRequestRunning>()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& irr = c.get<Components::IncommingTimeRangeRequestRunning>();
|
|
||||||
if (!irr._list.count(e.transfer_id)) {
|
|
||||||
return false; // not for us (maybe)
|
|
||||||
}
|
|
||||||
|
|
||||||
irr._list.erase(e.transfer_id);
|
|
||||||
|
|
||||||
// TODO: check if we completed it
|
|
||||||
std::cout << "NGCHS2Sigma: sent chatlog to " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << "\n";
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
@ -1,82 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <solanaceae/toxcore/tox_event_interface.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/contact/contact_model3.hpp>
|
|
||||||
#include <solanaceae/message3/registry_message_model.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/ngc_ft1/ngcft1.hpp>
|
|
||||||
|
|
||||||
#include <entt/container/dense_map.hpp>
|
|
||||||
|
|
||||||
#include <solanaceae/util/span.hpp>
|
|
||||||
|
|
||||||
#include <vector>
|
|
||||||
#include <deque>
|
|
||||||
|
|
||||||
// fwd
|
|
||||||
class ToxContactModel2;
|
|
||||||
|
|
||||||
|
|
||||||
struct TimeRangeRequest {
|
|
||||||
uint64_t ts_start{0};
|
|
||||||
uint64_t ts_end{0};
|
|
||||||
};
|
|
||||||
|
|
||||||
class NGCHS2Sigma : public RegistryMessageModelEventI, public NGCFT1EventI {
|
|
||||||
Contact3Registry& _cr;
|
|
||||||
RegistryMessageModelI& _rmm;
|
|
||||||
ToxContactModel2& _tcm;
|
|
||||||
NGCFT1& _nft;
|
|
||||||
NGCFT1EventProviderI::SubscriptionReference _nftep_sr;
|
|
||||||
|
|
||||||
float _iterate_heat {0.f};
|
|
||||||
constexpr static float _iterate_cooldown {1.22f}; // sec
|
|
||||||
|
|
||||||
// open/running range requests (by c)
|
|
||||||
// comp on peer c
|
|
||||||
|
|
||||||
// open/running range responses (by c)
|
|
||||||
// comp on peer c
|
|
||||||
|
|
||||||
// limit to 2 uploads per peer simultaneously
|
|
||||||
// TODO: increase for prod (4?) or maybe even lower?
|
|
||||||
// currently per type
|
|
||||||
constexpr static size_t _max_parallel_per_peer {2};
|
|
||||||
|
|
||||||
constexpr static bool _only_send_self_observed {true};
|
|
||||||
constexpr static int64_t _max_time_into_past_default {60*15}; // s
|
|
||||||
|
|
||||||
// FIXME: workaround missing contact events
|
|
||||||
// only used on peer exit (no, also used to quicken lookups)
|
|
||||||
entt::dense_map<uint64_t, Contact3Handle> _tox_peer_to_contact;
|
|
||||||
|
|
||||||
public:
|
|
||||||
NGCHS2Sigma(
|
|
||||||
Contact3Registry& cr,
|
|
||||||
RegistryMessageModelI& rmm,
|
|
||||||
ToxContactModel2& tcm,
|
|
||||||
NGCFT1& nft
|
|
||||||
);
|
|
||||||
|
|
||||||
~NGCHS2Sigma(void);
|
|
||||||
|
|
||||||
float iterate(float delta);
|
|
||||||
|
|
||||||
void handleTimeRange(Contact3Handle c, const Events::NGCFT1_recv_request&);
|
|
||||||
|
|
||||||
// msg reg contact
|
|
||||||
// time ranges
|
|
||||||
[[nodiscard]] std::vector<uint8_t> buildChatLogFileRange(Contact3Handle c, uint64_t ts_start, uint64_t ts_end);
|
|
||||||
|
|
||||||
protected:
|
|
||||||
bool onEvent(const Message::Events::MessageConstruct&) override;
|
|
||||||
bool onEvent(const Message::Events::MessageUpdated&) override;
|
|
||||||
bool onEvent(const Message::Events::MessageDestory&) override;
|
|
||||||
|
|
||||||
protected:
|
|
||||||
bool onEvent(const Events::NGCFT1_recv_request&) override;
|
|
||||||
bool onEvent(const Events::NGCFT1_send_data&) override;
|
|
||||||
bool onEvent(const Events::NGCFT1_send_done&) override;
|
|
||||||
};
|
|
||||||
|
|
@ -1,32 +0,0 @@
|
|||||||
#pragma once
|
|
||||||
|
|
||||||
#include <solanaceae/util/span.hpp>
|
|
||||||
|
|
||||||
#include <cstdint>
|
|
||||||
|
|
||||||
template<typename Type>
|
|
||||||
static uint64_t deserlSimpleType(ByteSpan bytes) {
|
|
||||||
if (bytes.size < sizeof(Type)) {
|
|
||||||
throw int(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
Type value{};
|
|
||||||
|
|
||||||
for (size_t i = 0; i < sizeof(Type); i++) {
|
|
||||||
value |= Type(bytes[i]) << (i*8);
|
|
||||||
}
|
|
||||||
|
|
||||||
return value;
|
|
||||||
}
|
|
||||||
|
|
||||||
static uint64_t deserlTS(ByteSpan ts_bytes) {
|
|
||||||
return deserlSimpleType<uint64_t>(ts_bytes);
|
|
||||||
}
|
|
||||||
|
|
||||||
template<typename Type>
|
|
||||||
static void serlSimpleType(std::vector<uint8_t>& bytes, const Type& value) {
|
|
||||||
for (size_t i = 0; i < sizeof(Type); i++) {
|
|
||||||
bytes.push_back(uint8_t(value >> (i*8) & 0xff));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,110 +0,0 @@
|
|||||||
# [NGC] Group-History-Sync (v2.1) [PoC] [Draft]

Simple group history sync that uses `timestamp` + `peer public key` + `message_id` (`ts+ppk+mid`) to, mostly, uniquely identify messages and deliver them.

Messages are bundled up in a `msgpack` `array` and sent as a file transfer.

## Requirements

TODO: more?

### Msgpack

For serializing the messages.

### File transfers

For sending packs of messages.
Even a single message can be larger than a single custom packet, so this is a must-have.
This also allows for compression down the road.

## Procedure

Peer A can request a `ts+ppk+mid+msg` list for a given time range from peer B.

Peer B then sends a file transfer (with a special file kind) containing the list of `ts+ppk+mid+msg`.
Optionally compressed. (Delta-coding? / zstd)

Peer A keeps doing that until the desired time span is covered.

During all that, peer B usually does the same thing toward peer A.

TODO: deny requests explicitly, and include the reason (e.g. permissions, time range too large, etc.)

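As an illustration of the request side: the ft id is just the two timestamps, 8 bytes each, serialized little-endian. A minimal sketch, assuming a `serlSimpleType`-style helper as in `serl.hpp` (the function names below are illustrative only):

```
// Sketch: build the 16-byte ft id (ts_start, ts_end) for a
// HS2_RANGE_TIME_MSGPACK range request. Little-endian, low byte first.
#include <cstddef>
#include <cstdint>
#include <vector>

static void serl_u64_le(std::vector<uint8_t>& bytes, uint64_t value) {
	for (size_t i = 0; i < sizeof(uint64_t); i++) {
		bytes.push_back(uint8_t((value >> (i*8)) & 0xff));
	}
}

static std::vector<uint8_t> build_range_fid(uint64_t ts_start, uint64_t ts_end) {
	std::vector<uint8_t> fid;
	fid.reserve(2*sizeof(uint64_t));
	serl_u64_le(fid, ts_start); // newest point of the range (in seconds)
	serl_u64_le(fid, ts_end);   // oldest point of the range (ts_end < ts_start)
	return fid;
}
```
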
## Traffic savings

It is recommended to remember whether a range has already been requested and answered by a given peer, to reduce traffic.

While compression is optional, it is recommended.
Timestamps fit delta coding.
Peer keys fit dicts.
Message ids are mostly high entropy.
The message itself is text, so dict/huffman fits well.

TODO: store the 4 columns SoA instead of AoS?

## Message uniqueness

This protocol relies on the randomness of `message_id` and on the clocks being more or less synchronized.

However, `message_id` can be manipulated freely by any peer, which can make messages appear as duplicates.

This can also be used deliberately, if you don't wish your messages to be synchronized (to an extent).

## Security

Only sync publicly sent/received messages.

Only allow sync or extended time ranges from peers you trust (enough).

The default shall be to not offer any messages.

Indirectly synced messages shall carry low credibility, while messages synced directly by their author carry mid credibility.

Only messages of high or mid credibility shall be sent.

Manual exceptions to all of the above can be made at the user's discretion, e.g. for the user's other devices.

## File transfer requests

TODO: is reusing the ft request api a good idea for this?

| fttype | name | content (ft id) |
|------------|------|---------------------|
| 0x00000f02 | time range msgpack | - ts start <br/> - ts end |

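And a hedged sketch of the receiving side, decoding that 16-byte ft id back into the two timestamps (mirrors `deserlTS` in `serl.hpp`; helper names here are made up for the example):

```
// Sketch: decode the 16-byte ft id back into (ts_start, ts_end), both in seconds.
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <vector>

static uint64_t deserl_u64_le(const uint8_t* bytes, size_t size) {
	if (size < sizeof(uint64_t)) {
		throw std::runtime_error("ft id too short");
	}
	uint64_t value = 0;
	for (size_t i = 0; i < sizeof(uint64_t); i++) {
		value |= uint64_t(bytes[i]) << (i*8);
	}
	return value;
}

static void parse_range_fid(const std::vector<uint8_t>& fid, uint64_t& ts_start, uint64_t& ts_end) {
	ts_start = deserl_u64_le(fid.data(), fid.size());
	ts_end = deserl_u64_le(fid.data() + sizeof(uint64_t), fid.size() - sizeof(uint64_t));
	// a valid request has ts_end < ts_start (newest to oldest)
}
```
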
## File transfer content

| fttype | name | content | note |
|------------|------|----------------------------|---|
| 0x00000f02 | time range msgpack | `message list` in msgpack | |

### time range msgpack

Msgpack array of messages.

```
name | type/size | note
-------------------------|-------------------|-----
- array | 32bit number msgs
  - ts | 64bit deciseconds
  - ppk | 32bytes
  - mid | 16bit
  - if action |
    - action | bool
  - if text |
    - text | string | maybe byte array instead?
  - if file |
    - fkind | 32bit enum | is this right?
    - fid | bytes kind | length depends on kind
```

Name is the actual string key.
Data type sizes are suggestions, if not defined by the tox protocol.

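As a rough, non-normative example of what one serialized entry could look like, using nlohmann::json (which the reference implementation already uses for `to_msgpack`); every field value below is a placeholder:

```
// Sketch: build one chat log entry and pack the whole array as msgpack.
#include <cstdint>
#include <vector>
#include <nlohmann/json.hpp>

static std::vector<uint8_t> build_example_chatlog(void) {
	auto j_array = nlohmann::json::array_t{};

	auto j_entry = nlohmann::json::object_t{};
	j_entry["ts"] = uint64_t(17000000000ull);  // deciseconds (milliseconds / 100)
	j_entry["ppk"] = nlohmann::json::binary_t{std::vector<uint8_t>(32, 0x42)}; // 32-byte peer key
	j_entry["mid"] = uint32_t(0x1234);         // group message id
	j_entry["text"] = "hello group";           // or "fkind" + "fid" for a file message

	j_array.push_back(j_entry);

	return nlohmann::json::to_msgpack(j_array);
}
```
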
## TODO

- [ ] figure out a proactive approach (instead of waiting for a range request)
- [ ] compression in the ft layer? (would make it reusable) hint/autodetect/autoenable for >1k ?

|
|
@ -1,82 +0,0 @@
#include "./ts_find_start.hpp"

#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/message3/components.hpp>

#include <iostream>
#include <cassert>

int main(void) {
    Message3Registry msg_reg;

    {
        std::cout << "TEST empty reg\n";
        auto ts_view = msg_reg.view<Message::Components::Timestamp>();
        const auto res = find_start_by_ts(ts_view, 42);
        assert(res == ts_view.end());
    }

    {
        std::cout << "TEST single msg newer (fail)\n";
        Message3Handle msg{msg_reg, msg_reg.create()};
        msg.emplace<Message::Components::Timestamp>(43ul);

        auto ts_view = msg_reg.view<Message::Components::Timestamp>();
        const auto res = find_start_by_ts(ts_view, 42);
        assert(res == ts_view.end());

        msg.destroy();
    }

    {
        std::cout << "TEST single msg same (succ)\n";
        Message3Handle msg{msg_reg, msg_reg.create()};
        msg.emplace<Message::Components::Timestamp>(42ul);

        auto ts_view = msg_reg.view<Message::Components::Timestamp>();
        const auto res = find_start_by_ts(ts_view, 42);
        assert(res != ts_view.end());

        msg.destroy();
    }

    {
        std::cout << "TEST single msg older (succ)\n";
        Message3Handle msg{msg_reg, msg_reg.create()};
        msg.emplace<Message::Components::Timestamp>(41ul);

        auto ts_view = msg_reg.view<Message::Components::Timestamp>();
        const auto res = find_start_by_ts(ts_view, 42);
        assert(res != ts_view.end());

        msg.destroy();
    }

    {
        std::cout << "TEST multi msg\n";
        Message3Handle msg{msg_reg, msg_reg.create()};
        msg.emplace<Message::Components::Timestamp>(41ul);
        Message3Handle msg2{msg_reg, msg_reg.create()};
        msg2.emplace<Message::Components::Timestamp>(42ul);
        Message3Handle msg3{msg_reg, msg_reg.create()};
        msg3.emplace<Message::Components::Timestamp>(43ul);

        // see message3/message_time_sort.cpp
        msg_reg.sort<Message::Components::Timestamp>([](const auto& lhs, const auto& rhs) -> bool {
            return lhs.ts > rhs.ts;
        }, entt::insertion_sort{});

        auto ts_view = msg_reg.view<Message::Components::Timestamp>();
        auto res = find_start_by_ts(ts_view, 42);
        assert(res != ts_view.end());
        assert(*res == msg2);
        res++;
        assert(*res == msg);

        msg3.destroy();
        msg2.destroy();
        msg.destroy();
    }

    return 0;
}
@ -1,31 +0,0 @@
#pragma once

#include <algorithm>
#include <cstdint>

#include <iostream>

// perform binary search to find the first message not newer than ts_start
template<typename View>
auto find_start_by_ts(const View& view, uint64_t ts_start) {
    //std::cout << "!!!! starting msg ts search, ts_start:" << ts_start << "\n";

    // -> first value smaller than start ts
    auto res = std::lower_bound(
        view.begin(), view.end(),
        ts_start,
        [&view](const auto& a, const auto& b) {
            const auto& [a_comp] = view.get(a);
            return a_comp.ts > b; // > bc ts is sorted high to low?
        }
    );

    if (res != view.end()) {
        const auto& [ts_comp] = view.get(*res);
        //std::cout << "!!!! first value not newer than start ts is " << ts_comp.ts << "\n";
    } else {
        //std::cout << "!!!! no first value not newer than start ts\n";
    }
    return res;
}