75 Commits

Author SHA1 Message Date
0ad4c4997c limit received messages to 1min into the future 2025-04-12 23:00:32 +02:00
8673c63d3c close rw on completion 2025-03-16 11:57:52 +01:00
a094a3d574 move to read only memory mapped files 2025-03-15 19:54:45 +01:00
f5f7f2ca9d close files of inactive tranfsers 2025-03-14 13:50:30 +01:00
246587e30d make contact store version visible 2025-03-10 20:54:00 +01:00
dacda24390 port to contact4 (not using events yet) 2025-03-10 15:09:45 +01:00
7a54552bd2 print mapping error message 2025-03-03 21:08:49 +01:00
1780bd097a error checks around performing the resize
this throws on windows and causes undefined mappings on linux
2025-01-24 00:27:58 +01:00
db73c90e34 small optimizations and os backend refactor 2025-01-18 01:44:46 +01:00
a9ebaa2c2f improve perf (was ~.7% with asan enabled) 2025-01-09 01:53:17 +01:00
0064dde1ce limit provided history by first seen 2025-01-07 22:29:39 +01:00
b3b8b79a65 time moved to util 2025-01-07 16:10:28 +01:00
7cd68845ca forgot to reset timer on recv data 2024-12-19 17:17:10 +01:00
f40907d42a use actual activity for receiving transfers for iterate interval 2024-12-19 13:33:30 +01:00
6d7d643207 adjust flow to iterate speed
higher ft1sha1 iterate interval (not fixing it yet)
2024-12-16 12:52:33 +01:00
b35babe3f8 switch to os provided remote have set and other fixes 2024-12-15 01:23:08 +01:00
78390dd342 update object, object update lock and rare crash 2024-12-13 01:30:03 +01:00
eb169b2779 add participation and log spam fix 2024-12-11 23:23:05 +01:00
100279a483 fixes 2024-12-10 17:57:53 +01:00
60b3d5d941 big ft fixes, mostly for info, but also other stuff 2024-12-10 17:18:28 +01:00
6ad2905e07 hs2: change msgpack format and fixes 2024-12-09 23:38:07 +01:00
930c829031 rizzler working, more fixes everywhere
there still are some crashes that needs workarounds
2024-12-09 22:58:36 +01:00
a139f412b1 various fixes resulting in the first time running the code 2024-12-08 16:08:30 +01:00
abdf6672bf hs2 plug, rizzler still non functional 2024-12-08 14:48:24 +01:00
7bbaa9b929 move in plugin 2024-12-08 13:17:22 +01:00
70620a901b starting the work on hs2 rizzler 2024-12-07 23:34:19 +01:00
231928339e sigma 2024-12-07 11:38:31 +01:00
294c5346ca hs2 send done, but untested 2024-12-06 22:41:05 +01:00
adeaca4efe early out have when we have all 2024-12-06 16:36:02 +01:00
ba809eda43 refactor and test ts start search 2024-12-06 14:11:06 +01:00
b9a7c75d20 hs: progess on the log file generator, fully written but untested
ft: timeout changes and wording fixes
2024-12-05 23:06:44 +01:00
04b6f7925a use cache for group peer to contact lookup (~5% cpu) 2024-12-02 14:27:39 +01:00
5601ad91f5 a bunch of allocation optimizations 2024-12-02 13:08:47 +01:00
741f1428d3 updated spec to 2.1, changed almost everything
other small fixes
2024-11-28 12:59:14 +01:00
34dc01d4dc fix chunk picker round robin actually working
(worked kinda bc of bug before)
make priority dynamic and fix skipping to make it work too
2024-11-22 13:51:48 +01:00
1bf1fbce75 small hs progress 2024-11-22 13:51:25 +01:00
37b92f67c8 respect voice state for receiving file messages
we should also check on send in the future
2024-11-06 10:50:42 +01:00
01c892df8c tweak finishing timer 2024-11-06 10:49:24 +01:00
6eb5826616 split recv and send, they dont share any code (probably) 2024-11-03 18:21:02 +01:00
2e6b15e4ad more hs drafting 2024-11-01 11:31:05 +01:00
63de78aaeb add spec draft to repo 2024-10-31 15:46:48 +01:00
2a0350a564 minor tweaks and fixes
especially preventing a stall on some packetloss scenarios
2024-10-31 11:39:16 +01:00
ee593536a2 boilerplate for hs2 2024-10-30 17:12:05 +01:00
6f2fa60394 handle init2 in ft1 (hacky) 2024-10-30 11:25:33 +01:00
96041fbcec add ft1_init2 and ft1_init_ack v3 2024-10-30 11:08:46 +01:00
b4eaf86ed1 dynamically choose chunk size 2024-10-28 23:23:05 +01:00
c7485c4577 use sr 2024-10-24 14:00:16 +02:00
51396314d1 big fix for not using the found free slot index!!
minor logging and tweaking changes
2024-10-24 11:50:03 +02:00
4aab6e489d fix uint64 cast to size_t 2024-10-23 14:11:26 +02:00
5e884fd3ee dont delay recv_done and check on init info 2024-10-23 13:46:00 +02:00
9a4be575ba minor stuff and logging 2024-10-23 12:51:22 +02:00
fd094b157f more 32bit stuff 2024-10-20 18:37:01 +02:00
c3c2d0f133 fixes for 32bit 2024-10-20 16:35:55 +02:00
4360b65309 update to rmmi 2024-10-06 11:37:07 +02:00
1d7416efed fix rare assert 2024-08-08 23:52:36 +02:00
a761378dd9 add p2prng packet to ext 2024-08-07 11:07:18 +02:00
60d6f27a12 disable sending timeout assert, we believe the underlying ngcft1 2024-08-04 10:15:26 +02:00
07099e4832 tag chunkpicker for update more often 2024-08-04 10:14:59 +02:00
9e2911b36c spread chosen chunks more thinly if small files
should help (a little) if many small files with distribution
2024-08-02 13:06:55 +02:00
6da1f9afca info fixes, should investigate more 2024-07-31 18:34:14 +02:00
8bd2c925a6 fix missized local have bitset 2024-07-25 14:57:27 +02:00
da406714ff adopt to new os and message file refactor 2024-07-24 17:55:31 +02:00
16cb755191 fix dangerous unchecked file stream read 2024-07-22 19:49:02 +02:00
54ace9d0b2 and use new backend code (partially transitioned to os backend) 2024-07-17 17:17:07 +02:00
e50e74e12f add os backend and add threaded hashing
still meh but nicer
2024-07-17 17:13:32 +02:00
f730844771 abstract aways file2rwmapped construction to lower visibility 2024-07-15 17:44:30 +02:00
3fcfbc11a4 reduce includes and some scope
hopefully fixes the windows obj being too large
2024-07-15 16:38:33 +02:00
1efae931d1 better receiving transfer cleanup (reduces log spam) 2024-07-15 14:56:52 +02:00
0b2fa40cb9 lower rat on join
TODO: its indescriminate, only announce to fresh peers
2024-07-15 14:38:53 +02:00
489556e322 fix front access to empty array
and increase send timeout assert
2024-07-15 11:48:16 +02:00
10756e13ce small fixes 2024-07-14 20:11:37 +02:00
74414d0999 re annouce with exponential back-off 2024-07-14 12:38:00 +02:00
bc5599a230 refactor sending transfers the same way as receiving 2024-07-13 13:52:43 +02:00
ca89e43a40 refactor extract chunk picker systems 2024-07-13 12:36:49 +02:00
dd04e6131a transfer stats 2024-07-13 11:46:33 +02:00
51 changed files with 4191 additions and 1203 deletions


@ -1,9 +1,21 @@
cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
cmake_minimum_required(VERSION 3.24 FATAL_ERROR)
add_subdirectory(./external)
project(solanaceae)
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
set(SOLANACEAE_NGCFT1_STANDALONE ON)
else()
set(SOLANACEAE_NGCFT1_STANDALONE OFF)
endif()
message("II SOLANACEAE_NGCFT1_STANDALONE " ${SOLANACEAE_NGCFT1_STANDALONE})
option(SOLANACEAE_NGCFT1_BUILD_PLUGINS "Build the solanaceae_ngcft1 plugins" ${SOLANACEAE_NGCFT1_BUILD_PLUGINS})
# TODO: move this stuff to src
########################################
add_library(solanaceae_ngcext
./solanaceae/ngc_ext/ngcext.hpp
./solanaceae/ngc_ext/ngcext.cpp
@ -46,7 +58,12 @@ target_link_libraries(solanaceae_ngcft1 PUBLIC
add_library(solanaceae_sha1_ngcft1
# hacky deps
./solanaceae/ngc_ft1_sha1/mio.hpp
./solanaceae/ngc_ft1_sha1/file_rw_mapped.hpp
./solanaceae/ngc_ft1_sha1/file2_mapped.hpp
./solanaceae/ngc_ft1_sha1/file_constructor.hpp
./solanaceae/ngc_ft1_sha1/file_constructor.cpp
./solanaceae/ngc_ft1_sha1/backends/sha1_mapped_filesystem.hpp
./solanaceae/ngc_ft1_sha1/backends/sha1_mapped_filesystem.cpp
./solanaceae/ngc_ft1_sha1/hash_utils.hpp
./solanaceae/ngc_ft1_sha1/hash_utils.cpp
@ -67,6 +84,21 @@ add_library(solanaceae_sha1_ngcft1
./solanaceae/ngc_ft1_sha1/participation.hpp
./solanaceae/ngc_ft1_sha1/participation.cpp
./solanaceae/ngc_ft1_sha1/file_inactivity_system.hpp
./solanaceae/ngc_ft1_sha1/file_inactivity_system.cpp
./solanaceae/ngc_ft1_sha1/re_announce_systems.hpp
./solanaceae/ngc_ft1_sha1/re_announce_systems.cpp
./solanaceae/ngc_ft1_sha1/chunk_picker_systems.hpp
./solanaceae/ngc_ft1_sha1/chunk_picker_systems.cpp
./solanaceae/ngc_ft1_sha1/transfer_stats_systems.hpp
./solanaceae/ngc_ft1_sha1/transfer_stats_systems.cpp
./solanaceae/ngc_ft1_sha1/sending_transfers.hpp
./solanaceae/ngc_ft1_sha1/sending_transfers.cpp
./solanaceae/ngc_ft1_sha1/receiving_transfers.hpp
./solanaceae/ngc_ft1_sha1/receiving_transfers.cpp
@ -85,8 +117,6 @@ target_link_libraries(solanaceae_sha1_ngcft1 PUBLIC
solanaceae_file2
)
########################################
option(SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING "Build the solanaceae_ngcft1_sha1 tests" OFF)
message("II SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING " ${SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING})
@ -104,3 +134,51 @@ if (SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING)
endif()
########################################
add_library(solanaceae_ngchs2
./solanaceae/ngc_hs2/serl.hpp
./solanaceae/ngc_hs2/ts_find_start.hpp
./solanaceae/ngc_hs2/ngc_hs2_sigma.hpp
./solanaceae/ngc_hs2/ngc_hs2_sigma.cpp
./solanaceae/ngc_hs2/ngc_hs2_rizzler.hpp
./solanaceae/ngc_hs2/ngc_hs2_rizzler.cpp
)
target_include_directories(solanaceae_ngchs2 PUBLIC .)
target_compile_features(solanaceae_ngchs2 PUBLIC cxx_std_17)
target_link_libraries(solanaceae_ngchs2 PUBLIC
solanaceae_ngcft1
solanaceae_sha1_ngcft1 # HACK: properly abstract filekind/-id
solanaceae_tox_contacts
solanaceae_message3
solanaceae_object_store
nlohmann_json::nlohmann_json
)
option(SOLANACEAE_NGCHS2_BUILD_TESTING "Build the solanaceae_ngchs2 tests" OFF)
message("II SOLANACEAE_NGCHS2_BUILD_TESTING " ${SOLANACEAE_NGCHS2_BUILD_TESTING})
if (SOLANACEAE_NGCHS2_BUILD_TESTING)
include(CTest)
add_executable(test_hs2_ts_binarysearch
./solanaceae/ngc_hs2/test_ts_binarysearch.cpp
)
target_link_libraries(test_hs2_ts_binarysearch PUBLIC
solanaceae_ngchs2
)
add_test(NAME test_hs2_ts_binarysearch COMMAND test_hs2_ts_binarysearch)
endif()
########################################
if (SOLANACEAE_NGCFT1_BUILD_PLUGINS)
add_subdirectory(./plugins)
endif()


@ -2,3 +2,13 @@ cmake_minimum_required(VERSION 3.9 FATAL_ERROR)
add_subdirectory(./sha1)
# we are running a custom msgpack serialization for hs2
if (NOT TARGET nlohmann_json::nlohmann_json)
FetchContent_Declare(json
URL https://github.com/nlohmann/json/releases/download/v3.11.3/json.tar.xz
URL_HASH SHA256=d6c65aca6b1ed68e7a182f4757257b107ae403032760ed6ef121c9d55e81757d
EXCLUDE_FROM_ALL
)
FetchContent_MakeAvailable(json)
endif()

plugins/CMakeLists.txt (new file, 33 lines)

@ -0,0 +1,33 @@
cmake_minimum_required(VERSION 3.9...3.24 FATAL_ERROR)
########################################
add_library(plugin_ngcft1 MODULE
./plugin_ngcft1.cpp
)
target_compile_features(plugin_ngcft1 PUBLIC cxx_std_17)
set_target_properties(plugin_ngcft1 PROPERTIES
C_VISIBILITY_PRESET hidden
)
target_compile_definitions(plugin_ngcft1 PUBLIC ENTT_API_IMPORT)
target_link_libraries(plugin_ngcft1 PUBLIC
solanaceae_plugin
solanaceae_ngcext
solanaceae_ngcft1
solanaceae_sha1_ngcft1
)
########################################
add_library(plugin_ngchs2 MODULE
./plugin_ngchs2.cpp
)
target_compile_features(plugin_ngchs2 PUBLIC cxx_std_17)
set_target_properties(plugin_ngchs2 PROPERTIES
C_VISIBILITY_PRESET hidden
)
target_compile_definitions(plugin_ngchs2 PUBLIC ENTT_API_IMPORT)
target_link_libraries(plugin_ngchs2 PUBLIC
solanaceae_plugin
solanaceae_ngchs2
)

plugins/plugin_ngcft1.cpp (new file, 83 lines)

@ -0,0 +1,83 @@
#include <solanaceae/plugin/solana_plugin_v1.h>
#include <solanaceae/contact/contact_store_i.hpp>
#include <solanaceae/ngc_ext/ngcext.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>
#include <solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp>
#include <entt/entt.hpp>
#include <entt/fwd.hpp>
#include <memory>
#include <iostream>
static std::unique_ptr<NGCEXTEventProvider> g_ngcextep = nullptr;
// TODO: make sep plug
static std::unique_ptr<NGCFT1> g_ngcft1 = nullptr;
static std::unique_ptr<SHA1_NGCFT1> g_sha1_ngcft1 = nullptr;
constexpr const char* plugin_name = "NGCEXT";
extern "C" {
SOLANA_PLUGIN_EXPORT const char* solana_plugin_get_name(void) {
return plugin_name;
}
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_get_version(void) {
return SOLANA_PLUGIN_VERSION;
}
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_start(struct SolanaAPI* solana_api) {
std::cout << "PLUGIN " << plugin_name << " START()\n";
if (solana_api == nullptr) {
return 1;
}
try {
auto* os = PLUG_RESOLVE_INSTANCE(ObjectStore2);
auto* tox_i = PLUG_RESOLVE_INSTANCE(ToxI);
auto* tox_event_provider_i = PLUG_RESOLVE_INSTANCE(ToxEventProviderI);
auto* cs = PLUG_RESOLVE_INSTANCE(ContactStore4I);
auto* rmm = PLUG_RESOLVE_INSTANCE(RegistryMessageModelI);
auto* tcm = PLUG_RESOLVE_INSTANCE(ToxContactModel2);
// static store, could be anywhere tho
// construct with fetched dependencies
g_ngcextep = std::make_unique<NGCEXTEventProvider>(*tox_i, *tox_event_provider_i);
g_ngcft1 = std::make_unique<NGCFT1>(*tox_i, *tox_event_provider_i, *g_ngcextep.get());
g_sha1_ngcft1 = std::make_unique<SHA1_NGCFT1>(*os, *cs, *rmm, *g_ngcft1.get(), *tcm, *tox_event_provider_i, *g_ngcextep.get());
// register types
PLUG_PROVIDE_INSTANCE(NGCEXTEventProviderI, plugin_name, g_ngcextep.get());
PLUG_PROVIDE_INSTANCE(NGCFT1EventProviderI, plugin_name, g_ngcft1.get());
PLUG_PROVIDE_INSTANCE(NGCFT1, plugin_name, g_ngcft1.get());
PLUG_PROVIDE_INSTANCE(SHA1_NGCFT1, plugin_name, g_sha1_ngcft1.get());
} catch (const ResolveException& e) {
std::cerr << "PLUGIN " << plugin_name << " " << e.what << "\n";
return 2;
}
return 0;
}
SOLANA_PLUGIN_EXPORT void solana_plugin_stop(void) {
std::cout << "PLUGIN " << plugin_name << " STOP()\n";
g_sha1_ngcft1.reset();
g_ngcft1.reset();
g_ngcextep.reset();
}
SOLANA_PLUGIN_EXPORT float solana_plugin_tick(float delta) {
const float ft_interval = g_ngcft1->iterate(delta);
const float sha_interval = g_sha1_ngcft1->iterate(delta);
return std::min<float>(ft_interval, sha_interval);
}
} // extern C

plugins/plugin_ngchs2.cpp (new file, 80 lines)

@ -0,0 +1,80 @@
#include <solanaceae/plugin/solana_plugin_v1.h>
#include <solanaceae/contact/contact_store_i.hpp>
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>
#include <solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp> // this hurts
#include <solanaceae/ngc_hs2/ngc_hs2_sigma.hpp>
#include <solanaceae/ngc_hs2/ngc_hs2_rizzler.hpp>
#include <entt/entt.hpp>
#include <entt/fwd.hpp>
#include <memory>
#include <iostream>
// https://youtu.be/OwT83dN82pc
static std::unique_ptr<NGCHS2Sigma> g_ngchs2s = nullptr;
static std::unique_ptr<NGCHS2Rizzler> g_ngchs2r = nullptr;
constexpr const char* plugin_name = "NGCHS2";
extern "C" {
SOLANA_PLUGIN_EXPORT const char* solana_plugin_get_name(void) {
return plugin_name;
}
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_get_version(void) {
return SOLANA_PLUGIN_VERSION;
}
SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_start(struct SolanaAPI* solana_api) {
std::cout << "PLUGIN " << plugin_name << " START()\n";
if (solana_api == nullptr) {
return 1;
}
try {
//auto* tox_i = PLUG_RESOLVE_INSTANCE(ToxI);
auto* tox_event_provider_i = PLUG_RESOLVE_INSTANCE(ToxEventProviderI);
auto* cs = PLUG_RESOLVE_INSTANCE(ContactStore4I);
auto* rmm = PLUG_RESOLVE_INSTANCE(RegistryMessageModelI);
auto* tcm = PLUG_RESOLVE_INSTANCE(ToxContactModel2);
auto* ngcft1 = PLUG_RESOLVE_INSTANCE(NGCFT1);
auto* sha1_ngcft1 = PLUG_RESOLVE_INSTANCE(SHA1_NGCFT1);
// static store, could be anywhere tho
// construct with fetched dependencies
g_ngchs2s = std::make_unique<NGCHS2Sigma>(*cs, *rmm, *tcm, *ngcft1);
g_ngchs2r = std::make_unique<NGCHS2Rizzler>(*cs, *rmm, *tcm, *ngcft1, *tox_event_provider_i, *sha1_ngcft1);
// register types
PLUG_PROVIDE_INSTANCE(NGCHS2Sigma, plugin_name, g_ngchs2s.get());
PLUG_PROVIDE_INSTANCE(NGCHS2Rizzler, plugin_name, g_ngchs2r.get());
} catch (const ResolveException& e) {
std::cerr << "PLUGIN " << plugin_name << " " << e.what << "\n";
return 2;
}
return 0;
}
SOLANA_PLUGIN_EXPORT void solana_plugin_stop(void) {
std::cout << "PLUGIN " << plugin_name << " STOP()\n";
g_ngchs2r.reset();
g_ngchs2s.reset();
}
SOLANA_PLUGIN_EXPORT float solana_plugin_tick(float delta) {
const float sigma_interval = g_ngchs2s->iterate(delta);
const float rizzler_interval = g_ngchs2r->iterate(delta);
return std::min<float>(sigma_interval, rizzler_interval);
}
} // extern C


@ -3,9 +3,11 @@
#include <iostream>
#include <cassert>
NGCEXTEventProvider::NGCEXTEventProvider(ToxI& t, ToxEventProviderI& tep) : _t(t), _tep(tep) {
_tep.subscribe(this, Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PACKET);
_tep.subscribe(this, Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PRIVATE_PACKET);
NGCEXTEventProvider::NGCEXTEventProvider(ToxI& t, ToxEventProviderI& tep) : _t(t), _tep(tep), _tep_sr(_tep.newSubRef(this)) {
_tep_sr
.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PACKET)
.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PRIVATE_PACKET)
;
}
#define _DATA_HAVE(x, error) if ((data_size - curser) < (x)) { error; }
@ -78,7 +80,7 @@ bool NGCEXTEventProvider::parse_ft1_init(
e.file_size = 0u;
_DATA_HAVE(sizeof(e.file_size), std::cerr << "NGCEXT: packet too small, missing file_size\n"; return false)
for (size_t i = 0; i < sizeof(e.file_size); i++, curser++) {
e.file_size |= size_t(data[curser]) << (i*8);
e.file_size |= uint64_t(data[curser]) << (i*8);
}
// - 1 byte (temporary_file_tf_id)
@ -121,6 +123,83 @@ bool NGCEXTEventProvider::parse_ft1_init_ack(
);
}
bool NGCEXTEventProvider::parse_ft1_init_ack_v2(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
if (!_private) {
std::cerr << "NGCEXT: ft1_init_ack_v2 cant be public\n";
return false;
}
Events::NGCEXT_ft1_init_ack e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 1 byte (temporary_file_tf_id)
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
e.transfer_id = data[curser++];
// - 2 byte (max_lossy_data_size)
if ((data_size - curser) >= sizeof(e.max_lossy_data_size)) {
e.max_lossy_data_size = 0;
for (size_t i = 0; i < sizeof(e.max_lossy_data_size); i++, curser++) {
e.max_lossy_data_size |= uint16_t(data[curser]) << (i*8);
}
} else {
e.max_lossy_data_size = 500-4; // default
}
return dispatch(
NGCEXT_Event::FT1_INIT_ACK,
e
);
}
bool NGCEXTEventProvider::parse_ft1_init_ack_v3(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
if (!_private) {
std::cerr << "NGCEXT: ft1_init_ack_v3 cant be public\n";
return false;
}
Events::NGCEXT_ft1_init_ack e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 1 byte (temporary_file_tf_id)
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
e.transfer_id = data[curser++];
// - 2 byte (max_lossy_data_size)
if ((data_size - curser) >= sizeof(e.max_lossy_data_size)) {
e.max_lossy_data_size = 0;
for (size_t i = 0; i < sizeof(e.max_lossy_data_size); i++, curser++) {
e.max_lossy_data_size |= uint16_t(data[curser]) << (i*8);
}
} else {
e.max_lossy_data_size = 500-4; // default
}
// - 1 byte (feature_flags)
if ((data_size - curser) >= sizeof(e.feature_flags)) {
e.feature_flags = data[curser++];
} else {
e.feature_flags = 0x00; // default
}
return dispatch(
NGCEXT_Event::FT1_INIT_ACK,
e
);
}
bool NGCEXTEventProvider::parse_ft1_data(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
@ -176,6 +255,7 @@ bool NGCEXTEventProvider::parse_ft1_data_ack(
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
e.transfer_id = data[curser++];
e.sequence_ids.reserve(std::max<int64_t>(data_size-curser, 1)/sizeof(uint16_t));
while (curser < data_size) {
_DATA_HAVE(sizeof(uint16_t), std::cerr << "NGCEXT: packet too small, missing seq_id\n"; return false)
uint16_t seq_id = data[curser++];
@ -227,41 +307,6 @@ bool NGCEXTEventProvider::parse_ft1_message(
);
}
bool NGCEXTEventProvider::parse_ft1_init_ack_v2(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
if (!_private) {
std::cerr << "NGCEXT: ft1_init_ack_v2 cant be public\n";
return false;
}
Events::NGCEXT_ft1_init_ack e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 1 byte (temporary_file_tf_id)
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
e.transfer_id = data[curser++];
// - 2 byte (max_lossy_data_size)
if ((data_size - curser) >= sizeof(e.max_lossy_data_size)) {
e.max_lossy_data_size = 0;
for (size_t i = 0; i < sizeof(e.max_lossy_data_size); i++, curser++) {
e.max_lossy_data_size |= uint16_t(data[curser]) << (i*8);
}
} else {
e.max_lossy_data_size = 500-4; // default
}
return dispatch(
NGCEXT_Event::FT1_INIT_ACK,
e
);
}
bool NGCEXTEventProvider::parse_ft1_have(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
@ -399,6 +444,52 @@ bool NGCEXTEventProvider::parse_ft1_have_all(
);
}
bool NGCEXTEventProvider::parse_ft1_init2(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
if (!_private) {
std::cerr << "NGCEXT: ft1_init2 cant be public\n";
return false;
}
Events::NGCEXT_ft1_init2 e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 4 byte (file_kind)
e.file_kind = 0u;
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
e.file_kind |= uint32_t(data[curser]) << (i*8);
}
// - 8 bytes (data size)
e.file_size = 0u;
_DATA_HAVE(sizeof(e.file_size), std::cerr << "NGCEXT: packet too small, missing file_size\n"; return false)
for (size_t i = 0; i < sizeof(e.file_size); i++, curser++) {
e.file_size |= uint64_t(data[curser]) << (i*8);
}
// - 1 byte (temporary_file_tf_id)
_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
e.transfer_id = data[curser++];
// - 1 byte feature flags
_DATA_HAVE(sizeof(e.feature_flags), std::cerr << "NGCEXT: packet too small, missing feature_flags\n"; return false)
e.feature_flags = data[curser++];
// - X bytes (file_kind dependent id, differnt sizes)
e.file_id = {data+curser, data+curser+(data_size-curser)};
return dispatch(
NGCEXT_Event::FT1_INIT2,
e
);
}
bool NGCEXTEventProvider::parse_pc1_announce(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
@ -443,7 +534,8 @@ bool NGCEXTEventProvider::handlePacket(
return parse_ft1_init(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_INIT_ACK:
//return parse_ft1_init_ack(group_number, peer_number, data+1, data_size-1, _private);
return parse_ft1_init_ack_v2(group_number, peer_number, data+1, data_size-1, _private);
//return parse_ft1_init_ack_v2(group_number, peer_number, data+1, data_size-1, _private);
return parse_ft1_init_ack_v3(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_DATA:
return parse_ft1_data(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_DATA_ACK:
@ -456,6 +548,8 @@ bool NGCEXTEventProvider::handlePacket(
return parse_ft1_bitset(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_HAVE_ALL:
return parse_ft1_have_all(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_INIT2:
return parse_ft1_init2(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::PC1_ANNOUNCE:
return parse_pc1_announce(group_number, peer_number, data+1, data_size-1, _private);
default:
@ -699,6 +793,39 @@ bool NGCEXTEventProvider::send_ft1_have_all(
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCEXTEventProvider::send_ft1_init2(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
uint64_t file_size,
uint8_t transfer_id,
uint8_t feature_flags,
const uint8_t* file_id, size_t file_id_size
) {
// - 1 byte packet id
// - 4 byte (file_kind)
// - 8 bytes (data size)
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
// - 1 byte (feature_flags)
// - X bytes (file_kind dependent id, differnt sizes)
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT2));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < sizeof(file_size); i++) {
pkg.push_back((file_size>>(i*8)) & 0xff);
}
pkg.push_back(transfer_id);
pkg.push_back(feature_flags);
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
static std::vector<uint8_t> build_pc1_announce(const uint8_t* id_data, size_t id_size) {
// - 1 byte packet id
// - X bytes (id, differnt sizes)

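For reference, the FT1_INIT2 wire format introduced above (send_ft1_init2 / parse_ft1_init2) is a flat little-endian byte sequence. The following is a minimal standalone sketch of the encoder; the function name is illustrative, and the concrete packet id byte comes from NGCEXT_Event::FT1_INIT2 at runtime, so it is passed in as a parameter here.

#include <cstdint>
#include <cstddef>
#include <vector>

// Sketch of the FT1_INIT2 body layout described in send_ft1_init2 above:
// - 1 byte  packet id (NGCEXT_Event::FT1_INIT2)
// - 4 bytes file_kind (little endian)
// - 8 bytes file_size (little endian)
// - 1 byte  transfer_id
// - 1 byte  feature_flags (0x01 advertises zstd compression)
// - X bytes file_id (file_kind dependent size, rest of the packet)
std::vector<uint8_t> build_ft1_init2(
    uint8_t packet_id,
    uint32_t file_kind,
    uint64_t file_size,
    uint8_t transfer_id,
    uint8_t feature_flags,
    const uint8_t* file_id, size_t file_id_size
) {
    std::vector<uint8_t> pkg;
    pkg.reserve(1 + 4 + 8 + 1 + 1 + file_id_size);
    pkg.push_back(packet_id);
    for (size_t i = 0; i < sizeof(file_kind); i++) {
        pkg.push_back((file_kind >> (i*8)) & 0xff);
    }
    for (size_t i = 0; i < sizeof(file_size); i++) {
        pkg.push_back((file_size >> (i*8)) & 0xff);
    }
    pkg.push_back(transfer_id);
    pkg.push_back(feature_flags);
    if (file_id != nullptr && file_id_size > 0) {
        pkg.insert(pkg.end(), file_id, file_id + file_id_size);
    }
    return pkg;
}

The parser in parse_ft1_init2 walks the same layout and treats everything after the two flag bytes as the file_kind dependent file_id.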

@ -8,7 +8,6 @@
#include <solanaceae/toxcore/tox_key.hpp>
#include <array>
#include <vector>
namespace Events {
@ -31,6 +30,7 @@ namespace Events {
uint32_t peer_number;
// respond to a request with 0 or more message ids, sorted by newest first
// - peer_key bytes (the msg_ids are from)
ToxKey peer_key;
@ -48,6 +48,7 @@ namespace Events {
uint32_t peer_number;
// request the other side to initiate a FT
// - 4 byte (file_kind)
uint32_t file_kind;
@ -55,11 +56,13 @@ namespace Events {
std::vector<uint8_t> file_id;
};
// DEPRECATED: use FT1_INIT2 instead
struct NGCEXT_ft1_init {
uint32_t group_number;
uint32_t peer_number;
// tell the other side you want to start a FT
// - 4 byte (file_kind)
uint32_t file_kind;
@ -82,6 +85,11 @@ namespace Events {
// - 2 byte (self_max_lossy_data_size)
uint16_t max_lossy_data_size;
// - 1 byte feature flags
// - 0x01 advertised zstd compression
// - 0x02
uint8_t feature_flags;
};
struct NGCEXT_ft1_data {
@ -89,6 +97,7 @@ namespace Events {
uint32_t peer_number;
// data fragment
// - 1 byte (temporary_file_tf_id)
uint8_t transfer_id;
@ -173,6 +182,30 @@ namespace Events {
std::vector<uint8_t> file_id;
};
struct NGCEXT_ft1_init2 {
uint32_t group_number;
uint32_t peer_number;
// tell the other side you want to start a FT
// - 4 byte (file_kind)
uint32_t file_kind;
// - 8 bytes (data size)
uint64_t file_size;
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
uint8_t transfer_id;
// - 1 byte feature flags
// - 0x01 advertise zstd compression
// - 0x02
uint8_t feature_flags;
// - X bytes (file_kind dependent id, differnt sizes)
std::vector<uint8_t> file_id;
};
struct NGCEXT_pc1_announce {
uint32_t group_number;
uint32_t peer_number;
@ -207,6 +240,7 @@ enum class NGCEXT_Event : uint8_t {
// tell the other side you want to start a FT
// TODO: might use id layer instead. with it, it would look similar to friends_ft
// DEPRECATED: use FT1_INIT2 instead
// - 4 byte (file_kind)
// - 8 bytes (data size, can be 0 if unknown, BUT files have to be atleast 1 byte)
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
@ -216,7 +250,8 @@ enum class NGCEXT_Event : uint8_t {
// acknowlage init (like an accept)
// like tox ft control continue
// - 1 byte (transfer_id)
// - 2 byte (self_max_lossy_data_size) (optional since v2)
// - 2 byte (self_max_lossy_data_size) (optimal since v2)
// - 1 byte feature flags (optimal since v3, requires prev)
FT1_INIT_ACK,
// TODO: init deny, speed up non acceptance
@ -274,8 +309,18 @@ enum class NGCEXT_Event : uint8_t {
// - X bytes (file_kind dependent id, differnt sizes)
FT1_HAVE_ALL,
// tell the other side you want to start a FT
// update: added feature flags (compression)
// - 4 byte (file_kind)
// - 8 bytes (data size, can be 0 if unknown, BUT files have to be atleast 1 byte)
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultainious fts)
// - 1 byte feature flags
// - X bytes (file_kind dependent id, differnt sizes)
FT1_INIT2,
// TODO: FT1_IDONTHAVE, tell a peer you no longer have said chunk
// TODO: FT1_REJECT, tell a peer you wont fulfil the request
// TODO: FT1_CANCEL, tell a peer you stop the transfer
// tell another peer that you are participating in X
// you can reply with PC1_ANNOUNCE, to let the other side know, you too are participating in X
@ -284,6 +329,9 @@ enum class NGCEXT_Event : uint8_t {
// - x bytes (id, different sizes)
PC1_ANNOUNCE = 0x80 | 32u,
// uses sub splitting
P2PRNG = 0x80 | 38u,
MAX
};
@ -300,6 +348,7 @@ struct NGCEXTEventI {
virtual bool onEvent(const Events::NGCEXT_ft1_have&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_bitset&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_have_all&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_init2&) { return false; }
virtual bool onEvent(const Events::NGCEXT_pc1_announce&) { return false; }
};
@ -308,6 +357,7 @@ using NGCEXTEventProviderI = EventProviderI<NGCEXTEventI>;
class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
ToxI& _t;
ToxEventProviderI& _tep;
ToxEventProviderI::SubscriptionReference _tep_sr;
public:
NGCEXTEventProvider(ToxI& t, ToxEventProviderI& tep);
@ -343,6 +393,18 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
bool _private
);
bool parse_ft1_init_ack_v2(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool parse_ft1_init_ack_v3(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool parse_ft1_data(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
@ -361,12 +423,6 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
bool _private
);
bool parse_ft1_init_ack_v2(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool parse_ft1_have(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
@ -385,6 +441,12 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
bool _private
);
bool parse_ft1_init2(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool parse_pc1_announce(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
@ -461,6 +523,15 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
const uint8_t* file_id, size_t file_id_size
);
bool send_ft1_init2(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
uint64_t file_size,
uint8_t transfer_id,
uint8_t feature_flags,
const uint8_t* file_id, size_t file_id_size
);
bool send_pc1_announce(
uint32_t group_number, uint32_t peer_number,
const uint8_t* id_data, size_t id_size

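As the comments above note, the init_ack packet stays backward compatible by making trailing fields optional: max_lossy_data_size only exists since v2 and feature_flags only since v3 (and requires the v2 field). A condensed sketch of that optional-trailing-field parsing, mirroring parse_ft1_init_ack_v3 from ngcext.cpp with simplified types and without the event dispatch:

#include <cstdint>
#include <cstddef>
#include <optional>

struct InitAckV3 {
    uint8_t  transfer_id {0};
    uint16_t max_lossy_data_size {500-4}; // default when the v2 field is absent
    uint8_t  feature_flags {0x00};        // default when the v3 field is absent
};

// Required fields fail the parse when missing; optional trailing fields fall
// back to their defaults, so older senders are still accepted.
std::optional<InitAckV3> parse_init_ack_v3(const uint8_t* data, size_t data_size) {
    InitAckV3 e;
    size_t cursor = 0;

    // - 1 byte (transfer_id), required
    if (data_size - cursor < sizeof(e.transfer_id)) {
        return std::nullopt; // packet too small
    }
    e.transfer_id = data[cursor++];

    // - 2 byte (max_lossy_data_size), little endian, optional since v2
    if (data_size - cursor >= sizeof(e.max_lossy_data_size)) {
        e.max_lossy_data_size = 0;
        for (size_t i = 0; i < sizeof(e.max_lossy_data_size); i++, cursor++) {
            e.max_lossy_data_size |= uint16_t(data[cursor]) << (i*8);
        }
    }

    // - 1 byte (feature_flags), optional since v3, requires the v2 field
    if (data_size - cursor >= sizeof(e.feature_flags)) {
        e.feature_flags = data[cursor++];
    }

    return e;
}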

@ -5,15 +5,6 @@
#include <cstddef>
// TODO: refactor, more state tracking in ccai and seperate into flow and congestion algos
inline bool isSkipSeqID(const std::pair<uint8_t, uint16_t>& a, const std::pair<uint8_t, uint16_t>& b) {
// this is not perfect, would need more ft id based history
if (a.first != b.first) {
return false; // we dont know
} else {
return a.second+1 != b.second;
}
}
struct CCAI {
public: // config
using SeqIDType = std::pair<uint8_t, uint16_t>; // tf_id, seq_id


@ -7,7 +7,9 @@ void CUBIC::updateReductionTimer(float time_delta) {
const auto now {getTimeNow()};
// only keep updating while the cca interaction is not too long ago
if (now - _time_point_last_update <= getCurrentDelay()*4.f) {
// or simply when there are packets in flight
// (you need space to resend timedout, which still use up pipe space)
if (!_in_flight.empty() || now - _time_point_last_update <= getCurrentDelay()*4.f) {
_time_since_reduction += time_delta;
}
}
@ -86,12 +88,14 @@ int64_t CUBIC::canSend(float time_delta) {
updateReductionTimer(time_delta);
if (fspace_pkgs == 0u) {
std::cerr << "CUBIC: flow said 0\n";
return 0u;
}
const auto window = getCWnD();
int64_t cspace_bytes = window - _in_flight_bytes;
if (cspace_bytes < MAXIMUM_SEGMENT_DATA_SIZE) {
//std::cerr << "CUBIC: cspace < seg size\n";
return 0u;
}

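The change above widens the condition for advancing CUBIC's reduction timer: it previously only ran while the last cca interaction was within 4x the current delay, and now also runs while any packets are in flight, because timed-out packets still need pipe space to be resent. A stripped-down sketch of just that guard, with a simplified stand-in for the congestion state (the real class tracks much more):

#include <cstdint>
#include <chrono>
#include <map>
#include <utility>

struct ReductionTimerSketch {
    // seq id (tf_id, seq_id) -> time the packet was sent
    std::map<std::pair<uint8_t, uint16_t>, double> in_flight;
    double time_point_last_update {0.0};
    double time_since_reduction {0.0};

    double getCurrentDelay(void) const { return 0.05; } // placeholder rtt estimate
    double getTimeNow(void) const {
        using namespace std::chrono;
        return duration<double>(steady_clock::now().time_since_epoch()).count();
    }

    void updateReductionTimer(double time_delta) {
        const double now = getTimeNow();
        // keep the timer running while the cca was touched recently,
        // or while packets are still in flight (resends still use up pipe space)
        if (!in_flight.empty() || now - time_point_last_update <= getCurrentDelay()*4.0) {
            time_since_reduction += time_delta;
        }
    }
};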

@ -7,7 +7,9 @@
float FlowOnly::getCurrentDelay(void) const {
// below 1ms is useless
return std::clamp(_rtt_ema, 0.001f, RTT_MAX);
//return std::clamp(_rtt_ema, 0.001f, RTT_MAX);
// the current iterate rate min is 5ms
return std::clamp(_rtt_ema, 0.005f, RTT_MAX);
}
void FlowOnly::addRTT(float new_delay) {
@ -65,7 +67,8 @@ float FlowOnly::getWindow(void) const {
int64_t FlowOnly::canSend(float time_delta) {
if (_in_flight.empty()) {
assert(_in_flight_bytes == 0);
return MAXIMUM_SEGMENT_DATA_SIZE;
// TODO: should we really exit early here??
return 2*MAXIMUM_SEGMENT_DATA_SIZE;
}
updateWindow();
@ -85,6 +88,7 @@ int64_t FlowOnly::canSend(float time_delta) {
std::vector<FlowOnly::SeqIDType> FlowOnly::getTimeouts(void) const {
std::vector<SeqIDType> list;
list.reserve(_in_flight.size()/3); // we dont know, so we just guess
// after 3 rtt delay, we trigger timeout
const auto now_adjusted = getTimeNow() - getCurrentDelay()*3.f;

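The FlowOnly changes above pin down the timeout policy: the delay estimate is clamped to at least 5 ms (the current minimum iterate interval), and getTimeouts reports everything unacked for longer than three times that delay. The loop body is not part of this hunk, so the sketch below assumes _in_flight maps a sequence id to its send timestamp:

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

using SeqIDType = std::pair<uint8_t, uint16_t>; // tf_id, seq_id (see cca.hpp)

// Collect sequence ids that have been unacked for more than 3x the delay estimate.
// The real FlowOnly::_in_flight layout is not shown in this diff; a map from
// seq id to send time is assumed here for illustration.
std::vector<SeqIDType> get_timeouts(
    const std::map<SeqIDType, float>& in_flight, // seq id -> time sent
    float time_now,
    float current_delay // clamped to [0.005f, RTT_MAX] by getCurrentDelay() above
) {
    std::vector<SeqIDType> list;
    list.reserve(in_flight.size()/3); // a guess, mirrors the reserve added above

    // after 3 rtt delay, we trigger timeout
    const float now_adjusted = time_now - current_delay*3.f;
    for (const auto& [seq, time_sent] : in_flight) {
        if (time_sent < now_adjusted) {
            list.push_back(seq);
        }
    }
    return list;
}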

@ -12,7 +12,7 @@ struct FlowOnly : public CCAI {
public: // config
static constexpr float RTT_EMA_ALPHA = 0.001f; // might need change over time
static constexpr float RTT_UP_MAX = 3.0f; // how much larger a delay can be to be taken into account
static constexpr float RTT_MAX = 2.f; // 2 sec is probably too much
static constexpr float RTT_MAX = 2.f; // maybe larger for tunneled connections
protected:
// initialize to low value, will get corrected very fast


@ -14,6 +14,15 @@
// https://youtu.be/0HRwNSA-JYM
static bool isSkipSeqID(const std::pair<uint8_t, uint16_t>& a, const std::pair<uint8_t, uint16_t>& b) {
// this is not perfect, would need more ft id based history
if (a.first != b.first) {
return false; // we dont know
} else {
return a.second+1 != b.second;
}
}
inline constexpr bool PLOTTING = false;
LEDBAT::LEDBAT(size_t maximum_segment_data_size) : CCAI(maximum_segment_data_size) {

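isSkipSeqID, moved here from cca.hpp, flags a gap between two consecutively observed sequence ids of the same transfer; across different transfer ids it stays silent because no ordering is known there. A self-contained usage example of the helper as defined above:

#include <cstdint>
#include <cassert>
#include <utility>

// Same shape as the helper above: only within one transfer (same tf_id) can a
// missing sequence id be detected.
static bool isSkipSeqID(const std::pair<uint8_t, uint16_t>& a, const std::pair<uint8_t, uint16_t>& b) {
    if (a.first != b.first) {
        return false; // different transfer, we dont know
    }
    return a.second+1 != b.second;
}

int main(void) {
    assert(!isSkipSeqID({0, 4}, {0, 5})); // in order, same transfer
    assert( isSkipSeqID({0, 4}, {0, 6})); // seq 5 was skipped -> likely loss
    assert(!isSkipSeqID({0, 4}, {1, 9})); // different transfer, no statement
    return 0;
}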

@ -40,22 +40,26 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
} else {
// timed out, resend
std::cerr << "NGCFT1 warning: ft init timed out, resending\n";
//sendPKG_FT1_INIT(group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
_neep.send_ft1_init(group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
tf.inits_sent++;
tf.time_since_activity = 0.f;
}
}
//break;
return;
break;
case State::FINISHING: // we still have unacked packets
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
if (can_packet_size >= data.size() && timeouts_set.count({idx, id})) {
_neep.send_ft1_data(group_number, peer_number, idx, id, data.data(), data.size());
peer.cca->onLoss({idx, id}, false);
time_since_activity = 0.f;
timeouts_set.erase({idx, id});
can_packet_size -= data.size();
if (timeouts_set.count({idx, id})) {
if (can_packet_size >= data.size()) {
_neep.send_ft1_data(group_number, peer_number, idx, id, data.data(), data.size());
peer.cca->onLoss({idx, id}, false);
time_since_activity = 0.f;
timeouts_set.erase({idx, id});
can_packet_size -= data.size();
} else {
#if 0 // too spammy
std::cerr << "NGCFT1 warning: no space to resend timedout\n";
#endif
}
}
});
if (tf.time_since_activity >= sending_give_up_after) {
@ -122,7 +126,7 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
size_t chunk_size = std::min<size_t>({
peer.cca->MAXIMUM_SEGMENT_DATA_SIZE,
static_cast<size_t>(can_packet_size),
tf.file_size - tf.file_size_current
static_cast<size_t>(tf.file_size - tf.file_size_current),
});
if (chunk_size == 0) {
tf.state = State::FINISHING;
@ -139,7 +143,7 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
group_number, peer_number,
static_cast<uint8_t>(idx),
tf.file_size_current,
new_data.data(), new_data.size(),
new_data.data(), static_cast<uint32_t>(new_data.size()),
}
);
@ -167,7 +171,7 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
}
}
void NGCFT1::iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer) {
bool NGCFT1::iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer) {
if (peer.cca) {
auto timeouts = peer.cca->getTimeouts();
std::set<CCAI::SeqIDType> timeouts_set{timeouts.cbegin(), timeouts.cend()};
@ -198,7 +202,31 @@ void NGCFT1::iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_
}
}
// TODO: receiving tranfers?
bool recv_activity {false};
for (size_t idx = 0; idx < peer.recv_transfers.size(); idx++) {
if (!peer.recv_transfers.at(idx).has_value()) {
continue;
}
auto& transfer = peer.recv_transfers.at(idx).value();
// proper switch case?
if (transfer.state == Group::Peer::RecvTransfer::State::FINISHING) {
transfer.timer -= time_delta;
if (transfer.timer <= 0.f) {
peer.recv_transfers.at(idx).reset();
}
recv_activity = true; // count as activity, not sure we need this
} else {
transfer.timer += time_delta;
if (transfer.timer < 0.5f) {
// back off when no activity
recv_activity = true;
}
}
}
return peer.active_send_transfers > 0 || recv_activity;
}
const CCAI* NGCFT1::getPeerCCA(
@ -228,51 +256,77 @@ NGCFT1::NGCFT1(
ToxI& t,
ToxEventProviderI& tep,
NGCEXTEventProvider& neep
) : _t(t), _tep(tep), _neep(neep)
) : _t(t), _tep(tep), _tep_sr(_tep.newSubRef(this)), _neep(neep), _neep_sr(_neep.newSubRef(this))
{
_neep.subscribe(this, NGCEXT_Event::FT1_REQUEST);
_neep.subscribe(this, NGCEXT_Event::FT1_INIT);
_neep.subscribe(this, NGCEXT_Event::FT1_INIT_ACK);
_neep.subscribe(this, NGCEXT_Event::FT1_DATA);
_neep.subscribe(this, NGCEXT_Event::FT1_DATA_ACK);
_neep.subscribe(this, NGCEXT_Event::FT1_MESSAGE);
_neep_sr
.subscribe(NGCEXT_Event::FT1_REQUEST)
.subscribe(NGCEXT_Event::FT1_INIT)
.subscribe(NGCEXT_Event::FT1_INIT_ACK)
.subscribe(NGCEXT_Event::FT1_DATA)
.subscribe(NGCEXT_Event::FT1_DATA_ACK)
.subscribe(NGCEXT_Event::FT1_MESSAGE)
;
_tep.subscribe(this, Tox_Event_Type::TOX_EVENT_GROUP_PEER_EXIT);
_tep_sr.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_PEER_EXIT);
}
float NGCFT1::iterate(float time_delta) {
_time_since_activity += time_delta;
bool transfer_in_progress {false};
bool transfer_activity {false};
for (auto& [group_number, group] : groups) {
for (auto& [peer_number, peer] : group.peers) {
iteratePeer(time_delta, group_number, peer_number, peer);
transfer_activity = transfer_activity || iteratePeer(time_delta, group_number, peer_number, peer);
#if 0
// find any active transfer
if (!transfer_in_progress) {
if (!transfer_activity) {
for (const auto& t : peer.send_transfers) {
if (t.has_value()) {
transfer_in_progress = true;
transfer_activity = true;
#if 0
std::cout
<< "--- active send transfer "
<< group_number << ":" << peer_number
<< "(" << std::get<0>(_t.toxGroupPeerGetName(group_number, peer_number)).value_or("<unk>") << ")"
<< " fk:" << t.value().file_kind
<< " state:" << (int)t.value().state
<< " tsa:" << t.value().time_since_activity
<< "\n"
;
#endif
break;
}
}
}
if (!transfer_in_progress) {
if (!transfer_activity) {
for (const auto& t : peer.recv_transfers) {
if (t.has_value()) {
transfer_in_progress = true;
transfer_activity = true;
#if 0
std::cout
<< "--- active recv transfer "
<< group_number << ":" << peer_number
<< "(" << std::get<0>(_t.toxGroupPeerGetName(group_number, peer_number)).value_or("<unk>") << ")"
<< " fk:" << t.value().file_kind
<< " state:" << (int)t.value().state
<< " ft:" << t.value().finishing_timer
<< "\n"
;
#endif
break;
}
}
}
#endif
}
}
if (transfer_in_progress) {
if (transfer_activity) {
_time_since_activity = 0.f;
// ~15ms for up to 1mb/s
// ~5ms for up to 4mb/s
return 0.005f; // 5ms
} else if (_time_since_activity < 0.5f) {
} else if (_time_since_activity < 1.0f) {
// bc of temporality
return 0.025f;
} else {
@ -280,21 +334,21 @@ float NGCFT1::iterate(float time_delta) {
}
}
void NGCFT1::NGC_FT1_send_request_private(
bool NGCFT1::NGC_FT1_send_request_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
const uint8_t* file_id, uint32_t file_id_size
) {
// TODO: error check
_neep.send_ft1_request(group_number, peer_number, file_kind, file_id, file_id_size);
return _neep.send_ft1_request(group_number, peer_number, file_kind, file_id, file_id_size);
}
bool NGCFT1::NGC_FT1_send_init_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size,
size_t file_size,
uint8_t* transfer_id
const uint8_t* file_id, uint32_t file_id_size,
uint64_t file_size,
uint8_t* transfer_id,
bool can_compress
) {
if (std::get<0>(_t.toxGroupPeerGetConnectionStatus(group_number, peer_number)).value_or(TOX_CONNECTION_NONE) == TOX_CONNECTION_NONE) {
std::cerr << "NGCFT1 error: cant init ft, peer offline\n";
@ -324,6 +378,8 @@ bool NGCFT1::NGC_FT1_send_init_private(
std::cerr << "NGCFT1 error: cant init ft, no free transfer slot\n";
return false;
}
idx = i;
}
// TODO: check return value
@ -351,7 +407,7 @@ bool NGCFT1::NGC_FT1_send_message_public(
uint32_t group_number,
uint32_t& message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
const uint8_t* file_id, uint32_t file_id_size
) {
// create msg_id
message_id = randombytes_random();
@ -418,7 +474,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_request& e) {
Events::NGCFT1_recv_request{
e.group_number, e.peer_number,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), e.file_id.size()
e.file_id.data(), static_cast<uint32_t>(e.file_id.size())
}
);
}
@ -427,49 +483,21 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init& e) {
//#if !NDEBUG
std::cout << "NGCFT1: got FT1_INIT fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " [" << bin2hex(e.file_id) << "]\n";
//#endif
bool accept = false;
dispatch(
NGCFT1_Event::recv_init,
Events::NGCFT1_recv_init{
e.group_number, e.peer_number,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), e.file_id.size(),
e.transfer_id,
e.file_size,
accept
}
);
if (!accept) {
std::cout << "NGCFT1: rejected init\n";
return true; // return true?
}
_neep.send_ft1_init_ack(e.group_number, e.peer_number, e.transfer_id);
std::cout << "NGCFT1: accepted init\n";
auto& peer = groups[e.group_number].peers[e.peer_number];
if (peer.recv_transfers[e.transfer_id].has_value()) {
std::cerr << "NGCFT1 warning: overwriting existing recv_transfer " << int(e.transfer_id) << ", other peer started new transfer on preexising\n";
}
peer.recv_transfers[e.transfer_id] = Group::Peer::RecvTransfer{
// HACK: simply forward to init2 hanlder
return onEvent(Events::NGCEXT_ft1_init2{
e.group_number,
e.peer_number,
e.file_kind,
e.file_id,
Group::Peer::RecvTransfer::State::INITED,
e.file_size,
0u,
{} // rsb
};
return true;
e.transfer_id,
0x00, // non set
e.file_id, // sadly a copy, wont matter in the future
});
}
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {
//#if !NDEBUG
std::cout << "NGCFT1: got FT1_INIT_ACK mds:" << e.max_lossy_data_size << "\n";
std::cout << "NGCFT1: got FT1_INIT_ACK " << e.group_number << ":" << e.peer_number << " mds:" << e.max_lossy_data_size << "\n";
//#endif
// we now should start sending data
@ -493,6 +521,11 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {
return true;
}
if (e.max_lossy_data_size < 16) {
std::cerr << "NGCFT1 error: init_ack max_lossy_data_size is less than 16 bytes\n";
return true;
}
// negotiated packet_data_size
const auto negotiated_packet_data_size = std::min<uint32_t>(e.max_lossy_data_size, _t.toxGroupMaxCustomLossyPacketLength()-4);
// TODO: reset cca with new pkg size
@ -509,6 +542,8 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {
//peer.cca = std::make_unique<LEDBAT>(peer.max_packet_data_size);
//peer.cca = std::make_unique<FlowOnly>(peer.max_packet_data_size);
//peer.cca->max_byterate_allowed = 1.f *1024*1024;
} else {
std::cerr << "NGCFT1: reusing cca. rtt:" << peer.cca->getCurrentDelay() << " w:" << peer.cca->getWindow() << " ifc:" << peer.cca->inFlightCount() << "\n";
}
// iterate will now call NGC_FT1_send_data_cb
@ -520,7 +555,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
#if !NDEBUG
//std::cout << "NGCFT1: got FT1_DATA\n";
//std::cout << "NGCFT1: got FT1_DATA " << e.sequence_id << "\n";
#endif
if (e.data.empty()) {
@ -540,6 +575,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
}
auto& transfer = peer.recv_transfers[e.transfer_id].value();
transfer.timer = 0.f;
// do reassembly, ignore dups
transfer.rsb.add(e.sequence_id, std::vector<uint8_t>(e.data)); // TODO: ugly explicit copy for what should just be a move
@ -555,7 +591,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
e.group_number, e.peer_number,
e.transfer_id,
transfer.file_size_current,
data.data(), data.size()
data.data(), static_cast<uint32_t>(data.size())
}
);
@ -573,8 +609,14 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
if (transfer.file_size_current == transfer.file_size) {
// TODO: set all data received, and clean up
//transfer.state = Group::Peer::RecvTransfer::State::RECV;
// all data received
transfer.state = Group::Peer::RecvTransfer::State::FINISHING;
// TODO: keep around for remote timeout + delay + offset, so we can be sure all acks where received
// or implement a dedicated finished that needs to be acked
//transfer.timer = 0.75f; // TODO: we are receiving, we dont know delay
transfer.timer = FlowOnly::RTT_MAX;
dispatch(
NGCFT1_Event::recv_done,
Events::NGCFT1_recv_done{
@ -582,11 +624,6 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
e.transfer_id
}
);
// delete transfer
// TODO: keep around for remote timeout + delay + offset, so we can be sure all acks where received
// or implement a dedicated finished that needs to be acked
peer.recv_transfers[e.transfer_id].reset();
}
return true;
@ -605,7 +642,8 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
Group::Peer& peer = groups[e.group_number].peers[e.peer_number];
if (!peer.send_transfers[e.transfer_id].has_value()) {
// we delete directly, packets might still be in flight (in practice they are when ce)
//std::cerr << "NGCFT1 warning: data_ack for unknown transfer\n";
// update: we no longer delete directly, but its kinda hacky
std::cerr << "NGCFT1 warning: data_ack for unknown transfer\n";
return true;
}
@ -621,6 +659,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
{
std::vector<CCAI::SeqIDType> seqs;
seqs.reserve(e.sequence_ids.size());
for (const auto it : e.sequence_ids) {
// TODO: improve this o.o
seqs.push_back({e.transfer_id, it});
@ -657,11 +696,55 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_message& e) {
e.group_number, e.peer_number,
e.message_id,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), e.file_id.size()
e.file_id.data(), static_cast<uint32_t>(e.file_id.size())
}
);
}
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init2& e) {
//#if !NDEBUG
std::cout << "NGCFT1: got FT1_INIT2 fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " ff:" << int(e.feature_flags) << " [" << bin2hex(e.file_id) << "]\n";
//#endif
bool accept = false;
dispatch(
NGCFT1_Event::recv_init,
Events::NGCFT1_recv_init{
e.group_number, e.peer_number,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), static_cast<uint32_t>(e.file_id.size()),
e.transfer_id,
e.file_size,
accept
}
);
if (!accept) {
std::cout << "NGCFT1: rejected init2\n";
return true; // return true?
}
_neep.send_ft1_init_ack(e.group_number, e.peer_number, e.transfer_id);
std::cout << "NGCFT1: accepted init2\n";
auto& peer = groups[e.group_number].peers[e.peer_number];
if (peer.recv_transfers[e.transfer_id].has_value()) {
std::cerr << "NGCFT1 warning: overwriting existing recv_transfer " << int(e.transfer_id) << ", other peer started new transfer on preexising\n";
}
peer.recv_transfers[e.transfer_id] = Group::Peer::RecvTransfer{
e.file_kind,
e.file_id,
Group::Peer::RecvTransfer::State::INITED,
e.file_size,
0u,
{} // rsb
};
return true;
}
bool NGCFT1::onToxEvent(const Tox_Event_Group_Peer_Exit* e) {
const auto group_number = tox_event_group_peer_exit_get_group_number(e);
const auto peer_number = tox_event_group_peer_exit_get_peer_id(e);

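The receive-side rework above replaces the immediate delete after recv_done with a FINISHING grace period: the transfer is kept around for RTT_MAX seconds so late or duplicate data packets still match a known transfer, and iteratePeer now reports receive activity so the fast iterate interval is kept while data is arriving. Condensed, the per-transfer bookkeeping amounts to roughly the sketch below (RecvTransfer is reduced to the fields relevant here; the 256-slot array mirroring the 1-byte transfer_id space is an assumption):

#include <array>
#include <optional>

// Simplified stand-in for Group::Peer::RecvTransfer (see ngcft1.hpp).
struct RecvTransferSketch {
    enum class State { INITED, RECV, FINISHING } state {State::INITED};
    float timer {0.f}; // INITED/RECV: time since last activity; FINISHING: time left
};

// Returns true if any receiving transfer should keep the iterate interval low.
bool tick_recv_transfers(std::array<std::optional<RecvTransferSketch>, 256>& transfers, float time_delta) {
    bool recv_activity = false;
    for (auto& slot : transfers) {
        if (!slot.has_value()) {
            continue;
        }
        auto& transfer = slot.value();
        if (transfer.state == RecvTransferSketch::State::FINISHING) {
            transfer.timer -= time_delta;
            if (transfer.timer <= 0.f) {
                slot.reset(); // grace period over, forget the transfer
            }
            recv_activity = true;
        } else {
            transfer.timer += time_delta;
            if (transfer.timer < 0.5f) {
                recv_activity = true; // data arrived recently
            }
        }
    }
    return recv_activity;
}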

@ -6,6 +6,7 @@
#include <solanaceae/toxcore/tox_interface.hpp>
#include <solanaceae/ngc_ext/ngcext.hpp>
#include "./cca.hpp"
#include "./rcv_buf.hpp"
@ -28,7 +29,7 @@ namespace Events {
NGCFT1_file_kind file_kind;
const uint8_t* file_id;
size_t file_id_size;
uint32_t file_id_size;
};
struct NGCFT1_recv_init {
@ -38,10 +39,10 @@ namespace Events {
NGCFT1_file_kind file_kind;
const uint8_t* file_id;
size_t file_id_size;
uint32_t file_id_size;
const uint8_t transfer_id;
const size_t file_size;
const uint64_t file_size;
// return true to accept, false to deny
bool& accept;
@ -53,9 +54,9 @@ namespace Events {
uint8_t transfer_id;
size_t data_offset;
uint64_t data_offset;
const uint8_t* data;
size_t data_size;
uint32_t data_size;
};
// request to fill data_size bytes into data
@ -65,9 +66,9 @@ namespace Events {
uint8_t transfer_id;
size_t data_offset;
uint64_t data_offset;
uint8_t* data;
size_t data_size;
uint32_t data_size;
};
struct NGCFT1_recv_done {
@ -95,7 +96,7 @@ namespace Events {
NGCFT1_file_kind file_kind;
const uint8_t* file_id;
size_t file_id_size;
uint32_t file_id_size;
};
} // Events
@ -131,7 +132,9 @@ using NGCFT1EventProviderI = EventProviderI<NGCFT1EventI>;
class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProviderI {
ToxI& _t;
ToxEventProviderI& _tep;
ToxEventProviderI::SubscriptionReference _tep_sr;
NGCEXTEventProvider& _neep; // not the interface?
NGCEXTEventProvider::SubscriptionReference _neep_sr;
std::default_random_engine _rng{std::random_device{}()};
@ -140,7 +143,7 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
// TODO: config
size_t acks_per_packet {3u}; // 3
float init_retry_timeout_after {4.f};
float sending_give_up_after {15.f}; // 30sec (per active transfer)
float sending_give_up_after {10.f}; // sec (per active transfer)
struct Group {
struct Peer {
@ -153,13 +156,17 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
std::vector<uint8_t> file_id;
enum class State {
INITED, //init acked, but no data received yet (might be dropped)
RECV, // receiving data
INITED, // init acked, but no data received yet (might be dropped)
RECV, // receiving data
FINISHING, // got all the data, but we wait for 2*delay, since its likely there is data still arriving
} state;
// float time_since_last_activity ?
size_t file_size {0};
size_t file_size_current {0};
uint64_t file_size {0};
uint64_t file_size_current {0};
// if state INITED or RECV, time since last activity
// if state FINISHING and it reaches 0, delete
float timer {0.f};
// sequence id based reassembly
RecvSequenceBuffer rsb;
@ -184,8 +191,8 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
size_t inits_sent {1}; // is sent when creating
float time_since_activity {0.f};
size_t file_size {0};
size_t file_size_current {0};
uint64_t file_size {0};
uint64_t file_size_current {0};
// sequence array
// list of sent but not acked seq_ids
@ -203,7 +210,7 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
protected:
void updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<CCAI::SeqIDType>& timeouts_set, int64_t& can_packet_size);
void iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer);
bool iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer);
const CCAI* getPeerCCA(uint32_t group_number, uint32_t peer_number) const;
@ -217,19 +224,20 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
float iterate(float delta);
public: // ft1 api
void NGC_FT1_send_request_private(
bool NGC_FT1_send_request_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
const uint8_t* file_id, uint32_t file_id_size
);
// public does not make sense here
bool NGC_FT1_send_init_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size,
size_t file_size,
uint8_t* transfer_id
const uint8_t* file_id, uint32_t file_id_size,
uint64_t file_size,
uint8_t* transfer_id,
bool can_compress = false // set this if you know the data is compressable (eg text)
);
// sends the message and fills in message_id
@ -237,7 +245,7 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
uint32_t group_number,
uint32_t& message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
const uint8_t* file_id, uint32_t file_id_size
);
public: // cca stuff
@ -264,6 +272,7 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
bool onEvent(const Events::NGCEXT_ft1_data&) override;
bool onEvent(const Events::NGCEXT_ft1_data_ack&) override;
bool onEvent(const Events::NGCEXT_ft1_message&) override;
bool onEvent(const Events::NGCEXT_ft1_init2&) override;
protected:
bool onToxEvent(const Tox_Event_Group_Peer_Exit* e) override;

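For callers, the reworked API above switches the init entry point to explicit fixed-width types and adds the can_compress hint. A hedged usage sketch (the helper name and placeholder values are illustrative, assuming an already constructed NGCFT1 instance; file kind 8u is NGCFT1_file_kind::ID from the enum in the next diff):

#include <solanaceae/ngc_ft1/ngcft1.hpp>

#include <cstdint>
#include <iostream>
#include <vector>

void start_transfer_example(NGCFT1& nft, uint32_t group_number, uint32_t peer_number) {
    const std::vector<uint8_t> file_id(32, 0x42); // placeholder, layout is file_kind dependent
    const uint64_t file_size = 1024*1024;         // files have to be at least 1 byte

    uint8_t transfer_id {0};
    const bool ok = nft.NGC_FT1_send_init_private(
        group_number, peer_number,
        8u, // NGCFT1_file_kind::ID
        file_id.data(), static_cast<uint32_t>(file_id.size()),
        file_size,
        &transfer_id
        // can_compress defaults to false; set it if the data is compressable (eg text)
    );
    if (!ok) {
        std::cerr << "init failed (peer offline or no free transfer slot)\n";
        return;
    }
    // The file bytes are then pulled via NGCFT1_send_data events during iterate().
    std::cout << "started transfer id " << int(transfer_id) << "\n";
}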

@ -16,7 +16,7 @@ enum class NGCFT1_file_kind : uint32_t {
// id: TOX_FILE_ID_LENGTH (32) bytes
// this is basically and id and probably not a hash, like the tox friend api
// this id can be unique between 2 peers
ID = 8u,
ID = 8u, // TODO: this is actually DATA and 0
// id: hash of the info, like a torrent infohash (using the same hash as the data)
// TODO: determain internal format
@ -72,5 +72,10 @@ enum class NGCFT1_file_kind : uint32_t {
// id: sha256
// always of size 16KiB, except if last piece in file
TORRENT_V2_PIECE,
// https://gist.github.com/Green-Sky/440cd9817a7114786850eb4c62dc57c3
// id: ts start, ts end
HS2_RANGE_TIME = 0x00000f00, // TODO: remove, did not survive
HS2_RANGE_TIME_MSGPACK = 0x00000f02,
};

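The enum above only states that the HS2_RANGE_TIME_MSGPACK id carries a start and an end timestamp. The sketch below is hypothetical: it assumes two little-endian uint64 values, matching the byte order used elsewhere in ngcext, but the authoritative layout is the spec draft linked in the comment.

#include <cstdint>
#include <cstddef>
#include <vector>

// HYPOTHETICAL layout sketch only; the diff does not specify width or byte order
// of the "ts start, ts end" file id, little-endian uint64 pairs are assumed here.
std::vector<uint8_t> build_hs2_range_time_id(uint64_t ts_start, uint64_t ts_end) {
    std::vector<uint8_t> id;
    id.reserve(2*sizeof(uint64_t));
    for (size_t i = 0; i < sizeof(ts_start); i++) {
        id.push_back((ts_start >> (i*8)) & 0xff);
    }
    for (size_t i = 0; i < sizeof(ts_end); i++) {
        id.push_back((ts_end >> (i*8)) & 0xff);
    }
    return id;
}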

@ -25,7 +25,7 @@ bool RecvSequenceBuffer::canPop(void) const {
std::vector<uint8_t> RecvSequenceBuffer::pop(void) {
assert(canPop());
auto tmp_data = entries.at(next_seq_id).data;
auto tmp_data = std::move(entries.at(next_seq_id).data);
erase(next_seq_id);
next_seq_id++;
return tmp_data;

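The one-line change above moves the buffer out of the entry instead of copying it, which is safe because the entry is erased immediately afterwards. A trimmed sketch of that pop path; the real RecvSequenceBuffer also tracks ack bookkeeping that is not shown here:

#include <cstdint>
#include <cassert>
#include <map>
#include <utility>
#include <vector>

struct RecvSequenceBufferSketch {
    struct Entry { std::vector<uint8_t> data; };
    std::map<uint16_t, Entry> entries; // seq id -> received fragment
    uint16_t next_seq_id {0};

    bool canPop(void) const {
        return entries.count(next_seq_id) != 0;
    }

    std::vector<uint8_t> pop(void) {
        assert(canPop());
        // move, not copy: the entry is erased right after, so its buffer can be stolen
        auto tmp_data = std::move(entries.at(next_seq_id).data);
        entries.erase(next_seq_id);
        next_seq_id++;
        return tmp_data;
    }
};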

@ -0,0 +1,257 @@
#include "./sha1_mapped_filesystem.hpp"
#include <solanaceae/object_store/meta_components.hpp>
#include <solanaceae/object_store/meta_components_file.hpp>
#include "../file_constructor.hpp"
#include "../ft1_sha1_info.hpp"
#include "../hash_utils.hpp"
#include "../components.hpp"
#include <solanaceae/util/utils.hpp>
#include <atomic>
#include <mutex>
#include <list>
#include <thread>
#include <iostream>
namespace Backends {
struct SHA1MappedFilesystem_InfoBuilderState {
std::atomic_bool info_builder_dirty {false};
std::mutex info_builder_queue_mutex;
using InfoBuilderEntry = std::function<void(float)>;
std::list<InfoBuilderEntry> info_builder_queue;
};
SHA1MappedFilesystem::SHA1MappedFilesystem(
ObjectStore2& os
) : _os(os), _ibs(std::make_unique<SHA1MappedFilesystem_InfoBuilderState>()) {
}
SHA1MappedFilesystem::~SHA1MappedFilesystem(void) {
}
void SHA1MappedFilesystem::tick(float current_time) {
if (_ibs->info_builder_dirty) {
std::lock_guard l{_ibs->info_builder_queue_mutex};
_ibs->info_builder_dirty = false; // set while holding lock
for (auto& it : _ibs->info_builder_queue) {
it(current_time);
}
_ibs->info_builder_queue.clear();
}
}
ObjectHandle SHA1MappedFilesystem::newObject(ByteSpan id, bool throw_construct) {
ObjectHandle o{_os.registry(), _os.registry().create()};
o.emplace<ObjComp::Ephemeral::BackendMeta>(this);
o.emplace<ObjComp::Ephemeral::BackendFile2>(this);
o.emplace<ObjComp::ID>(std::vector<uint8_t>{id});
//o.emplace<ObjComp::Ephemeral::FilePath>(object_file_path.generic_u8string());
_os.throwEventConstruct(o);
return o;
}
void SHA1MappedFilesystem::newFromFile(std::string_view file_name, std::string_view file_path, std::function<void(ObjectHandle o)>&& cb) {
std::thread(std::move([
this,
ibs = _ibs.get(),
cb = std::move(cb),
file_name_ = std::string(file_name),
file_path_ = std::string(file_path)
]() mutable {
// 0. open and fail
std::unique_ptr<File2I> file_impl = construct_file2_r_mapped(file_path_);
if (!file_impl->isGood()) {
{
std::lock_guard l{ibs->info_builder_queue_mutex};
ibs->info_builder_queue.push_back([file_path_](float){
// back on iterate thread
std::cerr << "SHA1MF error: failed opening file '" << file_path_ << "'!\n";
});
ibs->info_builder_dirty = true; // still in scope, set before mutex unlock
}
return;
}
// 1. build info by hashing all chunks
FT1InfoSHA1 sha1_info;
// build info
sha1_info.file_name = file_name_;
sha1_info.file_size = file_impl->_file_size; // TODO: remove the reliance on implementation details
sha1_info.chunk_size = chunkSizeFromFileSize(sha1_info.file_size);
{
// TODO: remove
const uint32_t cs_low {32*1024};
const uint32_t cs_high {4*1024*1024};
assert(sha1_info.chunk_size >= cs_low);
assert(sha1_info.chunk_size <= cs_high);
}
{ // build chunks
// HACK: load file fully
// ... it's only a hack if it's not memory mapped, but reading in chunk_sized chunks is probably a good idea anyway
const auto file_data = file_impl->read(file_impl->_file_size, 0);
size_t i = 0;
for (; i + sha1_info.chunk_size < file_data.size; i += sha1_info.chunk_size) {
sha1_info.chunks.push_back(hash_sha1(file_data.ptr+i, sha1_info.chunk_size));
}
if (i < file_data.size) {
sha1_info.chunks.push_back(hash_sha1(file_data.ptr+i, file_data.size-i));
}
}
file_impl.reset();
std::lock_guard l{ibs->info_builder_queue_mutex};
ibs->info_builder_queue.push_back(std::move([
this,
file_name_,
file_path_,
sha1_info = std::move(sha1_info),
cb = std::move(cb)
](float current_time) mutable { //
// executed on iterate thread
// reopen; can't move, since std::function needs to be copy constructible (meh)
std::unique_ptr<File2I> file_impl = construct_file2_r_mapped(file_path_);
//std::unique_ptr<File2I> file_impl = construct_file2_rw_mapped(file_path_, sha1_info.file_size);
if (!file_impl->isGood()) {
std::cerr << "SHA1MF error: failed opening file '" << file_path_ << "'!\n";
return;
}
// 2. hash info
std::vector<uint8_t> sha1_info_data;
std::vector<uint8_t> sha1_info_hash;
std::cout << "SHA1MF info is: \n" << sha1_info;
sha1_info_data = sha1_info.toBuffer();
std::cout << "SHA1MF sha1_info size: " << sha1_info_data.size() << "\n";
sha1_info_hash = hash_sha1(sha1_info_data.data(), sha1_info_data.size());
std::cout << "SHA1MF sha1_info_hash: " << bin2hex(sha1_info_hash) << "\n";
ObjectHandle o;
// check if content exists
// TODO: store "info_to_content" in reg/backend, for better lookup speed
// ok for now, because this is rare
for (const auto& [it_ov, it_ih] : _os.registry().view<Components::FT1InfoSHA1Hash>().each()) {
if (it_ih.hash == sha1_info_hash) {
o = {_os.registry(), it_ov};
}
}
if (static_cast<bool>(o)) {
// TODO: check if content is incomplete and use file instead
if (!o.all_of<Components::FT1InfoSHA1>()) {
o.emplace<Components::FT1InfoSHA1>(sha1_info);
}
if (!o.all_of<Components::FT1InfoSHA1Data>()) {
o.emplace<Components::FT1InfoSHA1Data>(sha1_info_data);
}
// hash has to be set already
// Components::FT1InfoSHA1Hash
// hmmm
// TODO: we need a replacement for this
o.remove<ObjComp::Ephemeral::File::TagTransferPaused>();
// we dont want the info anymore
o.remove<Components::ReRequestInfoTimer>();
} else {
o = newObject(ByteSpan{sha1_info_hash});
o.emplace<Components::FT1InfoSHA1>(sha1_info);
o.emplace<Components::FT1InfoSHA1Data>(sha1_info_data); // keep around? or file?
o.emplace<Components::FT1InfoSHA1Hash>(sha1_info_hash);
}
{ // lookup tables and have
auto& cc = o.get_or_emplace<Components::FT1ChunkSHA1Cache>();
// skip have vec, since all
cc.have_count = sha1_info.chunks.size(); // need?
cc.chunk_hash_to_index.clear(); // for cpy pst
for (size_t i = 0; i < sha1_info.chunks.size(); i++) {
cc.chunk_hash_to_index[sha1_info.chunks[i]].push_back(i);
}
}
o.emplace_or_replace<ObjComp::F::TagLocalHaveAll>();
o.remove<ObjComp::F::LocalHaveBitset>();
{ // file info
// TODO: not overwrite fi? since same?
o.emplace_or_replace<ObjComp::F::SingleInfo>(file_name_, file_impl->_file_size);
o.emplace_or_replace<ObjComp::F::SingleInfoLocal>(file_path_);
o.emplace_or_replace<ObjComp::Ephemeral::FilePath>(file_path_); // ?
}
o.emplace_or_replace<Components::FT1File2>(std::move(file_impl), current_time);
if (!o.all_of<ObjComp::Ephemeral::File::TransferStats>()) {
o.emplace<ObjComp::Ephemeral::File::TransferStats>();
}
cb(o);
// TODO: earlier?
_os.throwEventUpdate(o);
}));
ibs->info_builder_dirty = true; // still in scope, set before mutex unlock
})).detach();
}
std::unique_ptr<File2I> SHA1MappedFilesystem::file2(Object ov, FILE2_FLAGS flags) {
if (flags & FILE2_RAW) {
std::cerr << "SHA1MF error: does not support raw modes\n";
return nullptr;
}
ObjectHandle o{_os.registry(), ov};
if (!static_cast<bool>(o)) {
return nullptr;
}
// will this do if we go and support enc?
// use ObjComp::Ephemeral::FilePath instead??
if (!o.all_of<ObjComp::F::SingleInfoLocal>()) {
return nullptr;
}
const auto& file_path = o.get<ObjComp::F::SingleInfoLocal>().file_path;
if (file_path.empty()) {
return nullptr;
}
// since they are mapped, is it efficient to have multiple?
if (flags & FILE2_WRITE) {
auto res = construct_file2_rw_mapped(file_path, -1);
if (!res || !res->isGood()) {
std::cerr << "SHA1MF error: failed constructing mapped RW file '" << file_path << "'\n";
return nullptr;
}
return res;
} else { // read
auto res = construct_file2_r_mapped(file_path);
if (!res || !res->isGood()) {
std::cerr << "SHA1MF error: failed constructing mapped R file '" << file_path << "'\n";
return nullptr;
}
return res;
}
}
} // Backends

View File

@ -0,0 +1,40 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
#include <string>
#include <string_view>
#include <memory>
namespace Backends {
// fwd to hide the threading headers
struct SHA1MappedFilesystem_InfoBuilderState;
struct SHA1MappedFilesystem : public StorageBackendIMeta, public StorageBackendIFile2 {
ObjectStore2& _os;
std::unique_ptr<SHA1MappedFilesystem_InfoBuilderState> _ibs;
SHA1MappedFilesystem(
ObjectStore2& os
);
~SHA1MappedFilesystem(void);
// pull from info builder queue
// call from main thread (os thread?) often
void tick(float current_time);
ObjectHandle newObject(ByteSpan id, bool throw_construct = true) override;
// performs async file hashing
// create message in cb
void newFromFile(std::string_view file_name, std::string_view file_path, std::function<void(ObjectHandle o)>&& cb/*, bool merge_preexisting = false*/);
// might return pre-existing?
ObjectHandle newFromInfoHash(ByteSpan info_hash);
std::unique_ptr<File2I> file2(Object o, FILE2_FLAGS flags) override;
};
} // Backends
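A hedged usage sketch of this backend (my_os, the file paths and the callback body are placeholders, not code from the repo): hashing runs on a detached worker thread, and the callback only fires once tick() runs on the iterate thread:

Backends::SHA1MappedFilesystem mfb{my_os}; // my_os: some ObjectStore2 instance (placeholder)
mfb.newFromFile("video.mkv", "/tmp/video.mkv", [](ObjectHandle o) {
    // called on the iterate thread after hashing finished,
    // e.g. attach the object to a message here
});
// somewhere in the main loop:
mfb.tick(current_time); // current_time: caller's timestamp (placeholder); drains finished jobs and invokes callbacks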

View File

@ -1,10 +1,11 @@
#include "./chunk_picker.hpp"
#include <solanaceae/tox_contacts/components.hpp>
#include "./components.hpp"
#include "./contact_components.hpp"
#include <solanaceae/object_store/meta_components_file.hpp>
#include "./components.hpp"
#include <algorithm>
#include <iostream>
@ -122,7 +123,7 @@ static constexpr size_t flowWindowToRequestCount(size_t flow_window) {
}
void ChunkPicker::updateParticipation(
Contact3Handle c,
ContactHandle4 c,
ObjectRegistry& objreg
) {
if (!c.all_of<Contact::Components::FT1Participation>()) {
@ -132,6 +133,7 @@ void ChunkPicker::updateParticipation(
entt::dense_set<Object> checked;
for (const Object ov : c.get<Contact::Components::FT1Participation>().participating) {
using Priority = ObjComp::Ephemeral::File::DownloadPriority::Priority;
const ObjectHandle o {objreg, ov};
if (participating_unfinished.contains(o)) {
@ -140,37 +142,52 @@ void ChunkPicker::updateParticipation(
continue;
}
if (o.all_of<Message::Components::Transfer::TagPaused>()) {
if (o.all_of<ObjComp::Ephemeral::File::TagTransferPaused>()) {
participating_unfinished.erase(o);
continue;
}
if (o.get<Components::FT1ChunkSHA1Cache>().have_all) {
if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
participating_unfinished.erase(o);
continue;
}
// TODO: optimize this to only change on dirty, or something
if (o.all_of<ObjComp::Ephemeral::File::DownloadPriority>()) {
Priority prio = o.get<ObjComp::Ephemeral::File::DownloadPriority>().p;
uint16_t pskips =
prio == Priority::HIGHEST ? 0u :
prio == Priority::HIGH ? 1u :
prio == Priority::NORMAL ? 2u :
prio == Priority::LOW ? 4u :
8u // LOWEST
;
participating_unfinished.at(o).should_skip = pskips;
}
} else {
if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
continue;
}
if (o.all_of<Message::Components::Transfer::TagPaused>()) {
if (o.all_of<ObjComp::Ephemeral::File::TagTransferPaused>()) {
continue;
}
if (!o.get<Components::FT1ChunkSHA1Cache>().have_all) {
using Priority = Components::DownloadPriority::Priority;
if (!o.all_of<ObjComp::F::TagLocalHaveAll>()) {
Priority prio = Priority::NORMAL;
if (o.all_of<Components::DownloadPriority>()) {
prio = o.get<Components::DownloadPriority>().p;
if (o.all_of<ObjComp::Ephemeral::File::DownloadPriority>()) {
prio = o.get<ObjComp::Ephemeral::File::DownloadPriority>().p;
}
uint16_t pskips =
prio == Priority::HIGHER ? 0u :
prio == Priority::HIGHEST ? 0u :
prio == Priority::HIGH ? 1u :
prio == Priority::NORMAL ? 2u :
prio == Priority::LOW ? 4u :
8u
8u // LOWEST
;
participating_unfinished.emplace(o, ParticipationEntry{pskips});
@ -194,9 +211,9 @@ void ChunkPicker::updateParticipation(
}
std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
Contact3Handle c,
ContactHandle4 c,
ObjectRegistry& objreg,
ReceivingTransfers& rt,
const ReceivingTransfers& rt,
const size_t open_requests
//const size_t flow_window
//NGCFT1& nft
@ -236,13 +253,12 @@ std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
// round robin content (remember last obj)
if (!objreg.valid(participating_in_last) || !participating_unfinished.count(participating_in_last)) {
participating_in_last = participating_unfinished.begin()->first;
//participating_in_last = *participating_unfinished.begin();
}
assert(objreg.valid(participating_in_last));
auto it = participating_unfinished.find(participating_in_last);
// hard limit robin rounds to array size time 20
for (size_t i = 0; req_ret.size() < num_requests && i < participating_unfinished.size()*20; i++) {
// hard limit robin rounds to array size times 20
for (size_t i = 0; req_ret.size() < num_requests && i < participating_unfinished.size()*20; i++, it++) {
if (it == participating_unfinished.end()) {
it = participating_unfinished.begin();
}
@ -251,22 +267,24 @@ std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
it->second.skips++;
continue;
}
it->second.skips = 0;
ObjectHandle o {objreg, it->first};
// intersect self have with other have
if (!o.all_of<Components::RemoteHave, Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
// rare case where no one other has anything
if (!o.all_of<ObjComp::F::RemoteHaveBitset, Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
// rare case where no one else has anything
continue;
}
const auto& cc = o.get<Components::FT1ChunkSHA1Cache>();
if (cc.have_all) {
if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
std::cerr << "ChunkPicker error: completed content still in participating_unfinished!\n";
continue;
}
const auto& others_have = o.get<Components::RemoteHave>().others;
//const auto& cc = o.get<Components::FT1ChunkSHA1Cache>();
const auto& others_have = o.get<ObjComp::F::RemoteHaveBitset>().others;
auto other_it = others_have.find(c);
if (other_it == others_have.end()) {
// rare case where the other is participating but has nothing
@ -275,7 +293,14 @@ std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
const auto& other_have = other_it->second;
BitSet chunk_candidates = cc.have_chunk;
const auto& info = o.get<Components::FT1InfoSHA1>();
const auto total_chunks = info.chunks.size();
const auto* lhb = o.try_get<ObjComp::F::LocalHaveBitset>();
// if we dont have anything, this might not exist yet
BitSet chunk_candidates = lhb == nullptr ? BitSet{total_chunks} : (lhb->have.size_bits() >= total_chunks ? lhb->have : BitSet{total_chunks});
if (!other_have.have_all) {
// AND is the same as ~(~A | ~B)
// that means we leave chunk_candidates as (have is inverted want)
@ -288,8 +313,6 @@ std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
} else {
chunk_candidates.invert();
}
const auto& info = o.get<Components::FT1InfoSHA1>();
const auto total_chunks = info.chunks.size();
auto& requested_chunks = o.get_or_emplace<Components::FT1ChunkSHA1Requested>().chunks;
// TODO: trim off round up to 8, since they are now always set
@ -306,10 +329,10 @@ std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
// TODO: configurable
size_t start_offset {0u};
if (o.all_of<Components::ReadHeadHint>()) {
const auto byte_offset = o.get<Components::ReadHeadHint>().offset_into_file;
if (o.all_of<ObjComp::Ephemeral::File::ReadHeadHint>()) {
const auto byte_offset = o.get<ObjComp::Ephemeral::File::ReadHeadHint>().offset_into_file;
if (byte_offset <= info.file_size) {
start_offset = o.get<Components::ReadHeadHint>().offset_into_file/info.chunk_size;
start_offset = byte_offset/info.chunk_size;
} else {
// error?
}
@ -318,7 +341,8 @@ std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
//PickerStrategyRandom ps(chunk_candidates, total_chunks, _rng);
PickerStrategyRandomSequential ps(chunk_candidates, total_chunks, _rng, start_offset);
size_t out_chunk_idx {0};
while (ps.gen(out_chunk_idx) && req_ret.size() < num_requests) {
size_t req_from_this_o {0};
while (ps.gen(out_chunk_idx) && req_ret.size() < num_requests && req_from_this_o < std::max<size_t>(total_chunks/3, 1)) {
// out_chunk_idx is a potential candidate we can request from the peer
// - check against double requests
@ -346,10 +370,12 @@ std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
// TODO: move this after packet was sent successfully
// (move net in? hmm)
requested_chunks[out_chunk_idx] = Components::FT1ChunkSHA1Requested::Entry{0.f, c};
req_from_this_o++;
}
}
if (it == participating_unfinished.end() || ++it == participating_unfinished.end()) {
if (it == participating_unfinished.end()) {
participating_in_last = entt::null;
} else {
participating_in_last = it->first;

View File

@ -1,6 +1,6 @@
#pragma once
#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/contact/fwd.hpp>
#include <solanaceae/object_store/object_store.hpp>
#include "./components.hpp"
@ -50,7 +50,7 @@ struct ChunkPicker {
private: // TODO: properly sort
// updates participating_unfinished
void updateParticipation(
Contact3Handle c,
ContactHandle4 c,
ObjectRegistry& objreg
);
public:
@ -66,9 +66,9 @@ struct ChunkPicker {
};
// returns list of chunks to request
[[nodiscard]] std::vector<ContentChunkR> updateChunkRequests(
Contact3Handle c,
ContactHandle4 c,
ObjectRegistry& objreg,
ReceivingTransfers& rt,
const ReceivingTransfers& rt,
const size_t open_requests
//const size_t flow_window
//NGCFT1& nft

View File

@ -0,0 +1,131 @@
#include "./chunk_picker_systems.hpp"
#include <solanaceae/contact/contact_store_i.hpp>
#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>
#include "./components.hpp"
#include "./chunk_picker.hpp"
#include "./contact_components.hpp"
#include <cassert>
#include <iostream>
namespace Systems {
void chunk_picker_updates(
ContactStore4I& cs,
ObjectRegistry& os_reg,
const entt::dense_map<Contact4, size_t>& peer_open_requests,
const ReceivingTransfers& receiving_transfers,
NGCFT1& nft, // TODO: remove this somehow
const float delta
) {
std::vector<ContactHandle4> cp_to_remove;
auto& cr = cs.registry();
// first, update timers
cr.view<ChunkPickerTimer>().each([&cr, delta](const Contact4 cv, ChunkPickerTimer& cpt) {
cpt.timer -= delta;
if (cpt.timer <= 0.f) {
cr.emplace_or_replace<ChunkPickerUpdateTag>(cv);
}
});
//std::cout << "number of chunkpickers: " << _cr.storage<ChunkPicker>().size() << ", of which " << _cr.storage<ChunkPickerUpdateTag>().size() << " need updating\n";
// now check for potentially missing cp
auto cput_view = cr.view<ChunkPickerUpdateTag>();
cput_view.each([&cr, &cp_to_remove](const Contact4 cv) {
ContactHandle4 c{cr, cv};
//std::cout << "cput :)\n";
if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral, Contact::Components::FT1Participation>()) {
std::cout << "cput uh nuh :(\n";
cp_to_remove.push_back(c);
return;
}
if (!c.all_of<ChunkPicker>()) {
std::cout << "creating new cp!!\n";
c.emplace<ChunkPicker>();
c.emplace_or_replace<ChunkPickerTimer>();
}
});
// now update all cp that are tagged
cr.view<ChunkPicker, ChunkPickerUpdateTag>().each([&cr, &os_reg, &peer_open_requests, &receiving_transfers, &nft, &cp_to_remove](const Contact4 cv, ChunkPicker& cp) {
ContactHandle4 c{cr, cv};
if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral, Contact::Components::FT1Participation>()) {
cp_to_remove.push_back(c);
return;
}
//std::cout << "cpu :)\n";
// HACK: expensive, dont do every tick, only on events
// do verification in debug instead?
//cp.validateParticipation(c, _os.registry());
size_t peer_open_request = 0;
if (peer_open_requests.contains(c)) {
peer_open_request += peer_open_requests.at(c);
}
auto new_requests = cp.updateChunkRequests(
c,
os_reg,
receiving_transfers,
peer_open_request
);
if (new_requests.empty()) {
// updateChunkRequests updates the unfinished
// TODO: pull out and check there?
if (cp.participating_unfinished.empty()) {
std::cout << "destroying empty useless cp\n";
cp_to_remove.push_back(c);
} else {
// most likely will have something soon
// TODO: mark dirty on have instead?
c.get_or_emplace<ChunkPickerTimer>().timer = 10.f;
}
return;
}
assert(c.all_of<Contact::Components::ToxGroupPeerEphemeral>());
const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();
for (const auto [r_o, r_idx] : new_requests) {
auto& cc = r_o.get<Components::FT1ChunkSHA1Cache>();
const auto& info = r_o.get<Components::FT1InfoSHA1>();
// request chunk_idx
nft.NGC_FT1_send_request_private(
group_number, peer_number,
static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_CHUNK),
info.chunks.at(r_idx).data.data(), info.chunks.at(r_idx).size()
);
std::cout << "SHA1_NGCFT1: requesting chunk [" << info.chunks.at(r_idx) << "] from " << group_number << ":" << peer_number << "\n";
}
// force update every minute
// TODO: add small random bias to spread load
c.get_or_emplace<ChunkPickerTimer>().timer = 60.f;
});
// unmark all marked
cr.clear<ChunkPickerUpdateTag>();
assert(cr.storage<ChunkPickerUpdateTag>().empty());
for (const auto& c : cp_to_remove) {
c.remove<ChunkPicker, ChunkPickerTimer>();
}
}
} // Systems
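A compact restating of the update cadence implemented above (not code from the repo):

// ChunkPickerTimer reaches 0         -> contact gets ChunkPickerUpdateTag
// tagged + still participating       -> ChunkPicker created if missing
// updateChunkRequests() non-empty    -> requests sent, timer reset to 60s
// nothing to request, still waiting  -> timer reset to 10s
// nothing left to participate in     -> ChunkPicker and timer removed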

View File

@ -0,0 +1,22 @@
#pragma once
#include <solanaceae/contact/fwd.hpp>
#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/tox_contacts/components.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>
#include "./receiving_transfers.hpp"
namespace Systems {
void chunk_picker_updates(
ContactStore4I& cs,
ObjectRegistry& os_reg,
const entt::dense_map<Contact4, size_t>& peer_open_requests,
const ReceivingTransfers& receiving_transfers,
NGCFT1& nft, // TODO: remove this somehow
const float delta
);
} // Systems

View File

@ -1,6 +1,10 @@
#include "./components.hpp"
std::vector<size_t> Components::FT1ChunkSHA1Cache::chunkIndices(const SHA1Digest& hash) const {
#include <solanaceae/object_store/meta_components_file.hpp>
namespace Components {
std::vector<size_t> FT1ChunkSHA1Cache::chunkIndices(const SHA1Digest& hash) const {
const auto it = chunk_hash_to_index.find(hash);
if (it != chunk_hash_to_index.cend()) {
return it->second;
@ -9,17 +13,57 @@ std::vector<size_t> Components::FT1ChunkSHA1Cache::chunkIndices(const SHA1Digest
}
}
bool Components::FT1ChunkSHA1Cache::haveChunk(const SHA1Digest& hash) const {
if (have_all) { // short cut
bool FT1ChunkSHA1Cache::haveChunk(ObjectHandle o, const SHA1Digest& hash) const {
if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
return true;
}
const auto* lhb = o.try_get<ObjComp::F::LocalHaveBitset>();
if (lhb == nullptr) {
return false; // we dont have anything yet
}
if (auto i_vec = chunkIndices(hash); !i_vec.empty()) {
// TODO: should i test all?
return have_chunk[i_vec.front()];
//return have_chunk[i_vec.front()];
return lhb->have[i_vec.front()];
}
// not part of this file
return false;
}
void ReAnnounceTimer::set(const float new_timer) {
timer = new_timer;
last_max = new_timer;
}
void ReAnnounceTimer::reset(void) {
if (last_max <= 0.01f) {
last_max = 1.f;
}
last_max *= 2.f;
timer = last_max;
}
void ReAnnounceTimer::lower(void) {
timer *= 0.1f;
//last_max *= 0.1f; // is this a good idea?
last_max *= 0.9f; // is this a good idea?
}
void TransferStatsTally::Peer::trimSent(const float time_now) {
while (recently_sent.size() > 4 && time_now - recently_sent.front().time_point > 1.f) {
recently_sent.pop_front();
}
}
void TransferStatsTally::Peer::trimReceived(const float time_now) {
while (recently_received.size() > 4 && time_now - recently_received.front().time_point > 1.f) {
recently_received.pop_front();
}
}
} // Components
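ReAnnounceTimer above is a simple exponential back-off with a way to speed back up when a peer joins. A quick trace of the behaviour, starting from an arbitrary 10-second interval:

Components::ReAnnounceTimer rat;
rat.set(10.f);  // timer = 10, last_max = 10
rat.reset();    // timer = 20, last_max = 20 (doubled)
rat.reset();    // timer = 40, last_max = 40
rat.lower();    // timer = 4 (timer *= 0.1), last_max = 36 (last_max *= 0.9)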

View File

@ -3,6 +3,7 @@
#include <solanaceae/contact/components.hpp>
#include <solanaceae/message3/components.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/object_store/meta_components_file.hpp>
#include <solanaceae/util/bitset.hpp>
@ -35,94 +36,85 @@ namespace Components {
};
struct FT1ChunkSHA1Cache {
// TODO: extract have_chunk, have_all and have_count to generic comp
// TODO: extract have_count to generic comp
// have_chunk has info.chunks.size() bits, or is empty if have_all
// keep in mind the bitset rounds up to multiples of 8
BitSet have_chunk{0};
//BitSet have_chunk{0};
bool have_all {false};
size_t have_count {0};
//bool have_all {false};
size_t have_count {0}; // move?
entt::dense_map<SHA1Digest, std::vector<size_t>> chunk_hash_to_index;
std::vector<size_t> chunkIndices(const SHA1Digest& hash) const;
bool haveChunk(const SHA1Digest& hash) const;
bool haveChunk(ObjectHandle o, const SHA1Digest& hash) const;
};
struct FT1File2 {
// the cached file2 for faster access
// should be destroyed when no activity and recreated on demand
std::unique_ptr<File2I> file;
// set to current time on init, read, write
float last_activity_ts {0.f};
};
struct FT1ChunkSHA1Requested {
// requested chunks with a timer since last request
struct Entry {
float timer {0.f};
Contact3 c {entt::null};
Contact4 c {entt::null};
};
entt::dense_map<size_t, Entry> chunks;
};
// TODO: once announce is shipped, remove the "Suspected"
struct SuspectedParticipants {
entt::dense_set<Contact3> participants;
};
struct RemoteHave {
struct Entry {
bool have_all {false};
BitSet have;
};
entt::dense_map<Contact3, Entry> others;
entt::dense_set<Contact4> participants;
};
struct ReRequestInfoTimer {
float timer {0.f};
};
struct DownloadPriority {
// download/retrieval priority in comparison to other objects
// not all backends implement this
// priority can be weak, meaning low priority dls will still get transfer activity, just less often
enum class Priority {
HIGHER,
HIGH,
NORMAL,
LOW,
LOWER,
} p = Priority::NORMAL;
struct AnnounceTargets {
entt::dense_set<Contact4> targets;
};
struct ReadHeadHint {
// points to the first byte we want
// this is just a hint, that can be set from outside
// to guide the sequential "piece picker" strategy
// ? the strategy *should* set this to the first byte we dont yet have
uint64_t offset_into_file {0u};
};
struct ReAnnounceTimer {
float timer {0.f};
float last_max {0.f};
// this is per object/content
// more applicable than "separated", so should be supported by most backends
struct TransferStats {
// in bytes per second
float rate_up {0.f};
float rate_down {0.f};
void set(const float new_timer);
// bytes
uint64_t total_up {0u};
uint64_t total_down {0u};
// exponential back-off
void reset(void);
// on peer join to group
void lower(void);
};
struct TransferStatsSeparated {
entt::dense_map<Contact3, TransferStats> stats;
entt::dense_map<Contact4, ObjComp::Ephemeral::File::TransferStats> stats;
};
// used to populate stats
struct TransferStatsTally {
struct Peer {
struct Entry {
float time_since {0.f};
size_t bytes {0u};
float time_point {0.f};
uint64_t bytes {0u};
bool accounted {false};
};
std::deque<Entry> recently_sent;
std::deque<Entry> recently_received;
// keep at least 4 entries or 1 sec
// trim too old front
void trimSent(const float time_now);
void trimReceived(const float time_now);
};
entt::dense_map<Contact3, Peer> tally;
entt::dense_map<Contact4, Peer> tally;
};
} // Components

View File

@ -0,0 +1,146 @@
#pragma once
#include <solanaceae/file/file2.hpp>
#include "./mio.hpp"
#include <filesystem>
#include <fstream>
#include <iostream>
#include <cstring>
#include <cassert>
struct File2RWMapped : public File2I {
mio::ummap_sink _file_map;
// TODO: add truncate support?
// TODO: rw always true?
File2RWMapped(std::string_view file_path, int64_t file_size = -1) : File2I(true, true) {
std::filesystem::path native_file_path{file_path};
if (!std::filesystem::exists(native_file_path)) {
std::ofstream(native_file_path) << '\0'; // force create the file
}
_file_size = std::filesystem::file_size(native_file_path);
if (file_size >= 0 && _file_size != file_size) {
try {
std::filesystem::resize_file(native_file_path, file_size); // ensure size, usually sparse
} catch (...) {
std::cerr << "FileRWMapped error: resizing file failed\n";
return;
}
_file_size = std::filesystem::file_size(native_file_path);
if (_file_size != file_size) {
std::cerr << "FileRWMapped error: resizing file failed (size mismatch)\n";
return;
}
}
std::error_code err;
// sink, is also read
_file_map.map(native_file_path.u8string(), 0, _file_size, err);
if (err) {
std::cerr << "FileRWMapped error: mapping file failed: " << err.message() << " (" << err << ")\n";
return;
}
}
virtual ~File2RWMapped(void) {
}
bool isGood(void) override {
return _file_map.is_mapped();
}
bool write(const ByteSpan data, int64_t pos = -1) override {
// TODO: support streaming write
if (pos < 0) {
return false;
}
if (data.empty()) {
return true; // false?
}
// file size is fixed for mmapped files
if (pos+data.size > _file_size) {
return false;
}
std::memcpy(_file_map.data()+pos, data.ptr, data.size);
return true;
}
ByteSpanWithOwnership read(uint64_t size, int64_t pos = -1) override {
// TODO: support streaming read
if (pos < 0) {
assert(false && "streaming not implemented");
return ByteSpan{};
}
if (pos+size > _file_size) {
assert(false && "read past end");
return ByteSpan{};
}
// return non-owning
return ByteSpan{_file_map.data()+pos, size};
}
};
struct File2RMapped : public File2I {
mio::ummap_source _file_map;
File2RMapped(std::string_view file_path) : File2I(false, true) {
std::filesystem::path native_file_path{file_path};
if (!std::filesystem::exists(native_file_path)) {
std::cerr << "FileRMapped error: file does not exist\n";
return;
}
_file_size = std::filesystem::file_size(native_file_path);
std::error_code err;
_file_map.map(native_file_path.u8string(), err);
if (err) {
std::cerr << "FileRMapped error: mapping file failed: " << err.message() << " (" << err << ")\n";
return;
}
if (_file_size != _file_map.length()) {
std::cerr << "FileRMapped warning: file size and mapped file size mismatch.\n";
_file_size = _file_map.length();
}
}
virtual ~File2RMapped(void) {
}
bool isGood(void) override {
return _file_map.is_mapped();
}
bool write(const ByteSpan, int64_t = -1) override { return false; }
ByteSpanWithOwnership read(uint64_t size, int64_t pos = -1) override {
// TODO: support streaming read
if (pos < 0) {
assert(false && "streaming not implemented");
return ByteSpan{};
}
if (pos+size > _file_size) {
assert(false && "read past end");
return ByteSpan{};
}
// return non-owning
return ByteSpan{_file_map.data()+pos, size};
}
};

View File

@ -0,0 +1,12 @@
#include "./file_constructor.hpp"
#include "./file2_mapped.hpp"
std::unique_ptr<File2I> construct_file2_rw_mapped(std::string_view file_path, int64_t file_size) {
return std::make_unique<File2RWMapped>(file_path, file_size);
}
std::unique_ptr<File2I> construct_file2_r_mapped(std::string_view file_path) {
return std::make_unique<File2RMapped>(file_path);
}
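A hedged usage sketch of the two constructors (the path and sizes are placeholders): the RW variant takes a target size up front because a mapping cannot grow, while the R variant maps whatever is on disk:

auto w = construct_file2_rw_mapped("/tmp/chunk_store.bin", 1024*1024);
if (w && w->isGood()) {
    const uint8_t payload[4] {1, 2, 3, 4};
    w->write(ByteSpan{payload, sizeof(payload)}, 0); // must stay within the fixed size
}
auto r = construct_file2_r_mapped("/tmp/chunk_store.bin");
if (r && r->isGood()) {
    auto span = r->read(4, 0); // non-owning view into the mapping
}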

View File

@ -0,0 +1,10 @@
#pragma once
#include <solanaceae/file/file2.hpp>
#include <memory>
#include <string_view>
std::unique_ptr<File2I> construct_file2_rw_mapped(std::string_view file_path, int64_t file_size = -1);
std::unique_ptr<File2I> construct_file2_r_mapped(std::string_view file_path);

View File

@ -0,0 +1,33 @@
#include "./file_inactivity_system.hpp"
#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/file/file2.hpp>
#include "./components.hpp"
#include <iostream>
namespace Systems {
void file_inactivity(
ObjectRegistry& os_reg,
float current_time
) {
std::vector<Object> to_close;
size_t total {0};
os_reg.view<Components::FT1File2>().each([&os_reg, &to_close, &total, current_time](Object ov, const Components::FT1File2& ft_f) {
if (current_time - ft_f.last_activity_ts >= 30.f) {
// after 30sec of inactivity
to_close.push_back(ov);
}
total++;
});
if (!to_close.empty()) {
std::cout << "SHA1_NGCFT1: closing " << to_close.size() << " out of " << total << " open files\n";
os_reg.remove<Components::FT1File2>(to_close.cbegin(), to_close.cend());
}
}
} // Systems

View File

@ -0,0 +1,13 @@
#pragma once
#include <solanaceae/object_store/fwd.hpp>
namespace Systems {
void file_inactivity(
ObjectRegistry& os_reg,
float current_time
);
} // Systems

View File

@ -1,76 +0,0 @@
#pragma once
#include <solanaceae/file/file2.hpp>
#include "./mio.hpp"
#include <filesystem>
#include <fstream>
#include <iostream>
#include <cstring>
struct File2RWMapped : public File2I {
mio::ummap_sink _file_map;
// TODO: add truncate support?
// TODO: rw always true?
File2RWMapped(std::string_view file_path, int64_t file_size = -1) : File2I(true, true) {
std::filesystem::path native_file_path{file_path};
if (!std::filesystem::exists(native_file_path)) {
std::ofstream(native_file_path) << '\0'; // force create the file
}
_file_size = std::filesystem::file_size(native_file_path);
if (file_size >= 0 && _file_size != file_size) {
_file_size = file_size;
std::filesystem::resize_file(native_file_path, file_size); // ensure size, usually sparse
}
std::error_code err;
// sink, is also read
_file_map.map(native_file_path.u8string(), 0, _file_size, err);
if (err) {
std::cerr << "FileRWMapped error: mapping file failed " << err << "\n";
return;
}
}
virtual ~File2RWMapped(void) override {}
bool isGood(void) override {
return _file_map.is_mapped();
}
bool write(const ByteSpan data, int64_t pos = -1) override {
// TODO: support streaming write
if (pos < 0) {
return false;
}
if (data.empty()) {
return true; // false?
}
// file size is fix for mmaped files
if (pos+data.size > _file_size) {
return false;
}
std::memcpy(_file_map.data()+pos, data.ptr, data.size);
return true;
}
ByteSpanWithOwnership read(uint64_t size, int64_t pos = -1) override {
if (pos+size > _file_size) {
//assert(false && "read past end");
return ByteSpan{};
}
// return non-owning
return ByteSpan{_file_map.data()+pos, size};
}
};

View File

@ -1,5 +1,8 @@
#include "./ft1_sha1_info.hpp"
// next power of two
#include <entt/core/memory.hpp>
#include <sodium.h>
SHA1Digest::SHA1Digest(const std::vector<uint8_t>& v) {
@ -28,6 +31,27 @@ std::ostream& operator<<(std::ostream& out, const SHA1Digest& v) {
return out;
}
uint32_t chunkSizeFromFileSize(uint64_t file_size) {
const uint64_t fs_low {UINT64_C(512)*1024};
const uint64_t fs_high {UINT64_C(2)*1024*1024*1024};
const uint32_t cs_low {32*1024};
const uint32_t cs_high {4*1024*1024};
if (file_size <= fs_low) { // 512kib
return cs_low; // 32kib
} else if (file_size >= fs_high) { // 2gib
return cs_high; // 4mib
}
double t = file_size - fs_low;
t /= fs_high;
double x = (1 - t) * cs_low + t * cs_high;
return entt::next_power_of_two(uint64_t(x));
}
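For intuition, a worked example with the constants above (a sanity check, not part of the source):

// 100 MiB file: t = (104857600 - 524288) / 2147483648 ≈ 0.0486
// x ≈ (1 - 0.0486)*32768 + 0.0486*4194304 ≈ 234952
// next power of two -> 262144, i.e. 256 KiB chunks, exactly 400 of them
assert(chunkSizeFromFileSize(UINT64_C(100)*1024*1024) == UINT32_C(256)*1024);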
size_t FT1InfoSHA1::chunkSize(size_t chunk_index) const {
if (chunk_index+1 == chunks.size()) {
// last chunk
@ -39,6 +63,7 @@ size_t FT1InfoSHA1::chunkSize(size_t chunk_index) const {
std::vector<uint8_t> FT1InfoSHA1::toBuffer(void) const {
std::vector<uint8_t> buffer;
buffer.reserve(256+8+4+20*chunks.size());
assert(!file_name.empty());
// TODO: optimize

View File

@ -25,21 +25,23 @@ std::ostream& operator<<(std::ostream& out, const SHA1Digest& v);
namespace std { // inject
template<> struct hash<SHA1Digest> {
std::size_t operator()(const SHA1Digest& h) const noexcept {
std::uint64_t operator()(const SHA1Digest& h) const noexcept {
return
size_t(h.data[0]) << (0*8) |
size_t(h.data[1]) << (1*8) |
size_t(h.data[2]) << (2*8) |
size_t(h.data[3]) << (3*8) |
size_t(h.data[4]) << (4*8) |
size_t(h.data[5]) << (5*8) |
size_t(h.data[6]) << (6*8) |
size_t(h.data[7]) << (7*8)
std::uint64_t(h.data[0]) << (0*8) |
std::uint64_t(h.data[1]) << (1*8) |
std::uint64_t(h.data[2]) << (2*8) |
std::uint64_t(h.data[3]) << (3*8) |
std::uint64_t(h.data[4]) << (4*8) |
std::uint64_t(h.data[5]) << (5*8) |
std::uint64_t(h.data[6]) << (6*8) |
std::uint64_t(h.data[7]) << (7*8)
;
}
};
} // std
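The switch from size_t to std::uint64_t above is about 32-bit targets: with a 32-bit std::size_t, shifting by 32 or more bits is undefined behaviour, so the upper four digest bytes were never reliably folded into the hash. A one-line illustration of the problematic case (not code to keep):

// std::size_t(h.data[4]) << (4*8); // UB when size_t is 32 bits wide
// widening every byte to std::uint64_t first makes all eight shifts well defined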
uint32_t chunkSizeFromFileSize(uint64_t file_size);
struct FT1InfoSHA1 {
std::string file_name;
uint64_t file_size {0};

View File

@ -5,7 +5,7 @@
#include <iostream>
bool addParticipation(Contact3Handle c, ObjectHandle o) {
bool addParticipation(ContactHandle4 c, ObjectHandle o) {
bool was_new {false};
assert(static_cast<bool>(o));
assert(static_cast<bool>(c));
@ -25,7 +25,7 @@ bool addParticipation(Contact3Handle c, ObjectHandle o) {
return was_new;
}
void removeParticipation(Contact3Handle c, ObjectHandle o) {
void removeParticipation(ContactHandle4 c, ObjectHandle o) {
assert(static_cast<bool>(o));
assert(static_cast<bool>(c));

View File

@ -1,8 +1,8 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/contact/fwd.hpp>
bool addParticipation(Contact3Handle c, ObjectHandle o);
void removeParticipation(Contact3Handle c, ObjectHandle o);
bool addParticipation(ContactHandle4 c, ObjectHandle o);
void removeParticipation(ContactHandle4 c, ObjectHandle o);

View File

@ -0,0 +1,92 @@
#include "./re_announce_systems.hpp"
#include "./components.hpp"
#include <solanaceae/object_store/meta_components_file.hpp>
#include <solanaceae/contact/contact_store_i.hpp>
#include <solanaceae/tox_contacts/components.hpp>
#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>
#include <vector>
#include <cassert>
namespace Systems {
void re_announce(
ObjectRegistry& os_reg,
ContactStore4I& cs,
NGCEXTEventProvider& neep,
const float delta
) {
std::vector<Object> to_remove;
os_reg.view<Components::ReAnnounceTimer>().each([&os_reg, &cr = cs.registry(), &neep, &to_remove, delta](Object ov, Components::ReAnnounceTimer& rat) {
ObjectHandle o{os_reg, ov};
// if no known targets, or no hash, remove
if (!o.all_of<Components::AnnounceTargets, Components::FT1InfoSHA1Hash>()) {
to_remove.push_back(ov);
return;
}
// TODO: pause
//// if paused -> remove
//if (o.all_of<Message::Components::Transfer::TagPaused>()) {
// to_remove.push_back(ov);
// return;
//}
// // if not downloading or info incomplete -> remove
//if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1Hash, Components::AnnounceTargets>()) {
// if not downloading AND info complete -> remove
if (!o.all_of<Components::FT1ChunkSHA1Cache>() && o.all_of<Components::FT1InfoSHA1Data>()) {
to_remove.push_back(ov);
return;
}
if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
// transfer done, we stop announcing
to_remove.push_back(ov);
return;
}
// update all timers
rat.timer -= delta;
// send announces
if (rat.timer <= 0.f) {
rat.reset(); // exponential back-off
std::vector<uint8_t> announce_id;
const uint32_t file_kind = static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_INFO);
for (size_t i = 0; i < sizeof(file_kind); i++) {
announce_id.push_back((file_kind>>(i*8)) & 0xff);
}
assert(o.all_of<Components::FT1InfoSHA1Hash>());
const auto& info_hash = o.get<Components::FT1InfoSHA1Hash>().hash;
announce_id.insert(announce_id.cend(), info_hash.cbegin(), info_hash.cend());
for (const auto cv : o.get<Components::AnnounceTargets>().targets) {
if (cr.all_of<Contact::Components::ToxGroupPeerEphemeral>(cv)) {
// private ?
const auto [group_number, peer_number] = cr.get<Contact::Components::ToxGroupPeerEphemeral>(cv);
neep.send_pc1_announce(group_number, peer_number, announce_id.data(), announce_id.size());
} else if (cr.all_of<Contact::Components::ToxGroupEphemeral>(cv)) {
// public
const auto group_number = cr.get<Contact::Components::ToxGroupEphemeral>(cv).group_number;
neep.send_all_pc1_announce(group_number, announce_id.data(), announce_id.size());
} else {
assert(false && "we dont know how to announce to this target");
}
}
}
});
for (const auto ov : to_remove) {
os_reg.remove<Components::ReAnnounceTimer>(ov);
// we keep the announce target list around (if it exists)
// TODO: should we make the target list more generic?
}
// TODO: how to handle unpause?
}
} // Systems
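The announce id built above is the 4-byte little-endian file kind followed by the 20-byte SHA1 info hash. A compact restating of the same layout (a sketch, not a second implementation in the repo):

// [file_kind as 4 LE bytes][20-byte FT1InfoSHA1Hash]
std::vector<uint8_t> make_announce_id(uint32_t file_kind, const std::vector<uint8_t>& info_hash) {
    std::vector<uint8_t> id;
    id.reserve(sizeof(file_kind) + info_hash.size());
    for (size_t i = 0; i < sizeof(file_kind); i++) {
        id.push_back((file_kind >> (i*8)) & 0xff); // least significant byte first
    }
    id.insert(id.cend(), info_hash.cbegin(), info_hash.cend());
    return id;
}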

View File

@ -0,0 +1,17 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/contact/fwd.hpp>
#include <solanaceae/ngc_ext/ngcext.hpp>
namespace Systems {
void re_announce(
ObjectRegistry& os_reg,
ContactStore4I& cs,
NGCEXTEventProvider& neep,
const float delta
);
} // Systems

View File

@ -7,8 +7,8 @@ void ReceivingTransfers::tick(float delta) {
for (auto it = peer_it->second.begin(); it != peer_it->second.end();) {
it->second.time_since_activity += delta;
// if we have not heard for 20sec, timeout
if (it->second.time_since_activity >= 20.f) {
// if we have not heard for 60sec, timeout
if (it->second.time_since_activity >= 60.f) {
std::cerr << "SHA1_NGCFT1 warning: receiving tansfer timed out " << "." << int(it->first) << "\n";
// TODO: if info, requeue? or just keep the timer comp? - no, timer comp will continue ticking, even if loading
//it->second.v

View File

@ -0,0 +1,128 @@
#include "./sending_transfers.hpp"
#include <iostream>
#include <cassert>
void SendingTransfers::tick(float delta) {
for (auto peer_it = _data.begin(); peer_it != _data.end();) {
for (auto it = peer_it->second.begin(); it != peer_it->second.end();) {
it->second.time_since_activity += delta;
// if we have not heard for 10min, timeout (lower level event on real timeout)
// (2min was too little, so it seems)
// TODO: do we really need this if we get events?
// FIXME: disabled for now, we are trusting ngcft1 for now
if (false && it->second.time_since_activity >= 60.f*10.f) {
std::cerr << "SHA1_NGCFT1 warning: sending tansfer timed out " << "." << int(it->first) << "\n";
assert(false);
it = peer_it->second.erase(it);
} else {
it++;
}
}
if (peer_it->second.empty()) {
// is cleaning up unused peers too aggressive?
peer_it = _data.erase(peer_it);
} else {
peer_it++;
}
}
}
SendingTransfers::Entry& SendingTransfers::emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info) {
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
ent.v = info;
return ent;
}
SendingTransfers::Entry& SendingTransfers::emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk) {
assert(!containsPeerChunk(group_number, peer_number, chunk.o, chunk.chunk_index));
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
ent.v = chunk;
return ent;
}
bool SendingTransfers::containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return false;
}
return it->second.count(transfer_id);
}
bool SendingTransfers::containsChunk(ObjectHandle o, size_t chunk_idx) const {
for (const auto& [_, p] : _data) {
for (const auto& [_2, v] : p) {
if (!v.isChunk()) {
continue;
}
const auto& c = v.getChunk();
if (c.o != o) {
continue;
}
if (c.chunk_index == chunk_idx) {
return true;
}
}
}
return false;
}
bool SendingTransfers::containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return false;
}
for (const auto& [_, v] : it->second) {
if (!v.isChunk()) {
continue;
}
const auto& c = v.getChunk();
if (c.o != o) {
continue;
}
if (c.chunk_index == chunk_idx) {
return true;
}
}
return false;
}
void SendingTransfers::removePeer(uint32_t group_number, uint32_t peer_number) {
_data.erase(combine_ids(group_number, peer_number));
}
void SendingTransfers::removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return;
}
it->second.erase(transfer_id);
}
size_t SendingTransfers::size(void) const {
size_t count {0};
for (const auto& [_, p] : _data) {
count += p.size();
}
return count;
}
size_t SendingTransfers::sizePeer(uint32_t group_number, uint32_t peer_number) const {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return 0;
}
return it->second.size();
}

View File

@ -0,0 +1,67 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
#include <entt/container/dense_map.hpp>
#include "./util.hpp"
#include <cstdint>
#include <variant>
#include <vector>
struct SendingTransfers {
struct Entry {
struct Info {
// copy of info data
// too large?
std::vector<uint8_t> info_data;
};
struct Chunk {
ObjectHandle o;
size_t chunk_index; // <.< remove offset_into_file
//uint64_t offset_into_file;
// or data?
// if memmapped, this would be just a pointer
};
std::variant<Info, Chunk> v;
float time_since_activity {0.f};
bool isInfo(void) const { return std::holds_alternative<Info>(v); }
bool isChunk(void) const { return std::holds_alternative<Chunk>(v); }
Info& getInfo(void) { return std::get<Info>(v); }
const Info& getInfo(void) const { return std::get<Info>(v); }
Chunk& getChunk(void) { return std::get<Chunk>(v); }
const Chunk& getChunk(void) const { return std::get<Chunk>(v); }
};
// key is groupid + peerid
// TODO: replace with contact
entt::dense_map<uint64_t, entt::dense_map<uint8_t, Entry>> _data;
void tick(float delta);
Entry& emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info);
Entry& emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk);
bool containsPeer(uint32_t group_number, uint32_t peer_number) const { return _data.count(combine_ids(group_number, peer_number)); }
bool containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const;
// less reliable, since we dont keep the list of chunk indices
bool containsChunk(ObjectHandle o, size_t chunk_idx) const;
bool containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const;
auto& getPeer(uint32_t group_number, uint32_t peer_number) { return _data.at(combine_ids(group_number, peer_number)); }
auto& getTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) { return getPeer(group_number, peer_number).at(transfer_id); }
void removePeer(uint32_t group_number, uint32_t peer_number);
void removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
size_t size(void) const;
size_t sizePeer(uint32_t group_number, uint32_t peer_number) const;
};
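The outer map is keyed by combine_ids(group_number, peer_number) from ./util.hpp; the helper itself is not shown here, but it presumably packs the two 32-bit tox ids into one 64-bit key, roughly like this hypothetical stand-in (the actual bit layout may differ):

constexpr uint64_t combine_ids_sketch(uint32_t group_number, uint32_t peer_number) {
    return (uint64_t(group_number) << 32) | uint64_t(peer_number);
}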

File diff suppressed because it is too large

View File

@ -3,38 +3,50 @@
// solanaceae port of sha1 fts for NGCFT1
#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/contact/fwd.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
#include <solanaceae/util/bitset.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>
#include "./ft1_sha1_info.hpp"
#include "./sending_transfers.hpp"
#include "./receiving_transfers.hpp"
#include <entt/entity/registry.hpp>
#include <entt/entity/handle.hpp>
#include "./backends/sha1_mapped_filesystem.hpp"
#include <entt/container/dense_map.hpp>
#include <variant>
#include <random>
#include <atomic>
#include <mutex>
#include <list>
#include <chrono>
class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public NGCFT1EventI, public NGCEXTEventI {
class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public ObjectStoreEventI, public NGCFT1EventI, public NGCEXTEventI {
ObjectStore2& _os;
ObjectStore2::SubscriptionReference _os_sr;
// TODO: backend abstraction
Contact3Registry& _cr;
RegistryMessageModel& _rmm;
ContactStore4I& _cs;
RegistryMessageModelI& _rmm;
RegistryMessageModelI::SubscriptionReference _rmm_sr;
NGCFT1& _nft;
NGCFT1::SubscriptionReference _nft_sr;
ToxContactModel2& _tcm;
ToxEventProviderI& _tep;
ToxEventProviderI::SubscriptionReference _tep_sr;
NGCEXTEventProvider& _neep;
NGCEXTEventProvider::SubscriptionReference _neep_sr;
Backends::SHA1MappedFilesystem _mfb;
bool _object_update_lock {false};
std::minstd_rand _rng {1337*11};
using clock = std::chrono::steady_clock;
clock::time_point _time_start_offset {clock::now()};
float getTimeNow(void) const {
return std::chrono::duration<float>{clock::now() - _time_start_offset}.count();
}
// limit this to each group?
entt::dense_map<SHA1Digest, ObjectHandle> _info_to_content;
@ -48,53 +60,35 @@ class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public
//void queueUpRequestInfo(uint32_t group_number, uint32_t peer_number, const SHA1Digest& hash);
void queueUpRequestChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle content, const SHA1Digest& hash);
struct SendingTransfer {
struct Info {
// copy of info data
// too large?
std::vector<uint8_t> info_data;
};
struct Chunk {
ObjectHandle content;
size_t chunk_index; // <.< remove offset_into_file
//uint64_t offset_into_file;
// or data?
// if memmapped, this would be just a pointer
};
std::variant<Info, Chunk> v;
float time_since_activity {0.f};
};
// key is groupid + peerid
entt::dense_map<uint64_t, entt::dense_map<uint8_t, SendingTransfer>> _sending_transfers;
SendingTransfers _sending_transfers;
ReceivingTransfers _receiving_transfers;
// makes request rotate around open content
std::deque<ObjectHandle> _queue_content_want_info;
struct QBitsetEntry {
Contact3Handle c;
ContactHandle4 c;
ObjectHandle o;
};
std::deque<QBitsetEntry> _queue_send_bitset;
// workaround missing contact events
// only used to remove participation on peer exit
entt::dense_map<uint64_t, Contact3Handle> _tox_peer_to_contact;
// FIXME: workaround missing contact events
// only used on peer exit (no, also used to quicken lookups)
entt::dense_map<uint64_t, ContactHandle4> _tox_peer_to_contact;
std::atomic_bool _info_builder_dirty {false};
std::mutex _info_builder_queue_mutex;
using InfoBuilderEntry = std::function<void(void)>;
std::list<InfoBuilderEntry> _info_builder_queue;
// reset every iterate; kept here as an allocation optimization
entt::dense_map<Contact4, size_t> _peer_open_requests;
void updateMessages(ObjectHandle ce);
std::optional<std::pair<uint32_t, uint32_t>> selectPeerForRequest(ObjectHandle ce);
void queueBitsetSendFull(Contact3Handle c, ObjectHandle o);
void queueBitsetSendFull(ContactHandle4 c, ObjectHandle o);
File2I* objGetFile2Write(ObjectHandle o);
File2I* objGetFile2Read(ObjectHandle o);
float _file_inactivity_timer {0.f};
public: // TODO: config
bool _udp_only {false};
@ -105,8 +99,8 @@ class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public
public:
SHA1_NGCFT1(
ObjectStore2& os,
Contact3Registry& cr,
RegistryMessageModel& rmm,
ContactStore4I& cs,
RegistryMessageModelI& rmm,
NGCFT1& nft,
ToxContactModel2& tcm,
ToxEventProviderI& tep,
@ -115,8 +109,16 @@ class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public
float iterate(float delta);
void onSendFileHashFinished(ObjectHandle o, Message3Registry* reg_ptr, Contact4 c, uint64_t ts);
// construct the file part in a partially constructed message
ObjectHandle constructFileMessageInPlace(Message3Handle msg, NGCFT1_file_kind file_kind, ByteSpan file_id);
protected: // rmm events (actions)
bool onEvent(const Message::Events::MessageUpdated&) override;
bool sendFilePath(const Contact4 c, std::string_view file_name, std::string_view file_path) override;
protected: // os events (actions)
bool onEvent(const ObjectStore::Events::ObjectUpdate&) override;
protected: // events
bool onEvent(const Events::NGCFT1_recv_request&) override;
@ -127,8 +129,7 @@ class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public
bool onEvent(const Events::NGCFT1_send_done&) override;
bool onEvent(const Events::NGCFT1_recv_message&) override;
bool sendFilePath(const Contact3 c, std::string_view file_name, std::string_view file_path) override;
bool onToxEvent(const Tox_Event_Group_Peer_Join* e) override;
bool onToxEvent(const Tox_Event_Group_Peer_Exit* e) override;
bool onEvent(const Events::NGCEXT_ft1_have&) override;

View File

@ -0,0 +1,118 @@
#include "./transfer_stats_systems.hpp"
#include "./components.hpp"
#include <solanaceae/object_store/meta_components_file.hpp>
#include <iostream>
namespace Systems {
void transfer_tally_update(ObjectRegistry& os_reg, const float time_now) {
std::vector<Object> tally_to_remove;
// for each tally -> stats separated
os_reg.view<Components::TransferStatsTally>().each([&os_reg, time_now, &tally_to_remove](const auto ov, Components::TransferStatsTally& tally_comp) {
// for each peer
std::vector<Contact4> to_remove;
for (auto&& [peer_c, peer] : tally_comp.tally) {
auto& tss = os_reg.get_or_emplace<Components::TransferStatsSeparated>(ov).stats;
// special logic
// if newest older than 2sec
// discard
if (!peer.recently_sent.empty()) {
if (time_now - peer.recently_sent.back().time_point >= 2.f) {
// clean up stale
auto peer_in_stats_it = tss.find(peer_c);
if (peer_in_stats_it != tss.end()) {
peer_in_stats_it->second.rate_up = 0.f;
}
peer.recently_sent.clear();
if (peer.recently_received.empty()) {
to_remove.push_back(peer_c);
}
} else {
// else trim too old front
peer.trimSent(time_now);
size_t tally_bytes {0u};
for (auto& [time, bytes, accounted] : peer.recently_sent) {
if (!accounted) {
tss[peer_c].total_up += bytes;
accounted = true;
}
tally_bytes += bytes;
}
tss[peer_c].rate_up = tally_bytes / (time_now - peer.recently_sent.front().time_point + 0.00001f);
}
}
if (!peer.recently_received.empty()) {
if (time_now - peer.recently_received.back().time_point >= 2.f) {
// clean up stale
auto peer_in_stats_it = tss.find(peer_c);
if (peer_in_stats_it != tss.end()) {
peer_in_stats_it->second.rate_down = 0.f;
}
peer.recently_received.clear();
if (peer.recently_sent.empty()) {
to_remove.push_back(peer_c);
}
} else {
// else trim too old front
peer.trimReceived(time_now);
size_t tally_bytes {0u};
for (auto& [time, bytes, accounted] : peer.recently_received) {
if (!accounted) {
tss[peer_c].total_down += bytes;
accounted = true;
}
tally_bytes += bytes;
}
tss[peer_c].rate_down = tally_bytes / (time_now - peer.recently_received.front().time_point + 0.00001f);
}
}
}
for (const auto c : to_remove) {
tally_comp.tally.erase(c);
}
if (tally_comp.tally.empty()) {
tally_to_remove.push_back(ov);
}
});
// for each stats separated -> stats (total)
os_reg.view<Components::TransferStatsSeparated, Components::TransferStatsTally>().each([&os_reg](const auto ov, Components::TransferStatsSeparated& tss_comp, const auto&) {
auto& stats = os_reg.get_or_emplace<ObjComp::Ephemeral::File::TransferStats>(ov);
stats = {}; // reset
for (const auto& [_, peer_stats] : tss_comp.stats) {
stats.rate_up += peer_stats.rate_up;
stats.rate_down += peer_stats.rate_down;
stats.total_up += peer_stats.total_up;
stats.total_down += peer_stats.total_down;
}
#if 0
std::cout << "updated stats:\n"
<< " rate u:" << stats.rate_up/1024 << "KiB/s d:" << stats.rate_down/1024 << "KiB/s\n"
<< " total u:" << stats.total_up/1024 << "KiB d:" << stats.total_down/1024 << "KiB\n"
;
#endif
});
for (const auto ov : tally_to_remove) {
os_reg.remove<Components::TransferStatsTally>(ov);
}
}
} // Systems
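To make the rate computation above concrete, a worked example with made-up numbers:

// recently_sent = { {t=9.25s, 16 KiB}, {t=9.60s, 16 KiB}, {t=9.90s, 16 KiB} }, time_now = 10.0s
// tally_bytes = 48 KiB, window = 10.0 - 9.25 = 0.75s
// rate_up ≈ 48 KiB / 0.75s = 64 KiB/s; only entries not yet marked accounted add to total_up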

View File

@ -0,0 +1,11 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
namespace Systems {
// time only needs to be relative
void transfer_tally_update(ObjectRegistry& os_reg, const float time_now);
} // Systems

View File

@ -0,0 +1,534 @@
#include "./ngc_hs2_rizzler.hpp"
#include <solanaceae/contact/contact_store_i.hpp>
#include <solanaceae/contact/components.hpp>
#include <solanaceae/tox_contacts/components.hpp>
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
#include <solanaceae/message3/contact_components.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/message3/components.hpp>
#include <solanaceae/tox_messages/msg_components.hpp>
#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>
// TODO: move somewhere else?
#include <solanaceae/ngc_ft1_sha1/util.hpp>
#include <solanaceae/util/span.hpp>
#include <solanaceae/util/time.hpp>
#include <entt/entity/entity.hpp>
#include <nlohmann/json.hpp>
#include "./serl.hpp"
#include <cstdint>
#include <deque>
#include <cstring>
#include <iostream>
// TODO: move to own file
namespace Components {
struct RequestedChatLogs {
struct Entry {
uint64_t ts_start;
uint64_t ts_end;
//std::vector<uint8_t> fid; // ?
};
std::deque<Entry> list;
bool contains(uint64_t ts_start, uint64_t ts_end);
void addRequest(uint64_t ts_start, uint64_t ts_end);
};
struct RunningChatLogs {
struct Entry {
uint64_t ts_start;
uint64_t ts_end;
std::vector<uint8_t> data;
float last_activity {0.f};
};
// list of transfers
entt::dense_map<uint8_t, Entry> list;
};
bool RequestedChatLogs::contains(uint64_t ts_start, uint64_t ts_end) {
auto it = std::find_if(list.cbegin(), list.cend(), [ts_start, ts_end](const auto& value) {
return value.ts_start == ts_start && value.ts_end == ts_end;
});
return it != list.cend();
}
void RequestedChatLogs::addRequest(uint64_t ts_start, uint64_t ts_end) {
if (contains(ts_start, ts_end)) {
return; // pre-existing
}
list.push_back(Entry{ts_start, ts_end});
}
} // Components
NGCHS2Rizzler::NGCHS2Rizzler(
ContactStore4I& cs,
RegistryMessageModelI& rmm,
ToxContactModel2& tcm,
NGCFT1& nft,
ToxEventProviderI& tep,
SHA1_NGCFT1& sha1_nft
) :
_cs(cs),
_rmm(rmm),
_tcm(tcm),
_nft(nft),
_nftep_sr(_nft.newSubRef(this)),
_tep_sr(tep.newSubRef(this)),
_sha1_nft(sha1_nft)
{
_nftep_sr
.subscribe(NGCFT1_Event::recv_init)
.subscribe(NGCFT1_Event::recv_data)
.subscribe(NGCFT1_Event::recv_done)
;
_tep_sr
.subscribe(Tox_Event_Type::TOX_EVENT_GROUP_PEER_JOIN)
;
}
NGCHS2Rizzler::~NGCHS2Rizzler(void) {
}
float NGCHS2Rizzler::iterate(float delta) {
for (auto it = _request_queue.begin(); it != _request_queue.end();) {
it->second.timer += delta;
if (it->second.timer < it->second.delay) {
it++;
continue;
}
const ContactHandle4 c = _cs.contactHandle(it->first);
if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral>()) {
// peer no longer online
it = _request_queue.erase(it);
continue;
}
const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();
// now in sec
const uint64_t ts_now = getTimeMS()/1000;
const uint64_t ts_start = ts_now;
const uint64_t ts_end = ts_now-(60*60*48);
if (sendRequest(group_number, peer_number, ts_start, ts_end)) {
// TODO: requeue
// TODO: segment
// TODO: dont request already received ranges
//// on success, requeue with longer delay (minutes)
//it->second.timer = 0.f;
//it->second.delay = _delay_next_request_min + _rng_dist(_rng)*_delay_next_request_add;
//// double the delay for overlap (9m-15m)
//// TODO: finetune
//it->second.sync_delta = uint8_t((it->second.delay/60.f)*2.f) + 1;
//std::cout << "ZOX #### requeued request in " << it->second.delay << "s\n";
auto& rcl = c.get_or_emplace<Components::RequestedChatLogs>();
rcl.addRequest(ts_start, ts_end);
} else {
// on failure, assume disconnected
}
// remove from request queue
it = _request_queue.erase(it);
}
return 1000.f;
}
bool NGCHS2Rizzler::sendRequest(
uint32_t group_number, uint32_t peer_number,
uint64_t ts_start, uint64_t ts_end
) {
std::cout << "NGCHS2Rizzler: sending request to " << group_number << ":" << peer_number << " (" << ts_start << "," << ts_end << ")\n";
// build fid
std::vector<uint8_t> fid;
fid.reserve(sizeof(uint64_t)+sizeof(uint64_t));
serlSimpleType(fid, ts_start);
serlSimpleType(fid, ts_end);
assert(fid.size() == sizeof(uint64_t)+sizeof(uint64_t));
return _nft.NGC_FT1_send_request_private(
group_number, peer_number,
(uint32_t)NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK,
fid.data(), fid.size() // fid
);
}
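The request id for HS2_RANGE_TIME_MSGPACK is therefore 16 bytes, ts_start followed by ts_end, each a 64-bit unix-seconds value encoded by serlSimpleType (the byte order is whatever serl.hpp defines; the lines below only restate the layout):

// bytes 0..7  : ts_start (here: now)
// bytes 8..15 : ts_end   (here: now - 48h)
static_assert(2*sizeof(uint64_t) == 16, "HS2 range request id is 16 bytes");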
void NGCHS2Rizzler::handleMsgPack(ContactHandle4 sync_by_c, const std::vector<uint8_t>& data) {
assert(sync_by_c);
auto* reg_ptr = _rmm.get(sync_by_c);
if (reg_ptr == nullptr) {
std::cerr << "NGCHS2Rizzler error: group without msg reg\n";
return;
}
Message3Registry& reg = *reg_ptr;
uint64_t now_ts = getTimeMS();
std::cout << "NGCHS2Rizzler: start parsing msgpack chatlog from " << entt::to_integral(sync_by_c.entity()) << "\n";
try {
const auto j = nlohmann::json::from_msgpack(data);
if (!j.is_array()) {
std::cerr << "NGCHS2Rizzler error: chatlog not array\n";
return;
}
std::cout << "NGCHS2Rizzler: chatlog has " << j.size() << " entries\n";
for (const auto j_entry : j) {
try {
// deci seconds
uint64_t ts = j_entry.at("ts");
// TODO: check against ts range
ts *= 100; // convert to ms
const uint64_t max_future_ms = 1u*60u*1000u; // accept up to 1 minute into the future
if (ts > now_ts + max_future_ms) {
// message is too far into the future
continue;
}
const auto& j_ppk = j_entry.at("ppk");
uint32_t mid = j_entry.at("mid");
if (
!(j_entry.count("text")) &&
!(j_entry.count("fkind") && j_entry.count("fid"))
) {
std::cerr << "NGCHS2Rizzler error: msg neither contains text nor file fields\n";
continue;
}
Contact4 from_c{entt::null};
{ // from_c
std::vector<uint8_t> id;
if (j_ppk.is_binary()) {
id = j_ppk.get_binary();
} else {
j_ppk.at("bytes").get_to(id);
}
const auto parent = sync_by_c.get<Contact::Components::Parent>().parent;
from_c = _cs.getOneContactByID(parent, ByteSpan{id});
auto& cr = _cs.registry();
if (!cr.valid(from_c)) {
// create sparse contact with id only
from_c = cr.create();
cr.emplace_or_replace<Contact::Components::ID>(from_c, id);
// TODO: only if public message
cr.emplace_or_replace<Contact::Components::Parent>(from_c, parent);
_cs.throwEventConstruct(from_c);
}
}
// TODO: from_c perm check
// hard to do without numbers
Message3Handle new_real_msg{reg, reg.create()};
new_real_msg.emplace<Message::Components::Timestamp>(ts); // reactive?
new_real_msg.emplace<Message::Components::ContactFrom>(from_c);
new_real_msg.emplace<Message::Components::ContactTo>(sync_by_c.get<Contact::Components::Parent>().parent);
new_real_msg.emplace<Message::Components::ToxGroupMessageID>(mid);
if (j_entry.contains("action") && static_cast<bool>(j_entry.at("action"))) {
new_real_msg.emplace<Message::Components::TagMessageIsAction>();
}
if (j_entry.contains("text")) {
const std::string& text = j_entry.at("text");
new_real_msg.emplace<Message::Components::MessageText>(text);
#if 0
std::cout
<< "msg ts:" << ts
//<< " ppk:" << j_ppk
<< " mid:" << mid
<< " type:" << type
<< " text:" << text
<< "\n"
;
#endif
} else if (j_entry.contains("fkind") && j_entry.contains("fid")) {
uint32_t fkind = j_entry.at("fkind");
const auto& j_fid = j_entry.at("fid");
std::vector<uint8_t> fid;
if (j_fid.is_binary()) {
fid = j_fid.get_binary();
} else {
j_fid.at("bytes").get_to(fid);
}
if (fkind == (uint32_t)NGCFT1_file_kind::HASH_SHA1_INFO) {
_sha1_nft.constructFileMessageInPlace(
new_real_msg,
NGCFT1_file_kind::HASH_SHA1_INFO,
ByteSpan{fid}
);
} else {
std::cerr << "NGCHS2Rizzler error: unknown file kind " << fkind << "\n";
}
#if 0
std::cout
<< "msg ts:" << ts
//<< " ppk:" << j_ppk
<< " mid:" << mid
<< " type:" << type
<< " fkind:" << fkind
<< " fid:" << j_fid
<< "\n"
;
#endif
}
// now check against pre existing
// TODO: dont do this afterwards
Message3Handle dup_msg{};
{ // check preexisting
// get comparator from contact
const ContactHandle4 reg_c = _cs.contactHandle(reg.ctx().get<Contact4>());
if (reg_c.all_of<Contact::Components::MessageIsSame>()) {
auto& comp = reg_c.get<Contact::Components::MessageIsSame>().comp;
// walking EVERY existing message OOF
// this needs optimizing
for (const Message3 other_msg : reg.view<Message::Components::Timestamp, Message::Components::ContactFrom, Message::Components::ContactTo>()) {
if (other_msg == new_real_msg) {
continue; // skip self
}
if (comp({reg, other_msg}, new_real_msg)) {
// dup
dup_msg = {reg, other_msg};
break;
}
}
} // else, default heuristic??
}
Message3Handle new_msg = new_real_msg;
if (dup_msg) {
// we leak objects here (if file)
reg.destroy(new_msg);
new_msg = dup_msg;
}
{ // by whom
auto& synced_by = new_msg.get_or_emplace<Message::Components::SyncedBy>().ts;
// dont overwrite
synced_by.try_emplace(sync_by_c, now_ts);
}
{ // now we also know they got the message
auto& list = new_msg.get_or_emplace<Message::Components::ReceivedBy>().ts;
// dont overwrite
list.try_emplace(sync_by_c, now_ts);
}
if (new_msg == dup_msg) {
// TODO: maybe update a timestamp?
_rmm.throwEventUpdate(reg, new_msg);
} else {
// pure new msg
new_msg.emplace<Message::Components::TimestampProcessed>(now_ts);
new_msg.emplace<Message::Components::TimestampWritten>(ts);
new_msg.emplace<Message::Components::TagUnread>();
_rmm.throwEventConstruct(reg, new_msg);
}
} catch (...) {
std::cerr << "NGCHS2Rizzler error: parsing entry '" << j_entry.dump() << "'\n";
}
}
} catch (...) {
std::cerr << "NGCHS2Rizzler error: failed parsing data as msgpack\n";
}
}
bool NGCHS2Rizzler::onEvent(const Events::NGCFT1_recv_init& e) {
if (e.file_kind != NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK) {
return false; // not for us
}
std::cout << "NGCHS2Rizzler: recv_init " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << "\n";
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
if (!c) {
return false; // huh?
}
if (!c.all_of<Components::RequestedChatLogs>()) {
return false;
}
// parse start end
// TODO: extract
ByteSpan fid{e.file_id, e.file_id_size};
// TODO: better size check
if (fid.size != sizeof(uint64_t)+sizeof(uint64_t)) {
std::cerr << "NGCHS2R error: range not long enough\n";
return true;
}
// seconds
uint64_t ts_start{0};
uint64_t ts_end{0};
// parse
try {
ByteSpan ts_start_bytes{fid.ptr, sizeof(uint64_t)};
ts_start = deserlTS(ts_start_bytes);
ByteSpan ts_end_bytes{ts_start_bytes.ptr+ts_start_bytes.size, sizeof(uint64_t)};
ts_end = deserlTS(ts_end_bytes);
} catch (...) {
std::cerr << "NGCHS2R error: failed to parse range\n";
return true;
}
if (ts_end >= ts_start) {
std::cerr << "NGCHS2R error: end not < start\n";
return true;
}
auto& reqcl = c.get<Components::RequestedChatLogs>();
if (!reqcl.contains(ts_start, ts_end)) {
// warn?
return true;
}
auto& rnncl = c.get_or_emplace<Components::RunningChatLogs>();
_tox_peer_to_contact[combine_ids(e.group_number, e.peer_number)] = c; // cache
auto& transfer = rnncl.list[e.transfer_id];
transfer.data.reserve(e.file_size); // danger?
transfer.last_activity = 0.f;
transfer.ts_start = ts_start;
transfer.ts_end = ts_end;
e.accept = true;
return true;
}
bool NGCHS2Rizzler::onEvent(const Events::NGCFT1_recv_data& e) {
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
if (!c) {
return false;
}
if (!c.all_of<Components::RunningChatLogs>()) {
return false; // not ours
}
auto& rnncl = c.get<Components::RunningChatLogs>();
if (!rnncl.list.count(e.transfer_id)) {
return false; // not ours
}
std::cout << "NGCHS2Rizzler: recv_data " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << " " << e.data_size << "@" << e.data_offset << "\n";
auto& transfer = rnncl.list.at(e.transfer_id);
transfer.data.resize(e.data_offset+e.data_size);
std::memcpy(&transfer.data[e.data_offset], e.data, e.data_size);
transfer.last_activity = 0.f;
return true;
}
bool NGCHS2Rizzler::onEvent(const Events::NGCFT1_recv_done& e) {
// FIXME: this does not work, tcm just deleted the relationship
//auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
//if (!c) {
// return false;
//}
const auto c_it = _tox_peer_to_contact.find(combine_ids(e.group_number, e.peer_number));
if (c_it == _tox_peer_to_contact.end()) {
return false;
}
auto c = c_it->second;
if (!static_cast<bool>(c)) {
return false;
}
if (!c.all_of<Components::RunningChatLogs>()) {
return false; // not ours
}
auto& rnncl = c.get<Components::RunningChatLogs>();
if (!rnncl.list.count(e.transfer_id)) {
return false; // not ours
}
std::cout << "NGCHS2Rizzler: recv_done " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << "\n";
{
auto& transfer = rnncl.list.at(e.transfer_id);
// TODO: done might mean failed, so we might be parsing garbage here
// use data
// TODO: move out of packet handler
handleMsgPack(c, transfer.data);
}
rnncl.list.erase(e.transfer_id);
return true;
}
bool NGCHS2Rizzler::onToxEvent(const Tox_Event_Group_Peer_Join* e) {
const auto group_number = tox_event_group_peer_join_get_group_number(e);
const auto peer_number = tox_event_group_peer_join_get_peer_id(e);
const auto c = _tcm.getContactGroupPeer(group_number, peer_number);
if (!c) {
return false;
}
if (!_request_queue.count(c)) {
_request_queue[c] = {
_delay_before_first_request_min + _rng_dist(_rng)*_delay_before_first_request_add,
0.f,
0,
};
}
return false;
}

View File

@@ -0,0 +1,73 @@
#pragma once
#include <solanaceae/contact/fwd.hpp>
#include <solanaceae/toxcore/tox_event_interface.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>
#include <solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp>
#include <entt/container/dense_map.hpp>
#include <map>
#include <random>
// fwd
class ToxContactModel2;
class RegistryMessageModelI;
class NGCHS2Rizzler : public ToxEventI, public NGCFT1EventI {
ContactStore4I& _cs;
RegistryMessageModelI& _rmm;
ToxContactModel2& _tcm;
NGCFT1& _nft;
NGCFT1EventProviderI::SubscriptionReference _nftep_sr;
ToxEventProviderI::SubscriptionReference _tep_sr;
SHA1_NGCFT1& _sha1_nft;
// 5s-6s
const float _delay_before_first_request_min {5.f};
const float _delay_before_first_request_add {1.f};
std::uniform_real_distribution<float> _rng_dist {0.0f, 1.0f};
std::minstd_rand _rng;
struct RequestQueueInfo {
float delay; // const
float timer;
uint64_t sync_delta; //?
};
// request queue
// c -> delay, timer
std::map<Contact4, RequestQueueInfo> _request_queue;
// FIXME: workaround missing contact events
// used on peer exit and to speed up lookups
entt::dense_map<uint64_t, ContactHandle4> _tox_peer_to_contact;
public:
NGCHS2Rizzler(
ContactStore4I& cs,
RegistryMessageModelI& rmm,
ToxContactModel2& tcm,
NGCFT1& nft,
ToxEventProviderI& tep,
SHA1_NGCFT1& sha1_nft
);
~NGCHS2Rizzler(void);
float iterate(float delta);
protected:
bool sendRequest(
uint32_t group_number, uint32_t peer_number,
uint64_t ts_start, uint64_t ts_end
);
void handleMsgPack(ContactHandle4 c, const std::vector<uint8_t>& data);
protected:
bool onEvent(const Events::NGCFT1_recv_init&) override;
bool onEvent(const Events::NGCFT1_recv_data&) override;
bool onEvent(const Events::NGCFT1_recv_done&) override;
protected:
bool onToxEvent(const Tox_Event_Group_Peer_Join* e) override;
};

View File

@@ -0,0 +1,470 @@
#include "./ngc_hs2_sigma.hpp"
#include <solanaceae/util/span.hpp>
#include <solanaceae/contact/contact_store_i.hpp>
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
#include <solanaceae/contact/components.hpp>
#include <solanaceae/tox_contacts/components.hpp>
#include <solanaceae/message3/components.hpp>
#include <solanaceae/tox_messages/msg_components.hpp>
//#include <solanaceae/tox_messages/obj_components.hpp>
// TODO: this is kinda bad, needs improvement
// use tox fileid/filekind instead !
#include <solanaceae/ngc_ft1/ngcft1_file_kind.hpp>
#include <solanaceae/ngc_ft1_sha1/components.hpp>
// TODO: move somewhere else?
#include <solanaceae/ngc_ft1_sha1/util.hpp>
#include <nlohmann/json.hpp>
#include "./serl.hpp"
#include "./ts_find_start.hpp"
#include <iostream>
#include <algorithm>
#include <cstring>
// https://www.youtube.com/watch?v=AdAqsgga3qo
// TODO: move to own file
namespace Components {
struct IncommingTimeRangeRequestQueue {
struct Entry {
TimeRangeRequest ir;
std::vector<uint8_t> fid;
};
std::deque<Entry> _queue;
// we should remove/not add queued requests
// that are subsets of the same or larger ranges
void queueRequest(const TimeRangeRequest& new_request, const ByteSpan fid);
};
struct IncommingTimeRangeRequestRunning {
struct Entry {
TimeRangeRequest ir;
std::vector<uint8_t> data; // transfer data in memory
float last_activity {0.f};
};
entt::dense_map<uint8_t, Entry> _list;
};
void IncommingTimeRangeRequestQueue::queueRequest(const TimeRangeRequest& new_request, const ByteSpan fid) {
// TODO: do more than exact dedupe
for (const auto& [time_range, _] : _queue) {
if (time_range.ts_start == new_request.ts_start && time_range.ts_end == new_request.ts_end) {
return; // already enqueued
// TODO: what about fid?
}
}
_queue.emplace_back(Entry{
new_request,
std::vector<uint8_t>{fid.cbegin(), fid.cend()}
});
}
} // Components
NGCHS2Sigma::NGCHS2Sigma(
ContactStore4I& cs,
RegistryMessageModelI& rmm,
ToxContactModel2& tcm,
NGCFT1& nft
) :
_cs(cs),
_rmm(rmm),
_tcm(tcm),
_nft(nft),
_nftep_sr(_nft.newSubRef(this))
{
_nftep_sr
.subscribe(NGCFT1_Event::recv_request)
.subscribe(NGCFT1_Event::send_data)
.subscribe(NGCFT1_Event::send_done)
;
}
NGCHS2Sigma::~NGCHS2Sigma(void) {
}
float NGCHS2Sigma::iterate(float delta) {
// limit how often we update here (new fts usually)
if (_iterate_heat > 0.f) {
_iterate_heat -= delta;
return 1000.f; // return heat?
} else {
_iterate_heat = _iterate_cooldown;
}
// work request queue
// check if already running, discard
auto fn_iirq = [this](auto&& view) {
for (auto&& [cv, iirq] : view.each()) {
if (iirq._queue.empty()) {
// TODO: remove comp?
continue;
}
ContactHandle4 c = _cs.contactHandle(cv);
auto& iirr = c.get_or_emplace<Components::IncommingTimeRangeRequestRunning>();
// dedup queued from running
if (iirr._list.size() >= _max_parallel_per_peer) {
continue;
}
// new ft here
// TODO: loop? nah just 1 per tick is enough
const auto request_entry = iirq._queue.front(); // copy
assert(!request_entry.fid.empty());
if (!c.all_of<Contact::Components::Parent>()) {
iirq._queue.pop_front();
continue; // how
}
const ContactHandle4 group_c = {*c.registry(), c.get<Contact::Components::Parent>().parent};
if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral>()) {
iirq._queue.pop_front();
continue;
}
const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();
_tox_peer_to_contact[combine_ids(group_number, peer_number)] = c; // cache
// TODO: check allowed range here
//_max_time_into_past_default
// potentially heavy op
auto data = buildChatLogFileRange(group_c, request_entry.ir.ts_start, request_entry.ir.ts_end);
uint8_t transfer_id {0};
if (!_nft.NGC_FT1_send_init_private(
group_number, peer_number,
(uint32_t)NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK,
request_entry.fid.data(), request_entry.fid.size(),
data.size(),
&transfer_id,
true // can_compress (does nothing rn)
)) {
// sending failed, we do not pop but wait for next iterate
// TODO: cache data
// TODO: fail counter
// actually, fail probably means offline, so delete?
continue;
}
assert(iirr._list.count(transfer_id) == 0);
iirr._list[transfer_id] = {request_entry.ir, data};
iirq._queue.pop_front();
}
};
auto& cr = _cs.registry();
// first handle range requests on weak self
fn_iirq(cr.view<Components::IncommingTimeRangeRequestQueue, Contact::Components::TagSelfWeak>());
// we could stop here, if too much is already running
// then range on others
fn_iirq(cr.view<Components::IncommingTimeRangeRequestQueue>(entt::exclude_t<Contact::Components::TagSelfWeak>{}));
cr.view<Components::IncommingTimeRangeRequestRunning>().each(
[delta](const auto cv, Components::IncommingTimeRangeRequestRunning& irr) {
std::vector<uint8_t> to_remove;
for (auto&& [ft_id, entry] : irr._list) {
entry.last_activity += delta;
if (entry.last_activity >= 60.f) {
to_remove.push_back(ft_id);
}
}
for (const auto it : to_remove) {
std::cout << "NGCHS2Sigma warning: transfer timed out ." << (int)it << "\n";
// TODO: need a way to tell ft?
irr._list.erase(it);
// technically we are not supposed to timeout and instead rely on the done event
}
}
);
return 1000.f;
}
void NGCHS2Sigma::handleTimeRange(ContactHandle4 c, const Events::NGCFT1_recv_request& e) {
ByteSpan fid{e.file_id, e.file_id_size};
// TODO: better size check
if (fid.size != sizeof(uint64_t)+sizeof(uint64_t)) {
std::cerr << "NGCHS2S error: range not long enough\n";
return;
}
// seconds
uint64_t ts_start{0};
uint64_t ts_end{0};
// parse
try {
ByteSpan ts_start_bytes{fid.ptr, sizeof(uint64_t)};
ts_start = deserlTS(ts_start_bytes);
ByteSpan ts_end_bytes{ts_start_bytes.ptr+ts_start_bytes.size, sizeof(uint64_t)};
ts_end = deserlTS(ts_end_bytes);
} catch (...) {
std::cerr << "NGCHS2S error: failed to parse range\n";
return;
}
if (ts_end >= ts_start) {
std::cerr << "NGCHS2S error: end not < start\n";
return;
}
if (!c.all_of<Contact::Components::TagSelfWeak>()) {
// make sure we don't sync past the peer's first appearance
if (const auto first_seen_ptr = c.try_get<Contact::Components::FirstSeen>(); first_seen_ptr != nullptr) {
ts_start = std::max(ts_start, first_seen_ptr->ts);
}
}
// dedupe insert into queue
// how much overlap do we allow?
c.get_or_emplace<Components::IncommingTimeRangeRequestQueue>().queueRequest(
{ts_start, ts_end},
fid
);
}
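// serializes all public messages in the range into a msgpack array
// ts_start/ts_end are in seconds, ts_start being the newest timestamp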
std::vector<uint8_t> NGCHS2Sigma::buildChatLogFileRange(ContactHandle4 c, uint64_t ts_start, uint64_t ts_end) {
const Message3Registry* reg_ptr = static_cast<const RegistryMessageModelI&>(_rmm).get(c);
if (reg_ptr == nullptr) {
return {};
}
const Message3Registry& msg_reg = *reg_ptr;
if (msg_reg.storage<Message::Components::Timestamp>() == nullptr) {
// nothing to do here
return {};
}
std::cout << "NGCHS2Sigma: building chatlog for time range " << ts_start-ts_end << "s\n";
// convert seconds to milliseconds
// TODO: lift out?
ts_start *= 1000;
ts_end *= 1000;
//std::cout << "!!!! starting msg ts search, ts_start:" << ts_start << " ts_end:" << ts_end << "\n";
auto ts_view = msg_reg.view<Message::Components::Timestamp>();
// we iterate "forward", so from newest to oldest
// start is the newest ts
const auto ts_start_it = find_start_by_ts(ts_view, ts_start);
// end is the oldest ts
// we only search for the start point, because we walk to the end anyway
auto j_array = nlohmann::json::array_t{};
// hmm
// maybe use other view or something?
for (auto it = ts_start_it; it != ts_view.end(); it++) {
const auto e = *it;
const auto& [ts_comp] = ts_view.get(e);
if (ts_comp.ts > ts_start) {
std::cerr << "!!!! msg ent in view too new\n";
continue;
} else if (ts_comp.ts < ts_end) {
// too old, we hit the end of the range
break;
}
if (!msg_reg.all_of<
Message::Components::ContactFrom,
Message::Components::ContactTo,
Message::Components::ToxGroupMessageID
>(e)) {
continue; // ??
}
if (!msg_reg.any_of<Message::Components::MessageText, Message::Components::MessageFileObject>(e)) {
continue; // skip
}
const auto& [c_from_c, c_to_c] = msg_reg.get<Message::Components::ContactFrom, Message::Components::ContactTo>(e);
if (c_to_c.c != c) {
// message was not public
continue;
}
ContactHandle4 c_from = _cs.contactHandle(c_from_c.c);
if (!static_cast<bool>(c_from)) {
continue; // ???
}
if (!c_from.all_of<Contact::Components::ToxGroupPeerPersistent>()) {
continue; // ???
}
if (_only_send_self_observed && msg_reg.all_of<Message::Components::SyncedBy>(e) && c.all_of<Contact::Components::Self>()) {
if (!msg_reg.get<Message::Components::SyncedBy>(e).ts.count(c.get<Contact::Components::Self>().self)) {
continue; // we did not observe it ourselves, skip
}
}
auto j_entry = nlohmann::json::object_t{};
j_entry["ts"] = ts_comp.ts/100; // millisec -> decisec
{
const auto& ppk_ref = c_from.get<Contact::Components::ToxGroupPeerPersistent>().peer_key.data;
j_entry["ppk"] = nlohmann::json::binary_t{std::vector<uint8_t>{ppk_ref.cbegin(), ppk_ref.cend()}};
}
j_entry["mid"] = msg_reg.get<Message::Components::ToxGroupMessageID>(e).id;
if (msg_reg.all_of<Message::Components::TagMessageIsAction>(e)) {
j_entry["action"] = true;
}
if (msg_reg.all_of<Message::Components::MessageText>(e)) {
j_entry["text"] = msg_reg.get<Message::Components::MessageText>(e).text;
} else if (msg_reg.any_of<Message::Components::MessageFileObject>(e)) {
const auto& o = msg_reg.get<Message::Components::MessageFileObject>(e).o;
if (!o) {
continue;
}
// HACK: use tox file_id and file_kind instead!!
if (o.all_of<Components::FT1InfoSHA1Hash>()) {
j_entry["fkind"] = NGCFT1_file_kind::HASH_SHA1_INFO;
j_entry["fid"] = nlohmann::json::binary_t{o.get<Components::FT1InfoSHA1Hash>().hash};
} else {
continue; // unknown file type
}
}
j_array.push_back(j_entry);
}
std::cout << "NGCHS2Sigma: built chat log with " << j_array.size() << " entries\n";
return nlohmann::json::to_msgpack(j_array);
}
bool NGCHS2Sigma::onEvent(const Message::Events::MessageConstruct&) {
return false;
}
bool NGCHS2Sigma::onEvent(const Message::Events::MessageUpdated&) {
return false;
}
bool NGCHS2Sigma::onEvent(const Message::Events::MessageDestory&) {
return false;
}
bool NGCHS2Sigma::onEvent(const Events::NGCFT1_recv_request& e) {
if (
e.file_kind != NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK
) {
return false; // not for us
}
// TODO: when is it done from queue?
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
if (!c) {
return false; // how
}
// is other peer allowed to make requests
//bool quick_allow {false};
bool quick_allow {true}; // HACK: disable all restrictions for this early test
// TODO: quick deny?
{
// - tagged as weakself
if (!quick_allow && c.all_of<Contact::Components::TagSelfWeak>()) {
quick_allow = true;
}
// - sub perm level??
// - out of max time range (ft specific, not a quick_allow)
}
if (e.file_kind == NGCFT1_file_kind::HS2_RANGE_TIME_MSGPACK) {
handleTimeRange(c, e);
}
return true;
}
bool NGCHS2Sigma::onEvent(const Events::NGCFT1_send_data& e) {
auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
if (!c) {
return false;
}
if (!c.all_of<Components::IncommingTimeRangeRequestRunning>()) {
return false;
}
auto& irr = c.get<Components::IncommingTimeRangeRequestRunning>();
if (!irr._list.count(e.transfer_id)) {
return false; // not for us (maybe)
}
auto& transfer = irr._list.at(e.transfer_id);
if (transfer.data.size() < e.data_offset+e.data_size) {
std::cerr << "NGCHS2Sigma error: ft send data larger than file???\n";
assert(false && "how");
}
std::memcpy(e.data, transfer.data.data()+e.data_offset, e.data_size);
transfer.last_activity = 0.f;
return true;
}
bool NGCHS2Sigma::onEvent(const Events::NGCFT1_send_done& e) {
// TODO: this will return null if the peer just disconnected
// FIXME: this does not work, tcm just deleted the relationship
//auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
//if (!c) {
// return false;
//}
const auto c_it = _tox_peer_to_contact.find(combine_ids(e.group_number, e.peer_number));
if (c_it == _tox_peer_to_contact.end()) {
return false;
}
auto c = c_it->second;
if (!static_cast<bool>(c)) {
return false;
}
if (!c.all_of<Components::IncommingTimeRangeRequestRunning>()) {
return false;
}
auto& irr = c.get<Components::IncommingTimeRangeRequestRunning>();
if (!irr._list.count(e.transfer_id)) {
return false; // not for us (maybe)
}
irr._list.erase(e.transfer_id);
// TODO: check if we completed it
std::cout << "NGCHS2Sigma: sent chatlog to " << e.group_number << ":" << e.peer_number << "." << (int)e.transfer_id << "\n";
return true;
}

View File

@@ -0,0 +1,82 @@
#pragma once
#include <solanaceae/toxcore/tox_event_interface.hpp>
#include <solanaceae/contact/fwd.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>
#include <entt/container/dense_map.hpp>
#include <solanaceae/util/span.hpp>
#include <vector>
#include <deque>
// fwd
class ToxContactModel2;
struct TimeRangeRequest {
uint64_t ts_start{0};
uint64_t ts_end{0};
};
class NGCHS2Sigma : public RegistryMessageModelEventI, public NGCFT1EventI {
ContactStore4I& _cs;
RegistryMessageModelI& _rmm;
ToxContactModel2& _tcm;
NGCFT1& _nft;
NGCFT1EventProviderI::SubscriptionReference _nftep_sr;
float _iterate_heat {0.f};
constexpr static float _iterate_cooldown {1.22f}; // sec
// open/running range requests (by c)
// comp on peer c
// open/running range responses (by c)
// comp on peer c
// limit to 2 uploads per peer simultaneously
// TODO: increase for prod (4?) or maybe even lower?
// currently per type
constexpr static size_t _max_parallel_per_peer {2};
constexpr static bool _only_send_self_observed {true};
constexpr static int64_t _max_time_into_past_default {60*15}; // s
// FIXME: workaround missing contact events
// used on peer exit and to speed up lookups
entt::dense_map<uint64_t, ContactHandle4> _tox_peer_to_contact;
public:
NGCHS2Sigma(
ContactStore4I& cs,
RegistryMessageModelI& rmm,
ToxContactModel2& tcm,
NGCFT1& nft
);
~NGCHS2Sigma(void);
float iterate(float delta);
void handleTimeRange(ContactHandle4 c, const Events::NGCFT1_recv_request&);
// msg reg contact
// time ranges
[[nodiscard]] std::vector<uint8_t> buildChatLogFileRange(ContactHandle4 c, uint64_t ts_start, uint64_t ts_end);
protected:
bool onEvent(const Message::Events::MessageConstruct&) override;
bool onEvent(const Message::Events::MessageUpdated&) override;
bool onEvent(const Message::Events::MessageDestory&) override;
protected:
bool onEvent(const Events::NGCFT1_recv_request&) override;
bool onEvent(const Events::NGCFT1_send_data&) override;
bool onEvent(const Events::NGCFT1_send_done&) override;
};

View File

@@ -0,0 +1,32 @@
#pragma once
#include <solanaceae/util/span.hpp>
#include <cstdint>
#include <vector>
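// little-endian (de)serialization helpers for the 64-bit timestamps in file ids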
template<typename Type>
static Type deserlSimpleType(ByteSpan bytes) {
if (bytes.size < sizeof(Type)) {
throw int(1);
}
Type value{};
for (size_t i = 0; i < sizeof(Type); i++) {
value |= Type(bytes[i]) << (i*8);
}
return value;
}
static uint64_t deserlTS(ByteSpan ts_bytes) {
return deserlSimpleType<uint64_t>(ts_bytes);
}
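// appends value to bytes, least significant byte first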
template<typename Type>
static void serlSimpleType(std::vector<uint8_t>& bytes, const Type& value) {
for (size_t i = 0; i < sizeof(Type); i++) {
bytes.push_back(uint8_t(value >> (i*8) & 0xff));
}
}

View File

@@ -0,0 +1,110 @@
# [NGC] Group-History-Sync (v2.1) [PoC] [Draft]
Simple group history sync that uses `timestamp` + `peer public key` + `message_id` (`ts+ppk+mid`) to, mostly, uniquely identify messages and deliver them.
Messages are bundled up in a `msgpack` `array` and sent as a file transfer.
## Requirements
TODO: more?
### Msgpack
For serializing the messages.
### File transfers
For sending packs of messages.
Even a single message can be larger than a single custom packet, so this is a must-have.
This also allows for compression down the road.
## Procedure
Peer A can request `ts+ppk+mid+msg` list for a given time range from peer B.
Peer B then sends a file transfer (with a special file type) containing the list of `ts+ppk+mid+msg`.
Optionally compressed. (Delta-coding? / zstd)
Peer A keeps doing that until the desired time span is covered.
During all that, peer B usually does the same thing to peer A.
TODO: deny requests explicitly, including the reason (e.g. permissions, time range too large, etc.)
## Traffic savings
It is recommended to remember whether a range has already been requested from and answered by a given peer, to reduce traffic.
While compression is optional, it is recommended.
Timestamps fit delta coding.
Peer keys fit dicts.
Message ids are mostly high entropy.
The message itself is text, so dict/Huffman coding fits well.
TODO: store the 4 columns SoA instead of AoS?
## Message uniqueness
This protocol relies on the randomness of `message_id` and on clocks being more or less synchronized.
However, `message_id` can be manipulated freely by any peer, which can make messages appear as duplicates.
This can also be used deliberately, if you do not wish your messages to be synchronized (to an extent).
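To make the deduplication criterion concrete, here is a minimal sketch (hypothetical names, not part of the implementation; the actual comparison is delegated to the contact's `MessageIsSame` comparator, which may allow some timestamp slack):
```
#include <array>
#include <cstdint>

// ts+ppk+mid triple that identifies a message for sync purposes
struct MessageIdentity {
	uint64_t ts {0};                // deciseconds
	std::array<uint8_t, 32> ppk {}; // peer public key
	uint16_t mid {0};               // group message id

	bool operator==(const MessageIdentity& other) const {
		return ts == other.ts && ppk == other.ppk && mid == other.mid;
	}
};
```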
## Security
Only sync publicly sent/received messages.
Only allow sync or extended time ranges from peers you trust (enough).
The default shall be to not offer any messages.
Indirect messages shall have low credibility, while messages synced directly by their author have mid credibility.
Either only high-credibility, or high- and mid-credibility messages shall be sent.
Manual exceptions to all of the above can be made at the user's discretion, e.g. for other self-owned devices.
## File transfer requests
TODO: is reusing the ft request api a good idea for this?
| fttype | name | content (ft id) |
|------------|------|---------------------|
| 0x00000f02 | time range msgpack | - ts start <br/> - ts end |
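Both timestamps are encoded as 64-bit little-endian integers (seconds) and appended in that order, giving a 16-byte file id. A minimal sketch mirroring `NGCHS2Rizzler::sendRequest` and the helpers in `serl.hpp`:
```
std::vector<uint8_t> fid;
fid.reserve(sizeof(uint64_t)*2);
serlSimpleType(fid, ts_start); // newest timestamp of the requested range
serlSimpleType(fid, ts_end);   // oldest timestamp of the requested range
// fid (16 bytes) is passed as the file id of the HS2_RANGE_TIME_MSGPACK request
```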
## File transfer content
| fttype | name | content | note |
|------------|------|----------------------------|---|
| 0x00000f02 | time range msgpack | `message list` in msgpack | |
### time range msgpack
Msgpack array of messages.
```
name | type/size | note
-------------------------|-------------------|-----
- array | 32bit number msgs
- ts | 64bit deciseconds
- ppk | 32bytes
- mid | 16bit
- if action |
- action | bool
- if text |
- text | string | maybe byte array instead?
- if file |
- fkind | 32bit enum | is this right?
- fid | bytes kind | length depends on kind
```
The name is the actual msgpack string key.
Data type sizes are suggestions, unless defined by the tox protocol.
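A minimal sketch of building one such entry with nlohmann::json, mirroring `NGCHS2Sigma::buildChatLogFileRange` (the values are placeholders):
```
auto j_array = nlohmann::json::array_t{};
auto j_entry = nlohmann::json::object_t{};
j_entry["ts"] = ts_ms/100; // milliseconds -> deciseconds
j_entry["ppk"] = nlohmann::json::binary_t{std::vector<uint8_t>{ppk.cbegin(), ppk.cend()}};
j_entry["mid"] = message_id;
j_entry["text"] = "hello"; // or "fkind" + "fid" for a file message
j_array.push_back(j_entry);
const std::vector<uint8_t> data = nlohmann::json::to_msgpack(j_array);
```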
## TODO
- [ ] figure out a pro-active approach (instead of waiting for a range request)
- [ ] compression in the ft layer? (would make it reusable) hint/autodetect/autoenable for >1k ?

View File

@@ -0,0 +1,82 @@
#include "./ts_find_start.hpp"
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/message3/components.hpp>
#include <iostream>
#include <cassert>
int main(void) {
Message3Registry msg_reg;
{
std::cout << "TEST empty reg\n";
auto ts_view = msg_reg.view<Message::Components::Timestamp>();
const auto res = find_start_by_ts(ts_view, 42);
assert(res == ts_view.end());
}
{
std::cout << "TEST single msg newer (fail)\n";
Message3Handle msg{msg_reg, msg_reg.create()};
msg.emplace<Message::Components::Timestamp>(43ul);
auto ts_view = msg_reg.view<Message::Components::Timestamp>();
const auto res = find_start_by_ts(ts_view, 42);
assert(res == ts_view.end());
msg.destroy();
}
{
std::cout << "TEST single msg same (succ)\n";
Message3Handle msg{msg_reg, msg_reg.create()};
msg.emplace<Message::Components::Timestamp>(42ul);
auto ts_view = msg_reg.view<Message::Components::Timestamp>();
const auto res = find_start_by_ts(ts_view, 42);
assert(res != ts_view.end());
msg.destroy();
}
{
std::cout << "TEST single msg older (succ)\n";
Message3Handle msg{msg_reg, msg_reg.create()};
msg.emplace<Message::Components::Timestamp>(41ul);
auto ts_view = msg_reg.view<Message::Components::Timestamp>();
const auto res = find_start_by_ts(ts_view, 42);
assert(res != ts_view.end());
msg.destroy();
}
{
std::cout << "TEST multi msg\n";
Message3Handle msg{msg_reg, msg_reg.create()};
msg.emplace<Message::Components::Timestamp>(41ul);
Message3Handle msg2{msg_reg, msg_reg.create()};
msg2.emplace<Message::Components::Timestamp>(42ul);
Message3Handle msg3{msg_reg, msg_reg.create()};
msg3.emplace<Message::Components::Timestamp>(43ul);
// see message3/message_time_sort.cpp
msg_reg.sort<Message::Components::Timestamp>([](const auto& lhs, const auto& rhs) -> bool {
return lhs.ts > rhs.ts;
}, entt::insertion_sort{});
auto ts_view = msg_reg.view<Message::Components::Timestamp>();
auto res = find_start_by_ts(ts_view, 42);
assert(res != ts_view.end());
assert(*res == msg2);
res++;
assert(*res == msg);
msg3.destroy();
msg2.destroy();
msg.destroy();
}
return 0;
}

View File

@@ -0,0 +1,31 @@
#pragma once
#include <algorithm>
#include <cstdint>
#include <iostream>
// perform binary search to find the first message not newer than ts_start
template<typename View>
auto find_start_by_ts(const View& view, uint64_t ts_start) {
//std::cout << "!!!! starting msg ts search, ts_start:" << ts_start << "\n";
// -> first value smaller than start ts
auto res = std::lower_bound(
view.begin(), view.end(),
ts_start,
[&view](const auto& a, const auto& b) {
const auto& [a_comp] = view.get(a);
return a_comp.ts > b; // ">" because ts is sorted high to low (newest first)
}
);
if (res != view.end()) {
const auto& [ts_comp] = view.get(*res);
//std::cout << "!!!! first value not newer than start ts is " << ts_comp.ts << "\n";
} else {
//std::cout << "!!!! no first value not newer than start ts\n";
}
return res;
}