45 Commits

SHA1 Message Date
6e681aa3fd light cca refactor and expose some cca values to the outside 2024-07-12 13:14:24 +02:00
1d97dbe73d rework bitset queue (worse) and send have_all instead (but better) 2024-07-10 15:47:33 +02:00
0e9b1b8877 add ext have all packet 2024-07-10 15:16:58 +02:00
f449cf623d fix bitset size check and send out bitsets the first time someone
announces participation
2024-07-10 12:27:19 +02:00
bee7de3fb7 sequential strat now respects ReadHeadHint 2024-07-10 11:26:47 +02:00
822b979286 object download prio, not set anywhere yet, but the code is there now 2024-07-10 11:13:57 +02:00
ef91ec14fc explicit and better rng, remove junk and old code 2024-07-10 10:41:25 +02:00
699957f79a more consistently tag cp update and lower cooldown just in case 2024-07-09 14:45:00 +02:00
02d58928f4 small refactors 2024-07-09 11:40:01 +02:00
60e6f91541 cleanup old workaround code 2024-07-09 11:04:19 +02:00
92373d34f7 work around missing contact events (better now)
fix missing ft event on reset (oops)
hard assert sending transfers can not time out higher level
2024-07-09 11:00:59 +02:00
e0b278b168 hot fix 2024-07-08 18:46:26 +02:00
e5681b4ad5 rework chunk picker update logic and participation logic
disable most spammy log
2024-07-08 18:12:47 +02:00
79e3070422 better random init 2024-07-07 17:13:30 +02:00
bf1fa64973 chunk picker strategies 2024-07-07 16:49:31 +02:00
11dee5870c fix round robin and reduce num empty spins to improve perf 2024-07-07 15:55:22 +02:00
fab3d42ee9 transfer time temporality buffer 2024-07-07 15:27:30 +02:00
269daaa764 work around missing contact events and properly clear on exit 2024-07-07 14:15:26 +02:00
ea945e6360 increase out number for 4 peers until proper sending per peer is implemented 2024-07-07 13:56:52 +02:00
b068819069 higher tickrate if open requests
(we expect an init soon and don't want to bounce around)
2024-07-07 13:21:59 +02:00
b64a4ae31c better bitset print 2024-07-07 13:07:57 +02:00
266cddf816 properly account for open requests when determining how much to request 2024-07-07 12:45:23 +02:00
eaaf798661 clear receiving transfers
TODO: actually keep around for 2*delay, so missing packets can still be retransmitted
but this fixes perf issues
2024-07-07 11:07:31 +02:00
d19fc6ba30 new chunk picker, basically working
still needs work on the sending side and more bug fixes
2024-07-03 12:11:20 +02:00
613b183592 fix have bit packing 2024-07-03 11:03:26 +02:00
3fd6183c21 combined id refactor 2024-07-02 16:09:59 +02:00
92b3d1a5fb more chunk picker prep 2024-07-02 15:52:25 +02:00
edf58b70f5 receiving count for peer 2024-07-02 14:54:08 +02:00
33560f8f8a receiving transfers refactor 2024-06-30 14:03:06 +02:00
3286a7228c more minor refactoring 2024-06-28 22:18:11 +02:00
b53e291c68 wip chunk picker (still unused) and a small refactor 2024-06-28 15:13:17 +02:00
27cade4dfe track remote have and bitset 2024-06-25 21:09:46 +02:00
0b4041db7e move bitset to util 2024-06-25 12:45:28 +02:00
e9e38db1d5 move self have_chunk to bitset 2024-06-25 12:08:17 +02:00
c8619561ec refactor: move (object/content) components out 2024-06-24 16:42:23 +02:00
1b630bc07f impl and test bitset util 2024-06-24 12:14:51 +02:00
ee2411b8e0 hack: send ft1_have every chunk we receive
produces unnecessary overhead, should be bundled
2024-06-23 15:12:31 +02:00
bc7417c1cd add send fn for new packets (parse and send still untested) 2024-06-23 12:55:23 +02:00
3827733f08 and remove the old code 2024-06-23 12:31:01 +02:00
5400c13f88 copy the remaining implemented send functions over 2024-06-23 12:14:02 +02:00
8972386971 send out pc1 announces for ft infohash
will eliminate the guesswork in the future
2024-06-23 10:17:48 +02:00
b27107af4c start moving pkg sending to ngcext
wip, but working as far as it's implemented
2024-06-23 10:14:03 +02:00
bcde244a3c handle pc1 announce and reduce chance to sample random peer
(will remove random sample sometime in the future)
2024-06-22 17:01:52 +02:00
e9f22bc9ae make ft1sha1 observe disconnects 2024-06-22 14:08:12 +02:00
c09f2e6f8f ngcext: parse ft1_have, ft1_bitset, pc1_announce 2024-06-22 12:48:54 +02:00
25 changed files with 2429 additions and 541 deletions

.gitignore (vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
.vs/
*.o
*.swp
~*
*~
.idea/
cmake-build-debug/
cmake-build-debugandtest/
cmake-build-release/
*.stackdump
*.coredump
compile_commands.json
/build*
.clangd
.cache
.DS_Store
.AppleDouble
.LSOverride
CMakeLists.txt.user*
CMakeCache.txt

CMakeLists.txt

@@ -51,9 +51,25 @@ add_library(solanaceae_sha1_ngcft1
./solanaceae/ngc_ft1_sha1/hash_utils.hpp
./solanaceae/ngc_ft1_sha1/hash_utils.cpp

./solanaceae/ngc_ft1_sha1/util.hpp

./solanaceae/ngc_ft1_sha1/ft1_sha1_info.hpp
./solanaceae/ngc_ft1_sha1/ft1_sha1_info.cpp
./solanaceae/ngc_ft1_sha1/components.hpp
./solanaceae/ngc_ft1_sha1/components.cpp
./solanaceae/ngc_ft1_sha1/contact_components.hpp
./solanaceae/ngc_ft1_sha1/chunk_picker.hpp
./solanaceae/ngc_ft1_sha1/chunk_picker.cpp
./solanaceae/ngc_ft1_sha1/participation.hpp
./solanaceae/ngc_ft1_sha1/participation.cpp
./solanaceae/ngc_ft1_sha1/receiving_transfers.hpp
./solanaceae/ngc_ft1_sha1/receiving_transfers.cpp
./solanaceae/ngc_ft1_sha1/sha1_ngcft1.hpp
./solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp
)

@@ -69,3 +85,22 @@ target_link_libraries(solanaceae_sha1_ngcft1 PUBLIC
solanaceae_file2
)
########################################
option(SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING "Build the solanaceae_ngcft1_sha1 tests" OFF)
message("II SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING " ${SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING})
# TODO: proper options
if (SOLANACEAE_NGCFT1_SHA1_BUILD_TESTING)
include(CTest)
#add_executable(bitset_tests
# ./solanaceae/ngc_ft1_sha1/bitset_tests.cpp
#)
#target_link_libraries(bitset_tests PUBLIC
# solanaceae_sha1_ngcft1
#)
endif()

ngcext.cpp

@@ -1,8 +1,9 @@
#include "./ngcext.hpp"
#include <iostream>
#include <cassert>

-NGCEXTEventProvider::NGCEXTEventProvider(ToxEventProviderI& tep) : _tep(tep) {
+NGCEXTEventProvider::NGCEXTEventProvider(ToxI& t, ToxEventProviderI& tep) : _t(t), _tep(tep) {
_tep.subscribe(this, Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PACKET);
_tep.subscribe(this, Tox_Event_Type::TOX_EVENT_GROUP_CUSTOM_PRIVATE_PACKET);
}

@@ -261,6 +262,163 @@ bool NGCEXTEventProvider::parse_ft1_init_ack_v2(
);
}
bool NGCEXTEventProvider::parse_ft1_have(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
if (!_private) {
std::cerr << "NGCEXT: ft1_have cant be public\n";
return false;
}
Events::NGCEXT_ft1_have e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 4 byte (file_kind)
e.file_kind = 0u;
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
e.file_kind |= uint32_t(data[curser]) << (i*8);
}
// - X bytes (file_kind dependent id, different sizes)
uint16_t file_id_size = 0u;
_DATA_HAVE(sizeof(file_id_size), std::cerr << "NGCEXT: packet too small, missing file_id_size\n"; return false)
for (size_t i = 0; i < sizeof(file_id_size); i++, curser++) {
file_id_size |= uint32_t(data[curser]) << (i*8);
}
_DATA_HAVE(file_id_size, std::cerr << "NGCEXT: packet too small, missing file_id, or file_id_size too large (" << data_size-curser << ")\n"; return false)
e.file_id = {data+curser, data+curser+file_id_size};
curser += file_id_size;
// - array [
// - 4 bytes (chunk index)
// - ]
while (curser < data_size) {
_DATA_HAVE(sizeof(uint32_t), std::cerr << "NGCEXT: packet too small, broken chunk index\n"; return false)
uint32_t chunk_index = 0u;
for (size_t i = 0; i < sizeof(chunk_index); i++, curser++) {
chunk_index |= uint32_t(data[curser]) << (i*8);
}
e.chunks.push_back(chunk_index);
}
return dispatch(
NGCEXT_Event::FT1_HAVE,
e
);
}
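Note: the _DATA_HAVE macro is used throughout these parsers but its definition is not part of this diff. A minimal sketch of the bounds check it presumably performs (an assumption, not the repo's actual definition):

// assumed shape of the helper, not shown in this diff: run `error`
// (e.g. log + return false) when fewer than `bytes` bytes remain past the read cursor
#define _DATA_HAVE(bytes, error) if ((data_size - curser) < (bytes)) { error; }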
bool NGCEXTEventProvider::parse_ft1_bitset(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
if (!_private) {
std::cerr << "NGCEXT: ft1_bitset cant be public\n";
return false;
}
Events::NGCEXT_ft1_bitset e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 4 byte (file_kind)
e.file_kind = 0u;
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
e.file_kind |= uint32_t(data[curser]) << (i*8);
}
// - X bytes (file_kind dependent id, different sizes)
uint16_t file_id_size = 0u;
_DATA_HAVE(sizeof(file_id_size), std::cerr << "NGCEXT: packet too small, missing file_id_size\n"; return false)
for (size_t i = 0; i < sizeof(file_id_size); i++, curser++) {
file_id_size |= uint32_t(data[curser]) << (i*8);
}
_DATA_HAVE(file_id_size, std::cerr << "NGCEXT: packet too small, missing file_id, or file_id_size too large (" << data_size-curser << ")\n"; return false)
e.file_id = {data+curser, data+curser+file_id_size};
curser += file_id_size;
e.start_chunk = 0u;
_DATA_HAVE(sizeof(e.start_chunk), std::cerr << "NGCEXT: packet too small, missing start_chunk\n"; return false)
for (size_t i = 0; i < sizeof(e.start_chunk); i++, curser++) {
e.start_chunk |= uint32_t(data[curser]) << (i*8);
}
// - X bytes
// - array [
// - 1 bit (have chunk)
// - ] (filled up with zero)
// high to low?
// simply rest of file packet
e.chunk_bitset = {data+curser, data+curser+(data_size-curser)};
return dispatch(
NGCEXT_Event::FT1_BITSET,
e
);
}
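The bit order within a bitset byte is left open above ("high to low?"). A hedged helper for testing a received bit, assuming MSB-first packing within each byte; if the BitSet util packs LSB-first, the shift becomes (rel_idx % 8):

// hypothetical reader: is chunk `start_chunk + rel_idx` marked as had?
static bool bitset_have_chunk(const std::vector<uint8_t>& chunk_bitset, size_t rel_idx) {
	const size_t byte_i = rel_idx / 8;
	if (byte_i >= chunk_bitset.size()) {
		return false; // out of range counts as "don't have"
	}
	return (chunk_bitset[byte_i] >> (7 - (rel_idx % 8))) & 0x1;
}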
bool NGCEXTEventProvider::parse_ft1_have_all(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
// can be public
// TODO: warn on public?
Events::NGCEXT_ft1_have_all e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 4 byte (file_kind)
e.file_kind = 0u;
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
e.file_kind |= uint32_t(data[curser]) << (i*8);
}
_DATA_HAVE(1, std::cerr << "NGCEXT: packet too small, missing file_id\n"; return false)
// - X bytes (file_id, different sizes)
e.file_id = {data+curser, data+curser+(data_size-curser)};
return dispatch(
NGCEXT_Event::FT1_HAVE_ALL,
e
);
}
bool NGCEXTEventProvider::parse_pc1_announce(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
// can be public
Events::NGCEXT_pc1_announce e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - X bytes (id, different sizes)
e.id = {data+curser, data+curser+(data_size-curser)};
return dispatch(
NGCEXT_Event::PC1_ANNOUNCE,
e
);
}
bool NGCEXTEventProvider::handlePacket(
const uint32_t group_number,
const uint32_t peer_number,

@@ -292,6 +450,14 @@ bool NGCEXTEventProvider::handlePacket(
return parse_ft1_data_ack(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_MESSAGE:
return parse_ft1_message(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_HAVE:
return parse_ft1_have(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_BITSET:
return parse_ft1_bitset(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_HAVE_ALL:
return parse_ft1_have_all(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::PC1_ANNOUNCE:
return parse_pc1_announce(group_number, peer_number, data+1, data_size-1, _private);
default:
return false;
}

@@ -299,6 +465,276 @@ bool NGCEXTEventProvider::handlePacket(
return false;
}
bool NGCEXTEventProvider::send_ft1_request(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
) {
// - 1 byte packet id
// - 4 byte file_kind
// - X bytes file_id
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_REQUEST));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
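Every send function below hand-rolls the same little-endian byte loop; a small helper could factor that out. A sketch only, this diff does not introduce such a helper:

// hypothetical helper: append an unsigned integer to `pkg` in little-endian order
template<typename T>
static void push_le(std::vector<uint8_t>& pkg, T value) {
	for (size_t i = 0; i < sizeof(T); i++) {
		pkg.push_back((value >> (i*8)) & 0xff);
	}
}
// usage: push_le(pkg, file_kind); push_le(pkg, file_size);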
bool NGCEXTEventProvider::send_ft1_init(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
uint64_t file_size,
uint8_t transfer_id,
const uint8_t* file_id, size_t file_id_size
) {
// - 1 byte packet id
// - 4 byte (file_kind)
// - 8 bytes (data size)
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultaneous fts)
// - X bytes (file_kind dependent id, different sizes)
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < sizeof(file_size); i++) {
pkg.push_back((file_size>>(i*8)) & 0xff);
}
pkg.push_back(transfer_id);
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCEXTEventProvider::send_ft1_init_ack(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id
) {
// - 1 byte packet id
// - 1 byte transfer_id
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT_ACK));
pkg.push_back(transfer_id);
// - 2 bytes max_lossy_data_size
const uint16_t max_lossy_data_size = _t.toxGroupMaxCustomLossyPacketLength() - 4;
for (size_t i = 0; i < sizeof(uint16_t); i++) {
pkg.push_back((max_lossy_data_size>>(i*8)) & 0xff);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCEXTEventProvider::send_ft1_data(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id,
uint16_t sequence_id,
const uint8_t* data, size_t data_size
) {
assert(data_size > 0);
// TODO
// check header_size+data_size <= max pkg size
std::vector<uint8_t> pkg;
pkg.reserve(2048); // saves a ton of allocations
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA));
pkg.push_back(transfer_id);
pkg.push_back(sequence_id & 0xff);
pkg.push_back((sequence_id >> (1*8)) & 0xff);
// TODO: optimize
for (size_t i = 0; i < data_size; i++) {
pkg.push_back(data[i]);
}
// lossy
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCEXTEventProvider::send_ft1_data_ack(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id,
const uint16_t* seq_ids, size_t seq_ids_size
) {
std::vector<uint8_t> pkg;
pkg.reserve(1+1+2*32); // 32 acks in a single pkg should be unlikely
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA_ACK));
pkg.push_back(transfer_id);
// TODO: optimize
for (size_t i = 0; i < seq_ids_size; i++) {
pkg.push_back(seq_ids[i] & 0xff);
pkg.push_back((seq_ids[i] >> (1*8)) & 0xff);
}
// lossy
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCEXTEventProvider::send_all_ft1_message(
uint32_t group_number,
uint32_t message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
) {
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_MESSAGE));
for (size_t i = 0; i < sizeof(message_id); i++) {
pkg.push_back((message_id>>(i*8)) & 0xff);
}
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPacket(group_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK;
}
bool NGCEXTEventProvider::send_ft1_have(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size,
const uint32_t* chunks_data, size_t chunks_size
) {
// 16bit file id size
assert(file_id_size <= 0xffff);
if (file_id_size > 0xffff) {
return false;
}
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_HAVE));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
// file id not last in packet, needs explicit size
const uint16_t file_id_size_cast = file_id_size;
for (size_t i = 0; i < sizeof(file_id_size_cast); i++) {
pkg.push_back((file_id_size_cast>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// rest is chunks
for (size_t c_i = 0; c_i < chunks_size; c_i++) {
for (size_t i = 0; i < sizeof(chunks_data[c_i]); i++) {
pkg.push_back((chunks_data[c_i]>>(i*8)) & 0xff);
}
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
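The FT1_HAVE notes in the header suggest bundling haves (e.g. every 16 chunks) instead of the current one-packet-per-chunk hack from the commit log. A hypothetical accumulator showing the intent; the name and the flush threshold are assumptions:

// hypothetical bundling: collect received chunk indices and flush them as one
// FT1_HAVE once 16 accumulate (a real version also needs a time-based flush)
struct HaveAccumulator {
	std::vector<uint32_t> pending;

	bool add(NGCEXTEventProvider& neep, uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size, uint32_t chunk_index) {
		pending.push_back(chunk_index);
		if (pending.size() < 16) {
			return false; // not flushed yet
		}
		const bool sent = neep.send_ft1_have(group_number, peer_number, file_kind, file_id, file_id_size, pending.data(), pending.size());
		pending.clear();
		return sent;
	}
};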
bool NGCEXTEventProvider::send_ft1_bitset(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size,
uint32_t start_chunk,
const uint8_t* bitset_data, size_t bitset_size // size is bytes
) {
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_BITSET));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
// file id not last in packet, needs explicit size
const uint16_t file_id_size_cast = file_id_size;
for (size_t i = 0; i < sizeof(file_id_size_cast); i++) {
pkg.push_back((file_id_size_cast>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
for (size_t i = 0; i < sizeof(start_chunk); i++) {
pkg.push_back((start_chunk>>(i*8)) & 0xff);
}
for (size_t i = 0; i < bitset_size; i++) {
pkg.push_back(bitset_data[i]);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCEXTEventProvider::send_ft1_have_all(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
) {
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_HAVE_ALL));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
static std::vector<uint8_t> build_pc1_announce(const uint8_t* id_data, size_t id_size) {
// - 1 byte packet id
// - X bytes (id, different sizes)
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::PC1_ANNOUNCE));
for (size_t i = 0; i < id_size; i++) {
pkg.push_back(id_data[i]);
}
return pkg;
}
bool NGCEXTEventProvider::send_pc1_announce(
uint32_t group_number, uint32_t peer_number,
const uint8_t* id_data, size_t id_size
) {
auto pkg = build_pc1_announce(id_data, id_size);
std::cout << "NEEP: sending PC1_ANNOUNCE s:" << pkg.size() - sizeof(NGCEXT_Event::PC1_ANNOUNCE) << "\n";
// lossless?
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCEXTEventProvider::send_all_pc1_announce(
uint32_t group_number,
const uint8_t* id_data, size_t id_size
) {
auto pkg = build_pc1_announce(id_data, id_size);
std::cout << "NEEP: sending all PC1_ANNOUNCE s:" << pkg.size() - sizeof(NGCEXT_Event::PC1_ANNOUNCE) << "\n";
// lossless?
return _t.toxGroupSendCustomPacket(group_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK;
}
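The PC1_ANNOUNCE notes in the header state that ft1 uses fk+id as the announce id. A hedged sketch of how a caller (e.g. the sha1 layer) might compose that id; the exact byte layout is an assumption based on that note:

// hypothetical caller-side composition of an ft1 announce id: 4 bytes
// file_kind (little endian, matching the send functions above) + file_id
static void announce_ft1_participation(NGCEXTEventProvider& neep, uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size) {
	std::vector<uint8_t> id;
	for (size_t i = 0; i < sizeof(file_kind); i++) {
		id.push_back((file_kind>>(i*8)) & 0xff);
	}
	id.insert(id.end(), file_id, file_id + file_id_size);
	neep.send_pc1_announce(group_number, peer_number, id.data(), id.size());
}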
bool NGCEXTEventProvider::onToxEvent(const Tox_Event_Group_Custom_Packet* e) {
const auto group_number = tox_event_group_custom_packet_get_group_number(e);
const auto peer_number = tox_event_group_custom_packet_get_peer_id(e);

ngcext.hpp

@@ -3,6 +3,7 @@
// solanaceae port of tox_ngc_ext

#include <solanaceae/toxcore/tox_event_interface.hpp>
#include <solanaceae/toxcore/tox_interface.hpp>
#include <solanaceae/util/event_provider.hpp>
#include <solanaceae/toxcore/tox_key.hpp>

@@ -119,7 +120,6 @@ namespace Events {
// - 4 byte (message_id)
uint32_t message_id;

-// request the other side to initiate a FT
// - 4 byte (file_kind)
uint32_t file_kind;

@@ -127,6 +127,60 @@
std::vector<uint8_t> file_id;
};
struct NGCEXT_ft1_have {
uint32_t group_number;
uint32_t peer_number;
// - 4 byte (file_kind)
uint32_t file_kind;
// - X bytes (file_kind dependent id, different sizes)
std::vector<uint8_t> file_id;
// - array [
// - 4 bytes (chunk index)
// - ]
std::vector<uint32_t> chunks;
};
struct NGCEXT_ft1_bitset {
uint32_t group_number;
uint32_t peer_number;
// - 4 byte (file_kind)
uint32_t file_kind;
// - X bytes (file_kind dependent id, different sizes)
std::vector<uint8_t> file_id;
uint32_t start_chunk;
// - array [
// - 1 bit (have chunk)
// - ] (filled up with zero)
// high to low?
std::vector<uint8_t> chunk_bitset;
};
struct NGCEXT_ft1_have_all {
uint32_t group_number;
uint32_t peer_number;
// - 4 byte (file_kind)
uint32_t file_kind;
// - X bytes (file_kind dependent id, different sizes)
std::vector<uint8_t> file_id;
};
struct NGCEXT_pc1_announce {
uint32_t group_number;
uint32_t peer_number;
// - X bytes (id, different sizes)
std::vector<uint8_t> id;
};
} // Events

enum class NGCEXT_Event : uint8_t {

@@ -186,11 +240,50 @@ enum class NGCEXT_Event : uint8_t {
// send file as message
// basically the opposite of request
// contains file_kind and file_id (and timestamp?)
-// - 4 byte (message_id)
+// - 4 bytes (message_id)
-// - 4 byte (file_kind)
+// - 4 bytes (file_kind)
// - X bytes (file_kind dependent id, different sizes)
FT1_MESSAGE,
// announce you have specified chunks, for given info
// this is info/chunk specific
// bundle these together to reduce overhead (like maybe every 16, max 1min)
// - 4 bytes (file_kind)
// - X bytes (file_kind dependent id, different sizes)
// - array [
// - 4 bytes (chunk index)
// - ]
FT1_HAVE,

// tell the other peer which chunks you have for a given info
// compressed down to a bitset (in parts)
// supposed to only be sent once on participation announcement, when there is mutual interest
// the other side always assumes you don't have a chunk until told otherwise,
// so you can be smart about what you send.
// - 4 bytes (file_kind)
// - X bytes (file_kind dependent id, different sizes)
// - 4 bytes (first chunk index in bitset)
// - array [
// - 1 bit (have chunk)
// - ] (filled up with zero)
FT1_BITSET,

// announce you have all chunks, for given info
// prefer over have and bitset
// - 4 bytes (file_kind)
// - X bytes (file_kind dependent id, different sizes)
FT1_HAVE_ALL,

// TODO: FT1_IDONTHAVE, tell a peer you no longer have said chunk
// TODO: FT1_REJECT, tell a peer you won't fulfil the request

// tell another peer that you are participating in X
// you can reply with PC1_ANNOUNCE, to let the other side know you too are participating in X
// you should NOT announce often, since this hits peers that do not participate
// ft1 uses fk+id
// - X bytes (id, different sizes)
PC1_ANNOUNCE = 0x80 | 32u,
MAX
};

@@ -204,15 +297,20 @@ struct NGCEXTEventI {
virtual bool onEvent(const Events::NGCEXT_ft1_data&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_data_ack&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_message&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_have&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_bitset&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_have_all&) { return false; }
virtual bool onEvent(const Events::NGCEXT_pc1_announce&) { return false; }
};

using NGCEXTEventProviderI = EventProviderI<NGCEXTEventI>;

class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
ToxI& _t;
ToxEventProviderI& _tep;

public:
-NGCEXTEventProvider(ToxEventProviderI& tep/*, ToxI& t*/);
+NGCEXTEventProvider(ToxI& t, ToxEventProviderI& tep);

protected:
bool parse_hs1_request_last_ids(

@@ -269,6 +367,30 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
bool _private
);
bool parse_ft1_have(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool parse_ft1_bitset(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool parse_ft1_have_all(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool parse_pc1_announce(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool handlePacket(
const uint32_t group_number,
const uint32_t peer_number,

@@ -277,6 +399,78 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
const bool _private
);
public: // send api
bool send_ft1_request(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
);
bool send_ft1_init(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
uint64_t file_size,
uint8_t transfer_id,
const uint8_t* file_id, size_t file_id_size
);
bool send_ft1_init_ack(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id
);
bool send_ft1_data(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id,
uint16_t sequence_id,
const uint8_t* data, size_t data_size
);
bool send_ft1_data_ack(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id,
const uint16_t* seq_ids, size_t seq_ids_size
);
// TODO: add private version
bool send_all_ft1_message(
uint32_t group_number,
uint32_t message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
);
bool send_ft1_have(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size,
const uint32_t* chunks_data, size_t chunks_size
);
bool send_ft1_bitset(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size,
uint32_t start_chunk,
const uint8_t* bitset_data, size_t bitset_size // size is bytes
);
bool send_ft1_have_all(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
);
bool send_pc1_announce(
uint32_t group_number, uint32_t peer_number,
const uint8_t* id_data, size_t id_size
);
bool send_all_pc1_announce(
uint32_t group_number,
const uint8_t* id_data, size_t id_size
);
protected:
bool onToxEvent(const Tox_Event_Group_Custom_Packet* e) override;
bool onToxEvent(const Tox_Event_Group_Custom_Private_Packet* e) override;

cca.hpp

@@ -54,7 +54,7 @@ struct CCAI {
virtual float getCurrentDelay(void) const = 0;

// return the current believed window in bytes of how much data can be inflight,
-virtual float getWindow(void) = 0;
+virtual float getWindow(void) const = 0;

// TODO: api for how much data we should send
// take time since last sent into account

@@ -64,8 +64,12 @@ struct CCAI {
// get the list of timed out seq_ids
virtual std::vector<SeqIDType> getTimeouts(void) const = 0;

// returns -1 if not implemented, can return 0
virtual int64_t inFlightCount(void) const { return -1; }

// returns -1 if not implemented, can return 0
virtual int64_t inFlightBytes(void) const { return -1; }

public: // callbacks
// data size is without overhead
virtual void onSent(SeqIDType seq, size_t data_size) = 0;

cubic.cpp

@@ -76,7 +76,7 @@ void CUBIC::onCongestion(void) {
}
}

-float CUBIC::getWindow(void) {
+float CUBIC::getWindow(void) const {
return std::min<float>(getCWnD(), FlowOnly::getWindow());
}

cubic.hpp

@@ -2,13 +2,8 @@
#include "./flow_only.hpp"

-#include <chrono>

struct CUBIC : public FlowOnly {
-//using clock = std::chrono::steady_clock;

public: // config
-//static constexpr float BETA {0.7f};
static constexpr float BETA {0.8f};
static constexpr float SCALING_CONSTANT {0.4f};
static constexpr float RTT_EMA_ALPHA = 0.1f; // 0.1 is very smooth, might need more

@@ -26,35 +21,17 @@
float getCWnD(void) const;

-// moving avg over the last few delay samples
-// VERY sensitive to bundling acks
-//float getCurrentDelay(void) const;
-//void addRTT(float new_delay);

void onCongestion(void) override;

public: // api
CUBIC(size_t maximum_segment_data_size) : FlowOnly(maximum_segment_data_size) {}
virtual ~CUBIC(void) {}

-float getWindow(void) override;
+float getWindow(void) const override;

// TODO: api for how much data we should send
// take time since last sent into account
// respect max_byterate_allowed
int64_t canSend(float time_delta) override;

-// get the list of timed out seq_ids
-//std::vector<SeqIDType> getTimeouts(void) const override;

-public: // callbacks
-// data size is without overhead
-//void onSent(SeqIDType seq, size_t data_size) override;
-//void onAck(std::vector<SeqIDType> seqs) override;

-// if discard, not resent, not inflight
-//void onLoss(SeqIDType seq, bool discard) override;
};

flow_only.cpp

@@ -30,6 +30,7 @@ void FlowOnly::updateWindow(void) {
}

void FlowOnly::updateCongestion(void) {
updateWindow();
const auto tmp_window = getWindow();
// packet window * 0.3
// but at least 4

@@ -57,8 +58,7 @@ void FlowOnly::updateCongestion(void) {
}
}

-float FlowOnly::getWindow(void) {
+float FlowOnly::getWindow(void) const {
-updateWindow();
return _fwnd;
}

@@ -102,11 +102,18 @@ int64_t FlowOnly::inFlightCount(void) const {
return _in_flight.size();
}

int64_t FlowOnly::inFlightBytes(void) const {
return _in_flight_bytes;
}

void FlowOnly::onSent(SeqIDType seq, size_t data_size) {
if constexpr (true) {
size_t sum {0u};
for (const auto& it : _in_flight) {
assert(it.id != seq);
sum += it.bytes;
}
assert(_in_flight_bytes == sum);
}

const auto& new_entry = _in_flight.emplace_back(

flow_only.hpp

@@ -4,7 +4,6 @@
#include <chrono>
#include <vector>
-#include <tuple>

struct FlowOnly : public CCAI {
protected:

@@ -52,7 +51,8 @@ struct FlowOnly : public CCAI {
// VERY sensitive to bundling acks
float getCurrentDelay(void) const override;

-float getWindow(void) override;
+// call updateWindow() to update this value
+float getWindow(void) const override;

void addRTT(float new_delay);

@@ -76,6 +76,7 @@ struct FlowOnly : public CCAI {
std::vector<SeqIDType> getTimeouts(void) const override;
int64_t inFlightCount(void) const override;
int64_t inFlightBytes(void) const override;

public: // callbacks
// data size is without overhead

ledbat.hpp

@@ -53,7 +53,7 @@ struct LEDBAT : public CCAI {
// return the current believed window in bytes of how much data can be inflight,
// without overstepping the delay requirement
-float getWindow(void) override {
+float getWindow(void) const override {
return _cwnd;
}

ngcft1.cpp

@@ -15,147 +15,6 @@
#include <cassert>
#include <vector>
bool NGCFT1::sendPKG_FT1_REQUEST(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
) {
// - 1 byte packet id
// - 4 byte file_kind
// - X bytes file_id
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_REQUEST));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCFT1::sendPKG_FT1_INIT(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,
uint64_t file_size,
uint8_t transfer_id,
const uint8_t* file_id, size_t file_id_size
) {
// - 1 byte packet id
// - 4 byte (file_kind)
// - 8 bytes (data size)
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultaneous fts)
// - X bytes (file_kind dependent id, different sizes)
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT));
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < sizeof(file_size); i++) {
pkg.push_back((file_size>>(i*8)) & 0xff);
}
pkg.push_back(transfer_id);
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCFT1::sendPKG_FT1_INIT_ACK(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id
) {
// - 1 byte packet id
// - 1 byte transfer_id
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_INIT_ACK));
pkg.push_back(transfer_id);
// - 2 bytes max_lossy_data_size
const uint16_t max_lossy_data_size = _t.toxGroupMaxCustomLossyPacketLength() - 4;
for (size_t i = 0; i < sizeof(uint16_t); i++) {
pkg.push_back((max_lossy_data_size>>(i*8)) & 0xff);
}
// lossless
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCFT1::sendPKG_FT1_DATA(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id,
uint16_t sequence_id,
const uint8_t* data, size_t data_size
) {
assert(data_size > 0);
// TODO
// check header_size+data_size <= max pkg size
std::vector<uint8_t> pkg;
pkg.reserve(2048); // saves a ton of allocations
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA));
pkg.push_back(transfer_id);
pkg.push_back(sequence_id & 0xff);
pkg.push_back((sequence_id >> (1*8)) & 0xff);
// TODO: optimize
for (size_t i = 0; i < data_size; i++) {
pkg.push_back(data[i]);
}
// lossy
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCFT1::sendPKG_FT1_DATA_ACK(
uint32_t group_number, uint32_t peer_number,
uint8_t transfer_id,
const uint16_t* seq_ids, size_t seq_ids_size
) {
std::vector<uint8_t> pkg;
pkg.reserve(1+1+2*32); // 32 acks in a single pkg should be unlikely
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_DATA_ACK));
pkg.push_back(transfer_id);
// TODO: optimize
for (size_t i = 0; i < seq_ids_size; i++) {
pkg.push_back(seq_ids[i] & 0xff);
pkg.push_back((seq_ids[i] >> (1*8)) & 0xff);
}
// lossy
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCFT1::sendPKG_FT1_MESSAGE(
uint32_t group_number,
uint32_t message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
) {
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_MESSAGE));
for (size_t i = 0; i < sizeof(message_id); i++) {
pkg.push_back((message_id>>(i*8)) & 0xff);
}
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPacket(group_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK;
}
void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<CCAI::SeqIDType>& timeouts_set, int64_t& can_packet_size) {
auto& tf_opt = peer.send_transfers.at(idx);
assert(tf_opt.has_value());

@@ -181,7 +40,8 @@ void NGCFT1::updateSendTransfer(
} else {
// timed out, resend
std::cerr << "NGCFT1 warning: ft init timed out, resending\n";
-sendPKG_FT1_INIT(group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
+//sendPKG_FT1_INIT(group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
+_neep.send_ft1_init(group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
tf.inits_sent++;
tf.time_since_activity = 0.f;
}
@@ -191,7 +51,7 @@ void NGCFT1::updateSendTransfer(
case State::FINISHING: // we still have unacked packets
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
if (can_packet_size >= data.size() && timeouts_set.count({idx, id})) {
-sendPKG_FT1_DATA(group_number, peer_number, idx, id, data.data(), data.size());
+_neep.send_ft1_data(group_number, peer_number, idx, id, data.data(), data.size());
peer.cca->onLoss({idx, id}, false);
time_since_activity = 0.f;
timeouts_set.erase({idx, id});
@@ -200,8 +60,14 @@ void NGCFT1::updateSendTransfer(
});
if (tf.time_since_activity >= sending_give_up_after) {
// no ack after 30sec, close ft
-// TODO: notify app
std::cerr << "NGCFT1 warning: sending ft finishing timed out, deleting\n";
dispatch(
NGCFT1_Event::send_done,
Events::NGCFT1_send_done{
group_number, peer_number,
static_cast<uint8_t>(idx),
}
);

// clean up cca
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
@@ -241,7 +107,7 @@ void NGCFT1::updateSendTransfer(
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
if (can_packet_size >= data.size() && time_since_activity >= peer.cca->getCurrentDelay() && timeouts_set.count({idx, id})) {
// TODO: can fail
-sendPKG_FT1_DATA(group_number, peer_number, idx, id, data.data(), data.size());
+_neep.send_ft1_data(group_number, peer_number, idx, id, data.data(), data.size());
peer.cca->onLoss({idx, id}, false);
time_since_activity = 0.f;
timeouts_set.erase({idx, id});
@@ -278,7 +144,7 @@ void NGCFT1::updateSendTransfer(
);

uint16_t seq_id = tf.ssb.add(std::move(new_data));
-const bool sent = sendPKG_FT1_DATA(group_number, peer_number, idx, seq_id, tf.ssb.entries.at(seq_id).data.data(), tf.ssb.entries.at(seq_id).data.size());
+const bool sent = _neep.send_ft1_data(group_number, peer_number, idx, seq_id, tf.ssb.entries.at(seq_id).data.data(), tf.ssb.entries.at(seq_id).data.size());
if (sent) {
peer.cca->onSent({idx, seq_id}, chunk_size);
} else {
@@ -295,8 +161,8 @@ void NGCFT1::updateSendTransfer(
break;
default: // invalid state, delete
std::cerr << "NGCFT1 error: ft in invalid state, deleting\n";
assert(false && "ft in invalid state");
tf_opt.reset();
-//continue;
return;
}
}
@@ -335,10 +201,33 @@ void NGCFT1::iteratePeer(
// TODO: receiving transfers?
}
const CCAI* NGCFT1::getPeerCCA(
uint32_t group_number,
uint32_t peer_number
) const {
auto group_it = groups.find(group_number);
if (group_it == groups.end()) {
return nullptr;
}

auto peer_it = group_it->second.peers.find(peer_number);
if (peer_it == group_it->second.peers.end()) {
return nullptr;
}

const auto& cca_ptr = peer_it->second.cca;
if (!cca_ptr) {
return nullptr;
}

return cca_ptr.get();
}
NGCFT1::NGCFT1(
ToxI& t,
ToxEventProviderI& tep,
-NGCEXTEventProviderI& neep
+NGCEXTEventProvider& neep
) : _t(t), _tep(tep), _neep(neep)
{
_neep.subscribe(this, NGCEXT_Event::FT1_REQUEST);

@@ -352,6 +241,7 @@ NGCFT1::NGCFT1(
}

float NGCFT1::iterate(float time_delta) {
_time_since_activity += time_delta;
bool transfer_in_progress {false};
for (auto& [group_number, group] : groups) {
for (auto& [peer_number, peer] : group.peers) {
@@ -378,9 +268,13 @@ float NGCFT1::iterate(float time_delta) {
}

if (transfer_in_progress) {
_time_since_activity = 0.f;
// ~15ms for up to 1mb/s
// ~5ms for up to 4mb/s
return 0.005f; // 5ms
} else if (_time_since_activity < 0.5f) {
// because of temporality
return 0.025f;
} else {
return 1.f; // once a sec might be too little
}
@@ -392,7 +286,7 @@ void NGCFT1::NGC_FT1_send_request_private(
const uint8_t* file_id, size_t file_id_size
) {
// TODO: error check
-sendPKG_FT1_REQUEST(group_number, peer_number, file_kind, file_id, file_id_size);
+_neep.send_ft1_request(group_number, peer_number, file_kind, file_id, file_id_size);
}
bool NGCFT1::NGC_FT1_send_init_private(

@@ -433,7 +327,7 @@ bool NGCFT1::NGC_FT1_send_init_private(
}

// TODO: check return value
-sendPKG_FT1_INIT(group_number, peer_number, file_kind, file_size, idx, file_id, file_id_size);
+_neep.send_ft1_init(group_number, peer_number, file_kind, file_size, idx, file_id, file_id_size);

peer.send_transfers[idx] = Group::Peer::SendTransfer{
file_kind,

@@ -463,12 +357,58 @@ bool NGCFT1::NGC_FT1_send_message_public(
message_id = randombytes_random();

// TODO: check return value
-return sendPKG_FT1_MESSAGE(group_number, message_id, file_kind, file_id, file_id_size);
+return _neep.send_all_ft1_message(group_number, message_id, file_kind, file_id, file_id_size);
}
float NGCFT1::getPeerDelay(uint32_t group_number, uint32_t peer_number) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);
if (cca_ptr == nullptr) {
return -1.f;
}
return cca_ptr->getCurrentDelay();
}
float NGCFT1::getPeerWindow(uint32_t group_number, uint32_t peer_number) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);
if (cca_ptr == nullptr) {
return -1.f;
}
return cca_ptr->getWindow();
}
int64_t NGCFT1::getPeerInFlightPackets(
uint32_t group_number,
uint32_t peer_number
) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);
if (cca_ptr == nullptr) {
return -1;
}
return cca_ptr->inFlightCount();
}
int64_t NGCFT1::getPeerInFlightBytes(
uint32_t group_number,
uint32_t peer_number
) const {
auto* cca_ptr = getPeerCCA(group_number, peer_number);
if (cca_ptr == nullptr) {
return -1;
}

return cca_ptr->inFlightBytes();
}
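These four queries expose per-peer CCA state to the outside (per the top commit). A minimal polling sketch; the function name is hypothetical, and error values are negative as documented in the header:

// hypothetical consumer: dump a peer's congestion state for debugging/UI
void print_peer_cca_state(const NGCFT1& nft, uint32_t group_number, uint32_t peer_number) {
	const float delay = nft.getPeerDelay(group_number, peer_number);
	const float window = nft.getPeerWindow(group_number, peer_number);
	if (delay < 0.f || window < 0.f) {
		return; // no cca for this peer (or error)
	}
	std::cout << "rtt:" << delay << "s window:" << window << "B"
		<< " pkgs:" << nft.getPeerInFlightPackets(group_number, peer_number)
		<< " bytes:" << nft.getPeerInFlightBytes(group_number, peer_number) << "\n";
}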
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_request& e) {
//#if !NDEBUG
-std::cout << "NGCFT1: FT1_REQUEST fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
+std::cout << "NGCFT1: got FT1_REQUEST fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
//#endif

// .... just rethrow??
@@ -485,7 +425,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_request& e) {

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init& e) {
//#if !NDEBUG
-std::cout << "NGCFT1: FT1_INIT fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " [" << bin2hex(e.file_id) << "]\n";
+std::cout << "NGCFT1: got FT1_INIT fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " [" << bin2hex(e.file_id) << "]\n";
//#endif

bool accept = false;

@@ -506,13 +446,13 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init& e) {
return true; // return true?
}

-sendPKG_FT1_INIT_ACK(e.group_number, e.peer_number, e.transfer_id);
+_neep.send_ft1_init_ack(e.group_number, e.peer_number, e.transfer_id);
std::cout << "NGCFT1: accepted init\n";

auto& peer = groups[e.group_number].peers[e.peer_number];
if (peer.recv_transfers[e.transfer_id].has_value()) {
-std::cerr << "NGCFT1 warning: overwriting existing recv_transfer " << int(e.transfer_id) << "\n";
+std::cerr << "NGCFT1 warning: overwriting existing recv_transfer " << int(e.transfer_id) << ", other peer started new transfer on preexisting\n";
}

peer.recv_transfers[e.transfer_id] = Group::Peer::RecvTransfer{
@@ -529,7 +469,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init& e) {

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {
//#if !NDEBUG
-std::cout << "NGCFT1: FT1_INIT_ACK mds:" << e.max_lossy_data_size << "\n";
+std::cout << "NGCFT1: got FT1_INIT_ACK mds:" << e.max_lossy_data_size << "\n";
//#endif

// we now should start sending data
@@ -580,7 +520,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init_ack& e) {

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
#if !NDEBUG
-//std::cout << "NGCFT1: FT1_DATA\n";
+//std::cout << "NGCFT1: got FT1_DATA\n";
#endif

if (e.data.empty()) {
@@ -628,7 +568,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
// TODO: check if this caps at max acks
if (!ack_seq_ids.empty()) {
// TODO: check return value
-sendPKG_FT1_DATA_ACK(e.group_number, e.peer_number, e.transfer_id, ack_seq_ids.data(), ack_seq_ids.size());
+_neep.send_ft1_data_ack(e.group_number, e.peer_number, e.transfer_id, ack_seq_ids.data(), ack_seq_ids.size());
}
@@ -642,6 +582,11 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {
e.transfer_id
}
);

// delete transfer
// TODO: keep around for remote timeout + delay + offset, so we can be sure all acks were received
// or implement a dedicated finished that needs to be acked
peer.recv_transfers[e.transfer_id].reset();
}

return true;
@@ -649,7 +594,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data& e) {

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
#if !NDEBUG
-//std::cout << "NGCFT1: FT1_DATA_ACK\n";
+//std::cout << "NGCFT1: got FT1_DATA_ACK\n";
#endif

if (!groups.count(e.group_number)) {
@@ -702,7 +647,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
}

bool NGCFT1::onEvent(const Events::NGCEXT_ft1_message& e) {
-std::cout << "NGCFT1: FT1_MESSAGE mid:" << e.message_id << " fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
+std::cout << "NGCFT1: got FT1_MESSAGE mid:" << e.message_id << " fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";

// .... just rethrow??
// TODO: don't

ngcft1.hpp

@@ -131,16 +131,17 @@ using NGCFT1EventProviderI = EventProviderI<NGCFT1EventI>;

class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProviderI {
ToxI& _t;
ToxEventProviderI& _tep;
-NGCEXTEventProviderI& _neep;
+NGCEXTEventProvider& _neep; // not the interface?

std::default_random_engine _rng{std::random_device{}()};

float _time_since_activity {10.f};

// TODO: config
size_t acks_per_packet {3u}; // 3
float init_retry_timeout_after {4.f};
float sending_give_up_after {15.f}; // 30sec (per active transfer)

struct Group {
struct Peer {
uint32_t max_packet_data_size {500-4};

@@ -201,27 +202,21 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProviderI {
std::map<uint32_t, Group> groups;

protected:
-bool sendPKG_FT1_REQUEST(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size);
-bool sendPKG_FT1_INIT(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, uint64_t file_size, uint8_t transfer_id, const uint8_t* file_id, size_t file_id_size);
-bool sendPKG_FT1_INIT_ACK(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
-bool sendPKG_FT1_DATA(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, uint16_t sequence_id, const uint8_t* data, size_t data_size);
-bool sendPKG_FT1_DATA_ACK(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const uint16_t* seq_ids, size_t seq_ids_size);
-bool sendPKG_FT1_MESSAGE(uint32_t group_number, uint32_t message_id, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size);

void updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<CCAI::SeqIDType>& timeouts_set, int64_t& can_packet_size);
void iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer);

const CCAI* getPeerCCA(uint32_t group_number, uint32_t peer_number) const;
public:
NGCFT1(
ToxI& t,
ToxEventProviderI& tep,
-NGCEXTEventProviderI& neep
+NGCEXTEventProvider& neep
);

float iterate(float delta);

public: // ft1 api
-// TODO: public variant?
void NGC_FT1_send_request_private(
uint32_t group_number, uint32_t peer_number,
uint32_t file_kind,

@@ -245,6 +240,23 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProviderI {
const uint8_t* file_id, size_t file_id_size
);
public: // cca stuff
// rtt/delay
// negative on error or no cca
float getPeerDelay(uint32_t group_number, uint32_t peer_number) const;
// believed possible current window
// negative on error or no cca
float getPeerWindow(uint32_t group_number, uint32_t peer_number) const;
// packets in flight
// returns -1 if error or no cca
int64_t getPeerInFlightPackets(uint32_t group_number, uint32_t peer_number) const;
// actual bytes in flight (aka window)
// returns -1 if error or no cca
int64_t getPeerInFlightBytes(uint32_t group_number, uint32_t peer_number) const;
protected:
bool onEvent(const Events::NGCEXT_ft1_request&) override;
bool onEvent(const Events::NGCEXT_ft1_init&) override;

chunk_picker.cpp (new file)

@@ -0,0 +1,354 @@
#include "./chunk_picker.hpp"
#include <solanaceae/tox_contacts/components.hpp>
#include "./components.hpp"
#include "./contact_components.hpp"
#include <algorithm>
#include <iostream>
// TODO: move ps to own file
// picker strategies are generators
// gen returns true if a valid chunk was picked
// a ps should be lightweight and hold no persistent state
// a ps produces each index only once
// simply scans from the beginning, requesting chunks in that order
struct PickerStrategySequential {
const BitSet& chunk_candidates;
const size_t total_chunks;
size_t i {0u};
PickerStrategySequential(
const BitSet& chunk_candidates_,
const size_t total_chunks_,
const size_t start_offset_ = 0u
) :
chunk_candidates(chunk_candidates_),
total_chunks(total_chunks_),
i(start_offset_)
{}
bool gen(size_t& out_chunk_idx) {
for (; i < total_chunks && i < chunk_candidates.size_bits(); i++) {
if (chunk_candidates[i]) {
out_chunk_idx = i;
i++;
return true;
}
}
return false;
}
};
// chooses a random start position and then requests linearly from there
struct PickerStrategyRandom {
const BitSet& chunk_candidates;
const size_t total_chunks;
std::minstd_rand& rng;
size_t count {0u};
size_t i {total_chunks > 0 ? rng()%total_chunks : 0u}; // random start position; guard against total_chunks == 0
PickerStrategyRandom(
const BitSet& chunk_candidates_,
const size_t total_chunks_,
std::minstd_rand& rng_
) :
chunk_candidates(chunk_candidates_),
total_chunks(total_chunks_),
rng(rng_)
{}
bool gen(size_t& out_chunk_idx) {
for (; count < total_chunks; count++, i++) {
// wrap around
if (i >= total_chunks) {
i = i%total_chunks;
}
if (chunk_candidates[i]) {
out_chunk_idx = i;
count++;
i++;
return true;
}
}
return false;
}
};
// switches randomly between random and sequential
struct PickerStrategyRandomSequential {
PickerStrategyRandom psr;
PickerStrategySequential pssf;
// TODO: configurable
std::bernoulli_distribution d{0.5f};
PickerStrategyRandomSequential(
const BitSet& chunk_candidates_,
const size_t total_chunks_,
std::minstd_rand& rng_,
const size_t start_offset_ = 0u
) :
psr(chunk_candidates_, total_chunks_, rng_),
pssf(chunk_candidates_, total_chunks_, start_offset_)
{}
bool gen(size_t& out_chunk_idx) {
if (d(psr.rng)) {
return psr.gen(out_chunk_idx);
} else {
return pssf.gen(out_chunk_idx);
}
}
};
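A usage sketch for the strategies above, draining one into a candidate list; `drainPicker` is a hypothetical helper, the BitSet api is the one used throughout this file:

// hypothetical helper: collect up to `want` candidate chunk indices
static std::vector<size_t> drainPicker(const BitSet& candidates, size_t total_chunks, std::minstd_rand& rng, size_t want) {
    std::vector<size_t> picked;
    PickerStrategyRandomSequential ps(candidates, total_chunks, rng);
    size_t idx {0};
    while (picked.size() < want && ps.gen(idx)) {
        picked.push_back(idx); // a ps produces each index only once
    }
    return picked;
}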
void ChunkPicker::updateParticipation(
Contact3Handle c,
ObjectRegistry& objreg
) {
if (!c.all_of<Contact::Components::FT1Participation>()) {
participating_unfinished.clear();
return;
}
entt::dense_set<Object> checked;
for (const Object ov : c.get<Contact::Components::FT1Participation>().participating) {
const ObjectHandle o {objreg, ov};
if (participating_unfinished.contains(o)) {
if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
participating_unfinished.erase(o);
continue;
}
if (o.all_of<Message::Components::Transfer::TagPaused>()) {
participating_unfinished.erase(o);
continue;
}
if (o.get<Components::FT1ChunkSHA1Cache>().have_all) {
participating_unfinished.erase(o);
}
} else {
if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
continue;
}
if (o.all_of<Message::Components::Transfer::TagPaused>()) {
continue;
}
if (!o.get<Components::FT1ChunkSHA1Cache>().have_all) {
using Priority = Components::DownloadPriority::Priority;
Priority prio = Priority::NORMAL;
if (o.all_of<Components::DownloadPriority>()) {
prio = o.get<Components::DownloadPriority>().p;
}
uint16_t pskips =
prio == Priority::HIGHER ? 0u :
prio == Priority::HIGH ? 1u :
prio == Priority::NORMAL ? 2u :
prio == Priority::LOW ? 4u :
8u
;
participating_unfinished.emplace(o, ParticipationEntry{pskips});
}
}
checked.emplace(o);
}
// now we still need to remove left-over unfinished entries
// TODO: how did they get left over?
entt::dense_set<Object> to_remove;
for (const auto& [o, _] : participating_unfinished) {
if (!checked.contains(o)) {
std::cerr << "unfinished contained non participating\n";
to_remove.emplace(o);
}
}
for (const auto& o : to_remove) {
participating_unfinished.erase(o);
}
}
std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
Contact3Handle c,
ObjectRegistry& objreg,
ReceivingTransfers& rt,
const size_t open_requests
//NGCFT1& nft
) {
if (!static_cast<bool>(c)) {
assert(false); return {};
}
if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral>()) {
assert(false); return {};
}
const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();
updateParticipation(c, objreg);
if (participating_unfinished.empty()) {
participating_in_last = entt::null;
return {};
}
std::vector<ContentChunkR> req_ret;
// count running tf and open requests
const size_t num_ongoing_transfers = rt.sizePeer(group_number, peer_number);
// TODO: account for open requests
const int64_t num_total = num_ongoing_transfers + open_requests;
// TODO: base max on rate(chunks per sec), gonna be ass with variable chunk size
const size_t num_requests = std::max<int64_t>(0, int64_t(max_tf_chunk_requests)-num_total);
std::cerr << "CP: want " << num_requests << "(rt:" << num_ongoing_transfers << " or:" << open_requests << ") from " << group_number << ":" << peer_number << "\n";
// while n < X
// round robin content (remember last obj)
if (!objreg.valid(participating_in_last) || !participating_unfinished.count(participating_in_last)) {
participating_in_last = participating_unfinished.begin()->first;
//participating_in_last = *participating_unfinished.begin();
}
assert(objreg.valid(participating_in_last));
auto it = participating_unfinished.find(participating_in_last);
// hard limit robin rounds to array size times 20
for (size_t i = 0; req_ret.size() < num_requests && i < participating_unfinished.size()*20; i++) {
if (it == participating_unfinished.end()) {
it = participating_unfinished.begin();
}
if (it->second.skips < it->second.should_skip) {
it->second.skips++;
continue;
}
ObjectHandle o {objreg, it->first};
// intersect self have with other have
if (!o.all_of<Components::RemoteHave, Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
// rare case where no other peer has anything
continue;
}
const auto& cc = o.get<Components::FT1ChunkSHA1Cache>();
if (cc.have_all) {
std::cerr << "ChunkPicker error: completed content still in participating_unfinished!\n";
continue;
}
const auto& others_have = o.get<Components::RemoteHave>().others;
auto other_it = others_have.find(c);
if (other_it == others_have.end()) {
// rare case where the other is participating but has nothing
continue;
}
const auto& other_have = other_it->second;
BitSet chunk_candidates = cc.have_chunk;
if (!other_have.have_all) {
// AND is the same as ~(~A | ~B)
// so we keep chunk_candidates as our have (have is inverted want),
// merge (or) in the inverted remote have,
// and invert at the end: ~(our_have | ~their_have) == ~our_have & their_have
chunk_candidates
.merge(other_have.have.invert())
.invert();
// TODO: add intersect for more perf
} else {
chunk_candidates.invert();
}
const auto& info = o.get<Components::FT1InfoSHA1>();
const auto total_chunks = info.chunks.size();
auto& requested_chunks = o.get_or_emplace<Components::FT1ChunkSHA1Requested>().chunks;
// TODO: trim off the round-up to 8, since those padding bits are now always set
// now select (globally) unrequested chunks the other peer has
// TODO: how do we prioritize within a file?
// - sequential (walk from start (or readhead?))
// - random (choose random start pos and walk)
// - random/sequential (randomly choose between the 2)
// - rarest (keep track of rarity and sort by that)
// - streaming (use readhead to determine time-critical chunks, potentially over-requesting those first (relative to the stream head), otherwise sequential;
// maybe look into libtorrent's deadline stuff)
// - arbitrary priority maps/functions (and combine with the above in ratios)
// TODO: configurable
size_t start_offset {0u};
if (o.all_of<Components::ReadHeadHint>()) {
const auto byte_offset = o.get<Components::ReadHeadHint>().offset_into_file;
if (byte_offset <= info.file_size) {
start_offset = o.get<Components::ReadHeadHint>().offset_into_file/info.chunk_size;
} else {
// error?
}
}
//PickerStrategySequential ps(chunk_candidates, total_chunks, start_offset);
//PickerStrategyRandom ps(chunk_candidates, total_chunks, _rng);
PickerStrategyRandomSequential ps(chunk_candidates, total_chunks, _rng, start_offset);
size_t out_chunk_idx {0};
while (ps.gen(out_chunk_idx) && req_ret.size() < num_requests) {
// out_chunk_idx is a potential candidate we can request from the peer
// - check against double requests
if (std::find_if(req_ret.cbegin(), req_ret.cend(), [&](const ContentChunkR& x) -> bool {
return x.object == o && x.chunk_index == out_chunk_idx;
}) != req_ret.cend()) {
// already in return array
// how did we get here? should we fast exit? if sequential strat, we would want to
continue; // skip
}
// - check against global requests (this might differ based on strat)
if (requested_chunks.count(out_chunk_idx) != 0) {
continue;
}
// - we check against globally running transfers (this might differ based on strat)
if (rt.containsChunk(o, out_chunk_idx)) {
continue;
}
// if nothing else blocks this, add to ret
req_ret.push_back(ContentChunkR{o, out_chunk_idx});
// TODO: move this after packet was sent successfully
// (move net in? hmm)
requested_chunks[out_chunk_idx] = Components::FT1ChunkSHA1Requested::Entry{0.f, c};
}
}
if (it == participating_unfinished.end() || ++it == participating_unfinished.end()) {
participating_in_last = entt::null;
} else {
participating_in_last = it->first;
}
if (req_ret.size() < num_requests) {
std::cerr << "CP: could not fulfil, " << group_number << ":" << peer_number << " only has " << req_ret.size() << " candidates\n";
}
// -- no -- (just compat with old code, ignore)
// if n < X
// optimistically request 1 chunk the other does not have
// (don't mark as requested? or lower cooldown to re-request?)
return req_ret;
}
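A sketch of a call site consuming the returned candidates; `requestNewChunks` and `sendChunkRequest` are hypothetical glue, and how the request's file_id is composed is backend specific:

// hypothetical glue, eg. inside the sha1 backend's per-contact tick
void requestNewChunks(ChunkPicker& cp, Contact3Handle c, ObjectRegistry& objreg, ReceivingTransfers& rt, size_t open_requests) {
    for (const auto& [o, chunk_idx] : cp.updateChunkRequests(c, objreg, rt, open_requests)) {
        // compose a file_id for the chunk and fire the request;
        // how the file_id is derived (eg. the chunk's sha1 hash) is backend specific
        sendChunkRequest(c, o, chunk_idx); // hypothetical send helper
    }
}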

View File

@ -0,0 +1,76 @@
#pragma once
#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/object_store/object_store.hpp>
#include "./components.hpp"
#include "./receiving_transfers.hpp"
#include <entt/container/dense_map.hpp>
#include <entt/container/dense_set.hpp>
#include <cstddef>
#include <cstdint>
#include <random>
//#include <solanaceae/ngc_ft1/ngcft1.hpp>
// goal is to always keep 2 transfers running and X(6) requests queued up
// per peer
struct ChunkPickerUpdateTag {};
struct ChunkPickerTimer {
// adds update tag on 0
float timer {0.f};
};
// contact component?
struct ChunkPicker {
// max transfers
static constexpr size_t max_tf_info_requests {1};
static constexpr size_t max_tf_chunk_requests {3}; // TODO: dynamic, function/factor of (window(delay*speed)/chunksize)
// TODO: cheaper init? tls rng for deep seeding?
std::minstd_rand _rng{std::random_device{}()};
// TODO: handle with hash utils?
struct ParticipationEntry {
ParticipationEntry(void) {}
ParticipationEntry(uint16_t s) : should_skip(s) {}
// skips in round robin -> lower should_skip => higher priority
// TODO: replace with enum value
uint16_t should_skip {2}; // 0 high, 8 low (double each time? 0,1,2,4,8)
uint16_t skips {0};
};
entt::dense_map<Object, ParticipationEntry> participating_unfinished;
Object participating_in_last {entt::null};
private: // TODO: properly sort
// updates participating_unfinished
void updateParticipation(
Contact3Handle c,
ObjectRegistry& objreg
);
public:
// ---------- tick ----------
//void sendInfoRequests();
// is this like a system?
struct ContentChunkR {
ObjectHandle object;
size_t chunk_index;
};
// returns list of chunks to request
[[nodiscard]] std::vector<ContentChunkR> updateChunkRequests(
Contact3Handle c,
ObjectRegistry& objreg,
ReceivingTransfers& rt,
const size_t open_requests
//NGCFT1& nft
);
};
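A sketch of the per-contact wiring hinted at by the "contact component?" comment above; `tickChunkPickers` and the zero open_requests count are assumptions:

// hypothetical system: run tagged pickers once per tick, then consume the tags
void tickChunkPickers(Contact3Registry& cr, ObjectRegistry& objreg, ReceivingTransfers& rt) {
    cr.view<ChunkPicker, ChunkPickerUpdateTag>().each([&](const Contact3 cv, ChunkPicker& cp) {
        Contact3Handle c {cr, cv};
        const auto reqs = cp.updateChunkRequests(c, objreg, rt, 0u /* open_requests, tracked elsewhere */);
        (void)reqs; // turn these into ft1 requests here (see requestNewChunks above)
    });
    cr.clear<ChunkPickerUpdateTag>(); // tags are single-use
}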

View File

@ -0,0 +1,25 @@
#include "./components.hpp"
std::vector<size_t> Components::FT1ChunkSHA1Cache::chunkIndices(const SHA1Digest& hash) const {
const auto it = chunk_hash_to_index.find(hash);
if (it != chunk_hash_to_index.cend()) {
return it->second;
} else {
return {};
}
}
bool Components::FT1ChunkSHA1Cache::haveChunk(const SHA1Digest& hash) const {
if (have_all) { // short cut
return true;
}
if (auto i_vec = chunkIndices(hash); !i_vec.empty()) {
// TODO: should i test all?
return have_chunk[i_vec.front()];
}
// not part of this file
return false;
}
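A trivial sending-side usage sketch; `canServeChunk` is hypothetical, the cache lookup is the one defined above:

// hypothetical sending-side check: can we serve a chunk requested by hash?
bool canServeChunk(ObjectHandle o, const SHA1Digest& chunk_hash) {
    if (!o.all_of<Components::FT1ChunkSHA1Cache>()) {
        return false; // nothing indexed for this object yet
    }
    return o.get<Components::FT1ChunkSHA1Cache>().haveChunk(chunk_hash);
}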

View File

@ -0,0 +1,98 @@
#pragma once
#include <solanaceae/contact/components.hpp>
#include <solanaceae/message3/components.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/util/bitset.hpp>
#include <entt/container/dense_set.hpp>
#include "./ft1_sha1_info.hpp"
#include "./hash_utils.hpp"
#include <vector>
// TODO: rename to object components
namespace Components {
struct Messages {
// dense set instead?
std::vector<Message3Handle> messages;
};
using FT1InfoSHA1 = FT1InfoSHA1;
struct FT1InfoSHA1Data {
std::vector<uint8_t> data;
};
struct FT1InfoSHA1Hash {
std::vector<uint8_t> hash;
};
struct FT1ChunkSHA1Cache {
// TODO: extract have_chunk, have_all and have_count to generic comp
// have_chunk has info.chunks.size() bits, or is empty if have_all is set
// keep in mind the bitset rounds up to multiples of 8 bits
BitSet have_chunk{0};
bool have_all {false};
size_t have_count {0};
entt::dense_map<SHA1Digest, std::vector<size_t>> chunk_hash_to_index;
std::vector<size_t> chunkIndices(const SHA1Digest& hash) const;
bool haveChunk(const SHA1Digest& hash) const;
};
struct FT1ChunkSHA1Requested {
// requested chunks with a timer since last request
struct Entry {
float timer {0.f};
Contact3 c {entt::null};
};
entt::dense_map<size_t, Entry> chunks;
};
// TODO: once announce is shipped, remove the "Suspected"
struct SuspectedParticipants {
entt::dense_set<Contact3> participants;
};
struct RemoteHave {
struct Entry {
bool have_all {false};
BitSet have;
};
entt::dense_map<Contact3, Entry> others;
};
struct ReRequestInfoTimer {
float timer {0.f};
};
struct DownloadPriority {
// download/retrieval priority in comparison to other objects
// not all backends implement this
// priority can be weak, meaning low priority dls will still get transfer activity, just less often
enum class Priority {
HIGHER,
HIGH,
NORMAL,
LOW,
LOWER,
} p = Priority::NORMAL;
};
struct ReadHeadHint {
// points to the first byte we want
// this is just a hint, that can be set from outside
// to guide the sequential "piece picker" strategy
// ? the strategy *should* set this to the first byte we dont yet have
uint64_t offset_into_file {0u};
};
} // Components
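A sketch of how a received single-chunk have announcement could be folded into RemoteHave; `applyRemoteHave` is hypothetical and the BitSet `set()` call is an assumed api:

// hypothetical handler body for a single-chunk have announcement
void applyRemoteHave(ObjectHandle o, Contact3 c, size_t chunk_idx, size_t total_chunks) {
    auto& entry = o.get_or_emplace<Components::RemoteHave>().others[c];
    if (entry.have_all) {
        return; // already known complete, single bits add nothing
    }
    if (entry.have.size_bits() == 0) {
        entry.have = BitSet(total_chunks); // rounds up to multiples of 8 bits
    }
    if (chunk_idx < entry.have.size_bits()) {
        entry.have.set(chunk_idx); // assumed BitSet api
    }
}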

View File

@ -0,0 +1,13 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
#include <entt/container/dense_set.hpp>
namespace Contact::Components {
struct FT1Participation {
entt::dense_set<Object> participating;
};
} // Contact::Components

View File

@ -18,7 +18,7 @@ struct SHA1Digest {
bool operator==(const SHA1Digest& other) const { return data == other.data; } bool operator==(const SHA1Digest& other) const { return data == other.data; }
bool operator!=(const SHA1Digest& other) const { return data != other.data; } bool operator!=(const SHA1Digest& other) const { return data != other.data; }
size_t size(void) const { return data.size(); } constexpr size_t size(void) const { return data.size(); }
}; };
std::ostream& operator<<(std::ostream& out, const SHA1Digest& v); std::ostream& operator<<(std::ostream& out, const SHA1Digest& v);

View File

@ -0,0 +1,48 @@
#include "./participation.hpp"
#include "./contact_components.hpp"
#include "./chunk_picker.hpp"
#include <iostream>
bool addParticipation(Contact3Handle c, ObjectHandle o) {
bool was_new {false};
assert(static_cast<bool>(o));
assert(static_cast<bool>(c));
if (static_cast<bool>(o)) {
const auto [_, inserted] = o.get_or_emplace<Components::SuspectedParticipants>().participants.emplace(c);
was_new = inserted;
}
if (static_cast<bool>(c)) {
const auto [_, inserted] = c.get_or_emplace<Contact::Components::FT1Participation>().participating.emplace(o);
was_new = was_new || inserted;
}
//std::cout << "added " << (was_new?"new ":"") << "participant\n";
return was_new;
}
void removeParticipation(Contact3Handle c, ObjectHandle o) {
assert(static_cast<bool>(o));
assert(static_cast<bool>(c));
if (static_cast<bool>(o) && o.all_of<Components::SuspectedParticipants>()) {
o.get<Components::SuspectedParticipants>().participants.erase(c);
}
if (static_cast<bool>(c)) {
if (c.all_of<Contact::Components::FT1Participation>()) {
c.get<Contact::Components::FT1Participation>().participating.erase(o);
}
if (c.all_of<ChunkPicker>()) {
c.get<ChunkPicker>().participating_unfinished.erase(o);
}
}
//std::cout << "removed participant\n";
}

View File

@ -0,0 +1,8 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
#include <solanaceae/contact/contact_model3.hpp>
bool addParticipation(Contact3Handle c, ObjectHandle o);
void removeParticipation(Contact3Handle c, ObjectHandle o);
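A sketch of the send-side reaction when a peer first shows interest; the surrounding handler is assumed, queueBitsetSendFull is declared in the sha1 backend header below:

// hypothetical: inside the sha1 backend, once we learn contact c participates in object o
if (addParticipation(c, o)) {
    // first time this contact showed interest in o:
    // queue sending them our full have-state once
    queueBitsetSendFull(c, o); // member of SHA1_NGCFT1, declared below
}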

View File

@ -0,0 +1,131 @@
#include "./receiving_transfers.hpp"
#include <iostream>
void ReceivingTransfers::tick(float delta) {
for (auto peer_it = _data.begin(); peer_it != _data.end();) {
for (auto it = peer_it->second.begin(); it != peer_it->second.end();) {
it->second.time_since_activity += delta;
// if we have not heard for 20sec, timeout
if (it->second.time_since_activity >= 20.f) {
std::cerr << "SHA1_NGCFT1 warning: receiving transfer timed out " << "." << int(it->first) << "\n";
// TODO: if info, requeue? or just keep the timer comp? - no, timer comp will continue ticking, even if loading
//it->second.v
it = peer_it->second.erase(it);
} else {
it++;
}
}
if (peer_it->second.empty()) {
// cleanup unused peers, too aggressive?
peer_it = _data.erase(peer_it);
} else {
peer_it++;
}
}
}
ReceivingTransfers::Entry& ReceivingTransfers::emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info) {
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
ent.v = info;
return ent;
}
ReceivingTransfers::Entry& ReceivingTransfers::emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk) {
assert(!chunk.chunk_indices.empty());
assert(!containsPeerChunk(group_number, peer_number, chunk.content, chunk.chunk_indices.front()));
auto& ent = _data[combine_ids(group_number, peer_number)][transfer_id];
ent.v = chunk;
return ent;
}
bool ReceivingTransfers::containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return false;
}
return it->second.count(transfer_id);
}
bool ReceivingTransfers::containsChunk(ObjectHandle o, size_t chunk_idx) const {
for (const auto& [_, p] : _data) {
for (const auto& [_2, v] : p) {
if (!v.isChunk()) {
continue;
}
const auto& c = v.getChunk();
if (c.content != o) {
continue;
}
for (const auto idx : c.chunk_indices) {
if (idx == chunk_idx) {
return true;
}
}
}
}
return false;
}
bool ReceivingTransfers::containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return false;
}
for (const auto& [_, v] : it->second) {
if (!v.isChunk()) {
continue;
}
const auto& c = v.getChunk();
if (c.content != o) {
continue;
}
for (const auto idx : c.chunk_indices) {
if (idx == chunk_idx) {
return true;
}
}
}
return false;
}
void ReceivingTransfers::removePeer(uint32_t group_number, uint32_t peer_number) {
_data.erase(combine_ids(group_number, peer_number));
}
void ReceivingTransfers::removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return;
}
it->second.erase(transfer_id);
}
size_t ReceivingTransfers::size(void) const {
size_t count {0};
for (const auto& [_, p] : _data) {
count += p.size();
}
return count;
}
size_t ReceivingTransfers::sizePeer(uint32_t group_number, uint32_t peer_number) const {
auto it = _data.find(combine_ids(group_number, peer_number));
if (it == _data.end()) {
return 0;
}
return it->second.size();
}

View File

@ -0,0 +1,66 @@
#pragma once
#include <solanaceae/object_store/object_store.hpp>
#include <entt/container/dense_map.hpp>
#include "./util.hpp"
#include <cstdint>
#include <variant>
#include <vector>
struct ReceivingTransfers {
struct Entry {
struct Info {
ObjectHandle content;
// copy of info data
// too large?
std::vector<uint8_t> info_data;
};
struct Chunk {
ObjectHandle content;
std::vector<size_t> chunk_indices;
// or data?
// if memmapped, this would be just a pointer
};
std::variant<Info, Chunk> v;
float time_since_activity {0.f};
bool isInfo(void) const { return std::holds_alternative<Info>(v); }
bool isChunk(void) const { return std::holds_alternative<Chunk>(v); }
Info& getInfo(void) { return std::get<Info>(v); }
const Info& getInfo(void) const { return std::get<Info>(v); }
Chunk& getChunk(void) { return std::get<Chunk>(v); }
const Chunk& getChunk(void) const { return std::get<Chunk>(v); }
};
// key is groupid + peerid
// TODO: replace with contact
//using ReceivingTransfers = entt::dense_map<uint64_t, entt::dense_map<uint8_t, ReceivingTransferE>>;
entt::dense_map<uint64_t, entt::dense_map<uint8_t, Entry>> _data;
void tick(float delta);
Entry& emplaceInfo(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Info& info);
Entry& emplaceChunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const Entry::Chunk& chunk);
bool containsPeer(uint32_t group_number, uint32_t peer_number) const { return _data.count(combine_ids(group_number, peer_number)); }
bool containsPeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) const;
bool containsChunk(ObjectHandle o, size_t chunk_idx) const;
bool containsPeerChunk(uint32_t group_number, uint32_t peer_number, ObjectHandle o, size_t chunk_idx) const;
auto& getPeer(uint32_t group_number, uint32_t peer_number) { return _data.at(combine_ids(group_number, peer_number)); }
auto& getTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) { return getPeer(group_number, peer_number).at(transfer_id); }
void removePeer(uint32_t group_number, uint32_t peer_number);
void removePeerTransfer(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
size_t size(void) const;
size_t sizePeer(uint32_t group_number, uint32_t peer_number) const;
};
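A short lifetime sketch for one incoming chunk transfer; `exampleReceive` and its ids are assumed context, the 20s timeout lives in tick() above:

// sketch: lifetime of one incoming chunk transfer
void exampleReceive(ReceivingTransfers& rt, ObjectHandle o, uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, size_t chunk_idx) {
    rt.emplaceChunk(group_number, peer_number, transfer_id, {o, {chunk_idx}});
    // once per iterate; entries idle for 20s or more get dropped
    rt.tick(0.1f);
    // on completion (or failure):
    rt.removePeerTransfer(group_number, peer_number, transfer_id);
}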

File diff suppressed because it is too large

View File

@ -6,10 +6,12 @@
#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
#include <solanaceae/util/bitset.hpp>
#include <solanaceae/ngc_ft1/ngcft1.hpp>
#include "./ft1_sha1_info.hpp"
#include "./receiving_transfers.hpp"
#include <entt/entity/registry.hpp>
#include <entt/entity/handle.hpp>
@ -21,13 +23,15 @@
#include <mutex>
#include <list>
class SHA1_NGCFT1 : public RegistryMessageModelEventI, public NGCFT1EventI {
class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public NGCFT1EventI, public NGCEXTEventI {
ObjectStore2& _os;
// TODO: backend abstraction
Contact3Registry& _cr;
RegistryMessageModel& _rmm;
NGCFT1& _nft;
ToxContactModel2& _tcm;
ToxEventProviderI& _tep;
NGCEXTEventProvider& _neep;
std::minstd_rand _rng {1337*11};
@ -66,55 +70,37 @@ class SHA1_NGCFT1 : public RegistryMessageModelEventI, public NGCFT1EventI {
// key is groupid + peerid
entt::dense_map<uint64_t, entt::dense_map<uint8_t, SendingTransfer>> _sending_transfers;
struct ReceivingTransfer {
ReceivingTransfers _receiving_transfers;
struct Info {
ObjectHandle content;
// copy of info data
// too large?
std::vector<uint8_t> info_data;
};
struct Chunk {
ObjectHandle content;
std::vector<size_t> chunk_indices;
// or data?
// if memmapped, this would be just a pointer
};
std::variant<Info, Chunk> v;
float time_since_activity {0.f};
};
// key is groupid + peerid
entt::dense_map<uint64_t, entt::dense_map<uint8_t, ReceivingTransfer>> _receiving_transfers;
// makes request rotate around open content
std::deque<ObjectHandle> _queue_content_want_info;
std::deque<ObjectHandle> _queue_content_want_chunk;
struct QBitsetEntry {
Contact3Handle c;
ObjectHandle o;
};
std::deque<QBitsetEntry> _queue_send_bitset;
// workaround missing contact events
// only used to remove participation on peer exit
entt::dense_map<uint64_t, Contact3Handle> _tox_peer_to_contact;
std::atomic_bool _info_builder_dirty {false};
std::mutex _info_builder_queue_mutex;
//struct InfoBuilderEntry {
//// called on completion on the iterate thread
//// (owning)
//std::function<void(void)> fn;
//};
using InfoBuilderEntry = std::function<void(void)>;
std::list<InfoBuilderEntry> _info_builder_queue;
static uint64_t combineIds(const uint32_t group_number, const uint32_t peer_number);
void updateMessages(ObjectHandle ce);
std::optional<std::pair<uint32_t, uint32_t>> selectPeerForRequest(ObjectHandle ce);
void queueBitsetSendFull(Contact3Handle c, ObjectHandle o);
public: // TODO: config
bool _udp_only {false};
size_t _max_concurrent_in {6};
size_t _max_concurrent_in {4}; // info only
size_t _max_concurrent_out {8};
size_t _max_concurrent_out {3*4}; // HACK: allow ideal number for 4 peers
// TODO: probably also includes running transfers rn (meh)
size_t _max_pending_requests {32}; // per content
public:
SHA1_NGCFT1(
@ -122,10 +108,12 @@ class SHA1_NGCFT1 : public RegistryMessageModelEventI, public NGCFT1EventI {
Contact3Registry& cr,
RegistryMessageModel& rmm,
NGCFT1& nft,
ToxContactModel2& tcm
ToxContactModel2& tcm,
ToxEventProviderI& tep,
NGCEXTEventProvider& neep
);
void iterate(float delta);
float iterate(float delta);
protected: // rmm events (actions)
bool onEvent(const Message::Events::MessageUpdated&) override;
@ -140,5 +128,13 @@ class SHA1_NGCFT1 : public RegistryMessageModelEventI, public NGCFT1EventI {
bool onEvent(const Events::NGCFT1_recv_message&) override;
bool sendFilePath(const Contact3 c, std::string_view file_name, std::string_view file_path) override;
bool onToxEvent(const Tox_Event_Group_Peer_Exit* e) override;
bool onEvent(const Events::NGCEXT_ft1_have&) override;
bool onEvent(const Events::NGCEXT_ft1_bitset&) override;
bool onEvent(const Events::NGCEXT_ft1_have_all&) override;
bool onEvent(const Events::NGCEXT_pc1_announce&) override;
};

View File

@ -0,0 +1,13 @@
#pragma once
#include <cstdint>
inline static uint64_t combine_ids(const uint32_t group_number, const uint32_t peer_number) {
return (uint64_t(group_number) << 32) | peer_number;
}
inline static void decompose_ids(const uint64_t combined_id, uint32_t& group_number, uint32_t& peer_number) {
group_number = combined_id >> 32;
peer_number = combined_id & 0xffffffff;
}