Compare commits

...

10 Commits

12 changed files with 1445 additions and 8 deletions


@@ -30,6 +30,25 @@ target_link_libraries(ngcft1 PUBLIC
########################################
add_library(sha1_ngcft1 STATIC
./hash_utils.hpp
./hash_utils.cpp
./ft1_sha1_info.hpp
./ft1_sha1_info.cpp
./sha1_ngcft1.hpp
./sha1_ngcft1.cpp
)
target_link_libraries(sha1_ngcft1 PUBLIC
ngcft1
sha1::sha1
solanaceae_tox_contacts
solanaceae_message3
)
########################################
add_library(plugin_ngcft1 SHARED
./plugin_ngcft1.cpp
)
@@ -38,6 +57,7 @@ target_link_libraries(plugin_ngcft1 PUBLIC
solanaceae_plugin
ngcext
ngcft1
sha1_ngcft1
)
########################################

src/ft1_sha1_info.cpp (new file, 130 lines)

@@ -0,0 +1,130 @@
#include "./ft1_sha1_info.hpp"
#include <sodium.h>
SHA1Digest::SHA1Digest(const std::vector<uint8_t>& v) {
assert(v.size() == data.size());
for (size_t i = 0; i < data.size(); i++) {
data[i] = v[i];
}
}
SHA1Digest::SHA1Digest(const uint8_t* d, size_t s) {
assert(s == data.size());
for (size_t i = 0; i < data.size(); i++) {
data[i] = d[i];
}
}
std::ostream& operator<<(std::ostream& out, const SHA1Digest& v) {
std::string str{};
str.resize(v.size()*2, '?');
// NOTE: the std::string buffer is 1 byte larger than size() reports (room for the trailing '\0' sodium writes)
sodium_bin2hex(str.data(), str.size()+1, v.data.data(), v.data.size());
out << str;
return out;
}
std::vector<uint8_t> FT1InfoSHA1::toBuffer(void) const {
std::vector<uint8_t> buffer;
assert(!file_name.empty());
// TODO: optimize
for (size_t i = 0; i < 256; i++) {
if (i < file_name.size()) {
buffer.push_back(file_name.at(i));
} else {
buffer.push_back(0);
}
}
assert(buffer.size() == 256);
{ // HACK: endianness
buffer.push_back((file_size>>(0*8)) & 0xff);
buffer.push_back((file_size>>(1*8)) & 0xff);
buffer.push_back((file_size>>(2*8)) & 0xff);
buffer.push_back((file_size>>(3*8)) & 0xff);
buffer.push_back((file_size>>(4*8)) & 0xff);
buffer.push_back((file_size>>(5*8)) & 0xff);
buffer.push_back((file_size>>(6*8)) & 0xff);
buffer.push_back((file_size>>(7*8)) & 0xff);
}
assert(buffer.size() == 256+8);
// chunk size
{ // HACK: endianness
buffer.push_back((chunk_size>>(0*8)) & 0xff);
buffer.push_back((chunk_size>>(1*8)) & 0xff);
buffer.push_back((chunk_size>>(2*8)) & 0xff);
buffer.push_back((chunk_size>>(3*8)) & 0xff);
}
assert(buffer.size() == 256+8+4);
for (const auto& chunk : chunks) {
for (size_t i = 0; i < chunk.data.size(); i++) {
buffer.push_back(chunk.data[i]);
}
}
assert(buffer.size() == 256+8+4+20*chunks.size());
return buffer;
}
void FT1InfoSHA1::fromBuffer(const std::vector<uint8_t>& buffer) {
assert(buffer.size() >= 256+8+4);
// TODO: optimize
file_name.clear();
for (size_t i = 0; i < 256; i++) {
char next_char = static_cast<char>(buffer[i]);
if (next_char == 0) {
break;
}
file_name.push_back(next_char);
}
{ // HACK: endianness
file_size = 0;
file_size |= uint64_t(buffer[256+0]) << (0*8);
file_size |= uint64_t(buffer[256+1]) << (1*8);
file_size |= uint64_t(buffer[256+2]) << (2*8);
file_size |= uint64_t(buffer[256+3]) << (3*8);
file_size |= uint64_t(buffer[256+4]) << (4*8);
file_size |= uint64_t(buffer[256+5]) << (5*8);
file_size |= uint64_t(buffer[256+6]) << (6*8);
file_size |= uint64_t(buffer[256+7]) << (7*8);
}
{ // HACK: endianness
chunk_size = 0;
chunk_size |= uint32_t(buffer[256+8+0]) << (0*8);
chunk_size |= uint32_t(buffer[256+8+1]) << (1*8);
chunk_size |= uint32_t(buffer[256+8+2]) << (2*8);
chunk_size |= uint32_t(buffer[256+8+3]) << (3*8);
}
assert((buffer.size()-(256+8+4)) % 20 == 0);
for (size_t offset = 256+8+4; offset < buffer.size();) {
assert(buffer.size() >= offset + 20);
auto& chunk = chunks.emplace_back();
for (size_t i = 0; i < chunk.size(); i++, offset++) {
chunk.data[i] = buffer.at(offset);
}
// TODO: error/leftover checking
}
}
std::ostream& operator<<(std::ostream& out, const FT1InfoSHA1& v) {
out << " file_name: " << v.file_name << "\n";
out << " file_size: " << v.file_size << "\n";
out << " chunk_size: " << v.chunk_size << "\n";
out << " chunks.size(): " << v.chunks.size() << "\n";
return out;
}

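A minimal usage sketch of the serializer above (hypothetical standalone check; assumes ft1_sha1_info.{hpp,cpp} are compiled in, as the sha1_ngcft1 CMake target does):

// round-trips the fixed 256+8+4+20*n layout produced by toBuffer()
#include "./ft1_sha1_info.hpp"
#include <cassert>
#include <iostream>
int main(void) {
	FT1InfoSHA1 info;
	info.file_name = "example.bin";
	info.file_size = 3*128*1024 + 5; // three full chunks plus a short tail
	info.chunks.resize(4, SHA1Digest{std::vector<uint8_t>(20, 0x42)}); // dummy hashes; real code fills these via hash_sha1()
	const auto buffer = info.toBuffer();
	assert(buffer.size() == 256 + 8 + 4 + 20*info.chunks.size());
	FT1InfoSHA1 parsed;
	parsed.fromBuffer(buffer);
	assert(parsed.file_name == info.file_name);
	assert(parsed.file_size == info.file_size);
	assert(parsed.chunks.size() == info.chunks.size());
	std::cout << parsed; // uses the stream operator above
	return 0;
}
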
src/ft1_sha1_info.hpp (new file, 53 lines)

@@ -0,0 +1,53 @@
#pragma once
#include <cstddef>
#include <cstdint>
#include <array>
#include <ostream>
#include <vector>
#include <cassert>
#include <string>
struct SHA1Digest {
std::array<uint8_t, 20> data;
SHA1Digest(void) = default;
SHA1Digest(const std::vector<uint8_t>& v);
SHA1Digest(const uint8_t* d, size_t s);
bool operator==(const SHA1Digest& other) const { return data == other.data; }
bool operator!=(const SHA1Digest& other) const { return data != other.data; }
size_t size(void) const { return data.size(); }
};
std::ostream& operator<<(std::ostream& out, const SHA1Digest& v);
namespace std { // inject
template<> struct hash<SHA1Digest> {
std::size_t operator()(const SHA1Digest& h) const noexcept {
return
size_t(h.data[0]) << (0*8) |
size_t(h.data[1]) << (1*8) |
size_t(h.data[2]) << (2*8) |
size_t(h.data[3]) << (3*8) |
size_t(h.data[4]) << (4*8) |
size_t(h.data[5]) << (5*8) |
size_t(h.data[6]) << (6*8) |
size_t(h.data[7]) << (7*8)
;
}
};
} // std
struct FT1InfoSHA1 {
std::string file_name;
uint64_t file_size {0};
uint32_t chunk_size {128*1024}; // 128KiB for now
std::vector<SHA1Digest> chunks;
std::vector<uint8_t> toBuffer(void) const;
void fromBuffer(const std::vector<uint8_t>& buffer);
};
std::ostream& operator<<(std::ostream& out, const FT1InfoSHA1& v);

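The std::hash injected above only folds the first 8 bytes of the digest, which is enough for uniformly distributed SHA-1 output; operator== still compares all 20 bytes. A small sketch of using the digest as a map key (hypothetical; the sha1_ngcft1 code later uses entt::dense_map the same way):

#include "./ft1_sha1_info.hpp"
#include <unordered_map>
#include <string>
int main(void) {
	// any std-style hash map works thanks to the std::hash specialization
	std::unordered_map<SHA1Digest, std::string> content_names;
	const SHA1Digest d{std::vector<uint8_t>(20, 0xab)};
	content_names[d] = "some shared file";
	return content_names.count(d) == 1 ? 0 : 1;
}
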
src/hash_utils.cpp (new file, 26 lines)

@@ -0,0 +1,26 @@
#include "./hash_utils.hpp"
#include <sha1.h>
// returns the 20-byte sha1 hash
std::vector<uint8_t> hash_sha1(const uint8_t* data, size_t size) {
SHA1_CTX ctx;
SHA1Init(&ctx);
{ // lib only takes uint32_t sizes, so chunk it
constexpr size_t hash_block_size {0xffffffff};
size_t i = 0;
for (; i + hash_block_size < size; i += hash_block_size) {
SHA1Update(&ctx, reinterpret_cast<const uint8_t*>(data) + i, hash_block_size);
}
if (i < size) {
SHA1Update(&ctx, reinterpret_cast<const uint8_t*>(data) + i, size - i);
}
}
std::vector<uint8_t> sha1_hash(20);
SHA1Final(sha1_hash.data(), &ctx);
return sha1_hash;
}

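A quick sanity check of the wrapper above against the standard SHA-1 test vector for "abc" (sketch; assumes the same sha1 library is linked, and uses sodium_bin2hex only for the comparison):

#include "./hash_utils.hpp"
#include <sodium.h>
#include <cassert>
#include <string>
int main(void) {
	const std::string msg = "abc";
	const auto digest = hash_sha1(msg.data(), msg.size()); // char* overload from the header below
	assert(digest.size() == 20);
	std::string hex(digest.size()*2 + 1, '\0'); // sodium wants room for the trailing '\0'
	sodium_bin2hex(hex.data(), hex.size(), digest.data(), digest.size());
	hex.resize(digest.size()*2);
	assert(hex == "a9993e364706816aba3e25717850c26c9cd0d89d"); // FIPS 180 test vector for "abc"
	return 0;
}
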
src/hash_utils.hpp (new file, 10 lines)

@@ -0,0 +1,10 @@
#pragma once
#include <cstdint>
#include <vector>
// returns the 20-byte sha1 hash
std::vector<uint8_t> hash_sha1(const uint8_t* data, size_t size);
inline std::vector<uint8_t> hash_sha1(const char* data, size_t size) { return hash_sha1(reinterpret_cast<const uint8_t*>(data), size); }


@@ -186,6 +186,44 @@ bool NGCEXTEventProvider::parse_ft1_data_ack(
);
}
bool NGCEXTEventProvider::parse_ft1_message(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
) {
if (_private) {
std::cerr << "NGCEXT: ft1_message cant be private (yet)\n";
return false;
}
Events::NGCEXT_ft1_message e;
e.group_number = group_number;
e.peer_number = peer_number;
size_t curser = 0;
// - 4 byte (message_id)
e.message_id = 0u;
_DATA_HAVE(sizeof(e.message_id), std::cerr << "NGCEXT: packet too small, missing message_id\n"; return false)
for (size_t i = 0; i < sizeof(e.message_id); i++, curser++) {
e.message_id |= uint32_t(data[curser]) << (i*8);
}
// - 4 byte (file_kind)
e.file_kind = 0u;
_DATA_HAVE(sizeof(e.file_kind), std::cerr << "NGCEXT: packet too small, missing file_kind\n"; return false)
for (size_t i = 0; i < sizeof(e.file_kind); i++, curser++) {
e.file_kind |= uint32_t(data[curser]) << (i*8);
}
// - X bytes (file_kind dependent id, different sizes)
e.file_id = {data+curser, data+curser+(data_size-curser)};
return dispatch(
NGCEXT_Event::FT1_MESSAGE,
e
);
}
bool NGCEXTEventProvider::handlePacket(
const uint32_t group_number,
const uint32_t peer_number,
@@ -214,6 +252,8 @@ bool NGCEXTEventProvider::handlePacket(
return parse_ft1_data(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_DATA_ACK:
return parse_ft1_data_ack(group_number, peer_number, data+1, data_size-1, _private);
case NGCEXT_Event::FT1_MESSAGE:
return parse_ft1_message(group_number, peer_number, data+1, data_size-1, _private);
default:
return false;
}


@@ -113,6 +113,21 @@ namespace Events {
std::vector<uint16_t> sequence_ids;
};
struct NGCEXT_ft1_message {
uint32_t group_number;
uint32_t peer_number;
// - 4 byte (message_id)
uint32_t message_id;
// request the other side to initiate a FT
// - 4 byte (file_kind)
uint32_t file_kind;
// - X bytes (file_kind dependent id, different sizes)
std::vector<uint8_t> file_id;
};
} // Events
enum class NGCEXT_Event : uint8_t {
@@ -131,7 +146,7 @@ enum class NGCEXT_Event : uint8_t {
HS1_RESPONSE_LAST_IDS,
// request the other side to initiate a FT
// - 1 byte (file_kind)
// - 4 byte (file_kind)
// - X bytes (file_kind dependent id, different sizes)
FT1_REQUEST = 0x80 | 8u,
@@ -139,7 +154,7 @@ enum class NGCEXT_Event : uint8_t {
// tell the other side you want to start a FT
// TODO: might use id layer instead. with it, it would look similar to friends_ft
// - 1 byte (file_kind)
// - 4 byte (file_kind)
// - 8 bytes (data size, can be 0 if unknown, BUT files have to be at least 1 byte)
// - 1 byte (temporary_file_tf_id, for this peer only, technically just a prefix to distinguish between simultaneous fts)
// - X bytes (file_kind dependent id, different sizes)
@@ -168,6 +183,14 @@ enum class NGCEXT_Event : uint8_t {
// - ]
FT1_DATA_ACK,
// send file as message
// basically the opposite of request
// contains file_kind and file_id (and timestamp?)
// - 4 byte (message_id)
// - 4 byte (file_kind)
// - X bytes (file_kind dependent id, different sizes)
FT1_MESSAGE,
MAX
};
@@ -180,6 +203,7 @@ struct NGCEXTEventI {
virtual bool onEvent(const Events::NGCEXT_ft1_init_ack&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_data&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_data_ack&) { return false; }
virtual bool onEvent(const Events::NGCEXT_ft1_message&) { return false; }
};
using NGCEXTEventProviderI = EventProviderI<NGCEXTEventI>;
@@ -233,6 +257,12 @@ class NGCEXTEventProvider : public ToxEventI, public NGCEXTEventProviderI {
bool _private
);
bool parse_ft1_message(
uint32_t group_number, uint32_t peer_number,
const uint8_t* data, size_t data_size,
bool _private
);
bool handlePacket(
const uint32_t group_number,
const uint32_t peer_number,

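For reference, a minimal consumer of the new FT1_MESSAGE event (hypothetical listener; it follows the same subscribe()/onEvent() pattern NGCFT1 itself uses in its constructor below):

#include "./ngcext.hpp"
#include <iostream>
// hypothetical: logs every parsed FT1_MESSAGE
struct FT1MessageLogger : public NGCEXTEventI {
	FT1MessageLogger(NGCEXTEventProviderI& neep) {
		neep.subscribe(this, NGCEXT_Event::FT1_MESSAGE);
	}
	bool onEvent(const Events::NGCEXT_ft1_message& e) override {
		std::cout << "FT1_MESSAGE g:" << e.group_number << " p:" << e.peer_number
			<< " mid:" << e.message_id << " fk:" << e.file_kind
			<< " id_bytes:" << e.file_id.size() << "\n";
		return false; // whether returning true consumes the event depends on the dispatcher
	}
};
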

@@ -2,6 +2,8 @@
#include <solanaceae/toxcore/utils.hpp>
#include <sodium.h>
#include <iostream>
#include <set>
#include <algorithm>
@@ -118,6 +120,29 @@ bool NGCFT1::sendPKG_FT1_DATA_ACK(
return _t.toxGroupSendCustomPrivatePacket(group_number, peer_number, false, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK;
}
bool NGCFT1::sendPKG_FT1_MESSAGE(
uint32_t group_number,
uint32_t message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
) {
std::vector<uint8_t> pkg;
pkg.push_back(static_cast<uint8_t>(NGCEXT_Event::FT1_MESSAGE));
for (size_t i = 0; i < sizeof(message_id); i++) {
pkg.push_back((message_id>>(i*8)) & 0xff);
}
for (size_t i = 0; i < sizeof(file_kind); i++) {
pkg.push_back((file_kind>>(i*8)) & 0xff);
}
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return _t.toxGroupSendCustomPacket(group_number, true, pkg) == TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK;
}
void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<LEDBAT::SeqIDType>& timeouts_set) {
auto& tf_opt = peer.send_transfers.at(idx);
assert(tf_opt.has_value());
@@ -132,6 +157,13 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
if (tf.inits_sent >= 3) {
// delete, timed out 3 times
std::cerr << "NGCFT1 warning: ft init timed out, deleting\n";
dispatch(
NGCFT1_Event::send_done,
Events::NGCFT1_send_done{
group_number, peer_number,
static_cast<uint8_t>(idx),
}
);
tf_opt.reset();
} else {
// timed out, resend
@@ -158,8 +190,14 @@ void NGCFT1::updateSendTransfer(float time_delta, uint32_t group_number, uint32_
if (tf.time_since_activity >= sending_give_up_after) {
// no ack after 30sec, close ft
// TODO: notify app
std::cerr << "NGCFT1 warning: sending ft in progress timed out, deleting\n";
dispatch(
NGCFT1_Event::send_done,
Events::NGCFT1_send_done{
group_number, peer_number,
static_cast<uint8_t>(idx),
}
);
// clean up cca
tf.ssb.for_each(time_delta, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
@@ -293,6 +331,7 @@ NGCFT1::NGCFT1(
_neep.subscribe(this, NGCEXT_Event::FT1_INIT_ACK);
_neep.subscribe(this, NGCEXT_Event::FT1_DATA);
_neep.subscribe(this, NGCEXT_Event::FT1_DATA_ACK);
_neep.subscribe(this, NGCEXT_Event::FT1_MESSAGE);
}
void NGCFT1::iterate(float time_delta) {
@@ -370,6 +409,19 @@ bool NGCFT1::NGC_FT1_send_init_private(
return true;
}
bool NGCFT1::NGC_FT1_send_message_public(
uint32_t group_number,
uint32_t& message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
) {
// create msg_id
message_id = randombytes_random();
// TODO: check return value
return sendPKG_FT1_MESSAGE(group_number, message_id, file_kind, file_id, file_id_size);
}
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_request& e) {
std::cout << "NGCFT1: FT1_REQUEST fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
@@ -386,7 +438,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_request& e) {
}
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_init& e) {
std::cout << "NGCFT1: FT1_INIT fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << e.transfer_id << " [" << bin2hex(e.file_id) << "]\n";
std::cout << "NGCFT1: FT1_INIT fk:" << e.file_kind << " fs:" << e.file_size << " tid:" << int(e.transfer_id) << " [" << bin2hex(e.file_id) << "]\n";
bool accept = false;
dispatch(
@@ -550,10 +602,33 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
// delete if all packets acked
if (transfer.file_size == transfer.file_size_current && transfer.ssb.size() == 0) {
std::cout << "NGCFT1: " << e.transfer_id << " done\n";
std::cout << "NGCFT1: " << int(e.transfer_id) << " done\n";
dispatch(
NGCFT1_Event::send_done,
Events::NGCFT1_send_done{
e.group_number, e.peer_number,
e.transfer_id,
}
);
peer.send_transfers[e.transfer_id].reset();
}
return true;
}
bool NGCFT1::onEvent(const Events::NGCEXT_ft1_message& e) {
std::cout << "NGCFT1: FT1_MESSAGE mid:" << e.message_id << " fk:" << e.file_kind << " [" << bin2hex(e.file_id) << "]\n";
// .... just rethrow??
// TODO: dont
return dispatch(
NGCFT1_Event::recv_message,
Events::NGCFT1_recv_message{
e.group_number, e.peer_number,
e.message_id,
static_cast<NGCFT1_file_kind>(e.file_kind),
e.file_id.data(), e.file_id.size()
}
);
}

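Putting both halves together, the send and receive sides of the new message-level announcement could look roughly like this (sketch; nft is an already constructed NGCFT1, and the 20-byte info hash is whatever the application uses as file_id):

#include "./ngcft1.hpp"
#include <vector>
#include <iostream>
// hypothetical sender: announce a file to the whole group by info hash
static void announce_file(NGCFT1& nft, uint32_t group_number, const std::vector<uint8_t>& info_hash) {
	uint32_t message_id = 0; // filled in by the call
	if (nft.NGC_FT1_send_message_public(
		group_number, message_id,
		static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_INFO),
		info_hash.data(), info_hash.size()
	)) {
		std::cout << "announced, message_id " << message_id << "\n";
	}
}
// hypothetical receiver, same shape as SHA1_NGCFT1::onEvent(NGCFT1_recv_message) further down;
// register it via nft.subscribe(&sink, NGCFT1_Event::recv_message)
struct MessageSink : public NGCFT1EventI {
	bool onEvent(const Events::NGCFT1_recv_message& e) override {
		std::cout << "recv_message mid:" << e.message_id << " id_bytes:" << e.file_id_size << "\n";
		return true;
	}
};
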

@@ -67,14 +67,48 @@ namespace Events {
size_t data_size;
};
struct NGCFT1_recv_done {
uint32_t group_number;
uint32_t peer_number;
uint8_t transfer_id;
// TODO: reason
};
struct NGCFT1_send_done {
uint32_t group_number;
uint32_t peer_number;
uint8_t transfer_id;
// TODO: reason
};
struct NGCFT1_recv_message {
uint32_t group_number;
uint32_t peer_number;
uint32_t message_id;
NGCFT1_file_kind file_kind;
const uint8_t* file_id;
size_t file_id_size;
};
} // Events
enum class NGCFT1_Event : uint8_t {
recv_request,
recv_init,
recv_data,
send_data,
recv_done,
send_done,
recv_message,
MAX
};
@@ -84,6 +118,9 @@ struct NGCFT1EventI {
virtual bool onEvent(const Events::NGCFT1_recv_init&) { return false; }
virtual bool onEvent(const Events::NGCFT1_recv_data&) { return false; }
virtual bool onEvent(const Events::NGCFT1_send_data&) { return false; } // const?
virtual bool onEvent(const Events::NGCFT1_recv_done&) { return false; }
virtual bool onEvent(const Events::NGCFT1_send_done&) { return false; }
virtual bool onEvent(const Events::NGCFT1_recv_message&) { return false; }
};
using NGCFT1EventProviderI = EventProviderI<NGCFT1EventI>;
@@ -159,6 +196,7 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
bool sendPKG_FT1_INIT_ACK(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
bool sendPKG_FT1_DATA(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, uint16_t sequence_id, const uint8_t* data, size_t data_size);
bool sendPKG_FT1_DATA_ACK(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const uint16_t* seq_ids, size_t seq_ids_size);
bool sendPKG_FT1_MESSAGE(uint32_t group_number, uint32_t message_id, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size);
void updateSendTransfer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer, size_t idx, std::set<LEDBAT::SeqIDType>& timeouts_set);
void iteratePeer(float time_delta, uint32_t group_number, uint32_t peer_number, Group::Peer& peer);
@@ -189,12 +227,21 @@ class NGCFT1 : public ToxEventI, public NGCEXTEventI, public NGCFT1EventProvider
uint8_t* transfer_id
);
// sends the message and fills in message_id
bool NGC_FT1_send_message_public(
uint32_t group_number,
uint32_t& message_id,
uint32_t file_kind,
const uint8_t* file_id, size_t file_id_size
);
protected:
bool onEvent(const Events::NGCEXT_ft1_request&) override;
bool onEvent(const Events::NGCEXT_ft1_init&) override;
bool onEvent(const Events::NGCEXT_ft1_init_ack&) override;
bool onEvent(const Events::NGCEXT_ft1_data&) override;
bool onEvent(const Events::NGCEXT_ft1_data_ack&) override;
bool onEvent(const Events::NGCEXT_ft1_message&) override;
protected:
//bool onToxEvent(const Tox_Event_Group_Custom_Packet* e) override;


@@ -2,19 +2,18 @@
#include "./ngcext.hpp"
#include "./ngcft1.hpp"
#include "./sha1_ngcft1.hpp"
#include <memory>
#include <iostream>
// fwd
//class RegMessageModel;
#define RESOLVE_INSTANCE(x) static_cast<x*>(solana_api->resolveInstance(#x))
#define PROVIDE_INSTANCE(x, p, v) solana_api->provideInstance(#x, p, static_cast<x*>(v))
static std::unique_ptr<NGCEXTEventProvider> g_ngcextep = nullptr;
// TODO: make sep plug
static std::unique_ptr<NGCFT1> g_ngcft1 = nullptr;
static std::unique_ptr<SHA1_NGCFT1> g_sha1_ngcft1 = nullptr;
extern "C" {
@@ -35,10 +34,16 @@ SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_start(struct SolanaAPI* solana_api)
ToxI* tox_i = nullptr;
ToxEventProviderI* tox_event_provider_i = nullptr;
Contact3Registry* cr = nullptr;
RegistryMessageModel* rmm = nullptr;
ToxContactModel2* tcm = nullptr;
{ // make sure required types are loaded
tox_i = RESOLVE_INSTANCE(ToxI);
tox_event_provider_i = RESOLVE_INSTANCE(ToxEventProviderI);
cr = RESOLVE_INSTANCE(Contact3Registry);
rmm = RESOLVE_INSTANCE(RegistryMessageModel);
tcm = RESOLVE_INSTANCE(ToxContactModel2);
if (tox_i == nullptr) {
std::cerr << "PLUGIN NGCEXT missing ToxI\n";
@@ -49,12 +54,28 @@ SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_start(struct SolanaAPI* solana_api)
std::cerr << "PLUGIN NGCEXT missing ToxEventProviderI\n";
return 2;
}
if (cr == nullptr) {
std::cerr << "PLUGIN NGCEXT missing Contact3Registry\n";
return 2;
}
if (rmm == nullptr) {
std::cerr << "PLUGIN NGCEXT missing RegistryMessageModel\n";
return 2;
}
if (tcm == nullptr) {
std::cerr << "PLUGIN NGCEXT missing ToxContactModel2\n";
return 2;
}
}
// static store, could be anywhere tho
// construct with fetched dependencies
g_ngcextep = std::make_unique<NGCEXTEventProvider>(*tox_event_provider_i);
g_ngcft1 = std::make_unique<NGCFT1>(*tox_i, *tox_event_provider_i, *g_ngcextep.get());
g_sha1_ngcft1 = std::make_unique<SHA1_NGCFT1>(*cr, *rmm, *g_ngcft1.get(), *tcm);
// register types
PROVIDE_INSTANCE(NGCEXTEventProviderI, "NGCEXT", g_ngcextep.get());
@@ -62,12 +83,15 @@ SOLANA_PLUGIN_EXPORT uint32_t solana_plugin_start(struct SolanaAPI* solana_api)
PROVIDE_INSTANCE(NGCFT1EventProviderI, "NGCEXT", g_ngcft1.get());
PROVIDE_INSTANCE(NGCFT1, "NGCEXT", g_ngcft1.get());
PROVIDE_INSTANCE(SHA1_NGCFT1, "NGCEXT", g_sha1_ngcft1.get());
return 0;
}
SOLANA_PLUGIN_EXPORT void solana_plugin_stop(void) {
std::cout << "PLUGIN NGCEXT STOP()\n";
g_sha1_ngcft1.reset();
g_ngcft1.reset();
g_ngcextep.reset();
}
@@ -76,6 +100,7 @@ SOLANA_PLUGIN_EXPORT void solana_plugin_tick(float delta) {
//std::cout << "PLUGIN NGCEXT TICK()\n";
g_ngcft1->iterate(delta);
g_sha1_ngcft1->iterate(delta);
}
} // extern C

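The plugin above wires everything through the SolanaAPI instance registry; a host that links the library directly can construct the same stack by hand (sketch, using the same constructor signatures as solana_plugin_start(); lifetimes and the per-frame iterate() calls are the host's responsibility):

#include "./sha1_ngcft1.hpp" // pulls in ngcft1.hpp / ngcext.hpp and the solanaceae interfaces
// hypothetical manual wiring, mirroring solana_plugin_start()/solana_plugin_tick() above
struct FTStack {
	NGCEXTEventProvider ngcext;
	NGCFT1 ngcft1;
	SHA1_NGCFT1 sha1_ngcft1;
	FTStack(ToxI& t, ToxEventProviderI& tep, Contact3Registry& cr, RegistryMessageModel& rmm, ToxContactModel2& tcm) :
		ngcext(tep),
		ngcft1(t, tep, ngcext),
		sha1_ngcft1(cr, rmm, ngcft1, tcm)
	{}
	void tick(float delta) {
		ngcft1.iterate(delta);
		sha1_ngcft1.iterate(delta);
	}
};
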
src/sha1_ngcft1.cpp (new file, 856 lines)

@@ -0,0 +1,856 @@
#include "./sha1_ngcft1.hpp"
#include <solanaceae/toxcore/utils.hpp>
#include <solanaceae/contact/components.hpp>
#include <solanaceae/tox_contacts/components.hpp>
#include <solanaceae/message3/components.hpp>
#include <solanaceae/tox_messages/components.hpp>
#include <solanaceae/message3/file_r_file.hpp>
#include "./ft1_sha1_info.hpp"
#include "./hash_utils.hpp"
#include <sodium.h>
#include <entt/container/dense_set.hpp>
#include <iostream>
#include <variant>
namespace Message::Components {
using Content = ContentHandle;
} // Message::Components
// TODO: rename to content components
namespace Components {
struct Messages {
std::vector<Message3Handle> messages;
};
using FT1InfoSHA1 = FT1InfoSHA1;
struct FT1InfoSHA1Data {
std::vector<uint8_t> data;
};
struct FT1InfoSHA1Hash {
std::vector<uint8_t> hash;
};
struct FT1ChunkSHA1Cache {
std::vector<bool> have_chunk;
bool have_all {false};
size_t have_count {0};
entt::dense_map<SHA1Digest, size_t> chunk_hash_to_index;
std::optional<size_t> chunkIndex(const SHA1Digest& hash) const;
size_t chunkSize(size_t chunk_index) const;
bool haveChunk(const SHA1Digest& hash) const;
};
struct SuspectedParticipants {
entt::dense_set<Contact3> participants;
};
struct ReRequestInfoTimer {
float timer {0.f};
};
} // Components
std::optional<size_t> Components::FT1ChunkSHA1Cache::chunkIndex(const SHA1Digest& hash) const {
const auto it = chunk_hash_to_index.find(hash);
if (it != chunk_hash_to_index.cend()) {
return it->second;
} else {
return std::nullopt;
}
}
bool Components::FT1ChunkSHA1Cache::haveChunk(const SHA1Digest& hash) const {
if (have_all) { // short cut
return true;
}
if (auto i_opt = chunkIndex(hash); i_opt.has_value()) {
return have_chunk[i_opt.value()];
}
// not part of this file
return false;
}
static size_t chunkSize(const FT1InfoSHA1& sha1_info, size_t chunk_index) {
if (chunk_index+1 == sha1_info.chunks.size()) {
// last chunk
return sha1_info.file_size - chunk_index * sha1_info.chunk_size;
} else {
return sha1_info.chunk_size;
}
}
void SHA1_NGCFT1::queueUpRequestChunk(uint32_t group_number, uint32_t peer_number, ContentHandle content, const SHA1Digest& hash) {
// TODO: transfers
for (auto& [i_g, i_p, i_m, i_h, i_t] : _queue_requested_chunk) {
// if already in queue
if (i_g == group_number && i_p == peer_number && i_h == hash) {
// update timer
i_t = 0.f;
return;
}
}
// not in queue yet
_queue_requested_chunk.push_back(std::make_tuple(group_number, peer_number, content, hash, 0.f));
}
uint64_t SHA1_NGCFT1::combineIds(const uint32_t group_number, const uint32_t peer_number) {
return (uint64_t(group_number) << 32) | peer_number;
}
void SHA1_NGCFT1::updateMessages(ContentHandle ce) {
assert(ce.all_of<Components::Messages>());
for (auto msg : ce.get<Components::Messages>().messages) {
if (ce.all_of<Message::Components::Transfer::FileInfo>() && !msg.all_of<Message::Components::Transfer::FileInfo>()) {
msg.emplace<Message::Components::Transfer::FileInfo>(ce.get<Message::Components::Transfer::FileInfo>());
}
if (ce.all_of<Message::Components::Transfer::FileInfoLocal>()) {
msg.emplace_or_replace<Message::Components::Transfer::FileInfoLocal>(ce.get<Message::Components::Transfer::FileInfoLocal>());
}
if (ce.all_of<Message::Components::Transfer::BytesSent>()) {
msg.emplace_or_replace<Message::Components::Transfer::BytesSent>(ce.get<Message::Components::Transfer::BytesSent>());
}
if (auto* cc = ce.try_get<Components::FT1ChunkSHA1Cache>(); cc != nullptr && cc->have_all) {
msg.emplace_or_replace<Message::Components::Transfer::TagHaveAll>();
}
_rmm.throwEventUpdate(msg);
}
}
SHA1_NGCFT1::SHA1_NGCFT1(
Contact3Registry& cr,
RegistryMessageModel& rmm,
NGCFT1& nft,
ToxContactModel2& tcm
) :
_cr(cr),
_rmm(rmm),
_nft(nft),
_tcm(tcm)
{
_nft.subscribe(this, NGCFT1_Event::recv_request);
_nft.subscribe(this, NGCFT1_Event::recv_init);
_nft.subscribe(this, NGCFT1_Event::recv_data);
_nft.subscribe(this, NGCFT1_Event::send_data);
_nft.subscribe(this, NGCFT1_Event::recv_done);
_nft.subscribe(this, NGCFT1_Event::send_done);
_nft.subscribe(this, NGCFT1_Event::recv_message);
//_rmm.subscribe(this, RegistryMessageModel_Event::message_construct);
//_rmm.subscribe(this, RegistryMessageModel_Event::message_updated);
//_rmm.subscribe(this, RegistryMessageModel_Event::message_destroy);
_rmm.subscribe(this, RegistryMessageModel_Event::send_file_path);
}
void SHA1_NGCFT1::iterate(float delta) {
{ // timers
// sending transfers
for (auto peer_it = _sending_transfers.begin(); peer_it != _sending_transfers.end();) {
for (auto it = peer_it->second.begin(); it != peer_it->second.end();) {
it->second.time_since_activity += delta;
// if we have not heard for 10sec, timeout
if (it->second.time_since_activity >= 10.f) {
//std::cerr << "SHA1_NGCFT1 warning: sending chunk tansfer timed out " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
std::cerr << "SHA1_NGCFT1 warning: sending chunk tansfer timed out " << "." << int(it->first) << "\n";
it = peer_it->second.erase(it);
} else {
it++;
}
}
if (peer_it->second.empty()) {
// cleanup unused peers; too aggressive?
peer_it = _sending_transfers.erase(peer_it);
} else {
peer_it++;
}
}
//for (auto it = _transfers_sending_chunk.begin(); it != _transfers_sending_chunk.end();) {
//float& time_since_remove_activity = std::get<float>(*it);
//time_since_remove_activity += delta;
//// if we have not heard for 10sec, timeout
//if (time_since_remove_activity >= 10.f) {
//std::cerr << "SHA1 sending chunk tansfer timed out " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
//it = _transfers_sending_chunk.erase(it);
//} else {
//it++;
//}
//}
// queued requests
for (auto it = _queue_requested_chunk.begin(); it != _queue_requested_chunk.end();) {
float& timer = std::get<float>(*it);
timer += delta;
if (timer >= 10.f) {
it = _queue_requested_chunk.erase(it);
} else {
it++;
}
}
{ // requested info timers
std::vector<Content> timed_out;
_contentr.view<Components::ReRequestInfoTimer>().each([delta, &timed_out](Content e, Components::ReRequestInfoTimer& rrit) {
rrit.timer += delta;
// 15sec, TODO: config
if (rrit.timer >= 15.f) {
timed_out.push_back(e);
}
});
for (const auto e : timed_out) {
// TODO: avoid dups
_queue_content_want_info.push_back({_contentr, e});
_contentr.remove<Components::ReRequestInfoTimer>(e);
}
}
}
// if we have not reached the total cap for transfers
// count running transfers
size_t running_sending_transfer_count {0};
for (const auto& [_, transfers] : _sending_transfers) {
running_sending_transfer_count += transfers.size();
}
size_t running_receiving_transfer_count {0};
for (const auto& [_, transfers] : _receiving_transfers) {
running_receiving_transfer_count += transfers.size();
}
if (running_sending_transfer_count < _max_concurrent_out) {
// TODO: for each peer? transfer cap per peer?
// TODO: info queue
if (!_queue_requested_chunk.empty()) { // then check for chunk requests
const auto [group_number, peer_number, ce, chunk_hash, _] = _queue_requested_chunk.front();
auto chunk_idx_opt = ce.get<Components::FT1ChunkSHA1Cache>().chunkIndex(chunk_hash);
if (chunk_idx_opt.has_value()) {
const auto& info = ce.get<Components::FT1InfoSHA1>();
uint8_t transfer_id {0};
if (_nft.NGC_FT1_send_init_private(
group_number, peer_number,
static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_CHUNK),
chunk_hash.data.data(), chunk_hash.size(),
chunkSize(info, chunk_idx_opt.value()),
&transfer_id
)) {
_sending_transfers
[combineIds(group_number, peer_number)]
[transfer_id] // TODO: also save index?
.v = SendingTransfer::Chunk{ce, chunk_idx_opt.value() * info.chunk_size};
}
}
// remove from queue regardless
_queue_requested_chunk.pop_front();
}
}
if (running_receiving_transfer_count < _max_concurrent_in) {
// strictly prioritize info
if (!_queue_content_want_info.empty()) {
const auto ce = _queue_content_want_info.front();
// make sure we are missing the info
assert(!ce.all_of<Components::ReRequestInfoTimer>());
assert(!ce.all_of<Components::FT1InfoSHA1>());
assert(!ce.all_of<Components::FT1InfoSHA1Data>());
assert(!ce.all_of<Components::FT1ChunkSHA1Cache>());
assert(ce.all_of<Components::FT1InfoSHA1Hash>());
// get a list of peers we can request this file from
// TODO: randomly request from non SuspectedParticipants
std::vector<std::pair<uint32_t, uint32_t>> tox_peers;
for (const auto c : ce.get<Components::SuspectedParticipants>().participants) {
// TODO: sort by con state?
// prio to direct?
if (const auto* cs = _cr.try_get<Contact::Components::ConnectionState>(c); cs == nullptr || cs->state == Contact::Components::ConnectionState::State::disconnected) {
continue;
}
if (_cr.all_of<Contact::Components::ToxGroupPeerEphemeral>(c)) {
const auto& tgpe = _cr.get<Contact::Components::ToxGroupPeerEphemeral>(c);
tox_peers.push_back({tgpe.group_number, tgpe.peer_number});
}
}
// 1 in 20 chance to ask random peer instead
// TODO: config + tweak
// TODO: save group in content to avoid the tox_peers list build
if (tox_peers.empty() || (_rng()%20) == 0) {
// meh
// HACK: determine group based on last tox_peers
if (!tox_peers.empty()) {
const uint32_t group_number = tox_peers.back().first;
auto gch = _tcm.getContactGroup(group_number);
assert(static_cast<bool>(gch));
std::vector<uint32_t> un_tox_peers;
for (const auto child : gch.get<Contact::Components::ParentOf>().subs) {
if (const auto* cs = _cr.try_get<Contact::Components::ConnectionState>(child); cs == nullptr || cs->state == Contact::Components::ConnectionState::State::disconnected) {
continue;
}
if (_cr.all_of<Contact::Components::ToxGroupPeerEphemeral>(child)) {
const auto& tgpe = _cr.get<Contact::Components::ToxGroupPeerEphemeral>(child);
un_tox_peers.push_back(tgpe.peer_number);
}
}
if (un_tox_peers.empty()) {
// no one online, we are out of luck
} else {
const size_t sample_i = _rng()%un_tox_peers.size();
const auto peer_number = un_tox_peers.at(sample_i);
//const auto& info = msg.get<Components::FT1InfoSHA1>();
const auto& info_hash = ce.get<Components::FT1InfoSHA1Hash>().hash;
_nft.NGC_FT1_send_request_private(
group_number, peer_number,
static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_INFO),
info_hash.data(), info_hash.size()
);
ce.emplace<Components::ReRequestInfoTimer>(0.f);
_queue_content_want_info.pop_front();
std::cout << "SHA1_NGCFT1: sent info request for [" << SHA1Digest{info_hash} << "] to " << group_number << ":" << peer_number << " (rng)\n";
}
}
} else {
const size_t sample_i = _rng()%tox_peers.size();
const auto [group_number, peer_number] = tox_peers.at(sample_i);
//const auto& info = msg.get<Components::FT1InfoSHA1>();
const auto& info_hash = ce.get<Components::FT1InfoSHA1Hash>().hash;
_nft.NGC_FT1_send_request_private(
group_number, peer_number,
static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_INFO),
info_hash.data(), info_hash.size()
);
ce.emplace<Components::ReRequestInfoTimer>(0.f);
_queue_content_want_info.pop_front();
std::cout << "SHA1_NGCFT1: sent info request for [" << SHA1Digest{info_hash} << "] to " << group_number << ":" << peer_number << "\n";
}
} else if (!_queue_content_want_chunk.empty()) {
}
}
}
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_request& e) {
// only interested in sha1
if (e.file_kind != NGCFT1_file_kind::HASH_SHA1_INFO && e.file_kind != NGCFT1_file_kind::HASH_SHA1_CHUNK) {
return false;
}
//std::cout << "SHA1_NGCFT1: FT1_REQUEST fk:" << int(e.file_kind) << " [" << bin2hex({e.file_id, e.file_id+e.file_id_size}) << "]\n";
if (e.file_kind == NGCFT1_file_kind::HASH_SHA1_INFO) {
if (e.file_id_size != 20) {
// error
return false;
}
SHA1Digest info_hash{e.file_id, e.file_id_size};
if (!_info_to_content.count(info_hash)) {
// we dont know about this
return false;
}
auto content = _info_to_content.at(info_hash);
if (!content.all_of<Components::FT1InfoSHA1Data>()) {
// we dont have the info for that infohash (yet?)
return false;
}
// TODO: queue instead
//queueUpRequestInfo(e.group_number, e.peer_number, info_hash);
uint8_t transfer_id {0};
_nft.NGC_FT1_send_init_private(
e.group_number, e.peer_number,
static_cast<uint32_t>(e.file_kind),
e.file_id, e.file_id_size,
content.get<Components::FT1InfoSHA1Data>().data.size(),
&transfer_id
);
_sending_transfers
[combineIds(e.group_number, e.peer_number)]
[transfer_id]
.v = SendingTransfer::Info{content.get<Components::FT1InfoSHA1Data>().data};
} else if (e.file_kind == NGCFT1_file_kind::HASH_SHA1_CHUNK) {
if (e.file_id_size != 20) {
// error
return false;
}
SHA1Digest chunk_hash{e.file_id, e.file_id_size};
if (!_chunks.count(chunk_hash)) {
// we dont know about this
return false;
}
auto ce = _chunks.at(chunk_hash);
{ // they advertise interest in the content
const auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
ce.get_or_emplace<Components::SuspectedParticipants>().participants.emplace(c);
}
assert(ce.all_of<Components::FT1ChunkSHA1Cache>());
if (!ce.get<Components::FT1ChunkSHA1Cache>().haveChunk(chunk_hash)) {
// we dont have the chunk
return false;
}
// queue good request
queueUpRequestChunk(e.group_number, e.peer_number, ce, chunk_hash);
} else {
assert(false && "unhandled case");
}
return true;
}
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_init& e) {
// only interested in sha1
if (e.file_kind != NGCFT1_file_kind::HASH_SHA1_INFO && e.file_kind != NGCFT1_file_kind::HASH_SHA1_CHUNK) {
return false;
}
return false;
}
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_data& e) {
return false;
}
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_send_data& e) {
if (!_sending_transfers.count(combineIds(e.group_number, e.peer_number))) {
return false;
}
auto& peer = _sending_transfers.at(combineIds(e.group_number, e.peer_number));
if (!peer.count(e.transfer_id)) {
return false;
}
auto& transfer = peer.at(e.transfer_id);
if (std::holds_alternative<SendingTransfer::Info>(transfer.v)) {
auto& info_transfer = std::get<SendingTransfer::Info>(transfer.v);
for (size_t i = 0; i < e.data_size && (i + e.data_offset) < info_transfer.info_data.size(); i++) {
e.data[i] = info_transfer.info_data[i + e.data_offset];
}
if (e.data_offset + e.data_size >= info_transfer.info_data.size()) {
// was last read (probably TODO: add transfer destruction event)
peer.erase(e.transfer_id);
}
} else if (std::holds_alternative<SendingTransfer::Chunk>(transfer.v)) {
auto& chunk_transfer = std::get<SendingTransfer::Chunk>(transfer.v);
// TODO: should we really use file?
const auto data = chunk_transfer.content.get<Message::Components::Transfer::File>()->read(chunk_transfer.offset_into_file + e.data_offset, e.data_size);
// TODO: optimize
for (size_t i = 0; i < e.data_size && i < data.size(); i++) {
e.data[i] = data[i];
}
chunk_transfer.content.get_or_emplace<Message::Components::Transfer::BytesSent>().total += data.size();
// TODO: add event to propagate to messages
//_rmm.throwEventUpdate(transfer); // should we?
//if (e.data_offset + e.data_size >= *insert chunk size here*) {
//// was last read (probably TODO: add transfer destruction event)
//peer.erase(e.transfer_id);
//}
} else {
assert(false && "not implemented?");
}
transfer.time_since_activity = 0.f;
return true;
}
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_done& e) {
return false;
}
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_send_done& e) {
if (!_sending_transfers.count(combineIds(e.group_number, e.peer_number))) {
return false;
}
auto& peer_transfers = _sending_transfers.at(combineIds(e.group_number, e.peer_number));
if (!peer_transfers.count(e.transfer_id)) {
return false;
}
const auto& tv = peer_transfers[e.transfer_id].v;
if (std::holds_alternative<SendingTransfer::Chunk>(tv)) {
updateMessages(std::get<SendingTransfer::Chunk>(tv).content); // mostly for sent bytes
}
peer_transfers.erase(e.transfer_id);
return true;
}
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_message& e) {
if (e.file_kind != NGCFT1_file_kind::HASH_SHA1_INFO) {
return false;
}
uint64_t ts = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
const auto c = _tcm.getContactGroupPeer(e.group_number, e.peer_number);
const auto self_c = c.get<Contact::Components::Self>().self;
auto* reg_ptr = _rmm.get(c);
if (reg_ptr == nullptr) {
std::cerr << "SHA1_NGCFT1 error: cant find reg\n";
return false;
}
Message3Registry& reg = *reg_ptr;
// TODO: check for existence, hs or other syncing mechanics might have sent it already (or like, it arrived 2x or whatever)
auto new_msg_e = reg.create();
{ // contact
// from
reg.emplace<Message::Components::ContactFrom>(new_msg_e, c);
// to
reg.emplace<Message::Components::ContactTo>(new_msg_e, c.get<Contact::Components::Parent>().parent);
}
reg.emplace<Message::Components::ToxGroupMessageID>(new_msg_e, e.message_id);
reg.emplace<Message::Components::Transfer::TagReceiving>(new_msg_e); // add sending?
reg.emplace<Message::Components::TimestampProcessed>(new_msg_e, ts);
//reg.emplace<Components::TimestampWritten>(new_msg_e, 0);
reg.emplace<Message::Components::Timestamp>(new_msg_e, ts); // reactive?
{ // by whom
auto& synced_by = reg.get_or_emplace<Message::Components::SyncedBy>(new_msg_e).list;
synced_by.emplace(self_c);
}
// check if content exists
const auto sha1_info_hash = std::vector<uint8_t>{e.file_id, e.file_id+e.file_id_size};
ContentHandle ce;
if (_info_to_content.count(sha1_info_hash)) {
ce = _info_to_content.at(sha1_info_hash);
std::cout << "SHA1_NGCFT1: new message has existing content\n";
} else {
ce = {_contentr, _contentr.create()};
_info_to_content[sha1_info_hash] = ce;
std::cout << "SHA1_NGCFT1: new message has new content\n";
//ce.emplace<Components::FT1InfoSHA1>(sha1_info);
//ce.emplace<Components::FT1InfoSHA1Data>(sha1_info_data); // keep around? or file?
ce.emplace<Components::FT1InfoSHA1Hash>(sha1_info_hash);
//{ // lookup tables and have
//auto& cc = ce.emplace<Components::FT1ChunkSHA1Cache>();
//cc.have_all = true;
//// skip have vec, since all
////cc.have_chunk
//cc.have_count = sha1_info.chunks.size(); // need?
//_info_to_content[sha1_info_hash] = ce;
//for (size_t i = 0; i < sha1_info.chunks.size(); i++) {
//_chunks[sha1_info.chunks[i]] = ce;
//cc.chunk_hash_to_index[sha1_info.chunks[i]] = i;
//}
//}
// TODO: ft1 specific comp
//ce.emplace<Message::Components::Transfer::File>(std::move(file_impl));
}
ce.get_or_emplace<Components::Messages>().messages.push_back({reg, new_msg_e});
ce.get_or_emplace<Components::SuspectedParticipants>().participants.emplace(c);
if (!ce.all_of<Components::ReRequestInfoTimer>() && !ce.all_of<Components::FT1InfoSHA1>()) {
// TODO: check if already receiving
_queue_content_want_info.push_back(ce);
}
// TODO: queue info dl
//reg_ptr->emplace<Components::FT1InfoSHA1>(e, sha1_info);
//reg_ptr->emplace<Components::FT1InfoSHA1Data>(e, sha1_info_data); // keep around? or file?
//reg.emplace<Components::FT1InfoSHA1Hash>(new_msg_e, std::vector<uint8_t>{e.file_id, e.file_id+e.file_id_size});
if (auto* cc = ce.try_get<Components::FT1ChunkSHA1Cache>(); cc != nullptr && cc->have_all) {
reg_ptr->emplace<Message::Components::Transfer::TagHaveAll>(new_msg_e);
}
if (ce.all_of<Message::Components::Transfer::FileInfo>()) {
reg_ptr->emplace<Message::Components::Transfer::FileInfo>(new_msg_e, ce.get<Message::Components::Transfer::FileInfo>());
}
if (ce.all_of<Message::Components::Transfer::FileInfoLocal>()) {
reg_ptr->emplace<Message::Components::Transfer::FileInfoLocal>(new_msg_e, ce.get<Message::Components::Transfer::FileInfoLocal>());
}
if (ce.all_of<Message::Components::Transfer::BytesSent>()) {
reg_ptr->emplace<Message::Components::Transfer::BytesSent>(new_msg_e, ce.get<Message::Components::Transfer::BytesSent>());
}
// TODO: queue info/check if we already have info
_rmm.throwEventConstruct(reg, new_msg_e);
return true; // false?
}
bool SHA1_NGCFT1::sendFilePath(const Contact3 c, std::string_view file_name, std::string_view file_path) {
if (
// TODO: add support for offline queuing
!_cr.all_of<Contact::Components::ToxGroupEphemeral>(c)
) {
return false;
}
std::cout << "SHA1_NGCFT1: got sendFilePath()\n";
auto* reg_ptr = _rmm.get(c);
if (reg_ptr == nullptr) {
return false;
}
// TODO: rw?
// TODO: memory mapped would be king
auto file_impl = std::make_unique<FileRFile>(file_path);
if (!file_impl->isGood()) {
std::cerr << "SHA1_NGCFT1 error: failed opening file '" << file_path << "'!\n";
return true;
}
// get current time unix epoch utc
uint64_t ts = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
// 1. build info by hashing all chunks
FT1InfoSHA1 sha1_info;
// build info
sha1_info.file_name = file_name;
sha1_info.file_size = file_impl->_file_size;
{ // build chunks
// HACK: load file fully
// TODO: the speed is truly horrid
const auto file_data = file_impl->read(0, file_impl->_file_size);
size_t i = 0;
for (; i + sha1_info.chunk_size < file_data.size(); i += sha1_info.chunk_size) {
sha1_info.chunks.push_back(hash_sha1(file_data.data()+i, sha1_info.chunk_size));
}
if (i < file_data.size()) {
sha1_info.chunks.push_back(hash_sha1(file_data.data()+i, file_data.size()-i));
}
}
// 2. hash info
std::vector<uint8_t> sha1_info_data;
std::vector<uint8_t> sha1_info_hash;
std::cout << "SHA1_NGCFT1 info is: \n" << sha1_info;
sha1_info_data = sha1_info.toBuffer();
std::cout << "SHA1_NGCFT1 sha1_info size: " << sha1_info_data.size() << "\n";
sha1_info_hash = hash_sha1(sha1_info_data.data(), sha1_info_data.size());
std::cout << "SHA1_NGCFT1 sha1_info_hash: " << bin2hex(sha1_info_hash) << "\n";
// check if content exists
ContentHandle ce;
if (_info_to_content.count(sha1_info_hash)) {
ce = _info_to_content.at(sha1_info_hash);
// TODO: check if content is incomplete and use file instead
if (!ce.all_of<Components::FT1InfoSHA1>()) {
ce.emplace<Components::FT1InfoSHA1>(sha1_info);
}
if (!ce.all_of<Components::FT1InfoSHA1Data>()) {
ce.emplace<Components::FT1InfoSHA1Data>(sha1_info_data);
}
// hash has to be set already
// Components::FT1InfoSHA1Hash
{ // lookup tables and have
auto& cc = ce.get_or_emplace<Components::FT1ChunkSHA1Cache>();
cc.have_all = true;
// skip have vec, since all
//cc.have_chunk
cc.have_count = sha1_info.chunks.size(); // need?
_info_to_content[sha1_info_hash] = ce;
for (size_t i = sha1_info.chunks.size(); i > 0; i--) {
_chunks[sha1_info.chunks[i-1]] = ce;
// chunks can map to more than 1 index; for now, iterate in reverse so the first index wins as the real index
cc.chunk_hash_to_index[sha1_info.chunks[i-1]] = i-1;
}
}
{ // file info
// TODO: not overwrite fi? since same?
auto& file_info = ce.emplace_or_replace<Message::Components::Transfer::FileInfo>();
file_info.file_list.emplace_back() = {std::string{file_name}, file_impl->_file_size};
file_info.total_size = file_impl->_file_size;
ce.emplace_or_replace<Message::Components::Transfer::FileInfoLocal>(std::vector{std::string{file_path}});
}
// cleanup file
if (ce.all_of<Message::Components::Transfer::File>()) {
// replace
ce.remove<Message::Components::Transfer::File>();
}
ce.emplace<Message::Components::Transfer::File>(std::move(file_impl));
if (!ce.all_of<Message::Components::Transfer::BytesSent>()) {
ce.emplace<Message::Components::Transfer::BytesSent>(0u);
}
// TODO: make sure to abort every receiving transfer (sending info and chunk should be fine, info uses copy and chunk handle)
} else {
ce = {_contentr, _contentr.create()};
_info_to_content[sha1_info_hash] = ce;
ce.emplace<Components::FT1InfoSHA1>(sha1_info);
ce.emplace<Components::FT1InfoSHA1Data>(sha1_info_data); // keep around? or file?
ce.emplace<Components::FT1InfoSHA1Hash>(sha1_info_hash);
{ // lookup tables and have
auto& cc = ce.emplace<Components::FT1ChunkSHA1Cache>();
cc.have_all = true;
// skip have vec, since all
//cc.have_chunk
cc.have_count = sha1_info.chunks.size(); // need?
_info_to_content[sha1_info_hash] = ce;
for (size_t i = 0; i < sha1_info.chunks.size(); i++) {
_chunks[sha1_info.chunks[i]] = ce;
cc.chunk_hash_to_index[sha1_info.chunks[i]] = i;
}
}
{ // file info
auto& file_info = ce.emplace<Message::Components::Transfer::FileInfo>();
//const auto& file = ce.get<Message::Components::Transfer::File>();
file_info.file_list.emplace_back() = {std::string{file_name}, file_impl->_file_size};
file_info.total_size = file_impl->_file_size;
ce.emplace<Message::Components::Transfer::FileInfoLocal>(std::vector{std::string{file_path}});
}
ce.emplace<Message::Components::Transfer::File>(std::move(file_impl));
ce.emplace<Message::Components::Transfer::BytesSent>(0u);
}
const auto c_self = _cr.get<Contact::Components::Self>(c).self;
if (!_cr.valid(c_self)) {
std::cerr << "SHA1_NGCFT1 error: failed to get self!\n";
return true;
}
const auto msg_e = reg_ptr->create();
reg_ptr->emplace<Message::Components::ContactTo>(msg_e, c);
reg_ptr->emplace<Message::Components::ContactFrom>(msg_e, c_self);
reg_ptr->emplace<Message::Components::Timestamp>(msg_e, ts); // reactive?
reg_ptr->emplace<Message::Components::Transfer::TagHaveAll>(msg_e);
reg_ptr->emplace<Message::Components::Transfer::TagSending>(msg_e);
ce.get_or_emplace<Components::Messages>().messages.push_back({*reg_ptr, msg_e});
//reg_ptr->emplace<Message::Components::Transfer::FileKind>(e, file_kind);
// file id would be sha1_info hash or something
//reg_ptr->emplace<Message::Components::Transfer::FileID>(e, file_id);
if (ce.all_of<Message::Components::Transfer::FileInfo>()) {
reg_ptr->emplace<Message::Components::Transfer::FileInfo>(msg_e, ce.get<Message::Components::Transfer::FileInfo>());
}
if (ce.all_of<Message::Components::Transfer::FileInfoLocal>()) {
reg_ptr->emplace<Message::Components::Transfer::FileInfoLocal>(msg_e, ce.get<Message::Components::Transfer::FileInfoLocal>());
}
if (ce.all_of<Message::Components::Transfer::BytesSent>()) {
reg_ptr->emplace<Message::Components::Transfer::BytesSent>(msg_e, ce.get<Message::Components::Transfer::BytesSent>());
}
// TODO: determine if this is true
//reg_ptr->emplace<Message::Components::Transfer::TagPaused>(e);
#if 0
const auto friend_number = _cr.get<Contact::Components::ToxFriendEphemeral>(c).friend_number;
const auto&& [transfer_id, err] = _t.toxFileSend(friend_number, file_kind, file_impl->_file_size, file_id, file_name);
if (err == TOX_ERR_FILE_SEND_OK) {
reg_ptr->emplace<Message::Components::Transfer::ToxTransferFriend>(e, friend_number, transfer_id.value());
// TODO: add tag signifying init sent status?
toxFriendLookupAdd({*reg_ptr, e});
} // else queue?
#endif
if (_cr.any_of<Contact::Components::ToxGroupEphemeral>(c)) {
const uint32_t group_number = _cr.get<Contact::Components::ToxGroupEphemeral>(c).group_number;
uint32_t message_id = 0;
// TODO: check return
_nft.NGC_FT1_send_message_public(group_number, message_id, static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_INFO), sha1_info_hash.data(), sha1_info_hash.size());
reg_ptr->emplace<Message::Components::ToxGroupMessageID>(msg_e, message_id);
// TODO: generalize?
auto& synced_by = reg_ptr->emplace<Message::Components::SyncedBy>(msg_e).list;
synced_by.emplace(c_self);
} else if (
// non online group
_cr.any_of<Contact::Components::ToxGroupPersistent>(c)
) {
// create msg_id
const uint32_t message_id = randombytes_random();
reg_ptr->emplace<Message::Components::ToxGroupMessageID>(msg_e, message_id);
// TODO: generalize?
auto& synced_by = reg_ptr->emplace<Message::Components::SyncedBy>(msg_e).list;
synced_by.emplace(c_self);
}
_rmm.throwEventConstruct(*reg_ptr, msg_e);
// TODO: place in iterate?
updateMessages(ce);
return true;
}

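The chunk bookkeeping above reduces to simple arithmetic; a worked sketch of the rule the file-local chunkSize() helper implements (illustrative numbers, not from the code):

#include <cstdint>
#include <cassert>
int main(void) {
	const uint64_t file_size  = 1000000;  // example file
	const uint32_t chunk_size = 128*1024; // default from FT1InfoSHA1 (131072)
	// number of chunks, rounding up; only the last one may be short
	const uint64_t chunk_count = (file_size + chunk_size - 1) / chunk_size;
	assert(chunk_count == 8);
	// same rule as chunkSize(): the tail chunk is whatever is left over
	const uint64_t last_chunk = file_size - (chunk_count - 1) * uint64_t(chunk_size);
	assert(last_chunk == 1000000 - 7*131072); // 82496 bytes
	return 0;
}
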
src/sha1_ngcft1.hpp (new file, 125 lines)

@@ -0,0 +1,125 @@
#pragma once
// solanaceae port of sha1 fts for NGCFT1
#include <solanaceae/contact/contact_model3.hpp>
#include <solanaceae/message3/registry_message_model.hpp>
#include <solanaceae/tox_contacts/tox_contact_model2.hpp>
#include "./ngcft1.hpp"
#include "./ft1_sha1_info.hpp"
#include <entt/entity/registry.hpp>
#include <entt/entity/handle.hpp>
#include <entt/container/dense_map.hpp>
#include <variant>
#include <random>
enum class Content : uint32_t {};
using ContentRegistry = entt::basic_registry<Content>;
using ContentHandle = entt::basic_handle<ContentRegistry>;
class SHA1_NGCFT1 : public RegistryMessageModelEventI, public NGCFT1EventI {
Contact3Registry& _cr;
RegistryMessageModel& _rmm;
NGCFT1& _nft;
ToxContactModel2& _tcm;
std::minstd_rand _rng {1337*11};
// registry per group?
ContentRegistry _contentr;
// limit this to each group?
entt::dense_map<SHA1Digest, ContentHandle> _info_to_content;
// sha1 chunk index
// TODO: optimize lookup
entt::dense_map<SHA1Digest, ContentHandle> _chunks;
// group_number, peer_number, content, chunk_hash, timer
std::deque<std::tuple<uint32_t, uint32_t, ContentHandle, SHA1Digest, float>> _queue_requested_chunk;
//void queueUpRequestInfo(uint32_t group_number, uint32_t peer_number, const SHA1Digest& hash);
void queueUpRequestChunk(uint32_t group_number, uint32_t peer_number, ContentHandle content, const SHA1Digest& hash);
struct SendingTransfer {
struct Info {
// copy of info data
// too large?
std::vector<uint8_t> info_data;
};
struct Chunk {
ContentHandle content;
uint64_t offset_into_file;
// or data?
// if memmapped, this would be just a pointer
};
std::variant<Info, Chunk> v;
float time_since_activity {0.f};
};
// key is groupid + peerid
entt::dense_map<uint64_t, entt::dense_map<uint8_t, SendingTransfer>> _sending_transfers;
struct ReceivingTransfer {
struct Info {
ContentHandle content;
// copy of info data
// too large?
std::vector<uint8_t> info_data;
};
struct Chunk {
ContentHandle content;
uint64_t offset_into_file;
// or data?
// if memmapped, this would be just a pointer
};
std::variant<Info, Chunk> v;
float time_since_activity {0.f};
};
// key is groupid + peerid
entt::dense_map<uint64_t, entt::dense_map<uint8_t, ReceivingTransfer>> _receiving_transfers;
// makes request rotate around open content
std::deque<ContentHandle> _queue_content_want_info;
std::deque<ContentHandle> _queue_content_want_chunk;
static uint64_t combineIds(const uint32_t group_number, const uint32_t peer_number);
void updateMessages(ContentHandle ce);
public: // TODO: config
bool _udp_only {false};
size_t _max_concurrent_in {8};
size_t _max_concurrent_out {4};
public:
SHA1_NGCFT1(
Contact3Registry& cr,
RegistryMessageModel& rmm,
NGCFT1& nft,
ToxContactModel2& tcm
);
void iterate(float delta);
protected: // events
bool onEvent(const Events::NGCFT1_recv_request&) override;
bool onEvent(const Events::NGCFT1_recv_init&) override;
bool onEvent(const Events::NGCFT1_recv_data&) override;
bool onEvent(const Events::NGCFT1_send_data&) override; // const?
bool onEvent(const Events::NGCFT1_recv_done&) override;
bool onEvent(const Events::NGCFT1_send_done&) override;
bool onEvent(const Events::NGCFT1_recv_message&) override;
bool sendFilePath(const Contact3 c, std::string_view file_name, std::string_view file_path) override;
};