#include "./sha1_ngcft1.hpp"

#include <solanaceae/toxcore/utils.hpp>

#include <solanaceae/contact/components.hpp>
#include <solanaceae/tox_contacts/components.hpp>
#include <solanaceae/message3/components.hpp>

#include <solanaceae/message3/file_r_file.hpp>

#include "./ft1_sha1_info.hpp"
#include "./hash_utils.hpp"

// std headers used directly in this file
#include <cassert>
#include <chrono>
#include <iostream>
#include <memory>
#include <tuple>
#include <variant>

namespace Components {

	// make the (global) FT1InfoSHA1 usable as a component
	using FT1InfoSHA1 = ::FT1InfoSHA1;

	struct FT1InfoSHA1Data {
		std::vector<uint8_t> data;
	};

	struct FT1InfoSHA1Hash {
		std::vector<uint8_t> hash;
	};
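
	// tracks which chunks of a file are already available locally and maps
	// chunk hash -> chunk index, so incoming requests can be answered quickly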
	struct FT1ChunkSHA1Cache {
		std::vector<bool> have_chunk;
		bool have_all {false};
		size_t have_count {0};
		entt::dense_map<SHA1Digest, size_t> chunk_hash_to_index;

		std::optional<size_t> chunkIndex(const SHA1Digest& hash) const;
		size_t chunkSize(size_t chunk_index) const;
		bool haveChunk(const SHA1Digest& hash) const;
	};

} // Components

std::optional<size_t> Components::FT1ChunkSHA1Cache::chunkIndex(const SHA1Digest& hash) const {
	const auto it = chunk_hash_to_index.find(hash);
	if (it != chunk_hash_to_index.cend()) {
		return it->second;
	} else {
		return std::nullopt;
	}
}

bool Components::FT1ChunkSHA1Cache::haveChunk(const SHA1Digest& hash) const {
	if (have_all) { // shortcut
		return true;
	}

	if (auto i_opt = chunkIndex(hash); i_opt.has_value()) {
		return have_chunk[i_opt.value()];
	}

	// hash is not part of this file
	return false;
}

static size_t chunkSize(const FT1InfoSHA1& sha1_info, size_t chunk_index) {
	if (chunk_index+1 == sha1_info.chunks.size()) {
		// last chunk, might be smaller than chunk_size
		return sha1_info.file_size - chunk_index * sha1_info.chunk_size;
	} else {
		return sha1_info.chunk_size;
	}
}
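
// e.g. (hypothetical numbers): with file_size = 1000 and chunk_size = 256 the
// info holds 4 chunk hashes; chunkSize(info, 3) for the last chunk yields
// 1000 - 3*256 = 232 bytes, while every other chunk is the full 256 bytes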

void SHA1_NGCFT1::queueUpRequestChunk(uint32_t group_number, uint32_t peer_number, Message3Handle msg, const SHA1Digest& hash) {
	// TODO: transfers
	for (auto& [i_g, i_p, i_m, i_h, i_t] : _queue_requested_chunk) {
		// if already in queue
		if (i_g == group_number && i_p == peer_number && i_h == hash) {
			// update timer
			i_t = 0.f;
			return;
		}
	}

	// not in queue yet
	_queue_requested_chunk.push_back(std::make_tuple(group_number, peer_number, msg, hash, 0.f));
}
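
// pack both ids into a single 64bit map key:
// high 32 bits = group_number, low 32 bits = peer_number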
uint64_t SHA1_NGCFT1::combineIds(const uint32_t group_number, const uint32_t peer_number) {
	return (uint64_t(group_number) << 32) | peer_number;
}

SHA1_NGCFT1::SHA1_NGCFT1(
	Contact3Registry& cr,
	RegistryMessageModel& rmm,
	NGCFT1& nft,
	ToxContactModel2& tcm
) :
	_cr(cr),
	_rmm(rmm),
	_nft(nft),
	_tcm(tcm)
{
	_nft.subscribe(this, NGCFT1_Event::recv_request);
	_nft.subscribe(this, NGCFT1_Event::recv_init);
	_nft.subscribe(this, NGCFT1_Event::recv_data);
	_nft.subscribe(this, NGCFT1_Event::send_data);
	_nft.subscribe(this, NGCFT1_Event::recv_done);
	_nft.subscribe(this, NGCFT1_Event::send_done);

	//_rmm.subscribe(this, RegistryMessageModel_Event::message_construct);
	//_rmm.subscribe(this, RegistryMessageModel_Event::message_updated);
	//_rmm.subscribe(this, RegistryMessageModel_Event::message_destroy);

	_rmm.subscribe(this, RegistryMessageModel_Event::send_file_path);
}
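
// called regularly with the elapsed time; ages out stale sending transfers
// and queued chunk requests (10s timeout) and, while below the concurrent
// transfer cap, starts the next queued chunk upload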
void SHA1_NGCFT1::iterate(float delta) {
	{ // timers
		// chunk sending
		for (auto peer_it = _sending_transfers.begin(); peer_it != _sending_transfers.end();) {
			for (auto it = peer_it->second.begin(); it != peer_it->second.end();) {
				it->second.time_since_activity += delta;

				// if we have not heard from the peer for 10sec, timeout
				if (it->second.time_since_activity >= 10.f) {
					std::cerr << "SHA1_NGCFT1 warning: sending chunk transfer timed out " << int(it->first) << "\n";
					it = peer_it->second.erase(it);
				} else {
					it++;
				}
			}

			if (peer_it->second.empty()) {
				// cleanup unused peers, might be too aggressive
				peer_it = _sending_transfers.erase(peer_it);
			} else {
				peer_it++;
			}
		}

		//for (auto it = _transfers_sending_chunk.begin(); it != _transfers_sending_chunk.end();) {
			//float& time_since_remove_activity = std::get<float>(*it);
			//time_since_remove_activity += delta;

			//// if we have not heard for 10sec, timeout
			//if (time_since_remove_activity >= 10.f) {
				//std::cerr << "SHA1 sending chunk transfer timed out " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
				//it = _transfers_sending_chunk.erase(it);
			//} else {
				//it++;
			//}
		//}

		// queued requests
		for (auto it = _queue_requested_chunk.begin(); it != _queue_requested_chunk.end();) {
			float& timer = std::get<float>(*it);
			timer += delta;

			if (timer >= 10.f) {
				it = _queue_requested_chunk.erase(it);
			} else {
				it++;
			}
		}
	}

	// if we have not reached the total cap for transfers
	// count running transfers
	size_t running_transfer_count {0};
	for (const auto& [_, transfers] : _sending_transfers) {
		running_transfer_count += transfers.size();
	}

	if (running_transfer_count < _max_concurrent_out) {
		// for each peer? transfer cap per peer?
#if 0
		// first check requests for info
		if (!_queue_requested_info.empty()) {
			// send init to _queue_requested_info
			const auto [group_number, peer_number] = _queue_requested_info.front();

			if (_tcl.getGroupPeerConnectionStatus(group_number, peer_number) != TOX_CONNECTION_NONE) {
				uint8_t transfer_id {0};

				if (_tcl.sendFT1InitPrivate(
					group_number, peer_number,
					NGC_FT1_file_kind::HASH_SHA1_INFO,
					_sha1_info_hash.data.data(), _sha1_info_hash.size(), // id (info hash)
					_sha1_info_data.size(), // "file_size"
					transfer_id
				)) {
					_transfers_requested_info.push_back({
						group_number, peer_number,
						transfer_id,
						0.f
					});

					_queue_requested_info.pop_front();
				}
			}
		} else
#endif
		if (!_queue_requested_chunk.empty()) { // then check for chunk requests
			const auto [group_number, peer_number, msg, chunk_hash, _] = _queue_requested_chunk.front();

			auto chunk_idx_opt = msg.get<Components::FT1ChunkSHA1Cache>().chunkIndex(chunk_hash);
			if (chunk_idx_opt.has_value()) {
				const auto& info = msg.get<Components::FT1InfoSHA1>();

				uint8_t transfer_id {0};
				if (_nft.NGC_FT1_send_init_private(
					group_number, peer_number,
					static_cast<uint32_t>(NGCFT1_file_kind::HASH_SHA1_CHUNK),
					chunk_hash.data.data(), chunk_hash.size(),
					chunkSize(info, chunk_idx_opt.value()),
					&transfer_id
				)) {
					_sending_transfers
						[combineIds(group_number, peer_number)]
						[transfer_id] // TODO: also save index?
						.v = SendingTransfer::Chunk{msg, chunk_idx_opt.value() * info.chunk_size};
				}
			}
			// remove from queue regardless
			_queue_requested_chunk.pop_front();
		}
	}
}
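
// a peer requested our info blob or a chunk by its sha1 hash; info is
// answered with a send_init right away, chunks go through the request queue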
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_request& e) {
	// only interested in sha1
	if (e.file_kind != NGCFT1_file_kind::HASH_SHA1_INFO && e.file_kind != NGCFT1_file_kind::HASH_SHA1_CHUNK) {
		return false;
	}

	//std::cout << "SHA1_NGCFT1: FT1_REQUEST fk:" << int(e.file_kind) << " [" << bin2hex({e.file_id, e.file_id+e.file_id_size}) << "]\n";

	if (e.file_kind == NGCFT1_file_kind::HASH_SHA1_INFO) {
		if (e.file_id_size != 20) {
			// error, a sha1 hash is always 20 bytes
			return false;
		}

		SHA1Digest info_hash{e.file_id, e.file_id_size};
		if (!_info_to_message.count(info_hash)) {
			// we don't know about this info hash
			return false;
		}

		auto msg = _info_to_message.at(info_hash);

		assert(msg.all_of<Components::FT1InfoSHA1Data>());

		// TODO: queue instead
		//queueUpRequestInfo(e.group_number, e.peer_number, info_hash);
		uint8_t transfer_id {0};
		_nft.NGC_FT1_send_init_private(
			e.group_number, e.peer_number,
			static_cast<uint32_t>(e.file_kind),
			e.file_id, e.file_id_size,
			msg.get<Components::FT1InfoSHA1Data>().data.size(),
			&transfer_id
		);

		_sending_transfers
			[combineIds(e.group_number, e.peer_number)]
			[transfer_id]
			.v = SendingTransfer::Info{msg.get<Components::FT1InfoSHA1Data>().data};
	} else if (e.file_kind == NGCFT1_file_kind::HASH_SHA1_CHUNK) {
		if (e.file_id_size != 20) {
			// error, a sha1 hash is always 20 bytes
			return false;
		}

		SHA1Digest chunk_hash{e.file_id, e.file_id_size};

		if (!_chunks.count(chunk_hash)) {
			// we don't know about this chunk
			return false;
		}

		auto msg = _chunks.at(chunk_hash);

		assert(msg.all_of<Components::FT1ChunkSHA1Cache>());

		if (!msg.get<Components::FT1ChunkSHA1Cache>().haveChunk(chunk_hash)) {
			// we don't have the chunk
			return false;
		}

		// queue good request
		queueUpRequestChunk(e.group_number, e.peer_number, msg, chunk_hash);
	} else {
		assert(false && "unhandled case");
	}

	return true;
}

bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_init& e) {
	// only interested in sha1
	if (e.file_kind != NGCFT1_file_kind::HASH_SHA1_INFO && e.file_kind != NGCFT1_file_kind::HASH_SHA1_CHUNK) {
		return false;
	}

	return false;
}

bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_data& e) {
	return false;
}
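
// the lower layer asks for the next bytes of an outgoing transfer; fill
// e.data either from the in-memory info blob or from the chunk's file region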
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_send_data& e) {
	if (!_sending_transfers.count(combineIds(e.group_number, e.peer_number))) {
		return false;
	}

	auto& peer = _sending_transfers.at(combineIds(e.group_number, e.peer_number));

	if (!peer.count(e.transfer_id)) {
		return false;
	}

	auto& transfer = peer.at(e.transfer_id);
	if (std::holds_alternative<SendingTransfer::Info>(transfer.v)) {
		auto& info_transfer = std::get<SendingTransfer::Info>(transfer.v);
		for (size_t i = 0; i < e.data_size && (i + e.data_offset) < info_transfer.info_data.size(); i++) {
			e.data[i] = info_transfer.info_data[i + e.data_offset];
		}

		if (e.data_offset + e.data_size >= info_transfer.info_data.size()) {
			// was last read (probably TODO: add transfer destruction event)
			peer.erase(e.transfer_id);
		}
	} else if (std::holds_alternative<SendingTransfer::Chunk>(transfer.v)) {
		auto& chunk_transfer = std::get<SendingTransfer::Chunk>(transfer.v);
		const auto data = chunk_transfer.msg.get<Message::Components::Transfer::File>()->read(chunk_transfer.offset_into_file + e.data_offset, e.data_size);

		// TODO: optimize
		for (size_t i = 0; i < e.data_size && i < data.size(); i++) {
			e.data[i] = data[i];
		}

		chunk_transfer.msg.get_or_emplace<Message::Components::Transfer::BytesSent>().total += data.size();
		//_rmm.throwEventUpdate(transfer); // should we?

		//if (e.data_offset + e.data_size >= *insert chunk size here*) {
			//// was last read (probably TODO: add transfer destruction event)
			//peer.erase(e.transfer_id);
		//}
	} else {
		assert(false && "not implemented?");
	}

	transfer.time_since_activity = 0.f;

	return true;
}
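
// done events: outgoing transfers are simply dropped from the
// _sending_transfers bookkeeping once the lower layer reports them finished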
bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_recv_done& e) {
	return false;
}

bool SHA1_NGCFT1::onEvent(const Events::NGCFT1_send_done& e) {
	if (!_sending_transfers.count(combineIds(e.group_number, e.peer_number))) {
		return false;
	}

	auto& peer_transfers = _sending_transfers.at(combineIds(e.group_number, e.peer_number));
	if (!peer_transfers.count(e.transfer_id)) {
		return false;
	}

	peer_transfers.erase(e.transfer_id);

	return true;
}
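
// hash a local file into a FT1InfoSHA1 (chunk hashes + info hash) and create
// the message entity representing the outgoing transfer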
bool SHA1_NGCFT1::sendFilePath(const Contact3 c, std::string_view file_name, std::string_view file_path) {
	if (
		// TODO: add support for offline queuing
		!_cr.all_of<Contact::Components::ToxGroupEphemeral>(c)
	) {
		return false;
	}

	std::cout << "SHA1_NGCFT1: got sendFilePath()\n";

	auto* reg_ptr = _rmm.get(c);
	if (reg_ptr == nullptr) {
		return false;
	}

	// TODO: rw
	auto file_impl = std::make_unique<FileRFile>(file_path);
	if (!file_impl->isGood()) {
		std::cerr << "SHA1_NGCFT1 error: failed opening file '" << file_path << "'!\n";
		return true;
	}

	// get current time as unix epoch utc in milliseconds
	uint64_t ts = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();

	// 1. build info by hashing all chunks

	FT1InfoSHA1 sha1_info;
	// build info
	sha1_info.file_name = file_name;
	sha1_info.file_size = file_impl->_file_size;

	{ // build chunks
		// HACK: load file fully
		// TODO: the speed is truly horrid
		const auto file_data = file_impl->read(0, file_impl->_file_size);
		size_t i = 0;
		for (; i + sha1_info.chunk_size < file_data.size(); i += sha1_info.chunk_size) {
			sha1_info.chunks.push_back(hash_sha1(file_data.data()+i, sha1_info.chunk_size));
		}

		if (i < file_data.size()) {
			// last, possibly partial, chunk
			sha1_info.chunks.push_back(hash_sha1(file_data.data()+i, file_data.size()-i));
		}
	}

	// 2. hash info
	std::vector<uint8_t> sha1_info_data;
	std::vector<uint8_t> sha1_info_hash;

	std::cout << "SHA1_NGCFT1 info is: \n" << sha1_info;
	sha1_info_data = sha1_info.toBuffer();
	std::cout << "SHA1_NGCFT1 sha1_info size: " << sha1_info_data.size() << "\n";
	sha1_info_hash = hash_sha1(sha1_info_data.data(), sha1_info_data.size());
	std::cout << "SHA1_NGCFT1 sha1_info_hash: " << bin2hex(sha1_info_hash) << "\n";

	const auto c_self = _cr.get<Contact::Components::Self>(c).self;
	if (!_cr.valid(c_self)) {
		std::cerr << "SHA1_NGCFT1 error: failed to get self!\n";
		return true;
	}

	const auto e = reg_ptr->create();
	reg_ptr->emplace<Message::Components::ContactTo>(e, c);
	reg_ptr->emplace<Message::Components::ContactFrom>(e, c_self);
	reg_ptr->emplace<Message::Components::Timestamp>(e, ts); // reactive?

	reg_ptr->emplace<Message::Components::Transfer::TagHaveAll>(e);
	reg_ptr->emplace<Message::Components::Transfer::TagSending>(e);

	reg_ptr->emplace<Components::FT1InfoSHA1>(e, sha1_info);
	reg_ptr->emplace<Components::FT1InfoSHA1Data>(e, sha1_info_data); // keep around? or file?
	reg_ptr->emplace<Components::FT1InfoSHA1Hash>(e, sha1_info_hash);
	{ // lookup tables and have
		auto& cc = reg_ptr->emplace<Components::FT1ChunkSHA1Cache>(e);
		cc.have_all = true;
		// skip have vec, since all
		//cc.have_chunk
		cc.have_count = sha1_info.chunks.size(); // need?

		_info_to_message[sha1_info_hash] = {*reg_ptr, e};
		for (size_t i = 0; i < sha1_info.chunks.size(); i++) {
			_chunks[sha1_info.chunks[i]] = {*reg_ptr, e};
			cc.chunk_hash_to_index[sha1_info.chunks[i]] = i;
		}
	}

	//reg_ptr->emplace<Message::Components::Transfer::FileKind>(e, file_kind);
	// file id would be sha1_info hash or something
	//reg_ptr->emplace<Message::Components::Transfer::FileID>(e, file_id);

	{ // file info
		auto& file_info = reg_ptr->emplace<Message::Components::Transfer::FileInfo>(e);
		file_info.file_list.emplace_back() = {std::string{file_name}, file_impl->_file_size};
		file_info.total_size = file_impl->_file_size;

		reg_ptr->emplace<Message::Components::Transfer::FileInfoLocal>(e, std::vector{std::string{file_path}});
	}

	reg_ptr->emplace<Message::Components::Transfer::BytesSent>(e);

	// TODO: determine if this is true
	//reg_ptr->emplace<Message::Components::Transfer::TagPaused>(e);

	// TODO: ft1 specific comp
	reg_ptr->emplace<Message::Components::Transfer::File>(e, std::move(file_impl));

#if 0
	const auto friend_number = _cr.get<Contact::Components::ToxFriendEphemeral>(c).friend_number;
	const auto&& [transfer_id, err] = _t.toxFileSend(friend_number, file_kind, file_impl->_file_size, file_id, file_name);
	if (err == TOX_ERR_FILE_SEND_OK) {
		reg_ptr->emplace<Message::Components::Transfer::ToxTransferFriend>(e, friend_number, transfer_id.value());
		// TODO: add tag signifying init sent status?

		toxFriendLookupAdd({*reg_ptr, e});
	} // else queue?
#endif

	_rmm.throwEventConstruct(*reg_ptr, e);

	return true;
}