ft filekind refactor + connection state drafting

This commit is contained in:
Green Sky 2023-01-19 14:17:44 +01:00
parent 9f8f681e18
commit 294a427eb0
No known key found for this signature in database
7 changed files with 59 additions and 28 deletions

@ -1 +1 @@
Subproject commit bbc96bd776a29a6581545d60b5cb6e60e7453f1f Subproject commit a7b5c3136958f12fbef747c5d2b9a45ae3fa0b4b

@ -1 +1 @@
Subproject commit 88ffd1a6495aa59dcd7144b552a91df0bdb5b6d9 Subproject commit 63f993a3314c143b02996adc0ff61d0f3bda8743

View File

@ -49,6 +49,17 @@ struct FTInfoSHA1 {
std::vector<uint8_t> toBuffer(void) const; std::vector<uint8_t> toBuffer(void) const;
void fromBuffer(const std::vector<uint8_t>& buffer); void fromBuffer(const std::vector<uint8_t>& buffer);
}; };
std::ostream& operator<<(std::ostream& out, const FTInfoSHA1& v);
// TODO: use
// Draft v2 of the SHA1 file-transfer info record (not yet wired up; see TODO above).
// Unlike FTInfoSHA1, it carries a list of file names — presumably to allow
// multi-file transfers in one info record; TODO confirm intended semantics.
struct FTInfoSHA1v2 {
// names of the transferred files (UTF-8 assumed — TODO confirm encoding)
std::vector<std::string> file_names;
// total payload size in bytes; zero until filled in
uint64_t file_size {0};
// size of each transfer chunk in bytes
uint32_t chunk_size {128*1024}; // 128KiB for now
// per-chunk SHA1 digests; presumably one entry per chunk_size slice of the
// payload (last chunk may be short) — confirm against the (de)serializer
std::vector<SHA1Digest> chunks;
// serialize this record into a flat byte buffer (defined out of line)
std::vector<uint8_t> toBuffer(void) const;
// parse a buffer previously produced by toBuffer() into this record
void fromBuffer(const std::vector<uint8_t>& buffer);
};
std::ostream& operator<<(std::ostream& out, const FTInfoSHA1& v); std::ostream& operator<<(std::ostream& out, const FTInfoSHA1& v);

View File

@ -50,13 +50,13 @@ bool ReceiveStartSHA1::iterate(float delta) {
// TODO: select random and try, not blas // TODO: select random and try, not blas
// ... and we are blasing // ... and we are blasing
_tcl.forEachGroup([this](const uint32_t group_number) { _tcl.forEachGroup([this](const uint32_t group_number) {
_tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number) { _tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number, Tox_Connection connection_status) {
_tcl.sendFT1RequestPrivate( _tcl.sendFT1RequestPrivate(
group_number, peer_number, group_number, peer_number,
NGC_FT1_file_kind::HASH_SHA1_INFO, NGC_FT1_file_kind::HASH_SHA1_INFO,
_sha1_info_hash.data.data(), _sha1_info_hash.size() _sha1_info_hash.data.data(), _sha1_info_hash.size()
); );
std::cout << "ReceiveStartSHA1 sendig info request to " << group_number << ":" << peer_number << "\n"; std::cout << "ReceiveStartSHA1 sendig info request to " << group_number << ":" << peer_number << " over " << (connection_status == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp") <<"\n";
}); });
}); });
} }
@ -108,7 +108,7 @@ std::unique_ptr<StateI> ReceiveStartSHA1::nextState(void) {
} }
} }
std::cout << "ReceiveStartSHA1 have " << tmp_have_count << "/" << sha1_info.chunks.size() << " chunks (" << float(tmp_have_count)/sha1_info.chunks.size() << "%)\n"; std::cout << "ReceiveStartSHA1 have " << tmp_have_count << "/" << sha1_info.chunks.size() << " chunks (" << float(tmp_have_count)/sha1_info.chunks.size() * 100.f << "%)\n";
} }
std::cout << "ReceiveStartSHA1 switching state to SHA1\n"; std::cout << "ReceiveStartSHA1 switching state to SHA1\n";

View File

@ -6,6 +6,7 @@
#include <iostream> #include <iostream>
#include <tuple> #include <tuple>
#include <random>
namespace States { namespace States {
@ -174,27 +175,46 @@ bool SHA1::iterate(float delta) {
if (!_have_all && !_chunk_want_queue.empty() && _chunks_requested.size() + _transfers_receiving_chunk.size() < _max_concurrent_in) { if (!_have_all && !_chunk_want_queue.empty() && _chunks_requested.size() + _transfers_receiving_chunk.size() < _max_concurrent_in) {
// send out request, no burst tho // send out request, no burst tho
std::vector<std::pair<uint32_t, uint32_t>> target_peers; std::vector<std::pair<uint32_t, uint32_t>> target_peers;
_tcl.forEachGroup([&target_peers, this](uint32_t group_number) { std::vector<std::pair<uint32_t, uint32_t>> target_peers_tcp;
_tcl.forEachGroupPeer(group_number, [&target_peers, group_number](uint32_t peer_number) { _tcl.forEachGroup([&target_peers, &target_peers_tcp, this](uint32_t group_number) {
_tcl.forEachGroupPeer(group_number, [&target_peers, &target_peers_tcp, group_number](uint32_t peer_number, Tox_Connection connection_status) {
if (connection_status == Tox_Connection::TOX_CONNECTION_UDP) {
target_peers.push_back({group_number, peer_number}); target_peers.push_back({group_number, peer_number});
} else {
target_peers_tcp.push_back({group_number, peer_number});
}
}); });
}); });
if (!target_peers.empty()) { if (!(target_peers.empty() && target_peers_tcp.empty())) {
//if (_distrib.max() != target_peers.size()) { uint32_t group_number;
//std::uniform_int_distribution<size_t> new_dist{0, target_peers.size()-1}; uint32_t peer_number;
//_distrib.param(new_dist.param());
//}
//size_t target_index = _distrib(_rng); if (!target_peers.empty() && !target_peers_tcp.empty()) { // have udp & tcp peers
// 75% chance to roll udp over tcp
if (std::generate_canonical<float, 10>(_rng) >= 0.25f) {
//std::cout << "rolled upd\n";
size_t target_index = _rng()%target_peers.size(); size_t target_index = _rng()%target_peers.size();
auto [group_number, peer_number] = target_peers.at(target_index); std::tie(group_number, peer_number) = target_peers.at(target_index);
} else { // tcp
//std::cout << "rolled tcp\n";
size_t target_index = _rng()%target_peers_tcp.size();
std::tie(group_number, peer_number) = target_peers_tcp.at(target_index);
}
} else if (!target_peers.empty()) { // udp
size_t target_index = _rng()%target_peers.size();
std::tie(group_number, peer_number) = target_peers.at(target_index);
} else { // tcp
size_t target_index = _rng()%target_peers_tcp.size();
std::tie(group_number, peer_number) = target_peers_tcp.at(target_index);
}
size_t chunk_index = _chunk_want_queue.front(); size_t chunk_index = _chunk_want_queue.front();
_chunks_requested[chunk_index] = 0.f; _chunks_requested[chunk_index] = 0.f;
_chunk_want_queue.pop_front(); _chunk_want_queue.pop_front();
_tcl.sendFT1RequestPrivate(group_number, peer_number, NGC_FT1_file_kind::HASH_SHA1_CHUNK, _sha1_info.chunks[chunk_index].data.data(), 20); _tcl.sendFT1RequestPrivate(group_number, peer_number, NGC_FT1_file_kind::HASH_SHA1_CHUNK, _sha1_info.chunks[chunk_index].data.data(), 20);
//std::cout << "sent request " << group_number << ":" << peer_number << "\n";
} }
} }

View File

@ -274,8 +274,8 @@ void ToxClient::onToxGroupInvite(uint32_t friend_number, const uint8_t* invite_d
void ToxClient::onToxGroupPeerJoin(uint32_t group_number, uint32_t peer_id) { void ToxClient::onToxGroupPeerJoin(uint32_t group_number, uint32_t peer_id) {
std::cout << "TCL group peer join " << group_number << ":" << peer_id << "\n"; std::cout << "TCL group peer join " << group_number << ":" << peer_id << "\n";
_groups[group_number].emplace(peer_id); //_groups[group_number].emplace(peer_id);
//_groups[group_number][peer_id] = tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr); _groups[group_number][peer_id] = tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr);
_tox_profile_dirty = true; _tox_profile_dirty = true;
} }
@ -298,7 +298,7 @@ StateI& ToxClient::getState(void) {
return *_state.get(); return *_state.get();
} }
bool ToxClient::sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size) { bool ToxClient::sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size) {
NGC_FT1_send_request_private( NGC_FT1_send_request_private(
_tox, _ft1_ctx, _tox, _ft1_ctx,
group_number, peer_number, group_number, peer_number,
@ -310,7 +310,7 @@ bool ToxClient::sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_numbe
return true; return true;
} }
bool ToxClient::sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id) { bool ToxClient::sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id) {
return NGC_FT1_send_init_private( return NGC_FT1_send_init_private(
_tox, _ft1_ctx, _tox, _ft1_ctx,
group_number, peer_number, group_number, peer_number,

View File

@ -43,8 +43,8 @@ struct ToxClient {
template<typename FN> template<typename FN>
void forEachGroupPeer(uint32_t group_number, FN&& fn) const { void forEachGroupPeer(uint32_t group_number, FN&& fn) const {
if (_groups.count(group_number)) { if (_groups.count(group_number)) {
for (const uint32_t peer_number : _groups.at(group_number)) { for (const auto [peer_number, connection_status] : _groups.at(group_number)) {
fn(peer_number); fn(peer_number, connection_status);
} }
} }
} }
@ -63,8 +63,8 @@ struct ToxClient {
StateI& getState(void); // public accessor for callbacks StateI& getState(void); // public accessor for callbacks
public: // FT1 sends public: // FT1 sends
bool sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size); bool sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size);
bool sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id); bool sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id);
private: private:
void saveToxProfile(void); void saveToxProfile(void);
@ -86,7 +86,7 @@ struct ToxClient {
std::unique_ptr<StateI> _state; std::unique_ptr<StateI> _state;
// key groupid, value set of peer ids // key groupid, value set of peer ids
std::map<uint32_t, std::set<uint32_t>> _groups; //std::map<uint32_t, std::set<uint32_t>> _groups;
// std::map<uint32_t, std::map<uint32_t, Tox_Connection>> _groups; std::map<uint32_t, std::map<uint32_t, Tox_Connection>> _groups;
}; };