ft filekind refactor + connection state drafting

parent 9f8f681e18
commit 294a427eb0

external/tox_ngc_ft1/tox_ngc_ft1 (vendored)
@@ -1 +1 @@
-Subproject commit bbc96bd776a29a6581545d60b5cb6e60e7453f1f
+Subproject commit a7b5c3136958f12fbef747c5d2b9a45ae3fa0b4b

external/toxcore/c-toxcore (vendored)
@@ -1 +1 @@
-Subproject commit 88ffd1a6495aa59dcd7144b552a91df0bdb5b6d9
+Subproject commit 63f993a3314c143b02996adc0ff61d0f3bda8743

@@ -49,6 +49,17 @@ struct FTInfoSHA1 {
     std::vector<uint8_t> toBuffer(void) const;
     void fromBuffer(const std::vector<uint8_t>& buffer);
 };
 
 std::ostream& operator<<(std::ostream& out, const FTInfoSHA1& v);
 
+// TODO: use
+struct FTInfoSHA1v2 {
+    std::vector<std::string> file_names;
+    uint64_t file_size {0};
+    uint32_t chunk_size {128*1024}; // 128KiB for now
+    std::vector<SHA1Digest> chunks;
+
+    std::vector<uint8_t> toBuffer(void) const;
+    void fromBuffer(const std::vector<uint8_t>& buffer);
+};
+std::ostream& operator<<(std::ostream& out, const FTInfoSHA1& v);
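
The drafted FTInfoSHA1v2 (still marked TODO) carries a list of file names, a total file_size, an explicit chunk_size, and one SHA1 digest per chunk. A tiny standalone sketch (a hypothetical helper, not part of this commit) of how the expected number of chunk digests follows from file_size and chunk_size:

#include <cstddef>
#include <cstdint>

// Hypothetical helper: number of chunk digests implied by file_size and chunk_size.
// The last chunk may be shorter than chunk_size, hence the ceiling division.
size_t expected_chunk_count(uint64_t file_size, uint32_t chunk_size) {
    return static_cast<size_t>((file_size + chunk_size - 1) / chunk_size); // assumes chunk_size > 0
}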

@@ -50,13 +50,13 @@ bool ReceiveStartSHA1::iterate(float delta) {
         // TODO: select random and try, not blas
         // ... and we are blasing
         _tcl.forEachGroup([this](const uint32_t group_number) {
-            _tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number) {
+            _tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number, Tox_Connection connection_status) {
                 _tcl.sendFT1RequestPrivate(
                     group_number, peer_number,
                     NGC_FT1_file_kind::HASH_SHA1_INFO,
                     _sha1_info_hash.data.data(), _sha1_info_hash.size()
                 );
-                std::cout << "ReceiveStartSHA1 sendig info request to " << group_number << ":" << peer_number << "\n";
+                std::cout << "ReceiveStartSHA1 sendig info request to " << group_number << ":" << peer_number << " over " << (connection_status == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp") <<"\n";
             });
         });
     }

@@ -108,7 +108,7 @@ std::unique_ptr<StateI> ReceiveStartSHA1::nextState(void) {
             }
         }
 
-        std::cout << "ReceiveStartSHA1 have " << tmp_have_count << "/" << sha1_info.chunks.size() << " chunks (" << float(tmp_have_count)/sha1_info.chunks.size() << "%)\n";
+        std::cout << "ReceiveStartSHA1 have " << tmp_have_count << "/" << sha1_info.chunks.size() << " chunks (" << float(tmp_have_count)/sha1_info.chunks.size() * 100.f << "%)\n";
     }
 
     std::cout << "ReceiveStartSHA1 switching state to SHA1\n";

@@ -149,7 +149,7 @@ bool ReceiveStartSHA1::onFT1ReceiveInitSHA1Info(uint32_t group_number, uint32_t
     _sha1_info_data.resize(file_size);
 
     _transfer = std::make_tuple(group_number, peer_number, transfer_id, 0.f);
-    std::cout << "ReceiveStartSHA1 accepted info transfer" << group_number << ":" << peer_number << "." << int(transfer_id) << "\n";
+    std::cout << "ReceiveStartSHA1 accepted info transfer " << group_number << ":" << peer_number << "." << int(transfer_id) << "\n";
 
     // accept
     return true;

@@ -6,6 +6,7 @@
 
 #include <iostream>
 #include <tuple>
+#include <random>
 
 namespace States {
 

@@ -174,27 +175,46 @@ bool SHA1::iterate(float delta) {
     if (!_have_all && !_chunk_want_queue.empty() && _chunks_requested.size() + _transfers_receiving_chunk.size() < _max_concurrent_in) {
         // send out request, no burst tho
         std::vector<std::pair<uint32_t, uint32_t>> target_peers;
-        _tcl.forEachGroup([&target_peers, this](uint32_t group_number) {
-            _tcl.forEachGroupPeer(group_number, [&target_peers, group_number](uint32_t peer_number) {
-                target_peers.push_back({group_number, peer_number});
+        std::vector<std::pair<uint32_t, uint32_t>> target_peers_tcp;
+        _tcl.forEachGroup([&target_peers, &target_peers_tcp, this](uint32_t group_number) {
+            _tcl.forEachGroupPeer(group_number, [&target_peers, &target_peers_tcp, group_number](uint32_t peer_number, Tox_Connection connection_status) {
+                if (connection_status == Tox_Connection::TOX_CONNECTION_UDP) {
+                    target_peers.push_back({group_number, peer_number});
+                } else {
+                    target_peers_tcp.push_back({group_number, peer_number});
+                }
             });
         });
 
-        if (!target_peers.empty()) {
-            //if (_distrib.max() != target_peers.size()) {
-                //std::uniform_int_distribution<size_t> new_dist{0, target_peers.size()-1};
-                //_distrib.param(new_dist.param());
-            //}
-            //size_t target_index = _distrib(_rng);
-            size_t target_index = _rng()%target_peers.size();
-            auto [group_number, peer_number] = target_peers.at(target_index);
+        if (!(target_peers.empty() && target_peers_tcp.empty())) {
+            uint32_t group_number;
+            uint32_t peer_number;
+
+            if (!target_peers.empty() && !target_peers_tcp.empty()) { // have udp & tcp peers
+                // 75% chance to roll udp over tcp
+                if (std::generate_canonical<float, 10>(_rng) >= 0.25f) {
+                    //std::cout << "rolled upd\n";
+                    size_t target_index = _rng()%target_peers.size();
+                    std::tie(group_number, peer_number) = target_peers.at(target_index);
+                } else { // tcp
+                    //std::cout << "rolled tcp\n";
+                    size_t target_index = _rng()%target_peers_tcp.size();
+                    std::tie(group_number, peer_number) = target_peers_tcp.at(target_index);
+                }
+            } else if (!target_peers.empty()) { // udp
+                size_t target_index = _rng()%target_peers.size();
+                std::tie(group_number, peer_number) = target_peers.at(target_index);
+            } else { // tcp
+                size_t target_index = _rng()%target_peers_tcp.size();
+                std::tie(group_number, peer_number) = target_peers_tcp.at(target_index);
+            }
 
             size_t chunk_index = _chunk_want_queue.front();
             _chunks_requested[chunk_index] = 0.f;
             _chunk_want_queue.pop_front();
 
             _tcl.sendFT1RequestPrivate(group_number, peer_number, NGC_FT1_file_kind::HASH_SHA1_CHUNK, _sha1_info.chunks[chunk_index].data.data(), 20);
             //std::cout << "sent request " << group_number << ":" << peer_number << "\n";
         }
     }
 
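
For reference, the new selection above splits candidates into directly connected (UDP) peers and TCP-connected peers, then picks from the UDP pool 75% of the time when both pools are non-empty. A condensed standalone sketch of that logic (an illustration with assumed types; std::minstd_rand stands in for the client's _rng engine, which is not shown in this hunk):

#include <cstdint>
#include <random>
#include <utility>
#include <vector>

using Peer = std::pair<uint32_t, uint32_t>; // {group_number, peer_number}

// Pick a request target, preferring UDP peers 75% of the time.
// Precondition: at least one of the two pools is non-empty.
Peer pickTarget(const std::vector<Peer>& udp_peers, const std::vector<Peer>& tcp_peers, std::minstd_rand& rng) {
    const bool roll_udp =
        !udp_peers.empty() &&
        (tcp_peers.empty() || std::generate_canonical<float, 10>(rng) >= 0.25f);
    const auto& pool = roll_udp ? udp_peers : tcp_peers;
    return pool.at(rng() % pool.size());
}

Biasing requests toward UDP peers presumably keeps most chunk traffic off TCP relays while still making use of peers that are only reachable over TCP.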

@@ -274,8 +274,8 @@ void ToxClient::onToxGroupInvite(uint32_t friend_number, const uint8_t* invite_d
 
 void ToxClient::onToxGroupPeerJoin(uint32_t group_number, uint32_t peer_id) {
     std::cout << "TCL group peer join " << group_number << ":" << peer_id << "\n";
-    _groups[group_number].emplace(peer_id);
-    //_groups[group_number][peer_id] = tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr);
+    //_groups[group_number].emplace(peer_id);
+    _groups[group_number][peer_id] = tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr);
     _tox_profile_dirty = true;
 }
 

@@ -298,7 +298,7 @@ StateI& ToxClient::getState(void) {
     return *_state.get();
 }
 
-bool ToxClient::sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size) {
+bool ToxClient::sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size) {
     NGC_FT1_send_request_private(
         _tox, _ft1_ctx,
         group_number, peer_number,

@@ -310,7 +310,7 @@ bool ToxClient::sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_numbe
     return true;
 }
 
-bool ToxClient::sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id) {
+bool ToxClient::sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id) {
     return NGC_FT1_send_init_private(
         _tox, _ft1_ctx,
         group_number, peer_number,

@@ -43,8 +43,8 @@ struct ToxClient {
         template<typename FN>
         void forEachGroupPeer(uint32_t group_number, FN&& fn) const {
             if (_groups.count(group_number)) {
-                for (const uint32_t peer_number : _groups.at(group_number)) {
-                    fn(peer_number);
+                for (const auto [peer_number, connection_status] : _groups.at(group_number)) {
+                    fn(peer_number, connection_status);
                 }
             }
         }
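
With this change forEachGroupPeer iterates a peer-id to connection-status map (see the _groups change further down) and forwards both values to the callback. A self-contained sketch of that iteration pattern (illustrative only; the enum below is a stand-in for toxcore's Tox_Connection):

#include <cstdint>
#include <iostream>
#include <map>

// Stand-in for toxcore's Tox_Connection.
enum Tox_Connection { TOX_CONNECTION_NONE, TOX_CONNECTION_TCP, TOX_CONNECTION_UDP };

int main(void) {
    // key: group id, value: map of peer id -> connection status
    std::map<uint32_t, std::map<uint32_t, Tox_Connection>> groups;
    groups[0][3] = TOX_CONNECTION_UDP;
    groups[0][7] = TOX_CONNECTION_TCP;

    for (const auto& [group_number, peers] : groups) {
        for (const auto& [peer_number, connection_status] : peers) {
            std::cout << group_number << ":" << peer_number
                << (connection_status == TOX_CONNECTION_UDP ? " (udp)\n" : " (tcp)\n");
        }
    }
    return 0;
}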

@@ -63,8 +63,8 @@ struct ToxClient {
         StateI& getState(void); // public accessor for callbacks
 
     public: // FT1 sends
-        bool sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size);
-        bool sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, NGC_FT1_file_kind file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id);
+        bool sendFT1RequestPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size);
+        bool sendFT1InitPrivate(uint32_t group_number, uint32_t peer_number, uint32_t file_kind, const uint8_t* file_id, size_t file_id_size, uint64_t file_size, uint8_t& transfer_id);
 
     private:
         void saveToxProfile(void);

@@ -86,7 +86,7 @@ struct ToxClient {
         std::unique_ptr<StateI> _state;
 
        // key groupid, value set of peer ids
-        std::map<uint32_t, std::set<uint32_t>> _groups;
-        // std::map<uint32_t, std::map<uint32_t, Tox_Connection>> _groups;
+        //std::map<uint32_t, std::set<uint32_t>> _groups;
+        std::map<uint32_t, std::map<uint32_t, Tox_Connection>> _groups;
 };
 