Compare commits

...

2 Commits

7 changed files with 57 additions and 35 deletions

View File

@@ -13,7 +13,7 @@ this will print this tool's tox_id and the generated info_hash
 this will first download the info using the info_hash and then all the file chunks listed in the info
 while simultaneously resharing already downloaded chunks and info (swarming)
-to increase the number of parallel chunk down-/uploads, you can raise the -I and -O options. the default for -I is 32 and for -O 16, which are relatively low numbers; in practice this can get you up to ~400KiB/s.
+to increase the number of parallel chunk down-/uploads, you can raise the -I and -O options. the default for -I is 32 and for -O 16, which are relatively low numbers; in practice this can get you up to ~700KiB/s.
 ## Usage
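
For intuition, here is a minimal C++ sketch of how per-direction caps like -I/-O typically gate new chunk transfers; the struct and function are illustrative assumptions, not the tool's actual code:

```cpp
#include <cstddef>

// Illustrative only: per-direction transfer caps as configured by -I / -O.
struct TransferLimits {
    size_t max_concurrent_in {32};  // -I: parallel incoming chunk transfers
    size_t max_concurrent_out {16}; // -O: parallel outgoing chunk transfers
};

// A new chunk request is only issued while below the incoming cap;
// more slots means more chunks in flight, and thus higher throughput.
bool canRequestAnotherChunk(size_t active_incoming, const TransferLimits& limits) {
    return active_incoming < limits.max_concurrent_in;
}
```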

View File

@@ -57,15 +57,15 @@ struct CommandLine {
     // advanced FT1:
     // --ft_ack_per_packet
-    size_t ft_acks_per_packet {5};
+    size_t ft_acks_per_packet {3};
     // --ft_init_retry_timeout_after
-    float ft_init_retry_timeout_after {10.f};
+    float ft_init_retry_timeout_after {5.f};
     // --ft_sending_resend_without_ack_after
-    float ft_sending_resend_without_ack_after {5.f};
+    float ft_sending_resend_without_ack_after {3.f};
     // --ft_sending_give_up_after
     float ft_sending_give_up_after {30.f};
     // --ft_packet_window_size
-    size_t ft_packet_window_size {5};
+    size_t ft_packet_window_size {8};
     // ---- TODO ----
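
For context, a minimal sketch of how resend/give-up timeouts of this kind typically drive a sender; the struct, function, and helper named here are assumptions for illustration, not the FT1 implementation:

```cpp
// Illustrative only: applying ft_sending_resend_without_ack_after and
// ft_sending_give_up_after style timeouts to a single sending transfer.
struct SendingTransfer {
    float time_since_last_ack {0.f};
};

// Advance the timer by the frame delta; returns true if the transfer
// should be abandoned because the peer stopped acknowledging.
bool tickSendingTransfer(SendingTransfer& t, float delta,
                         float resend_after, float give_up_after) {
    t.time_since_last_ack += delta;
    if (t.time_since_last_ack >= give_up_after) {
        return true; // no ack for too long, give up
    }
    if (t.time_since_last_ack >= resend_after) {
        // resendUnackedPackets(t); // hypothetical helper
    }
    return false;
}
```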

View File

@@ -47,22 +47,25 @@ bool ReceiveStartSHA1::iterate(float delta) {
         }
     } else if (_time_since_last_request >= 15.f) { // blast every 15sec
         _time_since_last_request = 0.f;
+        // TODO: select random and try, not blast
+        // ... and we are blasting
         _tcl.forEachGroup([this](const uint32_t group_number) {
-            _tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number, Tox_Connection connection_status) {
+            _tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number) {
                 _tcl.sendFT1RequestPrivate(
                     group_number, peer_number,
                     NGC_FT1_file_kind::HASH_SHA1_INFO,
                     _sha1_info_hash.data.data(), _sha1_info_hash.size()
                 );
-                std::cout << "ReceiveStartSHA1 sending info request to " << group_number << ":" << peer_number << " over " << (connection_status == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp") << "\n";
+                std::cout
+                    << "ReceiveStartSHA1 sending info request to "
+                    << group_number << ":" << peer_number
+                    << " over " << (_tcl.getGroupPeerConnectionStatus(group_number, peer_number) == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp")
+                    << "\n"
+                ;
             });
         });
     }
     // if no transfer, request from random peer (equal dist!!)
+    // TODO: return true if done
     return _done;
 }
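
The "select random and try, not blast" TODO could look roughly like the following body sketch for ReceiveStartSHA1::iterate, collecting peers and then asking one uniformly chosen peer; everything beyond the calls already visible in the diff is an assumption:

```cpp
// Sketch only (needs <random>, <utility>, <vector>): gather all known
// (group, peer) pairs, then request the info from a single random peer.
std::vector<std::pair<uint32_t, uint32_t>> peers;
_tcl.forEachGroup([&](uint32_t group_number) {
    _tcl.forEachGroupPeer(group_number, [&](uint32_t peer_number) {
        peers.push_back({group_number, peer_number});
    });
});

if (!peers.empty()) {
    static std::minstd_rand rng {1337}; // same RNG family the SHA1 state uses
    std::uniform_int_distribution<size_t> dist(0, peers.size() - 1);
    const auto [group_number, peer_number] = peers[dist(rng)];
    _tcl.sendFT1RequestPrivate(
        group_number, peer_number,
        NGC_FT1_file_kind::HASH_SHA1_INFO,
        _sha1_info_hash.data.data(), _sha1_info_hash.size()
    );
}
```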

View File

@@ -30,6 +30,7 @@ SHA1::SHA1(
 {
     assert(_have_chunk.size() == _sha1_info.chunks.size());
+    _udp_only = cl.request_only_from_udp_peer;
     _max_concurrent_in = cl.max_incoming_transfers;
     _max_concurrent_out = cl.max_outgoing_transfers;
@@ -194,12 +195,21 @@ bool SHA1::iterate(float delta) {
         _peer_speed_mesurement_interval_timer = 0.f; // we lose some time here, but precision is not the issue
         _peer_in_bytes_array_index = (_peer_in_bytes_array_index + 1) % _peer_speed_mesurement_interval_count;
-        for (const auto& [peer, array] : _peer_in_bytes_array) {
+        //for (const auto& [peer, array] : _peer_in_bytes_array) {
+        for (auto it = _peer_in_bytes_array.begin(); it != _peer_in_bytes_array.end();) {
+            const auto& [peer, array] = *it;
             float avg {0.f};
             for (size_t i = 0; i < array.size(); i++) {
                 avg += array[i];
             }
+            if (avg == 0.f || _tcl.getGroupPeerConnectionStatus(peer.first, peer.second) == Tox_Connection::TOX_CONNECTION_NONE) {
+                _peer_in_speed.erase(peer);
+                it = _peer_in_bytes_array.erase(it);
+                continue;
+            }
             // if 6 measurements every 0.5sec -> avg is over 3sec -> /3 for /s
             avg /= _peer_speed_mesurement_interval * _peer_speed_mesurement_interval_count;
@@ -207,12 +217,14 @@ bool SHA1::iterate(float delta) {
             _peer_in_bytes_array[peer][_peer_in_bytes_array_index] = 0;
             _peer_in_speed[peer] = avg;
+            it++;
         }
         _peer_in_targets.clear();
         _tcl.forEachGroup([this](uint32_t group_number) {
-            _tcl.forEachGroupPeer(group_number, [group_number, this](uint32_t peer_number, Tox_Connection connection_status) {
-                if (connection_status == Tox_Connection::TOX_CONNECTION_UDP || !_udp_only) {
+            _tcl.forEachGroupPeer(group_number, [group_number, this](uint32_t peer_number) {
+                if (!_udp_only || _tcl.getGroupPeerConnectionStatus(group_number, peer_number) == Tox_Connection::TOX_CONNECTION_UDP) {
                     _peer_in_targets.push_back({group_number, peer_number});
                 }
             });
@@ -278,7 +290,12 @@ bool SHA1::iterate(float delta) {
         std::cout << "SHA1 cwq:" << _chunk_want_queue.size() << " cwqr:" << _chunks_requested.size() << " trc:" << _transfers_receiving_chunk.size() << " tsc:" << _transfers_sending_chunk.size() << "\n";
         std::cout << "SHA1 peer down speeds:\n";
         for (const auto& [peer, speed] : _peer_in_speed) {
-            std::cout << " " << peer.first << ":" << peer.second << "(" << _tcl.getGroupPeerName(peer.first, peer.second) << ")" << "\t" << speed / 1024.f << "KiB/s\n";
+            std::cout
+                << " " << peer.first << ":" << peer.second
+                << " " << (_tcl.getGroupPeerConnectionStatus(peer.first, peer.second) == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp")
+                << " (" << _tcl.getGroupPeerName(peer.first, peer.second) << ")"
+                << " " << speed / 1024.f << "KiB/s\n"
+            ;
         }
     }
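
To make the averaging comment concrete: with 6 buckets sampled every 0.5s, the window covers 3s of traffic, so dividing the byte sum by interval × count yields bytes per second. A standalone sketch of that calculation (the data values are made up):

```cpp
#include <array>
#include <cstdint>
#include <iostream>

int main(void) {
    // 6 per-interval byte counters, one sample every 0.5s -> a 3s window
    const float interval {0.5f};
    std::array<uint64_t, 6> bytes_per_interval {512*1024, 300*1024, 0, 128*1024, 64*1024, 20*1024};

    float sum {0.f};
    for (const auto b : bytes_per_interval) {
        sum += b;
    }

    // divide by the window length (interval * count) to get bytes/s
    const float bytes_per_second = sum / (interval * bytes_per_interval.size());
    std::cout << bytes_per_second / 1024.f << " KiB/s\n";
    return 0;
}
```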

View File

@@ -57,9 +57,13 @@ struct SHA1 final : public StateI {
     size_t chunkSize(size_t chunk_index) const;
     bool haveChunk(const SHA1Digest& hash) const;
-    private:
+    public: // config
     bool _udp_only {false};
+    size_t _max_concurrent_in {32};
+    size_t _max_concurrent_out {16};
+    private:
     mio::mmap_sink _file_map; // writable if not all
     const FTInfoSHA1 _sha1_info;
     const std::vector<uint8_t> _sha1_info_data;
@@ -79,11 +83,7 @@ struct SHA1 final : public StateI {
     // chunk_index -> time since request
     std::map<size_t, float> _chunks_requested;
-    size_t _max_concurrent_in {32};
-    size_t _max_concurrent_out {16};
     std::minstd_rand _rng {1337};
-    std::uniform_int_distribution<size_t> _distrib;
     std::unordered_map<SHA1Digest, size_t> _chunk_hash_to_index;
@@ -114,7 +114,6 @@ struct SHA1 final : public StateI {
     // _peer_in_speed feeds directly into _peer_in_targets_dist
     std::vector<std::pair<uint32_t, uint32_t>> _peer_in_targets;
     std::discrete_distribution<size_t> _peer_in_targets_dist;
 };
 } // States
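
Since _peer_in_speed feeds _peer_in_targets_dist, a std::discrete_distribution can pick download targets with probability proportional to each peer's measured speed. A minimal standalone sketch of that weighting (the targets and speeds are made-up data):

```cpp
#include <cstdint>
#include <iostream>
#include <random>
#include <utility>
#include <vector>

int main(void) {
    // candidate (group, peer) targets and their measured download speeds in bytes/s
    std::vector<std::pair<uint32_t, uint32_t>> targets {{0, 1}, {0, 2}, {0, 3}};
    std::vector<float> speeds {1024.f, 4096.f, 512.f};

    // peers that deliver faster get proportionally more chunk requests
    std::minstd_rand rng {1337};
    std::discrete_distribution<size_t> dist(speeds.begin(), speeds.end());

    for (int i = 0; i < 5; i++) {
        const auto [group, peer] = targets[dist(rng)];
        std::cout << "request next chunk from " << group << ":" << peer << "\n";
    }
    return 0;
}
```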

View File

@@ -238,6 +238,10 @@ std::string_view ToxClient::getGroupPeerName(uint32_t group_number, uint32_t peer_number) const {
     }
 }
+
+TOX_CONNECTION ToxClient::getGroupPeerConnectionStatus(uint32_t group_number, uint32_t peer_number) const {
+    return tox_group_peer_get_connection_status(_tox, group_number, peer_number, nullptr);
+}
 void ToxClient::onToxSelfConnectionStatus(TOX_CONNECTION connection_status) {
     std::cout << "TCL self status: ";
     switch (connection_status) {
@@ -269,14 +273,14 @@ void ToxClient::onToxGroupPeerName(uint32_t group_number, uint32_t peer_id, std::string_view name) {
     _groups[group_number][peer_id].name = name;
 }
-void ToxClient::onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status) {
-    std::cout << "TCL peer " << group_number << ":" << peer_id << " status: ";
-    switch (connection_status) {
-        case TOX_CONNECTION::TOX_CONNECTION_NONE: std::cout << "offline\n"; break;
-        case TOX_CONNECTION::TOX_CONNECTION_TCP: std::cout << "TCP-relayed\n"; break;
-        case TOX_CONNECTION::TOX_CONNECTION_UDP: std::cout << "UDP-direct\n"; break;
-    }
-}
+//void ToxClient::onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status) {
+    //std::cout << "TCL peer " << group_number << ":" << peer_id << " status: ";
+    //switch (connection_status) {
+        //case TOX_CONNECTION::TOX_CONNECTION_NONE: std::cout << "offline\n"; break;
+        //case TOX_CONNECTION::TOX_CONNECTION_TCP: std::cout << "TCP-relayed\n"; break;
+        //case TOX_CONNECTION::TOX_CONNECTION_UDP: std::cout << "UDP-direct\n"; break;
+    //}
+//}
 void ToxClient::onToxGroupCustomPacket(uint32_t group_number, uint32_t peer_id, const uint8_t *data, size_t length) {
     // TODO: signal private?
@@ -310,7 +314,7 @@ void ToxClient::onToxGroupPeerJoin(uint32_t group_number, uint32_t peer_id) {
     tmp_name.push_back('\0'); // make sure it's null terminated
     _groups[group_number][peer_id] = {
-        tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr),
+        //tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr),
         reinterpret_cast<const char*>(tmp_name.data())
     };
@@ -325,6 +329,7 @@ void ToxClient::onToxGroupPeerExit(uint32_t group_number, uint32_t peer_id, Tox_
 void ToxClient::onToxGroupSelfJoin(uint32_t group_number) {
     std::cout << "TCL group self join " << group_number << "\n";
+    tox_group_self_set_name(_tox, group_number, reinterpret_cast<const uint8_t*>(_self_name.data()), _self_name.size(), nullptr);
     // ???
     // can be triggered after other peers already joined o.o
     _tox_profile_dirty = true;

View File

@@ -34,6 +34,7 @@ struct ToxClient {
     std::string getOwnAddress(void) const;
     std::string_view getGroupPeerName(uint32_t group_number, uint32_t peer_number) const;
+    TOX_CONNECTION getGroupPeerConnectionStatus(uint32_t group_number, uint32_t peer_number) const;
     template<typename FN>
     void forEachGroup(FN&& fn) const {
@@ -46,8 +47,7 @@ struct ToxClient {
     void forEachGroupPeer(uint32_t group_number, FN&& fn) const {
         if (_groups.count(group_number)) {
             for (const auto& [peer_number, peer] : _groups.at(group_number)) {
-                const auto& [connection_status, name] = peer;
-                fn(peer_number, connection_status);
+                fn(peer_number);
             }
         }
     }
@@ -56,7 +56,7 @@ struct ToxClient {
     void onToxSelfConnectionStatus(TOX_CONNECTION connection_status);
     void onToxFriendRequest(const uint8_t* public_key, std::string_view message);
     void onToxGroupPeerName(uint32_t group_number, uint32_t peer_id, std::string_view name);
-    void onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status);
+    //void onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status);
     void onToxGroupCustomPacket(uint32_t group_number, uint32_t peer_id, const uint8_t *data, size_t length);
     void onToxGroupCustomPrivatePacket(uint32_t group_number, uint32_t peer_id, const uint8_t *data, size_t length);
     void onToxGroupInvite(uint32_t friend_number, const uint8_t* invite_data, size_t invite_length, std::string_view group_name);
@@ -90,12 +90,10 @@ struct ToxClient {
     std::unique_ptr<StateI> _state;
-    // key groupid, value set of peer ids
-    //std::map<uint32_t, std::set<uint32_t>> _groups;
     struct Peer {
-        Tox_Connection connection_status {Tox_Connection::TOX_CONNECTION_NONE};
         std::string name;
     };
+    // key groupid, key peerid
     std::map<uint32_t, std::map<uint32_t, Peer>> _groups;
 };