Compare commits

No commits in common. "de247c9e91aa13438b0da7dfe8dd21a92443b1f8" and "613ae14530d65e33e247312483d0546023a62a13" have entirely different histories.

de247c9e91...613ae14530
@@ -13,7 +13,7 @@ this will print this tool's tox_id and the generated info_hash

 this will first download the info using the info_hash and then all the file chunks listed in the info
 while simultaneously resharing already downloaded chunks and info (swarming)

-to increase the number of parallel chunk downloads/uploads, you can increase the value with the -I and -O option. the default for -I is 32 and for -O 16, which are relatively low numbers, which in practice can get you up to ~700KiB/s.
+to increase the number of parallel chunk downloads/uploads, you can increase the value with the -I and -O option. the default for -I is 32 and for -O 16, which are relatively low numbers, which in practice can get you up to ~400KiB/s.

 ## Usage
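For a concrete invocation, a hedged sketch of raising both limits (the binary name and the positional info_hash argument are illustrative assumptions, not taken from this diff; only the -I and -O options come from the text above):

    ./ngc_ft1_tool -I 64 -O 32 <info_hash>

Each extra concurrent transfer adds another chunk in flight per round trip, which is where the quoted KiB/s figures come from.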
@@ -57,15 +57,15 @@ struct CommandLine {

 	// advanced FT1:
 	// --ft_ack_per_packet
-	size_t ft_acks_per_packet {3};
+	size_t ft_acks_per_packet {5};
 	// --ft_init_retry_timeout_after
-	float ft_init_retry_timeout_after {5.f};
+	float ft_init_retry_timeout_after {10.f};
 	// --ft_sending_resend_without_ack_after
-	float ft_sending_resend_without_ack_after {3.f};
+	float ft_sending_resend_without_ack_after {5.f};
 	// --ft_sending_give_up_after
 	float ft_sending_give_up_after {30.f};
 	// --ft_packet_window_size
-	size_t ft_packet_window_size {8};
+	size_t ft_packet_window_size {5};

 	// ---- TODO ----
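These knobs trade acknowledgement overhead against throughput: a sliding window of ft_packet_window_size unacked packets caps each single transfer at roughly window × payload / RTT. A minimal back-of-the-envelope sketch, assuming a ~500 byte usable payload per packet and a 100 ms group RTT (both assumptions for illustration, not values from this repo):

    // Rough throughput ceiling for one sliding-window transfer.
    // Payload size and RTT below are assumed, purely illustrative.
    #include <cstdio>

    int main(void) {
        const double payload_bytes = 500.0; // assumed NGC packet payload
        const double rtt_seconds   = 0.1;   // assumed round-trip time
        const double window        = 8.0;   // old ft_packet_window_size default
        // A window of W unacked packets per RTT bounds a single transfer:
        const double per_transfer = window * payload_bytes / rtt_seconds; // ~40 KB/s
        // 32 concurrent incoming transfers (the -I default) multiply that:
        std::printf("ceiling: %.0f KiB/s\n", 32.0 * per_transfer / 1024.0);
        return 0;
    }

Under those assumptions a single transfer tops out around 40 KB/s, so the ~400-700 KiB/s totals quoted in the README come from many parallel transfers rather than from a large window.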
@@ -47,25 +47,22 @@ bool ReceiveStartSHA1::iterate(float delta) {
 		}
 	} else if (_time_since_last_request >= 15.f) { // blast every 15sec
 		_time_since_last_request = 0.f;

 		// TODO: select random and try, not blast
 		// ... and we are blasting
 		_tcl.forEachGroup([this](const uint32_t group_number) {
-			_tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number) {
+			_tcl.forEachGroupPeer(group_number, [this, group_number](uint32_t peer_number, Tox_Connection connection_status) {
 				_tcl.sendFT1RequestPrivate(
 					group_number, peer_number,
 					NGC_FT1_file_kind::HASH_SHA1_INFO,
 					_sha1_info_hash.data.data(), _sha1_info_hash.size()
 				);
-				std::cout
-					<< "ReceiveStartSHA1 sending info request to "
-					<< group_number << ":" << peer_number
-					<< " over " << (_tcl.getGroupPeerConnectionStatus(group_number, peer_number) == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp")
-					<< "\n"
-				;
+				std::cout << "ReceiveStartSHA1 sending info request to " << group_number << ":" << peer_number << " over " << (connection_status == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp") << "\n";
 			});
 		});
 	}

 	// if no transfer, request from random peer (equal dist!!)
 	// TODO: return true if done
 	return _done;
 }
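The "select random and try, not blast" TODO above suggests requesting the info from one randomly chosen peer per 15 s tick instead of from every peer. A sketch of a uniform pick, assuming the (group, peer) pairs are first collected the way SHA1::iterate() fills _peer_in_targets; the helper itself is hypothetical, only std::minstd_rand and std::uniform_int_distribution mirror members seen in this diff:

    #include <cstddef>
    #include <cstdint>
    #include <random>
    #include <utility>
    #include <vector>

    // Pick one random (group, peer) target instead of blasting all.
    // "equal dist" over all currently known peers; caller must ensure
    // targets is non-empty before calling.
    std::pair<uint32_t, uint32_t> pickRandomTarget(
        const std::vector<std::pair<uint32_t, uint32_t>>& targets,
        std::minstd_rand& rng
    ) {
        std::uniform_int_distribution<size_t> dist(0, targets.size() - 1);
        return targets[dist(rng)];
    }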
@@ -30,7 +30,6 @@ SHA1::SHA1(
 {
 	assert(_have_chunk.size() == _sha1_info.chunks.size());

 	_udp_only = cl.request_only_from_udp_peer;
 	_max_concurrent_in = cl.max_incoming_transfers;
 	_max_concurrent_out = cl.max_incoming_transfers;
@@ -195,21 +194,12 @@ bool SHA1::iterate(float delta) {
 		_peer_speed_mesurement_interval_timer = 0.f; // we lose some time here, but precision is not the issue

 		_peer_in_bytes_array_index = (_peer_in_bytes_array_index + 1) % _peer_speed_mesurement_interval_count;
-		//for (const auto& [peer, array] : _peer_in_bytes_array) {
-		for (auto it = _peer_in_bytes_array.begin(); it != _peer_in_bytes_array.end();) {
-			const auto& [peer, array] = *it;
-
+		for (const auto& [peer, array] : _peer_in_bytes_array) {
 			float avg {0.f};
 			for (size_t i = 0; i < array.size(); i++) {
 				avg += array[i];
 			}

-			if (avg == 0.f || _tcl.getGroupPeerConnectionStatus(peer.first, peer.second) == Tox_Connection::TOX_CONNECTION_NONE) {
-				_peer_in_speed.erase(peer);
-				it = _peer_in_bytes_array.erase(it);
-				continue;
-			}
-
 			// if 6 measurements every 0.5sec -> avg is over 3sec -> /3 for /s
 			avg /= _peer_speed_mesurement_interval * _peer_speed_mesurement_interval_count;
@@ -217,14 +207,12 @@ bool SHA1::iterate(float delta) {
 			_peer_in_bytes_array[peer][_peer_in_bytes_array_index] = 0;

 			_peer_in_speed[peer] = avg;
-
-			it++;
 		}

 		_peer_in_targets.clear();
 		_tcl.forEachGroup([this](uint32_t group_number) {
-			_tcl.forEachGroupPeer(group_number, [group_number, this](uint32_t peer_number) {
-				if (!_udp_only || _tcl.getGroupPeerConnectionStatus(group_number, peer_number) == Tox_Connection::TOX_CONNECTION_UDP) {
+			_tcl.forEachGroupPeer(group_number, [group_number, this](uint32_t peer_number, Tox_Connection connection_status) {
+				if (connection_status == Tox_Connection::TOX_CONNECTION_UDP || !_udp_only) {
 					_peer_in_targets.push_back({group_number, peer_number});
 				}
 			});
@@ -290,12 +278,7 @@ bool SHA1::iterate(float delta) {
 		std::cout << "SHA1 cwq:" << _chunk_want_queue.size() << " cwqr:" << _chunks_requested.size() << " trc:" << _transfers_receiving_chunk.size() << " tsc:" << _transfers_sending_chunk.size() << "\n";
 		std::cout << "SHA1 peer down speeds:\n";
 		for (const auto& [peer, speed] : _peer_in_speed) {
-			std::cout
-				<< " " << peer.first << ":" << peer.second
-				<< " " << (_tcl.getGroupPeerConnectionStatus(peer.first, peer.second) == Tox_Connection::TOX_CONNECTION_TCP ? "tcp" : "udp")
-				<< " (" << _tcl.getGroupPeerName(peer.first, peer.second) << ")"
-				<< " " << speed / 1024.f << "KiB/s\n"
-			;
+			std::cout << " " << peer.first << ":" << peer.second << "(" << _tcl.getGroupPeerName(peer.first, peer.second) << ")" << "\t" << speed / 1024.f << "KiB/s\n";
 		}
 	}
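The measurement loop above is a fixed ring of per-interval byte counters: with the 0.5 s interval and 6 buckets implied by the "/3 for /s" comment, the summed buckets cover 3 s of traffic, and dividing by interval × count converts the sum to bytes per second. A self-contained sketch of the same arithmetic (the 16 KiB sample values are made up for illustration):

    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main(void) {
        constexpr float interval = 0.5f; // seconds per bucket, per the comment
        std::array<float, 6> buckets {}; // bytes received in each bucket
        size_t index = 0;

        // pretend we received 16 KiB in each of the last 6 half-seconds
        for (auto& b : buckets) { b = 16.f * 1024.f; }

        float avg = 0.f;
        for (size_t i = 0; i < buckets.size(); i++) { avg += buckets[i]; }
        // the sum covers interval * count = 3 s, so divide to get bytes/s
        avg /= interval * buckets.size();
        std::printf("%.1f KiB/s\n", avg / 1024.f); // -> 32.0 KiB/s

        // after reading, the current bucket is zeroed and the index advances,
        // like _peer_in_bytes_array[peer][_peer_in_bytes_array_index] = 0 above
        buckets[index] = 0.f;
        index = (index + 1) % buckets.size();
        return 0;
    }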
@@ -57,13 +57,9 @@ struct SHA1 final : public StateI {
 	size_t chunkSize(size_t chunk_index) const;
 	bool haveChunk(const SHA1Digest& hash) const;

-	public: // config
+	private:
 		bool _udp_only {false};

-		size_t _max_concurrent_in {32};
-		size_t _max_concurrent_out {16};
-
-	private:
 		mio::mmap_sink _file_map; // writable if not all
 		const FTInfoSHA1 _sha1_info;
 		const std::vector<uint8_t> _sha1_info_data;
@@ -83,7 +79,11 @@ struct SHA1 final : public StateI {
 	// chunk_index -> time since request
 	std::map<size_t, float> _chunks_requested;

+	size_t _max_concurrent_in {32};
+	size_t _max_concurrent_out {16};
+
 	std::minstd_rand _rng {1337};
 	std::uniform_int_distribution<size_t> _distrib;

 	std::unordered_map<SHA1Digest, size_t> _chunk_hash_to_index;
@@ -114,6 +114,7 @@ struct SHA1 final : public StateI {
+	// _peer_in_speed feeds directly into _peer_in_targets_dist
 	std::vector<std::pair<uint32_t, uint32_t>> _peer_in_targets;
 	std::discrete_distribution<size_t> _peer_in_targets_dist;

 };

 } // States
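The comment added in this hunk documents the sampling pipeline: _peer_in_speed provides the weights, _peer_in_targets the values, and a std::discrete_distribution picks the next peer to ask, proportionally favouring faster peers. A hedged sketch of that wiring (the concrete speeds are invented; only the container/distribution pairing mirrors the members above):

    #include <cstdint>
    #include <map>
    #include <random>
    #include <utility>
    #include <vector>

    int main(void) {
        // per-peer measured download speed, keyed by (group, peer)
        std::map<std::pair<uint32_t, uint32_t>, float> peer_in_speed {
            {{0, 1}, 40.f * 1024.f}, // 40 KiB/s
            {{0, 2}, 10.f * 1024.f}, // 10 KiB/s
        };

        // split into parallel value/weight arrays for the distribution
        std::vector<std::pair<uint32_t, uint32_t>> targets;
        std::vector<float> weights;
        for (const auto& [peer, speed] : peer_in_speed) {
            targets.push_back(peer);
            weights.push_back(speed);
        }

        // peer {0,1} will be drawn ~4x as often as peer {0,2}
        std::discrete_distribution<size_t> dist(weights.begin(), weights.end());
        std::minstd_rand rng {1337};
        const auto& [group, peer] = targets[dist(rng)];
        return static_cast<int>(group + peer);
    }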
@@ -238,10 +238,6 @@ std::string_view ToxClient::getGroupPeerName(uint32_t group_number, uint32_t pee
 	}
 }

-TOX_CONNECTION ToxClient::getGroupPeerConnectionStatus(uint32_t group_number, uint32_t peer_number) const {
-	return tox_group_peer_get_connection_status(_tox, group_number, peer_number, nullptr);
-}
-
 void ToxClient::onToxSelfConnectionStatus(TOX_CONNECTION connection_status) {
 	std::cout << "TCL self status: ";
 	switch (connection_status) {
@@ -273,14 +269,14 @@ void ToxClient::onToxGroupPeerName(uint32_t group_number, uint32_t peer_id, std:
 	_groups[group_number][peer_id].name = name;
 }

-//void ToxClient::onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status) {
-	//std::cout << "TCL peer " << group_number << ":" << peer_id << " status: ";
-	//switch (connection_status) {
-		//case TOX_CONNECTION::TOX_CONNECTION_NONE: std::cout << "offline\n"; break;
-		//case TOX_CONNECTION::TOX_CONNECTION_TCP: std::cout << "TCP-relayed\n"; break;
-		//case TOX_CONNECTION::TOX_CONNECTION_UDP: std::cout << "UDP-direct\n"; break;
-	//}
-//}
+void ToxClient::onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status) {
+	std::cout << "TCL peer " << group_number << ":" << peer_id << " status: ";
+	switch (connection_status) {
+		case TOX_CONNECTION::TOX_CONNECTION_NONE: std::cout << "offline\n"; break;
+		case TOX_CONNECTION::TOX_CONNECTION_TCP: std::cout << "TCP-relayed\n"; break;
+		case TOX_CONNECTION::TOX_CONNECTION_UDP: std::cout << "UDP-direct\n"; break;
+	}
+}

 void ToxClient::onToxGroupCustomPacket(uint32_t group_number, uint32_t peer_id, const uint8_t *data, size_t length) {
 	// TODO: signal private?
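The visible part of the uncommented handler only logs; for the connection_status cached in the Peer struct (end of this diff) to stay fresh after join, the handler would presumably also record it. A sketch of that update, assuming the _groups layout shown below; this line is not visible in this hunk and is an assumption:

    // Assumed cache update inside onToxGroupPeerConnection (not shown in
    // the diff): keep the per-peer status in sync from the callback.
    _groups[group_number][peer_id].connection_status = connection_status;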
@@ -314,7 +310,7 @@ void ToxClient::onToxGroupPeerJoin(uint32_t group_number, uint32_t peer_id) {
 	tmp_name.push_back('\0'); // make sure it's null-terminated

 	_groups[group_number][peer_id] = {
-		//tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr),
+		tox_group_peer_get_connection_status(_tox, group_number, peer_id, nullptr),
 		reinterpret_cast<const char*>(tmp_name.data())
 	};
@@ -329,7 +325,6 @@ void ToxClient::onToxGroupPeerExit(uint32_t group_number, uint32_t peer_id, Tox_

 void ToxClient::onToxGroupSelfJoin(uint32_t group_number) {
 	std::cout << "TCL group self join " << group_number << "\n";
 	tox_group_self_set_name(_tox, group_number, reinterpret_cast<const uint8_t*>(_self_name.data()), _self_name.size(), nullptr);
 	// ???
 	// can be triggered after other peers already joined o.o
 	_tox_profile_dirty = true;
@@ -34,7 +34,6 @@ struct ToxClient {
 	std::string getOwnAddress(void) const;

 	std::string_view getGroupPeerName(uint32_t group_number, uint32_t peer_number) const;
-	TOX_CONNECTION getGroupPeerConnectionStatus(uint32_t group_number, uint32_t peer_number) const;

 	template<typename FN>
 	void forEachGroup(FN&& fn) const {
@@ -47,7 +46,8 @@ struct ToxClient {
 	void forEachGroupPeer(uint32_t group_number, FN&& fn) const {
 		if (_groups.count(group_number)) {
 			for (const auto& [peer_number, peer] : _groups.at(group_number)) {
-				fn(peer_number);
+				const auto& [connection_status, name] = peer;
+				fn(peer_number, connection_status);
 			}
 		}
 	}
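With this change every forEachGroupPeer caller receives the cached status as a second argument, so call sites can branch on tcp/udp without querying toxcore again. A minimal caller sketch (the tcl object and lambda body are illustrative; the signature matches the call sites earlier in this diff):

    // Sketch of the new two-argument callback contract.
    tcl.forEachGroupPeer(group_number, [](uint32_t peer_number, Tox_Connection connection_status) {
        if (connection_status == Tox_Connection::TOX_CONNECTION_UDP) {
            // e.g. prefer direct peers for chunk requests
        }
        (void)peer_number;
    });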
@@ -56,7 +56,7 @@ struct ToxClient {
 	void onToxSelfConnectionStatus(TOX_CONNECTION connection_status);
 	void onToxFriendRequest(const uint8_t* public_key, std::string_view message);
 	void onToxGroupPeerName(uint32_t group_number, uint32_t peer_id, std::string_view name);
-	//void onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status);
+	void onToxGroupPeerConnection(uint32_t group_number, uint32_t peer_id, TOX_CONNECTION connection_status);
 	void onToxGroupCustomPacket(uint32_t group_number, uint32_t peer_id, const uint8_t *data, size_t length);
 	void onToxGroupCustomPrivatePacket(uint32_t group_number, uint32_t peer_id, const uint8_t *data, size_t length);
 	void onToxGroupInvite(uint32_t friend_number, const uint8_t* invite_data, size_t invite_length, std::string_view group_name);
@@ -90,10 +90,12 @@ struct ToxClient {

 	std::unique_ptr<StateI> _state;

 	// key groupid, value set of peer ids
 	//std::map<uint32_t, std::set<uint32_t>> _groups;
 	struct Peer {
+		Tox_Connection connection_status {Tox_Connection::TOX_CONNECTION_NONE};
 		std::string name;
 	};
 	// key groupid, key peerid
 	std::map<uint32_t, std::map<uint32_t, Peer>> _groups;
 };