Compare commits
No commits in common. "1f0e086b7aec513b3a3b13cc25b05db4f4edaced" and "e89f1be660396fff0646ba17e5166cb35d44e00d" have entirely different histories.
1f0e086b7a ... e89f1be660
external/mio/single_include/mio/mio.hpp (vendored): 2 changes

@@ -794,7 +794,7 @@ inline DWORD int64_low(int64_t n) noexcept
     return n & 0xffffffff;
 }
 
-inline std::wstring s_2_ws(const std::string& s)
+std::wstring s_2_ws(const std::string& s)
 {
     if (s.empty())
         return{};
external/tox_ngc_ext/tox_ngc_ext (vendored): 2 changes

@@ -1 +1 @@
-Subproject commit fc13d93d42d6e4e0a4a5b7be3ff18672e68e4600
+Subproject commit c9479153892ff3aee0e65d33cc61aff8df6aa7bc
external/tox_ngc_ft1/tox_ngc_ft1 (vendored): 2 changes

@@ -1 +1 @@
-Subproject commit a4497390508cab73ac62a8d36aac3d018f10df49
+Subproject commit 04befb21be0327996eb3597cb91d5bdd00066974
external/toxcore/toxcore.cmake (vendored): 3 changes

@@ -127,8 +127,7 @@ target_include_directories(toxcore PRIVATE "${TOX_DIR}toxcore")
 target_include_directories(toxcore PUBLIC "${TOX_DIR}")
 
 target_compile_definitions(toxcore PUBLIC USE_IPV6=1)
-#target_compile_definitions(toxcore PUBLIC MIN_LOGGER_LEVEL=LOGGER_LEVEL_DEBUG)
-target_compile_definitions(toxcore PUBLIC MIN_LOGGER_LEVEL=LOGGER_LEVEL_INFO)
+target_compile_definitions(toxcore PUBLIC MIN_LOGGER_LEVEL=LOGGER_LEVEL_DEBUG)
 
 find_package(unofficial-sodium CONFIG QUIET)
 find_package(sodium QUIET)
@@ -27,7 +27,7 @@ SendStartSHA1::SendStartSHA1(ToxClient& tcl, const CommandLine& cl) : StateI(tcl
 	assert(!_file_map.empty());
 
 	// build info
-	_sha1_info.file_name = std::filesystem::path(cl.send_path).filename().string();
+	_sha1_info.file_name = std::filesystem::path(cl.send_path).filename();
 	_sha1_info.file_size = _file_map.length();
 
 	{ // build chunks
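Side note on the .filename() change in the hunk above: std::filesystem::path::filename() returns a path, so assigning it straight to a std::string relies on the implicit conversion to the native string type (std::string on POSIX, std::wstring on Windows); .string() is the portable spelling. A minimal sketch of the difference (the path value here is hypothetical):

#include <filesystem>
#include <string>

int main() {
	const std::filesystem::path p {"some/dir/file.bin"};
	const std::string portable = p.filename().string(); // explicit conversion, works on every platform
	// const std::string implicit = p.filename();        // only compiles where path's native
	//                                                    // string type is std::string (not MSVC)
	return 0;
}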
@@ -82,24 +82,11 @@ bool SHA1::iterate(float delta) {
 			// if we have not heard for 10sec, timeout
 			if (time_since_remove_activity >= 10.f) {
 				std::cerr << "SHA1 receiving chunk tansfer timed out " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
-				_chunk_want_queue.push_back(std::get<4>(*it)); // put it back
 				it = _transfers_receiving_chunk.erase(it);
 			} else {
 				it++;
 			}
 		}
-		// sent requests
-		for (auto it = _chunks_requested.begin(); it != _chunks_requested.end();) {
-			it->second += delta;
-
-			// if we have not heard for 15sec, timeout
-			if (it->second >= 15.f) {
-				_chunk_want_queue.push_back(it->first); // put it back
-				it = _chunks_requested.erase(it);
-			} else {
-				it++;
-			}
-		}
 	}
 
 	// if we have not reached the total cap for transfers
@@ -155,7 +142,7 @@ bool SHA1::iterate(float delta) {
 		}
 	}
 
-	if (!_have_all && !_chunk_want_queue.empty() && _chunks_requested.size() + _transfers_receiving_chunk.size() < _max_concurrent_in) {
+	if (!_have_all && !_chunk_want_queue.empty() && _transfers_receiving_chunk.size() < _max_concurrent_in) {
 		// send out request, no burst tho
 		std::vector<std::pair<uint32_t, uint32_t>> target_peers;
 		_tcl.forEachGroup([&target_peers, this](uint32_t group_number) {
@@ -175,7 +162,7 @@ bool SHA1::iterate(float delta) {
 		auto [group_number, peer_number] = target_peers.at(target_index);
 
 		size_t chunk_index = _chunk_want_queue.front();
-		_chunks_requested[chunk_index] = 0.f;
+		_chunks_requested.emplace(chunk_index);
 		_chunk_want_queue.pop_front();
 
 		_tcl.sendFT1RequestPrivate(group_number, peer_number, NGC_FT1_file_kind::HASH_SHA1_CHUNK, _sha1_info.chunks[chunk_index].data.data(), 20);
@@ -314,9 +301,6 @@ bool SHA1::onFT1ReceiveInitSHA1Chunk(uint32_t group_number, uint32_t peer_number
 	)
 	);
 
-	// remove form requests
-	_chunks_requested.erase(chunk_index);
-
 	return true;
 }
 
@@ -7,7 +7,7 @@
 #include <mio/mio.hpp>
 
 #include <unordered_map>
-#include <map>
+#include <set>
 #include <vector>
 #include <deque>
 #include <random>
@@ -66,11 +66,10 @@ struct SHA1 final : public StateI {
 	bool _have_all {false};
 	size_t _have_count {0};
 	std::deque<size_t> _chunk_want_queue;
-	// chunk_index -> time since request
-	std::map<size_t, float> _chunks_requested;
+	std::set<size_t> _chunks_requested;
 
 	const size_t _max_concurrent_out {4};
-	const size_t _max_concurrent_in {16};
+	const size_t _max_concurrent_in {4};
 
 	std::minstd_rand _rng {1337};
 	std::uniform_int_distribution<size_t> _distrib;
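Taken together with the SHA1::iterate hunks above, the two sides of this compare track outstanding chunk requests differently: the left-hand revision keeps _chunks_requested as a std::map<size_t, float> (chunk index to seconds since the request) and re-queues entries after 15 s, while the right-hand revision keeps only a std::set<size_t> of outstanding indices and drops the timeout/re-queue pass. A minimal standalone sketch of the map-based pattern, using hypothetical names (RequestTracker, want_queue, requested) rather than the tool's real members:

#include <cstddef>
#include <deque>
#include <map>

// Hypothetical illustration of the bookkeeping on the left-hand side of this
// diff: chunk index -> seconds since the request was sent.
struct RequestTracker {
	std::deque<std::size_t> want_queue;     // chunks still needed
	std::map<std::size_t, float> requested; // outstanding requests and their age
	const float timeout {15.f};             // same 15s cutoff as the removed loop

	// Move the front of the want queue into the outstanding map, age zero.
	void request_front() {
		if (want_queue.empty()) {
			return;
		}
		requested[want_queue.front()] = 0.f;
		want_queue.pop_front();
	}

	// Age every outstanding request; expired ones go back onto the want queue.
	void tick(float delta) {
		for (auto it = requested.begin(); it != requested.end();) {
			it->second += delta;
			if (it->second >= timeout) {
				want_queue.push_back(it->first); // put it back
				it = requested.erase(it);
			} else {
				++it;
			}
		}
	}

	// A chunk that arrived is simply dropped from the outstanding map.
	void complete(std::size_t chunk_index) {
		requested.erase(chunk_index);
	}
};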
@@ -253,7 +253,6 @@ void ToxClient::onToxGroupPeerExit(uint32_t group_number, uint32_t peer_id, Tox_
 void ToxClient::onToxGroupSelfJoin(uint32_t group_number) {
 	std::cout << "TCL group self join " << group_number << "\n";
 	// ???
-	// can be triggered after other peers allready joined o.o
 	_tox_profile_dirty = true;
 }
 