From 07099e4832317ac541a1d79d4fb6e77ecacadfbb Mon Sep 17 00:00:00 2001
From: Green Sky
Date: Sun, 4 Aug 2024 10:14:59 +0200
Subject: [PATCH] tag chunkpicker for update more often

---
 solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp | 62 ++++++++++++++-----------
 1 file changed, 34 insertions(+), 28 deletions(-)

diff --git a/solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp b/solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp
index 00891b2..6970d96 100644
--- a/solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp
+++ b/solanaceae/ngc_ft1_sha1/sha1_ngcft1.cpp
@@ -1372,7 +1372,7 @@ bool SHA1_NGCFT1::onEvent(const Events::NGCEXT_ft1_have& e) {
 	// we might not know yet
 	if (addParticipation(c, o)) {
 		// something happend, update chunk picker
-		c.emplace_or_replace();
+		//c.emplace_or_replace();
 	}
 
 	auto& remote_have = o.get_or_emplace().others;
@@ -1381,42 +1381,50 @@ bool SHA1_NGCFT1::onEvent(const Events::NGCEXT_ft1_have& e) {
 		remote_have.emplace(c, Components::RemoteHaveBitset::Entry{false, num_total_chunks});
 
 		// new have? nice
-		// (always update on biset, not always on have)
-		c.emplace_or_replace();
+		//c.emplace_or_replace();
 	}
 
 	auto& remote_have_peer = remote_have.at(c);
-	if (!remote_have_peer.have_all) {
-		assert(remote_have_peer.have.size_bits() >= num_total_chunks);
+	if (remote_have_peer.have_all) {
+		return true; // peer somehow already had all, ignoring
+	}
 
-		for (const auto c_i : e.chunks) {
-			if (c_i >= num_total_chunks) {
-				std::cerr << "SHA1_NGCFT1 error: remote sent have with out-of-range chunk index!!!\n";
-				std::cerr << info_hash << ": " << c_i << " >= " << num_total_chunks << "\n";
-				continue;
-			}
+	assert(remote_have_peer.have.size_bits() >= num_total_chunks);
 
-			assert(c_i < num_total_chunks);
-			remote_have_peer.have.set(c_i);
+	bool a_valid_change {false};
+	for (const auto c_i : e.chunks) {
+		if (c_i >= num_total_chunks) {
+			std::cerr << "SHA1_NGCFT1 error: remote sent have with out-of-range chunk index!!!\n";
+			std::cerr << info_hash << ": " << c_i << " >= " << num_total_chunks << "\n";
+			continue;
 		}
 
-		// check for completion?
-		// TODO: optimize
-		bool test_all {true};
-		for (size_t i = 0; i < remote_have_peer.have.size_bits(); i++) {
-			if (!remote_have_peer.have[i]) {
-				test_all = false;
-				break;
-			}
-		}
+		assert(c_i < num_total_chunks);
+		remote_have_peer.have.set(c_i);
+		a_valid_change = true;
+	}
 
-		if (test_all) {
-			// optimize
-			remote_have_peer.have_all = true;
-			remote_have_peer.have = BitSet{};
+	if (a_valid_change) {
+		// new have? nice
+		c.emplace_or_replace();
+	}
+
+	// check for completion?
+	// TODO: optimize
+	bool test_all {true};
+	for (size_t i = 0; i < remote_have_peer.have.size_bits(); i++) {
+		if (!remote_have_peer.have[i]) {
+			test_all = false;
+			break;
 		}
 	}
 
+	if (test_all) {
+		// optimize
+		remote_have_peer.have_all = true;
+		remote_have_peer.have = BitSet{};
+	}
+
 	return true;
 }
 
@@ -1491,7 +1499,6 @@ bool SHA1_NGCFT1::onEvent(const Events::NGCEXT_ft1_bitset& e) {
 	}
 
 	// new have? nice
-	// (always update on bitset, not always on have)
 	c.emplace_or_replace();
 
 	return true;
@@ -1530,7 +1537,6 @@ bool SHA1_NGCFT1::onEvent(const Events::NGCEXT_ft1_have_all& e) {
 	remote_have[c] = Components::RemoteHaveBitset::Entry{true, {}};
 
 	// new have? nice
-	// (always update on have_all, not always on have)
 	c.emplace_or_replace();
 
 	return true;
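
Note on the mechanism this patch adjusts: the c.emplace_or_replace() and o.get_or_emplace() calls in the diff appear to have lost their template arguments (the angle-bracket part) in this copy, so the exact component names cannot be recovered from the text above. The pattern itself is an entt-style tag component: an empty struct is attached to the contact entity to mark that its chunk picker should be re-evaluated, and the patch now gates that tagging on the new a_valid_change flag instead of tagging on every have message. A minimal sketch of the pattern, with ChunkPickerUpdateTag as an assumed placeholder name rather than the project's real component:

	// Sketch only: ChunkPickerUpdateTag and the registry layout are assumptions,
	// not names taken from the patch above.
	#include <entt/entt.hpp>

	// Empty "tag" component; its mere presence marks a contact whose
	// chunk picker needs to be re-run.
	struct ChunkPickerUpdateTag {};

	void onHave(entt::registry& reg, entt::entity contact, bool a_valid_change) {
		// Only tag the contact if the have message actually added information,
		// mirroring the a_valid_change check introduced by the patch.
		if (a_valid_change) {
			reg.emplace_or_replace<ChunkPickerUpdateTag>(contact);
		}
	}

	void chunkPickerTick(entt::registry& reg) {
		// The picker visits only tagged contacts and clears the tag afterwards.
		for (const auto contact : reg.view<ChunkPickerUpdateTag>()) {
			// ... re-run chunk selection for this contact ...
			reg.remove<ChunkPickerUpdateTag>(contact);
		}
	}

emplace_or_replace is used rather than emplace so that tagging an already-tagged contact simply replaces the (empty) tag instead of being an error, which lets every event handler tag without checking first.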
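
The completion check kept at the end of the rewritten hunk collapses a fully-set per-chunk bitset into have_all = true so the per-chunk storage can be dropped; the TODO in the patch notes that the linear scan could be optimized away (for example by keeping a running count of set bits). A compact sketch of that collapse, using simplified stand-in types rather than the project's BitSet and Components::RemoteHaveBitset::Entry:

	// Sketch only: RemoteHaveEntry and std::vector<bool> stand in for the
	// patch's real Entry and BitSet types.
	#include <cstddef>
	#include <vector>

	struct RemoteHaveEntry {
		bool have_all {false};
		std::vector<bool> have; // one flag per chunk
	};

	void checkCompletion(RemoteHaveEntry& entry) {
		if (entry.have_all) {
			return; // already collapsed, nothing to scan
		}

		// Linear scan, as in the patch; a running set-bit counter would avoid it.
		for (std::size_t i = 0; i < entry.have.size(); i++) {
			if (!entry.have[i]) {
				return; // at least one chunk still missing
			}
		}

		// Peer has every chunk: record that cheaply and free the per-chunk flags.
		entry.have_all = true;
		entry.have = {};
	}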