#include "./chunk_picker.hpp"

#include <solanaceae/tox_contacts/components.hpp>

#include "./contact_components.hpp"

#include <solanaceae/object_store/meta_components_file.hpp>

#include "./components.hpp"

#include <algorithm>
#include <random> // std::minstd_rand, std::bernoulli_distribution (used below)

#include <iostream>
// TODO: move ps to own file
// picker strategies are generators
// gen() returns true if a valid chunk was picked
// ps should be lightweight and hold no persistent state
// ps produce an index only once
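//
// minimal usage sketch (illustrative only, variable names assumed):
//   PickerStrategySequential ps(candidates, total_chunks);
//   size_t idx {0};
//   while (ps.gen(idx) && picked.size() < wanted) {
//       picked.push_back(idx); // each strategy instance yields an index at most once
//   }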

// simply scans forward from the start offset (the beginning by default),
// requesting chunks in that order
struct PickerStrategySequential {
	const BitSet& chunk_candidates;
	const size_t total_chunks;

	size_t i {0u};

	PickerStrategySequential(
		const BitSet& chunk_candidates_,
		const size_t total_chunks_,
		const size_t start_offset_ = 0u
	) :
		chunk_candidates(chunk_candidates_),
		total_chunks(total_chunks_),
		i(start_offset_)
	{}

	bool gen(size_t& out_chunk_idx) {
		for (; i < total_chunks && i < chunk_candidates.size_bits(); i++) {
			if (chunk_candidates[i]) {
				out_chunk_idx = i;
				i++;
				return true;
			}
		}

		return false;
	}
};

// chooses a random start position and then requests linearly from there
struct PickerStrategyRandom {
	const BitSet& chunk_candidates;
	const size_t total_chunks;
	std::minstd_rand& rng;

	size_t count {0u};
	size_t i {rng()%total_chunks};

	PickerStrategyRandom(
		const BitSet& chunk_candidates_,
		const size_t total_chunks_,
		std::minstd_rand& rng_
	) :
		chunk_candidates(chunk_candidates_),
		total_chunks(total_chunks_),
		rng(rng_)
	{}

	bool gen(size_t& out_chunk_idx) {
		for (; count < total_chunks; count++, i++) {
			// wrap around
			if (i >= total_chunks) {
				i = i%total_chunks;
			}

			if (chunk_candidates[i]) {
				out_chunk_idx = i;
				count++;
				i++;
				return true;
			}
		}

		return false;
	}
};

// switches randomly between random and sequential
struct PickerStrategyRandomSequential {
	PickerStrategyRandom psr;
	PickerStrategySequential pssf;

	// TODO: configurable
	std::bernoulli_distribution d{0.5f};

	PickerStrategyRandomSequential(
		const BitSet& chunk_candidates_,
		const size_t total_chunks_,
		std::minstd_rand& rng_,
		const size_t start_offset_ = 0u
	) :
		psr(chunk_candidates_, total_chunks_, rng_),
		pssf(chunk_candidates_, total_chunks_, start_offset_)
	{}

	bool gen(size_t& out_chunk_idx) {
		if (d(psr.rng)) {
			return psr.gen(out_chunk_idx);
		} else {
			return pssf.gen(out_chunk_idx);
		}
	}
};
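// note: each gen() call flips a fair coin (d{0.5}) to decide which sub-strategy
// advances; both walk the same candidate set with independent cursors, so the pair
// can yield the same index twice. The caller filters such duplicates before requesting.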

// TODO: return bytes instead, so it can be done independent of chunk size
static constexpr size_t flowWindowToRequestCount(size_t flow_window) {
	// based on 500KiB/s with ~0.05s delay this looks fine
	// increase to 4 at wnd >= 25*1024
	if (flow_window >= 25*1024) {
		return 4u;
	}
	return 3u;
}
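// why 25*1024: at the reference rate of 500KiB/s with ~0.05s delay, roughly
// 500 * 0.05 = 25 KiB are in flight, so once the flow window reaches that
// bandwidth-delay product one more concurrent request is allowed
// (reasoning inferred from the numbers above, not a spec).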

void ChunkPicker::updateParticipation(
	Contact3Handle c,
	ObjectRegistry& objreg
) {
	if (!c.all_of<Contact::Components::FT1Participation>()) {
		participating_unfinished.clear();
		return;
	}

	entt::dense_set<Object> checked;
	for (const Object ov : c.get<Contact::Components::FT1Participation>().participating) {
		using Priority = ObjComp::Ephemeral::File::DownloadPriority::Priority;
		const ObjectHandle o {objreg, ov};

		if (participating_unfinished.contains(o)) {
			if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
				participating_unfinished.erase(o);
				continue;
			}

			if (o.all_of<ObjComp::Ephemeral::File::TagTransferPaused>()) {
				participating_unfinished.erase(o);
				continue;
			}

			if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
				participating_unfinished.erase(o);
				continue;
			}

			// TODO: optimize this to only change on dirty, or something
			if (o.all_of<ObjComp::Ephemeral::File::DownloadPriority>()) {
				Priority prio = o.get<ObjComp::Ephemeral::File::DownloadPriority>().p;

				uint16_t pskips =
					prio == Priority::HIGHEST ? 0u :
					prio == Priority::HIGH ? 1u :
					prio == Priority::NORMAL ? 2u :
					prio == Priority::LOW ? 4u :
					8u // LOWEST
				;
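				// e.g. NORMAL => should_skip of 2: the object is passed over on two
				// robin rounds and picked on the third; LOWEST only roughly every
				// ninth round (illustration of the ladder above).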

				participating_unfinished.at(o).should_skip = pskips;
			}
		} else {
			if (!o.all_of<Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
				continue;
			}

			if (o.all_of<ObjComp::Ephemeral::File::TagTransferPaused>()) {
				continue;
			}

			if (!o.all_of<ObjComp::F::TagLocalHaveAll>()) {
				Priority prio = Priority::NORMAL;

				if (o.all_of<ObjComp::Ephemeral::File::DownloadPriority>()) {
					prio = o.get<ObjComp::Ephemeral::File::DownloadPriority>().p;
				}

				uint16_t pskips =
					prio == Priority::HIGHEST ? 0u :
					prio == Priority::HIGH ? 1u :
					prio == Priority::NORMAL ? 2u :
					prio == Priority::LOW ? 4u :
					8u // LOWEST
				;

				participating_unfinished.emplace(o, ParticipationEntry{pskips});
			}
		}
		checked.emplace(o);
	}

	// now we still need to remove left-over unfinished entries.
	// TODO: how did they get left over?
	entt::dense_set<Object> to_remove;
	for (const auto& [o, _] : participating_unfinished) {
		if (!checked.contains(o)) {
			std::cerr << "unfinished contained non participating\n";
			to_remove.emplace(o);
		}
	}
	for (const auto& o : to_remove) {
		participating_unfinished.erase(o);
	}
}

std::vector<ChunkPicker::ContentChunkR> ChunkPicker::updateChunkRequests(
	Contact3Handle c,
	ObjectRegistry& objreg,
	const ReceivingTransfers& rt,
	const size_t open_requests
	//const size_t flow_window
	//NGCFT1& nft
) {
	if (!static_cast<bool>(c)) {
		assert(false); return {};
	}

	if (!c.all_of<Contact::Components::ToxGroupPeerEphemeral>()) {
		assert(false); return {};
	}
	const auto [group_number, peer_number] = c.get<Contact::Components::ToxGroupPeerEphemeral>();

	updateParticipation(c, objreg);

	if (participating_unfinished.empty()) {
		participating_in_last = entt::null;
		return {};
	}

	std::vector<ContentChunkR> req_ret;

	// count running tf and open requests
	const size_t num_ongoing_transfers = rt.sizePeer(group_number, peer_number);
	// TODO: account for open requests
	const int64_t num_total = num_ongoing_transfers + open_requests;

	// TODO: base max on rate (chunks per sec), gonna be ass with variable chunk size
	//const size_t num_max = std::max(max_tf_chunk_requests, flowWindowToRequestCount(flow_window));
	const size_t num_max = max_tf_chunk_requests;

	const size_t num_requests = std::max<int64_t>(0, int64_t(num_max)-num_total);
	std::cerr << "CP: want " << num_requests << "(rt:" << num_ongoing_transfers << " or:" << open_requests << ") from " << group_number << ":" << peer_number << "\n";
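	// e.g. with num_max == 3, one ongoing transfer and one open request:
	// num_requests == max(0, 3 - 2) == 1 (numbers purely illustrative).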

	// while n < X

	// round robin content (remember last obj)
	if (!objreg.valid(participating_in_last) || !participating_unfinished.count(participating_in_last)) {
		participating_in_last = participating_unfinished.begin()->first;
	}
	assert(objreg.valid(participating_in_last));

	auto it = participating_unfinished.find(participating_in_last);
	// hard limit robin rounds to array size times 20
	for (size_t i = 0; req_ret.size() < num_requests && i < participating_unfinished.size()*20; i++, it++) {
		if (it == participating_unfinished.end()) {
			it = participating_unfinished.begin();
		}

		if (it->second.skips < it->second.should_skip) {
			it->second.skips++;
			continue;
		}
		it->second.skips = 0;

		ObjectHandle o {objreg, it->first};

		// intersect self have with other have
		if (!o.all_of<Components::RemoteHaveBitset, Components::FT1ChunkSHA1Cache, Components::FT1InfoSHA1>()) {
			// rare case where no one else has anything
			continue;
		}

		if (o.all_of<ObjComp::F::TagLocalHaveAll>()) {
			std::cerr << "ChunkPicker error: completed content still in participating_unfinished!\n";
			continue;
		}

		//const auto& cc = o.get<Components::FT1ChunkSHA1Cache>();

		const auto& others_have = o.get<Components::RemoteHaveBitset>().others;
		auto other_it = others_have.find(c);
		if (other_it == others_have.end()) {
			// rare case where the other is participating but has nothing
			continue;
		}

		const auto& other_have = other_it->second;

		const auto& info = o.get<Components::FT1InfoSHA1>();
		const auto total_chunks = info.chunks.size();

		const auto* lhb = o.try_get<ObjComp::F::LocalHaveBitset>();

		// if we don't have anything yet, the local have bitset might not exist
		BitSet chunk_candidates = lhb == nullptr ? BitSet{total_chunks} : (lhb->have.size_bits() >= total_chunks ? lhb->have : BitSet{total_chunks});

		if (!other_have.have_all) {
			// AND is the same as ~(~A | ~B)
			// that means we leave chunk_candidates as our local have (the inverted want)
			// merge is the or
			// invert at the end
			chunk_candidates
				.merge(other_have.have.invert())
				.invert();
			// TODO: add intersect for more perf
		} else {
			chunk_candidates.invert();
		}
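		// worked illustration (5 chunks): local have 10110, peer have 01111
		//   ~peer            = 10000
		//   local | ~peer    = 10110
		//   ~(local | ~peer) = 01001  (== ~local & peer)
		// so only chunks the peer has and we still lack remain set as candidates.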

		auto& requested_chunks = o.get_or_emplace<Components::FT1ChunkSHA1Requested>().chunks;

		// TODO: trim off the bits from rounding up to 8, since they are now always set

		// now select (globally) unrequested chunks the other has
		// TODO: how do we prioritize within a file?
		// - sequential (walk from start (or readhead?))
		// - random (choose random start pos and walk)
		// - random/sequential (randomly choose between the 2)
		// - rarest (keep track of rarity and sort by that)
		// - streaming (use readhead to determine time-critical chunks and request them first, potentially over-requesting; otherwise request relative to the stream head)
		//   maybe look into libtorrent's deadline stuff
		// - arbitrary priority maps/functions (and combine with the above in ratios)

		// TODO: configurable
		size_t start_offset {0u};
		if (o.all_of<ObjComp::Ephemeral::File::ReadHeadHint>()) {
			const auto byte_offset = o.get<ObjComp::Ephemeral::File::ReadHeadHint>().offset_into_file;
			if (byte_offset <= info.file_size) {
				start_offset = byte_offset/info.chunk_size;
			} else {
				// error?
			}
		}
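		// e.g. a read head at byte 300*1024 with a 128KiB chunk size starts the
		// picker at chunk 300/128 == 2 (integer division; numbers illustrative).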

		//PickerStrategySequential ps(chunk_candidates, total_chunks, start_offset);
		//PickerStrategyRandom ps(chunk_candidates, total_chunks, _rng);
		PickerStrategyRandomSequential ps(chunk_candidates, total_chunks, _rng, start_offset);
		size_t out_chunk_idx {0};
		size_t req_from_this_o {0};
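		// cap how many chunks a single object contributes per call to roughly a third
		// of its chunks (at least 1), presumably so one file can't hog the whole budget
		// (a 10-chunk file would yield at most 3 requests here).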
		while (ps.gen(out_chunk_idx) && req_ret.size() < num_requests && req_from_this_o < std::max<size_t>(total_chunks/3, 1)) {
			// out_chunk_idx is a potential candidate we can request from the peer

			// - check against double requests
			if (std::find_if(req_ret.cbegin(), req_ret.cend(), [&](const ContentChunkR& x) -> bool {
				return x.object == o && x.chunk_index == out_chunk_idx;
			}) != req_ret.cend()) {
				// already in the return array
				// how did we get here? should we fast exit? if sequential strat, we would want to
				continue; // skip
			}

			// - check against global requests (this might differ based on strat)
			if (requested_chunks.count(out_chunk_idx) != 0) {
				continue;
			}

			// - check against globally running transfers (this might differ based on strat)
			if (rt.containsChunk(o, out_chunk_idx)) {
				continue;
			}

			// if nothing else blocks this, add to ret
			req_ret.push_back(ContentChunkR{o, out_chunk_idx});

			// TODO: move this after the packet was sent successfully
			// (move net in? hmm)
			requested_chunks[out_chunk_idx] = Components::FT1ChunkSHA1Requested::Entry{0.f, c};

			req_from_this_o++;
		}
	}

	//if (it == participating_unfinished.end() || ++it == participating_unfinished.end()) {
	if (it == participating_unfinished.end()) {
		participating_in_last = entt::null;
	} else {
		participating_in_last = it->first;
	}

	if (req_ret.size() < num_requests) {
		std::cerr << "CP: could not fulfil, " << group_number << ":" << peer_number << " only has " << req_ret.size() << " candidates\n";
	}

	// -- no -- (just compat with old code, ignore)
	// if n < X
	// optimistically request 1 chunk the other does not have
	// (don't mark as requested? or lower cooldown to re-request?)

	return req_ret;
}