a bunch of allocation optimizations

parent 741f1428d3
commit 5601ad91f5
@@ -255,6 +255,7 @@ bool NGCEXTEventProvider::parse_ft1_data_ack(
 	_DATA_HAVE(sizeof(e.transfer_id), std::cerr << "NGCEXT: packet too small, missing transfer_id\n"; return false)
 	e.transfer_id = data[curser++];
 
+	e.sequence_ids.reserve(std::max<int64_t>(data_size-curser, 1)/sizeof(uint16_t));
 	while (curser < data_size) {
 		_DATA_HAVE(sizeof(uint16_t), std::cerr << "NGCEXT: packet too small, missing seq_id\n"; return false)
 		uint16_t seq_id = data[curser++];
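Note on the reserve added above: the remaining payload length gives an upper bound on how many sequence ids still follow, so pre-sizing the vector lets the parse loop push_back without reallocating. A minimal standalone sketch of the same pattern (hypothetical helper and names, little-endian wire format assumed, not taken from the commit):

#include <algorithm>
#include <cstdint>
#include <vector>

// hypothetical helper: parse 16-bit ids from the tail of a packet
std::vector<uint16_t> parse_seq_ids(const uint8_t* data, int64_t data_size, int64_t curser) {
	std::vector<uint16_t> ids;
	// remaining bytes / element size as a capacity hint; reserve() is only a hint,
	// so over- or under-estimating is safe, it just costs slack or one reallocation
	ids.reserve(std::max<int64_t>(data_size - curser, 1) / sizeof(uint16_t));
	while (curser + int64_t(sizeof(uint16_t)) <= data_size) {
		uint16_t id = data[curser++];
		id |= uint16_t(data[curser++]) << 8; // assumes little-endian, for illustration only
		ids.push_back(id);
	}
	return ids;
}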
@@ -86,6 +86,7 @@ int64_t FlowOnly::canSend(float time_delta) {
 
 std::vector<FlowOnly::SeqIDType> FlowOnly::getTimeouts(void) const {
 	std::vector<SeqIDType> list;
+	list.reserve(_in_flight.size()/3); // we dont know, so we just guess
 
 	// after 3 rtt delay, we trigger timeout
 	const auto now_adjusted = getTimeNow() - getCurrentDelay()*3.f;
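The division by 3 is just a guess at how many in-flight packets will turn out to be timed out; since std::vector::reserve() only sets a capacity hint, a wrong guess is harmless. A small sketch of the idea (hypothetical generic helper, not the FlowOnly code):

#include <vector>

// collect the subset of tracked items that satisfy a predicate
template<typename T, typename Pred>
std::vector<T> collect_if(const std::vector<T>& tracked, Pred pred) {
	std::vector<T> out;
	out.reserve(tracked.size() / 3); // rough guess; skips the first few reallocations
	for (const auto& v : tracked) {
		if (pred(v)) {
			out.push_back(v); // only grows past the guess if more than a third match
		}
	}
	return out;
}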
@@ -672,6 +672,7 @@ bool NGCFT1::onEvent(const Events::NGCEXT_ft1_data_ack& e) {
 
 	{
 		std::vector<CCAI::SeqIDType> seqs;
+		seqs.reserve(e.sequence_ids.size());
 		for (const auto it : e.sequence_ids) {
 			// TODO: improve this o.o
 			seqs.push_back({e.transfer_id, it});
@@ -63,6 +63,7 @@ size_t FT1InfoSHA1::chunkSize(size_t chunk_index) const {
 
 std::vector<uint8_t> FT1InfoSHA1::toBuffer(void) const {
 	std::vector<uint8_t> buffer;
+	buffer.reserve(256+8+4+20*chunks.size());
 
 	assert(!file_name.empty());
 	// TODO: optimize
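The capacity expression reads like an upper bound on the serialized size: a fixed budget for the file name and the fixed-width fields plus 20 bytes per SHA1 chunk hash (the exact field widths are an assumption here, they are not spelled out in the diff). A generic sketch of sizing a serialization buffer that way, with made-up field names:

#include <array>
#include <cstdint>
#include <string>
#include <vector>

// hypothetical info struct; field names chosen for illustration only
struct Info {
	std::string file_name;
	uint64_t file_size {0};
	uint32_t chunk_size {0};
	std::vector<std::array<uint8_t, 20>> chunks; // one SHA1 digest per chunk
};

std::vector<uint8_t> to_buffer(const Info& info) {
	std::vector<uint8_t> buffer;
	// fixed-size budget + per-chunk digests; an over-estimate only costs some slack
	buffer.reserve(256 + 8 + 4 + 20*info.chunks.size());
	buffer.insert(buffer.end(), info.file_name.begin(), info.file_name.end());
	for (size_t i = 0; i < sizeof(info.file_size); i++) {
		buffer.push_back(uint8_t(info.file_size >> (i*8))); // little-endian, assumption
	}
	for (size_t i = 0; i < sizeof(info.chunk_size); i++) {
		buffer.push_back(uint8_t(info.chunk_size >> (i*8)));
	}
	for (const auto& c : info.chunks) {
		buffer.insert(buffer.end(), c.begin(), c.end());
	}
	return buffer;
}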
@@ -250,7 +250,7 @@ float SHA1_NGCFT1::iterate(float delta) {
 	//std::cerr << "---------- new tick ----------\n";
 	_mfb.tick(); // does not need to be called as often, once every sec would be enough, but the pointer deref + atomic bool should be very fast
 
-	entt::dense_map<Contact3, size_t> peer_open_requests;
+	_peer_open_requests.clear();
 
 	{ // timers
 		// sending transfers
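This is the main change in iterate(): peer_open_requests used to be a local entt::dense_map constructed every tick; it is now the member _peer_open_requests (declared in the header hunk at the end of this commit) and only clear()ed here, so the map keeps its backing storage across iterations instead of reallocating it each tick, at the cost of holding that memory between ticks. Minimal sketch of the pattern, with made-up names and std::unordered_map standing in for entt::dense_map:

#include <cstddef>
#include <unordered_map>

struct Service {
	// reset every iterate; kept as a member purely as an allocation optimization
	std::unordered_map<int, size_t> _open_requests;

	void iterate() {
		_open_requests.clear(); // drops the old entries but keeps the capacity
		// ... refill _open_requests for this tick ...
	}
};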
@@ -299,7 +299,7 @@ float SHA1_NGCFT1::iterate(float delta) {
 			}
 		}
 		{ // requested chunk timers
-			_os.registry().view<Components::FT1ChunkSHA1Requested>().each([delta, &peer_open_requests](Components::FT1ChunkSHA1Requested& ftchunk_requested) {
+			_os.registry().view<Components::FT1ChunkSHA1Requested>().each([this, delta](Components::FT1ChunkSHA1Requested& ftchunk_requested) {
 				for (auto it = ftchunk_requested.chunks.begin(); it != ftchunk_requested.chunks.end();) {
 					it->second.timer += delta;
 
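Because the map is now a member, the lambda handed to .each() no longer captures the old local by reference; it captures this and reaches _peer_open_requests through it. A tiny illustration of that capture change (hypothetical class and loop, not the real entt view code):

#include <cstddef>
#include <unordered_map>
#include <vector>

struct Example {
	std::unordered_map<int, size_t> _peer_open_requests; // member, reused across ticks

	void tally(const std::vector<int>& peers) {
		// before: a local map captured by reference, e.g. [&local_map](...)
		// after: the data lives in a member, so the lambda captures `this` instead
		auto count = [this](int peer) { _peer_open_requests[peer] += 1; };
		for (int p : peers) {
			count(p);
		}
	}
};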
@@ -307,7 +307,7 @@ float SHA1_NGCFT1::iterate(float delta) {
 					if (it->second.timer >= 60.f) {
 						it = ftchunk_requested.chunks.erase(it);
 					} else {
-						peer_open_requests[it->second.c] += 1;
+						_peer_open_requests[it->second.c] += 1;
 						it++;
 					}
 				}
@@ -447,7 +447,7 @@ float SHA1_NGCFT1::iterate(float delta) {
 	Systems::chunk_picker_updates(
 		_cr,
 		_os.registry(),
-		peer_open_requests,
+		_peer_open_requests,
 		_receiving_transfers,
 		_nft,
 		delta
@@ -456,7 +456,7 @@ float SHA1_NGCFT1::iterate(float delta) {
 	// transfer statistics systems
 	Systems::transfer_tally_update(_os.registry(), getTimeNow());
 
-	if (peer_open_requests.empty()) {
+	if (_peer_open_requests.empty()) {
 		return 2.f;
 	} else {
 		// pretty conservative and should be ajusted on a per peer, per delay basis
@@ -74,6 +74,9 @@ class SHA1_NGCFT1 : public ToxEventI, public RegistryMessageModelEventI, public
 		// only used to remove participation on peer exit
 		entt::dense_map<uint64_t, Contact3Handle> _tox_peer_to_contact;
 
+		// reset every iterate; kept here as an allocation optimization
+		entt::dense_map<Contact3, size_t> _peer_open_requests;
+
 		void updateMessages(ObjectHandle ce);
 
 		std::optional<std::pair<uint32_t, uint32_t>> selectPeerForRequest(ObjectHandle ce);