Compare commits

...

36 Commits

SHA1 Message Date
f940d44952 better includes 2023-01-13 17:33:03 +01:00
9e262120b0 timeout started filetransfers 2023-01-13 16:23:28 +01:00
1f4c2a39c5 fix it 2023-01-13 03:22:04 +01:00
f7313aa2bf compiled again, some kinda bug still there 2023-01-13 00:39:56 +01:00
89f9610730 midway through refactor, ft done (?) 2023-01-12 22:48:51 +01:00
2031ef94b6 remove ngc ext (moved to gh) 2023-01-12 20:26:21 +01:00
e974522a38 fix message reading and writing 2023-01-12 03:24:15 +01:00
f8816d5c53 notify toxic as if we are toxcore 2023-01-12 03:04:20 +01:00
e858f23fc1 works 2023-01-12 02:36:29 +01:00
dbcf2777c0 tf now more or less working 2023-01-11 19:00:49 +01:00
776be06433 accept init ack, send sequenced, handle data ack
still need handle data and send ack
2023-01-11 03:14:11 +01:00
6852cdd046 add logging cats 2023-01-04 20:25:41 +01:00
d4f3d3c4fb prot simp, iterate starte, more send code 2022-10-26 02:58:42 +02:00
428048c79f simplify ft_send.dot 2022-10-23 01:23:38 +02:00
d9e9a6b83d dot ft recv mostly done 2022-10-21 17:25:05 +02:00
b190617285 remove extra ft packets, continue callback stuff 2022-10-14 22:07:57 +02:00
bf0433a348 start recv 2022-10-14 22:06:57 +02:00
dbfdc942d7 send graph 2022-10-11 14:13:56 +02:00
9edf3fcf2f ft data cb 2022-10-09 21:02:05 +02:00
4afd13f70e ft raw packet send functions 2022-10-09 02:28:55 +02:00
213457fccf ft raw packet function refactor 2022-10-09 02:07:31 +02:00
9dadd8d254 start refactoring again 2022-10-08 20:58:10 +02:00
e6803275a0 hs init recv logic, + fs send init ack (not tracking it yet) 2022-10-08 00:15:51 +02:00
bd0c45f885 more init handle 2022-10-06 22:00:06 +02:00
6d4ee7de42 init ft (no ack or data yet) 2022-10-06 02:24:23 +02:00
41150a0332 wip send file request 2022-10-03 23:33:08 +02:00
97a5241fad mirnor refactor 2022-10-03 03:54:16 +02:00
ad55a345b2 refactor done, now feature parity with before refactor + ft begins 2022-10-03 03:23:38 +02:00
5d6a5d8c01 wip more refactoring 2022-10-03 01:15:49 +02:00
9a8bfe36f8 add poc ft packetids 2022-10-01 22:37:46 +02:00
edd2401244 minor refactor 2022-10-01 19:39:42 +02:00
bbf000b4b5 record heard msg ids 2022-10-01 01:38:42 +02:00
3362653928 fffff got that logic finally 2022-09-30 23:38:19 +02:00
52ea6b5196 more hackery, now sends requests 2022-09-30 02:28:36 +02:00
ba168d5f2d more 2022-09-29 02:39:54 +02:00
3832906ebb lets c++ 2022-09-28 20:40:49 +02:00
9 changed files with 2035 additions and 86 deletions

README.md (new file, +6)

@@ -0,0 +1,6 @@
.h -> c header (public interface)
.hpp -> c++ header (private interface)
.cpp -> c++ source (private implementation)
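As an illustration (editor's note, not part of the original README), the history-sync module touched in this change follows that split:

ngc_hs1.h   // public C interface: extern "C" functions, opaque NGC_HS1 handle
ngc_hs1.hpp // private C++ interface: the NGC_HS1 struct definition and internal handlers
ngc_hs1.cpp // private C++ implementation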

ft_recv.dot (new file, +33)

@@ -0,0 +1,33 @@
digraph ft_recv {
fontname="Helvetica,Arial,sans-serif"
node [fontname="Helvetica,Arial,sans-serif"]
edge [fontname="Helvetica,Arial,sans-serif"]
label="FT1_Receiver";
labelloc="t";
start [label="start | we got FT_INIT"];
fail [shape=doublecircle];
done [shape=doublecircle];
// peer offline (how)
start -> fail [label="peer offline"]
start -> starting [label="send FT_INIT_ACK"];
starting -> starting [label="if timeout or get FT_INIT send FT_INIT_ACK"];
starting -> got_data [label="got FT_DATA"];
starting -> fail [label="if timeout too often"];
got_data -> fail [label="peer offline"];
subgraph cluster_data {
label="for each data chunk";
style=filled;
got_data -> wait_for_all_data [label="send FT_DATA_ACK"];
}
wait_for_all_data -> fail [label="peer offline"];
wait_for_all_data -> done [label="all chunks received and acked"];
}

ft_send.dot (new file, +47)

@@ -0,0 +1,47 @@
digraph ft_send {
fontname="Helvetica,Arial,sans-serif"
node [fontname="Helvetica,Arial,sans-serif"]
edge [fontname="Helvetica,Arial,sans-serif"]
label="FT1_Sender";
labelloc="t";
//layout=fdp
//rankdir = LR
start;
fail [shape=doublecircle];
done [shape=doublecircle];
// peer offline (how)
start -> fail [label="peer offline"];
start -> sent_init [label="send FT_INIT"];
// peer offline (how)
sent_init -> fail [label="peer offline OR timeout too often"];
sent_init -> sent_init [label="timeout & sentcount <= 3, send FT_INIT"];
// got an ack after an init
sent_init -> have_data [label="got init_ack"]
subgraph cluster_data {
label="for each data chunk";
style=filled;
//node [style=filled];
have_data -> data_awaiting_ack [label="send FT_DATA"];
data_awaiting_ack -> data_wait_for_all [label="get FT_DATA_ACK"];
data_awaiting_ack -> have_data [label="no FT_DATA_ACK"];
data_wait_for_all;
}
// fail
have_data -> fail [label="peer offline"]
data_wait_for_all -> fail [label="peer offline"]
data_wait_for_all -> done [label="all chunks acked"];
}
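For orientation, the sender graph above can be read as a small state machine. The following is a minimal sketch with illustrative names only; the actual implementation in ngc_ft1.cpp below uses its own SendTransfer::State enum:

enum class SenderState { Start, SentInit, HaveData, AwaitingAck, WaitForAll, Done, Fail };
enum class SenderEvent { PeerOffline, SendInit, GotInitAck, TimeoutTooOften, SendData, GotDataAck, NoDataAck, AllChunksAcked };

// transition function mirroring the FT1_Sender edges above (sketch only)
SenderState next_state(SenderState s, SenderEvent e) {
    if (e == SenderEvent::PeerOffline) return SenderState::Fail;
    switch (s) {
        case SenderState::Start:       return e == SenderEvent::SendInit ? SenderState::SentInit : s;
        case SenderState::SentInit:
            if (e == SenderEvent::GotInitAck)      return SenderState::HaveData;
            if (e == SenderEvent::TimeoutTooOften) return SenderState::Fail;
            return s; // timeout with sentcount <= 3: resend FT_INIT, stay in SentInit
        case SenderState::HaveData:    return e == SenderEvent::SendData ? SenderState::AwaitingAck : s;
        case SenderState::AwaitingAck:
            if (e == SenderEvent::GotDataAck) return SenderState::WaitForAll;
            if (e == SenderEvent::NoDataAck)  return SenderState::HaveData;
            return s;
        case SenderState::WaitForAll:  return e == SenderEvent::AllChunksAcked ? SenderState::Done : s;
        default:                       return s; // Done and Fail are terminal
    }
}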

ngc_ft1.cpp (new file, +848)

@@ -0,0 +1,848 @@
#include "./ngc_ft1.h"
#include "../tox_ngc_ext/ngc_ext.hpp"
#include <vector>
#include <deque>
#include <unordered_map>
#include <map>
#include <optional>
#include <cassert>
struct SendSequenceBuffer {
struct SSBEntry {
std::vector<uint8_t> data; // the data (variable size, but smaller than 500)
float time_since_activity {0.f};
};
// sequence_id -> entry
std::map<uint16_t, SSBEntry> entries;
uint16_t next_seq_id {0};
void erase(uint16_t seq) {
entries.erase(seq);
}
// inflight chunks
size_t size(void) const {
return entries.size();
}
uint16_t add(std::vector<uint8_t>&& data) {
entries[next_seq_id] = {data, 0.f};
return next_seq_id++;
}
template<typename FN>
void for_each(float time_delta, FN&& fn) {
for (auto& [id, entry] : entries) {
entry.time_since_activity += time_delta;
fn(id, entry.data, entry.time_since_activity);
}
}
};
struct RecvSequenceBuffer {
struct RSBEntry {
std::vector<uint8_t> data;
};
// sequence_id -> entry
std::map<uint16_t, RSBEntry> entries;
uint16_t next_seq_id {0};
// list of seq_ids to ack; this is separate because RSBEntries are deleted once processed
std::deque<uint16_t> ack_seq_ids;
void erase(uint16_t seq) {
entries.erase(seq);
}
// inflight chunks
size_t size(void) const {
return entries.size();
}
void add(uint16_t seq_id, std::vector<uint8_t>&& data) {
entries[seq_id] = {data};
ack_seq_ids.push_back(seq_id);
if (ack_seq_ids.size() > 5) { // TODO: magic
ack_seq_ids.pop_front();
}
}
bool canPop(void) const {
return entries.count(next_seq_id);
}
std::vector<uint8_t> pop(void) {
assert(canPop());
auto tmp_data = entries.at(next_seq_id).data;
erase(next_seq_id);
next_seq_id++;
return tmp_data;
}
// for acking; might be suboptimal since it only returns the oldest (front) entries
std::vector<uint16_t> frontSeqIDs(size_t count = 5) const {
std::vector<uint16_t> seq_ids;
auto it = entries.cbegin();
for (size_t i = 0; i < count && it != entries.cend(); i++, it++) {
seq_ids.push_back(it->first);
}
return seq_ids;
}
};
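// Illustration (editor's note, not part of the original source): how the two
// sequence buffers above interact when chunks arrive out of order, e.g. 0, 2, 1:
//   rsb.add(0, ...); rsb.canPop() == true  -> pop() yields chunk 0, next_seq_id becomes 1
//   rsb.add(2, ...); rsb.canPop() == false -> chunk 2 is held back (hole at seq 1)
//   rsb.add(1, ...); rsb.canPop() == true  -> pop() yields chunk 1, then chunk 2
// ack_seq_ids keeps the last few received seq_ids (capped at 5) so the receiver can
// still ack them in _handle_FT1_DATA after the corresponding entries were popped.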
struct NGC_FT1 {
NGC_FT1_options options;
std::unordered_map<NGC_FT1_file_kind, NGC_FT1_recv_request_cb*> cb_recv_request;
std::unordered_map<NGC_FT1_file_kind, NGC_FT1_recv_init_cb*> cb_recv_init;
std::unordered_map<NGC_FT1_file_kind, NGC_FT1_recv_data_cb*> cb_recv_data;
std::unordered_map<NGC_FT1_file_kind, NGC_FT1_send_data_cb*> cb_send_data;
std::unordered_map<NGC_FT1_file_kind, void*> ud_recv_request;
std::unordered_map<NGC_FT1_file_kind, void*> ud_recv_init;
std::unordered_map<NGC_FT1_file_kind, void*> ud_recv_data;
std::unordered_map<NGC_FT1_file_kind, void*> ud_send_data;
struct Group {
struct Peer {
struct RecvTransfer {
NGC_FT1_file_kind file_kind;
std::vector<uint8_t> file_id;
enum class State {
INITED, //init acked, but no data received yet (might be dropped)
RECV, // receiving data
} state;
// float time_since_last_activity ?
size_t file_size {0};
size_t file_size_current {0};
// sequence id based reassembly
RecvSequenceBuffer rsb;
};
std::array<std::optional<RecvTransfer>, 256> recv_transfers;
size_t next_recv_transfer_idx {0}; // next id will be 0
struct SendTransfer {
NGC_FT1_file_kind file_kind;
std::vector<uint8_t> file_id;
enum class State {
INIT_SENT, // keep this state until ack or deny or giveup
SENDING, // we got the ack and are now sending data
// is this real?
FINISHING, // we sent all data but acks still outstanding????
FINFIN, // we sent the data_fin and are waiting for the data_fin_ack
// delete
} state;
size_t inits_sent {1}; // the first init is sent when the transfer is created
float time_since_activity {0.f};
size_t file_size {0};
size_t file_size_current {0};
// sequence array
// list of sent but not acked seq_ids
SendSequenceBuffer ssb;
};
std::array<std::optional<SendTransfer>, 256> send_transfers;
size_t next_send_transfer_idx {0}; // next id will be 0
};
std::map<uint32_t, Peer> peers;
};
std::map<uint32_t, Group> groups;
};
// send pkgs
static bool _send_pkg_FT1_REQUEST(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t file_kind, const uint8_t* file_id, size_t file_id_size);
static bool _send_pkg_FT1_INIT(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t file_kind, uint64_t file_size, uint8_t transfer_id, const uint8_t* file_id, size_t file_id_size);
static bool _send_pkg_FT1_INIT_ACK(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t transfer_id);
static bool _send_pkg_FT1_DATA(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, uint16_t sequence_id, const uint8_t* data, size_t data_size);
static bool _send_pkg_FT1_DATA_ACK(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const uint16_t* seq_ids, size_t seq_ids_size);
// handle pkgs
static void _handle_FT1_REQUEST(Tox* tox, NGC_EXT_CTX* ngc_ext_ctx, uint32_t group_number, uint32_t peer_number, const uint8_t *data, size_t length, void* user_data);
static void _handle_FT1_INIT(Tox* tox, NGC_EXT_CTX* ngc_ext_ctx, uint32_t group_number, uint32_t peer_number, const uint8_t *data, size_t length, void* user_data);
static void _handle_FT1_INIT_ACK(Tox* tox, NGC_EXT_CTX* ngc_ext_ctx, uint32_t group_number, uint32_t peer_number, const uint8_t *data, size_t length, void* user_data);
static void _handle_FT1_DATA(Tox* tox, NGC_EXT_CTX* ngc_ext_ctx, uint32_t group_number, uint32_t peer_number, const uint8_t *data, size_t length, void* user_data);
static void _handle_FT1_DATA_ACK(Tox* tox, NGC_EXT_CTX* ngc_ext_ctx, uint32_t group_number, uint32_t peer_number, const uint8_t *data, size_t length, void* user_data);
NGC_FT1* NGC_FT1_new(const struct NGC_FT1_options* options) {
NGC_FT1* ngc_ft1_ctx = new NGC_FT1;
ngc_ft1_ctx->options = *options;
return ngc_ft1_ctx;
}
bool NGC_FT1_register_ext(NGC_FT1* ngc_ft1_ctx, NGC_EXT_CTX* ngc_ext_ctx) {
ngc_ext_ctx->callbacks[NGC_EXT::FT1_REQUEST] = _handle_FT1_REQUEST;
ngc_ext_ctx->callbacks[NGC_EXT::FT1_INIT] = _handle_FT1_INIT;
ngc_ext_ctx->callbacks[NGC_EXT::FT1_INIT_ACK] = _handle_FT1_INIT_ACK;
ngc_ext_ctx->callbacks[NGC_EXT::FT1_DATA] = _handle_FT1_DATA;
ngc_ext_ctx->callbacks[NGC_EXT::FT1_DATA_ACK] = _handle_FT1_DATA_ACK;
ngc_ext_ctx->user_data[NGC_EXT::FT1_REQUEST] = ngc_ft1_ctx;
ngc_ext_ctx->user_data[NGC_EXT::FT1_INIT] = ngc_ft1_ctx;
ngc_ext_ctx->user_data[NGC_EXT::FT1_INIT_ACK] = ngc_ft1_ctx;
ngc_ext_ctx->user_data[NGC_EXT::FT1_DATA] = ngc_ft1_ctx;
ngc_ext_ctx->user_data[NGC_EXT::FT1_DATA_ACK] = ngc_ft1_ctx;
return true;
}
#if 0
bool NGC_FT1_init(NGC_EXT_CTX* ngc_ext_ctx, const struct NGC_FT1_options* options) {
ngc_ext_ctx->ngc_ft1_ctx = new NGC_FT1;
ngc_ext_ctx->ngc_ft1_ctx->options = *options;
ngc_ext_ctx->callbacks[FT1_REQUEST] = _handle_FT1_REQUEST;
ngc_ext_ctx->callbacks[FT1_INIT] = _handle_FT1_INIT;
ngc_ext_ctx->callbacks[FT1_INIT_ACK] = _handle_FT1_INIT_ACK;
ngc_ext_ctx->callbacks[FT1_DATA] = _handle_FT1_DATA;
ngc_ext_ctx->callbacks[FT1_DATA_ACK] = _handle_FT1_DATA_ACK;
return true;
}
#endif
void NGC_FT1_kill(NGC_FT1* ngc_ft1_ctx) {
delete ngc_ft1_ctx;
}
#if 0
void NGC_FT1_kill(NGC_EXT_CTX* ngc_ext_ctx) {
delete ngc_ext_ctx->ngc_ft1_ctx;
ngc_ext_ctx->ngc_ft1_ctx = nullptr;
}
#endif
void NGC_FT1_iterate(Tox *tox, NGC_FT1* ngc_ft1_ctx) {
//void NGC_FT1_iterate(Tox *tox, NGC_EXT_CTX* ngc_ext_ctx/*, void *user_data*/) {
assert(ngc_ft1_ctx);
for (auto& [group_number, group] : ngc_ft1_ctx->groups) {
for (auto& [peer_number, peer] : group.peers) {
//for (auto& tf_opt : peer.send_transfers) {
for (size_t idx = 0; idx < peer.send_transfers.size(); idx++) {
auto& tf_opt = peer.send_transfers[idx];
if (tf_opt) {
auto& tf = tf_opt.value();
tf.time_since_activity += 0.025f; // TODO: actual delta
switch (tf.state) {
using State = NGC_FT1::Group::Peer::SendTransfer::State;
case State::INIT_SENT:
if (tf.time_since_activity >= 20.f) {
if (tf.inits_sent >= 3) {
// delete, timed out 3 times
fprintf(stderr, "FT: warning, ft init timed out, deleting\n");
tf_opt.reset();
continue; // dangerous control flow
} else {
// timed out, resend
fprintf(stderr, "FT: warning, ft init timed out, resending\n");
_send_pkg_FT1_INIT(tox, group_number, peer_number, tf.file_kind, tf.file_size, idx, tf.file_id.data(), tf.file_id.size());
tf.inits_sent++;
tf.time_since_activity = 0.f;
}
}
break;
case State::SENDING: {
tf.ssb.for_each(0.025f, [&](uint16_t id, const std::vector<uint8_t>& data, float& time_since_activity) {
// no ack after 5 sec -> resend
if (time_since_activity >= 5.f) {
_send_pkg_FT1_DATA(tox, group_number, peer_number, idx, id, data.data(), data.size());
time_since_activity = 0.f;
}
});
if (tf.time_since_activity >= 30.f) {
// no ack after 30sec, close ft
// TODO: notify app
fprintf(stderr, "FT: warning, sending ft in progress timed out, deleting\n");
tf_opt.reset();
continue; // dangerous control flow
}
assert(ngc_ft1_ctx->cb_send_data.count(tf.file_kind));
// if chunks in flight < window size (currently 1)
while (tf.ssb.size() < 1) {
std::vector<uint8_t> new_data;
size_t chunk_size = std::min<size_t>(10u, tf.file_size - tf.file_size_current);
if (chunk_size == 0) {
// TODO: set to finishing?
break; // we done
}
new_data.resize(chunk_size);
ngc_ft1_ctx->cb_send_data[tf.file_kind](
tox,
group_number, peer_number,
idx,
tf.file_size_current,
new_data.data(), new_data.size(),
ngc_ft1_ctx->ud_send_data.count(tf.file_kind) ? ngc_ft1_ctx->ud_send_data.at(tf.file_kind) : nullptr
);
uint16_t seq_id = tf.ssb.add(std::move(new_data));
_send_pkg_FT1_DATA(tox, group_number, peer_number, idx, seq_id, tf.ssb.entries.at(seq_id).data.data(), tf.ssb.entries.at(seq_id).data.size());
fprintf(stderr, "FT: sent data size: %ld (seq %d)\n", chunk_size, seq_id);
tf.file_size_current += chunk_size;
}
}
break;
case State::FINISHING:
break;
// finfin o.o
default: // invalid state, delete
fprintf(stderr, "FT: error, ft in invalid state, deleting\n");
tf_opt.reset();
continue;
}
}
}
}
}
}
void NGC_FT1_register_callback_recv_request(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_recv_request_cb* callback,
void* user_data
) {
assert(ngc_ft1_ctx);
ngc_ft1_ctx->cb_recv_request[file_kind] = callback;
ngc_ft1_ctx->ud_recv_request[file_kind] = user_data;
}
void NGC_FT1_register_callback_recv_init(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_recv_init_cb* callback,
void* user_data
) {
assert(ngc_ft1_ctx);
ngc_ft1_ctx->cb_recv_init[file_kind] = callback;
ngc_ft1_ctx->ud_recv_init[file_kind] = user_data;
}
void NGC_FT1_register_callback_recv_data(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_recv_data_cb* callback,
void* user_data
) {
assert(ngc_ft1_ctx);
ngc_ft1_ctx->cb_recv_data[file_kind] = callback;
ngc_ft1_ctx->ud_recv_data[file_kind] = user_data;
}
void NGC_FT1_register_callback_send_data(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_send_data_cb* callback,
void* user_data
) {
assert(ngc_ft1_ctx);
ngc_ft1_ctx->cb_send_data[file_kind] = callback;
ngc_ft1_ctx->ud_send_data[file_kind] = user_data;
}
void NGC_FT1_send_request_private(
Tox *tox, NGC_FT1* ngc_ft1_ctx,
uint32_t group_number,
uint32_t peer_number,
NGC_FT1_file_kind file_kind,
const uint8_t* file_id,
size_t file_id_size
) {
assert(tox);
assert(ngc_ft1_ctx);
// record locally that we sent(or want to send) the request?
_send_pkg_FT1_REQUEST(tox, group_number, peer_number, file_kind, file_id, file_id_size);
}
bool NGC_FT1_send_init_private(
Tox *tox, NGC_FT1* ngc_ft1_ctx,
uint32_t group_number, uint32_t peer_number,
NGC_FT1_file_kind file_kind,
const uint8_t* file_id, size_t file_id_size,
size_t file_size,
uint8_t* transfer_id
) {
//fprintf(stderr, "TODO: init ft for %08X\n", msg_id);
fprintf(stderr, "FT: init ft\n");
if (tox_group_peer_get_connection_status(tox, group_number, peer_number, nullptr) == TOX_CONNECTION_NONE) {
fprintf(stderr, "FT: error: cant init ft, peer offline\n");
return false;
}
auto& peer = ngc_ft1_ctx->groups[group_number].peers[peer_number];
// allocate transfer_id
size_t idx = peer.next_send_transfer_idx;
peer.next_send_transfer_idx = (peer.next_send_transfer_idx + 1) % 256;
{ // TODO: extract
size_t i = idx;
bool found = false;
do {
if (!peer.send_transfers[i].has_value()) {
// free slot
idx = i;
found = true;
break;
}
i = (i + 1) % 256;
} while (i != idx);
if (!found) {
fprintf(stderr, "FT: error: cant init ft, no free transfer slot\n");
return false;
}
}
_send_pkg_FT1_INIT(tox, group_number, peer_number, file_kind, file_size, idx, file_id, file_id_size);
peer.send_transfers[idx] = NGC_FT1::Group::Peer::SendTransfer{
file_kind,
std::vector(file_id, file_id+file_id_size),
NGC_FT1::Group::Peer::SendTransfer::State::INIT_SENT,
1,
0.f,
file_size,
0,
};
if (transfer_id != nullptr) {
*transfer_id = idx;
}
return true;
}
static bool _send_pkg_FT1_REQUEST(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t file_kind, const uint8_t* file_id, size_t file_id_size) {
// - 1 byte packet id
// - 1 byte (TODO: more?) file_kind
// - X bytes file_id
std::vector<uint8_t> pkg;
pkg.push_back(NGC_EXT::FT1_REQUEST);
pkg.push_back(file_kind);
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return tox_group_send_custom_private_packet(tox, group_number, peer_number, true, pkg.data(), pkg.size(), nullptr);
}
static bool _send_pkg_FT1_INIT(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t file_kind, uint64_t file_size, uint8_t transfer_id, const uint8_t* file_id, size_t file_id_size) {
// - 1 byte packet id
// - 1 byte (file_kind)
// - 8 bytes (data size)
// - 1 byte (temporary_file_tf_id, for this peer only; technically just a prefix to distinguish between simultaneous fts)
// - X bytes (file_kind dependent id, different sizes)
std::vector<uint8_t> pkg;
pkg.push_back(NGC_EXT::FT1_INIT);
pkg.push_back(file_kind);
for (size_t i = 0; i < sizeof(file_size); i++) {
pkg.push_back((file_size>>(i*8)) & 0xff);
}
pkg.push_back(transfer_id);
for (size_t i = 0; i < file_id_size; i++) {
pkg.push_back(file_id[i]);
}
// lossless
return tox_group_send_custom_private_packet(tox, group_number, peer_number, true, pkg.data(), pkg.size(), nullptr);
}
static bool _send_pkg_FT1_INIT_ACK(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t transfer_id) {
// send ack
// - 1 byte packet id
// - 1 byte transfer_id
std::vector<uint8_t> pkg;
pkg.push_back(NGC_EXT::FT1_INIT_ACK);
pkg.push_back(transfer_id);
// lossless
return tox_group_send_custom_private_packet(tox, group_number, peer_number, true, pkg.data(), pkg.size(), nullptr);
}
static bool _send_pkg_FT1_DATA(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, uint16_t sequence_id, const uint8_t* data, size_t data_size) {
assert(data_size > 0);
// TODO
// check header_size+data_size <= max pkg size
std::vector<uint8_t> pkg;
pkg.push_back(NGC_EXT::FT1_DATA);
pkg.push_back(transfer_id);
pkg.push_back(sequence_id & 0xff);
pkg.push_back((sequence_id >> (1*8)) & 0xff);
// TODO: optimize
for (size_t i = 0; i < data_size; i++) {
pkg.push_back(data[i]);
}
// lossless?
return tox_group_send_custom_private_packet(tox, group_number, peer_number, true, pkg.data(), pkg.size(), nullptr);
}
static bool _send_pkg_FT1_DATA_ACK(const Tox* tox, uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, const uint16_t* seq_ids, size_t seq_ids_size) {
std::vector<uint8_t> pkg;
pkg.push_back(NGC_EXT::FT1_DATA_ACK);
pkg.push_back(transfer_id);
// TODO: optimize
for (size_t i = 0; i < seq_ids_size; i++) {
pkg.push_back(seq_ids[i] & 0xff);
pkg.push_back((seq_ids[i] >> (1*8)) & 0xff);
}
// lossless?
return tox_group_send_custom_private_packet(tox, group_number, peer_number, true, pkg.data(), pkg.size(), nullptr);
}
#define _DATA_HAVE(x, error) if ((length - curser) < (x)) { error; }
static void _handle_FT1_REQUEST(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
) {
NGC_FT1* ngc_ft1_ctx = static_cast<NGC_FT1*>(user_data);
size_t curser = 0;
// TODO: might be uint16_t or even larger
uint8_t file_kind_u8;
_DATA_HAVE(sizeof(file_kind_u8), fprintf(stderr, "FT: packet too small, missing file_kind\n"); return)
file_kind_u8 = data[curser++];
auto file_kind = static_cast<NGC_FT1_file_kind>(file_kind_u8);
fprintf(stderr, "FT: got FT request with file_kind %u [", file_kind_u8);
for (size_t curser_copy = curser; curser_copy < length; curser_copy++) {
fprintf(stderr, "%02X", data[curser_copy]);
}
fprintf(stderr, "]\n");
NGC_FT1_recv_request_cb* fn_ptr = nullptr;
if (ngc_ft1_ctx->cb_recv_request.count(file_kind)) {
fn_ptr = ngc_ft1_ctx->cb_recv_request.at(file_kind);
}
void* ud_ptr = nullptr;
if (ngc_ft1_ctx->ud_recv_request.count(file_kind)) {
ud_ptr = ngc_ft1_ctx->ud_recv_request.at(file_kind);
}
if (fn_ptr) {
fn_ptr(tox, group_number, peer_number, data+curser, length-curser, ud_ptr);
} else {
fprintf(stderr, "FT: missing cb for requests\n");
}
}
static void _handle_FT1_INIT(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
) {
NGC_FT1* ngc_ft1_ctx = static_cast<NGC_FT1*>(user_data);
size_t curser = 0;
// - 1 byte (file_kind)
// TODO: might be uint16_t or even larger
uint8_t file_kind_u8;
_DATA_HAVE(sizeof(file_kind_u8), fprintf(stderr, "FT: packet too small, missing file_kind\n"); return)
file_kind_u8 = data[curser++];
auto file_kind = static_cast<NGC_FT1_file_kind>(file_kind_u8);
// - 8 bytes (data size)
size_t file_size {0u};
_DATA_HAVE(sizeof(file_size), fprintf(stderr, "FT: packet too small, missing file_size\n"); return)
for (size_t i = 0; i < sizeof(file_size); i++, curser++) {
file_size |= size_t(data[curser]) << (i*8);
}
// - 1 byte (temporary_file_tf_id, for this peer only; technically just a prefix to distinguish between simultaneous fts)
uint8_t transfer_id;
_DATA_HAVE(sizeof(transfer_id), fprintf(stderr, "FT: packet too small, missing transfer_id\n"); return)
transfer_id = data[curser++];
// - X bytes (file_kind dependent id, different sizes)
const std::vector file_id(data+curser, data+curser+(length-curser));
fprintf(stderr, "FT: got FT init with file_kind:%u file_size:%lu tf_id:%u [", file_kind_u8, file_size, transfer_id);
for (size_t curser_copy = curser; curser_copy < length; curser_copy++) {
fprintf(stderr, "%02X", data[curser_copy]);
}
fprintf(stderr, "]\n");
// check if slot free ?
// did we already ack this and the other side just did not see the ack?
NGC_FT1_recv_init_cb* fn_ptr = nullptr;
if (ngc_ft1_ctx->cb_recv_init.count(file_kind)) {
fn_ptr = ngc_ft1_ctx->cb_recv_init.at(file_kind);
}
void* ud_ptr = nullptr;
if (ngc_ft1_ctx->ud_recv_init.count(file_kind)) {
ud_ptr = ngc_ft1_ctx->ud_recv_init.at(file_kind);
}
bool accept_ft;
if (fn_ptr) {
// last part of message (file_id) is not yet parsed, just give it to cb
accept_ft = fn_ptr(tox, group_number, peer_number, data+curser, length-curser, transfer_id, file_size, ud_ptr);
} else {
fprintf(stderr, "FT: missing cb for init\n");
accept_ft = false;
}
if (accept_ft) {
_send_pkg_FT1_INIT_ACK(tox, group_number, peer_number, transfer_id);
fprintf(stderr, "FT: accepted init\n");
auto& peer = ngc_ft1_ctx->groups[group_number].peers[peer_number];
if (peer.recv_transfers[transfer_id].has_value()) {
fprintf(stderr, "FT: overwriting existing recv_transfer %d\n", transfer_id);
}
peer.recv_transfers[transfer_id] = NGC_FT1::Group::Peer::RecvTransfer{
file_kind,
file_id,
NGC_FT1::Group::Peer::RecvTransfer::State::INITED,
file_size,
0u,
};
} else {
// TODO deny?
fprintf(stderr, "FT: rejected init\n");
}
}
static void _handle_FT1_INIT_ACK(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
) {
NGC_FT1* ngc_ft1_ctx = static_cast<NGC_FT1*>(user_data);
size_t curser = 0;
// - 1 byte (transfer_id)
uint8_t transfer_id;
_DATA_HAVE(sizeof(transfer_id), fprintf(stderr, "FT: packet too small, missing transfer_id\n"); return)
transfer_id = data[curser++];
// we now should start sending data
auto& groups = ngc_ft1_ctx->groups;
if (!groups.count(group_number)) {
fprintf(stderr, "FT: init_ack for unknown group\n");
return;
}
NGC_FT1::Group::Peer& peer = groups[group_number].peers[peer_number];
if (!peer.send_transfers[transfer_id].has_value()) {
fprintf(stderr, "FT: inti_ack for unknown transfer\n");
return;
}
NGC_FT1::Group::Peer::SendTransfer& transfer = peer.send_transfers[transfer_id].value();
using State = NGC_FT1::Group::Peer::SendTransfer::State;
if (transfer.state != State::INIT_SENT) {
fprintf(stderr, "FT: inti_ack but not in INIT_SENT state\n");
return;
}
// iterate will now call NGC_FT1_send_data_cb
transfer.state = State::SENDING;
transfer.time_since_activity = 0.f;
}
static void _handle_FT1_DATA(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data, size_t length,
void* user_data
) {
NGC_FT1* ngc_ft1_ctx = static_cast<NGC_FT1*>(user_data);
size_t curser = 0;
// - 1 byte (transfer_id)
uint8_t transfer_id;
_DATA_HAVE(sizeof(transfer_id), fprintf(stderr, "FT: packet too small, missing transfer_id\n"); return)
transfer_id = data[curser++];
// - 2 bytes (sequence_id)
uint16_t sequence_id;
_DATA_HAVE(sizeof(sequence_id), fprintf(stderr, "FT: packet too small, missing sequence_id\n"); return)
sequence_id = data[curser++];
sequence_id |= data[curser++] << (1*8);
if (curser == length) {
fprintf(stderr, "FT: data of size 0!\n");
return;
}
auto& groups = ngc_ft1_ctx->groups;
if (!groups.count(group_number)) {
fprintf(stderr, "FT: data for unknown group\n");
return;
}
NGC_FT1::Group::Peer& peer = groups[group_number].peers[peer_number];
if (!peer.recv_transfers[transfer_id].has_value()) {
fprintf(stderr, "FT: data for unknown transfer\n");
return;
}
auto& transfer = peer.recv_transfers[transfer_id].value();
// do reassembly, ignore dups
transfer.rsb.add(sequence_id, std::vector<uint8_t>(data+curser, data+curser+(length-curser)));
NGC_FT1_recv_data_cb* fn_ptr = nullptr;
if (ngc_ft1_ctx->cb_recv_data.count(transfer.file_kind)) {
fn_ptr = ngc_ft1_ctx->cb_recv_data.at(transfer.file_kind);
}
void* ud_ptr = nullptr;
if (ngc_ft1_ctx->ud_recv_data.count(transfer.file_kind)) {
ud_ptr = ngc_ft1_ctx->ud_recv_data.at(transfer.file_kind);
}
if (!fn_ptr) {
fprintf(stderr, "FT: missing cb for recv_data\n");
return;
}
// loop for chunks without holes
while (transfer.rsb.canPop()) {
auto data = transfer.rsb.pop();
fn_ptr(tox, group_number, peer_number, transfer_id, transfer.file_size_current, data.data(), data.size(), ud_ptr);
transfer.file_size_current += data.size();
}
// send acks
std::vector<uint16_t> ack_seq_ids(transfer.rsb.ack_seq_ids.cbegin(), transfer.rsb.ack_seq_ids.cend());
if (!ack_seq_ids.empty()) {
_send_pkg_FT1_DATA_ACK(tox, group_number, peer_number, transfer_id, ack_seq_ids.data(), ack_seq_ids.size());
}
}
static void _handle_FT1_DATA_ACK(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
) {
NGC_FT1* ngc_ft1_ctx = static_cast<NGC_FT1*>(user_data);
size_t curser = 0;
// - 1 byte (transfer_id)
uint8_t transfer_id;
_DATA_HAVE(sizeof(transfer_id), fprintf(stderr, "FT: packet too small, missing transfer_id\n"); return)
transfer_id = data[curser++];
auto& groups = ngc_ft1_ctx->groups;
if (!groups.count(group_number)) {
fprintf(stderr, "FT: data_ack for unknown group\n");
return;
}
NGC_FT1::Group::Peer& peer = groups[group_number].peers[peer_number];
if (!peer.send_transfers[transfer_id].has_value()) {
fprintf(stderr, "FT: data_ack for unknown transfer\n");
return;
}
NGC_FT1::Group::Peer::SendTransfer& transfer = peer.send_transfers[transfer_id].value();
using State = NGC_FT1::Group::Peer::SendTransfer::State;
if (transfer.state != State::SENDING) {
fprintf(stderr, "FT: data_ack but not in SENDING state\n");
return;
}
_DATA_HAVE(sizeof(uint16_t), fprintf(stderr, "FT: packet too small, need at least 1 seq_id\n"); return)
if ((length - curser) % sizeof(uint16_t) != 0) {
fprintf(stderr, "FT: data_ack with misaligned data\n");
return;
}
while (curser < length) {
uint16_t seq_id = data[curser++];
seq_id |= data[curser++] << (1*8);
transfer.ssb.erase(seq_id);
}
if (transfer.file_size == transfer.file_size_current) {
fprintf(stderr, "FT: %d done\n", transfer_id);
peer.send_transfers[transfer_id] = std::nullopt;
}
}
#undef _DATA_HAVE
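To make the wire format used above concrete, here is a worked example (editor's addition) of the bytes produced by _send_pkg_FT1_DATA; the numeric value of NGC_EXT::FT1_DATA lives in the ngc_ext header and is therefore written symbolically:

// Example: transfer_id = 3, sequence_id = 0x0102, payload = { 'h', 'i' }
//   [ FT1_DATA packet id ] [ 0x03 ] [ 0x02 ] [ 0x01 ] [ 'h' ] [ 'i' ]
//     packet id             transfer  seq lo   seq hi   data bytes...
// sequence_id is serialized low byte first (little-endian), matching the parsing in
// _handle_FT1_DATA, and the buffer is passed to tox_group_send_custom_private_packet()
// as a lossless private packet.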

ngc_ft1.h (new file, +162)

@@ -0,0 +1,162 @@
#ifndef C_NGC_FT1_H
#define C_NGC_FT1_H
// this is a c header
#include <tox/tox.h>
#include "ngc_ext.h"
#ifdef __cplusplus
extern "C" {
#endif
// ========== struct / typedef ==========
typedef struct NGC_FT1 NGC_FT1;
struct NGC_FT1_options {
int tmp;
};
// uint16_t ?
// C does not allow specifying an enum's underlying type
typedef enum NGC_FT1_file_kind /*: uint8_t*/ {
//INVALID = 0u,
// id:
// group (implicit)
// peer pub key + msg_id
NGC_HS1_MESSAGE_BY_ID = 1u, // history sync PoC 1
// :)
// draft for fun and profit
// TODO: should we even support v1?
// TODO: design the same thing again for tox? (msg_pack instead of bencode?)
// id: infohash
TORRENT_V1_METAINFO = 8u,
// id: sha1
TORRENT_V1_CHUNK, // alias with SHA1_CHUNK?
// id: infohash
TORRENT_V2_METAINFO, // meta info is kind of more complicated than that <.<
// id: sha256
TORRENT_V2_CHUNK,
} NGC_FT1_file_kind;
// ========== init / kill ==========
// (see tox api)
NGC_FT1* NGC_FT1_new(const struct NGC_FT1_options* options);
bool NGC_FT1_register_ext(NGC_FT1* ngc_ft1_ctx, NGC_EXT_CTX* ngc_ext_ctx);
//bool NGC_FT1_init(NGC_EXT_CTX* ngc_ext_ctx, const struct NGC_FT1_options* options);
void NGC_FT1_kill(NGC_FT1* ngc_ft1_ctx);
//void NGC_FT1_kill(NGC_EXT_CTX* ngc_ext_ctx);
// ========== iterate ==========
void NGC_FT1_iterate(Tox *tox, NGC_FT1* ngc_ft1_ctx);
//void NGC_FT1_iterate(Tox *tox, NGC_EXT_CTX* ngc_ext_ctx/*, void *user_data*/);
// TODO: announce
// ========== request ==========
// TODO: public variant?
void NGC_FT1_send_request_private(
Tox *tox, NGC_FT1* ngc_ft1_ctx,
uint32_t group_number, uint32_t peer_number,
NGC_FT1_file_kind file_kind,
const uint8_t* file_id, size_t file_id_size
);
typedef void NGC_FT1_recv_request_cb(
Tox *tox,
uint32_t group_number, uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size,
void* user_data
);
void NGC_FT1_register_callback_recv_request(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_recv_request_cb* callback,
void* user_data
);
// ========== send/accept ==========
// public does not make sense here
bool NGC_FT1_send_init_private(
Tox *tox, NGC_FT1* ngc_ft1_ctx,
uint32_t group_number, uint32_t peer_number,
NGC_FT1_file_kind file_kind,
const uint8_t* file_id, size_t file_id_size,
size_t file_size,
uint8_t* transfer_id
);
// return true to accept, false to deny
typedef bool NGC_FT1_recv_init_cb(
Tox *tox,
uint32_t group_number, uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size,
const uint8_t transfer_id,
const size_t file_size,
void* user_data
);
void NGC_FT1_register_callback_recv_init(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_recv_init_cb* callback,
void* user_data
);
// ========== data ==========
typedef void NGC_FT1_recv_data_cb(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
uint8_t transfer_id,
size_t data_offset, const uint8_t* data, size_t data_size,
void* user_data
);
void NGC_FT1_register_callback_recv_data(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_recv_data_cb* callback,
void* user_data
);
// request to fill data_size bytes into data
typedef void NGC_FT1_send_data_cb(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
uint8_t transfer_id,
size_t data_offset, uint8_t* data, size_t data_size,
void* user_data
);
void NGC_FT1_register_callback_send_data(
NGC_FT1* ngc_ft1_ctx,
NGC_FT1_file_kind file_kind,
NGC_FT1_send_data_cb* callback,
void* user_data
);
// ========== peer online/offline ==========
//void NGC_FT1_peer_online(Tox* tox, NGC_FT1* ngc_hs1_ctx, uint32_t group_number, uint32_t peer_number, bool online);
#ifdef __cplusplus
}
#endif
#endif // C_NGC_FT1_H
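A minimal usage sketch of the API declared above (editor's addition): it assumes an existing Tox* and an NGC_EXT_CTX* from the tox_ngc_ext module; setup_ft1, my_recv_request and the group/peer numbers are illustrative names, not part of the repository.

#include "ngc_ft1.h"

// invoked when a peer asks us for a file; a real handler would look the id up
// and, if it has the data, offer it back via NGC_FT1_send_init_private()
static void my_recv_request(Tox* tox, uint32_t group_number, uint32_t peer_number,
                            const uint8_t* file_id, size_t file_id_size, void* user_data) {
    (void)tox; (void)group_number; (void)peer_number; (void)file_id; (void)file_id_size; (void)user_data;
}

void setup_ft1(Tox* tox, NGC_EXT_CTX* ext_ctx) {
    struct NGC_FT1_options opts = {0};
    NGC_FT1* ft1 = NGC_FT1_new(&opts);
    NGC_FT1_register_ext(ft1, ext_ctx);
    NGC_FT1_register_callback_recv_request(ft1, NGC_HS1_MESSAGE_BY_ID, my_recv_request, nullptr);

    // ask peer 0 in group 0 for a message by id (the file_id layout is file_kind specific:
    // peer public key + message id for NGC_HS1_MESSAGE_BY_ID)
    uint8_t file_id[TOX_GROUP_PEER_PUBLIC_KEY_SIZE + sizeof(uint32_t)] = {0};
    NGC_FT1_send_request_private(tox, ft1, 0, 0, NGC_HS1_MESSAGE_BY_ID, file_id, sizeof(file_id));

    NGC_FT1_iterate(tox, ft1); // call regularly, alongside tox_iterate()
    NGC_FT1_kill(ft1);         // on shutdown
}

The remaining callbacks (recv_init, recv_data, send_data) follow the same registration pattern.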

ngc_hs1.c (deleted file, -65)

@@ -1,65 +0,0 @@
#include "./ngc_hs1.h"
#include <stdlib.h>
struct NGC_HS1 {
void* temp;
NGC_HS1_options options;
// key - key - value store
// peer pubkey - msg_id - message(type + text)
};
NGC_HS1* NGC_HS1_new(const struct NGC_HS1_options* options) {
NGC_HS1* context = malloc(sizeof(NGC_HS1));
context->options = *options;
return context;
}
void NGC_HS1_kill(NGC_HS1* ngc_hs1_ctx) {
free(ngc_hs1_ctx);
}
void NGC_HS1_iterate(Tox *tox, NGC_HS1* ngc_hs1_ctx/*, void *user_data*/) {
}
bool NGC_HS1_shim_group_send_message(
const Tox *tox,
NGC_HS1* ngc_hs1_ctx,
uint32_t group_number,
Tox_Message_Type type, const uint8_t *message, size_t length,
uint32_t *message_id,
Tox_Err_Group_Send_Message *error
) {
uint32_t* msg_id_ptr = message_id;
uint32_t msg_id_placeholder = 0;
if (msg_id_ptr == NULL) {
msg_id_ptr = &msg_id_placeholder;
}
bool ret = tox_group_send_message(tox, group_number, type, message, length, msg_id_ptr, error);
NGC_HS1_record_own_message(tox, group_number, type, message, length, *msg_id_ptr);
return ret;
}
// record own msg
void NGC_HS1_record_own_message(
const Tox *tox,
NGC_HS1* ngc_hs1_ctx,
uint32_t group_number,
Tox_Message_Type type, const uint8_t *message, size_t length, uint32_t message_id
) {
printf("record_own_message %u\n", message_id);
}

ngc_hs1.cpp (new file, +781)

@@ -0,0 +1,781 @@
#include "./ngc_hs1.hpp"
#include "ngc_ft1.h"
#include <cstdint>
#include <cassert>
#include <new>
#include <map>
#include <list>
#include <set>
#include <optional>
#include <algorithm>
void NGC_HS1::Peer::append(uint32_t msg_id, Tox_Message_Type type, const std::string& text) {
order.push_back(msg_id);
// overwrites
auto& new_msg = dict[msg_id];
new_msg.msg_id = msg_id;
new_msg.type = type;
new_msg.text = text;
if (heard_of.count(msg_id)) {
// we got history before we got the message
heard_of.erase(msg_id);
}
fprintf(stderr, "HS: ######## last msgs ########\n");
auto rit = order.crbegin();
for (size_t i = 0; i < 10 && rit != order.crend(); i++, rit++) {
fprintf(stderr, " %08X - %s\n", *rit, dict.at(*rit).text.c_str());
}
}
bool NGC_HS1::Peer::hear(uint32_t msg_id, uint32_t peer_number) {
if (dict.count(msg_id)) {
// we know
return false;
}
if (heard_of.count(msg_id) && heard_of.at(msg_id).count(peer_number)) {
// we heard it from that peer before
return false;
}
heard_of[msg_id].emplace(peer_number);
return true;
}
void _handle_HS1_ft_recv_request(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size,
void* user_data
);
bool _handle_HS1_ft_recv_init(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size,
const uint8_t transfer_id,
const size_t file_size,
void* user_data
);
void _handle_HS1_ft_recv_data(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
uint8_t transfer_id,
size_t data_offset,
const uint8_t* data, size_t data_size,
void* user_data
);
void _handle_HS1_ft_send_data(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
uint8_t transfer_id,
size_t data_offset, uint8_t* data, size_t data_size,
void* user_data
);
NGC_HS1* NGC_HS1_new(const struct NGC_HS1_options* options) {
auto* ngc_hs1_ctx = new NGC_HS1;
ngc_hs1_ctx->options = *options;
return ngc_hs1_ctx;
}
bool NGC_HS1_register_ext(NGC_HS1* ngc_hs1_ctx, NGC_EXT_CTX* ngc_ext_ctx) {
ngc_ext_ctx->callbacks[NGC_EXT::HS1_REQUEST_LAST_IDS] = _handle_HS1_REQUEST_LAST_IDS;
ngc_ext_ctx->callbacks[NGC_EXT::HS1_RESPONSE_LAST_IDS] = _handle_HS1_RESPONSE_LAST_IDS;
ngc_ext_ctx->user_data[NGC_EXT::HS1_REQUEST_LAST_IDS] = ngc_hs1_ctx;
ngc_ext_ctx->user_data[NGC_EXT::HS1_RESPONSE_LAST_IDS] = ngc_hs1_ctx;
return true;
}
bool NGC_HS1_register_ft1(NGC_HS1* ngc_hs1_ctx, NGC_FT1* ngc_ft1_ctx) {
ngc_hs1_ctx->ngc_ft1_ctx = ngc_ft1_ctx;
NGC_FT1_register_callback_recv_request(ngc_ft1_ctx, NGC_FT1_file_kind::NGC_HS1_MESSAGE_BY_ID, _handle_HS1_ft_recv_request, ngc_hs1_ctx);
NGC_FT1_register_callback_recv_init(ngc_ft1_ctx, NGC_FT1_file_kind::NGC_HS1_MESSAGE_BY_ID, _handle_HS1_ft_recv_init, ngc_hs1_ctx);
NGC_FT1_register_callback_recv_data(ngc_ft1_ctx, NGC_FT1_file_kind::NGC_HS1_MESSAGE_BY_ID, _handle_HS1_ft_recv_data, ngc_hs1_ctx);
NGC_FT1_register_callback_send_data(ngc_ft1_ctx, NGC_FT1_file_kind::NGC_HS1_MESSAGE_BY_ID, _handle_HS1_ft_send_data, ngc_hs1_ctx);
return true;
}
void NGC_HS1_kill(NGC_HS1* ngc_hs1_ctx) {
delete ngc_hs1_ctx;
}
static void _iterate_group(Tox *tox, NGC_HS1* ngc_hs1_ctx, uint32_t group_number, float time_delta) {
//fprintf(stderr, "g:%u\n", g_i);
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
if (ngc_hs1_ctx->history.count(g_id) == 0) {
fprintf(stderr, "HS: adding new group: %u %X%X%X%X\n",
group_number,
g_id.data.data()[0],
g_id.data.data()[1],
g_id.data.data()[2],
g_id.data.data()[3]
);
ngc_hs1_ctx->history[g_id];
} else {
auto& group = ngc_hs1_ctx->history[g_id];
// check if transfers have timed out
for (auto it = group.transfers.begin(); it != group.transfers.end();) {
it->second.time_since_ft_activity += time_delta;
if (it->second.time_since_ft_activity >= ngc_hs1_ctx->options.ft_activity_timeout) {
// timed out
fprintf(stderr, "HS: !!! ft timed out (%08X)\n", it->first.first);
it = group.transfers.erase(it);
} else {
it++;
}
}
// for each peer
for (auto& [peer_key, peer] : group.peers) {
//fprintf(stderr, " p: %X%X%X%X\n", key.data.data()[0], key.data.data()[1], key.data.data()[2], key.data.data()[3]);
peer.time_since_last_request_sent += time_delta;
if (peer.time_since_last_request_sent > ngc_hs1_ctx->options.query_interval_per_peer) {
peer.time_since_last_request_sent = 0.f;
//fprintf(stderr, "HS: requesting ids for %X%X%X%X\n", peer_key.data.data()[0], peer_key.data.data()[1], peer_key.data.data()[2], peer_key.data.data()[3]);
// TODO: other way around?
// ask everyone if they have newer stuff for this peer
// - 1 byte packet id
// - peer_key bytes (peer key we want to know ids for)
// - 1 byte (uint8_t count ids, at least 1)
std::array<uint8_t, 1+TOX_GROUP_PEER_PUBLIC_KEY_SIZE+1> pkg;
pkg[0] = NGC_EXT::HS1_REQUEST_LAST_IDS;
std::copy(peer_key.data.begin(), peer_key.data.end(), pkg.begin()+1);
pkg[1+TOX_GROUP_PEER_PUBLIC_KEY_SIZE] = ngc_hs1_ctx->options.last_msg_ids_count; // request last (up to) 5 msg_ids
tox_group_send_custom_packet(tox, group_number, true, pkg.data(), pkg.size(), nullptr);
}
// check if pending msg requests have timed out
for (auto it = peer.pending.begin(); it != peer.pending.end();) {
it->second.time_since_ft_activity += time_delta;
if (it->second.time_since_ft_activity >= ngc_hs1_ctx->options.ft_activity_timeout) {
// timed out
fprintf(stderr, "HS: !!! pending ft request timed out (%08X)\n", it->first);
it = peer.pending.erase(it);
} else {
it++;
}
}
// request FT for only heard of message_ids
size_t request_made_count = 0;
for (const auto& [msg_id, remote_peer_numbers] : peer.heard_of) {
if (request_made_count >= 2) { // 2 for test
// TODO: limit requests per iterate option
break;
}
if (peer.pending.count(msg_id)) {
continue; // already requested
}
if (remote_peer_numbers.empty()) {
fprintf(stderr, "HS: !!! msg_id we heard of, but no remote peer !!!\n");
continue;
}
const uint32_t remote_peer_number = *remote_peer_numbers.begin();
// craft file id
std::array<uint8_t, TOX_GROUP_PEER_PUBLIC_KEY_SIZE+sizeof(uint32_t)> file_id{};
{
std::copy(peer_key.data.cbegin(), peer_key.data.cend(), file_id.begin());
// HACK: little endian
const uint8_t* tmp_ptr = reinterpret_cast<const uint8_t*>(&msg_id);
std::copy(tmp_ptr, tmp_ptr+sizeof(uint32_t), file_id.begin()+TOX_GROUP_PEER_PUBLIC_KEY_SIZE);
}
// send request
NGC_FT1_send_request_private(
tox, ngc_hs1_ctx->ngc_ft1_ctx,
group_number, remote_peer_number,
NGC_FT1_file_kind::NGC_HS1_MESSAGE_BY_ID,
file_id.data(), file_id.size()
);
peer.pending[msg_id] = {remote_peer_number, 0.f};
request_made_count++;
}
}
}
assert(ngc_hs1_ctx->history.size() != 0);
assert(ngc_hs1_ctx->history.count(g_id));
}
void NGC_HS1_iterate(Tox *tox, NGC_HS1* ngc_hs1_ctx) {
//void NGC_HS1_iterate(Tox *tox, NGC_EXT_CTX* ngc_ext_ctx/*, void *user_data*/) {
assert(ngc_hs1_ctx);
//fprintf(stderr, "groups: %u\n", ngc_hs1_ctx->history.size());
uint32_t group_count = tox_group_get_number_groups(tox);
// this can loop endlessly if toxcore misbehaves
for (uint32_t g_i = 0, g_c_done = 0; g_c_done < group_count; g_i++) {
Tox_Err_Group_Is_Connected g_err;
if (tox_group_is_connected(tox, g_i, &g_err)) {
// valid and connected here
// TODO: delta time, or other timers
_iterate_group(tox, ngc_hs1_ctx, g_i, 0.02f);
g_c_done++;
} else if (g_err != TOX_ERR_GROUP_IS_CONNECTED_GROUP_NOT_FOUND) {
g_c_done++;
} // else do nothing
// safety
if (g_i > group_count + 1000) {
fprintf(stderr, "HS: WAY PAST GOUPS in iterate\n");
break;
}
}
}
void NGC_HS1_peer_online(Tox* tox, NGC_HS1* ngc_hs1_ctx, uint32_t group_number, uint32_t peer_number, bool online) {
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
auto& group = ngc_hs1_ctx->history[g_id];
if (online) {
// get peer id
NGC_EXT::PeerKey p_id{};
{ // TODO: error
tox_group_peer_get_public_key(tox, group_number, peer_number, p_id.data.data(), nullptr);
}
auto& peer = group.peers[p_id];
peer.id = peer_number;
} else { // offline
// search
for (auto& [key, peer] : group.peers) {
if (peer.id.has_value() && peer.id.value() == peer_number) {
peer.id = {}; // reset
break;
}
}
}
}
bool NGC_HS1_shim_group_send_message(
const Tox *tox,
NGC_HS1* ngc_hs1_ctx,
uint32_t group_number,
Tox_Message_Type type, const uint8_t *message, size_t length,
uint32_t *message_id,
Tox_Err_Group_Send_Message *error
) {
uint32_t* msg_id_ptr = message_id;
uint32_t msg_id_placeholder = 0;
if (msg_id_ptr == nullptr) {
msg_id_ptr = &msg_id_placeholder;
}
bool ret = tox_group_send_message(tox, group_number, type, message, length, msg_id_ptr, error);
NGC_HS1_record_own_message(tox, ngc_hs1_ctx, group_number, type, message, length, *msg_id_ptr);
return ret;
}
// record own msg
void NGC_HS1_record_own_message(
const Tox *tox,
NGC_HS1* ngc_hs1_ctx,
uint32_t group_number,
Tox_Message_Type type, const uint8_t *message, size_t length, uint32_t message_id
) {
fprintf(stderr, "HS: record_own_message %08X\n", message_id);
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
// get peer id
NGC_EXT::PeerKey p_id{};
{ // TODO: error
tox_group_self_get_public_key(tox, group_number, p_id.data.data(), nullptr);
}
ngc_hs1_ctx->history[g_id].peers[p_id].append(message_id, type, std::string{message, message+length});
assert(ngc_hs1_ctx->history.size() != 0);
assert(ngc_hs1_ctx->history.count(g_id));
}
void NGC_HS1_register_callback_group_message(NGC_HS1* ngc_hs1_ctx, NGC_HS1_group_message_cb* callback) {
assert(ngc_hs1_ctx);
ngc_hs1_ctx->cb_group_message = callback;
}
// record others msg
void NGC_HS1_record_message(
const Tox *tox,
NGC_HS1* ngc_hs1_ctx,
uint32_t group_number,
uint32_t peer_number,
Tox_Message_Type type, const uint8_t *message, size_t length, uint32_t message_id
) {
if (!ngc_hs1_ctx->options.record_others) {
return;
}
fprintf(stderr, "HS: record_message %08X\n", message_id);
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
// get peer id
NGC_EXT::PeerKey p_id{};
{ // TODO: error
tox_group_peer_get_public_key(tox, group_number, peer_number, p_id.data.data(), nullptr);
}
ngc_hs1_ctx->history[g_id].peers[p_id].append(message_id, type, std::string{message, message+length});
}
void _handle_HS1_ft_recv_request(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size,
void* user_data
) {
assert(user_data);
NGC_HS1* ngc_hs1_ctx = static_cast<NGC_HS1*>(user_data);
assert(file_id_size == TOX_GROUP_PEER_PUBLIC_KEY_SIZE+sizeof(uint32_t));
// get peer_key from file_id
NGC_EXT::PeerKey peer_key;
std::copy(file_id, file_id+peer_key.size(), peer_key.data.begin());
// get msg_id from file_id
// HACK: little endian
uint32_t msg_id;
uint8_t* tmp_ptr = reinterpret_cast<uint8_t*>(&msg_id);
std::copy(file_id+TOX_GROUP_PEER_PUBLIC_KEY_SIZE, file_id+TOX_GROUP_PEER_PUBLIC_KEY_SIZE+sizeof(uint32_t), tmp_ptr);
fprintf(stderr, "HS: got a ft request for xxx msg_id %08X\n", msg_id);
// get group id
NGC_EXT::GroupKey group_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, group_id.data.data(), nullptr);
}
const auto& peers = ngc_hs1_ctx->history[group_id].peers;
// do we have that message
if (!peers.count(peer_key)) {
fprintf(stderr, "HS: got ft request for unknown peer\n");
return;
}
const auto& peer = peers.at(peer_key);
if (!peer.dict.count(msg_id)) {
fprintf(stderr, "HS: got ft request for unknown message_id %08X\n", msg_id);
return;
}
// yes we do. now we need to init ft?
//fprintf(stderr, "TODO: init ft for %08X\n", msg_id);
// filesize is
// - 1 byte msg_type (normal / action)
// - x bytes msg_text
// msg_id is part of file_id
const auto& msg = peer.dict.at(msg_id);
size_t file_size = 1 + msg.text.size();
uint8_t transfer_id {0};
NGC_FT1_send_init_private(
tox, ngc_hs1_ctx->ngc_ft1_ctx,
group_number, peer_number,
NGC_HS1_MESSAGE_BY_ID,
file_id, file_id_size,
file_size,
&transfer_id
);
//TODO: can fail
ngc_hs1_ctx->history[group_id].sending[std::make_pair(peer_number, transfer_id)] = {peer_key, msg_id};
}
bool _handle_HS1_ft_recv_init(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size,
const uint8_t transfer_id,
const size_t file_size,
void* user_data
) {
assert(user_data);
NGC_HS1* ngc_hs1_ctx = static_cast<NGC_HS1*>(user_data);
//fprintf(stderr, "HS: -------hs handle ft init\n");
// peer id and msg id from file id
// TODO: replace this assert; a malformed packet can trigger a remote crash
assert(file_id_size == TOX_GROUP_PEER_PUBLIC_KEY_SIZE+sizeof(uint32_t));
// get peer_key from file_id
NGC_EXT::PeerKey peer_key;
std::copy(file_id, file_id+peer_key.size(), peer_key.data.begin());
// get msg_id from file_id
// HACK: little endian
uint32_t msg_id;
uint8_t* tmp_ptr = reinterpret_cast<uint8_t*>(&msg_id);
std::copy(file_id+TOX_GROUP_PEER_PUBLIC_KEY_SIZE, file_id+TOX_GROUP_PEER_PUBLIC_KEY_SIZE+sizeof(uint32_t), tmp_ptr);
// did we ask for this?
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
auto& group = ngc_hs1_ctx->history[g_id];
auto& pending = group.peers[peer_key].pending;
if (!pending.count(msg_id)) {
// we did not ask for this
// TODO: accept?
fprintf(stderr, "HS: ft init from peer we did not ask\n");
return false; // deny
}
if (pending.at(msg_id).peer_number != peer_number) {
// wrong peer ?
fprintf(stderr, "HS: ft init from peer we did not ask while asking someone else\n");
return false; // deny
}
// TODO: if already acked but got init again, they did not get the ack
// move from pending to transfers
group.transfers[std::make_pair(peer_number, transfer_id)] = {
peer_key,
msg_id,
0.f,
{}, // empty buffer
file_size,
};
pending.at(msg_id).time_since_ft_activity = 0.f;
// TODO: keep the pending until later?
//pending.erase(msg_id);
return true; // accept
}
void _handle_HS1_ft_recv_data(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
uint8_t transfer_id,
size_t data_offset,
const uint8_t* data, size_t data_size,
void* user_data
) {
assert(user_data);
NGC_HS1* ngc_hs1_ctx = static_cast<NGC_HS1*>(user_data);
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
auto& group = ngc_hs1_ctx->history[g_id];
// get based on transfer_id
if (!group.transfers.count(std::make_pair(peer_number, transfer_id))) {
if (data_offset != 0) {
fprintf(stderr, "HS: !! got stray tf data from %d tid:%d\n", peer_number, transfer_id);
return;
}
// new transfer?
fprintf(stderr, "HS: !! got new transfer from %d tid:%d\n", peer_number, transfer_id);
}
fprintf(stderr, "HS: recv_data from %d tid:%d\n", peer_number, transfer_id);
auto& transfer = group.transfers.at(std::make_pair(peer_number, transfer_id));
transfer.time_since_ft_activity = 0.f;
// TODO: also timer for pending?
// TODO: optimize
for (size_t i = 0; i < data_size; i++) {
transfer.recv_buffer.push_back(data[i]);
}
// TODO: data done?
if (data_offset + data_size == transfer.file_size) {
fprintf(stderr, "HS: transfer done %d:%d\n", peer_number, transfer_id);
transfer.recv_buffer.push_back('\0');
fprintf(stderr, " message was %s\n", transfer.recv_buffer.data()+1);
auto& peer = group.peers[transfer.msg_peer];
peer.pending.erase(transfer.msg_id);
peer.append(transfer.msg_id, static_cast<Tox_Message_Type>(transfer.recv_buffer.front()), std::string(reinterpret_cast<const char*>(transfer.recv_buffer.data()+1)));
assert(ngc_hs1_ctx->cb_group_message);
// we don't notify if we don't know the peer id; this partially breaks things
if (peer.id.has_value()) {
ngc_hs1_ctx->cb_group_message(
tox,
group_number, peer.id.value(),
static_cast<Tox_Message_Type>(transfer.recv_buffer.front()),
transfer.recv_buffer.data()+1,
transfer.recv_buffer.size()-2,
transfer.msg_id
);
}
group.transfers.erase(std::make_pair(peer_number, transfer_id));
}
}
void _handle_HS1_ft_send_data(
Tox *tox,
uint32_t group_number,
uint32_t peer_number,
uint8_t transfer_id,
size_t data_offset, uint8_t* data, size_t data_size,
void* user_data
) {
assert(user_data);
NGC_HS1* ngc_hs1_ctx = static_cast<NGC_HS1*>(user_data);
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
auto& group = ngc_hs1_ctx->history[g_id];
if (!group.sending.count(std::make_pair(peer_number, transfer_id))) {
fprintf(stderr, "HS: error, unknown sending transfer %d:%d\n", peer_number, transfer_id);
return;
}
// map peer_number and transfer_id to peer_key and message_id
const auto& [msg_peer, msg_id] = group.sending.at(std::make_pair(peer_number, transfer_id));
// get msg
const auto& message = group.peers.at(msg_peer).dict.at(msg_id);
size_t data_i = 0;
if (data_offset == 0) {
// serialize the message type
data[data_i++] = message.type;
data_offset += 1;
}
for (size_t i = 0; data_i < data_size; i++, data_i++) {
data[data_i] = message.text.at(data_offset+i-1);
}
if (data_offset + data_size == 1 + message.text.size()) {
// done
fprintf(stderr, "HS: done %d:%d\n", peer_number, transfer_id);
group.sending.erase(std::make_pair(peer_number, transfer_id));
}
}
#define _HS1_HAVE(x, error) if ((length - curser) < (x)) { error; }
void _handle_HS1_REQUEST_LAST_IDS(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
) {
assert(user_data);
NGC_HS1* ngc_hs1_ctx = static_cast<NGC_HS1*>(user_data);
size_t curser = 0;
NGC_EXT::PeerKey p_key;
_HS1_HAVE(p_key.data.size(), fprintf(stderr, "HS: packet too small, missing pkey\n"); return)
std::copy(data+curser, data+curser+p_key.data.size(), p_key.data.begin());
curser += p_key.data.size();
_HS1_HAVE(1, fprintf(stderr, "HS: packet too small, missing count\n"); return)
uint8_t last_msg_id_count = data[curser++];
//fprintf(stderr, "HS: got request for last %u ids\n", last_msg_id_count);
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
auto& group = ngc_hs1_ctx->history[g_id];
std::vector<uint32_t> message_ids{};
if (!group.peers.empty() && group.peers.count(p_key)) {
const auto& peer = group.peers.at(p_key);
auto rit = peer.order.crbegin();
for (size_t c = 0; c < last_msg_id_count && rit != peer.order.crend(); c++, rit++) {
message_ids.push_back(*rit);
}
}
// - 1 byte packet id
// respond to a request with 0 or more message ids, sorted by newest first
// - peer_key bytes (the msg_ids are from)
// - 1 byte (uint8_t count ids, can be 0)
// - array [
// - msg_id bytes (the message id)
// - ]
//std::array<uint8_t, 1+TOX_GROUP_PEER_PUBLIC_KEY_SIZE+1+> pkg;
std::vector<uint8_t> pkg;
pkg.resize(1+TOX_GROUP_PEER_PUBLIC_KEY_SIZE+1+sizeof(uint32_t)*message_ids.size());
size_t packing_curser = 0;
pkg[packing_curser++] = NGC_EXT::HS1_RESPONSE_LAST_IDS;
std::copy(p_key.data.begin(), p_key.data.end(), pkg.begin()+packing_curser);
packing_curser += p_key.data.size();
pkg[packing_curser++] = message_ids.size();
for (size_t i = 0; i < message_ids.size(); i++) {
const uint8_t* tmp_ptr = reinterpret_cast<uint8_t*>(message_ids.data()+i);
// HACK: little endian
//std::copy(tmp_ptr, tmp_ptr+sizeof(uint32_t), pkg.begin()+1+TOX_GROUP_PEER_PUBLIC_KEY_SIZE+1+i*sizeof(uint32_t));
std::copy(tmp_ptr, tmp_ptr+sizeof(uint32_t), pkg.begin()+packing_curser);
packing_curser += sizeof(uint32_t);
}
tox_group_send_custom_private_packet(tox, group_number, peer_number, true, pkg.data(), pkg.size(), nullptr);
}
void _handle_HS1_RESPONSE_LAST_IDS(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
) {
assert(user_data);
NGC_HS1* ngc_hs1_ctx = static_cast<NGC_HS1*>(user_data);
size_t curser = 0;
NGC_EXT::PeerKey p_key;
_HS1_HAVE(p_key.data.size(), fprintf(stderr, "HS: packet too small, missing pkey\n"); return)
std::copy(data+curser, data+curser+p_key.data.size(), p_key.data.begin());
curser += p_key.data.size();
// TODO: did we ask?
_HS1_HAVE(1, fprintf(stderr, "HS: packet too small, missing count\n"); return)
uint8_t last_msg_id_count = data[curser++];
fprintf(stderr, "HS: got response with last %u ids:\n", last_msg_id_count);
if (last_msg_id_count == 0) {
return;
}
// get group id
NGC_EXT::GroupKey g_id{};
{ // TODO: error
tox_group_get_chat_id(tox, group_number, g_id.data.data(), nullptr);
}
// get peer
auto& peer = ngc_hs1_ctx->history[g_id].peers[p_key];
//std::vector<uint32_t> message_ids{};
for (size_t i = 0; i < last_msg_id_count && curser+sizeof(uint32_t) <= length; i++) {
uint32_t msg_id;
// HACK: little endian
std::copy(data+curser, data+curser+sizeof(uint32_t), reinterpret_cast<uint8_t*>(&msg_id));
curser += sizeof(uint32_t);
//message_ids.push_back(msg_id);
fprintf(stderr, " %08X", msg_id);
if (peer.hear(msg_id, peer_number)) {
fprintf(stderr, " - NEW");
}
fprintf(stderr, "\n");
}
// TODO: replace this assert; a malformed packet can trigger a remote crash
assert(curser == length);
}
#undef _HS1_HAVE
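For reference, the two HS1 packets built above have a simple layout; a worked sketch (editor's addition, packet-id values are symbolic since they are defined in the ngc_ext header):

// HS1_REQUEST_LAST_IDS, sent in _iterate_group (asking for up to last_msg_ids_count ids):
//   [ HS1_REQUEST_LAST_IDS id ] [ TOX_GROUP_PEER_PUBLIC_KEY_SIZE bytes peer key ] [ count ]
// HS1_RESPONSE_LAST_IDS, sent in _handle_HS1_REQUEST_LAST_IDS with N message ids:
//   [ HS1_RESPONSE_LAST_IDS id ] [ TOX_GROUP_PEER_PUBLIC_KEY_SIZE bytes peer key ] [ N ]
//   [ id0: 4 bytes ] ... [ idN-1: 4 bytes ]
// i.e. 1 + TOX_GROUP_PEER_PUBLIC_KEY_SIZE + 1 + 4*N bytes in total; the
// "HACK: little endian" comments mark where the host byte order of a uint32_t is
// copied into the packet unchanged, so both sides must agree on endianness.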

ngc_hs1.h (modified)

@@ -3,31 +3,19 @@
// this is a c header
// outline:
//
//#include <stdbool.h>
//#include <stddef.h>
//#include <stdint.h>
#include <tox/tox.h>
#include "ngc_ext.h"
#include "ngc_ft1.h"
#ifdef __cplusplus
extern "C" {
#endif
// copy from tox.h:
#ifndef TOX_DEFINED
#define TOX_DEFINED
typedef struct Tox Tox;
#endif /* TOX_DEFINED */
// ========== struct / typedef ==========
typedef struct NGC_HS1 NGC_HS1;
@@ -39,18 +27,33 @@ struct NGC_HS1_options {
// 2 mods
// 3 founders
// 4 no one (above founder)
uint8_t default_trust_level = 2;
uint8_t default_trust_level /*= 2*/;
//bool test;
bool record_others;
float query_interval_per_peer; // 15.f
size_t last_msg_ids_count; // 5
float ft_activity_timeout; // seconds 60.f
};
// ========== init / kill ==========
// (see tox api)
NGC_HS1* NGC_HS1_new(const struct NGC_HS1_options* options);
bool NGC_HS1_register_ext(NGC_HS1* ngc_hs1_ctx, NGC_EXT_CTX* ngc_ext_ctx);
bool NGC_HS1_register_ft1(NGC_HS1* ngc_hs1_ctx, NGC_FT1* ngc_ft1_ctx);
//bool NGC_HS1_init(NGC_EXT_CTX* ngc_ext_ctx, const struct NGC_HS1_options* options);
void NGC_HS1_kill(NGC_HS1* ngc_hs1_ctx);
//void NGC_HS1_kill(NGC_EXT_CTX* ngc_ext_ctx);
// ========== iterate ==========
void NGC_HS1_iterate(Tox *tox, NGC_HS1* ngc_hs1_ctx/*, void *user_data*/);
void NGC_HS1_iterate(Tox *tox, NGC_HS1* ngc_hs1_ctx);
//void NGC_HS1_iterate(Tox *tox, NGC_EXT_CTX* ngc_ext_ctx/*, void *user_data*/);
// ========== peer online/offline ==========
void NGC_HS1_peer_online(Tox* tox, NGC_HS1* ngc_hs1_ctx, uint32_t group_number, uint32_t peer_number, bool online);
// ========== send ==========
@@ -81,11 +84,29 @@ void NGC_HS1_record_own_message(
// ========== receive message ==========
// shim (same interface)
typedef void NGC_HS1_group_message_cb(
Tox *tox,
uint32_t group_number,
uint32_t peer_id,
Tox_Message_Type type,
const uint8_t *message,
size_t length,
uint32_t message_id
);
// ========== receive request ==========
// callback for when history sync has a new message
void NGC_HS1_register_callback_group_message(NGC_HS1* ngc_hs1_ctx, NGC_HS1_group_message_cb* callback); // TODO: userdata
// ========== receive answer ==========
// record others msg
void NGC_HS1_record_message(
const Tox *tox,
NGC_HS1* ngc_hs1_ctx,
uint32_t group_number,
uint32_t peer_number,
Tox_Message_Type type, const uint8_t *message, size_t length, uint32_t message_id
);
#ifdef __cplusplus
}
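A minimal setup sketch for the history-sync API above (editor's addition; setup_hs1, on_hs_message and the chosen option values are illustrative, with the interval/count/timeout numbers taken from the comments in NGC_HS1_options):

#include "ngc_hs1.h"

// called when history sync recovers a message, mirroring the normal group message callback
static void on_hs_message(Tox* tox, uint32_t group_number, uint32_t peer_id,
                          Tox_Message_Type type, const uint8_t* message,
                          size_t length, uint32_t message_id) {
    (void)tox; (void)group_number; (void)peer_id; (void)type; (void)message; (void)length; (void)message_id;
}

NGC_HS1* setup_hs1(NGC_EXT_CTX* ext_ctx, NGC_FT1* ft1) {
    struct NGC_HS1_options opts = {};
    opts.default_trust_level = 2;        // 2 = mods, per the comment in the header
    opts.record_others = true;
    opts.query_interval_per_peer = 15.f;
    opts.last_msg_ids_count = 5;
    opts.ft_activity_timeout = 60.f;

    NGC_HS1* hs1 = NGC_HS1_new(&opts);
    NGC_HS1_register_ext(hs1, ext_ctx);  // HS1_REQUEST/RESPONSE_LAST_IDS packet handlers
    NGC_HS1_register_ft1(hs1, ft1);      // NGC_HS1_MESSAGE_BY_ID file transfers
    NGC_HS1_register_callback_group_message(hs1, on_hs_message);
    return hs1; // call NGC_HS1_iterate(tox, hs1) regularly and NGC_HS1_kill(hs1) on shutdown
}

Messages the application itself sends should go through NGC_HS1_shim_group_send_message (declared above) so they are recorded and can be served to other peers.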

ngc_hs1.hpp (new file, +116)

@@ -0,0 +1,116 @@
#pragma once
#include "./ngc_hs1.h"
#include "../tox_ngc_ext/ngc_ext.hpp"
#include "ngc_ft1.h"
#include <cstdint>
#include <map>
#include <list>
#include <set>
#include <vector>
#include <optional>
struct NGC_HS1 {
NGC_HS1_options options;
NGC_FT1* ngc_ft1_ctx {nullptr};
// callbacks
NGC_HS1_group_message_cb* cb_group_message {nullptr};
// key - key - key - value store
// group pubkey - peer pubkey - msg_id - message(type + text)
struct Message {
uint32_t msg_id{};
Tox_Message_Type type{};
std::string text{};
};
struct Peer {
std::optional<uint32_t> id;
std::map<uint32_t, Message> dict;
std::list<uint32_t> order; // ordered list of message ids
// msg_ids we have only heard of, with peer_number of who we heard it from
std::map<uint32_t, std::set<uint32_t>> heard_of;
struct PendingFTRequest {
uint32_t peer_number; // the peer we requested the message from
float time_since_ft_activity {0.f};
};
std::map<uint32_t, PendingFTRequest> pending; // key msg_id
// don't start immediately
float time_since_last_request_sent {0.f};
void append(uint32_t msg_id, Tox_Message_Type type, const std::string& text);
// returns if new (from that peer)
bool hear(uint32_t msg_id, uint32_t peer_number);
};
struct Group {
std::map<NGC_EXT::PeerKey, Peer> peers;
struct FileTransfers {
NGC_EXT::PeerKey msg_peer;
uint32_t msg_id;
float time_since_ft_activity {0.f};
std::vector<uint8_t> recv_buffer; // message gets dumped into here
size_t file_size {0};
};
// key: peer_number + transfer_id
std::map<std::pair<uint32_t, uint8_t>, FileTransfers> transfers;
struct Sending {
NGC_EXT::PeerKey msg_peer;
uint32_t msg_id;
};
std::map<std::pair<uint32_t, uint8_t>, Sending> sending;
};
std::map<NGC_EXT::GroupKey, Group> history;
};
void _handle_HS1_REQUEST_LAST_IDS(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
);
void _handle_HS1_RESPONSE_LAST_IDS(
Tox* tox,
NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t *data,
size_t length,
void* user_data
);
void _handle_HS1_ft_request_message(
Tox *tox, NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size
);
bool _handle_HS1_ft_init_message(
Tox *tox, NGC_EXT_CTX* ngc_ext_ctx,
uint32_t group_number,
uint32_t peer_number,
const uint8_t* file_id, size_t file_id_size,
const uint8_t transfer_id,
const size_t file_size
);