Compare commits


4 Commits

3 changed files with 378 additions and 68 deletions


@@ -214,7 +214,6 @@ struct TextDocument {
if (!differ && list_start == state.list.size() && text_start == text.size()) {
return {};
}
//std::cout << "list.size: " << state.list.size() << "(" << getText().size() << ")" << " text.size: " << text.size() << "\n";
//std::cout << "list_start: " << list_start << " text_start: " << text_start << "\n";
@@ -224,7 +223,9 @@ struct TextDocument {
//for (; list_end > 0 && text_end > 0 && list_end >= list_start && text_end >= text_start;) {
//while (list_end >= list_start && text_end >= text_start) {
size_t list_end_counted = 0;
while (list_start_counted - list_end_counted > state.doc_size && text_end >= text_start) {
differ = false; // var reuse
//while (list_start_counted - list_end_counted > state.doc_size && text_end >= text_start) {
while (state.doc_size - list_start_counted > list_end_counted && text_end >= text_start) {
// jump over tombstones
if (!state.list[list_end-1].value.has_value()) {
list_end--;
@@ -232,6 +233,7 @@ struct TextDocument {
}
if (state.list[list_end-1].value.value() != text[text_end-1]) {
differ = true;
break;
}
@@ -240,20 +242,29 @@ struct TextDocument {
list_end_counted++;
}
if (!differ && text_start == text_end+1) {
// we ran into each other without seeing the different char
// TODO: do we need to increment list_end? text_end?
list_end++;
}
//std::cout << "list_end: " << list_end << " text_end: " << text_end << "\n";
//std::cout << "substring before: " << text.substr(text_start, text.size() - state.doc_size) << "\n";
std::vector<Op> ops;
// 1. clear range (del all list_start - list_end)
if (list_start <= list_end && list_start < state.list.size()) {
//list_end += list_start == list_end;
ops = delRange(
state.list[list_start].id,
(list_start == list_end ? list_end+1 : list_end) < state.list.size() ? std::make_optional(state.list[list_end].id) : std::nullopt
list_end < state.list.size() ? std::make_optional(state.list[list_end].id) : std::nullopt
);
//std::cout << "deleted: " << ops.size() << "\n";
}
//std::cout << "text between: " << getText() << "\n";
//std::cout << "substring between: " << text.substr(text_start, text.size() - state.doc_size) << "\n";
// 2. add range (add all text_start - text_end)
if (state.doc_size < text.size()) {
@@ -266,7 +277,6 @@ struct TextDocument {
ops.insert(ops.end(), tmp_add_ops.begin(), tmp_add_ops.end());
}
//assert(false && "implement me");
return ops;
}
};
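
The merge() change above is easier to read against the underlying idea: walk the common prefix and the common suffix of the stored text and the incoming text, then delete the stale middle range and insert the new middle. Below is a minimal sketch of that scan, assuming plain strings only; it ignores tombstones, CRDT ids and the doc_size bookkeeping, and every name in it is illustrative rather than taken from the diff.

// Sketch: compute the middle range that differs between an old and a new text.
// Not the TextDocument implementation; tombstones and ids are ignored.
#include <cstddef>
#include <string_view>

struct SimpleDiff {
    size_t del_begin;        // first differing index in the old text
    size_t del_end;          // one past the last differing index in the old text
    std::string_view insert; // slice of the new text to insert at del_begin
};

inline SimpleDiff simpleDiff(std::string_view old_text, std::string_view new_text) {
    size_t prefix = 0;
    while (prefix < old_text.size() && prefix < new_text.size()
           && old_text[prefix] == new_text[prefix]) {
        prefix++;
    }
    size_t suffix = 0;
    while (suffix < old_text.size() - prefix && suffix < new_text.size() - prefix
           && old_text[old_text.size() - 1 - suffix] == new_text[new_text.size() - 1 - suffix]) {
        suffix++;
    }
    return {prefix, old_text.size() - suffix,
            new_text.substr(prefix, new_text.size() - suffix - prefix)};
}

For example, simpleDiff("aiiiiib\n", "aiiiiib\naiiiiib") keeps the whole first line as the common prefix and yields a pure 7-character insertion with nothing to delete, which matches what the paste tests in the test file below assert.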


@@ -418,8 +418,9 @@ void testBugDoubleDel(void) {
{
std::string_view new_text{"a"};
doc.merge(new_text);
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
{
@@ -445,20 +446,23 @@ void testBugSameDel(void) {
{
std::string_view new_text{"a"};
doc.merge(new_text);
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
{
std::string_view new_text{"aa"};
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
{
std::string_view new_text{"a"};
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
}
@@ -468,32 +472,122 @@ void testBugSameDel2(void) {
{
std::string_view new_text{"a"};
doc.merge(new_text);
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
{
std::string_view new_text{"aa"};
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
{
std::string_view new_text{"aaa"};
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
{
std::string_view new_text{"aa"};
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
{
std::string_view new_text{"a"};
const auto ops = doc.merge(new_text);
assert(doc.getText() == new_text);
assert(ops.size() == 1);
}
}
void testMulti1(void) {
Doc docA;
docA.local_agent = 'A';
Doc docB;
docB.local_agent = 'B';
// state A
{
std::string_view new_text{"iiiiiii"};
const auto ops = docA.merge(new_text);
assert(docA.getText() == new_text);
assert(docB.apply(ops));
assert(docB.getText() == new_text);
assert(docB.state.doc_size == docA.state.doc_size);
assert(docB.state.list.size() == docA.state.list.size());
}
// now B inserts b
{
std::string_view new_text{"iiibiiii"};
const auto ops = docB.merge(new_text);
assert(docB.getText() == new_text);
assert(ops.size() == 1); // 1 new inserted char, nothing to delete
assert(docA.apply(ops));
assert(docA.getText() == new_text);
}
}
void testPaste1(void) {
Doc docA;
docA.local_agent = 'A';
{
std::string_view new_text{"iiiiiii"};
const auto ops = docA.merge(new_text);
assert(ops.size() == 7);
assert(docA.getText() == new_text);
}
{
std::string_view new_text{"iiiiiii\n"};
const auto ops = docA.merge(new_text);
assert(ops.size() == 1);
assert(docA.getText() == new_text);
}
{
std::string_view new_text{"iiiiiii\niiiiiii"};
const auto ops = docA.merge(new_text);
assert(ops.size() == 7);
assert(docA.getText() == new_text);
}
}
void testPaste2(void) {
Doc docA;
docA.local_agent = 'A';
{
std::string_view new_text{"aiiiiib"};
const auto ops = docA.merge(new_text);
assert(ops.size() == 7);
assert(docA.getText() == new_text);
}
{
std::string_view new_text{"aiiiiib\n"};
const auto ops = docA.merge(new_text);
assert(ops.size() == 1);
assert(docA.getText() == new_text);
}
{
std::string_view new_text{"aiiiiib\naiiiiib"};
const auto ops = docA.merge(new_text);
assert(ops.size() == 7);
assert(docA.getText() == new_text);
}
}
@@ -580,6 +674,27 @@ int main(void) {
testBugSameDel2();
}
std::cout << std::string(40, '=') << "\n";
{
std::cout << "testMulti1:\n";
testMulti1();
}
std::cout << std::string(40, '=') << "\n";
{
std::cout << "testPaste1:\n";
testPaste1();
}
std::cout << std::string(40, '=') << "\n";
{
std::cout << "testPaste2:\n";
testPaste2();
}
return 0;
}


@@ -1,4 +1,3 @@
#include "toxcore/tox.h"
#include <crdt/text_document.hpp>
#include <nlohmann/json.hpp>
@@ -19,6 +18,8 @@ extern "C" {
#include <mutex>
#include <atomic>
#include <chrono>
#include <random>
#include <utility>
#include <iostream>
#include <cassert>
@@ -286,6 +287,24 @@ echo 'setup done'
} // namespace vim
// visibility hack
struct RequestCommand {
Agent agent;
uint64_t seq{0};
};
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(RequestCommand,
agent,
seq
)
// hash for unordered_set
template<>
struct std::hash<std::pair<uint32_t, Agent>> {
std::size_t operator()(std::pair<uint32_t, Agent> const& s) const noexcept {
return std::hash<uint32_t>{}(s.first) << 3 ^ std::hash<Agent>{}(s.second);
}
};
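
A small self-contained illustration of why that std::hash specialization is there (stand-in types, not code from this repo): without a hash for the pair, an std::unordered_set keyed on (peer id, agent) will not compile.

// Stand-in demo: DemoAgent replaces the real Agent type, the values are made up.
#include <cstdint>
#include <unordered_set>
#include <utility>

enum class DemoAgent : uint32_t {};

template<>
struct std::hash<std::pair<uint32_t, DemoAgent>> {
    std::size_t operator()(std::pair<uint32_t, DemoAgent> const& s) const noexcept {
        // same shift-and-xor combining as the specialization above
        return std::hash<uint32_t>{}(s.first) << 3 ^ std::hash<DemoAgent>{}(s.second);
    }
};

int main() {
    std::unordered_set<std::pair<uint32_t, DemoAgent>> requested;
    requested.emplace(1u, DemoAgent{7});
    return requested.count({1u, DemoAgent{7}}) == 1 ? 0 : 1;
}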
struct SharedContext {
std::atomic_bool should_quit {false};
@@ -302,6 +321,10 @@ struct SharedContext {
std::unordered_map<ToxPubKey, uint64_t> staging_frontier; // last seq we have in command_lists, via tox
// (can be lower than command_frontier for local agent
// TODO
std::mutex unknown_agents_mutex;
std::unordered_set<ToxPubKey> unknown_agents; // list of agents we read about but dont have in command/staging frontier
// contains remote changes with missing parent seq
// could merge into command_lists
std::unordered_map<ToxPubKey, std::unordered_map<uint64_t, Command>> buffer;
@@ -310,10 +333,17 @@ struct SharedContext {
std::unordered_set<ToxPubKey> should_gossip_remote; // list of ids we have new seq for (only modified by tox thread)
std::unordered_map<ToxPubKey, uint64_t> heard_gossip; // seq frontiers we have heard about
// peer ids that requested the last known seq for agent
std::unordered_set<std::pair<uint32_t, Agent>> requested_frontier;
// peer ids that requested a command
std::unordered_map<uint32_t, RequestCommand> requested_commands;
Tox* tox {nullptr};
bool tox_dht_online {false};
bool tox_group_online {false};
uint32_t tox_group_number {-1u};
std::unordered_set<uint32_t> tox_seen_peers;
};
namespace tox {
@@ -323,9 +353,10 @@ namespace pkg {
enum PKGID : uint8_t {
FRONTIER = 32,
REQUEST_FRONTIER,
REQUEST_FRONTIERS,
COMMAND,
REQUEST_COMMANDS,
REQUEST_COMMAND,
};
// send the currently last known seq you have (excluding buffer)
@@ -350,16 +381,18 @@ namespace pkg {
using Command = ::Command;
// request every command for agent after seq (inclusive)
struct RequestCommands {
Agent agent;
uint64_t seq{0};
};
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(RequestCommands,
agent,
seq
)

// request every command for agent after_seq - until_seq (inclusive)
//struct RequestCommands {
//Agent agent;
//uint64_t after_seq{0};
//uint64_t until_seq{0};
//};
using RequestCommand = ::RequestCommand;
//NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(RequestCommand,
//agent,
//seq
//)
} // namespace pkg
@@ -392,6 +425,8 @@ static void group_custom_private_packet_cb(Tox* tox, uint32_t group_number, uint
void toxThread(SharedContext* ctx) {
using namespace std::chrono_literals;
std::minstd_rand rng{1337};
TOX_ERR_OPTIONS_NEW err_opt_new;
Tox_Options* options = tox_options_new(&err_opt_new);
assert(err_opt_new == TOX_ERR_OPTIONS_NEW::TOX_ERR_OPTIONS_NEW_OK);
@@ -430,8 +465,8 @@ void toxThread(SharedContext* ctx) {
};
DHT_node nodes[] {
{"tox.plastiras.org", 33445, "8E8B63299B3D520FB377FE5100E65E3322F7AE5B20A0ACED2981769FC5B43725", {}}, // 14
{"tox2.plastiras.org", 33445, "B6626D386BE7E3ACA107B46F48A5C4D522D29281750D44A0CBA6A2721E79C951", {}}, // 14
};
for (size_t i = 0; i < sizeof(nodes)/sizeof(DHT_node); i ++) {
@@ -449,10 +484,6 @@ void toxThread(SharedContext* ctx) {
// "thread local"
Agent agent_local;
//tox_group_self_get_public_key()
//tox_group_send_custom_packet()
//tox_group_send_custom_private_packet()
while (!ctx->should_quit) {
// tox iterate
tox_iterate(ctx->tox, ctx);
@@ -484,52 +515,140 @@ void toxThread(SharedContext* ctx) {
std::cout << "tox connected to group\n";
}
} else { // do the thing
std::vector<std::pair<Agent, uint64_t>> missing_in_buffer;
{ // pump from buffer to staging
const size_t max_commands = 1;
const size_t max_commands = 2;
size_t number_of_commands_done = 0;
std::lock_guard lg_staging{ctx->staging_mutex};
for (auto& [agent, buffer] : ctx->buffer) {
if (agent == agent_local) {
// skip ? self
continue;
}
if (number_of_commands_done >= max_commands) {
break;
}
// determine the seq we are looking for in buffer
uint64_t seq {0};
if (ctx->staging_frontier.count(agent)) {
seq = ctx->staging_frontier.at(agent) + 1;
}
if (!buffer.count(seq)) { // not in buffer, skip
continue;
}
// this can lead to dead locks, if other code does this wrong
std::lock_guard lg{ctx->command_lists_mutex};
for (; buffer.count(seq); seq++) {
ctx->command_lists[agent][seq] = buffer.at(seq);
number_of_commands_done++;
if (number_of_commands_done >= max_commands) {
break;
}
}
ctx->staging_frontier[agent] = seq;
ctx->should_gossip_remote.emplace(agent);
}

std::vector<Agent> empty_buffers;
{
std::lock_guard lg_staging{ctx->staging_mutex};
for (auto& [agent, buffer] : ctx->buffer) {
if (buffer.empty()) {
empty_buffers.push_back(agent);
continue;
}
if (agent == agent_local) {
// skip ? self
continue;
}
if (number_of_commands_done >= max_commands) {
break;
}
// determine the seq we are looking for in buffer
uint64_t seq {0};
if (ctx->staging_frontier.count(agent)) {
seq = ctx->staging_frontier.at(agent) + 1;
}
if (!buffer.count(seq)) { // not in buffer, skip
// check if old in buffer
for (const auto& it : buffer) {
if (it.first < seq) {
assert(false && "buffer not clean !!");
}
}
//std::cout << "!!! buffer not empty but not next seq\n";
missing_in_buffer.push_back(std::make_pair(agent, seq));
continue;
}
std::vector<uint64_t> seq_to_remove;
{ // this can lead to dead locks, if other code does this wrong
std::lock_guard lg{ctx->command_lists_mutex};
for (; buffer.count(seq); seq++) {
ctx->command_lists[agent][seq] = buffer.at(seq);
ctx->staging_frontier[agent] = seq;
seq_to_remove.push_back(seq);
number_of_commands_done++;
if (number_of_commands_done >= max_commands) {
break;
}
}
//ctx->staging_frontier[agent] = seq;
}
ctx->should_gossip_remote.emplace(agent);
for (const auto key : seq_to_remove) {
buffer.erase(key);
}
if (buffer.empty()) {
empty_buffers.push_back(agent);
}
}
} // scope for staging lock
for (const auto& agent : empty_buffers) {
ctx->buffer.erase(agent);
} }
}
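
As context for the reworked pump loop above, here is a reduced model of what it does per agent: drain only contiguous sequence numbers from the out-of-order receive buffer into the ordered command list and report the first gap, which is what then drives the command requests below. Mutexes, the per-tick budget and the real Command/agent types are left out; all names are stand-ins.

// Reduced model of the buffer -> staging pump for a single agent.
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <unordered_map>

struct DemoPump {
    std::unordered_map<uint64_t, std::string> buffer; // out-of-order arrivals by seq
    std::map<uint64_t, std::string> command_list;     // contiguous, ordered history
    std::optional<uint64_t> frontier;                 // highest staged seq so far

    // Move contiguous seqs into command_list; return the first missing seq, if any.
    std::optional<uint64_t> pump() {
        uint64_t seq = frontier ? *frontier + 1 : 0;
        while (buffer.count(seq)) {
            command_list.emplace(seq, buffer.at(seq));
            buffer.erase(seq);
            frontier = seq;
            seq++;
        }
        return buffer.empty() ? std::nullopt : std::make_optional(seq);
    }
};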
// request missing in buffer
{ // request missing in buffer
// (every tick lol)
for (const auto& [agent, seq] : missing_in_buffer) {
// ask random peer_id we have seen before
const uint32_t peer_id = *ctx->tox_seen_peers.cbegin() + (rng() % ctx->tox_seen_peers.size());
const auto status = tox_group_peer_get_connection_status(ctx->tox, ctx->tox_group_number, peer_id, nullptr);
if (status == TOX_CONNECTION_NONE) {
// bad luck, skip
// TODO: do seen peers cleanup
continue;
}
// send request for command
pkg::RequestCommand rc_pkg{
agent, seq
};
std::vector<uint8_t> data = nlohmann::json::to_msgpack(rc_pkg);
// prepend pkgid
data.emplace(data.begin(), static_cast<uint8_t>(pkg::PKGID::REQUEST_COMMAND));
Tox_Err_Group_Send_Custom_Private_Packet send_err{TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK};
if (!tox_group_send_custom_private_packet(ctx->tox, ctx->tox_group_number, peer_id, true, data.data(), data.size(), &send_err)) {
std::cerr << "failed to send command request packet for " << std::dec << seq << " from " << std::hex << (int)agent[0] << " " << std::dec << (int)send_err << "\n";
assert(send_err != TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_TOO_LONG);
} else {
std::cout << "sent command request packet for " << std::dec << seq << " from " << std::hex << (int)agent[0] << " to " << std::dec << peer_id << "\n";
}
}
}
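
The request loop above wants a random previously-seen peer; one way to pick a uniformly random element from an unordered_set is to advance an iterator by a random offset, since the stored peer ids themselves need not be contiguous. A hedged alternative sketch, not the code in this commit:

// Pick a uniformly random element from a non-empty set of seen peer ids.
#include <cstdint>
#include <iterator>
#include <random>
#include <unordered_set>

inline uint32_t pickRandomPeer(const std::unordered_set<uint32_t>& seen_peers, std::minstd_rand& rng) {
    auto it = seen_peers.cbegin();
    std::advance(it, rng() % seen_peers.size()); // caller guarantees the set is non-empty
    return *it;
}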
// request frontier (implicit list of agents)
// only every couple of seconds, can get large
// OR get back random agent and do it often
// handle requests
{ // handle requests
// TODO: this lock is trash
if (!ctx->requested_commands.empty()) {
std::lock_guard lg{ctx->command_lists_mutex};
for (const auto& [peer_id, request] : ctx->requested_commands) {
if (ctx->command_lists.count(request.agent) && ctx->command_lists.at(request.agent).count(request.seq)) {
const auto& command = ctx->command_lists.at(request.agent).at(request.seq);
// send command
std::vector<uint8_t> data = nlohmann::json::to_msgpack(command);
// prepend pkgid
data.emplace(data.begin(), static_cast<uint8_t>(pkg::PKGID::COMMAND));
Tox_Err_Group_Send_Custom_Private_Packet send_err{TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_OK};
if (!tox_group_send_custom_private_packet(ctx->tox, ctx->tox_group_number, peer_id, true, data.data(), data.size(), &send_err)) {
std::cerr << "failed to send command packet " << send_err << "\n";
assert(send_err != TOX_ERR_GROUP_SEND_CUSTOM_PRIVATE_PACKET_TOO_LONG);
} else {
std::cout << "sent requested command to " << peer_id << "\n";
}
}
// else, we dont care. maybe check staging too
}
// HACK: clear each tick
ctx->requested_commands.clear();
std::cout << "cleared requested commands\n";
}
}
{ // gossip frontier
// for mutex locking simplicity this is an either-or
@@ -560,8 +679,10 @@ void toxThread(SharedContext* ctx) {
// prepend pkgid
data.emplace(data.begin(), static_cast<uint8_t>(pkg::PKGID::FRONTIER));
if (!tox_group_send_custom_packet(ctx->tox, 0, true, data.data(), data.size(), nullptr)) {
std::cerr << "failed to send gossip packet of local agent\n";
Tox_Err_Group_Send_Custom_Packet send_err{TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK};
if (!tox_group_send_custom_packet(ctx->tox, 0, true, data.data(), data.size(), &send_err)) {
std::cerr << "failed to send gossip packet of local agent" << send_err << "\n";
assert(send_err != TOX_ERR_GROUP_SEND_CUSTOM_PACKET_TOO_LONG);
// TODO: set should_gossip_local back to true?
} else {
std::cout << "sent gossip of local agent\n";
@@ -572,14 +693,45 @@ void toxThread(SharedContext* ctx) {
// prepend pkgid
data.emplace(data.begin(), static_cast<uint8_t>(pkg::PKGID::COMMAND));
if (!tox_group_send_custom_packet(ctx->tox, 0, true, data.data(), data.size(), nullptr)) {
Tox_Err_Group_Send_Custom_Packet send_err{TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK};
if (!tox_group_send_custom_packet(ctx->tox, 0, true, data.data(), data.size(), &send_err)) {
std::cerr << "failed to send command packet of local agent\n";
assert(send_err != TOX_ERR_GROUP_SEND_CUSTOM_PACKET_TOO_LONG);
} else {
std::cout << "sent command of local agent\n";
}
}
} else if (!ctx->should_gossip_remote.empty()) {
std::lock_guard lg{ctx->command_lists_mutex};
// we got new remote staged, lets amp the traffic
// only do first
auto it = ctx->should_gossip_remote.cbegin();
pkg::Frontier f_pkg{
*it,
0u
};
{ // lock
std::lock_guard lg{ctx->staging_mutex};
assert(ctx->staging_frontier.count(*it));
f_pkg.seq = ctx->staging_frontier.at(*it);
}
std::vector<uint8_t> data = nlohmann::json::to_msgpack(f_pkg);
// prepend pkgid
data.emplace(data.begin(), static_cast<uint8_t>(pkg::PKGID::FRONTIER));
Tox_Err_Group_Send_Custom_Packet send_err{TOX_ERR_GROUP_SEND_CUSTOM_PACKET_OK};
if (!tox_group_send_custom_packet(ctx->tox, 0, true, data.data(), data.size(), &send_err)) {
std::cerr << "failed to send gossip packet " << send_err << "\n";
assert(send_err != TOX_ERR_GROUP_SEND_CUSTOM_PACKET_TOO_LONG);
} else {
std::cout << "sent gossip of remote agent\n";
}
ctx->should_gossip_remote.erase(it);
}
}
}
@@ -824,9 +976,12 @@ int main(void) {
seq = ctx.command_frontier[agent] + 1;
}
for (; seq <= staging_seq; seq++) {
assert(ctx.command_lists.count(agent));
assert(ctx.command_lists.at(agent).count(seq));
// !! this can get expensive, while we are holding locks :/
bool r = doc.apply(ctx.command_lists.at(agent).at(seq).ops);
// TODO: actually this can fail with missing parents of an agent we never heard about before
if (!r) {
std::cout << "faild to apply:\n";
for (const auto& op : ctx.command_lists.at(agent).at(seq).ops) {
@@ -920,9 +1075,20 @@ int main(void) {
if (ctx.command_frontier.count(ctx.agent)) { // get last own seq
seq = ctx.command_frontier[ctx.agent] + 1;
}
const size_t max_ops {5}; // limit ops per command so we can fit them into packets
for (size_t i = 0; i < ops.size(); i+=max_ops, seq++) {
std::vector<Doc::Op> tmp_ops {ops.cbegin()+i, ops.cbegin()+i+1};

// 5 can be too much
// 3 seems safe, but is slow
const size_t max_ops {4}; // limit ops per command so we can fit them into packets
size_t check_op_count {0};
for (size_t i = 0; i < ops.size(); seq++) {
// TODO: check
//size_t chunk_size = std::min(max_ops, ops.size()-i);
//std::vector<Doc::Op> tmp_ops {ops.cbegin()+i, ops.cbegin()+i+chunk_size};
std::vector<Doc::Op> tmp_ops;
for (auto it = ops.cbegin()+i; it != ops.cend() && tmp_ops.size() <= max_ops; it++) {
tmp_ops.push_back(*it);
}
assert(!tmp_ops.empty());
local_command_list.emplace(seq, Command{
@@ -931,7 +1097,11 @@ int main(void) {
tmp_ops
});
ctx.command_frontier[ctx.agent] = seq;
i += tmp_ops.size();
check_op_count += tmp_ops.size();
}
assert(check_op_count == ops.size());
}
ctx.should_gossip_local.store(true);
}
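
The chunking loop above exists to keep each Command small enough to serialize into a single custom packet. A compact sketch of the same splitting idea in isolation, with stand-in types and without the seq/frontier bookkeeping:

// Split a flat op list into chunks of at most max_ops entries.
#include <algorithm>
#include <cstddef>
#include <vector>

using DemoOp = int; // stand-in for Doc::Op

inline std::vector<std::vector<DemoOp>> chunkOps(const std::vector<DemoOp>& ops, size_t max_ops) {
    std::vector<std::vector<DemoOp>> chunks;
    for (size_t i = 0; i < ops.size(); i += max_ops) {
        const size_t count = std::min(max_ops, ops.size() - i);
        chunks.emplace_back(ops.cbegin() + i, ops.cbegin() + i + count);
    }
    return chunks;
}

With max_ops = 4 and 10 ops this yields chunks of 4, 4 and 2, and the chunk sizes always sum to ops.size(), which is what the check_op_count assert above verifies.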
@@ -963,7 +1133,7 @@ static void self_connection_status_cb(Tox*, TOX_CONNECTION connection_status, vo
std::cout << "self_connection_status_cb " << connection_status << "\n";
}
static void handle_pkg(SharedContext& ctx, const uint8_t* data, size_t length) {
static void handle_pkg(SharedContext& ctx, const uint8_t* data, size_t length, uint32_t peer_id) {
if (length < 2) {
std::cerr << "got too short pkg " << length << "\n";
return;
@@ -976,6 +1146,9 @@ static void handle_pkg(SharedContext& ctx, const uint8_t* data, size_t length) {
return;
}
// TODO: keep track of time/connected disconnected
ctx.tox_seen_peers.emplace(peer_id);
std::cout << "pkg " << pkg_id << " j:" << p_j.dump() << "\n";
switch (pkg_id) {
@@ -990,6 +1163,7 @@ static void handle_pkg(SharedContext& ctx, const uint8_t* data, size_t length) {
}
case pkg::PKGID::REQUEST_FRONTIER: {
pkg::RequestFrontier pkg = p_j;
ctx.requested_frontier.emplace(peer_id, pkg.agent);
break;
}
case pkg::PKGID::COMMAND: {
@@ -997,14 +1171,23 @@ static void handle_pkg(SharedContext& ctx, const uint8_t* data, size_t length) {
// push to buffer, if not in buffer
if (!ctx.buffer[pkg.agent].count(pkg.seq)) {
{ // also check staging frontier, if it's not a dup
std::lock_guard lg {ctx.staging_mutex};
if (ctx.staging_frontier.count(pkg.agent) && pkg.seq <= ctx.staging_frontier.at(pkg.agent)) {
break; // already in staging or master
}
}
ctx.buffer[pkg.agent].emplace(pkg.seq, pkg);
std::cout << "pushed to buffer " << pkg.seq << " from " << pkg.agent << "\n";
}
// TODO: notify something?
break;
}
case pkg::PKGID::REQUEST_COMMANDS: {
pkg::RequestCommands pkg = p_j;
case pkg::PKGID::REQUEST_COMMAND: {
pkg::RequestCommand pkg = p_j;
// TODO: this can lead to double requests
// TODO: maybe settle for single seq requests for now, since they are individual packets anyway
ctx.requested_commands[peer_id] = pkg;
break;
}
default:
@@ -1016,13 +1199,15 @@ static void handle_pkg(SharedContext& ctx, const uint8_t* data, size_t length) {
static void group_custom_packet_cb(Tox*, uint32_t group_number, uint32_t peer_id, const uint8_t* data, size_t length, void* user_data) {
std::cout << "group_custom_packet_cb\n";
SharedContext& ctx = *static_cast<SharedContext*>(user_data);
handle_pkg(ctx, data, length);
assert(ctx.tox_group_number == group_number);
handle_pkg(ctx, data, length, peer_id);
}
static void group_custom_private_packet_cb(Tox*, uint32_t group_number, uint32_t peer_id, const uint8_t* data, size_t length, void* user_data) {
std::cout << "group_custom_private_packet_cb\n";
SharedContext& ctx = *static_cast<SharedContext*>(user_data);
handle_pkg(ctx, data, length);
assert(ctx.tox_group_number == group_number);
handle_pkg(ctx, data, length, peer_id);
}
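
For reference, the packets sent and parsed throughout this file share one framing: byte 0 carries the PKGID and the rest is a MessagePack-encoded JSON object. A minimal decoding sketch under that assumption; the enum values and the error handling here are illustrative, not lifted from handle_pkg:

// Decode a framed packet: [pkg id byte][msgpack payload].
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <nlohmann/json.hpp>

enum class DemoPkgId : uint8_t { FRONTIER = 32, REQUEST_FRONTIER, COMMAND, REQUEST_COMMAND };

inline bool decodePacket(const uint8_t* data, size_t length) {
    if (data == nullptr || length < 2) {
        return false; // need the id byte plus at least one payload byte
    }
    const auto id = static_cast<DemoPkgId>(data[0]);
    // parse without exceptions; a parse error yields a discarded value
    const auto payload = nlohmann::json::from_msgpack(data + 1, data + length, true, false);
    if (payload.is_discarded()) {
        return false;
    }
    std::cout << "pkg " << static_cast<int>(id) << " j:" << payload.dump() << "\n";
    return true;
}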
} }