have_chunk + refactor

commit 707bfc7fd6
parent 0606db457b
@@ -16,6 +16,9 @@ add_executable(tox_ngc_ft1_tool
    ./hash_utils.hpp
    ./hash_utils.cpp

    ./ft_sha1_info.hpp
    ./ft_sha1_info.cpp

    ./tox_callbacks.hpp
    ./tox_callbacks.cpp

src/ft_sha1_info.cpp (new file, 68 lines)
@@ -0,0 +1,68 @@
#include "./ft_sha1_info.hpp"

#include <sodium.h>

SHA1Digest::SHA1Digest(const std::vector<uint8_t>& v) {
    assert(v.size() == data.size());
    for (size_t i = 0; i < data.size(); i++) {
        data[i] = v[i];
    }
}

SHA1Digest::SHA1Digest(const uint8_t* d, size_t s) {
    assert(s == data.size());
    for (size_t i = 0; i < data.size(); i++) {
        data[i] = d[i];
    }
}

std::ostream& operator<<(std::ostream& out, const SHA1Digest& v) {
    std::string str{};
    str.resize(v.size()*2, '?');

    // HECK, std is 1 larger than size returns ('\0')
    sodium_bin2hex(str.data(), str.size()+1, v.data.data(), v.data.size());

    out << str;

    return out;
}

std::vector<uint8_t> FTInfoSHA1::toBuffer(void) const {
    std::vector<uint8_t> buffer;

    assert(!file_name.empty());
    // TODO: optimize
    for (size_t i = 0; i < 256; i++) {
        if (i < file_name.size()) {
            buffer.push_back(file_name.at(i));
        } else {
            buffer.push_back(0);
        }
    }
    assert(buffer.size() == 256);

    { // HACK: endianess
        buffer.push_back((file_size>>(0*8)) & 0xff);
        buffer.push_back((file_size>>(1*8)) & 0xff);
        buffer.push_back((file_size>>(2*8)) & 0xff);
        buffer.push_back((file_size>>(3*8)) & 0xff);
        buffer.push_back((file_size>>(4*8)) & 0xff);
        buffer.push_back((file_size>>(5*8)) & 0xff);
        buffer.push_back((file_size>>(6*8)) & 0xff);
        buffer.push_back((file_size>>(7*8)) & 0xff);
    }
    assert(buffer.size() == 256+8);

    // chunk size?

    for (const auto& chunk : chunks) {
        for (size_t i = 0; i < chunk.data.size(); i++) {
            buffer.push_back(chunk.data[i]);
        }
    }
    assert(buffer.size() == 256+8+20*chunks.size());

    return buffer;
}

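The buffer written by toBuffer() has a fixed layout: a 256-byte zero-padded file name, the 64-bit file size in little-endian byte order, then one 20-byte SHA1 digest per chunk. The commit does not add a parser for it; the sketch below is an editor's illustration of a counterpart (the name fromBuffer and the ParsedSHA1Info type are made up for this example, not code from the repository):

#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

struct ParsedSHA1Info {
    std::string file_name;
    uint64_t file_size {0};
    std::vector<std::array<uint8_t, 20>> chunk_hashes;
};

// hypothetical counterpart to FTInfoSHA1::toBuffer(); not part of this commit
std::optional<ParsedSHA1Info> fromBuffer(const std::vector<uint8_t>& buffer) {
    // layout: 256 bytes name + 8 bytes size + N*20 bytes of chunk digests
    if (buffer.size() < 256+8 || (buffer.size() - (256+8)) % 20 != 0) {
        return std::nullopt;
    }

    ParsedSHA1Info info;

    // file name: zero padded, stop at the first 0 byte
    for (size_t i = 0; i < 256 && buffer[i] != 0; i++) {
        info.file_name.push_back(static_cast<char>(buffer[i]));
    }

    // file size: least significant byte first, mirroring the "HACK: endianess" block
    for (size_t i = 0; i < 8; i++) {
        info.file_size |= uint64_t(buffer[256+i]) << (i*8);
    }

    // chunk digests, 20 bytes each
    for (size_t off = 256+8; off + 20 <= buffer.size(); off += 20) {
        std::array<uint8_t, 20> digest{};
        for (size_t i = 0; i < 20; i++) {
            digest[i] = buffer[off+i];
        }
        info.chunk_hashes.push_back(digest);
    }

    return info;
}
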
src/ft_sha1_info.hpp
@@ -2,6 +2,7 @@

#include <cstdint>
#include <array>
#include <ostream>
#include <vector>
#include <cassert>
#include <string>
@@ -9,56 +10,24 @@
struct SHA1Digest {
    std::array<uint8_t, 20> data;

    SHA1Digest(const std::vector<uint8_t>& v) {
        assert(v.size() == data.size());
        for (size_t i = 0; i < data.size(); i++) {
            data[i] = v[i];
        }
    }
    SHA1Digest(const std::vector<uint8_t>& v);

    SHA1Digest(const uint8_t* d, size_t s);

    bool operator==(const SHA1Digest& other) const { return data == other.data; }
    bool operator!=(const SHA1Digest& other) const { return data != other.data; }

    size_t size(void) const { return data.size(); }
};

std::ostream& operator<<(std::ostream& out, const SHA1Digest& v);

struct FTInfoSHA1 {
    std::string file_name;
    uint64_t file_size {0};
    static constexpr size_t chunk_size {4*1024}; // 4KiB for now
    std::vector<SHA1Digest> chunks;

    std::vector<uint8_t> toBuffer(void) const {
        std::vector<uint8_t> buffer;

        assert(!file_name.empty());
        // TODO: optimize
        for (size_t i = 0; i < 256; i++) {
            if (i < file_name.size()) {
                buffer.push_back(file_name.at(i));
            } else {
                buffer.push_back(0);
            }
        }
        assert(buffer.size() == 256);

        { // HACK: endianess
            buffer.push_back((file_size>>(0*8)) & 0xff);
            buffer.push_back((file_size>>(1*8)) & 0xff);
            buffer.push_back((file_size>>(2*8)) & 0xff);
            buffer.push_back((file_size>>(3*8)) & 0xff);
            buffer.push_back((file_size>>(4*8)) & 0xff);
            buffer.push_back((file_size>>(5*8)) & 0xff);
            buffer.push_back((file_size>>(6*8)) & 0xff);
            buffer.push_back((file_size>>(7*8)) & 0xff);
        }
        assert(buffer.size() == 256+8);

        // chunk size?

        for (const auto& chunk : chunks) {
            for (size_t i = 0; i < chunk.data.size(); i++) {
                buffer.push_back(chunk.data[i]);
            }
        }
        assert(buffer.size() == 256+8+20*chunks.size());

        return buffer;
    }
    std::vector<uint8_t> toBuffer(void) const;
};

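With chunk_size fixed at 4 KiB, the size of the serialized info block follows directly from the file size. A small sanity check written by the editor against the layout above (the helper names are not from the repository, and it assumes the final, possibly partial, chunk also gets its own digest):

#include <cstddef>
#include <cstdint>

// digests needed for a file, assuming a trailing partial chunk is still hashed
constexpr size_t chunkCount(uint64_t file_size, size_t chunk_size = 4*1024) {
    return (file_size + chunk_size - 1) / chunk_size;
}

// expected FTInfoSHA1::toBuffer() size: 256-byte name field + 8-byte size + 20 bytes per chunk
constexpr size_t infoBufferSize(uint64_t file_size) {
    return 256 + 8 + 20*chunkCount(file_size);
}

static_assert(chunkCount(10*1024*1024) == 2560, "10 MiB -> 2560 chunks of 4 KiB");
static_assert(infoBufferSize(10*1024*1024) == 256 + 8 + 20*2560, "info block stays small");
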
@@ -12,6 +12,7 @@

#include <iostream>
#include <cassert>
#include <vector>

namespace States {

@@ -61,13 +62,15 @@ bool SendStartSHA1::iterate(void) {

std::unique_ptr<StateI> SendStartSHA1::nextState(void) {
    std::cout << "SendStartSHA1 switching state to SHA1\n";
    std::vector<bool> have_chunk(_sha1_info.chunks.size(), true);
    // we are done setting up
    return std::make_unique<SHA1>(
        _tcl,
        std::move(_file_map),
        std::move(_sha1_info),
        std::move(_sha1_info_data),
        std::move(_sha1_info_hash)
        std::move(_sha1_info_hash),
        std::move(have_chunk)
    );
}

@@ -1,5 +1,7 @@
#include "./sha1.hpp"

#include <iostream>

namespace States {

SHA1::SHA1(
@@ -7,17 +9,28 @@ SHA1::SHA1(
    mio::mmap_source&& file_map,
    const FTInfoSHA1&& sha1_info,
    const std::vector<uint8_t>&& sha1_info_data,
    const std::vector<uint8_t>&& sha1_info_hash
    const std::vector<uint8_t>&& sha1_info_hash,
    std::vector<bool>&& have_chunk
) :
    StateI(tcl),
    _file_map(std::move(file_map)),
    _sha1_info(std::move(sha1_info)),
    _sha1_info_data(std::move(sha1_info_data)),
    _sha1_info_hash(std::move(sha1_info_hash))
    _sha1_info_hash(std::move(sha1_info_hash)),
    _have_chunk(std::move(have_chunk))
{
    _have_all = true;
    for (const bool it : _have_chunk) {
        if (!it) {
            _have_all = false;
            break;
        }
    }
}

bool SHA1::iterate(void) {
    // TODO: unmap and remap the file every couple of minutes to keep ram usage down?
    // TODO: when to stop?
    return false;
}

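The constructor above derives _have_all by scanning the bitmap once. For reference only, an editor's aside (not code from the commit): the same check, including the "empty bitmap counts as complete" behaviour, is what the standard library expresses as std::all_of.

#include <algorithm>
#include <vector>

// true when every chunk is present; also true for an empty bitmap, matching the loop above
bool allChunksPresent(const std::vector<bool>& have_chunk) {
    return std::all_of(have_chunk.begin(), have_chunk.end(), [](bool b) { return b; });
}
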
@@ -27,20 +40,53 @@ std::unique_ptr<StateI> SHA1::nextState(void) {

// sha1_info
void SHA1::onFT1ReceiveRequestSHA1Info(uint32_t group_number, uint32_t peer_number, const uint8_t* file_id, size_t file_id_size) {
    // start tf (init) for sha1_info
    if (file_id_size != _sha1_info_hash.size()) {
        std::cerr << "SHA1 got request for sha1_info of wrong size!!\n";
        return;
    }

bool SHA1::onFT1ReceiveInitSHA1Info(uint32_t group_number, uint32_t peer_number, const uint8_t* file_id, size_t file_id_size, const uint8_t transfer_id, const size_t file_size) {
    SHA1Digest requested_hash(file_id, file_id_size);

    if (requested_hash != _sha1_info_hash) {
        std::cout << "SHA1 ignoring diffenrent info request " << requested_hash << "\n";
    }

    // same hash, should respond
    // prio higher then chunks?
}

bool SHA1::onFT1ReceiveInitSHA1Info(uint32_t, uint32_t, const uint8_t*, size_t, const uint8_t, const size_t) {
    // no, in this state we have init
    return false;
}

void SHA1::onFT1ReceiveDataSHA1Info(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, size_t data_offset, const uint8_t* data, size_t data_size) {
void SHA1::onFT1ReceiveDataSHA1Info(uint32_t, uint32_t, uint8_t, size_t, const uint8_t*, size_t) {
    // no, in this state we have init
}

void SHA1::onFT1SendDataSHA1Info(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, size_t data_offset, uint8_t* data, size_t data_size) {
    // should all be fine
    (void)group_number;
    (void)peer_number;
    (void)transfer_id;

    for (size_t i = 0; i < data_size; i++) {
        data[i] = _sha1_info_data.at(data_offset+i);
    }
    // knowing when to end might be important
}

// sha1_chunk
void SHA1::onFT1ReceiveRequestSHA1Chunk(uint32_t group_number, uint32_t peer_number, const uint8_t* file_id, size_t file_id_size) {
#if 0
    bool have {false};
    if (_have_all) {
        have = _have_all;
    } else if (haveChunk(xxx)) {
        have = true;
    }
#endif
}

bool SHA1::onFT1ReceiveInitSHA1Chunk(uint32_t group_number, uint32_t peer_number, const uint8_t* file_id, size_t file_id_size, const uint8_t transfer_id, const size_t file_size) {

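The disabled block in onFT1ReceiveRequestSHA1Chunk calls a haveChunk(...) helper that does not exist yet, and its argument is still the xxx placeholder. One plausible shape for it over the new _have_chunk bitmap, sketched by the editor; the free-function form and parameter names are assumptions, and in the class it would presumably be a member consulting _have_chunk and _have_all directly, with the chunk index derived from the requested file_id:

#include <cstddef>
#include <vector>

// hypothetical helper: is the chunk at `chunk_index` locally available?
bool haveChunk(size_t chunk_index, const std::vector<bool>& have_chunk, bool have_all) {
    if (have_all) {
        return true; // seeder or completed download: everything is present
    }
    if (chunk_index >= have_chunk.size()) {
        return false; // out-of-range request, treat as missing
    }
    return have_chunk[chunk_index];
}
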
@@ -5,6 +5,7 @@
#include "../ft_sha1_info.hpp"

#include <mio/mio.hpp>
#include <unordered_map>

namespace States {

@@ -17,7 +18,8 @@ struct SHA1 final : public StateI {
        mio::mmap_source&& file_map,
        const FTInfoSHA1&& sha1_info,
        const std::vector<uint8_t>&& sha1_info_data,
        const std::vector<uint8_t>&& sha1_info_hash
        const std::vector<uint8_t>&& sha1_info_hash,
        std::vector<bool>&& have_chunk = {}
    );
    ~SHA1(void) override = default;

@@ -41,7 +43,11 @@ struct SHA1 final : public StateI {
    mio::mmap_source _file_map;
    const FTInfoSHA1 _sha1_info;
    const std::vector<uint8_t> _sha1_info_data;
    const std::vector<uint8_t> _sha1_info_hash;
    const SHA1Digest _sha1_info_hash;

    // index is the same as for info
    std::vector<bool> _have_chunk;
    bool _have_all {false};
};

} // States