#include "./sha1.hpp"

#include "../tox_client.hpp"
#include "../hash_utils.hpp"

#include <cassert> // assert() is used in the constructor
#include <iostream>
#include <tuple>

namespace States {

SHA1::SHA1(
	ToxClient& tcl,
	mio::mmap_sink&& file_map,
	const FTInfoSHA1&& sha1_info,
	const std::vector<uint8_t>&& sha1_info_data,
	//const std::vector<uint8_t>&& sha1_info_hash,
	const SHA1Digest&& sha1_info_hash,
	std::vector<bool>&& have_chunk
) :
	StateI(tcl),
	_file_map(std::move(file_map)),
	_sha1_info(std::move(sha1_info)),
	_sha1_info_data(std::move(sha1_info_data)),
	_sha1_info_hash(std::move(sha1_info_hash)),
	_have_chunk(std::move(have_chunk))
{
	assert(_have_chunk.size() == _sha1_info.chunks.size());

	_have_all = true;
	_have_count = 0;
	for (size_t i = 0; i < _have_chunk.size(); i++) {
		if (_have_chunk[i]) {
			_have_count++;
		} else {
			_have_all = false;
			_chunk_want_queue.push_back(i);
		}
	}

	// if not sequential, shuffle _chunk_want_queue
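	// a minimal sketch of that shuffle, assuming _chunk_want_queue has
	// random-access iterators (e.g. a std::deque) and _rng is a standard
	// UniformRandomBitGenerator (needs <algorithm>):
	//   std::shuffle(_chunk_want_queue.begin(), _chunk_want_queue.end(), _rng);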

	// build lookup table
	for (size_t i = 0; i < _sha1_info.chunks.size(); i++) {
		_chunk_hash_to_index[_sha1_info.chunks[i]] = i;
	}
}

bool SHA1::iterate(float delta) {
	{ // timer and timeouts
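		// each in-flight entry carries a float "time since last activity";
		// every iterate() adds delta to it, and entries that cross their
		// timeout are dropped (and, for receiving chunks, requeued).
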
		// info
		for (auto it = _transfers_requested_info.begin(); it != _transfers_requested_info.end();) {
			float& time_since_remote_activity = std::get<float>(*it);
			time_since_remote_activity += delta;

			// if we have not heard for 10sec, timeout
			if (time_since_remote_activity >= 10.f) {
				std::cerr << "SHA1 info transfer timed out " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
				it = _transfers_requested_info.erase(it);
			} else {
				it++;
			}
		}

		// chunk sending
		for (auto it = _transfers_sending_chunk.begin(); it != _transfers_sending_chunk.end();) {
			float& time_since_remote_activity = std::get<float>(*it);
			time_since_remote_activity += delta;

			// if we have not heard for 10sec, timeout
			if (time_since_remote_activity >= 10.f) {
				std::cerr << "SHA1 sending chunk transfer timed out " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
				it = _transfers_sending_chunk.erase(it);
			} else {
				it++;
			}
		}

		// chunk receiving
		for (auto it = _transfers_receiving_chunk.begin(); it != _transfers_receiving_chunk.end();) {
			float& time_since_remote_activity = std::get<float>(*it);
			time_since_remote_activity += delta;

			// if we have not heard for 10sec, timeout
			if (time_since_remote_activity >= 10.f) {
				std::cerr << "SHA1 receiving chunk transfer timed out " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
				_chunk_want_queue.push_front(std::get<4>(*it)); // put it back (at the front, so it is retried first)
				it = _transfers_receiving_chunk.erase(it);
			} else {
				it++;
			}
		}

		// sent requests
		for (auto it = _chunks_requested.begin(); it != _chunks_requested.end();) {
			it->second += delta;

			// if we have not heard for 15sec, timeout
			if (it->second >= 15.f) {
				_chunk_want_queue.push_front(it->first); // put it back
				it = _chunks_requested.erase(it);
			} else {
				it++;
			}
		}
	}

	// if we have not reached the total cap for outgoing transfers
	if (_transfers_requested_info.size() + _transfers_sending_chunk.size() < _max_concurrent_out) {
		// for each peer? transfer cap per peer?

		// first check requests for info
		if (!_queue_requested_info.empty()) {
			// send init to the peer at the front of _queue_requested_info
			const auto [group_number, peer_number] = _queue_requested_info.front();

			uint8_t transfer_id {0};
			_tcl.sendFT1InitPrivate(
				group_number, peer_number,
				NGC_FT1_file_kind::HASH_SHA1_INFO,
				_sha1_info_hash.data.data(), _sha1_info_hash.size(), // id (info hash)
				_sha1_info_data.size(), // "file_size"
				transfer_id
			);

			_transfers_requested_info.push_back({
				group_number, peer_number,
				transfer_id,
				0.f
			});

			_queue_requested_info.pop_front();
		} else if (!_queue_requested_chunk.empty()) { // then check for chunk requests
			const auto [group_number, peer_number, chunk_hash] = _queue_requested_chunk.front();
			// queueUpRequestChunk() only queues chunks we have, so value() is safe here
			size_t chunk_index = chunkIndex(chunk_hash).value();
			size_t chunk_file_size = chunkSize(chunk_index);

			uint8_t transfer_id {0};
			_tcl.sendFT1InitPrivate(
				group_number, peer_number,
				NGC_FT1_file_kind::HASH_SHA1_CHUNK,
				chunk_hash.data.data(), chunk_hash.size(), // id (chunk hash)
				chunk_file_size,
				transfer_id
			);

			_transfers_sending_chunk.push_back({
				group_number, peer_number,
				transfer_id,
				0.f,
				chunk_index
			});

			_queue_requested_chunk.pop_front();
		}
	}
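
	// downloads are pull-based: we request a wanted chunk by its hash from a
	// randomly picked peer, and that peer (if it has the chunk) answers with
	// an FT1 init, which we then accept in onFT1ReceiveInitSHA1Chunk().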
	if (!_have_all && !_chunk_want_queue.empty() && _chunks_requested.size() + _transfers_receiving_chunk.size() < _max_concurrent_in) {
		// send out request, no burst tho
		std::vector<std::pair<uint32_t, uint32_t>> target_peers;
		_tcl.forEachGroup([&target_peers, this](uint32_t group_number) {
			_tcl.forEachGroupPeer(group_number, [&target_peers, group_number](uint32_t peer_number) {
				target_peers.push_back({group_number, peer_number});
			});
		});

		if (!target_peers.empty()) {
			//if (_distrib.max() != target_peers.size()) {
				//std::uniform_int_distribution<size_t> new_dist{0, target_peers.size()-1};
				//_distrib.param(new_dist.param());
			//}
			//size_t target_index = _distrib(_rng);
			size_t target_index = _rng() % target_peers.size();
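			// note: the modulo introduces a slight bias towards low indices
			// unless _rng's range is a multiple of target_peers.size(); the
			// commented-out std::uniform_int_distribution above would avoid
			// that, at the cost of re-parameterizing when the peer count changes.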
			auto [group_number, peer_number] = target_peers.at(target_index);

			size_t chunk_index = _chunk_want_queue.front();
			_chunks_requested[chunk_index] = 0.f;
			_chunk_want_queue.pop_front();

			_tcl.sendFT1RequestPrivate(group_number, peer_number, NGC_FT1_file_kind::HASH_SHA1_CHUNK, _sha1_info.chunks[chunk_index].data.data(), 20); // 20 == sha1 digest size
		}
	}

	// TODO: unmap and remap the file every couple of minutes to keep ram usage down?
	// TODO: when to stop?
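	// a rough sketch for the remap idea, assuming we also kept the file path
	// around (mio::mmap_sink exposes unmap() and map()); _file_path is
	// hypothetical here:
	//   std::error_code ec;
	//   _file_map.unmap();
	//   _file_map.map(_file_path, ec);
	//   if (ec) { /* handle remap failure */ }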

	return false;
}

std::unique_ptr<StateI> SHA1::nextState(void) {
	return nullptr;
}

// sha1_info
void SHA1::onFT1ReceiveRequestSHA1Info(uint32_t group_number, uint32_t peer_number, const uint8_t* file_id, size_t file_id_size) {
	// start tf (init) for sha1_info
	if (file_id_size != _sha1_info_hash.size()) {
		std::cerr << "SHA1 got request for sha1_info of wrong size!!\n";
		return;
	}

	SHA1Digest requested_hash(file_id, file_id_size);
	if (requested_hash != _sha1_info_hash) {
		std::cout << "SHA1 ignoring different info request " << requested_hash << "\n";
		return;
	}

	// same hash, should respond
	// prio higher than chunks?

	// add to requested queue
	queueUpRequestInfo(group_number, peer_number);
}

bool SHA1::onFT1ReceiveInitSHA1Info(uint32_t, uint32_t, const uint8_t*, size_t, const uint8_t, const size_t) {
	// no, in this state we already have the info
	return false;
}

void SHA1::onFT1ReceiveDataSHA1Info(uint32_t, uint32_t, uint8_t, size_t, const uint8_t*, size_t) {
	// no, in this state we already have the info
	assert(false && "ft should have dropped this for us!");
}

void SHA1::onFT1SendDataSHA1Info(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, size_t data_offset, uint8_t* data, size_t data_size) {
	// should all be fine
	for (size_t i = 0; i < data_size; i++) {
		data[i] = _sha1_info_data.at(data_offset + i);
	}

	// TODO: sub optimal (linear scan; a keyed-lookup sketch follows this function)
	for (auto it = _transfers_requested_info.begin(); it != _transfers_requested_info.end(); it++) {
		if (std::get<0>(*it) == group_number && std::get<1>(*it) == peer_number && std::get<2>(*it) == transfer_id) {
			std::get<float>(*it) = 0.f; // reset activity timer
			break;
		}
	}

	// if last data block
	if (data_offset + data_size == _sha1_info_data.size()) {
		// this transfer is "done" (ft still could have to retransfer)
		for (auto it = _transfers_requested_info.cbegin(); it != _transfers_requested_info.cend(); it++) {
			if (std::get<0>(*it) == group_number && std::get<1>(*it) == peer_number && std::get<2>(*it) == transfer_id) {
				std::cout << "SHA1 info transfer finished " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << "\n";
				_transfers_requested_info.erase(it);
				break;
			}
		}
	}
}
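
// the linear scans over the transfer lists could be avoided with a map keyed
// by (group_number, peer_number, transfer_id); a minimal sketch, with a
// hypothetical member replacing the current vector-of-tuples storage
// (needs <map>):
//   using TransferKey = std::tuple<uint32_t, uint32_t, uint8_t>;
//   std::map<TransferKey, float> _transfers_requested_info;
//   // resetting the activity timer then becomes:
//   _transfers_requested_info[{group_number, peer_number, transfer_id}] = 0.f;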

// sha1_chunk
void SHA1::onFT1ReceiveRequestSHA1Chunk(uint32_t group_number, uint32_t peer_number, const uint8_t* file_id, size_t file_id_size) {
	if (file_id_size != 20) { // 20 == sha1 digest size
		std::cerr << "SHA1 got request for sha1_chunk of wrong size!!\n";
		return;
	}

	SHA1Digest requested_hash(file_id, file_id_size);
	if (!haveChunk(requested_hash)) {
		// we don't have it (or it is not part of this file), ignore
		return;
	}

	queueUpRequestChunk(group_number, peer_number, requested_hash);
}

bool SHA1::onFT1ReceiveInitSHA1Chunk(uint32_t group_number, uint32_t peer_number, const uint8_t* file_id, size_t file_id_size, const uint8_t transfer_id, const size_t file_size) {
	if (_transfers_receiving_chunk.size() >= _max_concurrent_in) {
		// reject, max concurrent incoming transfers reached
		return false;
	}

	if (file_id_size != 20) { // 20 == sha1 digest size
		std::cerr << "SHA1 got init for sha1_chunk of wrong size!!\n";
		return false;
	}

	SHA1Digest incoming_hash(file_id, file_id_size);
	if (haveChunk(incoming_hash)) {
		std::cout << "SHA1 ignoring init for chunk we already have " << incoming_hash << "\n";
		return false;
	}

	auto chunk_i_opt = chunkIndex(incoming_hash);
	if (!chunk_i_opt.has_value()) {
		std::cout << "SHA1 ignoring init for unrelated chunk " << incoming_hash << "\n";
		return false;
	}
	size_t chunk_index = chunk_i_opt.value();

	// check transfers
	for (const auto& it : _transfers_receiving_chunk) {
		if (std::get<4>(it) == chunk_index) {
			// already in transit
			return false;
		}
	}

	_transfers_receiving_chunk.push_back(
		std::make_tuple(
			group_number, peer_number,
			transfer_id,
			0.f,
			chunk_index
		)
	);

	// remove from requests
	_chunks_requested.erase(chunk_index);

	return true;
}

void SHA1::onFT1ReceiveDataSHA1Chunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, size_t data_offset, const uint8_t* data, size_t data_size) {
	// check transfers
	for (auto it = _transfers_receiving_chunk.begin(); it != _transfers_receiving_chunk.end(); it++) {
		if (std::get<0>(*it) == group_number && std::get<1>(*it) == peer_number && std::get<2>(*it) == transfer_id) {
			std::get<float>(*it) = 0.f; // reset activity timer

			const size_t chunk_index = std::get<4>(*it);
			size_t file_offset = chunk_index * _sha1_info.chunk_size;

			// TODO: optimize (see the memcpy sketch after this function)
			for (size_t i = 0; i < data_size; i++) {
				_file_map[file_offset + data_offset + i] = data[i];
			}

			size_t chunk_file_size = chunkSize(chunk_index);

			// if last data block
			if (data_offset + data_size == chunk_file_size) {
				// hash and verify
				SHA1Digest test_hash = hash_sha1(_file_map.data() + file_offset, chunk_file_size);
				if (test_hash != _sha1_info.chunks[chunk_index]) {
					std::cerr << "SHA1 received chunk's hash does not match!, discarding\n";
					_transfers_receiving_chunk.erase(it);
					_chunk_want_queue.push_front(chunk_index); // put back in queue
					break;
				}

				_have_chunk[chunk_index] = true;
				_have_count++;
				_have_all = _have_count == _sha1_info.chunks.size();

				std::cout << "SHA1 chunk received " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << " " << chunk_index << " (" << 100.f * float(_have_count) / _sha1_info.chunks.size() << "%)\n";
				_transfers_receiving_chunk.erase(it);
			}
			break;
		}
	}
}
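
// the byte-wise copy loops above and in onFT1SendDataSHA1Chunk() (the
// "TODO: optimize" spots) could likely be replaced with a single memcpy,
// e.g. (needs <cstring>):
//   std::memcpy(_file_map.data() + file_offset + data_offset, data, data_size);
// this assumes ft already guarantees offset + size stay within the chunk.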

void SHA1::onFT1SendDataSHA1Chunk(uint32_t group_number, uint32_t peer_number, uint8_t transfer_id, size_t data_offset, uint8_t* data, size_t data_size) {
	// TODO: sub optimal (linear scan; see the keyed-lookup sketch after onFT1SendDataSHA1Info())
	for (auto it = _transfers_sending_chunk.begin(); it != _transfers_sending_chunk.end(); it++) {
		if (std::get<0>(*it) == group_number && std::get<1>(*it) == peer_number && std::get<2>(*it) == transfer_id) {
			std::get<float>(*it) = 0.f; // reset activity timer

			const size_t chunk_index = std::get<4>(*it);
			size_t file_offset = chunk_index * _sha1_info.chunk_size;

			// TODO: optimize (see the memcpy sketch above)
			for (size_t i = 0; i < data_size; i++) {
				data[i] = _file_map[file_offset + data_offset + i];
			}

			// if last data block
			if (data_offset + data_size == chunkSize(chunk_index)) {
				std::cout << "SHA1 chunk sent " << std::get<0>(*it) << ":" << std::get<1>(*it) << "." << int(std::get<2>(*it)) << " " << chunk_index << "\n";
				_transfers_sending_chunk.erase(it);
			}
			break;
		}
	}
}

void SHA1::queueUpRequestInfo(uint32_t group_number, uint32_t peer_number) {
	// check ongoing transfers for dup
	for (const auto& it : _transfers_requested_info) {
		// if already in an ongoing transfer
		if (std::get<0>(it) == group_number && std::get<1>(it) == peer_number) {
			return;
		}
	}

	for (auto& [i_g, i_p] : _queue_requested_info) {
		// if already in queue
		if (i_g == group_number && i_p == peer_number) {
			return;
		}
	}

	// not in queue yet
	_queue_requested_info.push_back(std::make_pair(group_number, peer_number));
}

void SHA1::queueUpRequestChunk(uint32_t group_number, uint32_t peer_number, const SHA1Digest& hash) {
	// TODO: also check ongoing transfers for dups
	for (auto& [i_g, i_p, i_h] : _queue_requested_chunk) {
		// if already in queue
		if (i_g == group_number && i_p == peer_number && i_h == hash) {
			return;
		}
	}

	// not in queue yet
	_queue_requested_chunk.push_back(std::make_tuple(group_number, peer_number, hash));
}

std::optional<size_t> SHA1::chunkIndex(const SHA1Digest& hash) const {
	const auto it = _chunk_hash_to_index.find(hash);
	if (it != _chunk_hash_to_index.cend()) {
		return it->second;
	} else {
		return std::nullopt;
	}
}

size_t SHA1::chunkSize(size_t chunk_index) const {
	if (chunk_index + 1 == _sha1_info.chunks.size()) {
		// last chunk
		return _sha1_info.file_size - chunk_index * _sha1_info.chunk_size;
	} else {
		return _sha1_info.chunk_size;
	}
}
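// e.g. with file_size = 1000 and chunk_size = 256 there are 4 chunks:
// chunks 0..2 are 256 bytes each, the last chunk is 1000 - 3*256 = 232 bytes.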

bool SHA1::haveChunk(const SHA1Digest& hash) const {
	if (_have_all) {
		return true;
	}

	if (auto i_opt = chunkIndex(hash); i_opt.has_value()) {
		return _have_chunk[i_opt.value()];
	}

	// not part of this file
	return false;
}

} // States