From 964f6de6563b478c5584d552497234742ca2d9c9 Mon Sep 17 00:00:00 2001 From: Green Sky Date: Sun, 15 Sep 2024 11:39:23 +0200 Subject: [PATCH] big stream progress - stream manager (with bare bones ui) - debug video tap - toxav progress - toxav debug ui - some default devices --- src/CMakeLists.txt | 10 ++ src/content/frame_stream2.hpp | 54 ++++-- src/content/sdl_audio_frame_stream2.cpp | 32 +++- src/content/sdl_audio_frame_stream2.hpp | 19 +- src/content/sdl_video_frame_stream2.cpp | 22 ++- src/content/sdl_video_frame_stream2.hpp | 29 +-- src/debug_tox_call.cpp | 226 ++++++++++++++++++++++++ src/debug_tox_call.hpp | 53 ++++++ src/debug_video_tap.cpp | 204 +++++++++++++++++++++ src/debug_video_tap.hpp | 32 ++++ src/main.cpp | 20 +-- src/main_screen.cpp | 69 +++++++- src/main_screen.hpp | 10 ++ src/sdlrenderer_texture_uploader.cpp | 2 +- src/stream_manager.hpp | 210 ++++++++++++++++++++++ src/stream_manager_ui.cpp | 39 ++++ src/stream_manager_ui.hpp | 15 ++ src/tox_av.cpp | 10 +- src/tox_av.hpp | 2 +- 19 files changed, 989 insertions(+), 69 deletions(-) create mode 100644 src/debug_tox_call.cpp create mode 100644 src/debug_tox_call.hpp create mode 100644 src/debug_video_tap.cpp create mode 100644 src/debug_video_tap.hpp create mode 100644 src/stream_manager.hpp create mode 100644 src/stream_manager_ui.cpp create mode 100644 src/stream_manager_ui.hpp diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f8e337f..e3baf15 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -103,6 +103,13 @@ target_sources(tomato PUBLIC ./chat_gui4.hpp ./chat_gui4.cpp + ./stream_manager.hpp + ./stream_manager_ui.hpp + ./stream_manager_ui.cpp + + ./debug_video_tap.hpp + ./debug_video_tap.cpp + ./content/content.hpp ./content/frame_stream2.hpp ./content/sdl_video_frame_stream2.hpp @@ -116,6 +123,9 @@ if (TOMATO_TOX_AV) target_sources(tomato PUBLIC ./tox_av.hpp ./tox_av.cpp + + ./debug_tox_call.hpp + ./debug_tox_call.cpp ) target_compile_definitions(tomato PUBLIC TOMATO_TOX_AV) diff --git a/src/content/frame_stream2.hpp b/src/content/frame_stream2.hpp index 2186b40..494c8ac 100644 --- a/src/content/frame_stream2.hpp +++ b/src/content/frame_stream2.hpp @@ -33,6 +33,20 @@ struct FrameStream2I { virtual bool push(const FrameType& value) = 0; }; +template +struct FrameStream2SourceI { + virtual ~FrameStream2SourceI(void) {} + [[nodiscard]] virtual std::shared_ptr> subscribe(void) = 0; + virtual bool unsubscribe(const std::shared_ptr>& sub) = 0; +}; + +template +struct FrameStream2SinkI { + virtual ~FrameStream2SinkI(void) {} + [[nodiscard]] virtual std::shared_ptr> subscribe(void) = 0; + virtual bool unsubscribe(const std::shared_ptr>& sub) = 0; +}; + // needs count frames queue size // having ~1-2sec buffer size is often sufficent template @@ -76,33 +90,52 @@ struct QueuedFrameStream2 : public FrameStream2I { } }; -// implements a stream that pops or pushes to all sub streams -// you need to mind the direction you intend it to use +// implements a stream that pushes to all sub streams // release all streams before destructing! // TODO: improve lifetime here, maybe some shared semaphore? 
 template<typename FrameType, typename SubStreamType = QueuedFrameStream2<FrameType>>
-struct FrameStream2MultiStream : public FrameStream2I<FrameType> {
+struct FrameStream2MultiSource : public FrameStream2SourceI<FrameType>, public FrameStream2I<FrameType> {
 	using sub_stream_type_t = SubStreamType;
 
 	// pointer stability
-	std::vector<std::unique_ptr<SubStreamType>> _sub_streams;
+	std::vector<std::shared_ptr<SubStreamType>> _sub_streams;
 	std::mutex _sub_stream_lock; // accessing the _sub_streams array needs to be exclusive
 	// a simple lock here is ok, since this tends to be a rare operation,
 	// except for the push, which is always on the same thread
 
-	// TODO: forward args instead
-	SubStreamType* aquireSubStream(size_t queue_size = 10, bool lossy = true) {
+	virtual ~FrameStream2MultiSource(void) {}
+
+	//// TODO: forward args instead
+	//SubStreamType* aquireSubStream(size_t queue_size = 10, bool lossy = true) {
+	//	std::lock_guard lg{_sub_stream_lock};
+	//	return _sub_streams.emplace_back(std::make_unique<SubStreamType>(queue_size, lossy)).get();
+	//}
+	std::shared_ptr<FrameStream2I<FrameType>> subscribe(void) override {
+		// TODO: args???
+		size_t queue_size = 10;
+		bool lossy = true;
+
 		std::lock_guard lg{_sub_stream_lock};
-		return _sub_streams.emplace_back(std::make_unique<SubStreamType>(queue_size, lossy)).get();
+		return _sub_streams.emplace_back(std::make_unique<SubStreamType>(queue_size, lossy));
 	}
 
-	void releaseSubStream(SubStreamType* sub) {
+	//void releaseSubStream(SubStreamType* sub) {
+	//	std::lock_guard lg{_sub_stream_lock};
+	//	for (auto it = _sub_streams.begin(); it != _sub_streams.end(); it++) {
+	//		if (it->get() == sub) {
+	//			_sub_streams.erase(it);
+	//			break;
+	//		}
+	//	}
+	//}
+	bool unsubscribe(const std::shared_ptr<FrameStream2I<FrameType>>& sub) override {
 		std::lock_guard lg{_sub_stream_lock};
 		for (auto it = _sub_streams.begin(); it != _sub_streams.end(); it++) {
-			if (it->get() == sub) {
+			if (*it == sub) {
 				_sub_streams.erase(it);
-				break;
+				return true;
 			}
 		}
+		return false; // ?
 	}
 
 	// stream interface
@@ -113,6 +146,7 @@ struct FrameStream2MultiStream : public FrameStream2I<FrameType> {
 	}
 
 	std::optional<FrameType> pop(void) override {
+		// nope
 		assert(false && "this logic is very frame type specific, provide an impl");
 		return std::nullopt;
 	}
diff --git a/src/content/sdl_audio_frame_stream2.cpp b/src/content/sdl_audio_frame_stream2.cpp
index 26a21cf..a6d6999 100644
--- a/src/content/sdl_audio_frame_stream2.cpp
+++ b/src/content/sdl_audio_frame_stream2.cpp
@@ -124,7 +124,7 @@ bool SDLAudioOutputDeviceDefaultInstance::push(const AudioFrame& value) {
 		std::cerr << "empty audio frame??\n";
 	}
 
-	if (SDL_PutAudioStreamData(_stream.get(), data.ptr, data.size * sizeof(int16_t)) < 0) {
+	if (!SDL_PutAudioStreamData(_stream.get(), data.ptr, data.size * sizeof(int16_t))) {
 		std::cerr << "put data error\n";
 		return false; // return true?
 	}
@@ -145,26 +145,40 @@ SDLAudioOutputDeviceDefaultInstance::SDLAudioOutputDeviceDefaultInstance(SDLAudi
 SDLAudioOutputDeviceDefaultInstance::~SDLAudioOutputDeviceDefaultInstance(void) {
 }
 
-SDLAudioOutputDeviceDefaultInstance SDLAudioOutputDeviceDefaultFactory::create(void) {
-	SDLAudioOutputDeviceDefaultInstance new_instance;
+
+SDLAudioOutputDeviceDefaultSink::~SDLAudioOutputDeviceDefaultSink(void) {
+	// TODO: pause and close device?
+} + +std::shared_ptr> SDLAudioOutputDeviceDefaultSink::subscribe(void) { + auto new_instance = std::make_shared(); constexpr SDL_AudioSpec spec = { SDL_AUDIO_S16, 1, 48000 }; - new_instance._stream = { + new_instance->_stream = { SDL_OpenAudioDeviceStream(SDL_AUDIO_DEVICE_DEFAULT_PLAYBACK, &spec, nullptr, nullptr), &SDL_DestroyAudioStream }; - new_instance._last_sample_rate = spec.freq; - new_instance._last_channels = spec.channels; - new_instance._last_format = spec.format; + new_instance->_last_sample_rate = spec.freq; + new_instance->_last_channels = spec.channels; + new_instance->_last_format = spec.format; - if (!static_cast(new_instance._stream)) { + if (!static_cast(new_instance->_stream)) { std::cerr << "SDL open audio device failed!\n"; + return nullptr; } - const auto audio_device_id = SDL_GetAudioStreamDevice(new_instance._stream.get()); + const auto audio_device_id = SDL_GetAudioStreamDevice(new_instance->_stream.get()); SDL_ResumeAudioDevice(audio_device_id); return new_instance; } +bool SDLAudioOutputDeviceDefaultSink::unsubscribe(const std::shared_ptr>& sub) { + if (!sub) { + return false; + } + + return true; +} + diff --git a/src/content/sdl_audio_frame_stream2.hpp b/src/content/sdl_audio_frame_stream2.hpp index 08ae4d0..d5bc782 100644 --- a/src/content/sdl_audio_frame_stream2.hpp +++ b/src/content/sdl_audio_frame_stream2.hpp @@ -11,13 +11,13 @@ #include // we dont have to multicast ourself, because sdl streams and virtual devices already do this, but we do it anyway -using AudioFrameStream2MultiStream = FrameStream2MultiStream; -using AudioFrameStream2 = AudioFrameStream2MultiStream::sub_stream_type_t; // just use the default for now +using AudioFrameStream2MultiSource = FrameStream2MultiSource; +using AudioFrameStream2 = AudioFrameStream2MultiSource::sub_stream_type_t; // just use the default for now // object components? // source -struct SDLAudioInputDeviceDefault : protected AudioFrameStream2MultiStream { +struct SDLAudioInputDeviceDefault : protected AudioFrameStream2MultiSource { std::unique_ptr _stream; std::atomic _thread_should_quit {false}; @@ -30,12 +30,12 @@ struct SDLAudioInputDeviceDefault : protected AudioFrameStream2MultiStream { // stops the thread and closes the device? ~SDLAudioInputDeviceDefault(void); - using AudioFrameStream2MultiStream::aquireSubStream; - using AudioFrameStream2MultiStream::releaseSubStream; + using AudioFrameStream2MultiSource::subscribe; + using AudioFrameStream2MultiSource::unsubscribe; }; // sink -struct SDLAudioOutputDeviceDefaultInstance : protected AudioFrameStream2I { +struct SDLAudioOutputDeviceDefaultInstance : public AudioFrameStream2I { std::unique_ptr _stream; uint32_t _last_sample_rate {48'000}; @@ -53,9 +53,12 @@ struct SDLAudioOutputDeviceDefaultInstance : protected AudioFrameStream2I { }; // constructs entirely new streams, since sdl handles sync and mixing for us (or should) -struct SDLAudioOutputDeviceDefaultFactory { +struct SDLAudioOutputDeviceDefaultSink : public FrameStream2SinkI { // TODO: pause device? 
- SDLAudioOutputDeviceDefaultInstance create(void); + ~SDLAudioOutputDeviceDefaultSink(void); + + std::shared_ptr> subscribe(void) override; + bool unsubscribe(const std::shared_ptr>& sub) override; }; diff --git a/src/content/sdl_video_frame_stream2.cpp b/src/content/sdl_video_frame_stream2.cpp index d7c82f7..eff7fb4 100644 --- a/src/content/sdl_video_frame_stream2.cpp +++ b/src/content/sdl_video_frame_stream2.cpp @@ -1,4 +1,6 @@ #include "./sdl_video_frame_stream2.hpp" +#include "SDL3/SDL_camera.h" +#include "SDL3/SDL_pixels.h" #include #include @@ -6,6 +8,7 @@ #include #include +// TODO: move out and create lazy cam for each device SDLVideoCameraContent::SDLVideoCameraContent(void) { int devcount {0}; //SDL_CameraDeviceID *devices = SDL_GetCameraDevices(&devcount); @@ -25,7 +28,6 @@ SDLVideoCameraContent::SDLVideoCameraContent(void) { int speccount {0}; SDL_CameraSpec** specs = SDL_GetCameraSupportedFormats(device, &speccount); - //SDL_CameraSpec* specs = SDL_GetCameraSupportedFormats(device, &speccount); if (specs == nullptr) { std::cout << " - no supported spec\n"; } else { @@ -42,22 +44,29 @@ SDLVideoCameraContent::SDLVideoCameraContent(void) { // FORCE a diffrent pixel format //SDL_PIXELFORMAT_RGBA8888, //SDL_PIXELFORMAT_UNKNOWN, - SDL_PIXELFORMAT_IYUV, + //SDL_PIXELFORMAT_IYUV, + SDL_PIXELFORMAT_YUY2, - SDL_COLORSPACE_SRGB, //SDL_COLORSPACE_UNKNOWN, + //SDL_COLORSPACE_SRGB, + //SDL_COLORSPACE_SRGB_LINEAR, + SDL_COLORSPACE_YUV_DEFAULT, //1280, 720, //640, 360, - 640, 480, + //640, 480, + 696, 392, //1, 30 30, 1 }; _camera = { - SDL_OpenCamera(devices[0], &spec), + //SDL_OpenCamera(devices[devcount-1], &spec), + SDL_OpenCamera(devices[0], nullptr), + //SDL_OpenCamera(devices[0], &spec), &SDL_CloseCamera }; + SDL_GetCameraFormat(_camera.get(), &spec); } SDL_free(devices); if (!static_cast(_camera)) { @@ -76,8 +85,9 @@ SDLVideoCameraContent::SDLVideoCameraContent(void) { SDL_CameraSpec spec; float fps {1.f}; - if (SDL_GetCameraFormat(_camera.get(), &spec) != 0) { + if (!SDL_GetCameraFormat(_camera.get(), &spec)) { // meh + throw int(5); } else { fps = float(spec.framerate_numerator)/float(spec.framerate_denominator); std::cout << "camera fps: " << fps << "fps (" << spec.framerate_numerator << "/" << spec.framerate_denominator << ")\n"; diff --git a/src/content/sdl_video_frame_stream2.hpp b/src/content/sdl_video_frame_stream2.hpp index c36ff0d..85cc413 100644 --- a/src/content/sdl_video_frame_stream2.hpp +++ b/src/content/sdl_video_frame_stream2.hpp @@ -28,25 +28,28 @@ struct SDLVideoFrame { SDLVideoFrame(const SDLVideoFrame& other) { timestampNS = other.timestampNS; if (static_cast(other.surface)) { - // TODO: use SDL_DuplicateSurface() + //surface = { + // SDL_CreateSurface( + // other.surface->w, + // other.surface->h, + // other.surface->format + // ), + // &SDL_DestroySurface + //}; + //SDL_BlitSurface(other.surface.get(), nullptr, surface.get(), nullptr); surface = { - SDL_CreateSurface( - other.surface->w, - other.surface->h, - other.surface->format - ), - &SDL_DestroySurface + SDL_DuplicateSurface(other.surface.get()), + &SDL_DestroySurface }; - SDL_BlitSurface(other.surface.get(), nullptr, surface.get(), nullptr); } } SDLVideoFrame& operator=(const SDLVideoFrame& other) = delete; }; -using SDLVideoFrameStream2MultiStream = FrameStream2MultiStream; -using SDLVideoFrameStream2 = SDLVideoFrameStream2MultiStream::sub_stream_type_t; // just use the default for now +using SDLVideoFrameStream2MultiSource = FrameStream2MultiSource; +using SDLVideoFrameStream2 = 
SDLVideoFrameStream2MultiSource::sub_stream_type_t; // just use the default for now -struct SDLVideoCameraContent : protected SDLVideoFrameStream2MultiStream { +struct SDLVideoCameraContent : public SDLVideoFrameStream2MultiSource { // meh, empty default std::unique_ptr _camera {nullptr, &SDL_CloseCamera}; std::atomic _thread_should_quit {false}; @@ -60,7 +63,7 @@ struct SDLVideoCameraContent : protected SDLVideoFrameStream2MultiStream { ~SDLVideoCameraContent(void); // make only some of writer public - using SDLVideoFrameStream2MultiStream::aquireSubStream; - using SDLVideoFrameStream2MultiStream::releaseSubStream; + using SDLVideoFrameStream2MultiSource::subscribe; + using SDLVideoFrameStream2MultiSource::unsubscribe; }; diff --git a/src/debug_tox_call.cpp b/src/debug_tox_call.cpp new file mode 100644 index 0000000..14acf43 --- /dev/null +++ b/src/debug_tox_call.cpp @@ -0,0 +1,226 @@ +#include "./debug_tox_call.hpp" + +#include + +#include +#include + +#include + +#include + +// fwd +namespace Message { + uint64_t getTimeMS(void); +} + +static constexpr float lerp(float a, float b, float t) { + return a + t * (b - a); +} + +namespace Components { + struct ToxAVFriendAudioSource { + }; + + struct ToxAVFriendAudioSink { + }; + + struct ToxAVFriendVideoSource { + }; + + struct ToxAVFriendVideoSink { + }; +} + +DebugToxCall::DebugToxCall(ObjectStore2& os, ToxAV& toxav, TextureUploaderI& tu) : _os(os), _toxav(toxav), _tu(tu) { + _toxav.subscribe(this, ToxAV_Event::friend_call); + _toxav.subscribe(this, ToxAV_Event::friend_call_state); + _toxav.subscribe(this, ToxAV_Event::friend_audio_bitrate); + _toxav.subscribe(this, ToxAV_Event::friend_video_bitrate); + _toxav.subscribe(this, ToxAV_Event::friend_audio_frame); + _toxav.subscribe(this, ToxAV_Event::friend_video_frame); +} + +void DebugToxCall::tick(float time_delta) { +} + +float DebugToxCall::render(void) { + float next_frame {2.f}; + if (ImGui::Begin("toxav debug")) { + ImGui::Text("Calls:"); + ImGui::Indent(); + for (auto& [fid, call] : _calls) { + ImGui::PushID(fid); + + ImGui::Text("fid:%d state:%d", fid, call.state); + if (call.incoming) { + ImGui::SameLine(); + if (ImGui::SmallButton("answer")) { + const auto ret = _toxav.toxavAnswer(fid, 0, 0); + if (ret == TOXAV_ERR_ANSWER_OK) { + call.incoming = false; + } + } + } else if (call.state != TOXAV_FRIEND_CALL_STATE_FINISHED) { + next_frame = std::min(next_frame, 0.1f); + ImGui::SameLine(); + if (ImGui::SmallButton("hang up")) { + const auto ret = _toxav.toxavCallControl(fid, TOXAV_CALL_CONTROL_CANCEL); + if (ret == TOXAV_ERR_CALL_CONTROL_OK) { + // we hung up + // not sure if its possible for toxcore to tell this us too when the other side does this at the same time? 
+ call.state = TOXAV_FRIEND_CALL_STATE_FINISHED; + } + } + + //if (ImGui::BeginCombo("audio src", "---")) { + // ImGui::EndCombo(); + //} + //if (ImGui::BeginCombo("video src", "---")) { + // ImGui::EndCombo(); + //} + } + + //if (call.last_v_frame_tex != 0 && ImGui::BeginItemTooltip()) { + if (call.last_v_frame_tex != 0) { + next_frame = std::min(next_frame, call.v_frame_interval_avg); + ImGui::Text("vframe interval avg: %f", call.v_frame_interval_avg); + ImGui::Image( + reinterpret_cast(call.last_v_frame_tex), + //ImVec2{float(call.last_v_frame_width), float(call.last_v_frame_height)} + ImVec2{100.f, 100.f * float(call.last_v_frame_height)/call.last_v_frame_width} + ); + //ImGui::EndTooltip(); + } + + ImGui::PopID(); + } + ImGui::Unindent(); + } + ImGui::End(); + + return next_frame; +} + +bool DebugToxCall::onEvent(const Events::FriendCall& e) { + auto& call = _calls[e.friend_number]; + call.incoming = true; + call.incoming_a = e.audio_enabled; + call.incoming_v = e.video_enabled; + //call.state = TOXAV_FRIEND_CALL_STATE_NONE; + + return true; +} + +bool DebugToxCall::onEvent(const Events::FriendCallState& e) { + auto& call = _calls[e.friend_number]; + call.state = e.state; + + return true; +} + +bool DebugToxCall::onEvent(const Events::FriendAudioBitrate&) { + return false; +} + +bool DebugToxCall::onEvent(const Events::FriendVideoBitrate&) { + return false; +} + +bool DebugToxCall::onEvent(const Events::FriendAudioFrame& e) { + auto& call = _calls[e.friend_number]; + call.num_a_frames++; + return false; +} + +bool DebugToxCall::onEvent(const Events::FriendVideoFrame& e) { + auto& call = _calls[e.friend_number]; + call.num_v_frames++; + + if (call.last_v_frame_timepoint == 0) { + call.last_v_frame_timepoint = Message::getTimeMS(); + } else { + const auto new_time_point = Message::getTimeMS(); + auto time_delta_ms = new_time_point - call.last_v_frame_timepoint; + call.last_v_frame_timepoint = new_time_point; + time_delta_ms = std::min(time_delta_ms, 10*1000); // cap at 10sec + + if (call.v_frame_interval_avg == 0) { + call.v_frame_interval_avg = time_delta_ms/1000.f; + } else { + std::cerr << "lerp(" << call.v_frame_interval_avg << ", " << time_delta_ms/1000.f << ", 0.2f) = "; + call.v_frame_interval_avg = lerp(call.v_frame_interval_avg, time_delta_ms/1000.f, 0.2f); + std::cerr << call.v_frame_interval_avg << "\n"; + } + } + + auto* new_surf = SDL_CreateSurface(e.width, e.height, SDL_PIXELFORMAT_IYUV); + assert(new_surf); + if (SDL_LockSurface(new_surf)) { + // copy the data + // we know how the implementation works, its y u v consecutivlely + // y + for (size_t y = 0; y < e.height; y++) { + std::memcpy( + //static_cast(new_surf->pixels) + new_surf->pitch*y, + static_cast(new_surf->pixels) + e.width*y, + e.y.ptr + e.ystride*y, + e.width + ); + } + + // u + for (size_t y = 0; y < e.height/2; y++) { + std::memcpy( + static_cast(new_surf->pixels) + (e.width*e.height) + (e.width/2)*y, + e.u.ptr + e.ustride*y, + e.width/2 + ); + } + + // v + for (size_t y = 0; y < e.height/2; y++) { + std::memcpy( + static_cast(new_surf->pixels) + (e.width*e.height) + ((e.width/2)*(e.height/2)) + (e.width/2)*y, + e.v.ptr + e.vstride*y, + e.width/2 + ); + } + + SDL_UnlockSurface(new_surf); + } + + auto* converted_surf = SDL_ConvertSurfaceAndColorspace(new_surf, SDL_PIXELFORMAT_RGBA32, nullptr, SDL_COLORSPACE_YUV_DEFAULT, 0); + SDL_DestroySurface(new_surf); + if (converted_surf == nullptr) { + assert(false); + return true; + } + + SDL_LockSurface(converted_surf); + if (call.last_v_frame_tex == 0 || 
call.last_v_frame_width != e.width || call.last_v_frame_height != e.height) { + _tu.destroy(call.last_v_frame_tex); + call.last_v_frame_tex = _tu.uploadRGBA( + static_cast(converted_surf->pixels), + converted_surf->w, + converted_surf->h, + TextureUploaderI::LINEAR, + TextureUploaderI::STREAMING + ); + + call.last_v_frame_width = e.width; + call.last_v_frame_height = e.height; + } else { + _tu.updateRGBA(call.last_v_frame_tex, static_cast(converted_surf->pixels), converted_surf->w * converted_surf->h * 4); + } + SDL_UnlockSurface(converted_surf); + SDL_DestroySurface(converted_surf); + + // TODO: use this instead + //SDL_UpdateYUVTexture(tex, nullptr, e.y.ptr, e.ystride,... + + std::cout << "DTC: updated video texture " << call.last_v_frame_tex << "\n"; + + return false; +} + diff --git a/src/debug_tox_call.hpp b/src/debug_tox_call.hpp new file mode 100644 index 0000000..a17f981 --- /dev/null +++ b/src/debug_tox_call.hpp @@ -0,0 +1,53 @@ +#pragma once + +#include +#include "./tox_av.hpp" +#include "./texture_uploader.hpp" + +#include +#include + +class DebugToxCall : public ToxAVEventI { + ObjectStore2& _os; + ToxAV& _toxav; + TextureUploaderI& _tu; + + struct Call { + bool incoming {false}; + bool incoming_a {false}; + bool incoming_v {false}; + + uint32_t state {0}; // ? just last state ? + + uint32_t abr {0}; + uint32_t vbr {0}; + + size_t num_a_frames {0}; + size_t num_v_frames {0}; + + // fps moving interval + uint64_t last_v_frame_timepoint {0}; + float v_frame_interval_avg {0.f}; + + uint64_t last_v_frame_tex {0}; + uint64_t last_v_frame_width {0}; + uint64_t last_v_frame_height {0}; + }; + // tox friend id -> call + std::map _calls; + + public: + DebugToxCall(ObjectStore2& os, ToxAV& toxav, TextureUploaderI& tu); + ~DebugToxCall(void) {} + + void tick(float time_delta); + float render(void); + + protected: // toxav events + bool onEvent(const Events::FriendCall&) override; + bool onEvent(const Events::FriendCallState&) override; + bool onEvent(const Events::FriendAudioBitrate&) override; + bool onEvent(const Events::FriendVideoBitrate&) override; + bool onEvent(const Events::FriendAudioFrame&) override; + bool onEvent(const Events::FriendVideoFrame&) override; +}; diff --git a/src/debug_video_tap.cpp b/src/debug_video_tap.cpp new file mode 100644 index 0000000..c21aab9 --- /dev/null +++ b/src/debug_video_tap.cpp @@ -0,0 +1,204 @@ +#include "./debug_video_tap.hpp" + +#include + +#include + +#include + +#include + +#include "./content/sdl_video_frame_stream2.hpp" + +#include +#include +#include + +struct DebugVideoTapSink : public FrameStream2SinkI { + std::shared_ptr> _writer; + + DebugVideoTapSink(void) {} + ~DebugVideoTapSink(void) {} + + // sink + std::shared_ptr> subscribe(void) override { + if (_writer) { + // max 1 (exclusive) + return nullptr; + } + + _writer = std::make_shared>(1, true); + + return _writer; + } + + bool unsubscribe(const std::shared_ptr>& sub) override { + if (!sub || !_writer) { + // nah + return false; + } + + if (sub == _writer) { + _writer = nullptr; + return true; + } + + // what + return false; + } +}; + +DebugVideoTap::DebugVideoTap(ObjectStore2& os, StreamManager& sm, TextureUploaderI& tu) : _os(os), _sm(sm), _tu(tu) { + // post self as video sink + _tap = {_os.registry(), _os.registry().create()}; + try { + auto dvts = std::make_unique(); + _tap.emplace(dvts.get()); // to get our data back + _tap.emplace>( + std::move(dvts) + ); + + _tap.emplace("DebugVideoTap", std::string{entt::type_name::value()}); + } catch (...) 
{ + _os.registry().destroy(_tap); + } +} + +DebugVideoTap::~DebugVideoTap(void) { + if (static_cast(_tap)) { + _os.registry().destroy(_tap); + } +} + +float DebugVideoTap::render(void) { + if (ImGui::Begin("DebugVideoTap")) { + // list sources dropdown to connect too + std::string preview_label {"none"}; + if (static_cast(_selected_src)) { + preview_label = std::to_string(entt::to_integral(_selected_src.entity())) + " (" + _selected_src.get().name + ")"; + } + + if (ImGui::BeginCombo("selected source", preview_label.c_str())) { + if (ImGui::Selectable("none")) { + switchTo({}); + } + + for (const auto& [oc, ss] : _os.registry().view().each()) { + if (ss.frame_type_name != entt::type_name::value()) { + continue; + } + std::string label = std::to_string(entt::to_integral(oc)) + " (" + ss.name + ")"; + if (ImGui::Selectable(label.c_str())) { + switchTo({_os.registry(), oc}); + } + } + + ImGui::EndCombo(); + } + + { // first pull the latest img from sink and update the texture + assert(static_cast(_tap)); + + auto& dvtsw = _tap.get()->_writer; + if (dvtsw) { + while (true) { + auto new_frame_opt = dvtsw->pop(); + if (new_frame_opt.has_value()) { + // timing + if (_v_last_ts == 0) { + _v_last_ts = new_frame_opt.value().timestampNS; + } else { + auto delta = int64_t(new_frame_opt.value().timestampNS) - int64_t(_v_last_ts); + _v_last_ts = new_frame_opt.value().timestampNS; + + //delta = std::min(delta, 10*1000*1000); + + if (_v_interval_avg == 0) { + _v_interval_avg = delta/1'000'000'000.f; + } else { + const float r = 0.2f; + _v_interval_avg = _v_interval_avg * (1-r) + (delta/1'000'000'000.f) * r; + } + } + + SDL_Surface* new_frame_surf = new_frame_opt.value().surface.get(); + + SDL_Surface* converted_surf = new_frame_surf; + if (new_frame_surf->format != SDL_PIXELFORMAT_RGBA32) { + // we need to convert + //std::cerr << "DVT: need to convert\n"; + converted_surf = SDL_ConvertSurfaceAndColorspace(new_frame_surf, SDL_PIXELFORMAT_RGBA32, nullptr, SDL_COLORSPACE_RGB_DEFAULT, 0); + assert(converted_surf->format == SDL_PIXELFORMAT_RGBA32); + } + + SDL_LockSurface(converted_surf); + if (_tex == 0 || (int)_tex_w != converted_surf->w || (int)_tex_h != converted_surf->h) { + _tu.destroy(_tex); + _tex = _tu.uploadRGBA( + static_cast(converted_surf->pixels), + converted_surf->w, + converted_surf->h, + TextureUploaderI::LINEAR, + TextureUploaderI::STREAMING + ); + + _tex_w = converted_surf->w; + _tex_h = converted_surf->h; + } else { + _tu.updateRGBA(_tex, static_cast(converted_surf->pixels), converted_surf->w * converted_surf->h * 4); + } + SDL_UnlockSurface(converted_surf); + + if (new_frame_surf != converted_surf) { + // clean up temp + SDL_DestroySurface(converted_surf); + } + } else { + break; + } + } + } + } + + // img here + if (_tex != 0) { + ImGui::Text("moving avg interval: %f", _v_interval_avg); + const float img_w = ImGui::GetContentRegionAvail().x; + ImGui::Image( + reinterpret_cast(_tex), + ImVec2{img_w, img_w * float(_tex_h)/_tex_w} + ); + } + } + ImGui::End(); + + if (_v_interval_avg != 0) { + return _v_interval_avg; + } else { + return 2.f; + } +} + +void DebugVideoTap::switchTo(ObjectHandle o) { + if (o == _selected_src) { + std::cerr << "DVT: switch to same ...\n"; + return; + } + + _tu.destroy(_tex); + _tex = 0; + _v_last_ts = 0; + _v_interval_avg = 0; + + if (static_cast(_selected_src)) { + _sm.disconnect(_selected_src, _tap); + } + + if (static_cast(o) && _sm.connect(o, _tap)) { + _selected_src = o; + } else { + std::cerr << "DVT: cleared video source\n"; + _selected_src = {}; + } +} + 
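Usage note (sketch, not part of the patch): DebugVideoTap above also shows the general registration pattern for a sink — implement FrameStream2SinkI<T>, then attach it to an ObjectStore2 object together with a Components::StreamSink describing it, so the stream manager and its UI can find it. A minimal sketch of that registration follows; MyVideoSink and registerVideoSink are hypothetical names standing in for any FrameStream2SinkI<SDLVideoFrame> implementation.

    // sketch: make a video sink discoverable by StreamManager / StreamManagerUI.
    // MyVideoSink is an assumed FrameStream2SinkI<SDLVideoFrame> implementation.
    #include "./stream_manager.hpp"
    #include "./content/sdl_video_frame_stream2.hpp"
    #include <memory>

    static ObjectHandle registerVideoSink(ObjectStore2& os) {
        ObjectHandle sink_obj {os.registry(), os.registry().create()};
        try {
            // the actual stream endpoint the manager subscribes a writer on
            sink_obj.emplace<Components::FrameStream2Sink<SDLVideoFrame>>(
                std::make_unique<MyVideoSink>()
            );
            // display name + frame type string, used by StreamManagerUI and
            // DebugVideoTap's source picker for listing/filtering
            sink_obj.emplace<Components::StreamSink>(
                "MyVideoSink", std::string{entt::type_name<SDLVideoFrame>::value()}
            );
        } catch (...) {
            // same rollback pattern as DebugVideoTap's constructor above
            os.registry().destroy(sink_obj);
            return {};
        }
        return sink_obj;
    }
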
diff --git a/src/debug_video_tap.hpp b/src/debug_video_tap.hpp new file mode 100644 index 0000000..3d9624f --- /dev/null +++ b/src/debug_video_tap.hpp @@ -0,0 +1,32 @@ +#pragma once + +#include +#include +#include "./stream_manager.hpp" +#include "./texture_uploader.hpp" + +// provides a sink and a small window displaying a SDLVideoFrame +class DebugVideoTap { + ObjectStore2& _os; + StreamManager& _sm; + TextureUploaderI& _tu; + + ObjectHandle _selected_src; + ObjectHandle _tap; + + uint64_t _tex {0}; + uint32_t _tex_w {0}; + uint32_t _tex_h {0}; + + uint64_t _v_last_ts {0}; // ns + float _v_interval_avg {0.f}; // s + + public: + DebugVideoTap(ObjectStore2& os, StreamManager& sm, TextureUploaderI& tu); + ~DebugVideoTap(void); + + float render(void); + + void switchTo(ObjectHandle o); +}; + diff --git a/src/main.cpp b/src/main.cpp index 0036d1d..1e0805a 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -38,7 +38,7 @@ int main(int argc, char** argv) { // setup hints #ifndef __ANDROID__ - if (SDL_SetHint(SDL_HINT_VIDEO_ALLOW_SCREENSAVER, "1") != SDL_TRUE) { + if (!SDL_SetHint(SDL_HINT_VIDEO_ALLOW_SCREENSAVER, "1")) { std::cerr << "Failed to set '" << SDL_HINT_VIDEO_ALLOW_SCREENSAVER << "' to 1\n"; } #endif @@ -76,34 +76,34 @@ int main(int argc, char** argv) { std::cout << "SDL Renderer: " << SDL_GetRendererName(renderer.get()) << "\n"; // optionally init audio and camera - if (SDL_Init(SDL_INIT_AUDIO) < 0) { + if (!SDL_Init(SDL_INIT_AUDIO)) { std::cerr << "SDL_Init AUDIO failed (" << SDL_GetError() << ")\n"; } else if (false) { SDLAudioInputDeviceDefault aidd; - auto* reader = aidd.aquireSubStream(); + auto reader = aidd.subscribe(); - auto writer = SDLAudioOutputDeviceDefaultFactory{}.create(); + auto writer = SDLAudioOutputDeviceDefaultSink{}.subscribe(); - for (size_t i = 0; i < 100; i++) { + for (size_t i = 0; i < 200; i++) { std::this_thread::sleep_for(std::chrono::milliseconds(10)); auto new_frame_opt = reader->pop(); if (new_frame_opt.has_value()) { std::cout << "audio frame was seq:" << new_frame_opt.value().seq << " sr:" << new_frame_opt.value().sample_rate << " " << (new_frame_opt.value().isS16()?"S16":"F32") << " l:" << (new_frame_opt.value().isS16()?new_frame_opt.value().getSpan().size:new_frame_opt.value().getSpan().size) << "\n"; - writer.push(new_frame_opt.value()); + writer->push(new_frame_opt.value()); } else { std::cout << "no audio frame\n"; } } - aidd.releaseSubStream(reader); + aidd.unsubscribe(reader); } - if (SDL_Init(SDL_INIT_CAMERA) < 0) { + if (!SDL_Init(SDL_INIT_CAMERA)) { std::cerr << "SDL_Init CAMERA failed (" << SDL_GetError() << ")\n"; } else if (false) { // HACK std::cerr << "CAMERA initialized\n"; SDLVideoCameraContent vcc; - auto* reader = vcc.aquireSubStream(); + auto reader = vcc.subscribe(); for (size_t i = 0; i < 20; i++) { std::this_thread::sleep_for(std::chrono::milliseconds(50)); auto new_frame_opt = reader->pop(); @@ -111,7 +111,7 @@ int main(int argc, char** argv) { std::cout << "video frame was " << new_frame_opt.value().surface->w << "x" << new_frame_opt.value().surface->h << " " << new_frame_opt.value().timestampNS << "ns " << new_frame_opt.value().surface->format << "sf\n"; } } - vcc.releaseSubStream(reader); + vcc.unsubscribe(reader); } std::cout << "after sdl video stuffery\n"; diff --git a/src/main_screen.cpp b/src/main_screen.cpp index d42488f..0d0fe18 100644 --- a/src/main_screen.cpp +++ b/src/main_screen.cpp @@ -9,6 +9,11 @@ #include +#include "./content/sdl_video_frame_stream2.hpp" +#include "content/audio_stream.hpp" +#include 
"content/sdl_audio_frame_stream2.hpp" +#include "stream_manager.hpp" + #include #include #include @@ -19,11 +24,13 @@ MainScreen::MainScreen(SimpleConfigModel&& conf_, SDL_Renderer* renderer_, Theme rmm(cr), msnj{cr, {}, {}}, mts(rmm), + sm(os), tc(save_path, save_password), tpi(tc.getTox()), ad(tc), #if TOMATO_TOX_AV tav(tc.getTox()), + dtc(os, tav, sdlrtu), #endif tcm(cr, tc, tc), tmm(rmm, cr, tcm, tc, tc), @@ -40,6 +47,8 @@ MainScreen::MainScreen(SimpleConfigModel&& conf_, SDL_Renderer* renderer_, Theme cg(conf, os, rmm, cr, sdlrtu, contact_tc, msg_tc, theme), sw(conf), osui(os), + smui(os, sm), + dvt(os, sm, sdlrtu), tuiu(tc, conf), tdch(tpi) { @@ -136,6 +145,43 @@ MainScreen::MainScreen(SimpleConfigModel&& conf_, SDL_Renderer* renderer_, Theme } conf.dump(); + + { // add system av devices + { + ObjectHandle vsrc {os.registry(), os.registry().create()}; + try { + vsrc.emplace>( + std::make_unique() + ); + + vsrc.emplace("WebCam", std::string{entt::type_name::value()}); + } catch (...) { + os.registry().destroy(vsrc); + } + } + + { // audio in + ObjectHandle asrc {os.registry(), os.registry().create()}; + try { + throw int(2); + } catch (...) { + os.registry().destroy(asrc); + } + } + + { // audio out + ObjectHandle asink {os.registry(), os.registry().create()}; + try { + asink.emplace>( + std::make_unique() + ); + + asink.emplace("LoudSpeaker", std::string{entt::type_name::value()}); + } catch (...) { + os.registry().destroy(asink); + } + } + } } MainScreen::~MainScreen(void) { @@ -252,14 +298,19 @@ Screen* MainScreen::render(float time_delta, bool&) { } // ACTUALLY NOT IF RENDERED, MOVED LOGIC TO ABOVE // it might unload textures, so it needs to be done before rendering - const float ctc_interval = contact_tc.update(); - const float msgtc_interval = msg_tc.update(); + float animation_interval = contact_tc.update(); + animation_interval = std::min(animation_interval, msg_tc.update()); const float cg_interval = cg.render(time_delta); // render sw.render(); // render osui.render(); + smui.render(); + animation_interval = std::min(animation_interval, dvt.render()); tuiu.render(); // render tdch.render(); // render +#if TOMATO_TOX_AV + animation_interval = std::min(animation_interval, dtc.render()); +#endif { // main window menubar injection if (ImGui::Begin("tomato")) { @@ -440,8 +491,7 @@ Screen* MainScreen::render(float time_delta, bool&) { // low delay time window if (!_window_hidden && _time_since_event < curr_profile.low_delay_window) { - _render_interval = std::min(_render_interval, ctc_interval); - _render_interval = std::min(_render_interval, msgtc_interval); + _render_interval = std::min(_render_interval, animation_interval); _render_interval = std::clamp( _render_interval, @@ -450,8 +500,7 @@ Screen* MainScreen::render(float time_delta, bool&) { ); // mid delay time window } else if (!_window_hidden && _time_since_event < curr_profile.mid_delay_window) { - _render_interval = std::min(_render_interval, ctc_interval); - _render_interval = std::min(_render_interval, msgtc_interval); + _render_interval = std::min(_render_interval, animation_interval); _render_interval = std::clamp( _render_interval, @@ -479,10 +528,13 @@ Screen* MainScreen::tick(float time_delta, bool& quit) { #if TOMATO_TOX_AV tav.toxavIterate(); const float av_interval = tav.toxavIterationInterval()/1000.f; + dtc.tick(time_delta); #endif tcm.iterate(time_delta); // compute + const float sm_interval = sm.tick(time_delta); + const float fo_interval = tffom.tick(time_delta); tam.iterate(); // compute @@ -522,6 
+574,11 @@ Screen* MainScreen::tick(float time_delta, bool& quit) { ); #endif + _min_tick_interval = std::min( + _min_tick_interval, + sm_interval + ); + //std::cout << "MS: min tick interval: " << _min_tick_interval << "\n"; switch (_compute_perf_mode) { diff --git a/src/main_screen.hpp b/src/main_screen.hpp index 1aa5939..3d955e5 100644 --- a/src/main_screen.hpp +++ b/src/main_screen.hpp @@ -16,6 +16,8 @@ #include #include +#include "./stream_manager.hpp" + #include "./tox_client.hpp" #include "./auto_dirty.hpp" @@ -30,12 +32,15 @@ #include "./chat_gui4.hpp" #include "./chat_gui/settings_window.hpp" #include "./object_store_ui.hpp" +#include "./stream_manager_ui.hpp" +#include "./debug_video_tap.hpp" #include "./tox_ui_utils.hpp" #include "./tox_dht_cap_histo.hpp" #include "./tox_friend_faux_offline_messaging.hpp" #if TOMATO_TOX_AV #include "./tox_av.hpp" +#include "./debug_tox_call.hpp" #endif #include @@ -58,12 +63,15 @@ struct MainScreen final : public Screen { MessageSerializerNJ msnj; MessageTimeSort mts; + StreamManager sm; + ToxEventLogger tel{std::cout}; ToxClient tc; ToxPrivateImpl tpi; AutoDirty ad; #if TOMATO_TOX_AV ToxAV tav; + DebugToxCall dtc; #endif ToxContactModel2 tcm; ToxMessageManager tmm; @@ -86,6 +94,8 @@ struct MainScreen final : public Screen { ChatGui4 cg; SettingsWindow sw; ObjectStoreUI osui; + StreamManagerUI smui; + DebugVideoTap dvt; ToxUIUtils tuiu; ToxDHTCapHisto tdch; diff --git a/src/sdlrenderer_texture_uploader.cpp b/src/sdlrenderer_texture_uploader.cpp index 66b810c..4ea9b67 100644 --- a/src/sdlrenderer_texture_uploader.cpp +++ b/src/sdlrenderer_texture_uploader.cpp @@ -32,7 +32,7 @@ uint64_t SDLRendererTextureUploader::uploadRGBA(const uint8_t* data, uint32_t wi SDL_UpdateTexture(tex, nullptr, surf->pixels, surf->pitch); SDL_BlendMode surf_blend_mode = SDL_BLENDMODE_NONE; - if (SDL_GetSurfaceBlendMode(surf, &surf_blend_mode) == 0) { + if (SDL_GetSurfaceBlendMode(surf, &surf_blend_mode)) { SDL_SetTextureBlendMode(tex, surf_blend_mode); } diff --git a/src/stream_manager.hpp b/src/stream_manager.hpp new file mode 100644 index 0000000..09688df --- /dev/null +++ b/src/stream_manager.hpp @@ -0,0 +1,210 @@ +#pragma once + +#include +#include + +#include + +#include "./content/frame_stream2.hpp" + +#include +#include +#include + +namespace Components { + struct StreamSource { + std::string name; + std::string frame_type_name; + // TODO: connect fn + }; + + struct StreamSink { + std::string name; + std::string frame_type_name; + // TODO: connect fn + }; + + template + using FrameStream2Source = std::unique_ptr>; + + template + using FrameStream2Sink = std::unique_ptr>; + +} // Components + + +class StreamManager { + friend class StreamManagerUI; + ObjectStore2& _os; + + struct Connection { + ObjectHandle src; + ObjectHandle sink; + + std::function pump_fn; + + bool on_main_thread {true}; + std::atomic_bool stop {false}; // disconnect + std::atomic_bool finished {false}; // disconnect + + // pump thread + + // frame interval counters and estimates + + Connection(void) = default; + Connection( + ObjectHandle src_, + ObjectHandle sink_, + std::function&& pump_fn_, + bool on_main_thread_ = true + ) : + src(src_), + sink(sink_), + pump_fn(std::move(pump_fn_)), + on_main_thread(on_main_thread_) + {} + }; + std::vector> _connections; + + public: + StreamManager(ObjectStore2& os) : _os(os) {} + virtual ~StreamManager(void) {} + + // TODO: default typed sources and sinks + + // stream type is FrameStream2I + // TODO: improve this design + // src and sink need to be a 
FrameStream2MultiStream + template + bool connect(Object src, Object sink) { + auto res = std::find_if( + _connections.cbegin(), _connections.cend(), + [&](const auto& a) { return a->src == src && a->sink == sink; } + ); + if (res != _connections.cend()) { + // already exists + return false; + } + + auto h_src = _os.objectHandle(src); + auto h_sink = _os.objectHandle(sink); + if (!static_cast(h_src) || !static_cast(h_sink)) { + // an object does not exist + return false; + } + + if (!h_src.all_of>()) { + // src not stream source + return false; + } + + if (!h_sink.all_of>()) { + // sink not stream sink + return false; + } + + // HACK: + if (!h_src.all_of()) { + h_src.emplace("", std::string{entt::type_name::value()}); + } + if (!h_sink.all_of()) { + h_sink.emplace("", std::string{entt::type_name::value()}); + } + + auto& src_stream = h_src.get>(); + auto& sink_stream = h_sink.get>(); + + auto reader = src_stream->subscribe(); + if (!reader) { + return false; + } + auto writer = sink_stream->subscribe(); + if (!writer) { + return false; + } + + _connections.push_back(std::make_unique( + h_src, + h_sink, + // refactor extract, we just need the type info here + [reader = std::move(reader), writer = std::move(writer)](Connection& con) -> void { + // there might be more stored + for (size_t i = 0; i < 10; i++) { + auto new_frame_opt = reader->pop(); + // TODO: frame interval estimates + if (new_frame_opt.has_value()) { + writer->push(new_frame_opt.value()); + } else { + break; + } + } + + if (con.stop) { + auto* src_stream_ptr = con.src.try_get>(); + if (src_stream_ptr != nullptr) { + (*src_stream_ptr)->unsubscribe(reader); + } + auto* sink_stream_ptr = con.sink.try_get>(); + if (sink_stream_ptr != nullptr) { + (*sink_stream_ptr)->unsubscribe(writer); + } + con.finished = true; + } + }, + true // TODO: threaded + )); + + return true; + } + + template + bool disconnect(Object src, Object sink) { + auto res = std::find_if( + _connections.cbegin(), _connections.cend(), + [&](const auto& a) { return a->src == src && a->sink == sink; } + ); + if (res == _connections.cend()) { + // not found + return false; + } + + // do disconnect + (*res)->stop = true; + + return true; + } + + template + bool disconnectAll(Object o) { + bool succ {false}; + for (const auto& con : _connections) { + if (con->src == o || con->sink == o) { + con->stop = true; + succ = true; + } + } + + return succ; + } + + // do we need the time delta? + float tick(float) { + // pump all mainthread connections + for (auto it = _connections.begin(); it != _connections.end();) { + auto& con = **it; + if (con.on_main_thread) { + con.pump_fn(con); + } + + if (con.stop && con.finished) { + it = _connections.erase(it); + } else { + it++; + } + } + + // return min over intervals instead + return 0.01f; + } +}; + diff --git a/src/stream_manager_ui.cpp b/src/stream_manager_ui.cpp new file mode 100644 index 0000000..4196647 --- /dev/null +++ b/src/stream_manager_ui.cpp @@ -0,0 +1,39 @@ +#include "./stream_manager_ui.hpp" + +#include + +#include + +StreamManagerUI::StreamManagerUI(ObjectStore2& os, StreamManager& sm) : _os(os), _sm(sm) { +} + +void StreamManagerUI::render(void) { + if (ImGui::Begin("StreamManagerUI")) { + // TODO: node canvas? + + // by fametype ?? 
+ + ImGui::SeparatorText("Sources"); + + // list sources + for (const auto& [oc, ss] : _os.registry().view().each()) { + ImGui::Text("src %d (%s)[%s]", entt::to_integral(oc), ss.name.c_str(), ss.frame_type_name.c_str()); + } + + ImGui::SeparatorText("Sinks"); + + // list sinks + for (const auto& [oc, ss] : _os.registry().view().each()) { + ImGui::Text("sink %d (%s)[%s]", entt::to_integral(oc), ss.name.c_str(), ss.frame_type_name.c_str()); + } + + ImGui::SeparatorText("Connections"); + + // list connections + for (const auto& con : _sm._connections) { + ImGui::Text("con %d->%d", entt::to_integral(con->src.entity()), entt::to_integral(con->sink.entity())); + } + } + ImGui::End(); +} + diff --git a/src/stream_manager_ui.hpp b/src/stream_manager_ui.hpp new file mode 100644 index 0000000..de30af0 --- /dev/null +++ b/src/stream_manager_ui.hpp @@ -0,0 +1,15 @@ +#pragma once + +#include +#include "./stream_manager.hpp" + +class StreamManagerUI { + ObjectStore2& _os; + StreamManager& _sm; + + public: + StreamManagerUI(ObjectStore2& os, StreamManager& sm); + + void render(void); +}; + diff --git a/src/tox_av.cpp b/src/tox_av.cpp index 6720632..4d3d7dd 100644 --- a/src/tox_av.cpp +++ b/src/tox_av.cpp @@ -153,11 +153,11 @@ Toxav_Err_Bit_Rate_Set ToxAV::toxavVideoSetBitRate(uint32_t friend_number, uint3 void ToxAV::cb_call(uint32_t friend_number, bool audio_enabled, bool video_enabled) { std::cerr << "TOXAV: receiving call f:" << friend_number << " a:" << audio_enabled << " v:" << video_enabled << "\n"; - Toxav_Err_Answer err_answer { TOXAV_ERR_ANSWER_OK }; - toxav_answer(_tox_av, friend_number, 0, 0, &err_answer); - if (err_answer != TOXAV_ERR_ANSWER_OK) { - std::cerr << "!!!!!!!! answer failed " << err_answer << "\n"; - } + //Toxav_Err_Answer err_answer { TOXAV_ERR_ANSWER_OK }; + //toxav_answer(_tox_av, friend_number, 0, 0, &err_answer); + //if (err_answer != TOXAV_ERR_ANSWER_OK) { + // std::cerr << "!!!!!!!! answer failed " << err_answer << "\n"; + //} dispatch( ToxAV_Event::friend_call, diff --git a/src/tox_av.hpp b/src/tox_av.hpp index 27ce351..bc00ce0 100644 --- a/src/tox_av.hpp +++ b/src/tox_av.hpp @@ -68,7 +68,7 @@ enum class ToxAV_Event : uint32_t { MAX }; -struct ToxAVEventI { +struct ToxAVEventI { using enumType = ToxAV_Event; virtual ~ToxAVEventI(void) {}
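Usage note (sketch, not part of the patch): StreamManager::connect<FrameType>() from stream_manager.hpp above looks up the FrameStream2Source/FrameStream2Sink components on the two objects, subscribes a reader and a writer, and tick() pumps buffered frames across each connection until disconnect() flags it for teardown. The following sketch shows that flow under those assumptions; exampleWiring, camera_obj and tap_obj are illustrative names for objects registered the way MainScreen and DebugVideoTap do it above.

    #include "./stream_manager.hpp"
    #include "./content/sdl_video_frame_stream2.hpp"
    #include <iostream>

    // sketch: pump camera frames into a video sink via the new StreamManager.
    // camera_obj is assumed to carry Components::FrameStream2Source<SDLVideoFrame>
    // (e.g. SDLVideoCameraContent), tap_obj Components::FrameStream2Sink<SDLVideoFrame>
    // (e.g. DebugVideoTapSink).
    void exampleWiring(ObjectStore2& os, ObjectHandle camera_obj, ObjectHandle tap_obj, float time_delta) {
        StreamManager sm{os};

        if (!sm.connect<SDLVideoFrame>(camera_obj, tap_obj)) {
            // fails if a component is missing or subscribe() returned null
            std::cerr << "connect failed\n";
            return;
        }

        // per tick: pumps up to 10 queued frames per connection on the main thread
        // and erases connections that were stopped and have finished tearing down
        const float sm_interval = sm.tick(time_delta);
        (void)sm_interval; // would feed the app's tick-interval logic, as in MainScreen

        // teardown: sets the stop flag; the next tick() unsubscribes reader and writer
        sm.disconnect<SDLVideoFrame>(camera_obj, tap_obj);
    }
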