use SDL YUV-to-YUV conversion, and better fallbacks

display intended main screen intervals
actually report min interval in debug view and more
Green Sky 2024-10-03 12:33:52 +02:00
parent a290bec8f1
commit 7cb4f67f96
7 changed files with 39 additions and 162 deletions

View File

@ -116,7 +116,6 @@ target_sources(tomato PUBLIC
./frame_streams/sdl/sdl_audio2_frame_stream2.cpp
./frame_streams/sdl/video.hpp
./frame_streams/sdl/video_push_converter.hpp
./frame_streams/sdl/video_push_converter.cpp
./frame_streams/sdl/sdl_video_frame_stream2.hpp
./frame_streams/sdl/sdl_video_frame_stream2.cpp

View File

@ -220,7 +220,7 @@ float DebugVideoTap::render(void) {
for (auto& [view, stream] : dvtsw) {
std::string window_title {"DebugVideoTap #"};
window_title += std::to_string(view._id);
ImGui::SetNextWindowSize({250, 250}, ImGuiCond_Appearing);
ImGui::SetNextWindowSize({400, 420}, ImGuiCond_Appearing);
if (ImGui::Begin(window_title.c_str())) {
while (auto new_frame_opt = stream->pop()) {
// timing
@ -233,7 +233,7 @@ float DebugVideoTap::render(void) {
if (view._v_interval_avg == 0) {
view._v_interval_avg = delta/1'000'000.f;
} else {
const float r = 0.2f;
const float r = 0.05f;
view._v_interval_avg = view._v_interval_avg * (1.f-r) + (delta/1'000'000.f) * r;
}
}
@ -277,7 +277,7 @@ float DebugVideoTap::render(void) {
// img here
if (view._tex != 0) {
ImGui::SameLine();
ImGui::Text("moving avg interval: %f", view._v_interval_avg);
ImGui::Text("%dx%d ~avg interval: %.0fms (%.2ffps)", view._tex_w, view._tex_h, view._v_interval_avg*1000.f, 1.f/view._v_interval_avg);
const float img_w = ImGui::GetContentRegionAvail().x;
ImGui::Image(
reinterpret_cast<ImTextureID>(view._tex),
@ -287,6 +287,9 @@ float DebugVideoTap::render(void) {
);
}
if (view._v_interval_avg > 0) {
min_interval = std::min(min_interval, view._v_interval_avg);
}
}
ImGui::End();
}
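
The interval shown above is an exponential moving average of the per-frame delta; lowering the blend factor from 0.2 to 0.05 means roughly the last 1/0.05 ≈ 20 frames dominate the estimate, so the readout jitters less. A minimal self-contained sketch of the same smoothing (the struct and its names are illustrative, not part of the tree):

#include <cstdint>

// Exponential moving average of the frame interval, matching the update above:
// avg = avg*(1-r) + sample*r, with r = 0.05 (~20-frame effective window).
struct IntervalEMA {
	float avg {0.f}; // seconds; 0 means "no sample yet"
	static constexpr float r {0.05f};

	void push(uint64_t delta_us) {
		const float sample = delta_us / 1'000'000.f; // us -> s
		avg = (avg == 0.f) ? sample : avg * (1.f - r) + sample * r;
	}

	float fps(void) const { return avg > 0.f ? 1.f / avg : 0.f; }
};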

View File

@ -1,106 +0,0 @@
#include "./video_push_converter.hpp"
#include <cstring>
SDL_Surface* convertYUY2_IYUV(SDL_Surface* surf) {
if (surf->format != SDL_PIXELFORMAT_YUY2) {
return nullptr;
}
if ((surf->w % 2) != 0) {
SDL_SetError("YUY2->IYUV does not support odd widths");
// hmmm, we don't handle odd widths
return nullptr;
}
SDL_LockSurface(surf);
SDL_Surface* conv_surf = SDL_CreateSurface(surf->w, surf->h, SDL_PIXELFORMAT_IYUV);
SDL_LockSurface(conv_surf);
// YUY2 is 4:2:2 packed
// Y is simple, we just copy it over
// U and V have twice the vertical resolution of the 4:2:0 target, so we average each vertical pair of samples
// Packed mode: Y0+U0+Y1+V0 (1 plane)
uint8_t* y_plane = static_cast<uint8_t*>(conv_surf->pixels);
uint8_t* u_plane = static_cast<uint8_t*>(conv_surf->pixels) + conv_surf->w*conv_surf->h;
uint8_t* v_plane = static_cast<uint8_t*>(conv_surf->pixels) + conv_surf->w*conv_surf->h + (conv_surf->w/2)*(conv_surf->h/2);
const uint8_t* yuy2_data = static_cast<const uint8_t*>(surf->pixels);
for (int y = 0; y < surf->h; y++) {
for (int x = 0; x < surf->w; x += 2) {
// every pixel uses 2 bytes
const uint8_t* yuy2_curser = yuy2_data + y*surf->w*2 + x*2;
uint8_t src_y0 = yuy2_curser[0];
uint8_t src_u = yuy2_curser[1];
uint8_t src_y1 = yuy2_curser[2];
uint8_t src_v = yuy2_curser[3];
y_plane[y*conv_surf->w + x] = src_y0;
y_plane[y*conv_surf->w + x+1] = src_y1;
size_t uv_index = (y/2) * (conv_surf->w/2) + x/2;
if (y % 2 == 0) {
// first write
u_plane[uv_index] = src_u;
v_plane[uv_index] = src_v;
} else {
// second write, mix with existing value
u_plane[uv_index] = (int(u_plane[uv_index]) + int(src_u)) / 2;
v_plane[uv_index] = (int(v_plane[uv_index]) + int(src_v)) / 2;
}
}
}
SDL_UnlockSurface(conv_surf);
SDL_UnlockSurface(surf);
return conv_surf;
}
SDL_Surface* convertNV12_IYUV(SDL_Surface* surf) {
if (surf->format != SDL_PIXELFORMAT_NV12) {
return nullptr;
}
if ((surf->w % 2) != 0) {
SDL_SetError("NV12->IYUV does not support odd widths");
// hmmm, we don't handle odd widths
return nullptr;
}
SDL_LockSurface(surf);
SDL_Surface* conv_surf = SDL_CreateSurface(surf->w, surf->h, SDL_PIXELFORMAT_IYUV);
SDL_LockSurface(conv_surf);
// NV12 is planar Y and interleaved UV in 4:2:0
// Y is simple, we just copy it over
// U and V are interleaved in a single half-resolution plane
uint8_t* y_plane = static_cast<uint8_t*>(conv_surf->pixels);
uint8_t* u_plane = static_cast<uint8_t*>(conv_surf->pixels) + conv_surf->w*conv_surf->h;
uint8_t* v_plane = static_cast<uint8_t*>(conv_surf->pixels) + conv_surf->w*conv_surf->h + (conv_surf->w/2)*(conv_surf->h/2);
const uint8_t* nv12_y_plane = static_cast<const uint8_t*>(surf->pixels);
const uint8_t* nv12_uv_data = static_cast<const uint8_t*>(surf->pixels) + surf->w*surf->h;
// y can be copied as is
std::memcpy(
y_plane,
nv12_y_plane,
surf->w*surf->h
);
// we operate at half res
for (int y = 0; y < surf->h/2; y++) {
for (int x = 0; x < surf->w/2; x++) {
u_plane[y*(surf->w/2)+x] = nv12_uv_data[y*(surf->w/2)*2+x*2];
v_plane[y*(surf->w/2)+x] = nv12_uv_data[y*(surf->w/2)*2+x*2+1];
}
}
SDL_UnlockSurface(conv_surf);
SDL_UnlockSurface(surf);
return conv_surf;
}
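
Both hand-rolled converters above are what this commit drops in favour of SDL doing the YUV-to-YUV conversion itself. A minimal sketch of the direct replacement, assuming an SDL3 build whose SDL_ConvertSurface handles these YUV source formats (the helper name is illustrative):

#include <SDL3/SDL.h>

// Let SDL convert any supported YUV surface (e.g. YUY2 or NV12) to IYUV.
// Returns nullptr and sets SDL_GetError() on failure; caller owns the result.
static SDL_Surface* to_iyuv(SDL_Surface* surf) {
	if (surf->format == SDL_PIXELFORMAT_IYUV) {
		return SDL_DuplicateSurface(surf); // already the target format
	}
	return SDL_ConvertSurface(surf, SDL_PIXELFORMAT_IYUV);
}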

View File

@ -7,25 +7,10 @@
#include <iostream> // meh
static bool isFormatYUV(SDL_PixelFormat f) {
return
f == SDL_PIXELFORMAT_YV12 ||
f == SDL_PIXELFORMAT_IYUV ||
f == SDL_PIXELFORMAT_YUY2 ||
f == SDL_PIXELFORMAT_UYVY ||
f == SDL_PIXELFORMAT_YVYU ||
f == SDL_PIXELFORMAT_NV12 ||
f == SDL_PIXELFORMAT_NV21 ||
f == SDL_PIXELFORMAT_P010
;
}
SDL_Surface* convertYUY2_IYUV(SDL_Surface* surf);
SDL_Surface* convertNV12_IYUV(SDL_Surface* surf);
template<typename RealStream>
struct PushConversionVideoStream : public RealStream {
SDL_PixelFormat _forced_format {SDL_PIXELFORMAT_IYUV};
// TODO: force colorspace?
template<typename... Args>
PushConversionVideoStream(SDL_PixelFormat forced_format, Args&&... args) : RealStream(std::forward<Args>(args)...), _forced_format(forced_format) {}
@ -34,43 +19,33 @@ struct PushConversionVideoStream : public RealStream {
bool push(const SDLVideoFrame& value) override {
SDL_Surface* surf = value.surface.get();
if (surf->format != _forced_format) {
//std::cerr << "DTC: need to convert from " << SDL_GetPixelFormatName(converted_surf->format) << " to SDL_PIXELFORMAT_IYUV\n";
if (surf->format == SDL_PIXELFORMAT_YUY2 && _forced_format == SDL_PIXELFORMAT_IYUV) {
// optimized custom impl
//std::cerr << "PCVS: need to convert from " << SDL_GetPixelFormatName(surf->format) << " to " << SDL_GetPixelFormatName(_forced_format) << "\n";
if ((surf = SDL_ConvertSurface(surf, _forced_format)) == nullptr) {
surf = value.surface.get(); // reset ptr
//std::cerr << "PCVS warning: default conversion failed: " << SDL_GetError() << "\n";
//auto start = Message::getTimeMS();
surf = convertYUY2_IYUV(surf);
//auto end = Message::getTimeMS();
// 3ms
//std::cerr << "DTC: timing " << SDL_GetPixelFormatName(converted_surf->format) << "->SDL_PIXELFORMAT_IYUV: " << end-start << "ms\n";
} else if (surf->format == SDL_PIXELFORMAT_NV12 && _forced_format == SDL_PIXELFORMAT_IYUV) {
surf = convertNV12_IYUV(surf);
} else if (isFormatYUV(surf->format)) {
// TODO: fix sdl rgb->yuv conversion resulting in too dark (colorspace) issues
// https://github.com/libsdl-org/SDL/issues/10877
// meh, need to convert to rgb as a stopgap
//auto start = Message::getTimeMS();
SDL_Surface* tmp_conv_surf = SDL_ConvertSurfaceAndColorspace(surf, SDL_PIXELFORMAT_RGB24, nullptr, SDL_COLORSPACE_RGB_DEFAULT, 0);
//auto end = Message::getTimeMS();
// 1ms
//std::cerr << "DTC: timing " << SDL_GetPixelFormatName(converted_surf->format) << "->SDL_PIXELFORMAT_RGB24: " << end-start << "ms\n";
//start = Message::getTimeMS();
surf = SDL_ConvertSurfaceAndColorspace(tmp_conv_surf, _forced_format, nullptr, SDL_COLORSPACE_YUV_DEFAULT, 0);
//end = Message::getTimeMS();
// 60ms
//std::cerr << "DTC: timing SDL_PIXELFORMAT_RGB24->" << SDL_GetPixelFormatName(_forced_format) << ": " << end-start << "ms\n";
SDL_DestroySurface(tmp_conv_surf);
// sdl hardcodes BT709_LIMITED
if ((surf = SDL_ConvertSurfaceAndColorspace(surf, _forced_format, nullptr, SDL_GetSurfaceColorspace(surf), 0)) == nullptr) {
//if ((surf = SDL_ConvertSurfaceAndColorspace(surf, _forced_format, nullptr, SDL_COLORSPACE_BT709_LIMITED, 0)) == nullptr) {
surf = value.surface.get(); // reset ptr
//std::cerr << "PCVS warning: default conversion with same colorspace failed: " << SDL_GetError() << "\n";
// simple convert failed, fall back to ->rgb->yuv
//SDL_Surface* tmp_conv_surf = SDL_ConvertSurfaceAndColorspace(surf, SDL_PIXELFORMAT_RGB24, nullptr, SDL_COLORSPACE_RGB_DEFAULT, 0);
SDL_Surface* tmp_conv_surf = SDL_ConvertSurface(surf, SDL_PIXELFORMAT_RGB24);
if (tmp_conv_surf == nullptr) {
std::cerr << "PCVS error: conversion to RGB failed: " << SDL_GetError() << "\n";
} else {
surf = SDL_ConvertSurface(surf, _forced_format);
//surf = SDL_ConvertSurfaceAndColorspace(tmp_conv_surf, _forced_format, nullptr, SDL_COLORSPACE_YUV_DEFAULT, 0);
surf = SDL_ConvertSurface(tmp_conv_surf, _forced_format);
//surf = SDL_ConvertSurfaceAndColorspace(tmp_conv_surf, _forced_format, nullptr, SDL_COLORSPACE_BT601_LIMITED, 0);
SDL_DestroySurface(tmp_conv_surf);
}
}
}
if (surf == nullptr) {
// oh god
std::cerr << "DTC error: failed to convert surface to IYUV: " << SDL_GetError() << "\n";
std::cerr << "PCVS error: failed to convert surface to IYUV: " << SDL_GetError() << "\n";
return false;
}
}
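
Pieced together, the new push() path tries SDL's direct conversion first, keeping the surface's own colorspace (plain conversion would otherwise assume BT709_LIMITED for YUV), and only falls back to an RGB24 intermediate if that fails. A self-contained sketch of that chain (the function name is illustrative; logging omitted):

#include <SDL3/SDL.h>

// Convert src to dst_format: direct conversion with the source colorspace,
// falling back to ->RGB24-> dst_format. Returns nullptr if both paths fail.
static SDL_Surface* convert_with_fallback(SDL_Surface* src, SDL_PixelFormat dst_format) {
	SDL_Surface* out = SDL_ConvertSurfaceAndColorspace(
		src, dst_format, nullptr, SDL_GetSurfaceColorspace(src), 0
	);
	if (out != nullptr) {
		return out;
	}

	// direct conversion failed, go through RGB24 with SDL's default colorspaces
	SDL_Surface* tmp = SDL_ConvertSurface(src, SDL_PIXELFORMAT_RGB24);
	if (tmp == nullptr) {
		return nullptr;
	}
	out = SDL_ConvertSurface(tmp, dst_format);
	SDL_DestroySurface(tmp);
	return out;
}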

View File

@ -194,7 +194,7 @@ bool StreamManager::connect(Object src, Object sink, bool threaded) {
std::move(our_data),
[](Connection& con) -> void {
// there might be more stored
for (size_t i = 0; i < 10; i++) {
for (size_t i = 0; i < 64; i++) {
auto new_frame_opt = static_cast<inlineData*>(con.data.get())->reader->pop();
// TODO: frame interval estimates
if (new_frame_opt.has_value()) {

View File

@ -346,6 +346,11 @@ Screen* MainScreen::render(float time_delta, bool&) {
ImGui::SetItemTooltip("Limiting compute can slow down things like filetransfers!");
}
ImGui::Separator();
ImGui::Text("render interval: %.0fms (%.2ffps)", _render_interval*1000.f, 1.f/_render_interval);
ImGui::Text("tick interval: %.0fms (%.2ftps)", _min_tick_interval*1000.f, 1.f/_min_tick_interval);
ImGui::EndMenu();
}
if (ImGui::BeginMenu("Settings")) {
@ -504,12 +509,12 @@ Screen* MainScreen::render(float time_delta, bool&) {
// min over non-animation intervals in all cases
_render_interval = std::min<float>(pm_interval, cg_interval);
_render_interval = std::min<float>(_render_interval, tc_unfinished_queue_interval);
_render_interval = std::min<float>(_render_interval, dvt_interval);
// low delay time window
if (!_window_hidden && _time_since_event < curr_profile.low_delay_window) {
_render_interval = std::min<float>(_render_interval, ctc_interval);
_render_interval = std::min<float>(_render_interval, msgtc_interval);
_render_interval = std::min<float>(_render_interval, dvt_interval);
_render_interval = std::clamp(
_render_interval,
@ -520,7 +525,6 @@ Screen* MainScreen::render(float time_delta, bool&) {
} else if (!_window_hidden && _time_since_event < curr_profile.mid_delay_window) {
_render_interval = std::min<float>(_render_interval, ctc_interval);
_render_interval = std::min<float>(_render_interval, msgtc_interval);
_render_interval = std::min<float>(_render_interval, dvt_interval);
_render_interval = std::clamp(
_render_interval,

View File

@ -701,7 +701,9 @@ bool ToxAVVoIPModel::onEvent(const Events::FriendVideoFrame& e) {
vsrc.get<FrameStream2MultiSource<SDLVideoFrame>*>()->push({
// ms -> us
Message::getTimeMS() * 1000, // TODO: make more precise
// would be nice if we had been given this from toxcore
// TODO: make more precise
Message::getTimeMS() * 1000,
new_surf
});