5 Commits
video ... pyqt5

SHA1 Message Date
3546dbd75b some broken dialogs fixed 2017-05-08 23:58:23 +03:00
b081bff2ce some mainscreen widgets fixes 2017-04-16 22:56:31 +03:00
e257d8f96f signals && imports fixes 2017-04-13 19:22:46 +03:00
972a073cb9 missed translations fix, travis fix, updates 2017-04-11 21:38:00 +03:00
e4998cd5b5 pyqt5 initial commit 2017-04-11 21:10:03 +03:00
16 changed files with 122 additions and 447 deletions

View File

@@ -7,11 +7,9 @@ before_install:
- sudo apt-get install -y checkinstall build-essential
- sudo apt-get install portaudio19-dev
- sudo apt-get install libconfig-dev libvpx-dev check -qq
- sudo apt-get install -y python3-pyqt5
install:
- pip install sip
- pip install pyqt5
- pip install pyaudio
- pip install opencv-python
before_script:
# OPUS
- wget http://downloads.xiph.org/releases/opus/opus-1.0.3.tar.gz

View File

@@ -8,20 +8,15 @@ import sys
version = program_version + '.0'
MODULES = ['numpy', 'PyQt5']
MODULES = []
if system() in ('Windows', 'Darwin'):
MODULES.append('PyAudio')
MODULES = ['PyAudio', 'PyQt5']
else:
try:
import pyaudio
except ImportError:
MODULES.append('PyAudio')
DEP_LINKS = []
if system() == 'Windows':
DEP_LINKS = [] # TODO: add opencv.whl
MODULES = ['PyAudio']
class InstallScript(install):
@@ -60,7 +55,6 @@ setup(name='Toxygen',
license='GPL3',
packages=['toxygen', 'toxygen.plugins', 'toxygen.styles'],
install_requires=MODULES,
dependency_links=DEP_LINKS,
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3 :: Only',

View File

@@ -19,7 +19,6 @@ class IncomingCallWidget(widgets.CenteredWidget):
self.avatar_label.setScaledContents(False)
self.name = widgets.DataLabel(self)
self.name.setGeometry(QtCore.QRect(90, 20, 300, 25))
self._friend_number = friend_number
font = QtGui.QFont()
font.setFamily(settings.Settings.get_instance()['font'])
font.setPointSize(16)
@@ -52,10 +51,10 @@ class IncomingCallWidget(widgets.CenteredWidget):
self.setWindowTitle(text)
self.name.setText(name)
self.call_type.setText(text)
self._processing = False
self.accept_audio.clicked.connect(self.accept_call_with_audio)
self.accept_video.clicked.connect(self.accept_call_with_video)
self.decline.clicked.connect(self.decline_call)
pr = profile.Profile.get_instance()
self.accept_audio.clicked.connect(lambda: pr.accept_call(friend_number, True, False) or self.stop())
# self.accept_video.clicked.connect(lambda: pr.start_call(friend_number, True, True))
self.decline.clicked.connect(lambda: pr.stop_call(friend_number, False) or self.stop())
class SoundPlay(QtCore.QThread):
@@ -106,29 +105,31 @@ class IncomingCallWidget(widgets.CenteredWidget):
self.thread.wait()
self.close()
def accept_call_with_audio(self):
if self._processing:
return
self._processing = True
pr = profile.Profile.get_instance()
pr.accept_call(self._friend_number, True, False)
self.stop()
def accept_call_with_video(self):
if self._processing:
return
self._processing = True
pr = profile.Profile.get_instance()
pr.accept_call(self._friend_number, True, True)
self.stop()
def decline_call(self):
if self._processing:
return
self._processing = True
pr = profile.Profile.get_instance()
pr.stop_call(self._friend_number, False)
self.stop()
def set_pixmap(self, pixmap):
self.avatar_label.setPixmap(pixmap)
class AudioMessageRecorder(widgets.CenteredWidget):
def __init__(self, friend_number, name):
super(AudioMessageRecorder, self).__init__()
self.label = QtWidgets.QLabel(self)
self.label.setGeometry(QtCore.QRect(10, 20, 250, 20))
text = QtWidgets.QApplication.translate("MenuWindow", "Send audio message to friend {}")
self.label.setText(text.format(name))
self.record = QtWidgets.QPushButton(self)
self.record.setGeometry(QtCore.QRect(20, 100, 150, 150))
self.record.setText(QtWidgets.QApplication.translate("MenuWindow", "Start recording"))
self.record.clicked.connect(self.start_or_stop_recording)
self.recording = False
self.friend_num = friend_number
def start_or_stop_recording(self):
if not self.recording:
self.recording = True
self.record.setText(QtWidgets.QApplication.translate("MenuWindow", "Stop recording"))
else:
self.close()
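
Note on the new signal wiring above: each button is now connected through a lambda that chains the Profile call and self.stop() with 'or'; because those Profile methods return None (falsy), both calls always run. A minimal, self-contained sketch of that idiom (the function names below are stand-ins, not Toxygen code):

    def accept_call():      # stands in for pr.accept_call(friend_number, True, False)
        print('call accepted')

    def stop_widget():      # stands in for self.stop()
        print('widget stopped')

    # accept_call() or stop_widget() evaluates stop_widget() too, because accept_call() returns None.
    handler = lambda: accept_call() or stop_widget()
    handler()               # prints both lines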

View File

@@ -9,8 +9,7 @@ from plugin_support import PluginLoader
import queue
import threading
import util
import cv2
import numpy as np
# -----------------------------------------------------------------------------------------------------------------
# Threads
@@ -313,67 +312,11 @@ def callback_audio(toxav, friend_number, samples, audio_samples_per_channel, aud
"""
New audio chunk
"""
Profile.get_instance().call.audio_chunk(
Profile.get_instance().call.chunk(
bytes(samples[:audio_samples_per_channel * 2 * audio_channels_count]),
audio_channels_count,
rate)
# -----------------------------------------------------------------------------------------------------------------
# Callbacks - video
# -----------------------------------------------------------------------------------------------------------------
def video_receive_frame(toxav, friend_number, width, height, y, u, v, ystride, ustride, vstride, user_data):
"""
Creates yuv frame from y, u, v and shows it using OpenCV
For yuv => bgr we need this YUV420 frame:
width
-------------------------
| |
| Y | height
| |
-------------------------
| | |
| U even | U odd | height // 4
| | |
-------------------------
| | |
| V even | V odd | height // 4
| | |
-------------------------
width // 2 width // 2
It can be created from initial y, u, v using slices
For more info see callback_video_receive_frame docs
"""
try:
y_size = abs(max(width, abs(ystride)))
u_size = abs(max(width // 2, abs(ustride)))
v_size = abs(max(width // 2, abs(vstride)))
y = np.asarray(y[:y_size * height], dtype=np.uint8).reshape(height, y_size)
u = np.asarray(u[:u_size * height // 2], dtype=np.uint8).reshape(height // 2, u_size)
v = np.asarray(v[:v_size * height // 2], dtype=np.uint8).reshape(height // 2, v_size)
width -= width % 4
height -= height % 4
frame = np.zeros((int(height * 1.5), width), dtype=np.uint8)
frame[:height, :] = y[:height, :width]
frame[height:height * 5 // 4, :width // 2] = u[:height // 2:2, :width // 2]
frame[height:height * 5 // 4, width // 2:] = u[1:height // 2:2, :width // 2]
frame[height * 5 // 4:, :width // 2] = v[:height // 2:2, :width // 2]
frame[height * 5 // 4:, width // 2:] = v[1:height // 2:2, :width // 2]
frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
invoke_in_main_thread(cv2.imshow, str(friend_number), frame)
except Exception as ex:
print(ex)
# -----------------------------------------------------------------------------------------------------------------
# Callbacks - initialization
@@ -407,7 +350,7 @@ def init_callbacks(tox, window, tray):
toxav.callback_call_state(call_state, 0)
toxav.callback_call(call, 0)
toxav.callback_audio_receive_frame(callback_audio, 0)
toxav.callback_video_receive_frame(video_receive_frame, 0)
tox.callback_friend_lossless_packet(lossless_packet, 0)
tox.callback_friend_lossy_packet(lossy_packet, 0)
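
For context, the removed video_receive_frame callback above packed the incoming Y, U and V planes into a single YUV420 (I420) buffer and displayed it with OpenCV, as its docstring describes. A minimal sketch of that packing, assuming opencv-python and numpy are available and using a contiguous reshape of the chroma planes instead of the even/odd slicing in the removed code (the function name and test values are illustrative):

    import numpy as np
    import cv2  # opencv-python

    def i420_to_bgr(y, u, v, width, height):
        # y: (height, width) uint8; u, v: (height // 2, width // 2) uint8
        frame = np.zeros((height * 3 // 2, width), dtype=np.uint8)
        frame[:height, :] = y                                              # luma plane
        frame[height:height * 5 // 4, :] = u.reshape(height // 4, width)   # U plane, contiguous
        frame[height * 5 // 4:, :] = v.reshape(height // 4, width)         # V plane, contiguous
        return cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)

    # Synthetic 4x4 gray frame: Y = 128, U = V = 128 (no chroma).
    bgr = i420_to_bgr(np.full((4, 4), 128, np.uint8),
                      np.full((2, 2), 128, np.uint8),
                      np.full((2, 2), 128, np.uint8), 4, 4)
    assert bgr.shape == (4, 4, 3)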

View File

@@ -3,68 +3,14 @@ import time
import threading
import settings
from toxav_enums import *
import cv2
import itertools
import numpy as np
# TODO: play sound until outgoing call will be started or cancelled
# TODO: play sound until outgoing call will be started or cancelled and add timeout
# TODO: add widget for call
class Call:
def __init__(self, out_audio, out_video, in_audio=False, in_video=False):
self._in_audio = in_audio
self._in_video = in_video
self._out_audio = out_audio
self._out_video = out_video
self._is_active = False
def get_is_active(self):
return self._is_active
def set_is_active(self, value):
self._is_active = value
is_active = property(get_is_active, set_is_active)
# -----------------------------------------------------------------------------------------------------------------
# Audio
# -----------------------------------------------------------------------------------------------------------------
def get_in_audio(self):
return self._in_audio
def set_in_audio(self, value):
self._in_audio = value
in_audio = property(get_in_audio, set_in_audio)
def get_out_audio(self):
return self._out_audio
def set_out_audio(self, value):
self._out_audio = value
out_audio = property(get_out_audio, set_out_audio)
# -----------------------------------------------------------------------------------------------------------------
# Video
# -----------------------------------------------------------------------------------------------------------------
def get_in_video(self):
return self._in_video
def set_in_video(self, value):
self._in_video = value
in_video = property(get_in_video, set_in_video)
def get_out_video(self):
return self._out_video
def set_out_video(self, value):
self._in_video = value
out_video = property(get_out_video, set_out_video)
CALL_TYPE = {
'NONE': 0,
'AUDIO': 1,
'VIDEO': 2
}
class AV:
@@ -73,7 +19,7 @@ class AV:
self._toxav = toxav
self._running = True
self._calls = {} # dict: key - friend number, value - Call instance
self._calls = {} # dict: key - friend number, value - call type
self._audio = None
self._audio_stream = None
@@ -86,75 +32,27 @@ class AV:
self._audio_duration = 60
self._audio_sample_count = self._audio_rate * self._audio_channels * self._audio_duration // 1000
self._video = None
self._video_thread = None
self._video_running = False
self._video_width = 640
self._video_height = 480
def stop(self):
self._running = False
self.stop_audio_thread()
self.stop_video_thread()
def __contains__(self, friend_number):
return friend_number in self._calls
# -----------------------------------------------------------------------------------------------------------------
# Calls
# -----------------------------------------------------------------------------------------------------------------
def __call__(self, friend_number, audio, video):
"""Call friend with specified number"""
self._toxav.call(friend_number, 32 if audio else 0, 5000 if video else 0)
self._calls[friend_number] = Call(audio, video)
threading.Timer(30.0, lambda: self.finish_not_started_call(friend_number)).start()
def accept_call(self, friend_number, audio_enabled, video_enabled):
if self._running:
self._calls[friend_number] = Call(audio_enabled, video_enabled)
self._toxav.answer(friend_number, 32 if audio_enabled else 0, 5000 if video_enabled else 0)
if audio_enabled:
self._calls[friend_number] = CALL_TYPE['AUDIO']
self.start_audio_thread()
if video_enabled:
self.start_video_thread()
def finish_call(self, friend_number, by_friend=False):
if not by_friend:
self._toxav.call_control(friend_number, TOXAV_CALL_CONTROL['CANCEL'])
if friend_number in self._calls:
del self._calls[friend_number]
if not len(list(filter(lambda c: c.out_audio, self._calls))):
if not len(self._calls):
self.stop_audio_thread()
if not len(list(filter(lambda c: c.out_video, self._calls))):
self.stop_video_thread()
def finish_not_started_call(self, friend_number):
if friend_number in self:
call = self._calls[friend_number]
if not call.is_active:
self.finish_call(friend_number)
def toxav_call_state_cb(self, friend_number, state):
"""
New call state
"""
call = self._calls[friend_number]
call.is_active = True
call.in_audio = state | TOXAV_FRIEND_CALL_STATE['SENDING_A']
call.in_video = state | TOXAV_FRIEND_CALL_STATE['SENDING_V']
if state | TOXAV_FRIEND_CALL_STATE['ACCEPTING_A'] and call.out_audio:
self.start_audio_thread()
if state | TOXAV_FRIEND_CALL_STATE['ACCEPTING_V'] and call.out_video:
self.start_video_thread()
# -----------------------------------------------------------------------------------------------------------------
# Threads
# -----------------------------------------------------------------------------------------------------------------
def stop(self):
self._running = False
self.stop_audio_thread()
def start_audio_thread(self):
"""
@@ -194,37 +92,7 @@ class AV:
self._out_stream.close()
self._out_stream = None
def start_video_thread(self):
if self._video_thread is not None:
return
self._video_running = True
s = settings.Settings.get_instance()
self._video_width = s.video['width']
self._video_height = s.video['height']
self._video = cv2.VideoCapture(s.video['device'])
self._video.set(cv2.CAP_PROP_FPS, 25)
self._video.set(cv2.CAP_PROP_FRAME_WIDTH, self._video_width)
self._video.set(cv2.CAP_PROP_FRAME_HEIGHT, self._video_height)
self._video_thread = threading.Thread(target=self.send_video)
self._video_thread.start()
def stop_video_thread(self):
if self._video_thread is None:
return
self._video_running = False
self._video_thread.join()
self._video_thread = None
self._video = None
# -----------------------------------------------------------------------------------------------------------------
# Incoming chunks
# -----------------------------------------------------------------------------------------------------------------
def audio_chunk(self, samples, channels_count, rate):
def chunk(self, samples, channels_count, rate):
"""
Incoming chunk
"""
@@ -237,10 +105,6 @@ class AV:
output=True)
self._out_stream.write(samples)
# -----------------------------------------------------------------------------------------------------------------
# AV sending
# -----------------------------------------------------------------------------------------------------------------
def send_audio(self):
"""
This method sends audio to friends
@@ -250,10 +114,10 @@ class AV:
try:
pcm = self._audio_stream.read(self._audio_sample_count)
if pcm:
for friend_num in self._calls:
if self._calls[friend_num].out_audio:
for friend in self._calls:
if self._calls[friend] & 1:
try:
self._toxav.audio_send_frame(friend_num, pcm, self._audio_sample_count,
self._toxav.audio_send_frame(friend, pcm, self._audio_sample_count,
self._audio_channels, self._audio_rate)
except:
pass
@@ -262,71 +126,19 @@ class AV:
time.sleep(0.01)
def send_video(self):
def accept_call(self, friend_number, audio_enabled, video_enabled):
if self._running:
self._calls[friend_number] = int(video_enabled) * 2 + int(audio_enabled)
self._toxav.answer(friend_number, 32 if audio_enabled else 0, 5000 if video_enabled else 0)
self.start_audio_thread()
def toxav_call_state_cb(self, friend_number, state):
"""
This method sends video to friends
New call state
"""
while self._video_running:
try:
result, frame = self._video.read()
if result:
height, width, channels = frame.shape
for friend_num in self._calls:
if self._calls[friend_num].out_video:
try:
y, u, v = self.convert_bgr_to_yuv(frame)
self._toxav.video_send_frame(friend_num, width, height, y, u, v)
except Exception as e:
print(e)
except Exception as e:
print(e)
if self._running:
time.sleep(0.01)
if state & TOXAV_FRIEND_CALL_STATE['ACCEPTING_A']:
self._calls[friend_number] |= 1
def convert_bgr_to_yuv(self, frame):
"""
:param frame: input bgr frame
:return y, u, v: y, u, v values of frame
How this function works:
OpenCV creates YUV420 frame from BGR
This frame has following structure and size:
width, height - dim of input frame
width, height * 1.5 - dim of output frame
width
-------------------------
| |
| Y | height
| |
-------------------------
| | |
| U even | U odd | height // 4
| | |
-------------------------
| | |
| V even | V odd | height // 4
| | |
-------------------------
width // 2 width // 2
Y, U, V can be extracted using slices and joined in one list using itertools.chain.from_iterable()
Function returns bytes(y), bytes(u), bytes(v), because it is required for ctypes
"""
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
y = frame[:self._video_height, :]
y = list(itertools.chain.from_iterable(y))
u = np.zeros((self._video_height // 2, self._video_width // 2), dtype=np.int)
u[::2, :] = frame[self._video_height:self._video_height * 5 // 4, :self._video_width // 2]
u[1::2, :] = frame[self._video_height:self._video_height * 5 // 4, self._video_width // 2:]
u = list(itertools.chain.from_iterable(u))
v = np.zeros((self._video_height // 2, self._video_width // 2), dtype=np.int)
v[::2, :] = frame[self._video_height * 5 // 4:, :self._video_width // 2]
v[1::2, :] = frame[self._video_height * 5 // 4:, self._video_width // 2:]
v = list(itertools.chain.from_iterable(v))
return bytes(y), bytes(u), bytes(v)
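
For reference, the integer call type that replaces the removed Call class above encodes audio in bit 0 and video in bit 1, matching int(video_enabled) * 2 + int(audio_enabled) in the new accept_call and the '& 1' test in send_audio. A small self-contained sketch of that encoding (the helper name is illustrative):

    CALL_TYPE = {'NONE': 0, 'AUDIO': 1, 'VIDEO': 2}  # as defined in the diff

    def encode_call_type(audio_enabled, video_enabled):
        # bit 0 - audio, bit 1 - video
        return int(video_enabled) * 2 + int(audio_enabled)

    call = encode_call_type(audio_enabled=True, video_enabled=False)
    assert call & CALL_TYPE['AUDIO']        # audio flag set
    assert not call & CALL_TYPE['VIDEO']    # video flag not set
    call |= CALL_TYPE['AUDIO']              # e.g. when ACCEPTING_A arrives in toxav_call_state_cb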

Binary image file changed (not shown): 3.0 KiB → 4.2 KiB

Binary image file changed (not shown): 3.0 KiB → 3.6 KiB

View File

Image file changed: 3.5 KiB → 3.5 KiB (preview omitted)

View File

@@ -541,6 +541,8 @@ class InlineImageItem(QtWidgets.QScrollArea):
fl = QtCore.QFile(directory + '/toxygen_inline_' + curr_time().replace(':', '_') + '.png')
self._pixmap.save(fl, 'PNG')
return False
def mark_as_sent(self):
return False

View File

@@ -63,7 +63,6 @@ class MainWindow(QtWidgets.QMainWindow, Singleton):
self.actionSettings = QtWidgets.QAction(Form)
self.actionSettings.setObjectName("actionSettings")
self.audioSettings = QtWidgets.QAction(Form)
self.videoSettings = QtWidgets.QAction(Form)
self.pluginData = QtWidgets.QAction(Form)
self.importPlugin = QtWidgets.QAction(Form)
self.reloadPlugins = QtWidgets.QAction(Form)
@@ -76,7 +75,6 @@ class MainWindow(QtWidgets.QMainWindow, Singleton):
self.menuSettings.addAction(self.actionNotifications)
self.menuSettings.addAction(self.actionNetwork)
self.menuSettings.addAction(self.audioSettings)
self.menuSettings.addAction(self.videoSettings)
self.menuSettings.addAction(self.updateSettings)
self.menuPlugins.addAction(self.pluginData)
self.menuPlugins.addAction(self.importPlugin)
@@ -96,7 +94,6 @@ class MainWindow(QtWidgets.QMainWindow, Singleton):
self.actionInterface_settings.triggered.connect(self.interface_settings)
self.actionNotifications.triggered.connect(self.notification_settings)
self.audioSettings.triggered.connect(self.audio_settings)
self.videoSettings.triggered.connect(self.video_settings)
self.updateSettings.triggered.connect(self.update_settings)
self.pluginData.triggered.connect(self.plugins_menu)
self.lockApp.triggered.connect(self.lock_app)
@@ -131,7 +128,6 @@ class MainWindow(QtWidgets.QMainWindow, Singleton):
self.actionAbout_program.setText(QtWidgets.QApplication.translate("MainWindow", "About program"))
self.actionSettings.setText(QtWidgets.QApplication.translate("MainWindow", "Settings"))
self.audioSettings.setText(QtWidgets.QApplication.translate("MainWindow", "Audio"))
self.videoSettings.setText(QtWidgets.QApplication.translate("MainWindow", "Video"))
self.updateSettings.setText(QtWidgets.QApplication.translate("MainWindow", "Updates"))
self.contact_name.setPlaceholderText(QtWidgets.QApplication.translate("MainWindow", "Search"))
self.sendMessageButton.setToolTip(QtWidgets.QApplication.translate("MainWindow", "Send message"))
@@ -463,10 +459,6 @@ class MainWindow(QtWidgets.QMainWindow, Singleton):
self.audio_s = AudioSettings()
self.audio_s.show()
def video_settings(self):
self.video_s = VideoSettings()
self.video_s.show()
def update_settings(self):
self.update_s = UpdateSettings()
self.update_s.show()
@@ -567,15 +559,14 @@ class MainWindow(QtWidgets.QMainWindow, Singleton):
def call_finished(self):
self.update_call_state('call')
def update_call_state(self, state):
def update_call_state(self, fl):
# TODO: do smth with video call button
os.chdir(curr_directory() + '/images/')
pixmap = QtGui.QPixmap(curr_directory() + '/images/{}.png'.format(state))
pixmap = QtGui.QPixmap(curr_directory() + '/images/{}.png'.format(fl))
icon = QtGui.QIcon(pixmap)
self.callButton.setIcon(icon)
self.callButton.setIconSize(QtCore.QSize(50, 50))
pixmap = QtGui.QPixmap(curr_directory() + '/images/{}_video.png'.format(state))
pixmap = QtGui.QPixmap(curr_directory() + '/images/videocall.png')
icon = QtGui.QIcon(pixmap)
self.videocallButton.setIcon(icon)
self.videocallButton.setIconSize(QtCore.QSize(35, 35))

View File

@@ -207,8 +207,8 @@ class DropdownMenu(QtWidgets.QWidget):
super(DropdownMenu, self).__init__(parent)
self.installEventFilter(self)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setMaximumSize(120, 120)
self.setMinimumSize(120, 120)
self.setMaximumSize(180, 120)
self.setMinimumSize(180, 120)
self.screenshotButton = QRightClickButton(self)
self.screenshotButton.setGeometry(QtCore.QRect(0, 60, 60, 60))
self.screenshotButton.setObjectName("screenshotButton")
@@ -217,9 +217,15 @@ class DropdownMenu(QtWidgets.QWidget):
self.fileTransferButton.setGeometry(QtCore.QRect(60, 60, 60, 60))
self.fileTransferButton.setObjectName("fileTransferButton")
self.audioMessageButton = QtWidgets.QPushButton(self)
self.audioMessageButton.setGeometry(QtCore.QRect(120, 60, 60, 60))
self.smileyButton = QtWidgets.QPushButton(self)
self.smileyButton.setGeometry(QtCore.QRect(0, 0, 60, 60))
self.videoMessageButton = QtWidgets.QPushButton(self)
self.videoMessageButton.setGeometry(QtCore.QRect(120, 0, 60, 60))
self.stickerButton = QtWidgets.QPushButton(self)
self.stickerButton.setGeometry(QtCore.QRect(60, 0, 60, 60))
@@ -227,17 +233,22 @@ class DropdownMenu(QtWidgets.QWidget):
icon = QtGui.QIcon(pixmap)
self.fileTransferButton.setIcon(icon)
self.fileTransferButton.setIconSize(QtCore.QSize(50, 50))
pixmap = QtGui.QPixmap(util.curr_directory() + '/images/screenshot.png')
icon = QtGui.QIcon(pixmap)
self.screenshotButton.setIcon(icon)
self.screenshotButton.setIconSize(QtCore.QSize(50, 60))
pixmap = QtGui.QPixmap(util.curr_directory() + '/images/audio_message.png')
icon = QtGui.QIcon(pixmap)
self.audioMessageButton.setIcon(icon)
self.audioMessageButton.setIconSize(QtCore.QSize(50, 50))
pixmap = QtGui.QPixmap(util.curr_directory() + '/images/smiley.png')
icon = QtGui.QIcon(pixmap)
self.smileyButton.setIcon(icon)
self.smileyButton.setIconSize(QtCore.QSize(50, 50))
pixmap = QtGui.QPixmap(util.curr_directory() + '/images/video_message.png')
icon = QtGui.QIcon(pixmap)
self.videoMessageButton.setIcon(icon)
self.videoMessageButton.setIconSize(QtCore.QSize(55, 55))
pixmap = QtGui.QPixmap(util.curr_directory() + '/images/sticker.png')
icon = QtGui.QIcon(pixmap)
self.stickerButton.setIcon(icon)
@@ -245,6 +256,8 @@ class DropdownMenu(QtWidgets.QWidget):
self.screenshotButton.setToolTip(QtWidgets.QApplication.translate("MenuWindow", "Send screenshot"))
self.fileTransferButton.setToolTip(QtWidgets.QApplication.translate("MenuWindow", "Send file"))
self.audioMessageButton.setToolTip(QtWidgets.QApplication.translate("MenuWindow", "Send audio message"))
self.videoMessageButton.setToolTip(QtWidgets.QApplication.translate("MenuWindow", "Send video message"))
self.smileyButton.setToolTip(QtWidgets.QApplication.translate("MenuWindow", "Add smiley"))
self.stickerButton.setToolTip(QtWidgets.QApplication.translate("MenuWindow", "Send sticker"))
@@ -257,7 +270,7 @@ class DropdownMenu(QtWidgets.QWidget):
def leaveEvent(self, event):
self.close()
def eventFilter(self, obj, event):
def eventFilter(self, object, event):
if event.type() == QtCore.QEvent.WindowDeactivate:
self.close()
return False

View File

@@ -247,12 +247,12 @@ class ProfileSettings(CenteredWidget):
def set_avatar(self):
choose = QtWidgets.QApplication.translate("ProfileSettingsForm", "Choose avatar")
name = QtWidgets.QFileDialog.getOpenFileName(self, choose, None, 'Images (*.png)',
QtGui.QComboBoxQtWidgets.QFileDialog.DontUseNativeDialog)
name = QtWidgets.QFileDialog.getOpenFileName(self, choose, None, None, 'Images (*.png)',
QtWidgets.QFileDialog.DontUseNativeDialog)
if name[0]:
bitmap = QtGui.QPixmap(name[0])
bitmap.scaled(QtCore.QSize(128, 128), aspectMode=QtCore.Qt.KeepAspectRatio,
mode=QtCore.Qt.SmoothTransformation)
bitmap.scaled(128, 128, QtCore.Qt.KeepAspectRatio,
QtCore.Qt.SmoothTransformation)
byte_array = QtCore.QByteArray()
buffer = QtCore.QBuffer(byte_array)
@@ -261,8 +261,9 @@ class ProfileSettings(CenteredWidget):
Profile.get_instance().set_avatar(bytes(byte_array.data()))
def export_profile(self):
directory = QtWidgets.QFileDialog.getExistingDirectory(options=QtWidgets.QFileDialog.DontUseNativeDialog,
dir=curr_directory()) + '/'
directory = QtWidgets.QFileDialog.getExistingDirectory(self, '', curr_directory() + '/',
QtWidgets.QFileDialog.ShowDirsOnly | QtWidgets.QFileDialog.DontUseNativeDialog)
if directory != '/':
reply = QtWidgets.QMessageBox.question(None,
QtWidgets.QApplication.translate("ProfileSettingsForm",
@@ -493,7 +494,8 @@ class PrivacySettings(CenteredWidget):
settings.save()
def new_path(self):
directory = QtWidgets.QFileDialog.getExistingDirectory(options=QtWidgets.QFileDialog.DontUseNativeDialog) + '/'
directory = QtWidgets.QFileDialog.getExistingDirectory(self, '', curr_directory() + '/',
QtWidgets.QFileDialog.ShowDirsOnly | QtWidgets.QFileDialog.DontUseNativeDialog) + '/'
if directory != '/':
self.path.setPlainText(directory)
@@ -795,83 +797,6 @@ class AudioSettings(CenteredWidget):
settings.save()
class VideoSettings(CenteredWidget):
"""
Audio calls settings form
"""
def __init__(self):
super().__init__()
self.initUI()
self.retranslateUi()
self.center()
def initUI(self):
self.setObjectName("videoSettingsForm")
self.resize(400, 120)
self.setMinimumSize(QtCore.QSize(400, 120))
self.setMaximumSize(QtCore.QSize(400, 120))
self.in_label = QtWidgets.QLabel(self)
self.in_label.setGeometry(QtCore.QRect(25, 5, 350, 20))
settings = Settings.get_instance()
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setFamily(settings['font'])
self.in_label.setFont(font)
self.video_size = QtWidgets.QComboBox(self)
self.video_size.setGeometry(QtCore.QRect(25, 70, 350, 30))
self.input = QtWidgets.QComboBox(self)
self.input.setGeometry(QtCore.QRect(25, 30, 350, 30))
self.input.currentIndexChanged.connect(self.selectionChanged)
import cv2
self.devices = []
self.frame_max_sizes = []
for i in range(10):
v = cv2.VideoCapture(i)
if v.isOpened():
v.set(cv2.CAP_PROP_FRAME_WIDTH, 10000)
v.set(cv2.CAP_PROP_FRAME_HEIGHT, 10000)
width = int(v.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(v.get(cv2.CAP_PROP_FRAME_HEIGHT))
del v
self.devices.append(i)
self.frame_max_sizes.append((width, height))
self.input.addItem('Device #' + str(i))
index = self.devices.index(settings.video['device'])
if index + 1:
self.input.setCurrentIndex(index)
def retranslateUi(self):
self.setWindowTitle(QtWidgets.QApplication.translate("videoSettingsForm", "Video settings"))
self.in_label.setText(QtWidgets.QApplication.translate("videoSettingsForm", "Device:"))
def closeEvent(self, event):
settings = Settings.get_instance()
settings.video['device'] = self.devices[self.input.currentIndex()]
text = self.video_size.currentText()
settings.video['width'] = int(text.split(' ')[0])
settings.video['height'] = int(text.split(' ')[-1])
settings.save()
def selectionChanged(self):
width, height = self.frame_max_sizes[self.input.currentIndex()]
self.video_size.clear()
dims = [
(320, 240),
(640, 360),
(640, 480),
(720, 480),
(1280, 720),
(1920, 1080),
(2560, 1440)
]
for w, h in dims:
if w <= width and h <= height:
self.video_size.addItem(str(w) + ' * ' + str(h))
class PluginsSettings(CenteredWidget):
"""
Plugins settings form
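
The removed VideoSettings form above discovers cameras by opening device indexes 0-9 and requesting an oversized frame so the driver clamps the capture size to its maximum. A self-contained sketch of that probing technique, assuming opencv-python (the helper name and device count are illustrative):

    import cv2  # opencv-python

    def probe_video_devices(max_index=10):
        # Returns [(device_index, (max_width, max_height)), ...] for devices that open.
        devices = []
        for i in range(max_index):
            cap = cv2.VideoCapture(i)
            if cap.isOpened():
                # Ask for an absurdly large frame; the driver clamps it to what the camera supports.
                cap.set(cv2.CAP_PROP_FRAME_WIDTH, 10000)
                cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 10000)
                devices.append((i, (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                                    int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))))
            cap.release()
        return devices

    print(probe_video_devices())  # e.g. [(0, (1280, 720))]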

View File

@@ -14,8 +14,6 @@ import avwidgets
import plugin_support
import basecontact
import items_factory
import cv2
import threading
class Profile(basecontact.BaseContact, Singleton):
@@ -38,7 +36,6 @@ class Profile(basecontact.BaseContact, Singleton):
self._tox = tox
self._file_transfers = {} # dict of file transfers. key - tuple (friend_number, file_number)
self._call = calls.AV(tox.AV) # object with data about calls
self._call_widgets = {} # dict of incoming call widgets
self._incoming_calls = set()
self._load_history = True
self._waiting_for_reconnection = False
@@ -1229,9 +1226,10 @@ class Profile(basecontact.BaseContact, Singleton):
self._messages.scrollToBottom()
else:
friend.actions = True
self._call_widgets[friend_number] = avwidgets.IncomingCallWidget(friend_number, text, friend.name)
self._call_widgets[friend_number].set_pixmap(friend.get_pixmap())
self._call_widgets[friend_number].show()
# TODO: dict of widgets
self._call_widget = avwidgets.IncomingCallWidget(friend_number, text, friend.name)
self._call_widget.set_pixmap(friend.get_pixmap())
self._call_widget.show()
def accept_call(self, friend_number, audio, video):
"""
@@ -1241,7 +1239,8 @@ class Profile(basecontact.BaseContact, Singleton):
self._screen.active_call()
if friend_number in self._incoming_calls:
self._incoming_calls.remove(friend_number)
del self._call_widgets[friend_number]
if hasattr(self, '_call_widget'):
del self._call_widget
def stop_call(self, friend_number, by_friend):
"""
@@ -1255,9 +1254,8 @@ class Profile(basecontact.BaseContact, Singleton):
self._screen.call_finished()
self._call.finish_call(friend_number, by_friend) # finish or decline call
if hasattr(self, '_call_widget'):
self._call_widget[friend_number].close()
del self._call_widget[friend_number]
threading.Timer(2.0, lambda: cv2.destroyWindow(str(friend_number))).start()
self._call_widget.close()
del self._call_widget
friend = self.get_friend_by_number(friend_number)
friend.append_message(InfoMessage(text, time.time()))
if friend_number == self.get_active_number():

View File

@@ -47,7 +47,6 @@ class Settings(dict, Singleton):
self.audio = {'input': p.get_default_input_device_info()['index'] if input_devices else -1,
'output': p.get_default_output_device_info()['index'] if output_devices else -1,
'enabled': input_devices and output_devices}
self.video = {'device': 0, 'width': 640, 'height': 480}
@staticmethod
def get_auto_profile():

View File

@@ -30,8 +30,7 @@ def tox_dns(email):
netman.setProxy(proxy)
for url in urls:
try:
request = QtNetwork.QNetworkRequest()
request.setUrl(QtCore.QUrl(url))
request = QtNetwork.QNetworkRequest(url)
request.setHeader(QtNetwork.QNetworkRequest.ContentTypeHeader, "application/json")
reply = netman.post(request, bytes(json.dumps(data), 'utf-8'))

View File

@@ -4,7 +4,7 @@ import shutil
import sys
import re
program_version = '0.3.0'
program_version = '0.2.8'
def log(data):