Add audio input device selector
commit df65093374 (parent 02dfc8039f)
@@ -264,6 +264,7 @@ CallManager::handleEvent(const RoomEvent<CallInvite> &callInviteEvent)
           caller.display_name,
           QString::fromStdString(roomInfo.name),
           QString::fromStdString(roomInfo.avatar_url),
+          settings_,
           MainWindow::instance());
         connect(dialog, &dialogs::AcceptCall::accept, this, [this, callInviteEvent]() {
                 MainWindow::instance()->hideOverlay();
@@ -474,6 +474,7 @@ ChatPage::ChatPage(QSharedPointer<UserSettings> userSettings, QWidget *parent)
           callee.display_name,
           QString::fromStdString(roomInfo.name),
           QString::fromStdString(roomInfo.avatar_url),
+          userSettings_,
           MainWindow::instance());
         connect(dialog, &dialogs::PlaceCall::voice, this, [this]() {
                 callManager_.sendInvite(current_room_);
@@ -77,7 +77,8 @@ UserSettings::load()
         presence_ =
           settings.value("user/presence", QVariant::fromValue(Presence::AutomaticPresence))
             .value<Presence>();
         useStunServer_ = settings.value("user/use_stun_server", false).toBool();
+        defaultAudioSource_ = settings.value("user/default_audio_source", QString()).toString();
 
         applyTheme();
 }
@@ -290,6 +291,16 @@ UserSettings::setUseStunServer(bool useStunServer)
         save();
 }
 
+void
+UserSettings::setDefaultAudioSource(const QString &defaultAudioSource)
+{
+        if (defaultAudioSource == defaultAudioSource_)
+                return;
+        defaultAudioSource_ = defaultAudioSource;
+        emit defaultAudioSourceChanged(defaultAudioSource);
+        save();
+}
+
 void
 UserSettings::applyTheme()
 {
@@ -376,6 +387,7 @@ UserSettings::save()
         settings.setValue("emoji_font_family", emojiFont_);
         settings.setValue("presence", QVariant::fromValue(presence_));
         settings.setValue("use_stun_server", useStunServer_);
+        settings.setValue("default_audio_source", defaultAudioSource_);
 
         settings.endGroup();
 
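Note: the load() and save() hunks above form the usual QSettings round trip — save() writes "default_audio_source" inside the "user" group, while load() reads it back through the full "user/default_audio_source" path. A minimal standalone sketch of that pattern (the "nheko"/"nheko" organization/application names here are placeholders, not taken from this commit):

    #include <QSettings>
    #include <QString>

    int main()
    {
            QSettings settings("nheko", "nheko"); // placeholder org/app names

            // What save() does: write the key inside the "user" group.
            settings.beginGroup("user");
            settings.setValue("default_audio_source", QString("Built-in Microphone"));
            settings.endGroup();

            // What load() does: read the same key via its full path.
            QString device =
              settings.value("user/default_audio_source", QString()).toString();
            return device.isEmpty() ? 1 : 0;
    }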
@@ -501,6 +513,9 @@ UserSettingsPage::UserSettingsPage(QSharedPointer<UserSettings> settings, QWidge
         callsLabel->setFont(font);
         useStunServer_ = new Toggle{this};
 
+        defaultAudioSourceValue_ = new QLabel(this);
+        defaultAudioSourceValue_->setFont(font);
+
         auto encryptionLabel_ = new QLabel{tr("ENCRYPTION"), this};
         encryptionLabel_->setFixedHeight(encryptionLabel_->minimumHeight() + LayoutTopMargin);
         encryptionLabel_->setAlignment(Qt::AlignBottom);
@@ -634,9 +649,10 @@ UserSettingsPage::UserSettingsPage(QSharedPointer<UserSettings> settings, QWidge
 
         formLayout_->addRow(callsLabel);
         formLayout_->addRow(new HorizontalLine{this});
-        boxWrap(tr("Allow Fallback Call Assist Server"),
+        boxWrap(tr("Allow fallback call assist server"),
                 useStunServer_,
                 tr("Will use turn.matrix.org as assist when your home server does not offer one."));
+        boxWrap(tr("Default audio source device"), defaultAudioSourceValue_);
 
         formLayout_->addRow(encryptionLabel_);
         formLayout_->addRow(new HorizontalLine{this});
@@ -797,6 +813,7 @@ UserSettingsPage::showEvent(QShowEvent *)
         deviceIdValue_->setText(QString::fromStdString(http::client()->device_id()));
         timelineMaxWidthSpin_->setValue(settings_->timelineMaxWidth());
         useStunServer_->setState(!settings_->useStunServer());
+        defaultAudioSourceValue_->setText(settings_->defaultAudioSource());
 
         deviceFingerprintValue_->setText(
           utils::humanReadableFingerprint(olm::client()->identity_keys().ed25519));
@@ -73,6 +73,8 @@ class UserSettings : public QObject
        Q_PROPERTY(Presence presence READ presence WRITE setPresence NOTIFY presenceChanged)
        Q_PROPERTY(
          bool useStunServer READ useStunServer WRITE setUseStunServer NOTIFY useStunServerChanged)
+       Q_PROPERTY(QString defaultAudioSource READ defaultAudioSource WRITE setDefaultAudioSource
+                    NOTIFY defaultAudioSourceChanged)
 
 public:
        UserSettings();
@@ -110,6 +112,7 @@ public:
        void setDecryptSidebar(bool state);
        void setPresence(Presence state);
        void setUseStunServer(bool state);
+       void setDefaultAudioSource(const QString &deviceName);
 
        QString theme() const { return !theme_.isEmpty() ? theme_ : defaultTheme_; }
        bool messageHoverHighlight() const { return messageHoverHighlight_; }
@@ -136,6 +139,7 @@ public:
        QString emojiFont() const { return emojiFont_; }
        Presence presence() const { return presence_; }
        bool useStunServer() const { return useStunServer_; }
+       QString defaultAudioSource() const { return defaultAudioSource_; }
 
 signals:
        void groupViewStateChanged(bool state);
@@ -159,6 +163,7 @@ signals:
        void emojiFontChanged(QString state);
        void presenceChanged(Presence state);
        void useStunServerChanged(bool state);
+       void defaultAudioSourceChanged(const QString &deviceName);
 
 private:
        // Default to system theme if QT_QPA_PLATFORMTHEME var is set.
@@ -187,6 +192,7 @@ private:
        QString emojiFont_;
        Presence presence_;
        bool useStunServer_;
+       QString defaultAudioSource_;
 };
 
 class HorizontalLine : public QFrame
@@ -244,6 +250,7 @@ private:
        Toggle *decryptSidebar_;
        QLabel *deviceFingerprintValue_;
        QLabel *deviceIdValue_;
+       QLabel *defaultAudioSourceValue_;
 
        QComboBox *themeCombo_;
        QComboBox *scaleFactorCombo_;
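The new Q_PROPERTY/NOTIFY pair makes the chosen device observable like any other setting. A hedged sketch of a consumer, assuming some QSharedPointer<UserSettings> named settings is at hand (the lambda body is illustrative only):

    #include <QDebug>

    QObject::connect(settings.data(),
                     &UserSettings::defaultAudioSourceChanged,
                     [](const QString &deviceName) {
                             qDebug() << "default audio source:" << deviceName;
                     });

    settings->setDefaultAudioSource("USB Microphone"); // emits the signal once
    settings->setDefaultAudioSource("USB Microphone"); // no-op: value unchanged

The early return in setDefaultAudioSource() is what guarantees the second call emits nothing and skips the save().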
@@ -487,23 +487,74 @@ WebRTCSession::startPipeline(int opusPayloadType)
         return true;
 }
 
-#define RTP_CAPS_OPUS "application/x-rtp,media=audio,encoding-name=OPUS,payload="
-
 bool
 WebRTCSession::createPipeline(int opusPayloadType)
 {
-        std::string pipeline("webrtcbin bundle-policy=max-bundle name=webrtcbin "
-                             "autoaudiosrc ! volume name=srclevel ! audioconvert ! "
-                             "audioresample ! queue ! opusenc ! rtpopuspay ! "
-                             "queue ! " RTP_CAPS_OPUS +
-                             std::to_string(opusPayloadType) + " ! webrtcbin.");
+        int nSources = audioSources_ ? g_list_length(audioSources_) : 0;
+        if (nSources == 0) {
+                nhlog::ui()->error("WebRTC: no audio sources");
+                return false;
+        }
 
-        webrtc_ = nullptr;
-        GError *error = nullptr;
-        pipe_ = gst_parse_launch(pipeline.c_str(), &error);
-        if (error) {
-                nhlog::ui()->error("WebRTC: failed to parse pipeline: {}", error->message);
-                g_error_free(error);
+        if (audioSourceIndex_ < 0 || audioSourceIndex_ >= nSources) {
+                nhlog::ui()->error("WebRTC: invalid audio source index");
                 return false;
         }
+
+        GstElement *source = gst_device_create_element(
+          GST_DEVICE_CAST(g_list_nth_data(audioSources_, audioSourceIndex_)), nullptr);
+        GstElement *volume = gst_element_factory_make("volume", "srclevel");
+        GstElement *convert = gst_element_factory_make("audioconvert", nullptr);
+        GstElement *resample = gst_element_factory_make("audioresample", nullptr);
+        GstElement *queue1 = gst_element_factory_make("queue", nullptr);
+        GstElement *opusenc = gst_element_factory_make("opusenc", nullptr);
+        GstElement *rtp = gst_element_factory_make("rtpopuspay", nullptr);
+        GstElement *queue2 = gst_element_factory_make("queue", nullptr);
+        GstElement *capsfilter = gst_element_factory_make("capsfilter", nullptr);
+
+        GstCaps *rtpcaps = gst_caps_new_simple("application/x-rtp",
+                                               "media",
+                                               G_TYPE_STRING,
+                                               "audio",
+                                               "encoding-name",
+                                               G_TYPE_STRING,
+                                               "OPUS",
+                                               "payload",
+                                               G_TYPE_INT,
+                                               opusPayloadType,
+                                               nullptr);
+        g_object_set(capsfilter, "caps", rtpcaps, nullptr);
+        gst_caps_unref(rtpcaps);
+
+        GstElement *webrtcbin = gst_element_factory_make("webrtcbin", "webrtcbin");
+        g_object_set(webrtcbin, "bundle-policy", GST_WEBRTC_BUNDLE_POLICY_MAX_BUNDLE, nullptr);
+
+        pipe_ = gst_pipeline_new(nullptr);
+        gst_bin_add_many(GST_BIN(pipe_),
+                         source,
+                         volume,
+                         convert,
+                         resample,
+                         queue1,
+                         opusenc,
+                         rtp,
+                         queue2,
+                         capsfilter,
+                         webrtcbin,
+                         nullptr);
+
+        if (!gst_element_link_many(source,
+                                   volume,
+                                   convert,
+                                   resample,
+                                   queue1,
+                                   opusenc,
+                                   rtp,
+                                   queue2,
+                                   capsfilter,
+                                   webrtcbin,
+                                   nullptr)) {
+                nhlog::ui()->error("WebRTC: failed to link pipeline elements");
+                end();
+                return false;
+        }
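The switch away from gst_parse_launch() is what makes device selection possible: a textual "autoaudiosrc" always picks a device by itself, whereas gst_device_create_element() builds a source element for the exact GstDevice the user chose. The structured gst_caps_new_simple() call replaces the old RTP_CAPS_OPUS string, and both describe the same RTP caps — a quick equivalence sketch, assuming payload type 96 for concreteness:

    GstCaps *from_string = gst_caps_from_string(
      "application/x-rtp,media=audio,encoding-name=OPUS,payload=96");
    GstCaps *structured = gst_caps_new_simple("application/x-rtp",
                                              "media", G_TYPE_STRING, "audio",
                                              "encoding-name", G_TYPE_STRING, "OPUS",
                                              "payload", G_TYPE_INT, 96,
                                              nullptr);
    g_assert(gst_caps_is_equal(from_string, structured));
    gst_caps_unref(from_string);
    gst_caps_unref(structured);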
@@ -541,3 +592,42 @@ WebRTCSession::end()
         if (state_ != State::DISCONNECTED)
                 emit stateChanged(State::DISCONNECTED);
 }
+
+void
+WebRTCSession::refreshDevices()
+{
+        if (!initialised_)
+                return;
+
+        static GstDeviceMonitor *monitor = nullptr;
+        if (!monitor) {
+                monitor = gst_device_monitor_new();
+                GstCaps *caps = gst_caps_new_empty_simple("audio/x-raw");
+                gst_device_monitor_add_filter(monitor, "Audio/Source", caps);
+                gst_caps_unref(caps);
+        }
+        g_list_free_full(audioSources_, g_object_unref);
+        audioSources_ = gst_device_monitor_get_devices(monitor);
+}
+
+std::vector<std::string>
+WebRTCSession::getAudioSourceNames(const std::string &defaultDevice)
+{
+        if (!initialised_)
+                return {};
+
+        refreshDevices();
+        std::vector<std::string> ret;
+        ret.reserve(g_list_length(audioSources_));
+        for (GList *l = audioSources_; l != nullptr; l = l->next) {
+                gchar *name = gst_device_get_display_name(GST_DEVICE_CAST(l->data));
+                ret.emplace_back(name);
+                g_free(name);
+                if (ret.back() == defaultDevice) {
+                        // move default device to top of the list
+                        std::swap(audioSources_->data, l->data);
+                        std::swap(ret.front(), ret.back());
+                }
+        }
+        return ret;
+}
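refreshDevices() re-queries the lazily created, process-lifetime monitor on every call rather than subscribing to hotplug bus messages, and getAudioSourceNames() keeps ret and audioSources_ in the same order — with the saved default swapped to the front — so a combo-box index is also a valid index into the GList. The same calls work standalone; a minimal device-lister sketch, compiled against gstreamer-1.0:

    #include <gst/gst.h>

    int main(int argc, char *argv[])
    {
            gst_init(&argc, &argv);

            GstDeviceMonitor *monitor = gst_device_monitor_new();
            GstCaps *caps = gst_caps_new_empty_simple("audio/x-raw");
            gst_device_monitor_add_filter(monitor, "Audio/Source", caps);
            gst_caps_unref(caps);

            // The returned GList and the GstDevice refs in it are owned by
            // the caller — hence the g_list_free_full() in refreshDevices().
            GList *devices = gst_device_monitor_get_devices(monitor);
            for (GList *l = devices; l != nullptr; l = l->next) {
                    gchar *name = gst_device_get_display_name(GST_DEVICE_CAST(l->data));
                    g_print("%s\n", name);
                    g_free(name);
            }
            g_list_free_full(devices, g_object_unref);
            gst_object_unref(monitor);
            return 0;
    }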
@@ -7,6 +7,7 @@
 
 #include "mtx/events/voip.hpp"
 
+typedef struct _GList GList;
 typedef struct _GstElement GstElement;
 
 class WebRTCSession : public QObject
@@ -46,6 +47,9 @@ public:
         void setStunServer(const std::string &stunServer) { stunServer_ = stunServer; }
         void setTurnServers(const std::vector<std::string> &uris) { turnServers_ = uris; }
 
+        std::vector<std::string> getAudioSourceNames(const std::string &defaultDevice);
+        void setAudioSource(int audioDeviceIndex) { audioSourceIndex_ = audioDeviceIndex; }
+
 signals:
         void offerCreated(const std::string &sdp,
                           const std::vector<mtx::events::msg::CallCandidates::Candidate> &);
@@ -66,9 +70,12 @@ private:
         GstElement *webrtc_ = nullptr;
         std::string stunServer_;
         std::vector<std::string> turnServers_;
+        GList *audioSources_ = nullptr;
+        int audioSourceIndex_ = -1;
 
         bool startPipeline(int opusPayloadType);
         bool createPipeline(int opusPayloadType);
+        void refreshDevices();
 
 public:
         WebRTCSession(WebRTCSession const &) = delete;
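Taken together, the new public surface is a two-step contract: fetch the device names (saved default first), then hand an index back. A hedged caller sketch — settings (QSharedPointer<UserSettings>) and combo (QComboBox*) are assumed to exist; this mirrors what the call dialogs below do:

    const auto devices = WebRTCSession::instance().getAudioSourceNames(
      settings->defaultAudioSource().toStdString());
    for (const auto &d : devices)
            combo->addItem(QString::fromStdString(d));

    // Later, once the user confirms the call:
    WebRTCSession::instance().setAudioSource(combo->currentIndex());

Note the index is only meaningful against the most recent getAudioSourceNames() result, since that call reorders audioSources_.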
@@ -1,11 +1,14 @@
+#include <QComboBox>
 #include <QLabel>
 #include <QPixmap>
 #include <QPushButton>
 #include <QString>
 #include <QVBoxLayout>
 
 #include "ChatPage.h"
 #include "Config.h"
+#include "UserSettingsPage.h"
 #include "Utils.h"
+#include "WebRTCSession.h"
 #include "dialogs/AcceptCall.h"
 #include "ui/Avatar.h"
@@ -15,9 +18,25 @@ AcceptCall::AcceptCall(const QString &caller,
                        const QString &displayName,
                        const QString &roomName,
                        const QString &avatarUrl,
+                       QSharedPointer<UserSettings> settings,
                        QWidget *parent)
   : QWidget(parent)
 {
+        std::string errorMessage;
+        if (!WebRTCSession::instance().init(&errorMessage)) {
+                emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
+                emit close();
+                return;
+        }
+        audioDevices_ = WebRTCSession::instance().getAudioSourceNames(
+          settings->defaultAudioSource().toStdString());
+        if (audioDevices_.empty()) {
+                emit ChatPage::instance()->showNotification(
+                  "Incoming call: No audio sources found.");
+                emit close();
+                return;
+        }
+
         setAutoFillBackground(true);
         setWindowFlags(Qt::Tool | Qt::WindowStaysOnTopHint);
         setWindowModality(Qt::WindowModal);
@@ -55,7 +74,7 @@ AcceptCall::AcceptCall(const QString &caller,
         else
                 avatar->setLetter(utils::firstChar(roomName));
 
-        const int iconSize = 24;
+        const int iconSize = 22;
         QLabel *callTypeIndicator = new QLabel(this);
         callTypeIndicator->setPixmap(
           QIcon(":/icons/icons/ui/place-call.png").pixmap(QSize(iconSize * 2, iconSize * 2)));
@@ -66,7 +85,7 @@ AcceptCall::AcceptCall(const QString &caller,
         callTypeLabel->setAlignment(Qt::AlignCenter);
 
         auto buttonLayout = new QHBoxLayout;
-        buttonLayout->setSpacing(20);
+        buttonLayout->setSpacing(18);
         acceptBtn_ = new QPushButton(tr("Accept"), this);
         acceptBtn_->setDefault(true);
         acceptBtn_->setIcon(QIcon(":/icons/icons/ui/place-call.png"));
@@ -78,6 +97,19 @@ AcceptCall::AcceptCall(const QString &caller,
         buttonLayout->addWidget(acceptBtn_);
         buttonLayout->addWidget(rejectBtn_);
 
+        auto deviceLayout = new QHBoxLayout;
+        auto audioLabel = new QLabel(this);
+        audioLabel->setPixmap(
+          QIcon(":/icons/icons/ui/microphone-unmute.png").pixmap(QSize(iconSize, iconSize)));
+
+        auto deviceList = new QComboBox(this);
+        for (const auto &d : audioDevices_)
+                deviceList->addItem(QString::fromStdString(d));
+
+        deviceLayout->addStretch();
+        deviceLayout->addWidget(audioLabel);
+        deviceLayout->addWidget(deviceList);
+
         if (displayNameLabel)
                 layout->addWidget(displayNameLabel, 0, Qt::AlignCenter);
         layout->addWidget(callerLabel, 0, Qt::AlignCenter);
@@ -85,8 +117,12 @@ AcceptCall::AcceptCall(const QString &caller,
         layout->addWidget(callTypeIndicator, 0, Qt::AlignCenter);
         layout->addWidget(callTypeLabel, 0, Qt::AlignCenter);
         layout->addLayout(buttonLayout);
+        layout->addLayout(deviceLayout);
 
-        connect(acceptBtn_, &QPushButton::clicked, this, [this]() {
+        connect(acceptBtn_, &QPushButton::clicked, this, [this, deviceList, settings]() {
+                WebRTCSession::instance().setAudioSource(deviceList->currentIndex());
+                settings->setDefaultAudioSource(
+                  QString::fromStdString(audioDevices_[deviceList->currentIndex()]));
                 emit accept();
                 emit close();
         });
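A note on the captures in the clicked handler above: deviceList is a raw pointer, but the combo box is parented to the dialog, so it cannot outlive this; settings is a QSharedPointer copied by value, which keeps the UserSettings object alive for the handler's lifetime. Annotated excerpt (same code as in the hunk):

    connect(acceptBtn_, &QPushButton::clicked, this, [this, deviceList, settings]() {
            // currentIndex() also indexes audioDevices_, because the combo
            // box was filled from that vector in order.
            WebRTCSession::instance().setAudioSource(deviceList->currentIndex());
            settings->setDefaultAudioSource(
              QString::fromStdString(audioDevices_[deviceList->currentIndex()]));
            emit accept();
            emit close();
    });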
@@ -1,9 +1,14 @@
 #pragma once
 
+#include <string>
+#include <vector>
+
+#include <QSharedPointer>
 #include <QWidget>
 
 class QPushButton;
 class QString;
+class UserSettings;
 
 namespace dialogs {
 
@@ -16,6 +21,7 @@ public:
                    const QString &displayName,
                    const QString &roomName,
                    const QString &avatarUrl,
+                   QSharedPointer<UserSettings> settings,
                    QWidget *parent = nullptr);
 
 signals:
@@ -25,6 +31,7 @@ signals:
 private:
         QPushButton *acceptBtn_;
         QPushButton *rejectBtn_;
+        std::vector<std::string> audioDevices_;
 };
 
 }
@@ -1,10 +1,14 @@
+#include <QComboBox>
 #include <QLabel>
 #include <QPushButton>
 #include <QString>
 #include <QVBoxLayout>
 
+#include "ChatPage.h"
 #include "Config.h"
+#include "UserSettingsPage.h"
 #include "Utils.h"
+#include "WebRTCSession.h"
 #include "dialogs/PlaceCall.h"
 #include "ui/Avatar.h"
 
@@ -14,9 +18,24 @@ PlaceCall::PlaceCall(const QString &callee,
                      const QString &displayName,
                      const QString &roomName,
                      const QString &avatarUrl,
+                     QSharedPointer<UserSettings> settings,
                      QWidget *parent)
   : QWidget(parent)
 {
+        std::string errorMessage;
+        if (!WebRTCSession::instance().init(&errorMessage)) {
+                emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
+                emit close();
+                return;
+        }
+        audioDevices_ = WebRTCSession::instance().getAudioSourceNames(
+          settings->defaultAudioSource().toStdString());
+        if (audioDevices_.empty()) {
+                emit ChatPage::instance()->showNotification("No audio sources found.");
+                emit close();
+                return;
+        }
+
         setAutoFillBackground(true);
         setWindowFlags(Qt::Tool | Qt::WindowStaysOnTopHint);
         setWindowModality(Qt::WindowModal);
@@ -37,25 +56,42 @@ PlaceCall::PlaceCall(const QString &callee,
                 avatar->setImage(avatarUrl);
         else
                 avatar->setLetter(utils::firstChar(roomName));
-        const int iconSize = 24;
+        const int iconSize = 18;
         voiceBtn_ = new QPushButton(tr("Voice"), this);
         voiceBtn_->setIcon(QIcon(":/icons/icons/ui/place-call.png"));
         voiceBtn_->setIconSize(QSize(iconSize, iconSize));
         voiceBtn_->setDefault(true);
         cancelBtn_ = new QPushButton(tr("Cancel"), this);
 
         buttonLayout->addStretch(1);
         buttonLayout->addWidget(avatar);
         buttonLayout->addStretch();
         buttonLayout->addWidget(voiceBtn_);
         buttonLayout->addWidget(cancelBtn_);
 
         QString name = displayName.isEmpty() ? callee : displayName;
         QLabel *label = new QLabel("Place a call to " + name + "?", this);
+
+        auto deviceLayout = new QHBoxLayout;
+        auto audioLabel = new QLabel(this);
+        audioLabel->setPixmap(QIcon(":/icons/icons/ui/microphone-unmute.png")
+                                .pixmap(QSize(iconSize * 1.2, iconSize * 1.2)));
+
+        auto deviceList = new QComboBox(this);
+        for (const auto &d : audioDevices_)
+                deviceList->addItem(QString::fromStdString(d));
+
+        deviceLayout->addStretch();
+        deviceLayout->addWidget(audioLabel);
+        deviceLayout->addWidget(deviceList);
 
         layout->addWidget(label);
         layout->addLayout(buttonLayout);
+        layout->addLayout(deviceLayout);
 
-        connect(voiceBtn_, &QPushButton::clicked, this, [this]() {
+        connect(voiceBtn_, &QPushButton::clicked, this, [this, deviceList, settings]() {
+                WebRTCSession::instance().setAudioSource(deviceList->currentIndex());
+                settings->setDefaultAudioSource(
+                  QString::fromStdString(audioDevices_[deviceList->currentIndex()]));
                 emit voice();
                 emit close();
         });
@@ -1,9 +1,14 @@
 #pragma once
 
+#include <string>
+#include <vector>
+
+#include <QSharedPointer>
 #include <QWidget>
 
 class QPushButton;
 class QString;
+class UserSettings;
 
 namespace dialogs {
 
@@ -16,6 +21,7 @@ public:
                   const QString &displayName,
                   const QString &roomName,
                   const QString &avatarUrl,
+                  QSharedPointer<UserSettings> settings,
                   QWidget *parent = nullptr);
 
 signals:
@@ -25,6 +31,7 @@ signals:
 private:
         QPushButton *voiceBtn_;
         QPushButton *cancelBtn_;
+        std::vector<std::string> audioDevices_;
 };
 
 }