Audio source code query #6729

Open
Anindya-uy opened this issue Feb 19, 2025 · 2 comments
Labels
stale-support, support

Comments

@Anindya-uy

The issue

I want to know where in the server code audio is routed to the client.

Mumble version

1.4.0

Mumble component

Server

OS

Linux

Additional information

No response

@davidebeatrici
Member

mumble/src/murmur/Server.cpp

Lines 1145 to 1342 in 5b8c175

void Server::processMsg(ServerUser *u, Mumble::Protocol::AudioData audioData, AudioReceiverBuffer &buffer,
                        Mumble::Protocol::UDPAudioEncoder< Mumble::Protocol::Role::Server > &encoder) {
    ZoneScoped;

    // Note that in this function we never have to acquire a read-lock on qrwlVoiceThread
    // as all places that call this function will hold that lock at the point of calling
    // this function.
    // This function is currently called from Server::msgUDPTunnel, Server::run and
    // Server::message
    if (u->sState != ServerUser::Authenticated || u->bMute || u->bSuppress || u->bSelfMute)
        return;

    // Check the voice data rate limit.
    {
        BandwidthRecord *bw = &u->bwr;

        // IP + UDP + Crypt + Data
        const std::size_t packetsize = 20 + 8 + 4 + audioData.payload.size();

        if (!bw->addFrame(static_cast< int >(packetsize), iMaxBandwidth / 8)) {
            // Suppress packet.
            return;
        }
    }

    buffer.clear();

    if (audioData.targetOrContext == Mumble::Protocol::ReservedTargetIDs::SERVER_LOOPBACK) {
        buffer.forceAddReceiver(*u, Mumble::Protocol::AudioContext::NORMAL, audioData.containsPositionalData);
    } else if (audioData.targetOrContext == Mumble::Protocol::ReservedTargetIDs::REGULAR_SPEECH) {
        Channel *c = u->cChannel;

        // Send audio to all users that are listening to the channel
        foreach (unsigned int currentSession, m_channelListenerManager.getListenersForChannel(c->iId)) {
            ServerUser *pDst = static_cast< ServerUser * >(qhUsers.value(currentSession));
            if (pDst) {
                buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::LISTEN, audioData.containsPositionalData,
                                   m_channelListenerManager.getListenerVolumeAdjustment(pDst->uiSession, c->iId));
            }
        }

        // Send audio to all users in the same channel
        for (User *p : c->qlUsers) {
            ServerUser *pDst = static_cast< ServerUser * >(p);
            buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::NORMAL, audioData.containsPositionalData);
        }

        // Send audio to all linked channels the user has speak-permission
        if (!c->qhLinks.isEmpty()) {
            QSet< Channel * > chans = c->allLinks();
            chans.remove(c);

            QMutexLocker qml(&qmCache);

            for (Channel *l : chans) {
                if (ChanACL::hasPermission(u, l, ChanACL::Speak, &acCache)) {
                    // Send the audio stream to all users that are listening to the linked channel
                    for (unsigned int currentSession : m_channelListenerManager.getListenersForChannel(l->iId)) {
                        ServerUser *pDst = static_cast< ServerUser * >(qhUsers.value(currentSession));
                        if (pDst) {
                            buffer.addReceiver(
                                *u, *pDst, Mumble::Protocol::AudioContext::LISTEN, audioData.containsPositionalData,
                                m_channelListenerManager.getListenerVolumeAdjustment(pDst->uiSession, l->iId));
                        }
                    }

                    // Send audio to users in the linked channel
                    for (User *p : l->qlUsers) {
                        ServerUser *pDst = static_cast< ServerUser * >(p);
                        buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::NORMAL,
                                           audioData.containsPositionalData);
                    }
                }
            }
        }
    } else if (u->qmTargets.contains(static_cast< int >(audioData.targetOrContext))) { // Whisper/Shout
        QSet< ServerUser * > channel;
        QSet< ServerUser * > direct;
        QHash< ServerUser *, VolumeAdjustment > cachedListeners;

        if (u->qmTargetCache.contains(static_cast< int >(audioData.targetOrContext))) {
            ZoneScopedN(TracyConstants::AUDIO_WHISPER_CACHE_STORE);

            const WhisperTargetCache &cache = u->qmTargetCache.value(static_cast< int >(audioData.targetOrContext));
            channel = cache.channelTargets;
            direct = cache.directTargets;
            cachedListeners = cache.listeningTargets;
        } else {
            ZoneScopedN(TracyConstants::AUDIO_WHISPER_CACHE_CREATE);

            const unsigned int uiSession = u->uiSession;
            qrwlVoiceThread.unlock();
            qrwlVoiceThread.lockForWrite();

            if (!qhUsers.contains(uiSession)) {
                return;
            }

            // Create cache entry for the given target
            // Note: We have to compute the cache entry and add it to the user's cache store in an atomic
            // transaction (ensured by the lock) to avoid running into situations in which a user from the cache
            // gets deleted without this particular cache entry being purged (which happens, if the cache entry is
            // in the store at the point of deleting the user).
            const WhisperTarget &wt = u->qmTargets.value(static_cast< int >(audioData.targetOrContext));
            WhisperTargetCache cache = createWhisperTargetCacheFor(*u, wt);
            u->qmTargetCache.insert(static_cast< int >(audioData.targetOrContext), std::move(cache));

            qrwlVoiceThread.unlock();
            qrwlVoiceThread.lockForRead();

            if (!qhUsers.contains(uiSession))
                return;
        }

        // These users receive the audio because someone is shouting to their channel
        for (ServerUser *pDst : channel) {
            buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::SHOUT, audioData.containsPositionalData);
        }

        // These users receive audio because someone is whispering to them
        for (ServerUser *pDst : direct) {
            buffer.addReceiver(*u, *pDst, Mumble::Protocol::AudioContext::WHISPER, audioData.containsPositionalData);
        }

        // These users receive audio because someone is sending audio to one of their listeners
        QHashIterator< ServerUser *, VolumeAdjustment > it(cachedListeners);
        while (it.hasNext()) {
            it.next();

            ServerUser *user = it.key();
            const VolumeAdjustment &volumeAdjustment = it.value();

            buffer.addReceiver(*u, *user, Mumble::Protocol::AudioContext::LISTEN, audioData.containsPositionalData,
                               volumeAdjustment);
        }
    }

    ZoneNamedN(__tracy_scoped_zone2, TracyConstants::AUDIO_SENDOUT_ZONE, true);

    buffer.preprocessBuffer();

    bool isFirstIteration = true;
    QByteArray tcpCache;

    for (bool includePositionalData : { true, false }) {
        std::vector< AudioReceiver > &receiverList = buffer.getReceivers(includePositionalData);

        audioData.containsPositionalData = includePositionalData && audioData.containsPositionalData;

        if (!audioData.containsPositionalData) {
            encoder.dropPositionalData();
        }

        // Note: The receiver-ranges are determined in such a way, that they are all going to receive the exact
        // same audio packet.
        ReceiverRange< std::vector< AudioReceiver >::iterator > currentRange =
            AudioReceiverBuffer::getReceiverRange(receiverList.begin(), receiverList.end());

        while (currentRange.begin != currentRange.end) {
            // Setup encoder for this range
            if (isFirstIteration
                || !Mumble::Protocol::protocolVersionsAreCompatible(encoder.getProtocolVersion(),
                                                                    currentRange.begin->getReceiver().m_version)) {
                ZoneScopedN(TracyConstants::AUDIO_ENCODE);

                encoder.setProtocolVersion(currentRange.begin->getReceiver().m_version);

                // We have to re-encode the "fixed" part of the audio message
                encoder.prepareAudioPacket(audioData);

                if (audioData.containsPositionalData) {
                    encoder.addPositionalData(audioData);
                }

                isFirstIteration = false;
            }

            audioData.targetOrContext = currentRange.begin->getContext();
            audioData.volumeAdjustment = currentRange.begin->getVolumeAdjustment();

            // Update data
            TracyCZoneN(__tracy_zone, TracyConstants::AUDIO_UPDATE, true);
            gsl::span< const Mumble::Protocol::byte > encodedPacket = encoder.updateAudioPacket(audioData);
            TracyCZoneEnd(__tracy_zone);

            // Clear TCP cache
            tcpCache.clear();

            // Send encoded packet to all receivers of this range
            for (auto it = currentRange.begin; it != currentRange.end; ++it) {
                sendMessage(it->getReceiver(), encodedPacket.data(), static_cast< int >(encodedPacket.size()),
                            tcpCache);
            }

            // Find next range
            currentRange = AudioReceiverBuffer::getReceiverRange(currentRange.end, receiverList.end());
        }
    }
}
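
In prose: incoming audio reaches the server through Server::msgUDPTunnel, Server::run, or Server::message, all of which call Server::processMsg shown above. That function drops the packet for muted, suppressed, or over-bandwidth senders, fills an AudioReceiverBuffer with the receivers implied by the packet's target (loopback, the sender's channel plus linked channels and channel listeners, or whisper/shout targets), re-encodes the packet once per compatible client protocol version, and finally pushes it to each receiver via sendMessage.

If a high-level picture helps, here is a minimal, self-contained sketch of that flow. Every type and function in it (SketchUser, collectReceivers, sendTo) is an illustrative stand-in rather than Mumble's actual API; only the overall shape — filter the sender, build a receiver list for the target, encode once, send to everyone in the group — mirrors the snippet above.

// Simplified, self-contained model of the flow in Server::processMsg above.
// SketchUser, collectReceivers and sendTo are made-up stand-ins, not Mumble types.
#include <cstdio>
#include <string>
#include <vector>

struct SketchUser {
    int session;
    std::string name;
    int channelId;
    bool muted;
};

// Regular-speech case only: everyone in the speaker's channel except the speaker.
// The real server also adds channel listeners, linked channels and whisper/shout targets.
std::vector< const SketchUser * > collectReceivers(const SketchUser &speaker,
                                                   const std::vector< SketchUser > &allUsers) {
    std::vector< const SketchUser * > receivers;
    for (const SketchUser &u : allUsers) {
        if (u.session != speaker.session && u.channelId == speaker.channelId) {
            receivers.push_back(&u);
        }
    }
    return receivers;
}

// Stand-in for Server::sendMessage, which in the real server writes the encoded
// packet to the client's UDP socket (or tunnels it over TCP).
void sendTo(const SketchUser &receiver, const std::string &encodedPacket) {
    std::printf("-> %s (session %d): %zu bytes\n", receiver.name.c_str(), receiver.session,
                encodedPacket.size());
}

int main() {
    std::vector< SketchUser > users = { { 1, "alice", 7, false }, { 2, "bob", 7, false }, { 3, "carol", 9, false } };
    const SketchUser &speaker = users[0];

    // Step 1: drop the packet if the speaker may not talk (mute/suppress/bandwidth in the real code).
    if (speaker.muted)
        return 0;

    // Step 2: build the receiver list for the packet's target.
    std::vector< const SketchUser * > receivers = collectReceivers(speaker, users);

    // Step 3: encode once, then send the same packet to every receiver in the group.
    const std::string encodedPacket = "<encoded audio frame>";
    for (const SketchUser *r : receivers) {
        sendTo(*r, encodedPacket);
    }

    return 0;
}

Compiling and running the sketch prints one line per receiver in the speaker's channel (here only "bob", since "carol" is in a different channel).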

@github-actions

As there has been no activity on this issue for a couple of days, we assume that your issue has been fixed in the meantime.
Should this not be the case, please let us know.

If no further activity happens, this issue will be closed within 3 days.

@github-actions github-actions bot added the stale-support label Feb 24, 2025