diff --git a/include/AudioBuffer.h b/include/AudioBuffer.h new file mode 100644 index 0000000000..eeab320d10 --- /dev/null +++ b/include/AudioBuffer.h @@ -0,0 +1,421 @@ +/* + * AudioBuffer.h + * + * Copyright (c) 2026 Dalton Messmer + * + * This file is part of LMMS - https://lmms.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program (see COPYING); if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301 USA. + * + */ + +#ifndef LMMS_AUDIO_BUFFER_H +#define LMMS_AUDIO_BUFFER_H + +#include +#include + +#include "AudioBufferView.h" +#include "ArrayVector.h" +#include "LmmsTypes.h" +#include "lmms_constants.h" +#include "lmms_export.h" + +namespace lmms +{ + +/** + * An owning collection of audio channels for an instrument track, mixer channel, or audio processor. 
+ * + * Features: + * - Up to `MaxChannelsPerAudioBuffer` total channels + * - Audio data in planar format (plus a temporary interleaved buffer for conversions until we use planar only) + * - All planar buffers are sourced from the same large buffer for better cache locality + * - Custom allocator support + * - Silence tracking for each channel (NOTE: requires careful use so that non-silent data is not written to a + * channel marked silent without updating that channel's silence flag afterward) + * - Methods for sanitizing, silencing, and calculating the absolute peak value of channels, and doing so more + * efficiently using the data from silence tracking + * - Can organize channels into arbitrary groups. For example, you could have 6 total channels divided into 2 groups + * where the 1st group contains 2 channels (stereo) and the 2nd contains 4 channels (quadraphonic). + * - Extensive unit testing - @ref AudioBufferTest.cpp + * + * Audio data layout explanation: + * - All planar audio data for all channels in an AudioBuffer is sourced from the same large contiguous + * buffer called the source buffer (m_sourceBuffer). + * - The source buffer consists of the buffer for 1st channel followed by the buffer for the 2nd channel, and so on + * for all channels. In total, the number of elements is `channels * frames`. + * - A separate vector of non-owning pointers to channel buffers is also maintained. In this vector, each index + * corresponds to a channel, providing a mapping from the channel index to a pointer to the start of that + * channel's buffer within the source buffer. This is called the access buffer (m_accessBuffer). + * - The purpose of the access buffer is to provide channel-wise access to buffers within the source buffer, so + * it's `m_accessBuffer[channelIdx][frameIdx]` instead of `m_sourceBuffer[channelIdx * frames + frameIdx]`. + * This is very important since many APIs dealing with planar audio expect it in this `float**` 2D array form. 
+ * - Groups have no effect on the audio data layout in the source/access buffers and are merely a layer built on top. + * Conveniently, if you take `m_accessBuffer` and offset it by `channelIndex`, you get another `float**` + * starting at that channel. This is what the `float**` buffer stored in each ChannelGroup is. + * + * Naming notes: + * - When this class is used in an instrument track or mixer channel, its channels could be referred to + * as "track channels" or "internal channels", since they are equivalent to the "track channels" used + * in other DAWs such as REAPER. + * - When this class is used in an audio processor or audio plugin, its channels could be referred to + * as "processor channels" or "plugin channels". + */ +class LMMS_EXPORT AudioBuffer +{ +public: + using ChannelFlags = std::bitset; + + //! Non-owning collection of audio channels + metadata + class ChannelGroup + { + public: + ChannelGroup() = default; + ChannelGroup(float** buffers, ch_cnt_t channels) + : m_buffers{buffers} + , m_channels{channels} + {} + + auto buffers() const -> const float* const* { return m_buffers; } + auto buffers() -> float** { return m_buffers; } + + auto buffer(ch_cnt_t channel) const -> const float* + { + assert(channel < m_channels); + return m_buffers[channel]; + } + + auto buffer(ch_cnt_t channel) -> float* + { + assert(channel < m_channels); + return m_buffers[channel]; + } + + auto channels() const -> ch_cnt_t { return m_channels; } + + void setBuffers(float** newBuffers) { m_buffers = newBuffers; } + void setChannels(ch_cnt_t channels) { m_channels = channels; } + + // TODO: Future additions: Group names, type (main/aux), speaker arrangements (for surround sound), ... + + private: + /** + * Provides access to individual channel buffers. + * [channel index][frame index] + */ + float** m_buffers = nullptr; + + //! 
Number of channels in `m_buffers` - currently only 2 is used + ch_cnt_t m_channels = 0; + }; + + AudioBuffer() = delete; + + AudioBuffer(const AudioBuffer&) = delete; + AudioBuffer(AudioBuffer&&) noexcept = default; + auto operator=(const AudioBuffer&) -> AudioBuffer& = delete; + auto operator=(AudioBuffer&&) noexcept -> AudioBuffer& = default; + + /** + * Creates AudioBuffer with a 1st (main) channel group. + * + * Silence tracking is enabled or disabled depending on the auto-quit setting. + * + * @param frames frame count for each channel + * @param channels channel count for the 1st group, or zero to skip adding the 1st group + * @param resource memory resource for all buffers + */ + explicit AudioBuffer(f_cnt_t frames, ch_cnt_t channels = DEFAULT_CHANNELS, + std::pmr::memory_resource* resource = std::pmr::get_default_resource()); + + /** + * Creates AudioBuffer with groups defined. + * + * Silence tracking is enabled or disabled depending on the auto-quit setting. + * + * @param frames frame count for each channel + * @param channels total channel count + * @param groups group count + * @param resource memory resource for all buffers + * @param groupVisitor see @ref setGroups + */ + template + AudioBuffer(f_cnt_t frames, ch_cnt_t channels, group_cnt_t groups, + std::pmr::memory_resource* resource, F&& groupVisitor) + : AudioBuffer{frames, channels, resource} + { + setGroups(groups, std::forward(groupVisitor)); + } + + //! The presence of the temporary interleaved buffer is opt-in. Call this to create it. + void allocateInterleavedBuffer(); + + auto hasInterleavedBuffer() const -> bool { return !m_interleavedBuffer.empty(); } + + /** + * @returns the number of bytes needed to allocate buffers with given frame and channel counts. + * Useful for preallocating a buffer for a shared memory resource. + */ + static auto allocationSize(f_cnt_t frames, ch_cnt_t channels, + bool withInterleavedBuffer = false) -> std::size_t; + + //! 
@returns current number of channel groups + auto groupCount() const -> group_cnt_t { return static_cast(m_groups.size()); } + + auto group(group_cnt_t index) const -> const ChannelGroup& { return m_groups[index]; } + auto group(group_cnt_t index) -> ChannelGroup& { return m_groups[index]; } + + //! @returns the buffers for all channel groups + auto allBuffers() const -> PlanarBufferView + { + return {m_accessBuffer.data(), totalChannels(), m_frames}; + } + + //! @returns the buffers for all channel groups + auto allBuffers() -> PlanarBufferView + { + return {m_accessBuffer.data(), totalChannels(), m_frames}; + } + + //! @returns the buffers of the given channel group + auto groupBuffers(group_cnt_t index) const -> PlanarBufferView + { + assert(index < groupCount()); + const ChannelGroup& g = m_groups[index]; + return {g.buffers(), g.channels(), m_frames}; + } + + //! @returns the buffers of the given channel group + auto groupBuffers(group_cnt_t index) -> PlanarBufferView + { + assert(index < groupCount()); + ChannelGroup& g = m_groups[index]; + return {g.buffers(), g.channels(), m_frames}; + } + + //! @returns the buffer for the given channel + auto buffer(ch_cnt_t channel) const -> std::span + { + return {m_accessBuffer[channel], m_frames}; + } + + //! @returns the buffer for the given channel + auto buffer(ch_cnt_t channel) -> std::span + { + return {m_accessBuffer[channel], m_frames}; + } + + //! @returns the total channel count (never exceeds MaxChannelsPerAudioBuffer) + auto totalChannels() const -> ch_cnt_t { return static_cast(m_accessBuffer.size()); } + + //! @returns the frame count for each channel buffer + auto frames() const -> f_cnt_t { return m_frames; } + + //! @returns scratch buffer for conversions between interleaved and planar TODO: Remove once using planar only + auto interleavedBuffer() const -> InterleavedBufferView + { + assert(hasInterleavedBuffer()); + return {m_interleavedBuffer.data(), m_frames}; + } + + //! 
@returns scratch buffer for conversions between interleaved and planar TODO: Remove once using planar only + auto interleavedBuffer() -> InterleavedBufferView + { + assert(hasInterleavedBuffer()); + return {m_interleavedBuffer.data(), m_frames}; + } + + /** + * @brief Adds a new channel group at the end of the list. + * + * If the memory resource is `SharedMemoryResource`, all buffers (source, channels, + * and interleaved) will be reallocated. The number of bytes allocated will be + * `allocationSize(frames(), totalChannels() + channels, hasInterleavedBuffer())`. + * + * @param channels how many channels the new group should have + * @returns the newly created group, or nullptr upon failure + */ + auto addGroup(ch_cnt_t channels) -> ChannelGroup*; + + /** + * @brief Changes the channel grouping without changing the channel count. + * Does not reallocate any buffers. + * + * @param groups the new group count + * @param groupVisitor called for each new group, passed the index and group reference, and is + * expected to return the channel count for that group. The visitor may + * also set the group's metadata. + */ + template + void setGroups(group_cnt_t groups, F&& groupVisitor) + { + static_assert(std::is_invocable_r_v, + "groupVisitor is passed the group index + group reference and must return the group's channel count"); + + m_groups.clear(); + ch_cnt_t ch = 0; + for (group_cnt_t idx = 0; idx < groups; ++idx) + { + auto& group = m_groups.emplace_back(); + + const auto channels = groupVisitor(idx, group); + if (channels == 0) { throw std::runtime_error{"group cannot have zero channels"}; } + + group.setBuffers(&m_accessBuffer[ch]); + group.setChannels(channels); + + ch += channels; + if (ch > this->totalChannels()) + { + throw std::runtime_error{"sum of group channel counts exceeds total channels"}; + } + } + } + + /** + * Channels which are known to be quiet, AKA the silence status. 
+ * 1 = channel is known to be silent + * 0 = channel is assumed to be non-silent (or, when silence tracking + * is enabled, *known* to be non-silent) + * + * NOTE: If any channel buffers are used and their data modified outside of this class, + * their silence flags will be invalidated until `updateSilenceFlags()` is called. + * Therefore, calling code must be careful to always keep the silence flags up-to-date. + */ + auto silenceFlags() const -> const ChannelFlags& { return m_silenceFlags; } + + //! Forcibly pessimizes silence tracking for a specific channel + void assumeNonSilent(ch_cnt_t channel) { m_silenceFlags[channel] = false; } + + /** + * When silence tracking is enabled, channels will be checked for silence whenever their data may + * have changed, so it'll always be known whether they are silent or non-silent. There is a performance cost + * to this, but it is likely worth it since this information allows many effects to be put to sleep + * when their inputs are silent ("auto-quit"). When a channel is known to be silent, it also + * enables optimizations in buffer sanitization, buffer zeroing, and finding the absolute peak sample value. + * + * When silence tracking is disabled, channels are not checked for silence, so a silence flag may be + * unset despite the channel being silent. Non-silence must be assumed whenever the silence status is not + * known, so the optimizations which silent buffers allow will not be possible as often. + */ + void enableSilenceTracking(bool enabled); + auto silenceTrackingEnabled() const -> bool { return m_silenceTrackingEnabled; } + + //! Mixes the silence flags of the other `AudioBuffer` with this `AudioBuffer` + void mixSilenceFlags(const AudioBuffer& other); + + /** + * Checks whether any of the selected channels are non-silent (has a signal). + * + * If silence tracking is disabled, all channels that aren't marked + * as silent are assumed to be non-silent. 
+ * + * A processor could check for a signal present at any of its inputs by + * calling this method selecting all of the track channels that are routed + * to at least one of its inputs. + * + * @param channels channels to check for a signal; 1 = selected, 0 = ignore + */ + auto hasSignal(const ChannelFlags& channels) const -> bool; + + //! Checks whether any channel is non-silent (has a signal). @see hasSignal + auto hasAnySignal() const -> bool; + + /** + * @brief Sanitizes specified channels of any Inf/NaN values if "nanhandler" setting is enabled + * + * @param channels channels to sanitize; 1 = selected, 0 = skip + * @param upperBound any channel indexes at or above this are skipped + */ + void sanitize(const ChannelFlags& channels, ch_cnt_t upperBound = MaxChannelsPerAudioBuffer); + + //! Sanitizes all channels. @see sanitize + void sanitizeAll(); + + /** + * @brief Updates the silence status of the given channels, up to the upperBound index. + * + * @param channels channels to update; 1 = selected, 0 = skip + * @param upperBound any channel indexes at or above this are skipped + * @returns true if all selected channels were silent + */ + auto updateSilenceFlags(const ChannelFlags& channels, ch_cnt_t upperBound = MaxChannelsPerAudioBuffer) -> bool; + + //! Updates the silence status of all channels. @see updateSilenceFlags + auto updateAllSilenceFlags() -> bool; + + /** + * @brief Silences (zeroes) the given channels + * + * @param channels channels to silence; 1 = selected, 0 = skip + * @param upperBound any channel indexes at or above this are skipped + */ + void silenceChannels(const ChannelFlags& channels, ch_cnt_t upperBound = MaxChannelsPerAudioBuffer); + + //! Silences (zeroes) all channels. @see silenceChannels + void silenceAllChannels(); + + //! @returns absolute peak sample value for the given channel + auto absPeakValue(ch_cnt_t channel) const -> float; + +private: + /** + * Large buffer that all channel buffers are sourced from. 
+ * + * [channel index] + */ + std::pmr::vector m_sourceBuffer; + + /** + * Provides access to individual channel buffers within the source buffer. + * + * [channel index][frame index] + */ + std::pmr::vector m_accessBuffer; + + /** + * Interleaved scratch buffer for conversions between interleaved and planar. + * + * TODO: Remove once using planar only + */ + std::pmr::vector m_interleavedBuffer; + + //! Divides channels into arbitrary groups + ArrayVector m_groups; + + //! Frame count for every channel buffer + f_cnt_t m_frames = 0; + + /** + * Stores which channels are known to be quiet, AKA the silence status. + * + * This must always be kept in sync with the buffer data when enabled - at minimum + * avoiding any false positives where a channel is marked as "silent" when it isn't. + * Any channel bits at or above `totalChannels()` must always be marked silent. + * + * 1 = channel is known to be silent + * 0 = channel is assumed to be non-silent (or, when silence tracking + * is enabled, *known* to be non-silent) + */ + ChannelFlags m_silenceFlags; + + bool m_silenceTrackingEnabled = false; +}; + +} // namespace lmms + +#endif // LMMS_AUDIO_BUFFER_H diff --git a/include/AudioBufferView.h b/include/AudioBufferView.h index f44437a075..1bb62bb52a 100644 --- a/include/AudioBufferView.h +++ b/include/AudioBufferView.h @@ -39,20 +39,20 @@ namespace lmms { //! 
Use when the number of channels is not known at compile time -inline constexpr auto DynamicChannelCount = static_cast(-1); +inline constexpr auto DynamicChannelCount = static_cast(-1); namespace detail { // For buffer views with static channel count -template +template class BufferViewData { public: constexpr BufferViewData() = default; constexpr BufferViewData(const BufferViewData&) = default; - constexpr BufferViewData(T* data, [[maybe_unused]] proc_ch_t channels, f_cnt_t frames) noexcept + constexpr BufferViewData(T* data, [[maybe_unused]] ch_cnt_t channels, f_cnt_t frames) noexcept : m_data{data} , m_frames{frames} { @@ -66,7 +66,7 @@ public: } constexpr auto data() const noexcept -> T* { return m_data; } - static constexpr auto channels() noexcept -> proc_ch_t { return channelCount; } + static constexpr auto channels() noexcept -> ch_cnt_t { return channelCount; } constexpr auto frames() const noexcept -> f_cnt_t { return m_frames; } protected: @@ -82,7 +82,7 @@ public: constexpr BufferViewData() = default; constexpr BufferViewData(const BufferViewData&) = default; - constexpr BufferViewData(T* data, proc_ch_t channels, f_cnt_t frames) noexcept + constexpr BufferViewData(T* data, ch_cnt_t channels, f_cnt_t frames) noexcept : m_data{data} , m_channels{channels} , m_frames{frames} @@ -91,17 +91,17 @@ public: } constexpr auto data() const noexcept -> T* { return m_data; } - constexpr auto channels() const noexcept -> proc_ch_t { return m_channels; } + constexpr auto channels() const noexcept -> ch_cnt_t { return m_channels; } constexpr auto frames() const noexcept -> f_cnt_t { return m_frames; } protected: T* m_data = nullptr; - proc_ch_t m_channels = 0; + ch_cnt_t m_channels = 0; f_cnt_t m_frames = 0; }; // For interleaved frame iterators with static channel count -template +template class InterleavedFrameIteratorData { public: @@ -113,7 +113,7 @@ public: { } - static constexpr auto channels() noexcept -> proc_ch_t { return channelCount; } + static constexpr 
auto channels() noexcept -> ch_cnt_t { return channelCount; } protected: T* m_data = nullptr; @@ -127,21 +127,21 @@ public: constexpr InterleavedFrameIteratorData() = default; constexpr InterleavedFrameIteratorData(const InterleavedFrameIteratorData&) = default; - constexpr InterleavedFrameIteratorData(T* data, proc_ch_t channels) noexcept + constexpr InterleavedFrameIteratorData(T* data, ch_cnt_t channels) noexcept : m_data{data} , m_channels{channels} { } - constexpr auto channels() const noexcept -> proc_ch_t { return m_channels; } + constexpr auto channels() const noexcept -> ch_cnt_t { return m_channels; } protected: T* m_data = nullptr; - proc_ch_t m_channels = 0; + ch_cnt_t m_channels = 0; }; // Allows for iterating over the frames of `InterleavedBufferView` -template +template class InterleavedFrameIterator : public InterleavedFrameIteratorData { using Base = InterleavedFrameIteratorData; @@ -286,7 +286,7 @@ concept SampleType = detail::OneOf, * * TODO C++23: Use std::mdspan? */ -template +template class InterleavedBufferView : public detail::BufferViewData { using Base = detail::BufferViewData; @@ -312,24 +312,24 @@ public: } //! Construct dynamic channel count from static - template + template requires (channelCount == DynamicChannelCount && otherChannels != DynamicChannelCount) constexpr InterleavedBufferView(InterleavedBufferView other) noexcept : Base{other.data(), otherChannels, other.frames()} { } - //! Construct from std::span - InterleavedBufferView(std::span buffer) noexcept + //! Construct from SampleFrame* + InterleavedBufferView(SampleFrame* data, f_cnt_t frames) noexcept requires (std::is_same_v, float> && channelCount == 2) - : Base{reinterpret_cast(buffer.data()), buffer.size()} + : Base{reinterpret_cast(data), frames} { } - //! Construct from std::span - InterleavedBufferView(std::span buffer) noexcept + //! 
Construct from const SampleFrame* + InterleavedBufferView(const SampleFrame* data, f_cnt_t frames) noexcept requires (std::is_same_v && channelCount == 2) - : Base{reinterpret_cast(buffer.data()), buffer.size()} + : Base{reinterpret_cast(data), frames} { } @@ -437,13 +437,13 @@ public: return reinterpret_cast(this->m_data)[index]; } - auto toSampleFrames() noexcept -> std::span + auto asSampleFrames() noexcept -> std::span requires (std::is_same_v && channelCount == 2) { return {reinterpret_cast(this->m_data), this->m_frames}; } - auto toSampleFrames() const noexcept -> std::span + auto asSampleFrames() const noexcept -> std::span requires (std::is_same_v && channelCount == 2) { return {reinterpret_cast(this->m_data), this->m_frames}; @@ -457,6 +457,10 @@ public: static_assert(sizeof(InterleavedBufferView) > sizeof(InterleavedBufferView)); static_assert(sizeof(InterleavedBufferView) == sizeof(void*) + sizeof(f_cnt_t)); +// Deduction guides +InterleavedBufferView(const SampleFrame*, f_cnt_t) -> InterleavedBufferView; +InterleavedBufferView(SampleFrame*, f_cnt_t) -> InterleavedBufferView; + /** * Non-owning view for multi-channel non-interleaved audio data @@ -466,7 +470,7 @@ static_assert(sizeof(InterleavedBufferView) == sizeof(void*) + sizeof( * * TODO C++23: Use std::mdspan? */ -template +template class PlanarBufferView : public detail::BufferViewData { using Base = detail::BufferViewData; @@ -489,7 +493,7 @@ public: } //! Construct dynamic channel count from static - template + template requires (channelCount == DynamicChannelCount && otherChannels != DynamicChannelCount) constexpr PlanarBufferView(PlanarBufferView other) noexcept : Base{other.data(), otherChannels, other.frames()} @@ -502,13 +506,13 @@ public: } //! @return the buffer of the given channel - constexpr auto buffer(proc_ch_t channel) const noexcept -> std::span + constexpr auto buffer(ch_cnt_t channel) const noexcept -> std::span { return {bufferPtr(channel), this->m_frames}; } //! 
@return the buffer of the given channel - template requires (channelCount != DynamicChannelCount) + template requires (channelCount != DynamicChannelCount) constexpr auto buffer() const noexcept -> std::span { return {bufferPtr(), this->m_frames}; @@ -518,7 +522,7 @@ public: * @return pointer to the buffer of the given channel. * The size of the buffer is `frames()`. */ - constexpr auto bufferPtr(proc_ch_t channel) const noexcept -> T* + constexpr auto bufferPtr(ch_cnt_t channel) const noexcept -> T* { assert(channel < Base::channels()); assert(this->m_data != nullptr); @@ -529,7 +533,7 @@ public: * @return pointer to the buffer of the given channel. * The size of the buffer is `frames()`. */ - template requires (channelCount != DynamicChannelCount) + template requires (channelCount != DynamicChannelCount) constexpr auto bufferPtr() const noexcept -> T* { static_assert(channel < channelCount); @@ -541,7 +545,7 @@ public: * @return pointer to the buffer of a given channel. * The size of the buffer is `frames()`. */ - constexpr auto operator[](proc_ch_t channel) const noexcept -> T* + constexpr auto operator[](ch_cnt_t channel) const noexcept -> T* { return bufferPtr(channel); } @@ -556,10 +560,55 @@ static_assert(sizeof(PlanarBufferView) == sizeof(void**) + sizeof(f_cn //! Concept for any audio buffer view, interleaved or planar -template +template concept AudioBufferView = SampleType && (std::convertible_to> || std::convertible_to>); + +//! 
Converts planar buffers to interleaved buffers +template +constexpr void toInterleaved(PlanarBufferView src, + InterleavedBufferView, outputs> dst) +{ + assert(src.frames() == dst.frames()); + if constexpr (inputs == DynamicChannelCount || outputs == DynamicChannelCount) + { + assert(src.channels() == dst.channels()); + } + else { static_assert(inputs == outputs); } + + for (f_cnt_t frame = 0; frame < dst.frames(); ++frame) + { + auto* framePtr = dst.framePtr(frame); + for (ch_cnt_t channel = 0; channel < dst.channels(); ++channel) + { + framePtr[channel] = src.bufferPtr(channel)[frame]; + } + } +} + +//! Converts interleaved buffers to planar buffers +template +constexpr void toPlanar(InterleavedBufferView src, + PlanarBufferView, outputs> dst) +{ + assert(src.frames() == dst.frames()); + if constexpr (inputs == DynamicChannelCount || outputs == DynamicChannelCount) + { + assert(src.channels() == dst.channels()); + } + else { static_assert(inputs == outputs); } + + for (ch_cnt_t channel = 0; channel < dst.channels(); ++channel) + { + auto* channelPtr = dst.bufferPtr(channel); + for (f_cnt_t frame = 0; frame < dst.frames(); ++frame) + { + channelPtr[frame] = src.framePtr(frame)[channel]; + } + } +} + } // namespace lmms #endif // LMMS_AUDIO_BUFFER_VIEW_H diff --git a/include/AudioBusHandle.h b/include/AudioBusHandle.h index c58cb93792..ea7650700b 100644 --- a/include/AudioBusHandle.h +++ b/include/AudioBusHandle.h @@ -30,6 +30,7 @@ #include #include +#include "AudioBuffer.h" #include "PlayHandle.h" namespace lmms @@ -58,8 +59,6 @@ public: BoolModel* mutedModel = nullptr); virtual ~AudioBusHandle(); - SampleFrame* buffer() { return m_buffer; } - // indicate whether JACK & Co should provide output-buffer at ext. 
port bool extOutputEnabled() const { return m_extOutputEnabled; } void setExtOutputEnabled(bool enabled); @@ -85,7 +84,7 @@ public: private: volatile bool m_bufferUsage; - SampleFrame* const m_buffer; + AudioBuffer m_buffer; bool m_extOutputEnabled; mix_ch_t m_nextMixerChannel; diff --git a/include/Effect.h b/include/Effect.h index d6454b32be..3287ada7d1 100644 --- a/include/Effect.h +++ b/include/Effect.h @@ -37,6 +37,7 @@ namespace lmms { +class AudioBuffer; class EffectChain; class EffectControls; @@ -65,7 +66,7 @@ public: } //! Returns true if audio was processed and should continue being processed - bool processAudioBuffer(SampleFrame* buf, const fpp_t frames); + bool processAudioBuffer(AudioBuffer& inOut); inline bool isOkay() const { @@ -77,22 +78,10 @@ public: m_okay = _state; } - - inline bool isRunning() const + //! "Awake" means the effect has not been put to sleep by auto-quit + bool isAwake() const { - return m_running; - } - - void startRunning() - { - m_quietBufferCount = 0; - m_running = true; - } - - void stopRunning() - { - m_quietBufferCount = 0; - m_running = false; + return m_awake; } inline bool isEnabled() const @@ -125,7 +114,12 @@ public: { m_noRun = _state; } - + + bool isProcessingAudio() const + { + return isEnabled() && isAwake() && isOkay() && !dontRun(); + } + inline TempoSyncKnobModel* autoQuitModel() { return &m_autoQuitModel; @@ -162,21 +156,32 @@ protected: }; /** - * The main audio processing method that runs when plugin is not asleep + * The main audio processing method that runs when plugin is awake and running */ virtual ProcessStatus processImpl(SampleFrame* buf, const fpp_t frames) = 0; /** - * Optional method that runs when plugin is sleeping (not enabled, - * not running, not in the Okay state, or in the Don't Run state) + * Optional method that runs instead of `processImpl` when an effect + * is awake but not running. 
*/ virtual void processBypassedImpl() {} gui::PluginView* instantiateView( QWidget * ) override; - virtual void onEnabledChanged() {} + void goToSleep() + { + m_quietBufferCount = 0; + m_awake = false; + } + void wakeUp() + { + m_quietBufferCount = 0; + m_awake = true; + } + + virtual void onEnabledChanged() {} private: /** @@ -184,14 +189,14 @@ private: * after "decay" ms of the output buffer remaining below the silence threshold, the effect is * turned off and won't be processed again until it receives new audio input. */ - void handleAutoQuit(std::span output); + void handleAutoQuit(bool silentOutput); EffectChain * m_parent; bool m_okay; bool m_noRun; - bool m_running; + bool m_awake; //! The number of consecutive periods where output buffers remain below the silence threshold f_cnt_t m_quietBufferCount = 0; diff --git a/include/EffectChain.h b/include/EffectChain.h index 0af23c4b82..20af598172 100644 --- a/include/EffectChain.h +++ b/include/EffectChain.h @@ -33,8 +33,8 @@ namespace lmms { +class AudioBuffer; class Effect; -class SampleFrame; namespace gui { @@ -63,8 +63,7 @@ public: void removeEffect( Effect * _effect ); void moveDown( Effect * _effect ); void moveUp( Effect * _effect ); - bool processAudioBuffer( SampleFrame* _buf, const fpp_t _frames, bool hasInputNoise ); - void startRunning(); + bool processAudioBuffer(AudioBuffer& buffer); void clear(); diff --git a/include/LmmsTypes.h b/include/LmmsTypes.h index c348a85bfc..0b6f26cdd8 100644 --- a/include/LmmsTypes.h +++ b/include/LmmsTypes.h @@ -43,12 +43,11 @@ using int_sample_t = std::int16_t; // 16-bit-int-sample using sample_rate_t = std::uint32_t; // sample-rate using fpp_t = std::size_t; // frames per period (0-16384) using f_cnt_t = std::size_t; // standard frame-count -using ch_cnt_t = std::uint8_t; // channel-count (0-DEFAULT_CHANNELS) +using ch_cnt_t = std::uint8_t; // audio channel index/count (0-MaxChannelsPerAudioBuffer) using bpm_t = std::uint16_t; // tempo (MIN_BPM to MAX_BPM) using 
bitrate_t = std::uint16_t; // bitrate in kbps using mix_ch_t = std::uint16_t; // Mixer-channel (0 to MAX_CHANNEL) -using track_ch_t = std::uint16_t; // track channel index/count (0-256) -using proc_ch_t = std::uint16_t; // audio processor channel index/count +using group_cnt_t = std::uint8_t; // channel group index/count (0-MaxGroupsPerAudioBuffer) using jo_id_t = std::uint32_t; // (unique) ID of a journalling object diff --git a/include/MixHelpers.h b/include/MixHelpers.h index 3b0ecf968f..210d24f04c 100644 --- a/include/MixHelpers.h +++ b/include/MixHelpers.h @@ -25,7 +25,7 @@ #ifndef LMMS_MIX_HELPERS_H #define LMMS_MIX_HELPERS_H -#include "LmmsTypes.h" +#include "AudioBufferView.h" namespace lmms { @@ -38,15 +38,28 @@ namespace MixHelpers bool isSilent( const SampleFrame* src, int frames ); +bool isSilent(std::span buffer); + bool useNaNHandler(); void setNaNHandler( bool use ); -bool sanitize( SampleFrame* src, int frames ); +/** + * @brief Sanitizes a buffer of infs/NaNs, zeroing the entire buffer if + * any is detected. + * + * Only performs sanitization when the NaN handler is active. + * + * @returns true if inf or NaN was detected + */ +bool sanitize(std::span buffer); /*! \brief Add samples from src to dst */ void add( SampleFrame* dst, const SampleFrame* src, int frames ); +/*! \brief Add samples from src to dst */ +void add(PlanarBufferView dst, PlanarBufferView src); + /*! 
\brief Multiply samples from `dst` by `coeff` */ void multiply(SampleFrame* dst, float coeff, int frames); diff --git a/include/Mixer.h b/include/Mixer.h index 6e3c86565c..65698267cb 100644 --- a/include/Mixer.h +++ b/include/Mixer.h @@ -25,9 +25,10 @@ #ifndef LMMS_MIXER_H #define LMMS_MIXER_H -#include "Model.h" +#include "AudioBuffer.h" #include "EffectChain.h" #include "JournallingObject.h" +#include "Model.h" #include "ThreadableJob.h" #include @@ -43,56 +44,54 @@ using MixerRouteVector = std::vector; class MixerChannel : public ThreadableJob { - public: - MixerChannel( int idx, Model * _parent ); - virtual ~MixerChannel(); +public: + MixerChannel(int idx, Model* _parent); + virtual ~MixerChannel(); - EffectChain m_fxChain; + EffectChain m_fxChain; - // set to true when input fed from mixToChannel or child channel - bool m_hasInput; - // set to true if any effect in the channel is enabled and running - bool m_stillRunning; + // set to true if any effect in the channel is enabled and running + bool m_stillRunning; - float m_peakLeft; - float m_peakRight; - SampleFrame* m_buffer; - bool m_muteBeforeSolo; - BoolModel m_muteModel; - BoolModel m_soloModel; - FloatModel m_volumeModel; - QString m_name; - QMutex m_lock; - bool m_queued; // are we queued up for rendering yet? - bool m_muted; // are we muted? updated per period so we don't have to call m_muteModel.value() twice + float m_peakLeft; + float m_peakRight; + AudioBuffer m_buffer; + bool m_muteBeforeSolo; + BoolModel m_muteModel; + BoolModel m_soloModel; + FloatModel m_volumeModel; + QString m_name; + QMutex m_lock; + bool m_queued; // are we queued up for rendering yet? + bool m_muted; // are we muted? 
updated per period so we don't have to call m_muteModel.value() twice - // pointers to other channels that this one sends to - MixerRouteVector m_sends; + // pointers to other channels that this one sends to + MixerRouteVector m_sends; - // pointers to other channels that send to this one - MixerRouteVector m_receives; + // pointers to other channels that send to this one + MixerRouteVector m_receives; - int index() const { return m_channelIndex; } - void setIndex(int index) { m_channelIndex = index; } + int index() const { return m_channelIndex; } + void setIndex(int index) { m_channelIndex = index; } - bool isMaster() { return m_channelIndex == 0; } + bool isMaster() { return m_channelIndex == 0; } - bool requiresProcessing() const override { return true; } - void unmuteForSolo(); - void unmuteSenderForSolo(); - void unmuteReceiverForSolo(); + bool requiresProcessing() const override { return true; } + void unmuteForSolo(); + void unmuteSenderForSolo(); + void unmuteReceiverForSolo(); - auto color() const -> const std::optional& { return m_color; } - void setColor(const std::optional& color) { m_color = color; } + auto color() const -> const std::optional& { return m_color; } + void setColor(const std::optional& color) { m_color = color; } - std::atomic_size_t m_dependenciesMet; - void incrementDeps(); - void processed(); - - private: - void doProcessing() override; - int m_channelIndex; - std::optional m_color; + std::atomic_size_t m_dependenciesMet; + void incrementDeps(); + void processed(); + +private: + void doProcessing() override; + int m_channelIndex; + std::optional m_color; }; class MixerRoute : public QObject @@ -143,7 +142,7 @@ public: Mixer(); ~Mixer() override; - void mixToChannel( const SampleFrame* _buf, mix_ch_t _ch ); + void mixToChannel(const AudioBuffer& buffer, mix_ch_t dest); void prepareMasterMix(); void masterMix( SampleFrame* _buf ); diff --git a/include/SharedMemory.h b/include/SharedMemory.h index 3ed1330d4e..9e8e93b269 100644 --- 
a/include/SharedMemory.h +++ b/include/SharedMemory.h @@ -2,6 +2,7 @@ * SharedMemory.h * * Copyright (c) 2022 Dominic Clark + * Copyright (c) 2025-2026 Dalton Messmer * * This file is part of LMMS - https://lmms.io * @@ -26,6 +27,8 @@ #define LMMS_SHARED_MEMORY_H #include +#include +#include #include #include @@ -42,9 +45,9 @@ class SharedMemoryData { public: SharedMemoryData() noexcept; - SharedMemoryData(std::string&& key, bool readOnly); - SharedMemoryData(std::string&& key, std::size_t size, bool readOnly); - SharedMemoryData(std::size_t size, bool readOnly); + SharedMemoryData(std::string&& key, bool readOnly, bool isArray); + SharedMemoryData(std::string&& key, std::size_t size, bool readOnly, bool isArray); + SharedMemoryData(std::size_t size, bool readOnly, bool isArray); ~SharedMemoryData(); SharedMemoryData(SharedMemoryData&& other) noexcept; @@ -65,7 +68,7 @@ public: const std::string& key() const noexcept { return m_key; } void* get() const noexcept { return m_ptr; } - std::size_t size_bytes() const noexcept; + std::size_t arraySize() const noexcept; private: std::string m_key; @@ -76,30 +79,96 @@ private: } // namespace detail +//! Similar to std::pmr::monotonic_buffer_resource, but the initial buffer can be replaced +class SharedMemoryResource final : public std::pmr::memory_resource +{ +public: + SharedMemoryResource() = default; + SharedMemoryResource(void* buffer, std::size_t bufferSize) noexcept + : m_buffer{buffer} + , m_availableBytes{bufferSize} + , m_initialBuffer{buffer} + , m_initialBufferSize{bufferSize} + {} + + SharedMemoryResource(const SharedMemoryResource&) = delete; + auto operator=(const SharedMemoryResource&) -> SharedMemoryResource& = delete; + SharedMemoryResource(SharedMemoryResource&&) = default; + auto operator=(SharedMemoryResource&&) -> SharedMemoryResource& = default; + + //! 
Returns the buffer back to its initial state + void reset() noexcept + { + m_buffer = m_initialBuffer; + m_availableBytes = m_initialBufferSize; + } + + //! @returns the number of bytes that can still be allocated + auto availableBytes() const noexcept -> std::size_t { return m_availableBytes; } + + template + friend class SharedMemory; + +private: + //! Replaces the initial buffer + void reset(void* newBuffer, std::size_t newBufferSize) noexcept + { + m_buffer = newBuffer; + m_availableBytes = newBufferSize; + m_initialBuffer = newBuffer; + m_initialBufferSize = newBufferSize; + } + + void* do_allocate(std::size_t bytes, std::size_t alignment) override + { + void* p = std::align(alignment, bytes, m_buffer, m_availableBytes); + if (!p) { throw std::bad_alloc{}; } + + m_buffer = static_cast(m_buffer) + bytes; + m_availableBytes -= bytes; + return p; + } + void do_deallocate(void*, std::size_t, std::size_t) override {} // no-op + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override + { + return this == &other; + } + +private: + void* m_buffer = nullptr; + std::size_t m_availableBytes = 0; + void* m_initialBuffer = nullptr; + std::size_t m_initialBufferSize = 0; +}; + + template class SharedMemory { // This is stricter than necessary, but keeps things easy for now static_assert(std::is_trivial_v, "objects held in shared memory must be trivial"); + static_assert(sizeof(T) > 0); public: SharedMemory() = default; + SharedMemory(const SharedMemory&) = delete; + SharedMemory& operator=(const SharedMemory&) = delete; SharedMemory(SharedMemory&&) = default; SharedMemory& operator=(SharedMemory&&) = default; void attach(std::string key) { - m_data = detail::SharedMemoryData{std::move(key), std::is_const_v}; + m_data = detail::SharedMemoryData{std::move(key), std::is_const_v, false}; } void create(std::string key) { - m_data = detail::SharedMemoryData{std::move(key), sizeof(T), std::is_const_v}; + m_data = detail::SharedMemoryData{std::move(key), 
sizeof(T), std::is_const_v, false}; } void create() { - m_data = detail::SharedMemoryData{sizeof(T), std::is_const_v}; + m_data = detail::SharedMemoryData{sizeof(T), std::is_const_v, false}; } void detach() noexcept @@ -126,43 +195,53 @@ class SharedMemory { // This is stricter than necessary, but keeps things easy for now static_assert(std::is_trivial_v, "objects held in shared memory must be trivial"); + static_assert(sizeof(T) > 0); public: SharedMemory() = default; + SharedMemory(const SharedMemory&) = delete; + SharedMemory& operator=(const SharedMemory&) = delete; SharedMemory(SharedMemory&&) = default; SharedMemory& operator=(SharedMemory&&) = default; void attach(std::string key) { - m_data = detail::SharedMemoryData{std::move(key), std::is_const_v}; + m_data = detail::SharedMemoryData{std::move(key), std::is_const_v, true}; + m_resource.reset(m_data.get(), size_bytes()); } void create(std::string key, std::size_t size) { - m_data = detail::SharedMemoryData{std::move(key), size * sizeof(T), std::is_const_v}; + m_data = detail::SharedMemoryData{std::move(key), size * sizeof(T), std::is_const_v, true}; + m_resource.reset(m_data.get(), size_bytes()); } void create(std::size_t size) { - m_data = detail::SharedMemoryData{size * sizeof(T), std::is_const_v}; + m_data = detail::SharedMemoryData{size * sizeof(T), std::is_const_v, true}; + m_resource.reset(m_data.get(), size_bytes()); } void detach() noexcept { m_data = detail::SharedMemoryData{}; + m_resource.reset(nullptr, 0); } const std::string& key() const noexcept { return m_data.key(); } T* get() const noexcept { return static_cast(m_data.get()); } - std::size_t size() const noexcept { return m_data.size_bytes() / sizeof(T); } - std::size_t size_bytes() const noexcept { return m_data.size_bytes(); } + std::size_t size() const noexcept { return m_data.arraySize() / sizeof(T); } + std::size_t size_bytes() const noexcept { return m_data.arraySize(); } T& operator[](std::size_t index) const noexcept { return 
get()[index]; } explicit operator bool() const noexcept { return get() != nullptr; } + SharedMemoryResource* resource() noexcept { return &m_resource; } + private: detail::SharedMemoryData m_data; + SharedMemoryResource m_resource; }; } // namespace lmms diff --git a/include/lmms_constants.h b/include/lmms_constants.h index a5dbad94d2..ea6c45d184 100644 --- a/include/lmms_constants.h +++ b/include/lmms_constants.h @@ -34,7 +34,10 @@ namespace lmms // using this directly inline constexpr float F_EPSILON = 1.0e-10f; // 10^-10 -inline constexpr ch_cnt_t DEFAULT_CHANNELS = 2; +// Channel counts +inline constexpr auto DEFAULT_CHANNELS = ch_cnt_t{2}; +inline constexpr auto MaxChannelsPerAudioBuffer = ch_cnt_t{128}; +inline constexpr auto MaxGroupsPerAudioBuffer = group_cnt_t{MaxChannelsPerAudioBuffer / 2}; // Microtuner inline constexpr unsigned MaxScaleCount = 10; //!< number of scales per project diff --git a/plugins/Compressor/CompressorControlDialog.cpp b/plugins/Compressor/CompressorControlDialog.cpp index 40b7e679d5..baca840229 100755 --- a/plugins/Compressor/CompressorControlDialog.cpp +++ b/plugins/Compressor/CompressorControlDialog.cpp @@ -377,7 +377,7 @@ void CompressorControlDialog::updateDisplay() return; } - if (!m_controls->m_effect->isEnabled() || !m_controls->m_effect->isRunning()) + if (!m_controls->m_effect->isProcessingAudio()) { m_controls->m_effect->m_displayPeak[0] = COMP_NOISE_FLOOR; m_controls->m_effect->m_displayPeak[1] = COMP_NOISE_FLOOR; @@ -616,7 +616,7 @@ void CompressorControlDialog::paintEvent(QPaintEvent *event) m_p.setOpacity(0.25); m_p.drawPixmap(0, 0, m_kneePixmap); m_p.setOpacity(1); - if (m_controls->m_effect->isEnabled() && m_controls->m_effect->isRunning()) + if (m_controls->m_effect->isProcessingAudio()) { m_p.drawPixmap(0, 0, m_kneePixmap2); } diff --git a/plugins/LadspaEffect/LadspaEffect.cpp b/plugins/LadspaEffect/LadspaEffect.cpp index 16ac99aa8c..671f27b672 100644 --- a/plugins/LadspaEffect/LadspaEffect.cpp +++ 
b/plugins/LadspaEffect/LadspaEffect.cpp @@ -128,7 +128,7 @@ void LadspaEffect::changeSampleRate() Effect::ProcessStatus LadspaEffect::processImpl(SampleFrame* buf, const fpp_t frames) { m_pluginMutex.lock(); - if (!isOkay() || dontRun() || !isEnabled() || !isRunning()) + if (!isProcessingAudio()) { m_pluginMutex.unlock(); return ProcessStatus::Sleep; diff --git a/plugins/StereoEnhancer/StereoEnhancer.cpp b/plugins/StereoEnhancer/StereoEnhancer.cpp index 1a199d4adf..c6a2544136 100644 --- a/plugins/StereoEnhancer/StereoEnhancer.cpp +++ b/plugins/StereoEnhancer/StereoEnhancer.cpp @@ -84,6 +84,7 @@ StereoEnhancerEffect::~StereoEnhancerEffect() Effect::ProcessStatus StereoEnhancerEffect::processImpl(SampleFrame* buf, const fpp_t frames) { + m_delayBufferCleared = false; const float d = dryLevel(); const float w = wetLevel(); @@ -119,19 +120,24 @@ Effect::ProcessStatus StereoEnhancerEffect::processImpl(SampleFrame* buf, const m_currFrame %= DEFAULT_BUFFER_SIZE; } - if( !isRunning() ) - { - clearMyBuffer(); - } - return ProcessStatus::ContinueIfNotQuiet; } +void StereoEnhancerEffect::processBypassedImpl() +{ + clearMyBuffer(); +} + + + + void StereoEnhancerEffect::clearMyBuffer() { + if (m_delayBufferCleared) { return; } + for (auto i = std::size_t{0}; i < DEFAULT_BUFFER_SIZE; i++) { m_delayBuffer[i][0] = 0.0f; @@ -139,6 +145,7 @@ void StereoEnhancerEffect::clearMyBuffer() } m_currFrame = 0; + m_delayBufferCleared = true; } diff --git a/plugins/StereoEnhancer/StereoEnhancer.h b/plugins/StereoEnhancer/StereoEnhancer.h index 3e27330add..f08d8ac797 100644 --- a/plugins/StereoEnhancer/StereoEnhancer.h +++ b/plugins/StereoEnhancer/StereoEnhancer.h @@ -42,6 +42,7 @@ public: ~StereoEnhancerEffect() override; ProcessStatus processImpl(SampleFrame* buf, const fpp_t frames) override; + void processBypassedImpl() override; EffectControls * controls() override { @@ -56,7 +57,8 @@ private: SampleFrame* m_delayBuffer; int m_currFrame; - + bool m_delayBufferCleared = true; + 
StereoEnhancerControls m_bbControls; friend class StereoEnhancerControls; diff --git a/src/common/SharedMemory.cpp b/src/common/SharedMemory.cpp index 6ef815c6db..5952b0d0b0 100644 --- a/src/common/SharedMemory.cpp +++ b/src/common/SharedMemory.cpp @@ -2,6 +2,7 @@ * SharedMemory.cpp * * Copyright (c) 2022 Dominic Clark + * Copyright (c) 2025-2026 Dalton Messmer * * This file is part of LMMS - https://lmms.io * @@ -46,6 +47,18 @@ namespace lmms::detail { +namespace { + +//! Header for communicating the shared memory's data size in-band +struct Header +{ + //! The requested shared memory data size in bytes. + //! `sizeof(Header) + size` is the total allocation size. + std::uint64_t size; +}; + +} // namespace + #if _POSIX_SHARED_MEMORY_OBJECTS > 0 || defined(LMMS_BUILD_APPLE) namespace { @@ -75,8 +88,8 @@ using ShmObject = UniqueNullableResource; class SharedMemoryImpl { public: - SharedMemoryImpl(const std::string& key, bool readOnly) : - m_key{'/' + key} + SharedMemoryImpl(const std::string& key, bool readOnly, bool isArray) + : m_key{'/' + key} { const auto openFlags = readOnly ? O_RDONLY : O_RDWR; const auto fd = FileDescriptor{ @@ -86,17 +99,31 @@ public: auto stat = (struct stat){}; if (fstat(fd.get(), &stat) == -1) { throwSystemError("SharedMemoryImpl: fstat() failed"); } + + // NOTE: On macOS, this is the page size, not the size used to create the shared memory m_size = stat.st_size; const auto mappingProtection = readOnly ? 
PROT_READ : PROT_READ | PROT_WRITE; m_mapping = mmap(nullptr, m_size, mappingProtection, MAP_SHARED, fd.get(), 0); if (m_mapping == MAP_FAILED) { throwSystemError("SharedMemoryImpl: mmap() failed"); } + + if (isArray) + { + // Array size is stored in-band + m_arraySize = static_cast(m_mapping)->size; + } } - SharedMemoryImpl(const std::string& key, std::size_t size, bool readOnly) : - m_key{'/' + key}, - m_size{size} + SharedMemoryImpl(const std::string& key, std::size_t size, bool readOnly, bool isArray) + : m_key{'/' + key} + , m_size{size} { + if (isArray) + { + m_size += sizeof(Header); // space for the header + m_arraySize = size; + } + const auto fd = FileDescriptor{ retryWhileInterrupted([&]() noexcept { return shm_open(m_key.c_str(), O_RDWR | O_CREAT | O_EXCL, 0600); }) }; @@ -110,6 +137,11 @@ public: const auto mappingProtection = readOnly ? PROT_READ : PROT_READ | PROT_WRITE; m_mapping = mmap(nullptr, m_size, mappingProtection, MAP_SHARED, fd.get(), 0); if (m_mapping == MAP_FAILED) { throwSystemError("SharedMemoryImpl: mmap() failed"); } + + if (isArray) + { + new (m_mapping) Header(size); + } } SharedMemoryImpl(const SharedMemoryImpl&) = delete; @@ -120,12 +152,19 @@ public: munmap(m_mapping, m_size); } - auto get() const noexcept -> void* { return m_mapping; } - auto size_bytes() const noexcept -> std::size_t { return m_size; } + auto get() const noexcept -> void* + { + return m_arraySize > 0 + ? static_cast(m_mapping) + sizeof(Header) + : m_mapping; + } + + auto arraySize() const noexcept -> std::size_t { return m_arraySize; } private: std::string m_key; std::size_t m_size = 0; + std::size_t m_arraySize = 0; // non-zero if it's an array void* m_mapping = nullptr; ShmObject m_object; }; @@ -157,7 +196,7 @@ using FileView = UniqueNullableResource; class SharedMemoryImpl { public: - SharedMemoryImpl(const std::string& key, bool readOnly) + SharedMemoryImpl(const std::string& key, bool readOnly, bool isArray) { const auto access = readOnly ? 
FILE_MAP_READ : FILE_MAP_WRITE; m_mapping.reset(OpenFileMappingA(access, false, key.c_str())); @@ -166,19 +205,18 @@ public: m_view.reset(MapViewOfFile(m_mapping.get(), access, 0, 0, 0)); if (!m_view) { throwLastError("SharedMemoryImpl: MapViewOfFile() failed"); } - MEMORY_BASIC_INFORMATION mbi; - if (VirtualQuery(m_view.get(), &mbi, sizeof(mbi)) == 0) + if (isArray) { - throwLastError("SharedMemoryImpl: VirtualQuery() failed"); + // Array size is stored in-band + m_arraySize = static_cast(static_cast(m_view.get()))->size; } - - m_size = static_cast(mbi.RegionSize); } - SharedMemoryImpl(const std::string& key, std::size_t size, bool readOnly) : - m_size{size} + SharedMemoryImpl(const std::string& key, std::size_t size, bool readOnly, bool isArray) { - const auto [high, low] = sizeToHighAndLow(size); + const auto [high, low] = isArray + ? sizeToHighAndLow(size + sizeof(Header)) + : sizeToHighAndLow(size); m_mapping.reset(CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, high, low, key.c_str())); // This constructor is supposed to create a new shared memory object, // but passing the name of an existing object causes CreateFileMappingA @@ -191,18 +229,30 @@ public: const auto access = readOnly ? FILE_MAP_READ : FILE_MAP_WRITE; m_view.reset(MapViewOfFile(m_mapping.get(), access, 0, 0, 0)); if (!m_view) { throwLastError("SharedMemoryImpl: MapViewOfFile() failed"); } + + if (isArray) + { + new (static_cast(m_view.get())) Header(size); + m_arraySize = size; + } } SharedMemoryImpl(const SharedMemoryImpl&) = delete; auto operator=(const SharedMemoryImpl&) -> SharedMemoryImpl& = delete; - auto get() const noexcept -> void* { return m_view.get(); } - auto size_bytes() const noexcept -> std::size_t { return m_size; } + auto get() const noexcept -> void* + { + return m_arraySize > 0 + ? 
static_cast(static_cast(m_view.get())) + sizeof(Header) + : m_view.get(); + } + + auto arraySize() const noexcept -> std::size_t { return m_arraySize; } private: UniqueHandle m_mapping; FileView m_view; - std::size_t m_size = 0; + std::size_t m_arraySize = 0; // non-zero if it's an array }; #endif @@ -232,20 +282,20 @@ auto createKey() -> std::string SharedMemoryData::SharedMemoryData() noexcept = default; -SharedMemoryData::SharedMemoryData(std::string&& key, bool readOnly) : - m_key{std::move(key)}, - m_impl{std::make_unique(m_key, readOnly)}, - m_ptr{m_impl->get()} +SharedMemoryData::SharedMemoryData(std::string&& key, bool readOnly, bool isArray) + : m_key{std::move(key)} + , m_impl{std::make_unique(m_key, readOnly, isArray)} + , m_ptr{m_impl->get()} { } -SharedMemoryData::SharedMemoryData(std::string&& key, std::size_t size, bool readOnly) : - m_key{std::move(key)}, - m_impl{std::make_unique(m_key, std::max(size, std::size_t{1}), readOnly)}, - m_ptr{m_impl->get()} +SharedMemoryData::SharedMemoryData(std::string&& key, std::size_t size, bool readOnly, bool isArray) + : m_key{std::move(key)} + , m_impl{std::make_unique(m_key, std::max(size, std::size_t{1}), readOnly, isArray)} + , m_ptr{m_impl->get()} { } -SharedMemoryData::SharedMemoryData(std::size_t size, bool readOnly) : - SharedMemoryData{createKey(), size, readOnly} +SharedMemoryData::SharedMemoryData(std::size_t size, bool readOnly, bool isArray) + : SharedMemoryData{createKey(), size, readOnly, isArray} { } SharedMemoryData::~SharedMemoryData() = default; @@ -256,9 +306,9 @@ SharedMemoryData::SharedMemoryData(SharedMemoryData&& other) noexcept : m_ptr{std::exchange(other.m_ptr, nullptr)} { } -auto SharedMemoryData::size_bytes() const noexcept -> std::size_t +auto SharedMemoryData::arraySize() const noexcept -> std::size_t { - return m_impl ? m_impl->size_bytes() : 0; + return m_impl ? 
m_impl->arraySize() : 0; } } // namespace lmms::detail diff --git a/src/core/AudioBuffer.cpp b/src/core/AudioBuffer.cpp new file mode 100644 index 0000000000..553a0b559e --- /dev/null +++ b/src/core/AudioBuffer.cpp @@ -0,0 +1,360 @@ +/* + * AudioBuffer.cpp + * + * Copyright (c) 2026 Dalton Messmer + * + * This file is part of LMMS - https://lmms.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program (see COPYING); if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301 USA. + * + */ + +#include "AudioBuffer.h" + +#include "ConfigManager.h" +#include "MixHelpers.h" +#include "SharedMemory.h" + +namespace lmms +{ + +namespace +{ + +//! 
@returns Bitset with all bits at or above `pos` set to `value` and the rest set to `!value` +template +auto createMask(ch_cnt_t pos) noexcept -> AudioBuffer::ChannelFlags +{ + assert(pos <= MaxChannelsPerAudioBuffer); + + AudioBuffer::ChannelFlags mask; + mask.set(); + + if constexpr (value) + { + mask <<= pos; + } + else + { + mask >>= (MaxChannelsPerAudioBuffer - pos); + } + + return mask; +} + +} // namespace + + +AudioBuffer::AudioBuffer(f_cnt_t frames, ch_cnt_t channels, + std::pmr::memory_resource* bufferResource) + : m_sourceBuffer{bufferResource} + , m_accessBuffer{bufferResource} + , m_interleavedBuffer{bufferResource} + , m_frames{frames} + , m_silenceTrackingEnabled{ConfigManager::inst()->value("ui", "disableautoquit", "1").toInt() == 0} +{ + if (channels == 0) + { + m_silenceFlags.set(); + return; + } + + if (!addGroup(channels)) + { + throw std::runtime_error{"failed to add group"}; + } +} + +void AudioBuffer::allocateInterleavedBuffer() +{ + m_interleavedBuffer.resize(2 * m_frames); +} + +auto AudioBuffer::allocationSize(f_cnt_t frames, ch_cnt_t channels, bool withInterleavedBuffer) -> std::size_t +{ + auto bytes = frames * channels * sizeof(float) // for m_sourceBuffer + + channels * sizeof(float*); // for m_accessBuffer + + if (withInterleavedBuffer) + { + bytes += frames * 2 * sizeof(float); // for m_interleavedBuffer + } + + return bytes; +} + +auto AudioBuffer::addGroup(ch_cnt_t channels) -> ChannelGroup* +{ + if (m_groups.size() >= m_groups.capacity()) + { + // Maximum groups reached + return nullptr; + } + + if (channels == 0) + { + // Invalid channel count for a group + return nullptr; + } + + const auto oldTotalChannels = totalChannels(); + const auto newTotalChannels = totalChannels() + channels; + if (newTotalChannels > MaxChannelsPerAudioBuffer) + { + // Not enough room for requested channels + return nullptr; + } + + // Check if using a shared memory resource since its semantics are + // more restrictive than the default memory resource + 
const auto usesSharedMemory = dynamic_cast( + m_accessBuffer.get_allocator().resource()) != nullptr; + + const auto usesInterleavedBuffer = hasInterleavedBuffer(); + + if (usesSharedMemory) + { + // Shared memory must be reallocated without any over-allocations, + // since it only has a fixed amount of space + m_accessBuffer.clear(); + m_sourceBuffer.clear(); + m_interleavedBuffer.clear(); + } + + // Next, resize the buffers. The order here is important so no padding bytes + // are needed when allocating using a shared memory resource. The buffer + // with stricter padding requirements (m_accessBuffer) gets allocated first. + static_assert(alignof(float*) >= alignof(float)); + m_accessBuffer.resize(newTotalChannels); + m_sourceBuffer.resize(newTotalChannels * m_frames); + if (usesInterleavedBuffer) + { + m_interleavedBuffer.resize(2 * m_frames); + } + + // Fix channel buffers + float* ptr = m_sourceBuffer.data(); + ch_cnt_t channel = 0; + while (channel < newTotalChannels) + { + m_accessBuffer[channel] = ptr; + + ptr += m_frames; + ++channel; + } + + // Fix group buffers + channel = 0; + for (ChannelGroup& group : m_groups) + { + group.setBuffers(&m_accessBuffer[channel]); + channel += group.channels(); + } + + // Ensure the new channels (and all the higher, unused + // channels) are set to "silent" + m_silenceFlags |= createMask(oldTotalChannels); + + // Append new group + return &m_groups.emplace_back(&m_accessBuffer[oldTotalChannels], channels); +} + +void AudioBuffer::enableSilenceTracking(bool enabled) +{ + const auto oldValue = m_silenceTrackingEnabled; + m_silenceTrackingEnabled = enabled; + if (!oldValue && enabled) + { + updateAllSilenceFlags(); + } +} + +void AudioBuffer::mixSilenceFlags(const AudioBuffer& other) +{ + m_silenceFlags &= other.silenceFlags(); +} + +auto AudioBuffer::hasSignal(const ChannelFlags& channels) const -> bool +{ + auto nonSilent = ~m_silenceFlags; + nonSilent &= channels; + return nonSilent.any(); +} + +auto 
AudioBuffer::hasAnySignal() const -> bool +{ + // This is possible due to the invariant that any channel bits + // at or above `totalChannels()` must always be marked silent + return !m_silenceFlags.all(); +} + +void AudioBuffer::sanitize(const ChannelFlags& channels, ch_cnt_t upperBound) +{ + if (!MixHelpers::useNaNHandler()) { return; } + + bool changesMade = false; + + const auto totalChannels = std::min(upperBound, this->totalChannels()); + for (ch_cnt_t ch = 0; ch < totalChannels; ++ch) + { + if (channels[ch]) + { + // This channel needs to be sanitized + if (MixHelpers::sanitize(buffer(ch))) + { + // Inf/NaN detected and buffer cleared + m_silenceFlags[ch] = true; + changesMade = true; + } + } + } + + if (changesMade && hasInterleavedBuffer() && (channels[0] || channels[1])) + { + // Keep the temporary interleaved buffer in sync + toInterleaved(groupBuffers(0), interleavedBuffer()); + } +} + +void AudioBuffer::sanitizeAll() +{ + if (!MixHelpers::useNaNHandler()) { return; } + + bool changesMade = false; + for (ch_cnt_t ch = 0; ch < totalChannels(); ++ch) + { + if (MixHelpers::sanitize(buffer(ch))) + { + // Inf/NaN detected and buffer cleared + m_silenceFlags[ch] = true; + changesMade = true; + } + } + + if (changesMade && hasInterleavedBuffer()) + { + // Keep the temporary interleaved buffer in sync + toInterleaved(groupBuffers(0), interleavedBuffer()); + } +} + +auto AudioBuffer::updateSilenceFlags(const ChannelFlags& channels, ch_cnt_t upperBound) -> bool +{ + assert(upperBound <= MaxChannelsPerAudioBuffer); + + // Invariant: Any channel bits at or above `totalChannels()` must be marked silent + assert((~m_silenceFlags & createMask(totalChannels())).none()); + + // If no channels are selected, return true (all selected channels are silent) + if (channels.none()) { return true; } + + const auto totalChannels = std::min(upperBound, this->totalChannels()); + + if (!m_silenceTrackingEnabled) + { + // Mark specified channels (up to the upper bound) as non-silent 
+ auto temp = ~channels; + temp |= createMask(totalChannels); + m_silenceFlags &= temp; + return false; + } + + bool allQuiet = true; + for (ch_cnt_t ch = 0; ch < totalChannels; ++ch) + { + if (channels[ch]) + { + // This channel needs to be updated + const auto quiet = MixHelpers::isSilent(buffer(ch)); + + m_silenceFlags[ch] = quiet; + allQuiet = allQuiet && quiet; + } + } + + return allQuiet; +} + +auto AudioBuffer::updateAllSilenceFlags() -> bool +{ + // Invariant: Any channel bits at or above `totalChannels()` must be marked silent + assert((~m_silenceFlags & createMask(totalChannels())).none()); + + // If there are no channels, return true (all channels are silent) + if (totalChannels() == 0) { return true; } + + if (!m_silenceTrackingEnabled) + { + // Mark all channels below `totalChannels()` as non-silent + m_silenceFlags &= createMask(totalChannels()); + return false; + } + + bool allQuiet = true; + for (ch_cnt_t ch = 0; ch < totalChannels(); ++ch) + { + const auto quiet = MixHelpers::isSilent(buffer(ch)); + + m_silenceFlags[ch] = quiet; + allQuiet = allQuiet && quiet; + } + + return allQuiet; +} + +void AudioBuffer::silenceChannels(const ChannelFlags& channels, ch_cnt_t upperBound) +{ + auto needSilenced = ~m_silenceFlags; + needSilenced &= channels; + + const auto totalChannels = std::min(upperBound, this->totalChannels()); + for (ch_cnt_t ch = 0; ch < totalChannels; ++ch) + { + if (needSilenced[ch]) + { + std::ranges::fill(buffer(ch), 0.f); + } + } + + if (hasInterleavedBuffer() && (needSilenced[0] || needSilenced[1])) + { + // Keep the temporary interleaved buffer in sync + toInterleaved(groupBuffers(0), interleavedBuffer()); + } + + m_silenceFlags |= channels; +} + +void AudioBuffer::silenceAllChannels() +{ + std::ranges::fill(m_sourceBuffer, 0); + std::ranges::fill(m_interleavedBuffer, 0); + + m_silenceFlags.set(); +} + +auto AudioBuffer::absPeakValue(ch_cnt_t channel) const -> float +{ + if (m_silenceFlags[channel]) + { + // Skip calculation if 
channel is already known to be silent + return 0; + } + + return std::ranges::max(buffer(channel), {}, static_cast(std::abs)); +} + +} // namespace lmms diff --git a/src/core/AudioBusHandle.cpp b/src/core/AudioBusHandle.cpp index e27a8c8ad4..96f03eae58 100644 --- a/src/core/AudioBusHandle.cpp +++ b/src/core/AudioBusHandle.cpp @@ -23,16 +23,16 @@ * */ +#include "AudioBusHandle.h" + #include -#include "AudioBusHandle.h" #include "AudioDevice.h" #include "AudioEngine.h" #include "EffectChain.h" #include "Mixer.h" #include "Engine.h" #include "MixHelpers.h" -#include "BufferManager.h" namespace lmms { @@ -41,7 +41,7 @@ AudioBusHandle::AudioBusHandle(const QString& name, bool hasEffectChain, FloatModel* volumeModel, FloatModel* panningModel, BoolModel* mutedModel) : m_bufferUsage(false), - m_buffer(BufferManager::acquire()), + m_buffer(Engine::audioEngine()->framesPerPeriod()), m_extOutputEnabled(false), m_nextMixerChannel(0), m_name(name), @@ -50,6 +50,8 @@ AudioBusHandle::AudioBusHandle(const QString& name, bool hasEffectChain, m_panningModel(panningModel), m_mutedModel(mutedModel) { + m_buffer.allocateInterleavedBuffer(); + Engine::audioEngine()->addAudioBusHandle(this); setExtOutputEnabled(true); } @@ -61,7 +63,6 @@ AudioBusHandle::~AudioBusHandle() { setExtOutputEnabled(false); Engine::audioEngine()->removeAudioBusHandle(this); - BufferManager::release(m_buffer); } @@ -99,7 +100,7 @@ bool AudioBusHandle::processEffects() { if (m_effects) { - bool more = m_effects->processAudioBuffer(m_buffer, Engine::audioEngine()->framesPerPeriod(), m_bufferUsage); + bool more = m_effects->processAudioBuffer(m_buffer); return more; } return false; @@ -116,7 +117,7 @@ void AudioBusHandle::doProcessing() const fpp_t fpp = Engine::audioEngine()->framesPerPeriod(); // clear the buffer - zeroSampleFrames(m_buffer, fpp); + m_buffer.silenceAllChannels(); //qDebug( "Playhandles: %d", m_playHandles.size() ); for (PlayHandle* ph : m_playHandles) // now we mix all playhandle buffers into our 
internal buffer @@ -128,7 +129,9 @@ void AudioBusHandle::doProcessing() || !MixHelpers::isSilent(ph->buffer(), fpp))) { m_bufferUsage = true; - MixHelpers::add(m_buffer, ph->buffer(), fpp); + + // Writing to temporary interleaved buffer until PlayHandle and MixHelpers switch to planar + MixHelpers::add(m_buffer.interleavedBuffer().asSampleFrames().data(), ph->buffer(), fpp); } ph->releaseBuffer(); // gets rid of playhandle's buffer and sets // pointer to null, so if it doesn't get re-acquired we know to skip it next time @@ -137,6 +140,9 @@ void AudioBusHandle::doProcessing() if (m_bufferUsage) { + // PlayHandle buffers were written to the temporary interleaved buffer + auto buffer = m_buffer.interleavedBuffer(); + // handle volume and panning // has both vol and pan models if (m_volumeModel && m_panningModel) @@ -151,8 +157,8 @@ void AudioBusHandle::doProcessing() { float v = volBuf->values()[f] * 0.01f; float p = panBuf->values()[f] * 0.01f; - m_buffer[f][0] *= (p <= 0 ? 1.0f : 1.0f - p) * v; - m_buffer[f][1] *= (p >= 0 ? 1.0f : 1.0f + p) * v; + buffer[f][0] *= (p <= 0 ? 1.0f : 1.0f - p) * v; + buffer[f][1] *= (p >= 0 ? 1.0f : 1.0f + p) * v; } } @@ -165,8 +171,8 @@ void AudioBusHandle::doProcessing() for (f_cnt_t f = 0; f < fpp; ++f) { float v = volBuf->values()[f] * 0.01f; - m_buffer[f][0] *= v * l; - m_buffer[f][1] *= v * r; + buffer[f][0] *= v * l; + buffer[f][1] *= v * r; } } @@ -177,8 +183,8 @@ void AudioBusHandle::doProcessing() for (f_cnt_t f = 0; f < fpp; ++f) { float p = panBuf->values()[f] * 0.01f; - m_buffer[f][0] *= (p <= 0 ? 1.0f : 1.0f - p) * v; - m_buffer[f][1] *= (p >= 0 ? 1.0f : 1.0f + p) * v; + buffer[f][0] *= (p <= 0 ? 1.0f : 1.0f - p) * v; + buffer[f][1] *= (p >= 0 ? 1.0f : 1.0f + p) * v; } } @@ -189,8 +195,8 @@ void AudioBusHandle::doProcessing() float v = m_volumeModel->value() * 0.01f; for (f_cnt_t f = 0; f < fpp; ++f) { - m_buffer[f][0] *= (p <= 0 ? 1.0f : 1.0f - p) * v; - m_buffer[f][1] *= (p >= 0 ? 
1.0f : 1.0f + p) * v; + buffer[f][0] *= (p <= 0 ? 1.0f : 1.0f - p) * v; + buffer[f][1] *= (p >= 0 ? 1.0f : 1.0f + p) * v; } } } @@ -205,8 +211,8 @@ void AudioBusHandle::doProcessing() for (f_cnt_t f = 0; f < fpp; ++f) { float v = volBuf->values()[f] * 0.01f; - m_buffer[f][0] *= v; - m_buffer[f][1] *= v; + buffer[f][0] *= v; + buffer[f][1] *= v; } } else @@ -214,11 +220,20 @@ void AudioBusHandle::doProcessing() float v = m_volumeModel->value() * 0.01f; for (f_cnt_t f = 0; f < fpp; ++f) { - m_buffer[f][0] *= v; - m_buffer[f][1] *= v; + buffer[f][0] *= v; + buffer[f][1] *= v; } } } + + // Copy from temporary interleaved buffer to the main planar buffer + // so they stay in sync + toPlanar(buffer, m_buffer.groupBuffers(0)); + + m_buffer.sanitizeAll(); + + // Update silence status of all channels for instrument output + m_buffer.updateAllSilenceFlags(); } // as of now there's no situation where we only have panning model but no volume model // if we have neither, we don't have to do anything here - just pass the audio as is @@ -227,8 +242,8 @@ void AudioBusHandle::doProcessing() const bool anyOutputAfterEffects = processEffects(); if (anyOutputAfterEffects || m_bufferUsage) { - Engine::mixer()->mixToChannel(m_buffer, m_nextMixerChannel); // send output to mixer - // TODO: improve the flow here - convert to pull model + // TODO: improve the flow here - convert to pull model + Engine::mixer()->mixToChannel(m_buffer, m_nextMixerChannel); // send output to mixer m_bufferUsage = false; } } diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index 85344487b6..036ba0cd67 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -1,6 +1,7 @@ set(LMMS_SRCS ${LMMS_SRCS} + core/AudioBuffer.cpp core/AudioBusHandle.cpp core/AudioEngine.cpp core/AudioEngineProfiler.cpp diff --git a/src/core/Effect.cpp b/src/core/Effect.cpp index 5ba7707bb1..368a0ad89a 100644 --- a/src/core/Effect.cpp +++ b/src/core/Effect.cpp @@ -23,14 +23,15 @@ * */ +#include "Effect.h" + 
#include -#include "Effect.h" +#include "AudioBuffer.h" +#include "ConfigManager.h" #include "EffectChain.h" #include "EffectControls.h" #include "EffectView.h" - -#include "ConfigManager.h" #include "SampleFrame.h" namespace lmms @@ -44,7 +45,7 @@ Effect::Effect( const Plugin::Descriptor * _desc, m_parent( nullptr ), m_okay( true ), m_noRun( false ), - m_running( false ), + m_awake(false), m_enabledModel( true, this, tr( "Effect enabled" ) ), m_wetDryModel( 1.0f, -1.0f, 1.0f, 0.01f, this, tr( "Wet/Dry mix" ) ), m_autoQuitModel( 1.0f, 1.0f, 8000.0f, 100.0f, 1.0f, this, tr( "Decay" ) ), @@ -91,29 +92,56 @@ void Effect::loadSettings( const QDomElement & _this ) -bool Effect::processAudioBuffer(SampleFrame* buf, const fpp_t frames) +bool Effect::processAudioBuffer(AudioBuffer& inOut) { - if (!isOkay() || dontRun() || !isEnabled() || !isRunning()) + if (!isAwake()) { + if (!inOut.hasSignal(0b11)) + { + // Sleeping plugins need to zero any track channels their output is routed to in order to + // prevent sudden track channel passthrough behavior when the plugin is put to sleep. + // Otherwise auto-quit could become audibly noticeable, which is not intended. 
+ + inOut.silenceChannels(0b11); + + return false; + } + + wakeUp(); + } + + if (!isProcessingAudio()) + { + // Plugin is awake but not processing audio processBypassedImpl(); return false; } - const auto status = processImpl(buf, frames); + const auto status = processImpl(inOut.interleavedBuffer().asSampleFrames().data(), inOut.frames()); + + // Copy interleaved plugin output to planar + toPlanar(inOut.interleavedBuffer(), inOut.groupBuffers(0)); + + inOut.sanitize(0b11); + + // Update silence status for track channels the processor wrote to + const bool silentOutput = inOut.updateSilenceFlags(0b11); + switch (status) { case ProcessStatus::Continue: break; case ProcessStatus::ContinueIfNotQuiet: - handleAutoQuit({buf, frames}); + handleAutoQuit(silentOutput); break; case ProcessStatus::Sleep: + goToSleep(); return false; default: break; } - return isRunning(); + return isAwake(); } @@ -142,55 +170,29 @@ Effect * Effect::instantiate( const QString& pluginName, -void Effect::handleAutoQuit(std::span output) +void Effect::handleAutoQuit(bool silentOutput) { if (!m_autoQuitEnabled) { return; } - /* - * In the past, the RMS was calculated then compared with a threshold of 10^(-10). - * Now we use a different algorithm to determine whether a buffer is non-quiet, so - * a new threshold is needed for the best compatibility. The following is how it's derived. - * - * Old method: - * RMS = average (L^2 + R^2) across stereo buffer. - * RMS threshold = 10^(-10) - * - * So for a single channel, it would be: - * RMS/2 = average M^2 across single channel buffer. - * RMS/2 threshold = 5^(-11) - * - * The new algorithm for determining whether a buffer is non-silent compares M with the threshold, - * not M^2, so the square root of M^2's threshold should give us the most compatible threshold for - * the new algorithm: - * - * (RMS/2)^0.5 = (5^(-11))^0.5 = 0.0001431 (approx.) - * - * In practice though, the exact value shouldn't really matter so long as it's sufficiently small. 
- */ - static constexpr auto threshold = 0.0001431f; - // Check whether we need to continue processing input. Restart the // counter if the threshold has been exceeded. - for (const SampleFrame& frame : output) + if (silentOutput) { - const auto abs = frame.abs(); - if (abs.left() >= threshold || abs.right() >= threshold) + // The output buffer is quiet, so check if auto-quit should be activated yet + if (++m_quietBufferCount > timeout()) { - // The output buffer is not quiet - m_quietBufferCount = 0; - return; + // Activate auto-quit + goToSleep(); } } - - // The output buffer is quiet, so check if auto-quit should be activated yet - if (++m_quietBufferCount > timeout()) + else { - // Activate auto-quit - stopRunning(); + // The output buffer is not quiet + m_quietBufferCount = 0; } } diff --git a/src/core/EffectChain.cpp b/src/core/EffectChain.cpp index 0a6a496f35..264b61781f 100644 --- a/src/core/EffectChain.cpp +++ b/src/core/EffectChain.cpp @@ -23,11 +23,12 @@ * */ +#include "EffectChain.h" #include #include -#include "EffectChain.h" +#include "AudioBuffer.h" #include "Effect.h" #include "DummyEffect.h" #include "MixHelpers.h" @@ -184,23 +185,19 @@ void EffectChain::moveUp( Effect * _effect ) -bool EffectChain::processAudioBuffer( SampleFrame* _buf, const fpp_t _frames, bool hasInputNoise ) +bool EffectChain::processAudioBuffer(AudioBuffer& buffer) { if( m_enabledModel.value() == false ) { return false; } - MixHelpers::sanitize( _buf, _frames ); + buffer.sanitizeAll(); bool moreEffects = false; - for (const auto& effect : m_effects) + for (Effect* effect : m_effects) { - if (hasInputNoise || effect->isRunning()) - { - moreEffects |= effect->processAudioBuffer(_buf, _frames); - MixHelpers::sanitize(_buf, _frames); - } + moreEffects |= effect->processAudioBuffer(buffer); } return moreEffects; @@ -209,22 +206,6 @@ bool EffectChain::processAudioBuffer( SampleFrame* _buf, const fpp_t _frames, bo -void EffectChain::startRunning() -{ - if( m_enabledModel.value() == 
false ) - { - return; - } - - for (const auto& effect : m_effects) - { - effect->startRunning(); - } -} - - - - void EffectChain::clear() { emit aboutToClear(); diff --git a/src/core/MixHelpers.cpp b/src/core/MixHelpers.cpp index 22fb43fd9d..f4f1d9cbbe 100644 --- a/src/core/MixHelpers.cpp +++ b/src/core/MixHelpers.cpp @@ -25,9 +25,10 @@ #include "MixHelpers.h" #ifdef LMMS_DEBUG -#include +#include #endif +#include #include #include "ValueBuffer.h" @@ -41,9 +42,13 @@ static bool s_NaNHandler; namespace lmms::MixHelpers { +namespace { + +constexpr auto SilenceThreshold = 0.000001f; // -120 dBFS + /*! \brief Function for applying MIXOP on all sample frames */ template -static inline void run( SampleFrame* dst, const SampleFrame* src, int frames, const MIXOP& OP ) +inline void run(SampleFrame* dst, const SampleFrame* src, int frames, const MIXOP& OP) { for( int i = 0; i < frames; ++i ) { @@ -53,7 +58,7 @@ static inline void run( SampleFrame* dst, const SampleFrame* src, int frames, co /*! \brief Function for applying MIXOP on all sample frames - split source */ template -static inline void run( SampleFrame* dst, const sample_t* srcLeft, const sample_t* srcRight, int frames, const MIXOP& OP ) +inline void run(SampleFrame* dst, const sample_t* srcLeft, const sample_t* srcRight, int frames, const MIXOP& OP) { for( int i = 0; i < frames; ++i ) { @@ -62,15 +67,13 @@ static inline void run( SampleFrame* dst, const sample_t* srcLeft, const sample_ } } - +} // namespace bool isSilent( const SampleFrame* src, int frames ) { - const float silenceThreshold = 0.0000001f; - for( int i = 0; i < frames; ++i ) { - if (std::abs(src[i][0]) >= silenceThreshold || std::abs(src[i][1]) >= silenceThreshold) + if (std::abs(src[i][0]) >= SilenceThreshold || std::abs(src[i][1]) >= SilenceThreshold) { return false; } @@ -79,6 +82,11 @@ bool isSilent( const SampleFrame* src, int frames ) return true; } +bool isSilent(std::span buffer) +{ + return std::ranges::all_of(buffer, [&](const sample_t s) 
{ return std::abs(s) < SilenceThreshold; }); +} + bool useNaNHandler() { return s_NaNHandler; @@ -89,41 +97,33 @@ void setNaNHandler( bool use ) s_NaNHandler = use; } -/*! \brief Function for sanitizing a buffer of infs/nans - returns true if those are found */ -bool sanitize( SampleFrame* src, int frames ) +bool sanitize(std::span buffer) { - if( !useNaNHandler() ) - { - return false; - } + if (!useNaNHandler()) { return false; } - for (int f = 0; f < frames; ++f) + for (std::size_t f = 0; f < buffer.size(); ++f) { - auto& currentFrame = src[f]; - - if (currentFrame.containsInf() || currentFrame.containsNaN()) + sample_t& sample = buffer[f]; + if (std::isinf(sample) || std::isnan(sample)) { - #ifdef LMMS_DEBUG - // TODO don't use printf here - printf("Bad data, clearing buffer. frame: "); - printf("%d: value %f, %f\n", f, currentFrame.left(), currentFrame.right()); - #endif +#ifdef LMMS_DEBUG + std::cerr << "Bad data, clearing buffer. frame: " + << f << ", value: " << sample << "\n"; +#endif - // Clear the whole buffer if a problem is found - zeroSampleFrames(src, frames); + // Clear the channel if a problem is found + std::ranges::fill(buffer, 0.f); return true; } else { - currentFrame.clamp(sample_t(-1000.0), sample_t(1000.0)); + sample = std::clamp(sample, sample_t(-1000.0), sample_t(1000.0)); } - }; - + } return false; } - struct AddOp { void operator()( SampleFrame& dst, const SampleFrame& src ) const @@ -138,6 +138,24 @@ void add( SampleFrame* dst, const SampleFrame* src, int frames ) } +void add(PlanarBufferView dst, PlanarBufferView src) +{ + assert(dst.channels() == src.channels()); + assert(dst.frames() == src.frames()); + + const auto channels = dst.channels(); + const auto frames = dst.frames(); + for (ch_cnt_t channel = 0; channel < channels; ++channel) + { + auto* dstPtr = dst.bufferPtr(channel); + const auto* srcPtr = src.bufferPtr(channel); + for (f_cnt_t frame = 0; frame < frames; ++frame) + { + dstPtr[frame] += srcPtr[frame]; + } + } +} + struct 
AddMultipliedOp { diff --git a/src/core/Mixer.cpp b/src/core/Mixer.cpp index 1569873361..27c4f60289 100644 --- a/src/core/Mixer.cpp +++ b/src/core/Mixer.cpp @@ -59,11 +59,10 @@ void MixerRoute::updateName() MixerChannel::MixerChannel( int idx, Model * _parent ) : m_fxChain( nullptr ), - m_hasInput( false ), m_stillRunning( false ), m_peakLeft( 0.0f ), m_peakRight( 0.0f ), - m_buffer( new SampleFrame[Engine::audioEngine()->framesPerPeriod()] ), + m_buffer(Engine::audioEngine()->framesPerPeriod()), m_muteModel( false, _parent ), m_soloModel( false, _parent ), m_volumeModel(1.f, 0.f, 2.f, 0.001f, _parent), @@ -73,7 +72,7 @@ MixerChannel::MixerChannel( int idx, Model * _parent ) : m_dependenciesMet(0), m_channelIndex(idx) { - zeroSampleFrames(m_buffer, Engine::audioEngine()->framesPerPeriod()); + m_buffer.allocateInterleavedBuffer(); } @@ -81,7 +80,6 @@ MixerChannel::MixerChannel( int idx, Model * _parent ) : MixerChannel::~MixerChannel() { - delete[] m_buffer; } @@ -173,51 +171,48 @@ void MixerChannel::doProcessing() FloatModel * sendModel = senderRoute->amount(); if( ! sendModel ) qFatal( "Error: no send model found from %d to %d", senderRoute->senderIndex(), m_channelIndex ); - if( sender->m_hasInput || sender->m_stillRunning ) + if (sender->m_buffer.hasAnySignal() || sender->m_stillRunning) { + auto buffer = m_buffer.interleavedBuffer().asSampleFrames(); + // figure out if we're getting sample-exact input ValueBuffer * sendBuf = sendModel->valueBuffer(); ValueBuffer * volBuf = sender->m_volumeModel.valueBuffer(); // mix it's output with this one's output - SampleFrame* ch_buf = sender->m_buffer; + auto ch_buf = sender->m_buffer.interleavedBuffer().asSampleFrames(); // use sample-exact mixing if sample-exact values are available if( ! volBuf && ! sendBuf ) // neither volume nor send has sample-exact data... 
{ const float v = sender->m_volumeModel.value() * sendModel->value(); - MixHelpers::addSanitizedMultiplied( m_buffer, ch_buf, v, fpp ); + MixHelpers::addSanitizedMultiplied(buffer.data(), ch_buf.data(), v, fpp); } else if( volBuf && sendBuf ) // both volume and send have sample-exact data { - MixHelpers::addSanitizedMultipliedByBuffers( m_buffer, ch_buf, volBuf, sendBuf, fpp ); + MixHelpers::addSanitizedMultipliedByBuffers(buffer.data(), ch_buf.data(), volBuf, sendBuf, fpp); } else if( volBuf ) // volume has sample-exact data but send does not { const float v = sendModel->value(); - MixHelpers::addSanitizedMultipliedByBuffer( m_buffer, ch_buf, v, volBuf, fpp ); + MixHelpers::addSanitizedMultipliedByBuffer(buffer.data(), ch_buf.data(), v, volBuf, fpp); } else // vice versa { const float v = sender->m_volumeModel.value(); - MixHelpers::addSanitizedMultipliedByBuffer( m_buffer, ch_buf, v, sendBuf, fpp ); + MixHelpers::addSanitizedMultipliedByBuffer(buffer.data(), ch_buf.data(), v, sendBuf, fpp); } - m_hasInput = true; + toPlanar(m_buffer.interleavedBuffer(), m_buffer.groupBuffers(0)); + m_buffer.mixSilenceFlags(sender->m_buffer); } } const float v = m_volumeModel.value(); - if( m_hasInput ) - { - // only start fxchain when we have input... 
- m_fxChain.startRunning(); - } + m_stillRunning = m_fxChain.processAudioBuffer(m_buffer); - m_stillRunning = m_fxChain.processAudioBuffer( m_buffer, fpp, m_hasInput ); - - SampleFrame peakSamples = getAbsPeakValues(m_buffer, fpp); + const auto peakSamples = SampleFrame{m_buffer.absPeakValue(0), m_buffer.absPeakValue(1)}; m_peakLeft = std::max(m_peakLeft, peakSamples[0] * v); m_peakRight = std::max(m_peakRight, peakSamples[1] * v); } @@ -642,14 +637,18 @@ FloatModel * Mixer::channelSendModel( mix_ch_t fromChannel, mix_ch_t toChannel ) -void Mixer::mixToChannel( const SampleFrame* _buf, mix_ch_t _ch ) +void Mixer::mixToChannel(const AudioBuffer& buffer, mix_ch_t dest) { - const auto channel = m_mixerChannels[_ch]; + const auto channel = m_mixerChannels[dest]; if (!channel->m_muteModel.value()) { channel->m_lock.lock(); - MixHelpers::add(channel->m_buffer, _buf, Engine::audioEngine()->framesPerPeriod()); - channel->m_hasInput = true; + MixHelpers::add(channel->m_buffer.groupBuffers(0), buffer.groupBuffers(0)); + + // Copy the planar buffer to the temporary interleaved buffer so they stay in sync + toInterleaved(channel->m_buffer.groupBuffers(0), channel->m_buffer.interleavedBuffer()); + + channel->m_buffer.mixSilenceFlags(buffer); channel->m_lock.unlock(); } } @@ -659,7 +658,7 @@ void Mixer::mixToChannel( const SampleFrame* _buf, mix_ch_t _ch ) void Mixer::prepareMasterMix() { - zeroSampleFrames(m_mixerChannels[0]->m_buffer, Engine::audioEngine()->framesPerPeriod()); + m_mixerChannels[0]->m_buffer.silenceAllChannels(); } @@ -710,6 +709,8 @@ void Mixer::masterMix( SampleFrame* _buf ) AudioEngineWorkerThread::startAndWaitForJobs(); } + auto buffer = m_mixerChannels[0]->m_buffer.interleavedBuffer().asSampleFrames(); + // handle sample-exact data in master volume fader ValueBuffer * volBuf = m_mixerChannels[0]->m_volumeModel.valueBuffer(); @@ -717,25 +718,23 @@ void Mixer::masterMix( SampleFrame* _buf ) { for( int f = 0; f < fpp; f++ ) { - 
m_mixerChannels[0]->m_buffer[f][0] *= volBuf->values()[f]; - m_mixerChannels[0]->m_buffer[f][1] *= volBuf->values()[f]; + buffer[f][0] *= volBuf->values()[f]; + buffer[f][1] *= volBuf->values()[f]; } } const float v = volBuf ? 1.0f : m_mixerChannels[0]->m_volumeModel.value(); - MixHelpers::addSanitizedMultiplied( _buf, m_mixerChannels[0]->m_buffer, v, fpp ); + MixHelpers::addSanitizedMultiplied(_buf, buffer.data(), v, fpp); // clear all channel buffers and // reset channel process state for( int i = 0; i < numChannels(); ++i) { - zeroSampleFrames(m_mixerChannels[i]->m_buffer, Engine::audioEngine()->framesPerPeriod()); + m_mixerChannels[i]->m_buffer.silenceAllChannels(); m_mixerChannels[i]->reset(); m_mixerChannels[i]->m_queued = false; - // also reset hasInput - m_mixerChannels[i]->m_hasInput = false; m_mixerChannels[i]->m_dependenciesMet = 0; } } diff --git a/src/tracks/InstrumentTrack.cpp b/src/tracks/InstrumentTrack.cpp index 18ad5c9584..0d0f64c702 100644 --- a/src/tracks/InstrumentTrack.cpp +++ b/src/tracks/InstrumentTrack.cpp @@ -249,10 +249,6 @@ void InstrumentTrack::processAudioBuffer( SampleFrame* buf, const fpp_t frames, m_silentBuffersProcessed = false; } - // if effects "went to sleep" because there was no input, wake them up - // now - m_audioBusHandle.effects()->startRunning(); - // get volume knob data static const float DefaultVolumeRatio = 1.0f / DefaultVolume; /*ValueBuffer * volBuf = m_volumeModel.valueBuffer(); diff --git a/src/tracks/SampleTrack.cpp b/src/tracks/SampleTrack.cpp index fe83b99e24..de548e514f 100644 --- a/src/tracks/SampleTrack.cpp +++ b/src/tracks/SampleTrack.cpp @@ -73,7 +73,6 @@ SampleTrack::~SampleTrack() bool SampleTrack::play( const TimePos & _start, const fpp_t _frames, const f_cnt_t _offset, int _clip_num ) { - m_audioBusHandle.effects()->startRunning(); bool played_a_note = false; // will be return variable diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 6e4df88549..b70d9fde9e 100644 --- 
a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -5,6 +5,7 @@ set(CMAKE_AUTOMOC ON) set(LMMS_TESTS src/core/ArrayVectorTest.cpp + src/core/AudioBufferTest.cpp src/core/AutomatableModelTest.cpp src/core/MathTest.cpp src/core/ProjectVersionTest.cpp @@ -30,4 +31,5 @@ foreach(LMMS_TEST_SRC IN LISTS LMMS_TESTS) ) target_compile_features(${LMMS_TEST_NAME} PRIVATE cxx_std_20) + target_compile_definitions(${LMMS_TEST_NAME} PRIVATE LMMS_TESTING) endforeach() diff --git a/tests/src/core/AudioBufferTest.cpp b/tests/src/core/AudioBufferTest.cpp new file mode 100644 index 0000000000..318c4edabf --- /dev/null +++ b/tests/src/core/AudioBufferTest.cpp @@ -0,0 +1,757 @@ +/* + * AudioBufferTest.cpp + * + * Copyright (c) 2026 Dalton Messmer + * + * This file is part of LMMS - https://lmms.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program (see COPYING); if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301 USA. + * + */ + +#include "AudioBuffer.h" + +#include +#include + +#include "MixHelpers.h" +#include "SharedMemory.h" + +using lmms::AudioBuffer; + +class AudioBufferTest : public QObject +{ + Q_OBJECT + +private slots: + //! 
Verifies constructor with default channels adds single stereo group + void Constructor_DefaultChannels() + { + auto ab = AudioBuffer{10}; + QCOMPARE(ab.groupCount(), 1); + QCOMPARE(ab.group(0).channels(), 2); + QCOMPARE(ab.totalChannels(), 2); + QCOMPARE(ab.frames(), 10); + QCOMPARE(ab.hasInterleavedBuffer(), false); + } + + //! Verifies constructor with no channels does not create a first group + void Constructor_NoChannels() + { + auto ab = AudioBuffer{10, 0}; + QCOMPARE(ab.groupCount(), 0); + QCOMPARE(ab.totalChannels(), 0); + QCOMPARE(ab.frames(), 10); + QCOMPARE(ab.hasInterleavedBuffer(), false); + } + + //! Verifies constructor with `SharedMemoryResource` allocates correct number of bytes + void Constructor_SharedMemoryResource() + { + lmms::SharedMemory sm; + sm.create(AudioBuffer::allocationSize(7, 3)); + QCOMPARE(sm.resource()->availableBytes(), AudioBuffer::allocationSize(7, 3)); + + auto ab = AudioBuffer{7, 3, sm.resource()}; + QCOMPARE(ab.groupCount(), 1); + QCOMPARE(ab.totalChannels(), 3); + QCOMPARE(ab.frames(), 7); + QCOMPARE(ab.hasInterleavedBuffer(), false); + + // All the bytes in the shared memory should have been used by AudioBuffer + QCOMPARE(sm.resource()->availableBytes(), 0); + } + + //! Verifies that the `allocateInterleavedBuffer` method allocates the interleaved buffer + void AllocateInterleavedBuffer() + { + auto ab = AudioBuffer{10, 0}; + QCOMPARE(ab.hasInterleavedBuffer(), false); + + ab.allocateInterleavedBuffer(); + QCOMPARE(ab.hasInterleavedBuffer(), true); + QVERIFY(ab.interleavedBuffer().data() != nullptr); + QCOMPARE(ab.interleavedBuffer().frames(), 10); + QCOMPARE(ab.interleavedBuffer().channels(), 2); + } + + + //! 
Verifies that the `addGroup` method can add the first group correctly + void AddGroup_FirstGroup() + { + // Begin with zero groups + auto ab = AudioBuffer{10, 0}; + + // Add a first group with 5 channels + auto group = ab.addGroup(5); + QVERIFY(group != nullptr); + QCOMPARE(&ab.group(0), group); + QCOMPARE(group->channels(), 5); + QCOMPARE(ab.groupCount(), 1); + QCOMPARE(ab.totalChannels(), 5); + } + + //! Verifies that a 2nd group can be appended after the 1st group + void AddGroup_SecondGroup() + { + // Begin with 1 group + auto ab = AudioBuffer{10, 3}; + + // Add a 2nd group with 4 channels + auto group = ab.addGroup(4); + QVERIFY(group != nullptr); + QCOMPARE(&ab.group(1), group); + QCOMPARE(group->channels(), 4); + QCOMPARE(ab.groupCount(), 2); + QCOMPARE(ab.totalChannels(), 7); + } + + //! Verifies that a group with 0 channels cannot be added and doing so has no effect + void AddGroup_ZeroChannelsFails() + { + auto ab = AudioBuffer{10}; + QCOMPARE(ab.groupCount(), 1); + QCOMPARE(ab.totalChannels(), 2); + + auto group = ab.addGroup(0); + QCOMPARE(group, nullptr); + + // Nothing should have changed + QCOMPARE(ab.groupCount(), 1); + QCOMPARE(ab.totalChannels(), 2); + } + + //! Verifies that groups cannot be added past the maximum group count + void AddGroup_MaximumGroups() + { + auto ab = AudioBuffer{10, 0}; + + // Add groups until no more can be added + auto groupsLeft = static_cast(lmms::MaxGroupsPerAudioBuffer); + QVERIFY(groupsLeft >= 0); + while (groupsLeft > 0) + { + auto temp = ab.addGroup(1); + QVERIFY(temp != nullptr); + --groupsLeft; + } + QCOMPARE(groupsLeft, 0); + QCOMPARE(ab.groupCount(), lmms::MaxGroupsPerAudioBuffer); + QCOMPARE(ab.totalChannels(), lmms::MaxGroupsPerAudioBuffer); + + // Next group should fail + auto group = ab.addGroup(1); + QCOMPARE(group, nullptr); + QCOMPARE(ab.groupCount(), lmms::MaxGroupsPerAudioBuffer); + QCOMPARE(ab.totalChannels(), lmms::MaxGroupsPerAudioBuffer); + } + + //! 
Verifies that groups cannot be added past the maximum total channel count for the track + void AddGroup_MaximumTotalChannels() + { + auto ab = AudioBuffer{10, lmms::MaxChannelsPerAudioBuffer - 1}; + + // Try adding a group with enough channels + // to push the total channels past the maximum for the track (should fail) + auto group = ab.addGroup(2); + QCOMPARE(group, nullptr); + QCOMPARE(ab.totalChannels(), lmms::MaxChannelsPerAudioBuffer - 1); + + // Ok, how about just enough to hit the maximum + // total channels for the track (should succeed) + group = ab.addGroup(1); + QVERIFY(group != nullptr); + QCOMPARE(ab.totalChannels(), lmms::MaxChannelsPerAudioBuffer); + } + + //! Verifies that `addGroup` with a `SharedMemoryResource` allocates the amount of bytes + //! specified by the `allocationSize` method + void AddGroup_SharedMemoryResource() + { + // Create enough shared memory for 3 channels with 7 frames each + lmms::SharedMemory sm; + sm.create(AudioBuffer::allocationSize(7, 3)); + QCOMPARE(sm.resource()->availableBytes(), AudioBuffer::allocationSize(7, 3)); + + // Create AudioBuffer using the shared memory + auto ab = AudioBuffer{7, 3, sm.resource()}; + QCOMPARE(sm.resource()->availableBytes(), 0); + + // Reallocate the shared memory in preparation for adding 10 more channels + sm.create(AudioBuffer::allocationSize(7, 13)); + QCOMPARE(sm.resource()->availableBytes(), AudioBuffer::allocationSize(7, 13)); + + // Now add the 10 additional channels + auto group = ab.addGroup(10); + QVERIFY(group != nullptr); + QCOMPARE(ab.totalChannels(), 13); + + QCOMPARE(sm.resource()->availableBytes(), 0); + } + + //! 
Verifies that groups can be specified using `setGroups` + void SetGroups() + { + // Start with 6 channels, all in one group + auto ab = AudioBuffer{10, 6}; + float* const* allBuffers = ab.allBuffers().data(); + + QCOMPARE(ab.groupCount(), 1); + QCOMPARE(ab.group(0).channels(), 6); + + // Split into group of 2 channels and group of 4 channels + ab.setGroups(2, [](lmms::group_cnt_t idx, lmms::AudioBuffer::ChannelGroup&) { + switch (idx) + { + case 0: return 2; // 1st group has 2 channels + case 1: return 4; // 2nd group has 4 channels + default: return 0; + } + }); + + QCOMPARE(ab.groupCount(), 2); + QCOMPARE(ab.group(0).channels(), 2); + QCOMPARE(ab.group(1).channels(), 4); + + // Check that no reallocation occurred + QCOMPARE(ab.allBuffers().data(), allBuffers); + } + + //! Verifies that an `AudioBuffer` object created using shared memory can be + //! exactly recreated, with the buffers shared between the two objects. This is an important + //! ability to allow using `AudioBuffer` on both the client and server sides of `RemotePlugin` + //! with shared memory as the backing array. 
+ void TwoAudioBuffersWithSameSharedMemory() + { + // Use enough shared memory for 5 channels with 7 frames each + interleaved buffer + const auto allocationSize = AudioBuffer::allocationSize(7, 5, true); + + // Split the 5 channels into 2 groups + auto groupVisitor = [](lmms::group_cnt_t idx, AudioBuffer::ChannelGroup&) { + switch (idx) + { + case 0: return 2; // 1st group has 2 channels + case 1: return 3; // 2nd group has 3 channels + default: return 0; + } + }; + + // Create server-side SharedMemory + lmms::SharedMemory smServer; + smServer.create(allocationSize); + QCOMPARE(smServer.resource()->availableBytes(), allocationSize); + + // Create server-side AudioBuffer + auto abServer = AudioBuffer{7, 5, 2, smServer.resource(), groupVisitor}; + abServer.allocateInterleavedBuffer(); + QCOMPARE(smServer.resource()->availableBytes(), 0); + QCOMPARE(abServer.groupCount(), 2); + QCOMPARE(abServer.totalChannels(), 5); + QCOMPARE(abServer.frames(), 7); + QCOMPARE(abServer.hasInterleavedBuffer(), true); + + // Connect to the server-side's SharedMemory + lmms::SharedMemory smClient; + smClient.attach(smServer.key()); + QCOMPARE(smClient.resource()->availableBytes(), allocationSize); + + // Create client-side AudioBuffer + auto abClient = AudioBuffer{7, 5, 2, smClient.resource(), groupVisitor}; + abClient.allocateInterleavedBuffer(); + QCOMPARE(smClient.resource()->availableBytes(), 0); + QCOMPARE(abClient.groupCount(), 2); + QCOMPARE(abClient.totalChannels(), 5); + QCOMPARE(abClient.frames(), 7); + QCOMPARE(abClient.hasInterleavedBuffer(), true); + + // Can write data on the server side and read it from the client side + abServer.buffer(1)[3] = 123.f; // 2nd channel, 4th frame + QCOMPARE(abClient.buffer(1)[3], 123.f); + + // Can write data on the client side and read it from the server side + abClient.group(1).buffer(2)[5] = 456.f; // 3rd channel of 2nd group, 6th frame + QCOMPARE(abServer.group(1).buffer(2)[5], 456.f); + } + + //!
Verifies all silence flag bits are set when there are no channels + void SilenceFlags_AllSilentWhenNoChannels() + { + auto ab = AudioBuffer{10, 0}; + QCOMPARE(ab.silenceFlags().all(), true); + } + + //! Verifies all silence flags bits are set even after adding new groups/channels + void SilenceFlags_AllSilentWhenNewGroupsAdded() + { + auto ab = AudioBuffer{10}; + QCOMPARE(ab.silenceFlags().all(), true); + + ab.addGroup(4); + QCOMPARE(ab.silenceFlags().all(), true); + } + + //! Verifies that `assumeNonSilent` clears a specific bit in the silence flags + void AssumeNonSilent() + { + auto ab = AudioBuffer{10, 2}; + QCOMPARE(ab.silenceFlags().all(), true); + + // Assume 2nd channel is non-silent + ab.assumeNonSilent(1); + + QCOMPARE(ab.silenceFlags().all(), false); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + } + + //! Verifies `enableSilenceTracking` enables and disables silence tracking + void EnableSilenceTracking_GetterSetter() + { + auto ab = AudioBuffer{10}; + ab.enableSilenceTracking(true); + QCOMPARE(ab.silenceTrackingEnabled(), true); + + ab.enableSilenceTracking(false); + QCOMPARE(ab.silenceTrackingEnabled(), false); + } + + //! Verifies that `enableSilenceTracking(true)` also updates silence flags + void EnableSilenceTracking_UpdatesSilenceFlags() + { + auto ab = AudioBuffer{10, 2}; + ab.enableSilenceTracking(false); + + // Assume 2nd channel is non-silent + ab.assumeNonSilent(1); + + QCOMPARE(ab.silenceFlags().all(), false); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + + ab.enableSilenceTracking(true); + + // Silence flags should be updated + QCOMPARE(ab.silenceFlags().all(), true); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], true); + } + + //! Verifies that the `updateSilenceFlags` method does nothing to silence flags + //! when all channels are already silent, regardless of which channels are selected + //! for an update. 
+ void UpdateSilenceFlags_DoesNothingWhenSilent() + { + auto ab = AudioBuffer{10}; + ab.enableSilenceTracking(true); + + QCOMPARE(ab.silenceFlags().all(), true); + + // Left channel only + QCOMPARE(ab.updateSilenceFlags(0b01), true); + QCOMPARE(ab.silenceFlags().all(), true); + + // Right channel only + QCOMPARE(ab.updateSilenceFlags(0b10), true); + QCOMPARE(ab.silenceFlags().all(), true); + + // Both channels + QCOMPARE(ab.updateSilenceFlags(0b11), true); + QCOMPARE(ab.silenceFlags().all(), true); + } + + //! Verifies that the `updateSilenceFlags` method updates a single non-silent channel, + //! but only when that channel is selected for an update. + void UpdateSilenceFlags_UpdatesChannelWhenSelected() + { + auto ab = AudioBuffer{10}; + ab.enableSilenceTracking(true); + + // Both channels should be silent + QCOMPARE(ab.silenceFlags().all(), true); + + // Introduce noise to a frame in the right channel + ab.group(0).buffer(1)[5] = 1.f; + + // Update the left channel - returns true because the updated channel is silent + QCOMPARE(ab.updateSilenceFlags(0b01), true); + + // Silence flags remain the same since the non-silent channel was not updated + QCOMPARE(ab.silenceFlags().all(), true); + + // Now update the right channel - returns false since the updated channel is not silent + QCOMPARE(ab.updateSilenceFlags(0b10), false); + + // The silence flag for the right channel should now be cleared + QCOMPARE(ab.silenceFlags()[0], true); // left channel + QCOMPARE(ab.silenceFlags()[1], false); // right channel + QCOMPARE(ab.silenceFlags()[2], true); // unused 3rd channel + // eab. + + // Updating both channels returns false since one of them is non-silent + QCOMPARE(ab.updateSilenceFlags(0b11), false); + } + + //!
Verifies that the `updateSilenceFlags` method works across channel groups + void UpdateSilenceFlags_WorksWithGroups() + { + auto ab = AudioBuffer{10, 0}; + ab.enableSilenceTracking(true); + + ab.addGroup(3); + ab.addGroup(1); + + // All channels should be silent + QCOMPARE(ab.silenceFlags().all(), true); + + // Introduce noise to a frame in the 2nd channel of the 1st group + ab.group(0).buffer(1)[5] = 1.f; + + // Introduce noise to a frame in the 1st channel of the 2nd group + ab.group(1).buffer(0)[5] = 1.f; + + // Update the two silent channels - returns true because both are silent + QCOMPARE(ab.updateSilenceFlags(0b0101), true); + + // Silence flags remain the same since the non-silent channels were not updated + QCOMPARE(ab.silenceFlags().all(), true); + + // Now update the 3rd channel of the 1st group and the 1st channel of the 2nd group + // Returns false since one of the updated channels is not silent + QCOMPARE(ab.updateSilenceFlags(0b1100), false); + + // The silence flag for the 1st channel of the 2nd group should now be cleared, + // but the 2nd channel of the 1st group should still be marked silent since + // it has not been updated yet. + QCOMPARE(ab.silenceFlags()[0], true); // group 1, channel 1 + QCOMPARE(ab.silenceFlags()[1], true); // group 1, channel 2 + QCOMPARE(ab.silenceFlags()[2], true); // group 1, channel 3 + QCOMPARE(ab.silenceFlags()[3], false); // group 2, channel 1 + QCOMPARE(ab.silenceFlags()[4], true); // unused 5th channel + // eab. + + // Now update group 1, channel 2 + QCOMPARE(ab.updateSilenceFlags(0b0010), false); + + QCOMPARE(ab.silenceFlags()[0], true); // group 1, channel 1 + QCOMPARE(ab.silenceFlags()[1], false); // group 1, channel 2 + QCOMPARE(ab.silenceFlags()[2], true); // group 1, channel 3 + QCOMPARE(ab.silenceFlags()[3], false); // group 2, channel 1 + QCOMPARE(ab.silenceFlags()[4], true); // unused 5th channel + // eab. + } + + //! Verifies that the `updateSilenceFlags` method updates a silent channel's flags + //! 
from non-silent to silent when selected for update. + void UpdateSilenceFlags_UpdatesFromNonSilenceToSilence() + { + auto ab = AudioBuffer{10, 2}; + ab.enableSilenceTracking(true); + + QCOMPARE(ab.silenceFlags().all(), true); + + // Assume 2nd channel is non-silent + ab.assumeNonSilent(1); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + + // Update 1st channel - does nothing + QCOMPARE(ab.updateSilenceFlags(0b01), true); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + QCOMPARE(ab.silenceFlags()[2], true); // unused 3rd channel + + // Update 2nd channel (non-silent to silent) + // Returns true because the channel's audio data is silent + QCOMPARE(ab.updateSilenceFlags(0b10), true); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], true); + QCOMPARE(ab.silenceFlags()[2], true); // unused 3rd channel + + // Again, assume 2nd channel is non-silent + ab.assumeNonSilent(1); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + QCOMPARE(ab.silenceFlags()[2], true); // unused 3rd channel + + // Update both channels + // Returns true because both channels' audio data is silent + QCOMPARE(ab.updateSilenceFlags(0b11), true); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], true); + QCOMPARE(ab.silenceFlags()[2], true); // unused 3rd channel + } + + //! Verifies that `updateSilenceFlags` marks selected channels as non-silent when + //! silence tracking is disabled. + void UpdateSilenceFlags_NonSilentWhenSilenceTrackingDisabled() + { + auto ab = AudioBuffer{10, 2}; + ab.enableSilenceTracking(false); + + QCOMPARE(ab.silenceFlags().all(), true); + + // Now update the 2nd channel. The audio data is actually silent, but silence tracking + // is disabled so it must assume the updated channel is non-silent just to be safe. 
+ QCOMPARE(ab.updateSilenceFlags(0b10), false); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + QCOMPARE(ab.silenceFlags()[2], true); // unused 3rd channel + + // Again with both channels + QCOMPARE(ab.updateSilenceFlags(0b11), false); + QCOMPARE(ab.silenceFlags()[0], false); + QCOMPARE(ab.silenceFlags()[1], false); + QCOMPARE(ab.silenceFlags()[2], true); // unused 3rd channel + } + + //! Verifies that when no selected channels are passed to `updateSilenceFlags`, + //! it returns true indicating that all selected channels are silent. + void UpdateSilenceFlags_NoSelectionIsSilent() + { + auto ab = AudioBuffer{10, 2}; + + // First, with silence tracking + ab.enableSilenceTracking(true); + QCOMPARE(ab.updateSilenceFlags(0), true); + + // Should produce the same result without silence tracking + ab.enableSilenceTracking(false); + QCOMPARE(ab.updateSilenceFlags(0), true); + } + + //! Verifies that `updateAllSilenceFlags` updates all silence flags + //! when silence tracking is enabled. 
+ void UpdateAllSilenceFlags_SilenceTrackingEnabled() + { + auto ab = AudioBuffer{10, 2}; + ab.addGroup(2); + ab.enableSilenceTracking(true); + + QCOMPARE(ab.updateAllSilenceFlags(), true); + QCOMPARE(ab.silenceFlags().all(), true); + + // Introduce noise to a frame in the 1st channel of the 1st group + ab.group(0).buffer(0)[5] = 1.f; + + // Introduce noise to a frame in the 2nd channel of the 2nd group + ab.group(1).buffer(1)[5] = 1.f; + + // Those 2 channels should be marked silent after updating all channels + QCOMPARE(ab.updateAllSilenceFlags(), false); + QCOMPARE(ab.silenceFlags()[0], false); // channel 1 + QCOMPARE(ab.silenceFlags()[1], true); // channel 2 + QCOMPARE(ab.silenceFlags()[2], true); // channel 3 + QCOMPARE(ab.silenceFlags()[3], false); // channel 4 + QCOMPARE(ab.silenceFlags()[4], true); // unused 5th channel + + // Silence the frame in the 2nd channel of the 2nd group + ab.group(1).buffer(1)[5] = 0.f; + + // Now only the 1st channel should be marked silent after updating all channels + QCOMPARE(ab.updateAllSilenceFlags(), false); + QCOMPARE(ab.silenceFlags()[0], false); // channel 1 + QCOMPARE(ab.silenceFlags()[1], true); // channel 2 + QCOMPARE(ab.silenceFlags()[2], true); // channel 3 + QCOMPARE(ab.silenceFlags()[3], true); // channel 4 + QCOMPARE(ab.silenceFlags()[4], true); // unused 5th channel + } + + //! Verifies that `updateAllSilenceFlags` marks all silence flags + //! for used channels as non-silent when silence tracking is disabled. + void UpdateAllSilenceFlags_SilenceTrackingDisabled() + { + auto ab = AudioBuffer{10, 2}; + ab.addGroup(2); + ab.enableSilenceTracking(false); + + QCOMPARE(ab.updateAllSilenceFlags(), false); + + QCOMPARE(ab.silenceFlags()[0], false); // channel 1 + QCOMPARE(ab.silenceFlags()[1], false); // channel 2 + QCOMPARE(ab.silenceFlags()[2], false); // channel 3 + QCOMPARE(ab.silenceFlags()[3], false); // channel 4 + QCOMPARE(ab.silenceFlags()[4], true); // unused 5th channel + } + + //! 
Verifies that when there are no channels, `updateAllSilenceFlags` + //! returns true indicating that all channels are silent. + void UpdateAllSilenceFlags_NoChannelsIsSilent() + { + auto ab = AudioBuffer{10, 0}; + + // First, with silence tracking + ab.enableSilenceTracking(true); + QCOMPARE(ab.updateAllSilenceFlags(), true); + + // Should produce the same result without silence tracking + ab.enableSilenceTracking(false); + QCOMPARE(ab.updateAllSilenceFlags(), true); + } + + //! Verifies that `hasSignal` returns true if any of the selected + //! channels are non-silent. + void HasSignal() + { + auto ab = AudioBuffer{10, 2}; + ab.enableSilenceTracking(true); + + // Add a 2nd stereo group + QVERIFY(ab.addGroup(2) != nullptr); + + // No signal since all channels are silent + QCOMPARE(ab.hasSignal(0b1111), false); + + // Assume both left channels are non-silent + ab.assumeNonSilent(0); + ab.assumeNonSilent(2); + + // Check if any channels are non-silent + QCOMPARE(ab.hasSignal(0b1111), true); + + // Check if either of the left channels are non-silent + QCOMPARE(ab.hasSignal(0b0101), true); + + // Check if either of the right channels are non-silent + QCOMPARE(ab.hasSignal(0b1010), false); + + // Check if either channel in the 1st group are non-silent + QCOMPARE(ab.hasSignal(0b0011), true); + + // Check if the 5th channel (an unused channel) is non-silent + QCOMPARE(ab.hasSignal(0b10000), false); + } + + //! 
Verifies that the `sanitize` method only silences channels containing Inf or NaN + void Sanitize_SilencesOnlyInfAndNaN() + { + lmms::MixHelpers::setNaNHandler(true); + + auto ab = AudioBuffer{10, 2}; + ab.enableSilenceTracking(true); + + // Add a 2nd stereo group + QVERIFY(ab.addGroup(2) != nullptr); + + // Should have no effect when all buffers are silenced + QCOMPARE(ab.silenceFlags().all(), true); + ab.sanitize(0b1111); + QCOMPARE(ab.silenceFlags().all(), true); + + // Make left channel of 1st channel group + // contain an Inf, and force the channel to non-silent + ab.group(0).buffer(0)[5] = std::numeric_limits::infinity(); + ab.assumeNonSilent(0); + + // Make right channel of 1st channel group non-silent too, + // but using a valid value + ab.group(0).buffer(1)[5] = 1.f; + ab.assumeNonSilent(1); + + // Sanitize only the left channel + ab.sanitize(0b0001); + + // The left channel's buffer should be silenced, + // while the right channel should be unaffected + QCOMPARE(ab.group(0).buffer(0)[5], 0.f); + QCOMPARE(ab.group(0).buffer(1)[5], 1.f); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + QCOMPARE(ab.silenceFlags()[2], true); + QCOMPARE(ab.silenceFlags()[3], true); + QCOMPARE(ab.silenceFlags()[4], true); // unused 5th channel + + // Try again + ab.group(0).buffer(0)[5] = std::numeric_limits::quiet_NaN(); + ab.assumeNonSilent(0); + + // This time, sanitize both channels of the 1st channel group + ab.sanitize(0b0011); + + // Again, the left channel's buffer should be silence, + // while the right channel should be unaffected + QCOMPARE(ab.group(0).buffer(0)[5], 0.f); + QCOMPARE(ab.group(0).buffer(1)[5], 1.f); + QCOMPARE(ab.silenceFlags()[0], true); + QCOMPARE(ab.silenceFlags()[1], false); + QCOMPARE(ab.silenceFlags()[2], true); + QCOMPARE(ab.silenceFlags()[3], true); + QCOMPARE(ab.silenceFlags()[4], true); // unused 5th channel + } + + //! Verifies that the `silenceChannels` method silences the selected channels + //! 
and updates their silence flags + void SilenceChannels_SilencesSelectedChannels() + { + auto ab = AudioBuffer{10, 2}; + ab.enableSilenceTracking(true); + + // Add a 2nd stereo group + QVERIFY(ab.addGroup(2) != nullptr); + + // Should have no effect when all buffers are silent + QCOMPARE(ab.silenceFlags().all(), true); + ab.silenceChannels(0b1111); + QCOMPARE(ab.silenceFlags().all(), true); + + // Make left channel of 2nd channel group contain + // a non-silent value, and force the channel to be non-silent + ab.group(1).buffer(0)[5] = 1.f; + ab.assumeNonSilent(2); + + // Silence only the left channel + ab.silenceChannels(0b0100); + + QCOMPARE(ab.silenceFlags()[0], true); // not selected + QCOMPARE(ab.silenceFlags()[1], true); // not selected + QCOMPARE(ab.silenceFlags()[2], true); // updated! + QCOMPARE(ab.silenceFlags()[3], true); // not selected + + // Make right channel of 2nd channel group contain + // a non-silent value, and force the channel to be non-silent + ab.group(1).buffer(1)[5] = 1.f; + ab.assumeNonSilent(3); + + // Silence only the right channel + ab.silenceChannels(0b1000); + + QCOMPARE(ab.silenceFlags()[0], true); // not selected + QCOMPARE(ab.silenceFlags()[1], true); // not selected + QCOMPARE(ab.silenceFlags()[2], true); // not selected + QCOMPARE(ab.silenceFlags()[3], true); // updated! 
+ + // Make right channel of 1st channel group and + // both channels of 2nd channel group contain + // a non-silent value, and force those channels to be non-silent + ab.group(1).buffer(1)[5] = 1.f; + ab.assumeNonSilent(1); + ab.group(1).buffer(0)[5] = 1.f; + ab.assumeNonSilent(2); + ab.group(1).buffer(1)[5] = 1.f; + ab.assumeNonSilent(3); + + // Silence both channels of the 2nd channel group, + // plus the already-silent left channel of the 1st group + ab.silenceChannels(0b1101); + + QCOMPARE(ab.silenceFlags()[0], true); // selected, but already silent + QCOMPARE(ab.silenceFlags()[1], false); // not selected, remains non-silent + QCOMPARE(ab.silenceFlags()[2], true); // updated! + QCOMPARE(ab.silenceFlags()[3], true); // updated! + } +}; + +QTEST_GUILESS_MAIN(AudioBufferTest) +#include "AudioBufferTest.moc"