-rw-r--r--  al/buffer.cpp                12
-rw-r--r--  al/source.cpp                 9
-rw-r--r--  alc/alu.cpp                  14
-rw-r--r--  alc/effects/convolution.cpp   1
-rw-r--r--  core/buffer_storage.cpp       1
-rw-r--r--  core/buffer_storage.h        28
-rw-r--r--  core/uhjfilter.h              3
-rw-r--r--  core/voice.cpp               28
-rw-r--r--  core/voice.h                  1
9 files changed, 64 insertions, 33 deletions
diff --git a/al/buffer.cpp b/al/buffer.cpp
index 550b6247..fe944ead 100644
--- a/al/buffer.cpp
+++ b/al/buffer.cpp
@@ -522,10 +522,8 @@ void LoadData(ALCcontext *context, ALbuffer *ALBuf, ALsizei freq, ALuint size,
SETERR_RETURN(context, AL_INVALID_VALUE,, "Invalid unpack alignment %u for %s samples",
unpackalign, NameFromUserFmtType(SrcType));
- const ALuint ambiorder{(*DstChannels == FmtBFormat2D || *DstChannels == FmtBFormat3D) ?
- ALBuf->UnpackAmbiOrder :
- ((*DstChannels == FmtUHJ2 || *DstChannels == FmtUHJ3 || *DstChannels == FmtUHJ4) ? 1 :
- 0)};
+ const ALuint ambiorder{IsBFormat(*DstChannels) ? ALBuf->UnpackAmbiOrder :
+ (IsUHJ(*DstChannels) ? 1 : 0)};
if((access&AL_PRESERVE_DATA_BIT_SOFT))
{
@@ -643,10 +641,8 @@ void PrepareCallback(ALCcontext *context, ALbuffer *ALBuf, ALsizei freq,
if UNLIKELY(!DstType)
SETERR_RETURN(context, AL_INVALID_ENUM,, "Unsupported callback format");
- const ALuint ambiorder{(*DstChannels == FmtBFormat2D || *DstChannels == FmtBFormat3D) ?
- ALBuf->UnpackAmbiOrder :
- ((*DstChannels == FmtUHJ2 || *DstChannels == FmtUHJ3 || *DstChannels == FmtUHJ4) ? 1 :
- 0)};
+ const ALuint ambiorder{IsBFormat(*DstChannels) ? ALBuf->UnpackAmbiOrder :
+ (IsUHJ(*DstChannels) ? 1 : 0)};
constexpr uint line_size{BufferLineSize + MaxPostVoiceLoad};
al::vector<al::byte,16>(FrameSizeFromFmt(*DstChannels, *DstType, ambiorder) *
diff --git a/al/source.cpp b/al/source.cpp
index 7ba08970..d8584956 100644
--- a/al/source.cpp
+++ b/al/source.cpp
@@ -496,10 +496,8 @@ void InitVoice(Voice *voice, ALsource *source, ALbufferQueueItem *BufferList, AL
voice->mFmtType = buffer->mType;
voice->mNumChannels = buffer->channelsFromFmt();
voice->mFrameSize = buffer->frameSizeFromFmt();
- voice->mAmbiLayout = (buffer->mChannels == FmtUHJ2 || buffer->mChannels == FmtUHJ3
- || buffer->mChannels == FmtUHJ4) ? AmbiLayout::FuMa : buffer->mAmbiLayout;
- voice->mAmbiScaling = (buffer->mChannels == FmtUHJ2 || buffer->mChannels == FmtUHJ3
- || buffer->mChannels == FmtUHJ4) ? AmbiScaling::UHJ : buffer->mAmbiScaling;
+ voice->mAmbiLayout = buffer->isUhj() ? AmbiLayout::FuMa : buffer->mAmbiLayout;
+ voice->mAmbiScaling = buffer->isUhj() ? AmbiScaling::UHJ : buffer->mAmbiScaling;
voice->mAmbiOrder = buffer->mAmbiOrder;
if(buffer->mCallback) voice->mFlags |= VoiceIsCallback;
@@ -509,8 +507,7 @@ void InitVoice(Voice *voice, ALsource *source, ALbufferQueueItem *BufferList, AL
/* Even if storing really high order ambisonics, we only mix channels for
* orders up to MaxAmbiOrder. The rest are simply dropped.
*/
- ALuint num_channels{(buffer->mChannels == FmtUHJ2) ? 3 :
- ChannelsFromFmt(buffer->mChannels, minu(buffer->mAmbiOrder, MaxAmbiOrder))};
+ ALuint num_channels{buffer->mixerChannelsFromFmt()};
if UNLIKELY(num_channels > device->mSampleData.size())
{
ERR("Unexpected channel count: %u (limit: %zu, %d:%d)\n", num_channels,
diff --git a/alc/alu.cpp b/alc/alu.cpp
index 4ec4154e..693288df 100644
--- a/alc/alu.cpp
+++ b/alc/alu.cpp
@@ -771,6 +771,7 @@ void CalcPanningAndFilters(Voice *voice, const float xpos, const float ypos, con
case FmtUHJ2:
case FmtUHJ3:
case FmtUHJ4:
+ case FmtSuperStereo:
DirectChannels = DirectMode::Off;
break;
}
@@ -778,11 +779,12 @@ void CalcPanningAndFilters(Voice *voice, const float xpos, const float ypos, con
voice->mFlags &= ~(VoiceHasHrtf | VoiceHasNfc);
if(voice->mFmtChannels == FmtBFormat2D || voice->mFmtChannels == FmtBFormat3D
|| voice->mFmtChannels == FmtUHJ2 || voice->mFmtChannels == FmtUHJ3
- || voice->mFmtChannels == FmtUHJ4)
+ || voice->mFmtChannels == FmtUHJ4 || voice->mFmtChannels == FmtSuperStereo)
{
/* Special handling for B-Format sources. */
- if(Device->AvgSpeakerDist > 0.0f && voice->mFmtChannels != FmtUHJ2)
+ if(Device->AvgSpeakerDist > 0.0f && voice->mFmtChannels != FmtUHJ2
+ && voice->mFmtChannels != FmtSuperStereo)
{
if(!(Distance > std::numeric_limits<float>::epsilon()))
{
@@ -883,7 +885,7 @@ void CalcPanningAndFilters(Voice *voice, const float xpos, const float ypos, con
*/
const uint8_t *index_map{
(voice->mFmtChannels == FmtBFormat2D || voice->mFmtChannels == FmtUHJ2
- || voice->mFmtChannels == FmtUHJ3) ?
+ || voice->mFmtChannels == FmtUHJ3 || voice->mFmtChannels == FmtSuperStereo) ?
GetAmbi2DLayout(voice->mAmbiLayout).data() :
GetAmbiLayout(voice->mAmbiLayout).data()};
@@ -1539,10 +1541,8 @@ void CalcSourceParams(Voice *voice, ContextBase *context, bool force)
}
if((voice->mProps.DirectChannels != DirectMode::Off && voice->mFmtChannels != FmtMono
- && voice->mFmtChannels != FmtBFormat2D && voice->mFmtChannels != FmtBFormat3D
- && voice->mFmtChannels != FmtUHJ2 && voice->mFmtChannels != FmtUHJ3
- && voice->mFmtChannels != FmtUHJ3)
- || voice->mProps.mSpatializeMode==SpatializeMode::Off
+ && !IsAmbisonic(voice->mFmtChannels))
+ || voice->mProps.mSpatializeMode == SpatializeMode::Off
|| (voice->mProps.mSpatializeMode==SpatializeMode::Auto && voice->mFmtChannels != FmtMono))
CalcNonAttnSourceParams(voice, &voice->mProps, context);
else
diff --git a/alc/effects/convolution.cpp b/alc/effects/convolution.cpp
index 300ddb17..a1d49be4 100644
--- a/alc/effects/convolution.cpp
+++ b/alc/effects/convolution.cpp
@@ -444,6 +444,7 @@ void ConvolutionState::update(const ContextBase *context, const EffectSlot *slot
switch(mChannels)
{
case FmtMono: chanmap = MonoMap; break;
+ case FmtSuperStereo:
case FmtStereo: chanmap = StereoMap; break;
case FmtRear: chanmap = RearMap; break;
case FmtQuad: chanmap = QuadMap; break;
diff --git a/core/buffer_storage.cpp b/core/buffer_storage.cpp
index 5179db13..1c80e7ef 100644
--- a/core/buffer_storage.cpp
+++ b/core/buffer_storage.cpp
@@ -36,6 +36,7 @@ uint ChannelsFromFmt(FmtChannels chans, uint ambiorder) noexcept
case FmtUHJ2: return 2;
case FmtUHJ3: return 3;
case FmtUHJ4: return 4;
+ case FmtSuperStereo: return 2;
}
return 0;
}
diff --git a/core/buffer_storage.h b/core/buffer_storage.h
index 64943453..091882f9 100644
--- a/core/buffer_storage.h
+++ b/core/buffer_storage.h
@@ -4,6 +4,8 @@
#include <atomic>
#include "albyte.h"
+#include "alnumeric.h"
+#include "ambidefs.h"
using uint = unsigned int;
@@ -30,6 +32,7 @@ enum FmtChannels : unsigned char {
FmtUHJ2, /* 2-channel UHJ, aka "BHJ", stereo-compatible */
FmtUHJ3, /* 3-channel UHJ, aka "THJ" */
FmtUHJ4, /* 4-channel UHJ, aka "PHJ" */
+ FmtSuperStereo, /* Stereo processed with Super Stereo. */
};
enum class AmbiLayout : unsigned char {
@@ -48,6 +51,21 @@ uint ChannelsFromFmt(FmtChannels chans, uint ambiorder) noexcept;
inline uint FrameSizeFromFmt(FmtChannels chans, FmtType type, uint ambiorder) noexcept
{ return ChannelsFromFmt(chans, ambiorder) * BytesFromFmt(type); }
+constexpr bool IsBFormat(FmtChannels chans) noexcept
+{ return chans == FmtBFormat2D || chans == FmtBFormat3D; }
+
+/* Super Stereo is considered part of the UHJ family here, since it goes
+ * through processing similar to UHJ's, both result in a B-Format signal, and
+ * it needs the same consideration as BHJ (a three-channel result from only
+ * two input channels).
+ */
+constexpr bool IsUHJ(FmtChannels chans) noexcept
+{ return chans == FmtUHJ2 || chans == FmtUHJ3 || chans == FmtUHJ4 || chans == FmtSuperStereo; }
+
+/** Ambisonic formats are either B-Format or UHJ formats. */
+constexpr bool IsAmbisonic(FmtChannels chans) noexcept
+{ return IsBFormat(chans) || IsUHJ(chans); }
+
using CallbackType = int(*)(void*, void*, int);
@@ -69,8 +87,14 @@ struct BufferStorage {
{ return ChannelsFromFmt(mChannels, mAmbiOrder); }
inline uint frameSizeFromFmt() const noexcept { return channelsFromFmt() * bytesFromFmt(); }
- inline bool isBFormat() const noexcept
- { return mChannels == FmtBFormat2D || mChannels == FmtBFormat3D; }
+ inline uint mixerChannelsFromFmt() const noexcept
+ {
+ if(mChannels == FmtUHJ2 || mChannels == FmtSuperStereo) return 3;
+ return ChannelsFromFmt(mChannels, minu(mAmbiOrder, MaxAmbiOrder));
+ }
+
+ inline bool isBFormat() const noexcept { return IsBFormat(mChannels); }
+ inline bool isUhj() const noexcept { return IsUHJ(mChannels); }
};
#endif /* CORE_BUFFER_STORAGE_H */
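The new constexpr helpers let call sites test for a format family instead of enumerating every enum value. A minimal sketch of their semantics (not part of the patch; it only assumes core/buffer_storage.h is on the include path and restates what the definitions above imply):

    #include "core/buffer_storage.h"

    // Sketch only: compile-time checks mirroring the constexpr helpers above.
    static_assert(IsBFormat(FmtBFormat2D) && IsBFormat(FmtBFormat3D), "B-Format formats");
    static_assert(IsUHJ(FmtUHJ2) && IsUHJ(FmtUHJ3) && IsUHJ(FmtUHJ4), "UHJ formats");
    static_assert(IsUHJ(FmtSuperStereo), "Super Stereo is treated as part of the UHJ family");
    static_assert(!IsUHJ(FmtStereo) && !IsAmbisonic(FmtMono), "plain PCM formats are excluded");
    static_assert(IsAmbisonic(FmtBFormat3D) && IsAmbisonic(FmtUHJ4), "both families are ambisonic");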
diff --git a/core/uhjfilter.h b/core/uhjfilter.h
index 574cb800..9e692599 100644
--- a/core/uhjfilter.h
+++ b/core/uhjfilter.h
@@ -78,6 +78,9 @@ struct UhjDecoder : public UhjFilterBase {
void decodeStereo(const al::span<BufferLine> samples, const size_t offset,
const size_t samplesToDo, const size_t forwardSamples);
+ using DecoderFunc = void (UhjDecoder::*)(const al::span<BufferLine> samples,
+ const size_t offset, const size_t samplesToDo, const size_t forwardSamples);
+
DEF_NEWDEL(UhjDecoder)
};
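DecoderFunc is a pointer-to-member-function type: it is what lets Voice::prepare() pick between UhjDecoder::decode and UhjDecoder::decodeStereo at runtime, and lets Voice::mix() invoke whichever was chosen. A self-contained stand-in showing the dispatch pattern (the Decoder class and its names here are illustrative, not the real UhjDecoder interface):

    #include <cstdio>

    struct Decoder {
        void decode() { std::puts("full UHJ decode"); }
        void decodeStereo() { std::puts("Super Stereo decode"); }
        using Func = void (Decoder::*)();  // same shape as UhjDecoder::DecoderFunc
    };

    int main()
    {
        const bool superStereo = true;  // stands in for mFmtChannels == FmtSuperStereo
        Decoder::Func func = superStereo ? &Decoder::decodeStereo : &Decoder::decode;
        Decoder d;
        (d.*func)();  // dispatch through the member pointer, as Voice::mix does via ((*mDecoder).*mDecoderFunc)
    }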
diff --git a/core/voice.cpp b/core/voice.cpp
index a8c5b281..923d7275 100644
--- a/core/voice.cpp
+++ b/core/voice.cpp
@@ -207,7 +207,7 @@ void LoadSamples(const al::span<DeviceBase::MixerBufferLine> dstSamples, const s
#define HANDLE_FMT(T) case T: \
{ \
constexpr size_t sampleSize{sizeof(al::FmtTypeTraits<T>::Type)}; \
- if(srcchans == FmtUHJ2) \
+ if(srcchans == FmtUHJ2 || srcchans == FmtSuperStereo) \
{ \
src += srcOffset*2u*sampleSize; \
al::LoadSampleArray<T>(dstSamples[0].data() + dstOffset, src, \
@@ -510,8 +510,7 @@ void Voice::mix(const State vstate, ContextBase *Context, const uint SamplesToDo
Device->mSampleData.data() + Device->mSampleData.size() - mChans.size(),
mChans.size()};
const uint PostPadding{MaxResamplerEdge +
- ((mFmtChannels==FmtUHJ2 || mFmtChannels==FmtUHJ3 || mFmtChannels==FmtUHJ4)
- ? uint{UhjDecoder::sFilterDelay} : 0u)};
+ (mDecoder ? uint{UhjDecoder::sFilterDelay} : 0u)};
uint buffers_done{0u};
uint OutPos{0u};
do {
@@ -628,7 +627,8 @@ void Voice::mix(const State vstate, ContextBase *Context, const uint SamplesToDo
{
const size_t srcOffset{(increment*DstBufferSize + DataPosFrac)>>MixerFracBits};
SrcBufferSize = SrcBufferSize - PostPadding + MaxResamplerEdge;
- mDecoder->decode(MixingSamples, MaxResamplerEdge, SrcBufferSize, srcOffset);
+ ((*mDecoder).*mDecoderFunc)(MixingSamples, MaxResamplerEdge, SrcBufferSize,
+ srcOffset);
}
}
@@ -814,10 +814,17 @@ void Voice::mix(const State vstate, ContextBase *Context, const uint SamplesToDo
void Voice::prepare(DeviceBase *device)
{
- if((mFmtChannels == FmtUHJ2 || mFmtChannels == FmtUHJ3 || mFmtChannels==FmtUHJ4) && !mDecoder)
+ if(IsUHJ(mFmtChannels))
+ {
mDecoder = std::make_unique<UhjDecoder>();
- else if(mFmtChannels != FmtUHJ2 && mFmtChannels != FmtUHJ3 && mFmtChannels != FmtUHJ4)
+ mDecoderFunc = (mFmtChannels == FmtSuperStereo) ? &UhjDecoder::decodeStereo
+ : &UhjDecoder::decode;
+ }
+ else
+ {
mDecoder = nullptr;
+ mDecoderFunc = nullptr;
+ }
/* Clear the stepping value explicitly so the mixer knows not to mix this
* until the update gets applied.
@@ -833,7 +840,8 @@ void Voice::prepare(DeviceBase *device)
if(mAmbiOrder && device->mAmbiOrder > mAmbiOrder)
{
const uint8_t *OrderFromChan{(mFmtChannels == FmtBFormat2D
- || mFmtChannels == FmtUHJ2 || mFmtChannels == FmtUHJ3) ?
+ || mFmtChannels == FmtUHJ2 || mFmtChannels == FmtUHJ3
+ || mFmtChannels == FmtSuperStereo) ?
AmbiIndex::OrderFrom2DChannel().data() : AmbiIndex::OrderFromChannel().data()};
const auto scales = AmbiScale::GetHFOrderScales(mAmbiOrder, device->mAmbiOrder);
@@ -850,9 +858,9 @@ void Voice::prepare(DeviceBase *device)
* use different shelf filters after mixing it and with any old speaker
* setup the user has. To make this work, we apply the expected shelf
* filters for decoding UHJ2 to quad (only needs LF scaling), and act
- * as if those 4 channels are encoded back onto first-order B-Format,
- * which then upsamples to higher order as normal (only needs HF
- * scaling).
+ * as if those 4 quad channels are encoded right back onto first-order
+ * B-Format, which then upsamples to higher order as normal (only needs
+ * HF scaling).
*
* This isn't perfect, but without an entirely separate and limited
* UHJ2 path, it's better than nothing.
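Taken together, the voice changes give Super Stereo the same plumbing as BHJ: two channels are loaded from the buffer, decodeStereo produces a three-channel first-order 2D B-Format signal (W, X, Y), and the mixer pans it like any other ambisonic source. A rough check of the channel bookkeeping (sketch only; it assumes BufferStorage can be default-constructed and that its public members are as shown in the diff above):

    #include <cassert>
    #include "core/buffer_storage.h"

    int main()
    {
        BufferStorage buf{};
        buf.mChannels = FmtSuperStereo;
        buf.mAmbiOrder = 1;                      // Super Stereo decodes to first order
        assert(buf.channelsFromFmt() == 2);      // channels stored in the buffer
        assert(buf.mixerChannelsFromFmt() == 3); // channels the mixer sees: W, X, Y
        assert(buf.isUhj() && !buf.isBFormat());
    }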
diff --git a/core/voice.h b/core/voice.h
index 94385884..ba5e82cc 100644
--- a/core/voice.h
+++ b/core/voice.h
@@ -214,6 +214,7 @@ struct Voice {
uint mAmbiOrder;
std::unique_ptr<UhjDecoder> mDecoder;
+ UhjDecoder::DecoderFunc mDecoderFunc{};
/** Current target parameters used for mixing. */
uint mStep{0};