Make sosci plugin processor process in buffers rather than samples

pull/332/head
James H Ball 2025-10-05 17:14:11 +01:00
rodzic 5d37d20146
commit 5d919264da
6 zmienionych plików z 147 dodań i 93 usunięć

Wyświetl plik

@@ -16,96 +16,155 @@ SosciAudioProcessor::~SosciAudioProcessor() {}
void SosciAudioProcessor::processBlock(juce::AudioBuffer<float>& buffer, juce::MidiBuffer& midiMessages) {
juce::ScopedNoDenormals noDenormals;
auto input = getBusBuffer(buffer, true, 0);
auto output = getBusBuffer(buffer, false, 0);
float EPSILON = 0.0001f;
juce::AudioBuffer<float> input = getBusBuffer(buffer, true, 0);
juce::AudioBuffer<float> output = getBusBuffer(buffer, false, 0);
const float EPSILON = 0.0001f;
const int numSamples = input.getNumSamples();
midiMessages.clear();
auto inputArray = input.getArrayOfWritePointers();
auto outputArray = output.getArrayOfWritePointers();
// Get source buffer (either from WAV parser or input)
juce::AudioBuffer<float> sourceBuffer;
juce::SpinLock::ScopedLockType lock2(wavParserLock);
bool readingFromWav = wavParser.isInitialised();
if (readingFromWav) {
wavBuffer.setSize(6, numSamples, false, true, false);
wavBuffer.clear();
wavParser.processBlock(wavBuffer);
sourceBuffer = juce::AudioBuffer<float>(wavBuffer.getArrayOfWritePointers(), wavBuffer.getNumChannels(), numSamples);
} else {
sourceBuffer = juce::AudioBuffer<float>(input.getArrayOfWritePointers(), input.getNumChannels(), numSamples);
}
// Resize working buffer with 6 channels: x, y, z/brightness, r, g, b
workBuffer.setSize(6, numSamples, false, true, false);
auto sourceArray = sourceBuffer.getArrayOfReadPointers();
auto workArray = workBuffer.getArrayOfWritePointers();
for (int sample = 0; sample < input.getNumSamples(); ++sample) {
osci::Point point;
if (readingFromWav) {
point = wavParser.getSample();
// Copy X and Y channels
for (int ch = 0; ch < 2; ++ch) {
if (sourceBuffer.getNumChannels() > ch) {
juce::FloatVectorOperations::copy(workArray[ch], sourceArray[ch], numSamples);
} else {
float x = input.getNumChannels() > 0 ? inputArray[0][sample] : 0.0f;
float y = input.getNumChannels() > 1 ? inputArray[1][sample] : 0.0f;
float zAsBrightnessOrR = 1.0f;
if (input.getNumChannels() > 2 && !forceDisableBrightnessInput) {
float zChan = inputArray[2][sample];
if (!brightnessEnabled && zChan > EPSILON) brightnessEnabled = true;
if (brightnessEnabled) zAsBrightnessOrR = zChan;
}
// RGB detection: if channels 3 or 4 present and not forced off, treat as RGB mode.
float gChan = 0.0f, bChan = 0.0f;
bool haveG = input.getNumChannels() > 3;
bool haveB = input.getNumChannels() > 4;
if (!forceDisableRgbInput && (haveG || haveB)) {
float gIn = haveG ? inputArray[3][sample] : 0.0f;
float bIn = haveB ? inputArray[4][sample] : 0.0f;
// Enable RGB only when we actually receive signal
if (!rgbEnabled && (std::abs(gIn) > EPSILON || std::abs(bIn) > EPSILON)) {
rgbEnabled = true;
}
if (rgbEnabled) {
gChan = gIn;
bChan = bIn;
}
}
// Build point:
// - In RGB mode: use z as R, and channels 3/4 as G/B
// - Otherwise: use z as brightness and leave RGB at defaults
if (rgbEnabled && !forceDisableRgbInput) {
point = osci::Point(x, y, 1.0f, zAsBrightnessOrR, gChan, bChan);
} else {
point = osci::Point(x, y, zAsBrightnessOrR, 1.0f, 0.0f, 0.0f);
}
juce::FloatVectorOperations::clear(workArray[ch], numSamples);
}
// Clamp brightness
point.z = juce::jlimit(0.0, 1.0, point.z);
for (auto& effect : permanentEffects) {
point = effect->apply(sample, point);
}
// Detect brightness mode: check if channel 2 has any signal > EPSILON
if (!brightnessEnabled && sourceBuffer.getNumChannels() > 2 && !forceDisableBrightnessInput) {
auto range = juce::FloatVectorOperations::findMinAndMax(sourceArray[2], numSamples);
if (range.getEnd() > EPSILON) {
brightnessEnabled = true;
}
}
// Detect RGB mode: check if channels 3 or 4 have any signal > EPSILON
bool haveG = sourceBuffer.getNumChannels() > 3;
bool haveB = sourceBuffer.getNumChannels() > 4;
if (!rgbEnabled && !forceDisableRgbInput && (haveG || haveB)) {
bool hasGSignal = false;
bool hasBSignal = false;
if (haveG) {
auto gRange = juce::FloatVectorOperations::findMinAndMax(sourceArray[3], numSamples);
hasGSignal = std::abs(gRange.getStart()) > EPSILON || std::abs(gRange.getEnd()) > EPSILON;
}
if (haveB) {
auto bRange = juce::FloatVectorOperations::findMinAndMax(sourceArray[4], numSamples);
hasBSignal = std::abs(bRange.getStart()) > EPSILON || std::abs(bRange.getEnd()) > EPSILON;
}
if (hasGSignal || hasBSignal) {
rgbEnabled = true;
}
}
// Populate remaining channels based on detected mode
if (rgbEnabled && !forceDisableRgbInput) {
// RGB mode: z=1.0, r=ch2, g=ch3, b=ch4
juce::FloatVectorOperations::fill(workArray[2], 1.0f, numSamples);
if (sourceBuffer.getNumChannels() > 2) {
juce::FloatVectorOperations::copy(workArray[3], sourceArray[2], numSamples);
} else {
juce::FloatVectorOperations::fill(workArray[3], 1.0f, numSamples);
}
if (haveG) {
juce::FloatVectorOperations::copy(workArray[4], sourceArray[3], numSamples);
} else {
juce::FloatVectorOperations::clear(workArray[4], numSamples);
}
if (haveB) {
juce::FloatVectorOperations::copy(workArray[5], sourceArray[4], numSamples);
} else {
juce::FloatVectorOperations::clear(workArray[5], numSamples);
}
} else {
// Brightness mode: z=ch2 or 1.0, r=1.0, g=0, b=0
if (brightnessEnabled && sourceBuffer.getNumChannels() > 2) {
juce::FloatVectorOperations::copy(workArray[2], sourceArray[2], numSamples);
} else {
juce::FloatVectorOperations::fill(workArray[2], 1.0f, numSamples);
}
juce::FloatVectorOperations::fill(workArray[3], 1.0f, numSamples);
juce::FloatVectorOperations::clear(workArray[4], numSamples);
juce::FloatVectorOperations::clear(workArray[5], numSamples);
}
// Clamp brightness channel
juce::FloatVectorOperations::clip(workBuffer.getWritePointer(2), workBuffer.getReadPointer(2), 0.0f, 1.0f, numSamples);
// this is the point that the visualiser will draw
// Apply effects
juce::AudioBuffer<float> effectBuffer(workBuffer.getArrayOfWritePointers(), 3, numSamples);
for (auto& effect : permanentEffects) {
effect->processBlock(effectBuffer, midiMessages);
}
// Process output sample-by-sample for visualiser, volume, clipping
auto outputArray = output.getArrayOfWritePointers();
for (int sample = 0; sample < numSamples; ++sample) {
osci::Point point(workArray[0][sample], workArray[1][sample], workArray[2][sample],
workArray[3][sample], workArray[4][sample], workArray[5][sample]);
threadManager.write(point, "VisualiserRenderer");
}
if (juce::JUCEApplication::isStandaloneApp()) {
point.scale(volume, volume, 1.0);
if (juce::JUCEApplication::isStandaloneApp()) {
// Scale output by volume
juce::FloatVectorOperations::multiply(workArray[0], workArray[0], volume.load(), numSamples);
juce::FloatVectorOperations::multiply(workArray[1], workArray[1], volume.load(), numSamples);
// clip
point.x = juce::jmax(-threshold, juce::jmin(threshold.load(), point.x));
point.y = juce::jmax(-threshold, juce::jmin(threshold.load(), point.y));
// Hard clip to threshold
float thresholdVal = threshold.load();
juce::FloatVectorOperations::clip(workArray[0], workArray[0], -thresholdVal, thresholdVal, numSamples);
juce::FloatVectorOperations::clip(workArray[1], workArray[1], -thresholdVal, thresholdVal, numSamples);
// Apply mute if active
if (muteParameter->getBoolValue()) {
point.x = 0.0;
point.y = 0.0;
// apply mute if active
if (muteParameter->getBoolValue()) {
juce::FloatVectorOperations::clear(workArray[0], numSamples);
juce::FloatVectorOperations::clear(workArray[1], numSamples);
}
// Copy to output for all channels available from work buffer
for (int ch = 0; ch < output.getNumChannels(); ++ch) {
if (workBuffer.getNumChannels() > ch) {
juce::FloatVectorOperations::copy(outputArray[ch], workArray[ch], numSamples);
} else {
juce::FloatVectorOperations::clear(outputArray[ch], numSamples);
}
}
// this is the point that the volume component will draw (i.e. post scale/clipping)
for (int sample = 0; sample < numSamples; ++sample) {
osci::Point point(workArray[0][sample], workArray[1][sample], workArray[2][sample],
workArray[3][sample], workArray[4][sample], workArray[5][sample]);
threadManager.write(point, "VolumeComponent");
}
if (output.getNumChannels() > 0) {
outputArray[0][sample] = point.x;
}
if (output.getNumChannels() > 1) {
outputArray[1][sample] = point.y;
}
if (output.getNumChannels() > 2) {
outputArray[2][sample] = point.z;
}
}
}
}
void SosciAudioProcessor::getStateInformation(juce::MemoryBlock& destData) {

Wyświetl plik

@@ -30,6 +30,11 @@ public:
juce::AudioProcessorEditor* createEditor() override;
private:
// Cached buffers to avoid reallocations in processBlock
juce::AudioBuffer<float> wavBuffer;
juce::AudioBuffer<float> workBuffer;
//==============================================================================
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (SosciAudioProcessor)
};

Wyświetl plik

@@ -149,7 +149,11 @@ osci::Point FileParser::nextSample(lua_State*& L, LuaVariables& vars) {
} else if (img != nullptr) {
return img->getSample();
} else if (wav != nullptr) {
return wav->getSample();
juce::AudioBuffer<float> pointBuffer(3, 1);
pointBuffer.clear();
wav->processBlock(pointBuffer);
auto* data = pointBuffer.getReadPointer(0);
return osci::Point(data[0], data[1], data[2]);
}
return osci::Point();

Wyświetl plik

@@ -11,7 +11,6 @@ bool WavParser::parse(std::unique_ptr<juce::InputStream> stream) {
if (stream == nullptr) {
return false;
}
counter = 0;
currentSample = 0;
juce::AudioFormatManager formatManager;
formatManager.registerBasicFormats();
@@ -56,9 +55,10 @@ void WavParser::setSampleRate(double sampleRate) {
currentSampleRate = sampleRate;
}
osci::Point WavParser::getSample() {
void WavParser::processBlock(juce::AudioBuffer<float> &buffer) {
if (!initialised || paused) {
return osci::Point();
buffer.clear();
return;
}
if (currentSampleRate != audioProcessor.currentSampleRate) {
@@ -69,24 +69,12 @@ osci::Point WavParser::getSample() {
afSource->setLooping(looping);
}
source->getNextAudioBlock(juce::AudioSourceChannelInfo(audioBuffer));
currentSample += source->getResamplingRatio();
counter++;
source->getNextAudioBlock(juce::AudioSourceChannelInfo(buffer));
currentSample += source->getResamplingRatio() * buffer.getNumSamples();
if (currentSample >= totalSamples && afSource->isLooping()) {
currentSample = 0;
counter = 0;
afSource->setNextReadPosition(0);
}
if (audioBuffer.getNumChannels() == 1) {
return osci::Point(audioBuffer.getSample(0, 0), audioBuffer.getSample(0, 0), 1.0);
} else if (audioBuffer.getNumChannels() == 2) {
return osci::Point(audioBuffer.getSample(0, 0), audioBuffer.getSample(1, 0), 1.0);
} else if (audioBuffer.getNumChannels() >= 3) {
return osci::Point(audioBuffer.getSample(0, 0), audioBuffer.getSample(1, 0), audioBuffer.getSample(2, 0));
} else {
return osci::Point();
}
}
void WavParser::setProgress(double progress) {
@@ -106,7 +94,6 @@ bool WavParser::isLooping() {
void WavParser::setPaused(bool paused) {
this->paused = paused;
counter = 0;
}
bool WavParser::isPaused() {

Wyświetl plik

@@ -7,7 +7,7 @@ public:
WavParser(CommonAudioProcessor& p);
~WavParser();
osci::Point getSample();
void processBlock(juce::AudioBuffer<float>& buffer);
void setProgress(double progress);
void setPaused(bool paused);
@@ -29,7 +29,6 @@ private:
std::atomic<bool> looping = true;
std::unique_ptr<juce::ResamplingAudioSource> source = nullptr;
juce::AudioBuffer<float> audioBuffer;
std::atomic<long> counter = 0;
std::atomic<bool> paused = false;
int fileSampleRate;
int currentSampleRate;

Wyświetl plik

@@ -3,7 +3,7 @@
<JUCERPROJECT id="HH2E72" name="sosci" projectType="audioplug" useAppConfig="0"
addUsingNamespaceToJuceHeader="0" jucerFormatVersion="1" pluginManufacturer="jameshball"
aaxIdentifier="sh.ball.sosci" cppLanguageStandard="20" projectLineFeed="&#10;"
headerPath="./include" version="1.1.8.4" companyName="James H Ball"
headerPath="./include" version="1.2.0.0" companyName="James H Ball"
companyWebsite="https://osci-render.com" companyEmail="james@ball.sh"
defines="NOMINMAX=1&#10;INTERNET_FLAG_NO_AUTO_REDIRECT=0&#10;OSCI_PREMIUM=1&#10;JUCE_USE_CUSTOM_PLUGIN_STANDALONE_APP=1&#10;JUCE_MODAL_LOOPS_PERMITTED=1"
pluginManufacturerCode="Jhba" pluginCode="Sosc" pluginAUMainType="'aufx'">