tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Anklang Documentation
Loading...
Searching...
No Matches
tracktion_PluginNode.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11#pragma once
12
13namespace tracktion { inline namespace engine
14{
15
16namespace
17{
18 static bool shouldUseFineGrainAutomation (Plugin& p)
19 {
20 if (! p.isAutomationNeeded())
21 return false;
22
23 if (p.engine.getPluginManager().canUseFineGrainAutomation)
24 return p.engine.getPluginManager().canUseFineGrainAutomation (p);
25
26 return true;
27 }
28}
29
30//==============================================================================
// PluginNode constructor.
// NOTE(review): the constructor's opening signature line (doc-line 31, taking
// the std::unique_ptr<Node> input) is missing from this extract; the
// parameter list below is its continuation — confirm against the repository.
// Takes ownership of the input node and plugin, captures playback/mute state,
// then initialises the plugin with the given sample rate and block size.
32 tracktion::engine::Plugin::Ptr pluginToProcess,
33 double sampleRateToUse, int blockSizeToUse,
34 const TrackMuteState* trackMuteStateToUse,
35 ProcessState& processStateToUse,
36 bool rendering, bool canBalanceLatency,
37 int maxNumChannelsToUse)
38 : TracktionEngineNode (processStateToUse),
39 input (std::move (inputNode)),
40 plugin (std::move (pluginToProcess)),
41 trackMuteState (trackMuteStateToUse),
42 playHeadState (processStateToUse.playHeadState),
43 isRendering (rendering),
44 maxNumChannels (maxNumChannelsToUse),
45 balanceLatency (canBalanceLatency)
46{
// Both the input node and the plugin are required — null here is a caller bug.
47 jassert (input != nullptr);
48 jassert (plugin != nullptr);
// Performs baseClassInitialise() and caches sampleRate/latencyNumSamples.
49 initialisePlugin (sampleRateToUse, blockSizeToUse);
50}
51
// Destructor body.
// NOTE(review): the destructor's signature line (doc-line 52) is missing from
// this extract — confirm against the repository.
// Balances the baseClassInitialise() call made in initialisePlugin(): only
// deinitialise if we actually initialised and the plugin no longer reports
// that it needs initialising.
53{
54 if (isInitialised && ! plugin->baseClassNeedsInitialising())
55 plugin->baseClassDeinitialise();
56}
57
58//==============================================================================
// getNodeProperties body.
// NOTE(review): the function's signature line (doc-line 59) is missing from
// this extract; per the member list it is
// "tracktion::graph::NodeProperties PluginNode::getNodeProperties() override".
// Derives this node's properties from its input, widened by what the plugin
// can produce, and caches the result once the node has been prepared.
60{
61 if (cachedNodeProperties)
62 return *cachedNodeProperties;
63
64 auto props = input->getNodeProperties();
65
66 // Assume a stereo output here to correctly initialise plugins
67 // We might need to modify this to return a number of channels passed as an argument if there are differences with mono renders
68 props.numberOfChannels = juce::jmax (2, props.numberOfChannels, plugin->getNumOutputChannelsGivenInputs (std::max (2, props.numberOfChannels)));
69
// An explicit channel cap (maxNumChannels > 0) clamps the result.
70 if (maxNumChannels > 0)
71 props.numberOfChannels = std::min (maxNumChannels, props.numberOfChannels);
72
// The plugin can introduce audio/MIDI even when the input has none
// (e.g. a synth producing audio from MIDI alone).
73 props.hasAudio = props.hasAudio || plugin->producesAudioWhenNoAudioInput();
74 props.hasMidi = props.hasMidi || plugin->takesMidiInput();
// Total latency is the upstream latency plus this plugin's own
// latencyNumSamples (computed in initialisePlugin), floored at zero.
75 props.latencyNumSamples = std::max (0, props.latencyNumSamples + latencyNumSamples);
// Use the plugin's item ID as a stable node ID so a rebuilt graph can locate
// this node's predecessor (used by replaceLatencyProcessorIfPossible).
76 props.nodeID = (size_t) plugin->itemID.getRawID();
77
// Only cache after prepareToPlay — presumably so values that settle during
// initialisation (e.g. latency) are final before being frozen. TODO confirm.
78 if (isPrepared)
79 cachedNodeProperties = props;
80
81 return props;
82}
83
// prepareToPlay body.
// NOTE(review): the function's signature line (doc-line 84) is missing from
// this extract; per the member list it is
// "void PluginNode::prepareToPlay (const tracktion::graph::PlaybackInitialisationInfo&) override".
85{
86 juce::ignoreUnused (info);
// The sample rate was fixed at construction (initialisePlugin); it must match
// the rate the graph is being prepared with.
87 jassert (sampleRate == info.sampleRate);
88 jassert (! isPrepared); // Is this being called multiple times?
89
90 auto props = getNodeProperties();
91
// Shift automation lookups earlier by the node's total latency so automated
// values line up with the audio after latency compensation.
92 if (props.latencyNumSamples > 0)
93 automationAdjustmentTime = TimeDuration::fromSamples (-props.latencyNumSamples, sampleRate);
94
// Fine-grain automation processes in sub-blocks of ~128 samples at 44.1kHz,
// scaled proportionally for higher sample rates.
95 if (shouldUseFineGrainAutomation (*plugin))
96 subBlockSizeToUse = std::max (128, 128 * juce::roundToInt (info.sampleRate / 44100.0));
97
// Bypass-with-latency is only supported for external plugins that actually
// report latency, and only when latency balancing was requested.
98 canProcessBypassed = balanceLatency
99 && dynamic_cast<ExternalPlugin*> (plugin.get()) != nullptr
100 && latencyNumSamples > 0;
101
102 if (canProcessBypassed)
103 {
// Prefer adopting the latency processor from the node being replaced so its
// delay-line contents carry over without a glitch.
104 replaceLatencyProcessorIfPossible (info.nodeGraphToReplace);
105
106 if (! latencyProcessor)
107 {
// NOTE(review): doc-line 108 — the line constructing latencyProcessor — is
// missing from this extract; the calls below configure the new instance.
109 latencyProcessor->setLatencyNumSamples (latencyNumSamples);
110 latencyProcessor->prepareToPlay (info.sampleRate, info.blockSize, props.numberOfChannels);
111 }
112 }
113
114 isPrepared = true;
115
// Memory-sharing optimisation: if this node is the input's only consumer and
// the input already provides at least as many channels as we need, process
// in-place on the input's buffers instead of allocating our own.
116 if (info.enableNodeMemorySharing && input->numOutputNodes == 1)
117 {
118 const auto inputNumChannels = input->getNodeProperties().numberOfChannels;
119 const auto desiredNumChannels = props.numberOfChannels;
120
121 if (inputNumChannels >= desiredNumChannels)
122 {
123 canUseSourceBuffers = true;
124 setOptimisations ({ tracktion::graph::ClearBuffers::no,
125 tracktion::graph::AllocateAudioBuffer::no });
126 }
127 }
128}
129
// prefetchBlock body.
// NOTE(review): the function's signature line (doc-line 130) is missing from
// this extract; per the member list it is
// "void PluginNode::prefetchBlock (juce::Range<int64_t>) override".
// Gives the plugin a chance to prepare for the upcoming block, passing the
// edit-time position the block starts at.
131{
132 plugin->prepareForNextBlock (getEditTimeRange().getStart());
133}
134
135void PluginNode::preProcess (choc::buffer::FrameCount, juce::Range<int64_t>)
136{
137 if (canUseSourceBuffers)
138 setBufferViewToUse (input.get(), input->getProcessedOutput().audio);
139}
140
// process body.
// NOTE(review): the function's signature line (doc-line 141) is missing from
// this extract; per the member list it is
// "void PluginNode::process (ProcessContext&) override".
// Copies input audio to the output, runs the plugin over the block (possibly
// in automation sub-blocks), merges MIDI, and substitutes latency-delayed
// audio when the plugin is bypassed.
142{
143 auto inputBuffers = input->getProcessedOutput();
144 auto& inputAudioBlock = inputBuffers.audio;
145
146 auto& outputBuffers = pc.buffers;
147 auto outputAudioView = outputBuffers.audio;
148 const auto blockNumSamples = inputAudioBlock.getNumFrames();
149 jassert (inputAudioBlock.getNumFrames() == outputAudioView.getNumFrames());
150
// Input and output channel counts can differ; only copy the overlap.
151 const auto numInputChannelsToCopy = std::min (inputAudioBlock.getNumChannels(),
152 outputAudioView.getNumChannels());
153
// Feed the dry input into the latency FIFO so bypassed output can be read
// back later with matching delay.
154 if (latencyProcessor)
155 {
156 if (numInputChannelsToCopy > 0)
157 latencyProcessor->writeAudio (inputAudioBlock.getFirstChannels (numInputChannelsToCopy));
158
159 latencyProcessor->writeMIDI (inputBuffers.midi);
160 }
161
162 // Copy the inputs to the outputs, then process using the
163 // output buffers as that will be the correct size
164 if (numInputChannelsToCopy > 0)
165 tracktion::graph::copyIfNotAliased (outputAudioView.getFirstChannels (numInputChannelsToCopy),
166 inputAudioBlock.getFirstChannels (numInputChannelsToCopy));
167
// Sub-block size < 0 means "no fine-grain automation": one block covers the
// whole buffer.
168 // Init block
169 auto subBlockSize = subBlockSizeToUse < 0 ? blockNumSamples
170 : (choc::buffer::FrameCount) subBlockSizeToUse;
171
172 choc::buffer::FrameCount numSamplesDone = 0;
173 auto numSamplesLeft = blockNumSamples;
174
// When bypass processing is supported, the plugin still runs even if
// disabled (its output is later replaced by the latency-delayed audio).
175 bool shouldProcessPlugin = canProcessBypassed || plugin->isEnabled();
176 bool isAllNotesOff = inputBuffers.midi.isAllNotesOff;
177
// Playhead jumps and fresh mutes both require an all-notes-off to avoid
// hanging notes.
178 if (playHeadState.didPlayheadJump())
179 isAllNotesOff = true;
180
181 if (trackMuteState != nullptr)
182 {
183 if (! trackMuteState->shouldTrackContentsBeProcessed())
184 {
185 shouldProcessPlugin = shouldProcessPlugin && trackMuteState->shouldTrackBeAudible();
186
187 if (trackMuteState->wasJustMuted())
188 isAllNotesOff = true;
189 }
190 }
191
192 const auto blockTimeRange = getEditTimeRange();
193 auto inputMidiIter = inputBuffers.midi.begin();
194
195 // Process in blocks
196 for (int subBlockNum = 0;; ++subBlockNum)
197 {
198 auto numSamplesThisBlock = std::min (subBlockSize, numSamplesLeft);
199
200 auto outputAudioBuffer = toAudioBuffer (outputAudioView.getFrameRange (frameRangeWithStartAndLength (numSamplesDone, numSamplesThisBlock)));
201
// Map this sub-block's sample range to a proportional slice of the block's
// edit-time range.
202 const auto blockPropStart = (numSamplesDone / (double) blockNumSamples);
203 const auto blockPropEnd = ((numSamplesDone + numSamplesThisBlock) / (double) blockNumSamples);
204 const auto subBlockTimeRange = TimeRange (toPosition (blockTimeRange.getLength()) * blockPropStart,
205 toPosition (blockTimeRange.getLength()) * blockPropEnd);
206
// Gather only the MIDI that falls inside this sub-block, re-timed relative
// to the sub-block start. Only the first sub-block carries all-notes-off.
207 midiMessageArray.clear();
208 midiMessageArray.isAllNotesOff = isAllNotesOff;
209
210 for (auto end = inputBuffers.midi.end(); inputMidiIter != end; ++inputMidiIter)
211 {
212 const auto timestamp = inputMidiIter->getTimeStamp();
213
214 // If the time range is empty, we need to pass through all the MIDI as it means the playhead is stopped
215 if (! subBlockTimeRange.isEmpty()
216 && timestamp >= subBlockTimeRange.getEnd().inSeconds())
217 break;
218
219 midiMessageArray.addMidiMessage (*inputMidiIter,
220 timestamp - subBlockTimeRange.getStart().inSeconds(),
221 inputMidiIter->mpeSourceID);
222 }
223
224 // Process the plugin
225 if (shouldProcessPlugin)
226 plugin->applyToBufferWithAutomation (getPluginRenderContext ({ blockTimeRange.getStart() + toDuration (subBlockTimeRange.getStart()),
227 blockTimeRange.getStart() + toDuration (subBlockTimeRange.getEnd()) },
228 outputAudioBuffer));
229
230 // Then copy the buffers to the outputs
231 if (subBlockNum == 0)
232 outputBuffers.midi.swapWith (midiMessageArray);
233 else
234 outputBuffers.midi.mergeFrom (midiMessageArray);
235
236 numSamplesDone += numSamplesThisBlock;
237 numSamplesLeft -= numSamplesThisBlock;
238
239 if (numSamplesLeft == 0)
240 break;
241
242 isAllNotesOff = false;
243 }
244
245 // If the plugin was bypassed, use the delayed audio
246 if (latencyProcessor)
247 {
248 // A slightly better approach would be to crossfade between the processed and latency block to minimise any discrepancies
249 if (plugin->isEnabled())
250 {
// Plugin ran normally: discard the delayed copies written above so the FIFO
// doesn't accumulate.
251 auto numSamples = (int) blockNumSamples;
252 latencyProcessor->clearAudio (numSamples);
253 latencyProcessor->clearMIDI (numSamples);
254 }
255 else
256 {
// Plugin bypassed: replace the output with the latency-delayed input.
257 outputBuffers.midi.clear();
258
259 // If no inputs have been added to the fifo, there won't be any samples available so skip
260 if (numInputChannelsToCopy > 0)
261 latencyProcessor->readAudioOverwriting (outputAudioView);
262
263 latencyProcessor->readMIDI (outputBuffers.midi, (int) blockNumSamples);
264 }
265 }
266
267 // Some plugins flake and add NaNs so zero these out to avoid killing all the audio downstream
268 sanitise (outputAudioView);
269}
270
271//==============================================================================
272void PluginNode::initialisePlugin (double sampleRateToUse, int blockSizeToUse)
273{
274 plugin->baseClassInitialise ({ TimePosition(), sampleRateToUse, blockSizeToUse });
275 isInitialised = true;
276
277 sampleRate = sampleRateToUse;
278 latencyNumSamples = juce::roundToInt (plugin->getLatencySeconds() * sampleRate);
279}
280
// Builds the PluginRenderContext handed to Plugin::applyToBufferWithAutomation
// for one (sub-)block: the destination buffer, the accumulated MIDI, the edit
// time range shifted by the automation latency adjustment, and the current
// playback/render state.
// NOTE(review): doc-line 284 is missing from this extract — likely a
// channel-layout argument (this page's symbol list references
// juce::AudioChannelSet::canonicalChannelSet); confirm against the repository.
281PluginRenderContext PluginNode::getPluginRenderContext (TimeRange editTime, juce::AudioBuffer<float>& destBuffer)
282{
283 return { &destBuffer,
285 0, destBuffer.getNumSamples(),
286 &midiMessageArray, 0.0,
287 editTime + automationAdjustmentTime,
288 playHeadState.playHead.isPlaying(), playHeadState.playHead.isUserDragging(),
289 isRendering, canProcessBypassed };
290}
291
292void PluginNode::replaceLatencyProcessorIfPossible (NodeGraph* nodeGraphToReplace)
293{
294 if (nodeGraphToReplace == nullptr)
295 return;
296
297 const auto props = getNodeProperties();
298 const auto nodeIDToLookFor = props.nodeID;
299
300 if (nodeIDToLookFor == 0)
301 return;
302
303 if (auto oldNode = findNodeWithID<PluginNode> (*nodeGraphToReplace, nodeIDToLookFor))
304 {
305 if (! oldNode->latencyProcessor)
306 return;
307
308 if (! latencyProcessor)
309 {
310 if (oldNode->latencyProcessor->hasConfiguration (latencyNumSamples, sampleRate, props.numberOfChannels))
311 latencyProcessor = oldNode->latencyProcessor;
312
313 return;
314 }
315
316 if (latencyProcessor->hasSameConfigurationAs (*oldNode->latencyProcessor))
317 latencyProcessor = oldNode->latencyProcessor;
318 }
319}
320
321}} // namespace tracktion { inline namespace engine
int getNumChannels() const noexcept
int getNumSamples() const noexcept
static AudioChannelSet JUCE_CALLTYPE canonicalChannelSet(int numChannels)
ReferencedType * get() const noexcept
tracktion::graph::NodeProperties getNodeProperties() override
Should return the properties of the node.
void preProcess(choc::buffer::FrameCount, juce::Range< int64_t >) override
Called when the node is to be processed, just before process.
void process(ProcessContext &) override
Called when the node is to be processed.
void prefetchBlock(juce::Range< int64_t >) override
Called once on all Nodes before they are processed.
void prepareToPlay(const tracktion::graph::PlaybackInitialisationInfo &) override
Called once before playback begins for each node.
PluginNode(std::unique_ptr< Node > input, tracktion::engine::Plugin::Ptr, double sampleRateToUse, int blockSizeToUse, const TrackMuteState *, ProcessState &, bool rendering, bool balanceLatency, int maxNumChannelsToUse)
Creates a PluginNode to process a plugin on a Track.
Holds the state of a Track and if its contents/plugins should be played or not.
bool shouldTrackBeAudible() const
Returns true if the track's mix bus should be audible.
bool shouldTrackContentsBeProcessed() const
Returns true if the track's contents should be processed e.g.
bool wasJustMuted() const
Returns true if the last block was audible but this one isn't.
Base class for Nodes that provides information about the current process call.
TimeRange getEditTimeRange() const
Returns the edit time range of the current process block.
void setOptimisations(NodeOptimisations)
This can be called to provide some hints about allocating or playing back a Node to improve efficienc...
void setBufferViewToUse(Node *sourceNode, const choc::buffer::ChannelArrayView< float > &)
This can be called during prepareToPlay to set a BufferView to use which can improve efficiency.
Struct to describe a single iteration of a process call.
bool didPlayheadJump() noexcept
Returns true if the play head jumped.
bool isUserDragging() const
Returns true if the user is dragging.
bool isPlaying() const noexcept
Returns true if the play head is currently playing.
T is_pointer_v
#define jassert(expression)
typedef int
typedef double
T max(T... args)
T min(T... args)
constexpr Type jmax(Type a, Type b)
void ignoreUnused(Types &&...) noexcept
int roundToInt(const FloatType value) noexcept
juce::AudioBuffer< float > toAudioBuffer(choc::buffer::ChannelArrayView< float > view)
Creates a juce::AudioBuffer from a choc::buffer::BufferView.
choc::buffer::FrameRange frameRangeWithStartAndLength(choc::buffer::FrameCount start, choc::buffer::FrameCount length)
Returns a FrameRange with a start and length.
Represents a position in real-life time.
Holds the state of a process call.
Holds some really basic properties of a node.
Passed into Nodes when they are being initialised, to give them useful contextual information that th...
typedef size_t