tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Tracktion Engine Documentation
Loading...
Searching...
No Matches
tracktion_NodeRenderContext.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11#pragma once
12
13namespace tracktion { inline namespace engine
14{
15
16namespace
17{
18 static Plugin::Array findAllPlugins (tracktion::graph::Node& node)
19 {
20 Plugin::Array plugins, insideRacks;
21
22 for (auto n : getNodes (node, VertexOrdering::preordering))
23 if (auto pluginNode = dynamic_cast<PluginNode*> (n))
24 plugins.add (&pluginNode->getPlugin());
25
26 for (auto plugin : plugins)
27 if (auto rack = dynamic_cast<RackInstance*> (plugin))
28 if (auto type = rack->type)
29 for (auto p : type->getPlugins())
30 insideRacks.addIfNotAlreadyThere (p);
31
32 plugins.addArray (insideRacks);
33 return plugins;
34 }
35}
36
37
38//==============================================================================
    : owner (owner_),
      r (p), originalParams (p),
      playHead (std::move (playHead_)),
      playHeadState (std::move (playHeadState_)),
      processState (std::move (processState_)),
      status (juce::Result::ok()),
      ditherers (256, r.bitDepth),
      sourceToUpdate (sourceToUpdate_)
{
    // Must be constructed on the message thread, with a valid engine/edit and
    // a non-empty time range to render.
    TRACKTION_ASSERT_MESSAGE_THREAD
    jassert (r.engine != nullptr);
    jassert (r.edit != nullptr);
    jassert (r.time.getLength() > 0.0s);

    // Build the player that pulls blocks from the node graph.
    // NOTE(review): the trailing constructor argument(s) appear on a line
    // missing from this capture of the file — confirm against upstream source.
    nodePlayer = std::make_unique<TracktionNodePlayer> (std::move (n), *processState, r.sampleRateForAudio, r.blockSizeForAudio,
    nodePlayer->setNumThreads ((size_t) p.engine->getEngineBehaviour().getNumberOfCPUsToUseForAudio() - 1);

    // Extend the render range by the graph's overall latency so the tail isn't
    // cut off; the same number of samples is dropped from the start on write.
    numLatencySamplesToDrop = nodePlayer->getNode()->getNodeProperties().latencyNumSamples;
    r.time = r.time.withEnd (r.time.getEnd() + TimeDuration::fromSamples (numLatencySamplesToDrop, r.sampleRateForAudio));

    // Rendering while a live playback context is attached is an error state.
    if (r.edit->getTransport().isPlayContextActive())
    {
        TRACKTION_LOG_ERROR("Rendering whilst attached to audio device");
    }

    // Normalise/trim requires a second pass over the rendered audio, so render
    // to an intermediate temporary file first and post-process it afterwards.
    if (r.shouldNormalise || r.trimSilenceAtEnds || r.shouldNormaliseByRMS)
    {
        needsToNormaliseAndTrim = true;

        r.audioFormat = r.engine->getAudioFileFormatManager().getFrozenFileFormat();

        intermediateFile = std::make_unique<juce::TemporaryFile> (r.destFile.withFileExtension (r.audioFormat->getFileExtensions()[0]));
        r.destFile = intermediateFile->getFile();

        // The post-process pass performs these; disable them for the first pass.
        r.shouldNormalise = false;
        r.trimSilenceAtEnds = false;
        r.shouldNormaliseByRMS = false;
    }

    numOutputChans = 2;

    {
        auto props = nodePlayer->getNode()->getNodeProperties();

        if (p.checkNodesForAudio && ! props.hasAudio)
        {
            status = juce::Result::fail (TRANS("Didn't find any audio to render"));
            return;
        }

        // Fall back to mono when forced, or when allowed and the graph
        // produces fewer than two channels.
        if (r.mustRenderInMono || (r.canRenderInMono && (props.numberOfChannels < 2)))
            numOutputChans = 1;
    }

    AudioFileUtils::addBWAVStartToMetadata (r.metadata, toSamples (r.time.getStart(), r.sampleRateForAudio));

    writer = std::make_unique<AudioFileWriter> (AudioFile (*originalParams.engine, r.destFile),
                                                r.audioFormat, numOutputChans, r.sampleRateForAudio,
                                                r.bitDepth, r.metadata, r.quality);

    // An invalid destination file is only fatal if one was actually requested.
    if (r.destFile != juce::File() && ! writer->isOpen())
    {
        status = juce::Result::fail (TRANS("Couldn't write to target file"));
        return;
    }

    blockLength = TimeDuration::fromSamples (r.blockSizeForAudio, r.sampleRateForAudio);

    // number of blank blocks to play before starting, to give plugins time to warm up
    numPreRenderBlocks = (int) ((r.sampleRateForAudio / 2) / r.blockSizeForAudio + 1);

    // how long each block must take in real-time
    realTimePerBlock = (int) (blockLength.inSeconds() * 1000.0 + 0.99);
    sleepCounter = 10;

    peak = 0.0001f;
    rmsTotal = 0.0;
    rmsNumSamps = 0;
    streamTime = r.time.getStart();

    // Start the stream early so the pre-roll (warm-up) blocks run before t0.
    precount = numPreRenderBlocks;
    streamTime = streamTime - (blockLength * precount);

    plugins = findAllPlugins (*nodePlayer->getNode());

    // Set the realtime property before preparing to play
    Renderer::RenderTask::setAllPluginsRealtime (plugins, r.realTimeRender);
    nodePlayer->prepareToPlay (r.sampleRateForAudio, r.blockSizeForAudio);
    Renderer::RenderTask::flushAllPlugins (plugins, r.sampleRateForAudio, r.blockSizeForAudio);

    samplesTrimmed = 0;
    // If not trimming, every block is written from the start.
    hasStartedSavingToFile = ! r.trimSilenceAtEnds;

    playHead->stop();
    playHead->setPosition (toSamples (r.time.getStart(), r.sampleRateForAudio));

    // Total samples to produce, including the end allowance for plugin tails.
    samplesToWrite = tracktion::toSamples ((r.time.getLength() + r.endAllowance), r.sampleRateForAudio);

    if (sourceToUpdate != nullptr)
        sourceToUpdate->reset (numOutputChans, r.sampleRateForAudio, samplesToWrite);
}
152
{
    // Publish the measured results back to the owning task's parameters.
    // NB resultRMS is the mean of the per-channel, per-block RMS readings
    // accumulated in writeAudioBlock, not a true RMS over all samples.
    r.resultMagnitude = owner.params.resultMagnitude = peak;
    r.resultRMS = owner.params.resultRMS = rmsNumSamps > 0 ? (float) (rmsTotal / rmsNumSamps) : 0.0f;
    r.resultAudioDuration = owner.params.resultAudioDuration = float (numSamplesWrittenToSource / owner.params.sampleRateForAudio);

    playHead->stop();
    // Restore realtime mode on the plugins now that offline rendering is done.
    Renderer::RenderTask::setAllPluginsRealtime (plugins, true);

    if (writer != nullptr)
        writer->closeForWriting();

    // The node player must be destroyed on the message thread.
    callBlocking ([this] { nodePlayer.reset(); });

    // Second pass: normalise/trim the intermediate file into the real target.
    if (needsToNormaliseAndTrim)
        owner.performNormalisingAndTrimming (originalParams, r);
}
171
{
    // Renders one block. Returns true when rendering has finished (or was
    // aborted), false to be called again for the next block.

    // Yield periodically so the render loop doesn't starve other threads.
    if (--sleepCounter <= 0)
    {
        sleepCounter = sleepCounterMax;
        juce::Thread::sleep (1);
    }

    // Abort: close and delete the partial file, restore plugin realtime state.
    if (owner.shouldExit())
    {
        writer->closeForWriting();
        r.destFile.deleteFile();

        playHead->stop();
        Renderer::RenderTask::setAllPluginsRealtime (plugins, true);

        return true;
    }

    auto blockEnd = streamTime + blockLength;

    // During the pre-roll, never step past the real start time.
    if (precount > 0)
        blockEnd = juce::jmin (r.time.getStart(), blockEnd);

    // First half of the pre-roll just repositions the playhead; half-way
    // through, start synced playback so plugins see a play state before t0.
    if (precount > numPreRenderBlocks / 2)
        playHead->setPosition (toSamples (streamTime, r.sampleRateForAudio));
    else if (precount == numPreRenderBlocks / 2)
        playHead->playSyncedToRange ({ toSamples (streamTime, r.sampleRateForAudio), std::numeric_limits<int64_t>::max() });

    if (precount == 0)
    {
        // Pre-roll finished: snap to the real start time and re-sync playback.
        streamTime = r.time.getStart();
        blockEnd = streamTime + blockLength;

        playHead->playSyncedToRange (toSamples (TimeRange (streamTime, Edit::getMaximumLength()), r.sampleRateForAudio));
        playHeadState->update (toSamples (TimeRange (streamTime, blockEnd), r.sampleRateForAudio));
    }

    // In real-time mode, pace each block to wall-clock time.
    if (r.realTimeRender)
    {
        // NOTE(review): `timeNow` is captured on a line missing from this view
        // — presumably juce::Time::getMillisecondCounterHiRes(); confirm upstream.
        auto timeToWait = (int) (realTimePerBlock - (timeNow - lastTime));
        lastTime = timeNow;

        if (timeToWait > 0)
            juce::Thread::sleep (timeToWait);
    }

    currentTempoPosition->set (streamTime);

    resetFP();

    const TimeRange streamTimeRange (streamTime, blockEnd);
    const auto referenceSampleRange = juce::Range<int64_t>::withStartAndLength (toSamples (streamTimeRange.getStart(), originalParams.sampleRateForAudio), r.blockSizeForAudio);

    // Update modifier timers
    r.edit->updateModifierTimers (streamTime, r.blockSizeForAudio);

    // Wait for any nodes to render their sources or proxies
    auto leafNodesReady = [this, referenceSampleRange]
    {
        for (auto node : getNodes (*nodePlayer->getNode(), VertexOrdering::postordering))
        {
            // Call prepare for next block here to ensure isReadyToProcess internals are updated
            node->prepareForNextBlock (referenceSampleRange);

            if (node->getDirectInputNodes().empty() && ! node->isReadyToProcess())
                return false;
        }

        return true;
    }();

    // If a leaf isn't ready yet, bail out and retry this block on the next
    // call. NOTE(review): this `while` executes at most once — it behaves as
    // an `if`; consider rewriting for clarity.
    while (! (leafNodesReady || owner.shouldExit()))
        return false;

    // Over-allocate slightly so latency compensation can offset into the buffer.
    juce::AudioBuffer<float> renderingBuffer (numOutputChans, r.blockSizeForAudio + 256);
    renderingBuffer.clear();
    midiBuffer.clear();

    auto destView = choc::buffer::createChannelArrayView (renderingBuffer.getArrayOfWritePointers(),
                                                          (choc::buffer::ChannelCount) renderingBuffer.getNumChannels(),
                                                          (choc::buffer::FrameCount) referenceSampleRange.getLength());

    nodePlayer->process ({ (choc::buffer::FrameCount) referenceSampleRange.getLength(), referenceSampleRange, { destView, midiBuffer} });

    if (precount <= 0)
    {
        jassert (playHeadState->isContiguousWithPreviousBlock());

        auto numSamplesDone = (uint32_t) juce::jmin (samplesToWrite, (int64_t) r.blockSizeForAudio);
        samplesToWrite -= numSamplesDone;

        auto blockSize = (uint32_t) numSamplesDone;
        uint32_t blockOffset = 0;

        // Drop the graph's initial latency: skip the first latencyNumSamples
        // of output, writing only the tail of this block once consumed.
        if (numLatencySamplesToDrop > 0)
        {
            auto numToDrop = std::min ((uint32_t) numLatencySamplesToDrop, numSamplesDone);
            numLatencySamplesToDrop -= (int) numToDrop;
            numSamplesDone -= numToDrop;

            blockSize = numSamplesDone;
            blockOffset = destView.getNumFrames() - blockSize;
        }

        if (blockSize > 0)
        {
            jassert (blockSize <= destView.getNumFrames());

            // A failed write ends the render.
            if (writeAudioBlock (destView.getFrameRange ({ blockOffset, blockOffset + blockSize })) == WriteResult::failed)
                return true;
        }
    }
    else
    {
        // for the pre-count blocks, sleep to give things a chance to get going
        juce::Thread::sleep ((int) (blockLength.inSeconds() * 1000));
    }

    if (streamTime > r.time.getEnd() + r.endAllowance)
    {
        // Ending after end time and end allowance has elapsed
        return true;
    }
    else if (streamTime > r.time.getEnd()
              && renderingBuffer.getMagnitude (0, r.blockSizeForAudio) <= thresholdForStopping)
    {
        // Ending during end allowance period due to low magnitude
        return true;
    }

    auto prog = (float) ((streamTime - r.time.getStart()) / juce::jmax (1_td, r.time.getLength()));

    // Reserve the last 10% of the progress range for the normalise/trim pass.
    if (needsToNormaliseAndTrim)
        prog *= 0.9f;

    jassert (! std::isnan (prog));
    progressToUpdate = juce::jlimit (0.0f, 1.0f, prog);
    --precount;
    streamTime = blockEnd;

    return false;
}
319
320//==============================================================================
321NodeRenderContext::WriteResult NodeRenderContext::writeAudioBlock (choc::buffer::ChannelArrayView<float> block)
322{
324 // Prepare buffer to use
325 auto blockSizeSamples = (int) block.getNumFrames();
326
327 juce::AudioBuffer<float> buffer (block.data.channels, numOutputChans, blockSizeSamples);
328
329 // Apply dithering and mag/rms analysis
330 if (r.ditheringEnabled && r.bitDepth < 32)
331 ditherers.apply (buffer, blockSizeSamples);
332
333 auto mag = buffer.getMagnitude (0, blockSizeSamples);
334 peak = juce::jmax (peak, mag);
335
336 if (! hasStartedSavingToFile)
337 hasStartedSavingToFile = (mag > 0.0f);
338
339 for (int i = buffer.getNumChannels(); --i >= 0;)
340 {
341 rmsTotal += buffer.getRMSLevel (i, 0, blockSizeSamples);
342 ++rmsNumSamps;
343 }
344
345 if (! hasStartedSavingToFile)
346 samplesTrimmed += blockSizeSamples;
347
348 // Update thumbnail source
349 if (sourceToUpdate != nullptr && blockSizeSamples > 0)
350 sourceToUpdate->addBlock (numSamplesWrittenToSource, buffer, 0, blockSizeSamples);
351
352 numSamplesWrittenToSource += blockSizeSamples;
353
354 // And finally write to the file
355 // NB buffer gets trashed by this call
356 if (blockSizeSamples > 0 && hasStartedSavingToFile
357 && writer->isOpen()
358 && ! writer->appendBuffer (buffer, blockSizeSamples))
359 return WriteResult::failed;
360
361 return WriteResult::succeeded;
362}
363
364//==============================================================================
371 std::atomic<float>& progress)
372{
373 const int samplesPerBlock = r.blockSizeForAudio;
374 const double sampleRate = r.sampleRateForAudio;
375 const auto blockLength = TimeDuration::fromSamples (samplesPerBlock, sampleRate);
376 auto streamTime = r.time.getStart();
377
379 callBlocking ([&]
380 {
381 nodePlayer = std::make_unique<TracktionNodePlayer> (std::move (n), *processState,
382 sampleRate, samplesPerBlock,
384 });
385 // Ensure the node player gets deleted on the message thread
386 const juce::ErasedScopeGuard scope ([&nodePlayer] { callBlocking ([&] { nodePlayer.reset(); }); });
387
388 nodePlayer->setNumThreads ((size_t) r.engine->getEngineBehaviour().getNumberOfCPUsToUseForAudio() - 1);
389
390 //TODO: Should really purge any non-MIDI nodes here then return if no MIDI has been found
391
392 playHead->stop();
393 playHead->setPosition (toSamples (streamTime, sampleRate));
394 playHead->playSyncedToRange (toSamples ({ streamTime, Edit::getMaximumLength() }, sampleRate));
395
396 playHeadState->update (toSamples ({ streamTime, streamTime + blockLength }, sampleRate));
397
398 // Wait for any nodes to render their sources or proxies
399 auto leafNodesReady = [nodes = getNodes (*nodePlayer->getNode(), VertexOrdering::postordering)]
400 {
401 for (auto node : nodes)
402 if (node->getDirectInputNodes().empty() && ! node->isReadyToProcess())
403 return false;
404
405 return true;
406 };
407
408 while (! leafNodesReady())
409 {
410 juce::Thread::sleep (100);
411
412 if (owner.shouldExit())
413 return TRANS("Render cancelled");
414 }
415
416 // Then render the blocks
417 auto currentTempoPosition = createPosition (r.edit->tempoSequence);
418
419 juce::AudioBuffer<float> renderingBuffer (2, samplesPerBlock + 256);
421 juce::MidiMessageSequence outputSequence;
422
423 for (;;)
424 {
425 if (owner.shouldExit())
426 return TRANS("Render cancelled");
427
428 if (streamTime > r.time.getEnd())
429 break;
430
431 auto blockEnd = streamTime + blockLength;
432 const TimeRange streamTimeRange (streamTime, blockEnd);
433
434 // Update modifier timers
435 r.edit->updateModifierTimers (streamTime, samplesPerBlock);
436
437 // Then once eveything is ready, render the block
438 currentTempoPosition.set (streamTime);
439
440 renderingBuffer.clear();
441 blockMidiBuffer.clear();
442 const auto referenceSampleRange = toSamples (streamTimeRange, sampleRate);
443 auto destView = choc::buffer::createChannelArrayView (renderingBuffer.getArrayOfWritePointers(),
444 (choc::buffer::ChannelCount) renderingBuffer.getNumChannels(), (choc::buffer::FrameCount) referenceSampleRange.getLength());
445
446 nodePlayer->process ({ (choc::buffer::FrameCount) referenceSampleRange.getLength(), referenceSampleRange, { destView, blockMidiBuffer} });
447
448 // Set MIDI messages to beats and update final sequence
449 for (auto& m : blockMidiBuffer)
450 {
451 tempo::Sequence::Position eventPos (currentTempoPosition);
452 eventPos.set (TimePosition::fromSeconds (m.getTimeStamp()) + (streamTime - r.time.getStart()));
453
454 outputSequence.addEvent (juce::MidiMessage (m, Edit::ticksPerQuarterNote * eventPos.getPPQTime()));
455 }
456
457 streamTime = blockEnd;
458
459 progress = juce::jlimit (0.0f, 1.0f, (float) ((streamTime - r.time.getStart()) / r.time.getLength()));
460 }
461
462 playHead->stop();
463
464 if (outputSequence.getNumEvents() == 0)
465 return TRANS("No MIDI found to render");
466
467 if (! Renderer::RenderTask::addMidiMetaDataAndWriteToFile (r.destFile, std::move (outputSequence), r.edit->tempoSequence))
468 return TRANS("Unable to write to destination file");
469
470 return {};
471}
472
473
474}} // namespace tracktion { inline namespace engine
Type getMagnitude(int channel, int startSample, int numSamples) const noexcept
int getNumChannels() const noexcept
void clear() noexcept
Type *const * getArrayOfWritePointers() noexcept
virtual StringArray getFileExtensions() const
File withFileExtension(StringRef newExtension) const
bool deleteFile() const
MidiEventHolder * addEvent(const MidiMessage &newMessage, double timeAdjustment=0)
int getNumEvents() const noexcept
static Range withStartAndLength(const ValueType startValue, const ValueType length) noexcept
static Result fail(const String &errorMessage) noexcept
bool shouldExit() const noexcept
static double getMillisecondCounterHiRes() noexcept
TransportControl & getTransport() const noexcept
Returns the TransportControl which is used to start/stop/position playback and recording.
TempoSequence tempoSequence
The global TempoSequence of this Edit.
static const int ticksPerQuarterNote
The number of ticks per quarter note.
static TimeDuration getMaximumLength()
Returns the maximum length an Edit can be.
void updateModifierTimers(TimePosition editTime, int numSamples) const
Updates all the ModifierTimers with a given edit time and number of samples.
AudioFileFormatManager & getAudioFileFormatManager() const
Returns the AudioFileFormatManager that maintains a list of available audio file formats.
EngineBehaviour & getEngineBehaviour() const
Returns the EngineBehaviour instance.
static juce::String renderMidi(Renderer::RenderTask &, Renderer::Parameters &, std::unique_ptr< tracktion::graph::Node >, std::unique_ptr< tracktion::graph::PlayHead >, std::unique_ptr< tracktion::graph::PlayHeadState >, std::unique_ptr< ProcessState >, std::atomic< float > &progressToUpdate)
Renders the MIDI of an Edit to a sequence.
bool renderNextBlock(std::atomic< float > &progressToUpdate)
Renders the next block of audio.
NodeRenderContext(Renderer::RenderTask &, Renderer::Parameters &, std::unique_ptr< tracktion::graph::Node >, std::unique_ptr< tracktion::graph::PlayHead >, std::unique_ptr< tracktion::graph::PlayHeadState >, std::unique_ptr< ProcessState >, juce::AudioFormatWriter::ThreadedWriter::IncomingDataReceiver *sourceToUpdate)
Creates a context to render a Node.
Task that actually performs the render operation in blocks.
bool isPlayContextActive() const
Returns true if this Edit is attached to the DeviceManager for playback.
Main graph Node processor class.
virtual std::vector< Node * > getDirectInputNodes()
Should return all the inputs directly feeding in to this node.
virtual bool isReadyToProcess()=0
Should return true when this node is ready to be processed.
void prepareForNextBlock(juce::Range< int64_t > referenceSampleRange)
Call before processing the next block, used to reset the process status.
T is_pointer_v
T isnan(T... args)
#define TRANS(stringLiteral)
#define jassert(expression)
#define jassertfalse
typedef int
typedef float
T max(T... args)
T min(T... args)
constexpr Type jmin(Type a, Type b)
constexpr Type jmax(Type a, Type b)
Type jlimit(Type lowerLimit, Type upperLimit, Type valueToConstrain) noexcept
tempo::Sequence::Position createPosition(const TempoSequence &s)
Creates a Position to iterate over the given TempoSequence.
LockFreeMultiThreadedNodePlayer::ThreadPoolCreator getPoolCreatorFunction(ThreadPoolStrategy poolType)
Returns a function to create a ThreadPool for the given strategy.
ThreadPoolStrategy
Available strategies for thread pools.
VertexOrdering
Specifies the ordering algorithm.
std::vector< Node * > getNodes(Node &node, VertexOrdering vertexOrdering)
Returns all the nodes in a Node graph in the order given by vertexOrdering.
T reset(T... args)
typedef uint32_t
constexpr double inSeconds() const
Returns the TimeDuration as a number of seconds.
A Sequence::Position is an iterator through a Sequence.
double getPPQTime() const noexcept
Returns the position as a PPQ time.
void set(TimePosition)
Sets the Position to a new time.
#define CRASH_TRACER
This macro adds the current location to a stack which gets logged if a crash happens.