tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Anklang Documentation
Loading...
Searching...
No Matches
tracktion_SpeedRampAudioNode.h
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11#pragma once
12
13namespace tracktion { inline namespace engine
14{
15
17{
18public:
20 const AudioFile& af,
21 legacy::EditTimeRange editTime,
22 double off,
24 LiveClipLevel level,
25 double speed,
26 const juce::AudioChannelSet& channels)
27 : engine (e),
28 editPosition (editTime),
29 loopSection (loop.getStart() * speed, loop.getEnd() * speed),
30 offset (off),
31 originalSpeedRatio (speed),
32 audioFile (af),
33 clipLevel (level),
34 channelsToUse (channels)
35 {
36 }
37
38 void getAudioNodeProperties (AudioNodeProperties& info) override
39 {
40 info.hasAudio = true;
41 info.hasMidi = false;
42 info.numberOfChannels = juce::jlimit (1, channelsToUse.size(), audioFile.getNumChannels());
43 }
44
/** Visits this node; it is a leaf, so there are no inputs to recurse into. */
void visitNodes (const VisitorFn& v) override
{
    v (*this);
}
49
/** Leaf audio-only node: it should be kept only when audio output is wanted. */
bool purgeSubNodes (bool keepAudio, bool) override
{
    return keepAudio;
}
54
56 {
57 reader = engine.getAudioFileManager().cache.createReader (audioFile);
58 outputSampleRate = info.sampleRate;
59 updateFileSampleRate();
60 resetResamplers();
61 }
62
63 bool isReadyToRender() override
64 {
65 // if the hash is 0 it means an empty file path which means a missing file so
66 // this will never return a valid reader and we should just bail
67 if (audioFile.isNull())
68 return true;
69
70 if (reader == nullptr)
71 if ((reader = engine.getAudioFileManager().cache.createReader (audioFile)) == nullptr)
72 return false;
73
74 if (audioFileSampleRate == 0.0 && ! updateFileSampleRate())
75 return false;
76
77 return true;
78 }
79
81 {
82 reader = nullptr;
83 }
84
/** Replace-render is implemented in terms of the additive path via the
    base-class helper (which presumably prepares the buffer first — see base). */
void renderOver (const AudioRenderContext& rc) override
{
    callRenderAdding (rc);
}
89
/** Adds this node's output to the buffer. invokeSplitRender breaks any
    wrapped/split time range into contiguous sections and calls
    renderSection() for each (see base class). */
void renderAdding (const AudioRenderContext& rc) override
{
    invokeSplitRender (rc, *this);
}
94
/** Seeds the reader just before the next block so the read is ready in time. */
void prepareForNextBlock (const AudioRenderContext& rc) override
{
    SCOPED_REALTIME_CHECK

    // keep a local copy, because releaseAudioNodeResources may remove the reader halfway through..
    if (const auto localReader = reader)
        // The "- 5" matches the preRead used by renderSection to prime the
        // interpolator with samples preceding the section.
        localReader->setReadPosition (static_cast<SampleCount> (editTimeToFileSample (rc.getEditTime().editRange1.getStart()) + 0.5) - 5);
}
103
/** Renders one contiguous (non-split) section of the edit into rc.destBuffer,
    reading from the file with sub-sample accurate Lagrange interpolation.
*/
void renderSection (const AudioRenderContext& rc, legacy::EditTimeRange editTime)
{
    // keep a local copy, because releaseAudioNodeResources may remove the reader halfway through..
    const auto localReader (reader);

    rc.sanityCheck();

    if (rc.destBuffer == nullptr
        || rc.bufferNumSamples == 0
        || localReader == nullptr
        || editTime.getStart() >= editPosition.getEnd())
        return;

    SCOPED_REALTIME_CHECK

    if (audioFileSampleRate == 0.0 && ! updateFileSampleRate())
        return;

    // Fractional file-sample positions corresponding to this edit-time section.
    auto fileStart = editTimeToFileSample (editTime.getStart());
    auto fileEnd = editTimeToFileSample (editTime.getEnd());

    // Read a few extra samples before the section to prime the interpolator
    // (this matches the "- 5" in prepareForNextBlock).
    int preRead = 5;
    auto fileReadStart = static_cast<SampleCount> (std::ceil (fileStart)) - preRead;
    auto fileReadEnd = static_cast<SampleCount> (std::ceil (fileEnd));
    auto numSamplesToRead = (int) (fileReadEnd - fileReadStart);

    // Scratch is at most stereo; destinations with more channels reuse the last scratch channel.
    AudioScratchBuffer scratchBuffer (juce::jmin (2, rc.destBuffer->getNumChannels()), numSamplesToRead + 2);
    const juce::AudioChannelSet scratchBufferChannels = juce::AudioChannelSet::canonicalChannelSet (scratchBuffer.buffer.getNumChannels());

    localReader->setReadPosition (fileReadStart);

    int lastSampleFadeLength = 0;

    {
        SCOPED_REALTIME_CHECK

        // Offline renders can afford a long timeout; live playback cannot.
        if (localReader->readSamples (numSamplesToRead + 2, scratchBuffer.buffer, scratchBufferChannels, 0,
                                      channelsToUse,
                                      rc.isRendering ? 5000 : 3))
        {
            // Crossfade from the previous block's final sample when playback jumps.
            if (rc.isFirstBlockOfLoop() || ! rc.isContiguousWithPreviousBlock())
                lastSampleFadeLength = rc.playhead.isUserDragging() ? 40 : 10;
        }
        else
        {
            // Read failed or timed out: output silence, fading from the last sample.
            lastSampleFadeLength = 40;
            scratchBuffer.buffer.clear();
        }
    }

    float gains[2];

    // Mono destinations use the combined gain; multi-channel gets per-side gains (pan).
    if (rc.destBuffer->getNumChannels() > 1)
        clipLevel.getLeftAndRightGains (gains[0], gains[1]);
    else
        gains[0] = gains[1] = clipLevel.getGainIncludingMute();

    if (rc.playhead.isUserDragging())
    {
        // Duck the level while the user is scrubbing.
        gains[0] *= 0.4f;
        gains[1] *= 0.4f;
    }

    // Pre-resampling set-up
    const double ratio = (fileEnd - fileStart) / (double) rc.bufferNumSamples; // file samples per output sample
    const double subSamplePos = fileStart - (int) fileStart;

    // NOTE(review): resampler[]/lastSample[] hold 8 entries, so this assumes the
    // destination never has more than 8 channels — confirm upstream guarantees.
    for (int channel = rc.destBuffer->getNumChannels(); --channel >= 0;)
    {
        const int srcChan = juce::jmin (channel, scratchBuffer.buffer.getNumChannels() - 1);
        const float* const src = scratchBuffer.buffer.getReadPointer (srcChan);
        float dest[5 + 2];   // throw-away output, only used to advance the interpolator state
        const float gain = gains[channel & 1];

        // Stoke interpolator with pre-read samples and set next read position
        juce::LagrangeInterpolator& li = resampler[channel];
        li.reset();
        li.processAdding (1.0, src, dest, preRead - 1, gain);
        li.processAdding (subSamplePos, src + preRead - 1, dest, 1, gain);
    }

    if (ratio > 0.0)
    {
        for (int channel = rc.destBuffer->getNumChannels(); --channel >= 0;)
        {
            const int srcChan = juce::jmin (channel, scratchBuffer.buffer.getNumChannels() - 1);
            const float* const src = scratchBuffer.buffer.getReadPointer (srcChan, preRead);
            float* const dest = rc.destBuffer->getWritePointer (channel, rc.bufferStartSample);

            resampler[channel].processAdding (ratio, src, dest, rc.bufferNumSamples, gains[channel & 1]);

            if (lastSampleFadeLength > 0)
            {
                // Short linear crossfade from the previous block's final sample
                // to hide discontinuities after jumps or failed reads.
                const int fadeSamps = juce::jmin (lastSampleFadeLength, rc.bufferNumSamples);

                for (int i = 0; i < fadeSamps; ++i)
                {
                    const float alpha = i / (float) fadeSamps;
                    dest[i] = alpha * dest[i] + lastSample[channel] * (1.0f - alpha);
                }
            }

            lastSample[channel] = dest[rc.bufferNumSamples - 1];
        }
    }
}
210
Engine& engine;   // engine whose audio-file cache supplies readers

private:
    //==============================================================================
    legacy::EditTimeRange editPosition, loopSection;   // clip's range in the edit; loop range pre-scaled by speed in the ctor
    double offset;                                     // source-material offset (seconds)
    double originalSpeedRatio, outputSampleRate = 44100.0;

    AudioFile audioFile;                               // the file being played
    LiveClipLevel clipLevel;                           // thread-safe gain/mute/pan accessor
    double audioFileSampleRate = 0;                    // 0 until updateFileSampleRate() succeeds
    juce::AudioChannelSet channelsToUse;               // channels requested from the file
    AudioFileCache::Reader::Ptr reader;                // cached reader; null once resources are released

    juce::LagrangeInterpolator resampler[8];           // one interpolator per output channel (max 8)
    float lastSample[8] = {};                          // previous block's final sample per channel, for crossfading
227
228 double editTimeToFileSample (double editTime) const noexcept
229 {
230 return (editTime - (editPosition.getStart() - offset)) * originalSpeedRatio * audioFileSampleRate;
231 }
232
233 bool updateFileSampleRate()
234 {
235 if (reader != nullptr)
236 {
237 audioFileSampleRate = reader->getSampleRate();
238
239 if (audioFileSampleRate > 0)
240 {
241 if (! loopSection.isEmpty())
242 reader->setLoopRange (SampleRange ((SampleCount) (loopSection.getStart() * audioFileSampleRate),
243 (SampleCount) (loopSection.getEnd() * audioFileSampleRate)));
244
245 return true;
246 }
247 }
248
249 return false;
250 }
251
252 void resetResamplers()
253 {
254 for (int i = 0; i < juce::numElementsInArray (resampler); ++i)
255 resampler[i].reset();
256
257 for (int i = 0; i < juce::numElementsInArray (lastSample); ++i)
258 lastSample[i] = 0.0f;
259 }
260
261 JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (SubSampleWaveAudioNode)
262};
263
264//==============================================================================
267{
268public:
272 AudioFadeCurve::Type fadeInType_,
273 AudioFadeCurve::Type fadeOutType_)
274 : SingleInputAudioNode (source),
275 fadeIn (in), fadeOut (out),
276 fadeInType (fadeInType_),
277 fadeOutType (fadeOutType_)
278 {
279 jassert (! (fadeIn.isEmpty() && fadeOut.isEmpty()));
280 }
281
282 //==============================================================================
283 void renderOver (const AudioRenderContext& rc) override
284 {
285 if (renderingNeeded (rc))
286 invokeSplitRender (rc, *this);
287 else
288 input->renderOver (rc);
289 }
290
291 void renderAdding (const AudioRenderContext& rc) override
292 {
293 if (renderingNeeded (rc))
294 callRenderOver (rc);
295 else
296 input->renderAdding (rc);
297 }
298
/** Renders one contiguous section of edit time; if it touches both the
    ramp-in and ramp-out ranges, it is split so each is rendered with its
    own curve.
*/
void renderSection (const AudioRenderContext& rc, legacy::EditTimeRange editTime)
{
    const bool intersectsFadeIn = fadeIn.getLength() > 0.0 && editTime.overlaps (fadeIn);
    const bool intersectsFadeOut = fadeOut.getLength() > 0.0 && editTime.overlaps (fadeOut);

    if (intersectsFadeIn && intersectsFadeOut)
    {
        auto startSample = rc.bufferStartSample;
        auto numSamples = rc.bufferNumSamples;
        auto sampleRate = numSamples / editTime.getLength();   // samples per second of edit time
        auto streamTime = rc.streamTime;

        {
            // Everything up to the start of the ramp-out is treated as ramp-in.
            legacy::EditTimeRange fadeInSection (editTime.getStart(), fadeOut.getStart());
            auto fadeInTime = fadeInSection.getLength();

            AudioRenderContext rc2 (rc);
            rc2.streamTime = rc.streamTime.withLength (fadeInTime);
            rc2.bufferNumSamples = juce::roundToInt (fadeInTime * sampleRate);
            renderRampSection (rc2, fadeInSection, fadeIn, true);

            numSamples -= rc2.bufferNumSamples;
            startSample += rc2.bufferNumSamples;
            streamTime.start = rc2.streamTime.getEnd();
        }

        {
            // The remainder of the block gets the ramp-out.
            AudioRenderContext rc2 (rc);
            rc2.streamTime = streamTime;
            rc2.bufferNumSamples = numSamples;
            rc2.bufferStartSample = startSample;
            startSample += rc2.bufferNumSamples;

            renderRampSection (rc2, fadeOut.getIntersectionWith (editTime), fadeOut, false);
        }

        // The two halves must exactly cover the destination buffer.
        juce::ignoreUnused (startSample);
        jassert (startSample == (rc.bufferNumSamples + rc.bufferStartSample));
    }
    else if (intersectsFadeIn)
    {
        renderRampSection (rc, editTime, fadeIn, true);
    }
    else if (intersectsFadeOut)
    {
        renderRampSection (rc, editTime, fadeOut, false);
    }
}
347
348private:
349 //==============================================================================
legacy::EditTimeRange fadeIn, fadeOut;          // edit-time ranges over which the speed ramps apply
AudioFadeCurve::Type fadeInType, fadeOutType;   // curve shape used for each ramp
352
353 bool renderingNeeded (const AudioRenderContext& rc) const
354 {
355 if (rc.destBuffer == nullptr || ! rc.playhead.isPlaying())
356 return false;
357
358 auto editTime = rc.getEditTime();
359
360 if (editTime.isSplit)
361 return fadeIn.overlaps (editTime.editRange1)
362 || fadeIn.overlaps (editTime.editRange2)
363 || fadeOut.overlaps (editTime.editRange1)
364 || fadeOut.overlaps (editTime.editRange2);
365
366 return fadeIn.overlaps (editTime.editRange1)
367 || fadeOut.overlaps (editTime.editRange1);
368 }
369
370 void renderRampSection (const AudioRenderContext& rc, legacy::EditTimeRange editTime,
371 legacy::EditTimeRange fade, bool rampUp)
372 {
373 auto startSample = rc.bufferStartSample;
374 auto sampleRate = rc.bufferNumSamples / editTime.getLength();
375 auto timeBefore = fade.getStart() - editTime.getStart();
376
377 if (timeBefore > 0.0)
378 {
379 auto numSamples = juce::roundToInt (timeBefore * sampleRate);
380
381 if (numSamples > 0)
382 {
383 AudioRenderContext rc2 (rc);
384 rc2.streamTime = rc.streamTime.withLength (timeBefore);
385 rc2.bufferNumSamples = numSamples;
386 startSample += numSamples;
387
388 input->renderOver (rc2);
389 }
390 }
391
392 auto editTimeIntersection = fade.getIntersectionWith (editTime);
393
394 if (editTimeIntersection.getLength() > 0.0)
395 {
396 auto startAlpha = (editTimeIntersection.getStart() - fade.getStart()) / fade.getLength();
397 auto endAlpha = (editTimeIntersection.getEnd() - fade.getStart()) / fade.getLength();
398
399 const AudioFadeCurve::Type t = rampUp ? fadeInType : fadeOutType;
400 auto startProp = rescale (t, startAlpha, rampUp);
401 auto endProp = rescale (t, endAlpha, rampUp);
402
404 && juce::isPositiveAndNotGreaterThan (endProp, 1.0));
405
406 legacy::EditTimeRange newEditTime (fade.getStart() + fade.getLength() * startProp,
407 fade.getStart() + fade.getLength() * endProp);
408
409 auto numSamples = juce::roundToInt (editTimeIntersection.getLength() * sampleRate);
410 auto streamDiff = rc.streamTime.getStart() - editTime.getStart();
411 AudioRenderContext rc2 (rc);
412 rc2.streamTime = newEditTime + streamDiff;
413 rc2.bufferNumSamples = numSamples;
414 rc2.bufferStartSample = startSample;
415 startSample += numSamples;
416
417 input->renderOver (rc2);
418 }
419
420 auto timeAfter = editTime.getEnd() - fade.getEnd();
421
422 if (timeAfter > 0.0)
423 {
424 auto numSamples = juce::roundToInt (timeAfter * sampleRate);
425
426 if (numSamples > 0)
427 {
428 AudioRenderContext rc2 (rc);
429 rc2.streamTime = { rc.streamTime.getEnd() - timeAfter, rc.streamTime.getEnd() };
430 rc2.bufferNumSamples = numSamples;
431 rc2.bufferStartSample = startSample;
432 startSample += numSamples;
433
434 input->renderOver (rc2);
435 }
436 }
437
438 juce::ignoreUnused (startSample);
439 jassert (startSample == (rc.bufferNumSamples + rc.bufferStartSample));
440 }
441
442 static double rescale (AudioFadeCurve::Type t, double proportion, bool rampUp)
443 {
444 switch (t)
445 {
446 case AudioFadeCurve::convex:
447 return rampUp ? (-2.0 * std::cos ((juce::MathConstants<double>::pi * proportion) / 2.0)) / juce::MathConstants<double>::pi + 1.0
448 : 1.0 - ((-2.0 * std::cos ((juce::MathConstants<double>::pi * (proportion - 1.0)) / 2.0)) / juce::MathConstants<double>::pi + 1.0);
449
450 case AudioFadeCurve::concave:
451 return rampUp ? proportion - (2.0 * std::sin ((juce::MathConstants<double>::pi * proportion) / 2.0)) / juce::MathConstants<double>::pi + (2.0 / juce::MathConstants<double>::pi)
452 : ((2.0 * std::sin ((juce::MathConstants<double>::pi * (proportion + 1.0)) / 2.0)) / juce::MathConstants<double>::pi) + proportion - (2.0 / juce::MathConstants<double>::pi);
453
454 case AudioFadeCurve::sCurve:
455 return rampUp ? (proportion / 2.0) - (std::sin (juce::MathConstants<double>::pi * proportion) / (2.0 * juce::MathConstants<double>::pi)) + 0.5
456 : std::sin (juce::MathConstants<double>::pi * proportion) / (2.0 * juce::MathConstants<double>::pi) + (proportion / 2.0);
457
458 case AudioFadeCurve::linear:
459 default:
460 return rampUp ? (juce::square (proportion) * 0.5) + 0.5
461 : ((-juce::square (proportion - 1.0)) * 0.5) + 0.5;
462 }
463 }
464
466};
467
468}} // namespace tracktion { inline namespace engine
T ceil(T... args)
int size() const noexcept
static AudioChannelSet JUCE_CALLTYPE canonicalChannelSet(int numChannels)
Reader::Ptr createReader(const AudioFile &)
Creates a Reader to read an AudioFile.
Base class for nodes in an audio playback graph.
The Engine is the central class for all tracktion sessions.
AudioFileManager & getAudioFileManager() const
Returns the AudioFileManager instance.
An AudioNode that speeds up and slows down its input node in/out at given times.
bool purgeSubNodes(bool keepAudio, bool) override
Tells the node to delete any sub-nodes that don't produce the required type of output.
void prepareAudioNodeToPlay(const PlaybackInitialisationInfo &info) override
tells the node to initialise itself ready for playing from the given time.
void releaseAudioNodeResources() override
tells the node that play has stopped, and it can free up anything it no longer needs.
T cos(T... args)
#define jassert(expression)
#define JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(className)
typedef int
typedef float
constexpr Type jmin(Type a, Type b)
Type jlimit(Type lowerLimit, Type upperLimit, Type valueToConstrain) noexcept
constexpr NumericType square(NumericType n) noexcept
void ignoreUnused(Types &&...) noexcept
bool isPositiveAndNotGreaterThan(Type1 valueToTest, Type2 upperLimit) noexcept
int roundToInt(const FloatType value) noexcept
constexpr int numElementsInArray(Type(&)[N]) noexcept
Interpolators::Lagrange LagrangeInterpolator
Holds some really basic properties of a node.
Passed into AudioNodes when they are being initialised, to give them useful contextual information th...
T sin(T... args)
Type
A enumeration of the curve classes available.
int bufferNumSamples
The number of samples to write into the audio buffer.
int bufferStartSample
The index of the start point in the audio buffer from which data must be written.
PlayHead & playhead
The playhead provides information about current time, tempo etc at the block being rendered.
juce::AudioBuffer< float > * destBuffer
The target audio buffer which needs to be filled.
PlayHead::EditTimeWindow getEditTime() const
Returns the section of the edit that needs to be rendered by this block.
legacy::EditTimeRange streamTime
The time window which needs to be rendered into the current block.
Provides a thread-safe way to share a clip's levels with an audio engine without worrying about the C...
float getGainIncludingMute() const noexcept
Returns the clip's gain if the clip is not muted.
void getLeftAndRightGains(float &left, float &right) const noexcept
Returns the left and right gains, taking into account mute and pan values.