tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Anklang Documentation
Loading...
Searching...
No Matches
tracktion_SpeedRampWaveNode.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11namespace tracktion { inline namespace engine
12{
13
14
15//==============================================================================
16//==============================================================================
18{
19 PerChannelState() { resampler.reset(); }
20
22 float lastSample = 0;
23};
24
25
26//==============================================================================
28 TimeRange editTime,
29 TimeDuration off,
30 TimeRange loop,
31 LiveClipLevel level,
32 double speed,
33 const juce::AudioChannelSet& channelSetToUse,
34 const juce::AudioChannelSet& destChannelsToFill,
35 ProcessState& ps,
36 EditItemID itemIDToUse,
37 bool isRendering,
38 SpeedFadeDescription speedFadeDescriptionToUse)
40 editPosition (editTime),
41 loopSection (TimePosition::fromSeconds (loop.getStart().inSeconds() * speed),
42 TimePosition::fromSeconds (loop.getEnd().inSeconds() * speed)),
43 offset (off),
44 originalSpeedRatio (speed),
45 editItemID (itemIDToUse),
46 isOfflineRender (isRendering),
47 speedFadeDescription (speedFadeDescriptionToUse),
48 audioFile (af),
49 clipLevel (level),
50 channelsToUse (channelSetToUse),
51 destChannels (destChannelsToFill)
52{
53 // Both ramp times should not be empty!
54 assert ((! speedFadeDescription.inTimeRange.isEmpty())
55 || (! speedFadeDescription.outTimeRange.isEmpty()));
56}
57
59{
61 props.hasAudio = true;
62 props.hasMidi = false;
63 props.numberOfChannels = destChannels.size();
64 props.nodeID = (size_t) editItemID.getRawID();
65
66 return props;
67}
68
{
    // Open a cached reader for the source file and capture the playback
    // rates/positions that the per-block process calls will need.
    reader = audioFile.engine->getAudioFileManager().cache.createReader (audioFile);
    outputSampleRate = info.sampleRate;
    editPositionInSamples = tracktion::toSamples ({ editPosition.getStart(), editPosition.getEnd() }, outputSampleRate);
    updateFileSampleRate();

    channelState.clear();

    // Allocate one resampler/fade state per channel — enough for whichever is
    // larger: the requested source channel set or the channels the file has.
    if (reader != nullptr)
        for (int i = std::max (channelsToUse.size(), reader->getNumChannels()); --i >= 0;)
            channelState.add (new PerChannelState());
}
82
{
    // Only check this whilst rendering or it will block whilst the proxies are being created
    if (! isOfflineRender)
        return true;

    // If the hash is 0 it means an empty file path which means a missing file so
    // this will never return a valid reader and we should just bail
    if (audioFile.isNull())
        return true;

    // Lazily create the reader; report not-ready until the cache provides one.
    if (reader == nullptr)
    {
        reader = audioFile.engine->getAudioFileManager().cache.createReader (audioFile);

        if (reader == nullptr)
            return false;
    }

    // A sample rate of 0 means the file hasn't been successfully queried yet.
    if (audioFileSampleRate == 0.0 && ! updateFileSampleRate())
        return false;

    return true;
}
107
{
    SCOPED_REALTIME_CHECK
    // The graph must be running at the rate we prepared for in prepareToPlay.
    assert (outputSampleRate == getSampleRate());

    //TODO: Might get a performance boost by pre-setting the file position in prepareForNextBlock
    processSection (pc, getTimelineSampleRange());
}
116
117//==============================================================================
int64_t SpeedRampWaveNode::editTimeToFileSample (TimePosition editTime) const noexcept
{
    // Maps an edit-timeline position through the speed-ramp fades to the
    // corresponding source-file sample index.

    // Pin the time to the overall fade window so anything outside the ramps
    // maps to the warped position at the nearest boundary.
    editTime = juce::jlimit (speedFadeDescription.inTimeRange.getStart(),
                             speedFadeDescription.outTimeRange.getEnd(),
                             editTime);

    if (! speedFadeDescription.inTimeRange.isEmpty()
        && speedFadeDescription.inTimeRange.containsInclusive (editTime))
    {
        // Inside the speed-up (fade-in) ramp: warp the linear proportion of
        // the fade through the curve, then map back to an edit time.
        const auto timeFromStart = editTime - speedFadeDescription.inTimeRange.getStart();
        const double proportionOfFade = timeFromStart.inSeconds() / speedFadeDescription.inTimeRange.getLength().inSeconds();
        const double rescaledProportion = rescale (speedFadeDescription.fadeInType, proportionOfFade, true);

        editTime = speedFadeDescription.inTimeRange.getStart()
                    + TimeDuration::fromSeconds (rescaledProportion * speedFadeDescription.inTimeRange.getLength().inSeconds());

        jassert (speedFadeDescription.inTimeRange.containsInclusive (editTime));
    }
    else if (! speedFadeDescription.outTimeRange.isEmpty()
             && speedFadeDescription.outTimeRange.containsInclusive (editTime))
    {
        // Inside the slow-down (fade-out) ramp: same warp with the ramp-down
        // variant of the curve.
        const auto timeFromStart = editTime - speedFadeDescription.outTimeRange.getStart();
        const double proportionOfFade = timeFromStart.inSeconds() / speedFadeDescription.outTimeRange.getLength().inSeconds();
        const double rescaledProportion = rescale (speedFadeDescription.fadeOutType, proportionOfFade, false);

        editTime = speedFadeDescription.outTimeRange.getStart()
                    + TimeDuration::fromSeconds (rescaledProportion * speedFadeDescription.outTimeRange.getLength().inSeconds());

        jassert (speedFadeDescription.outTimeRange.containsInclusive (editTime));
    }

    // Convert the (possibly warped) edit time to a file sample index: shift
    // to clip-local time (including the start offset), scale by the clip's
    // speed ratio and the file's sample rate, rounding to nearest.
    // NOTE(review): the +0.5 rounding assumes a non-negative result — confirm
    // callers never produce a time before (clip start - offset).
    return (int64_t) ((editTime - (editPosition.getStart() - offset)).inSeconds()
                       * originalSpeedRatio * audioFileSampleRate + 0.5);
}
152
153bool SpeedRampWaveNode::updateFileSampleRate()
154{
155 using namespace tracktion::graph;
156
157 if (reader == nullptr)
158 return false;
159
160 audioFileSampleRate = reader->getSampleRate();
161
162 if (audioFileSampleRate <= 0)
163 return false;
164
165 if (! loopSection.isEmpty())
166 reader->setLoopRange ({ tracktion::toSamples (loopSection.getStart(), audioFileSampleRate),
167 tracktion::toSamples (loopSection.getEnd(), audioFileSampleRate) });
168
169 return true;
170}
171
void SpeedRampWaveNode::processSection (ProcessContext& pc, juce::Range<int64_t> timelineRange)
{
    // Renders one block of the speed-ramped clip: maps the block's timeline
    // range through the ramp to a range of file samples, reads them, then
    // resamples that section to fit the output block, applying clip gain/pan
    // and short fades to mask discontinuities.
    const auto sectionEditTime = tracktion::timeRangeFromSamples (timelineRange, outputSampleRate);

    // Nothing to do if the block doesn't intersect the clip's edit range.
    if (reader == nullptr
         || sectionEditTime.getEnd() <= editPosition.getStart()
         || sectionEditTime.getStart() >= editPosition.getEnd())
        return;

    SCOPED_REALTIME_CHECK

    if (audioFileSampleRate == 0.0 && ! updateFileSampleRate())
        return;

    // Map the block's edit-time boundaries through the speed ramp to find
    // which file samples this block covers.
    const auto fileStart = editTimeToFileSample (sectionEditTime.getStart());
    const auto fileEnd = editTimeToFileSample (sectionEditTime.getEnd());
    const auto numFileSamples = (int) (fileEnd - fileStart);

    // Too few source samples means the ramp has slowed playback to (near)
    // zero; flag the block as unplayed so the next audible block fades in.
    if (numFileSamples <= 3)
    {
        playedLastBlock = false;
        return;
    }

    reader->setReadPosition (fileStart);

    auto destBuffer = pc.buffers.audio;
    auto numSamples = destBuffer.getNumFrames();
    const auto destBufferChannels = juce::AudioChannelSet::canonicalChannelSet ((int) destBuffer.getNumChannels());
    auto numChannels = (choc::buffer::ChannelCount) destBufferChannels.size();
    assert (pc.buffers.audio.getNumChannels() == numChannels);

    // +2 extra source samples for the interpolator's lookahead.
    AudioScratchBuffer fileData ((int) numChannels, numFileSamples + 2);

    uint32_t lastSampleFadeLength = 0;

    {
        SCOPED_REALTIME_CHECK

        // NOTE(review): the last argument looks like a read timeout —
        // generous when rendering offline, tiny when live — confirm against
        // the cache reader's API.
        if (reader->readSamples (numFileSamples + 2, fileData.buffer, destBufferChannels, 0,
                                 channelsToUse,
                                 isOfflineRender ? 5000 : 3))
        {
            // The playhead jumped (and this isn't a loop wrap): crossfade
            // from the previous block's final sample to hide the click.
            if (! getPlayHeadState().isContiguousWithPreviousBlock() && ! getPlayHeadState().isFirstBlockOfLoop())
                lastSampleFadeLength = std::min (numSamples, getPlayHead().isUserDragging() ? 40u : 10u);
        }
        else
        {
            // Read failed (e.g. data not yet cached under real-time
            // constraints): output silence, faded from the previous sample.
            lastSampleFadeLength = std::min (numSamples, 40u);
            fileData.buffer.clear();
        }
    }

    float gains[2];

    // For stereo, use the pan, otherwise ignore it
    if (numChannels == 2)
        clipLevel.getLeftAndRightGains (gains[0], gains[1]);
    else
        gains[0] = gains[1] = clipLevel.getGainIncludingMute();

    // Duck the level while the user is scrubbing.
    if (getPlayHead().isUserDragging())
    {
        gains[0] *= 0.4f;
        gains[1] *= 0.4f;
    }

    // Resampling ratio: file samples consumed per output sample.
    auto ratio = numFileSamples / (double) numSamples;

    if (ratio <= 0.0)
        return;

    jassert ((int) numChannels <= channelState.size()); // this should always have been made big enough

    for (choc::buffer::ChannelCount channel = 0; channel < numChannels; ++channel)
    {
        if (channel < (choc::buffer::ChannelCount) channelState.size())
        {
            const auto src = fileData.buffer.getReadPointer ((int) channel);
            const auto dest = destBuffer.getChannel (channel).data.data;

            auto& state = *channelState.getUnchecked ((int) channel);
            state.resampler.processAdding (ratio, src, dest, (int) numSamples, gains[channel & 1]);

            // Short linear crossfade from the last sample of the previous
            // block to mask any discontinuity.
            if (lastSampleFadeLength > 0)
            {
                for (uint32_t i = 0; i < lastSampleFadeLength; ++i)
                {
                    auto alpha = i / (float) lastSampleFadeLength;
                    dest[i] = alpha * dest[i] + state.lastSample * (1.0f - alpha);
                }
            }

            // Remember the final sample for next block's crossfade.
            state.lastSample = dest[numSamples - 1];
        }
        else
        {
            // No per-channel state allocated for this channel: emit silence.
            destBuffer.getChannel (channel).clear();
        }
    }

    // If the ratio goes below 0.05, this will be too low to hear so fade out the block if it was played
    // If this is the first block, fade it in
    if (ratio < 0.05)
    {
        if (! playedLastBlock)
        {
            destBuffer.clear();
            return;
        }

        auto bufferRef = tracktion::graph::toAudioBuffer (destBuffer);
        bufferRef.applyGainRamp (0, bufferRef.getNumSamples(),
                                 1.0f, 0.0f);

        playedLastBlock = false;
        return;
    }
    else
    {
        if (! playedLastBlock)
        {
            auto bufferRef = tracktion::graph::toAudioBuffer (destBuffer);
            bufferRef.applyGainRamp (0, bufferRef.getNumSamples(),
                                     0.0f, 1.0f);
        }

        playedLastBlock = true;
    }


    // Silence any samples before or after our edit time range
    // N.B. this shouldn't happen when using a clip combiner as the times should be clipped correctly
    {
        auto numSamplesToClearAtStart = editPositionInSamples.getStart() - timelineRange.getStart();
        auto numSamplesToClearAtEnd = timelineRange.getEnd() - editPositionInSamples.getEnd();

        if (numSamplesToClearAtStart > 0)
            destBuffer.getStart ((choc::buffer::FrameCount) numSamplesToClearAtStart).clear();

        if (numSamplesToClearAtEnd > 0)
            destBuffer.getEnd ((choc::buffer::FrameCount) numSamplesToClearAtEnd).clear();
    }
}
316
317
318//==============================================================================
319double SpeedRampWaveNode::rescale (AudioFadeCurve::Type t, double proportion, bool rampUp)
320{
321 switch (t)
322 {
323 case AudioFadeCurve::convex:
324 return rampUp ? (-2.0 * std::cos ((juce::MathConstants<double>::pi * proportion) / 2.0)) / juce::MathConstants<double>::pi + 1.0
325 : 1.0 - ((-2.0 * std::cos ((juce::MathConstants<double>::pi * (proportion - 1.0)) / 2.0)) / juce::MathConstants<double>::pi + 1.0);
326
327 case AudioFadeCurve::concave:
328 return rampUp ? proportion - (2.0 * std::sin ((juce::MathConstants<double>::pi * proportion) / 2.0)) / juce::MathConstants<double>::pi + (2.0 / juce::MathConstants<double>::pi)
329 : ((2.0 * std::sin ((juce::MathConstants<double>::pi * (proportion + 1.0)) / 2.0)) / juce::MathConstants<double>::pi) + proportion - (2.0 / juce::MathConstants<double>::pi);
330
331 case AudioFadeCurve::sCurve:
332 return rampUp ? (proportion / 2.0) - (std::sin (juce::MathConstants<double>::pi * proportion) / (2.0 * juce::MathConstants<double>::pi)) + 0.5
333 : std::sin (juce::MathConstants<double>::pi * proportion) / (2.0 * juce::MathConstants<double>::pi) + (proportion / 2.0);
334
335 case AudioFadeCurve::linear:
336 default:
337 return rampUp ? (juce::square (proportion) * 0.5) + 0.5
338 : ((-juce::square (proportion - 1.0)) * 0.5) + 0.5;
339 }
340}
341
342}} // namespace tracktion { inline namespace engine
assert
int size() const noexcept
static AudioChannelSet JUCE_CALLTYPE canonicalChannelSet(int numChannels)
constexpr ValueType getStart() const noexcept
constexpr ValueType getEnd() const noexcept
Reader::Ptr createReader(const AudioFile &)
Creates a Reader to read an AudioFile.
An audio scratch buffer that has pooled storage.
AudioFileManager & getAudioFileManager() const
Returns the AudioFileManager instance.
tracktion::graph::NodeProperties getNodeProperties() override
Should return the properties of the node.
bool isReadyToProcess() override
Should return true when this node is ready to be processed.
SpeedRampWaveNode(const AudioFile &, TimeRange editTime, TimeDuration offset, TimeRange loopSection, LiveClipLevel, double speedRatio, const juce::AudioChannelSet &sourceChannelsToUse, const juce::AudioChannelSet &destChannelsToFill, ProcessState &, EditItemID, bool isOfflineRender, SpeedFadeDescription)
offset is a time added to the start of the file, e.g.
void process(ProcessContext &) override
Called when the node is to be processed.
void prepareToPlay(const tracktion::graph::PlaybackInitialisationInfo &) override
Called once before playback begins for each node.
Base class for Nodes that provides information about the current process call.
juce::Range< int64_t > getTimelineSampleRange() const
Returns the timeline sample range of the current process block.
double getSampleRate() const
Returns the sample rate of the current process block.
tracktion::graph::PlayHeadState & getPlayHeadState()
Returns the PlayHeadState in use.
tracktion::graph::PlayHead & getPlayHead()
Returns the PlayHead in use.
Struct to describe a single iteration of a process call.
T cos(T... args)
cos
#define jassert(expression)
typedef int
typedef double
T max(T... args)
T min(T... args)
Type jlimit(Type lowerLimit, Type upperLimit, Type valueToConstrain) noexcept
constexpr NumericType square(NumericType n) noexcept
Interpolators::Lagrange LagrangeInterpolator
TimeRange timeRangeFromSamples(juce::Range< int64_t > sampleRange, double sampleRate)
Creates a TimeRange from a range of samples.
constexpr int64_t toSamples(TimePosition, double sampleRate)
Converts a TimePosition to a number of samples.
T sin(T... args)
sin
typedef int64_t
Represents a duration in real-life time.
Represents a position in real-life time.
Type
A enumeration of the curve classes available.
ID for objects of type EditElement - e.g.
Provides a thread-safe way to share a clip's levels with an audio engine without worrying about the C...
float getGainIncludingMute() const noexcept
Returns the clip's gain if the clip is not muted.
void getLeftAndRightGains(float &left, float &right) const noexcept
Reutrns the left and right gains taking in to account mute and pan values.
Holds the state of a process call.
Describes the time and type of the speed fade in/outs.
Holds some really basic properties of a node.
Passed into Nodes when they are being initialised, to give them useful contextual information that th...
typedef size_t