tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Tracktion Engine Documentation
Loading...
Searching...
No Matches
tracktion_DynamicOffsetNode.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11#include "tracktion_WaveNode.h"
12
13namespace tracktion { inline namespace engine
14{
15
16//==============================================================================
17//==============================================================================
18DynamicOffsetNode::DynamicOffsetNode (ProcessState& editProcessState,
19 EditItemID clipID,
20 BeatRange position,
21 BeatDuration offset,
22 BeatRange clipLoopRange,
24 : TracktionEngineNode (editProcessState),
25 containerClipID (clipID),
26 clipPosition (position),
27 loopRange (clipLoopRange),
28 clipOffset (offset),
29 tempoPosition (*getProcessState().getTempoSequence()),
30 localProcessState (editProcessState.playHeadState, *editProcessState.getTempoSequence()),
31 inputs (std::move (inputNodes))
32{
33 assert (getProcessState().getTempoSequence() != nullptr);
34 dynamicOffsetNodes.reserve (inputs.size());
35
36 for (auto& i : inputs)
37 {
38 for (auto n : transformNodes (*i))
39 {
40 orderedNodes.push_back (n);
41
42 if (n->getDirectInputNodes().empty())
43 leafNodes.push_back (n);
44
45 if (auto engineNode = dynamic_cast<TracktionEngineNode*> (n))
46 engineNode->setProcessState (localProcessState);
47
48 if (auto dynamicNode = dynamic_cast<DynamicallyOffsettableNodeBase*> (n))
49 dynamicOffsetNodes.push_back (dynamicNode);
50 }
51 }
52}
53
54void DynamicOffsetNode::setDynamicOffsetBeats (BeatDuration newOffset)
55{
56 if (juce::approximatelyEqual (dynamicOffsetBeats->inBeats(), newOffset.inBeats()))
57 return;
58
59 (*dynamicOffsetBeats) = newOffset;
60}
61
62//==============================================================================
63tracktion::graph::NodeProperties DynamicOffsetNode::getNodeProperties()
64{
65 NodeProperties props;
66 props.hasAudio = false;
67 props.hasMidi = false;
68 props.numberOfChannels = 0;
69
70 for (auto& node : inputs)
71 {
72 auto nodeProps = node->getNodeProperties();
73 props.hasAudio = props.hasAudio || nodeProps.hasAudio;
74 props.hasMidi = props.hasMidi || nodeProps.hasMidi;
75 props.numberOfChannels = std::max (props.numberOfChannels, nodeProps.numberOfChannels);
76 props.latencyNumSamples = std::max (props.latencyNumSamples, nodeProps.latencyNumSamples);
77 hash_combine (props.nodeID, nodeProps.nodeID);
78 }
79
80 props.nodeID = 0;
81
82 // Calculated from hashing a string view of "DynamicOffsetNode"
83 const auto hashSalt = 8507534508343435306;
84 hash_combine (props.nodeID, hashSalt);
85 hash_combine (props.nodeID, containerClipID.getRawID());
86
87 return props;
88}
89
90std::vector<tracktion::graph::Node*> DynamicOffsetNode::getDirectInputNodes()
91{
92 return {};
93}
94
95std::vector<Node*> DynamicOffsetNode::getInternalNodes()
96{
97 return orderedNodes;
98}
99
100void DynamicOffsetNode::prepareToPlay (const tracktion::graph::PlaybackInitialisationInfo& info)
101{
102 auto info2 = info;
103 info2.allocateAudioBuffer = {};
104 info2.deallocateAudioBuffer = {};
105
106 for (auto& i : orderedNodes)
107 i->initialise (info2);
108}
109
110bool DynamicOffsetNode::isReadyToProcess()
111{
112 for (auto& i : leafNodes)
113 if (! i->isReadyToProcess())
114 return false;
115
116 return true;
117}
118
119void DynamicOffsetNode::prefetchBlock (juce::Range<int64_t> referenceSampleRange)
120{
121 for (auto& node : orderedNodes)
122 node->prepareForNextBlock (referenceSampleRange);
123}
124
125void DynamicOffsetNode::process (ProcessContext& pc)
126{
127 const auto dynamicOffset = *dynamicOffsetBeats;
128 const auto sectionEditBeatRange = getEditBeatRange();
129 const auto sectionEditSampleRange = getTimelineSampleRange();
130
131 if (sectionEditBeatRange.getEnd() <= (clipPosition.getStart() + dynamicOffset)
132 || sectionEditBeatRange.getStart() >= (clipPosition.getEnd() + dynamicOffset))
133 return;
134
135 const auto editStartBeatOfLocalTimeline = clipPosition.getStart() + dynamicOffset - getOffset();
136
137 auto section1 = sectionEditBeatRange;
139
140 if (! loopRange.isEmpty())
141 {
142 const auto playbackStartBeatRelativeToClip = sectionEditBeatRange.getStart() - editStartBeatOfLocalTimeline;
143 const auto loopIteration = loopRange.isEmpty() ? 0
144 : static_cast<int> (playbackStartBeatRelativeToClip.inBeats() / loopRange.getLength().inBeats());
145 const auto loopEndBeat = editStartBeatOfLocalTimeline + (loopRange.getLength() * (loopIteration + 1));
146
147 if (loopEndBeat > sectionEditBeatRange.getStart()
148 && loopEndBeat < sectionEditBeatRange.getEnd())
149 {
150 section1 = sectionEditBeatRange.withEnd (loopEndBeat);
151 section2 = sectionEditBeatRange.withStart (section1.getEnd());
152
153 assert (juce::approximatelyEqual (section1.getLength().inBeats() + section2->getLength().inBeats(),
154 sectionEditBeatRange.getLength().inBeats()));
155 assert (juce::approximatelyEqual (section1.getStart().inBeats(), sectionEditBeatRange.getStart().inBeats()));
156 assert (juce::approximatelyEqual (section2->getEnd().inBeats(), sectionEditBeatRange.getEnd().inBeats()));
157 }
158 }
159
160 // Process the two possible sections
161 // N.B. Processing in two sections won't work as the WaveNodeRealTime's TracktionEngineNode base
162 // which references the same ProcessState as everything else will have been updated with the
163 // whole referenceSampleRange for the block. This means you could get up to a block's worth
164 // of audio past the end of the ContainerClip's loop end
165 // There's two potential solutions to this:
166 // 1. Use a local ProcessState for the dynamic offset Node and update it
167 // each DynamicOffsetNode::processSection call (This is currently in place and in testing)
168 // 2. Convey a point of interest to the main Edit player so it chunks the whole buffer on a
169 // ContainerClip loop boundary
170 if (! section2)
171 {
172 processSection (pc, section1);
173 }
174 else
175 {
176 assert (section2->getLength() > 0_bd);
177 const juce::NormalisableRange blockRangeBeats (sectionEditBeatRange.getStart().inBeats(),
178 sectionEditBeatRange.getEnd().inBeats());
179
180 auto processSubSection = [this, &pc, &blockRangeBeats] (auto section)
181 {
182 const auto proportion = juce::Range (blockRangeBeats.convertTo0to1 (section.getStart().inBeats()),
183 blockRangeBeats.convertTo0to1 (section.getEnd().inBeats()));
184 const auto startFrame = (choc::buffer::FrameCount) std::llround (proportion.getStart() * pc.numSamples);
185 const auto endFrame = (choc::buffer::FrameCount) std::llround (proportion.getEnd() * pc.numSamples);
186
187 const auto sectionNumFrames = endFrame - startFrame;
188
189 if (sectionNumFrames == 0)
190 return;
191
192 const auto numRefSamples = pc.referenceSampleRange.getLength();
193 const auto startRefSample = pc.referenceSampleRange.getStart() + (int64_t) std::llround (proportion.getStart() * numRefSamples);
194 const auto endRefSample = pc.referenceSampleRange.getStart() + (int64_t) std::llround (proportion.getEnd() * numRefSamples);
195
196 const juce::Range subSectionReferenceSampleRange (startRefSample, endRefSample);
197
198 for (auto& node : orderedNodes)
199 node->prepareForNextBlock (subSectionReferenceSampleRange);
200
201 auto sectionBufferView = pc.buffers.audio.getFrameRange ({ startFrame, endFrame });
202 ProcessContext subSection {
203 sectionNumFrames, subSectionReferenceSampleRange,
204 { sectionBufferView, pc.buffers.midi }
205 };
206 processSection (subSection, section);
207 };
208
209 processSubSection (section1);
210 processSubSection (*section2);
211 }
212
213 // Silence any samples before or after our edit time range
214 {
215 const TimeRange clipTimeRange (tempoPosition.set (clipPosition.getStart() + dynamicOffset),
216 tempoPosition.set (clipPosition.getEnd() + dynamicOffset));
217 const auto editPositionInSamples = toSamples ({ clipTimeRange.getStart(), clipTimeRange.getEnd() }, getSampleRate());
218
219 const auto destBuffer = pc.buffers.audio;
220 auto numSamplesToClearAtStart = std::min (editPositionInSamples.getStart() - sectionEditSampleRange.getStart(), (SampleCount) destBuffer.getNumFrames());
221 auto numSamplesToClearAtEnd = std::min (sectionEditSampleRange.getEnd() - editPositionInSamples.getEnd(), (SampleCount) destBuffer.getNumFrames());
222
223 if (numSamplesToClearAtStart > 0)
224 destBuffer.getStart ((choc::buffer::FrameCount) numSamplesToClearAtStart).clear();
225
226 if (numSamplesToClearAtEnd > 0)
227 destBuffer.getEnd ((choc::buffer::FrameCount) numSamplesToClearAtEnd).clear();
228 }
229}
230
231//==============================================================================
232BeatDuration DynamicOffsetNode::getOffset() const
233{
234 return clipOffset - *dynamicOffsetBeats;
235}
236
237void DynamicOffsetNode::processSection (ProcessContext& pc, BeatRange sectionRange)
238{
239 const auto dynamicOffset = *dynamicOffsetBeats;
240 const auto editStartBeatOfLocalTimeline = clipPosition.getStart() + dynamicOffset - getOffset();
241
242 const auto playbackStartBeatRelativeToClip = sectionRange.getStart() - editStartBeatOfLocalTimeline;
243 const auto loopIteration = loopRange.isEmpty() ? 0
244 : static_cast<int> (playbackStartBeatRelativeToClip.inBeats() / loopRange.getLength().inBeats());
245 const auto loopIterationOffset = loopRange.getLength() * loopIteration;
246 const auto dynamicOffsetBeatsForChildNodes = toDuration (editStartBeatOfLocalTimeline) + loopIterationOffset - toDuration (loopRange.getStart());
247
248 const auto offsetStartTime = tempoPosition.set (editStartBeatOfLocalTimeline);
249 const auto offsetEndTime = tempoPosition.add (dynamicOffsetBeatsForChildNodes);
250 const auto dynamicOffsetTimeForChildNodes = offsetEndTime - offsetStartTime;
251
252 localProcessState.setPlaybackSpeedRatio (getPlaybackSpeedRatio());
253 localProcessState.update (getSampleRate(), pc.referenceSampleRange,
254 ProcessState::UpdateContinuityFlags::no);
255
256 // Update the offset for compatible Nodes
257 for (auto n : dynamicOffsetNodes)
258 {
259 n->setDynamicOffsetBeats (dynamicOffsetBeatsForChildNodes);
260 n->setDynamicOffsetTime (dynamicOffsetTimeForChildNodes);
261 }
262
263 // Process ordered Nodes
264 {
265 for (auto& node : orderedNodes)
266 node->process (pc.numSamples, pc.referenceSampleRange);
267 }
268
269 // Get output from root Nodes
270 {
271 const auto numChannels = pc.buffers.audio.getNumChannels();
272 int nodesWithMidi = pc.buffers.midi.isEmpty() ? 0 : 1;
273
274 // Get each of the inputs and add them to dest
275 for (auto& node : inputs)
276 {
277 auto inputFromNode = node->getProcessedOutput();
278
279 if (auto numChannelsToAdd = std::min (inputFromNode.audio.getNumChannels(), numChannels))
280 add (pc.buffers.audio.getFirstChannels (numChannelsToAdd),
281 inputFromNode.audio.getFirstChannels (numChannelsToAdd));
282
283 if (inputFromNode.midi.isNotEmpty())
284 nodesWithMidi++;
285
286 pc.buffers.midi.mergeFrom (inputFromNode.midi);
287 }
288
289 if (nodesWithMidi > 1)
290 pc.buffers.midi.sortByTimestamp();
291 }
292}
293
294}} // namespace tracktion { inline namespace engine
assert
ValueType convertTo0to1(ValueType v) const noexcept
constexpr ValueType getStart() const noexcept
constexpr ValueType getLength() const noexcept
Struct to describe a single iteration of a process call.
T max(T... args)
T min(T... args)
T move(T... args)
constexpr bool approximatelyEqual(Type a, Type b, Tolerance< Type > tolerance=Tolerance< Type >{} .withAbsolute(std::numeric_limits< Type >::min()) .withRelative(std::numeric_limits< Type >::epsilon()))
T llround(T... args)
typedef int64_t
Represents a duration in beats.
constexpr double inBeats() const
Returns the position as a number of beats.
Holds some really basic properties of a node.
Passed into Nodes when they are being initialised, to give them useful contextual information that th...