tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Anklang Documentation
Loading...
Searching...
No Matches
tracktion_CombiningAudioNode.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11namespace tracktion { inline namespace engine
12{
13
// How much extra time to give a track before it gets cut off - to allow for
// plugins that ring on.
// NOTE(review): not referenced anywhere in the code visible here; presumably
// used by the lines lost from this extraction - confirm against upstream.
static constexpr double decayTimeAllowance = 5.0;

// Inputs are bucketed into fixed-width time groups for fast lookup by time.
static constexpr int secondsPerGroup = 8;

/** Maps an edit time (in seconds) to the index of the group containing it.
    Negative times truncate towards zero, so callers clamp results to >= 0
    (see the std::max (0, ...) uses below).
*/
static inline int timeToGroupIndex (double t) noexcept
{
    return static_cast<int> (t) / secondsPerGroup;
}
23
24//==============================================================================
26{
    // Pairs an AudioNode with the edit-time range during which it is audible.
    // NOTE(review): the declarations of the `time` and `node` members referenced
    // here (original lines 30-32) were lost in this extraction - confirm against
    // the upstream file.
    TimedAudioNode (legacy::EditTimeRange t, AudioNode* n) : time (t), node (n)
    {
    }

    int lastBufferSize = 0;   // NOTE(review): not read or written in the code visible here
34
    // Renders this node's slice of the given block, trimming the render context
    // so the child node never produces output outside its own time range.
    void render (const AudioRenderContext& rc, legacy::EditTimeRange editTime) const
    {
        // Work on a local copy so the caller's context is left untouched.
        AudioRenderContext context (rc);

        // If the block runs past this node's end, shrink the stream-time window
        // and scale the sample count down proportionally.
        if (editTime.getEnd() > time.end)
        {
            auto newLength = time.end - editTime.getStart();
            context.streamTime.end = context.streamTime.start + newLength;
            context.bufferNumSamples = std::max (0, (int) (context.bufferNumSamples * newLength
                                                             / rc.streamTime.getLength()));
        }

        // If the node starts part-way through the block, skip the leading samples.
        auto amountToSkip = time.start - editTime.getStart();

        if (amountToSkip > 0)
        {
            // Convert the skipped time to samples, clamped to the block size.
            auto samplesToSkip = std::min (context.bufferNumSamples,
                                           (int) (context.bufferNumSamples * amountToSkip
                                                    / context.streamTime.getLength()));

            context.bufferStartSample += samplesToSkip;
            context.bufferNumSamples -= samplesToSkip;

            // MIDI events are offset in time rather than in samples.
            context.midiBufferOffset += amountToSkip;
            context.streamTime.start = context.streamTime.getStart() + amountToSkip;
            jassert (context.streamTime.start < context.streamTime.end);
        }

        // Render even when no audio samples remain if MIDI output is expected.
        if (context.bufferNumSamples > 0 || rc.bufferForMidiMessages != nullptr)
            node->renderAdding (context);
    }
66
68};
69
70//==============================================================================
71CombiningAudioNode::CombiningAudioNode() {}
72CombiningAudioNode::~CombiningAudioNode() {}
73
// NOTE(review): the signature line (original line 74) was lost in this text
// extraction; the documentation index gives it as:
//   void CombiningAudioNode::addInput (legacy::EditTimeRange time, AudioNode* inputNode)
// Registers a node to be played over the given edit-time range, taking
// ownership of it.
{
    if (inputNode == nullptr)
        return;

    // A node with an empty time range can never be heard: take ownership so it
    // gets deleted, then bail out.
    if (time.isEmpty())
    {
        std::unique_ptr<AudioNode> an (inputNode);
        return;
    }

    // NOTE(review): the declaration of `info` (an AudioNodeProperties, per
    // original line 85) was lost in this extraction - confirm upstream.
    info.hasAudio = false;
    info.hasMidi = false;
    info.numberOfChannels = 0;

    inputNode->getAudioNodeProperties (info);

    // Accumulate the combined capabilities of all the inputs added so far.
    hasAudio |= info.hasAudio;
    hasMidi |= info.hasMidi;

    maxNumberOfChannels = std::max (maxNumberOfChannels, info.numberOfChannels);

    // Keep `inputs` sorted by start time: find the first entry whose start is
    // at or after the new node's start.
    int i;
    for (i = 0; i < inputs.size(); ++i)
        if (inputs.getUnchecked(i)->time.start >= time.getStart())
            break;

    auto tan = inputs.insert (i, new TimedAudioNode (time, inputNode));

    // NOTE(review): original lines 103-104 are missing here in this
    // extraction - confirm against the upstream file.

    // add the node to any groups it's near to.
    // The half-group-plus-2-seconds margin registers the node in neighbouring
    // groups too, so lookups near group boundaries still find it.
    auto start = std::max (0, timeToGroupIndex (time.start - (secondsPerGroup / 2 + 2)));
    auto end = std::max (0, timeToGroupIndex (time.end + (secondsPerGroup / 2 + 2)));

    while (groups.size() <= end)
        groups.add (new juce::Array<TimedAudioNode*>());

    for (i = start; i <= end; ++i)
    {
        auto g = groups.getUnchecked(i);

        // Each group is also kept sorted by start time.
        int j;
        for (j = 0; j < g->size(); ++j)
            if (g->getUnchecked(j)->time.start >= time.start)
                break;

        jassert (tan != nullptr);
        g->insert (j, tan);
    }
}
126
127void CombiningAudioNode::clear()
128{
129 inputs.clearQuick (true);
130 groups.clearQuick (true);
131 hasAudio = false;
132 hasMidi = false;
133}
134
135void CombiningAudioNode::getAudioNodeProperties (AudioNodeProperties& info)
136{
137 info.hasAudio = hasAudio;
138 info.hasMidi = hasMidi;
139 info.numberOfChannels = maxNumberOfChannels;
140}
141
142void CombiningAudioNode::visitNodes (const VisitorFn& v)
143{
144 v (*this);
145
146 for (auto* n : inputs)
147 n->node->visitNodes (v);
148}
149
bool CombiningAudioNode::purgeSubNodes (bool keepAudio, bool keepMidi)
{
    // Iterate backwards because entries are removed while iterating.
    for (int i = inputs.size(); --i >= 0;)
    {
        auto input = inputs.getUnchecked(i);

        if (! input->node->purgeSubNodes (keepAudio, keepMidi))
        {
            // The child produces nothing we need: remove every group reference
            // first, then delete it from the owning list (inputs.remove frees it).
            for (int j = groups.size(); --j >= 0;)
                groups.getUnchecked(j)->removeAllInstancesOf (input);

            inputs.remove (i);
        }
    }

    // This node itself should be kept only while it still has inputs.
    return ! inputs.isEmpty();
}
167
// NOTE(review): the signature line (original line 168) was lost in this text
// extraction; the documentation index gives it as:
//   void CombiningAudioNode::prepareAudioNodeToPlay (const PlaybackInitialisationInfo& info)
{
    for (auto* n : inputs)
    {
        // Shift the start time so each child is initialised relative to the
        // point at which it begins playing.
        auto info2 = info;
        info2.startTime -= n->time.start;
        n->node->prepareAudioNodeToPlay (info2);
    }
}
177
178bool CombiningAudioNode::isReadyToRender()
179{
180 for (auto* n : inputs)
181 if (! n->node->isReadyToRender())
182 return false;
183
184 return true;
185}
186
// NOTE(review): the signature line (original line 187) was lost in this text
// extraction; the documentation index gives it as:
//   void CombiningAudioNode::releaseAudioNodeResources()
{
    // Forward the release to every input node.
    for (auto* n : inputs)
        n->node->releaseAudioNodeResources();
}
192
void CombiningAudioNode::renderOver (const AudioRenderContext& rc)
{
    // This node only knows how to mix additively, so "render over" delegates to
    // the base-class helper, which adapts renderAdding() to overwrite semantics
    // (presumably by clearing the destination first - confirm in AudioNode).
    callRenderAdding (rc);
}
197
198void CombiningAudioNode::renderAdding (const AudioRenderContext& rc)
199{
200 rc.sanityCheck();
201
202 if (hasAudio || hasMidi)
203 invokeSplitRender (rc, *this);
204}
205
206void CombiningAudioNode::renderSection (const AudioRenderContext& rc, legacy::EditTimeRange editTime)
207{
208 if (auto g = groups[timeToGroupIndex (editTime.getStart())])
209 {
210 for (auto tan : *g)
211 {
212 if (tan->time.end > editTime.getStart())
213 {
214 if (tan->time.start >= editTime.getEnd())
215 break;
216
217 tan->render (rc, editTime);
218 }
219 }
220 }
221}
222
223void CombiningAudioNode::prepareForNextBlock (const AudioRenderContext& rc)
224{
225 SCOPED_REALTIME_CHECK
226
227 auto time = rc.getEditTime().editRange1.getStart();
228 prefetchGroup (rc, time);
229
230 if (rc.playhead.isLooping() && time > rc.playhead.getLoopTimes().end - 4.0)
231 prefetchGroup (rc, rc.playhead.getLoopTimes().start);
232}
233
234void CombiningAudioNode::prefetchGroup (const AudioRenderContext& rc, const double time)
235{
236 if (auto g = groups[timeToGroupIndex (time)])
237 for (auto tan : *g)
238 tan->node->prepareForNextBlock (rc);
239}
240
241}} // namespace tracktion { inline namespace engine
Base class for nodes in an audio playback graph.
bool purgeSubNodes(bool keepAudio, bool keepMidi) override
Tells the node to delete any sub-nodes that don't produce the required type of output.
void addInput(legacy::EditTimeRange time, AudioNode *inputNode)
Adds an input node to be played.
void prepareAudioNodeToPlay(const PlaybackInitialisationInfo &) override
Tells the node to initialise itself, ready for playing from the given time.
void releaseAudioNodeResources() override
Tells the node that playback has stopped, so it can free anything it no longer needs.
static constexpr double maximumLength
The maximum length an Edit can be.
#define jassert(expression)
#define JUCE_DECLARE_NON_COPYABLE(className)
T max(T... args)
T min(T... args)
Holds some really basic properties of a node.
Passed into AudioNodes when they are being initialised, to give them useful contextual information th...
int bufferNumSamples
The number of samples to write into the audio buffer.
MidiMessageArray * bufferForMidiMessages
A buffer of MIDI events to process.
int bufferStartSample
The index of the start point in the audio buffer from which data must be written.
void sanityCheck() const
Does a quick check on the bounds of various values in the structure.
double midiBufferOffset
A time offset to add to the timestamp of any events in the MIDI buffer.
legacy::EditTimeRange streamTime
The time window which needs to be rendered into the current block.
tan
time