tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Anklang Documentation
Loading...
Searching...
No Matches
tracktion_EditNodeBuilder.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11
12// N.B. There are some limitations to this at the moment:
13// - Only works with audio clips
14// - Only works with WaveAudioClips which have setUsesProxy (false) on them
15#define USE_DYNAMIC_OFFSET_CONTAINER_CLIP 1
16
17
18namespace tracktion { inline namespace engine
19{
20
21//==============================================================================
22//==============================================================================
23namespace
24{
    // Distinguishes how a clip's Node will be driven: playing linearly from the
    // arranger timeline, or triggered from a clip-launcher slot (in which case
    // the clip plays from beat 0 with an open-ended duration).
    enum class ClipRole
    {
        arranger,
        launcher
    };
30
31 template<typename PluginType>
32 juce::Array<PluginType*> getAllPluginsOfType (Edit& edit)
33 {
35
36 // N.B. There is a bit of a hack here checking if the plugin is actually still in the Edit
37 // as they are removed from the PluginCache async and we don't want to flush it every time
38 // we call this method. This should probably be moved to an EditItemCache like Clips and Tracks
39 for (auto p : edit.getPluginCache().getPlugins())
40 if (auto pt = dynamic_cast<PluginType*> (p))
41 if (pt->state.getParent().isValid() && pt->state.getRoot() == edit.state)
42 plugins.add (pt);
43
44 return plugins;
45 }
46
47 using namespace tracktion::graph;
48
49 int getSidechainBusID (EditItemID sidechainSourceID)
50 {
51 constexpr size_t sidechainMagicNum = 0xb2275e7216a2;
52 return static_cast<int> (hash (sidechainMagicNum, sidechainSourceID.getRawID()));
53 }
54
55 int getRackInputBusID (EditItemID rackID)
56 {
57 constexpr size_t rackInputMagicNum = 0x7261636b496e;
58 return static_cast<int> (hash (rackInputMagicNum, rackID.getRawID()));
59 }
60
61 int getRackOutputBusID (EditItemID rackID)
62 {
63 constexpr size_t rackOutputMagicNum = 0x7261636b4f7574;
64 return static_cast<int> (hash (rackOutputMagicNum, rackID.getRawID()));
65 }
66
67 int getWaveInputDeviceBusID (EditItemID trackItemID)
68 {
69 constexpr size_t waveMagicNum = 0xc1abde;
70 return static_cast<int> (hash (waveMagicNum, trackItemID.getRawID()));
71 }
72
73 int getMidiInputDeviceBusID (EditItemID trackItemID)
74 {
75 constexpr size_t midiMagicNum = 0x9a2762;
76 return static_cast<int> (hash (midiMagicNum, trackItemID.getRawID()));
77 }
78
79 bool isSidechainSource (Track& t)
80 {
81 const auto itemID = t.itemID;
82
83 for (auto p : t.edit.getPluginCache().getPlugins())
84 if (p->getSidechainSourceID() == itemID)
85 return true;
86
87 return false;
88 }
89
90 constexpr int getTrackNumChannels()
91 {
92 return 2;
93 }
94
95 bool isUnityChannelMap (const std::vector<std::pair<int, int>>& channelMap)
96 {
97 for (auto mapping : channelMap)
98 if (mapping.first != mapping.second)
99 return false;
100
101 return true;
102 }
103
104 AudioTrack* getTrackContainingTrackDevice (Edit& edit, WaveInputDevice& device)
105 {
106 for (auto t : getAudioTracks (edit))
107 if (&t->getWaveInputDevice() == &device)
108 return t;
109
110 return nullptr;
111 }
112
113 AudioTrack* getTrackContainingTrackDevice (Edit& edit, MidiInputDevice& device)
114 {
115 for (auto t : getAudioTracks (edit))
116 if (&t->getMidiInputDevice() == &device)
117 return t;
118
119 return nullptr;
120 }
121
122 int getNumChannelsFromDevice (OutputDevice& device)
123 {
124 if (auto waveDevice = dynamic_cast<WaveOutputDevice*> (&device))
125 return waveDevice->getChannelSet().size();
126
127 return 0;
128 }
129
130 juce::Array<RackInstance*> getInstancesForRack (RackType& type)
131 {
133
134 for (auto ri : getAllPluginsOfType<RackInstance> (type.edit))
135 if (ri->type.get() == &type)
136 instances.add (ri);
137
138 return instances;
139 }
140
141 juce::Array<RackInstance*> getEnabledInstancesForRack (RackType& type)
142 {
143 auto instances = getInstancesForRack (type);
144 instances.removeIf ([] (auto instance) { return ! instance->isEnabled(); });
145
146 return instances;
147 }
148
149 // If we're rendering and try to render a track in a submix,
150 // only render it if the parent track isn't included in the allowed tracks
151 // This allows us to render tracks contained inside submixes without the
152 // parent submix effects applied
153 bool shouldRenderTrackInSubmix (Track& t, const CreateNodeParams& params)
154 {
155 jassert (t.isPartOfSubmix());
156
157 if (! params.forRendering)
158 return false;
159
160 if (params.allowedTracks == nullptr)
161 return false;
162
163 for (auto allowedTrack : *params.allowedTracks)
164 if (t.isAChildOf (*allowedTrack))
165 return false;
166
167 return true;
168 }
169
    // Expands a track list so submix folders implicitly include their children.
    //
    // For each submix folder in the original list, all of its sub-tracks are
    // appended unless the list already contains one of the folder's parents
    // (that parent's own expansion covers it) or one of its children (the
    // caller made an explicit selection inside the folder).
    //
    // @param originalTracks  the explicitly requested tracks (N.B. taken by
    //                        const value, so the array is copied on entry)
    // @returns               the original tracks plus any implicit submix
    //                        children, without duplicates
    juce::Array<Track*> addImplicitSubmixChildTracks (const juce::Array<Track*> originalTracks)
    {
        if (originalTracks.isEmpty())
            return {};

        auto tracks = originalTracks;

        // Iterate all original tracks
        // If any tracks are submix tracks, check if their parents are included or any of their children
        // If not, add all children recursively
        // Ensure there are no duplicates
        for (auto track : originalTracks)
        {
            if (auto st = dynamic_cast<FolderTrack*> (track);
                st != nullptr && st->isSubmixFolder())
            {
                bool shouldSkip = false;

                // First check for parents
                for (auto potentialParent : originalTracks)
                {
                    if (track->isAChildOf (*potentialParent))
                    {
                        shouldSkip = true;
                        break;
                    }
                }

                // Then children
                for (auto potentialChild : originalTracks)
                {
                    if (potentialChild->isAChildOf (*track))
                    {
                        shouldSkip = true;
                        break;
                    }
                }

                if (shouldSkip)
                    continue;

                // Otherwise add all the children
                for (auto childTrack : st->getAllSubTracks (true))
                    tracks.addIfNotAlreadyThere (childTrack);
            }
        }

        return tracks;
    }
219
220 SpeedFadeDescription getSpeedFadeDescription (const AudioClipBase& clip)
221 {
222 if (clip.getFadeInBehaviour() == AudioClipBase::speedRamp
223 || clip.getFadeOutBehaviour() == AudioClipBase::speedRamp)
224 {
226 const auto clipPos = clip.getPosition();
227
228 if (clip.getFadeInBehaviour() == AudioClipBase::speedRamp)
229 {
230 desc.inTimeRange = TimeRange (clipPos.getStart(), clip.getFadeIn());
231 desc.fadeInType = clip.getFadeInType();
232 }
233 else
234 {
235 desc.inTimeRange = TimeRange (clipPos.getStart(), TimeDuration());
236 }
237
238 if (clip.getFadeOutBehaviour() == AudioClipBase::speedRamp)
239 {
240 desc.outTimeRange = TimeRange (clipPos.getEnd() - clip.getFadeOut(), clip.getFadeOut());
241 desc.fadeOutType = clip.getFadeOutType();
242 }
243 else
244 {
245 desc.outTimeRange = TimeRange (clipPos.getEnd(), TimeDuration());
246 }
247
248 return desc;
249 }
250
251 return {};
252 }
253
254 std::optional<WarpMap> getWarpMap (const AudioClipBase& clip)
255 {
256 if (! clip.getWarpTime())
257 return {};
258
259 WarpMap map;
260
261 for (auto m : clip.getWarpTimeManager().getMarkers())
262 map.push_back ({ m->sourceTime, m->warpTime });
263
264 return map;
265 }
266
270 std::optional<tempo::Sequence> getChordTrackSequenceIfRequired (AudioClipBase& clip)
271 {
272 if (! clip.getAutoPitch())
273 return {};
274
275 if (clip.getAutoPitchMode() != AudioClipBase::chordTrackMono)
276 return {};
277
278 if (auto pg = clip.getPatternGenerator())
279 {
280 // First get the properties that are static for the whole clip
281 const auto clipRootKey = clip.getLoopInfo().getRootNote() % 12;
282 const auto clipTransposeSemitones = clip.getTransposeSemiTones (false);
283 const auto scale = static_cast<int> (pg->scaleType.get());
284
285 // Next get the progression in Edit-time
287 pg->getFlattenedChordProgression (progression, true);
288
289 // Then iterate the progression
291 auto editTempoSequencePosition = createPosition (clip.edit.tempoSequence);
292 BeatPosition beatPos;
293
294 for (auto p : progression)
295 {
296 // Find the key (pitch/scale) of the Edit
297 editTempoSequencePosition.set (beatPos);
298 const auto editKey = editTempoSequencePosition.getKey();
299
300 const int scaleNote = editKey.pitch % 12;
301 int chordTrackPitchDelta = 0;
302
303 // If this section has a chord, find the pitch offset for it
304 if (p->chordName.get().isNotEmpty())
305 {
306 const int chordNote = p->getRootNote (scaleNote, Scale (static_cast<Scale::ScaleType> (editKey.scale)));
307 chordTrackPitchDelta = chordNote - scaleNote;
308 }
309
310 // Then find the base transposition from the Edit's key and clip's key
311 int transposeBase = scaleNote - clipRootKey;
312
313 while (transposeBase > 6) transposeBase -= 12;
314 while (transposeBase < -6) transposeBase += 12;
315
316 // Shift by the section's octave
317 transposeBase += p->octave * 12;
318
319 // Put the three transposition sections back together and add it as a KeyChange
320 const int finalPitch = transposeBase + chordTrackPitchDelta + clipTransposeSemitones;
321 keyChanges.push_back ({ beatPos, { finalPitch, scale } });
322
323 beatPos = beatPos + p->lengthInBeats;
324 }
325
326 // Finally copy tempo data from Edit's tempo sequence
329
330 {
331 for (auto ts : clip.edit.tempoSequence.getTempos())
332 tempoChanges.push_back ({ ts->startBeatNumber.get(), ts->bpm.get(), ts->curve.get() });
333
334 for (auto ts : clip.edit.tempoSequence.getTimeSigs())
335 timeSigChanges.push_back ({ ts->startBeatNumber.get(), ts->numerator.get(), ts->denominator.get(), ts->triplets.get() });
336 }
337
338 const bool useDenominator = clip.edit.engine.getEngineBehaviour().lengthOfOneBeatDependsOnTimeSignature();
339 tempo::Sequence seq (std::move (tempoChanges), std::move (timeSigChanges), std::move (keyChanges),
340 useDenominator ? tempo::LengthOfOneBeat::dependsOnTimeSignature
342
343 return seq;
344 }
345
346 return {};
347 }
348
    // Returns whether live input monitoring should be enabled for this
    // track-device input instance, based on its owner device's monitor mode.
    //
    // NOTE(review): the listing this was recovered from appears to have dropped
    // one line between the `on` and `off` cases — almost certainly a
    // `case InputDevice::MonitorMode::automatic:` handler. As written, any mode
    // other than `on`/`off` falls through to the final `return false`.
    // Verify against the upstream sources before relying on `automatic` behaviour.
    bool shouldMonitorTrackDevice (InputDeviceInstance& instance)
    {
        switch (instance.owner.getMonitorMode())
        {
            case InputDevice::MonitorMode::on: return true;
            case InputDevice::MonitorMode::off: return false;
        };

        return false;
    }
360
361//==============================================================================
362//==============================================================================
364
367
370
372
374
376
377//==============================================================================
378//==============================================================================
/** Wraps a clip's Node in a FadeInOutNode when the clip has a fade-in or fade-out.

    Returns the node unchanged when both fade lengths are zero. For fades with
    speed-ramp behaviour, only a very short (max 3ms) volume fade is applied at
    the corresponding edge — the audible ramp itself is produced elsewhere by
    the speed-ramping playback node, so this just avoids clicks.

    @param clip                the clip whose fade settings to apply
    @param clipTimeRangeToUse  the clip's playback range (beats or seconds)
    @param node                the Node to wrap; returned unchanged if no fades
    @param params              shared node-creation parameters
*/
std::unique_ptr<tracktion::graph::Node> createFadeNodeForClip (AudioClipBase& clip, EditTimeRange clipTimeRangeToUse,
                                                               std::unique_ptr<Node> node, const CreateNodeParams& params)
{
    auto fIn = clip.getFadeIn();
    auto fOut = clip.getFadeOut();

    if (fIn > 0_td || fOut > 0_td)
    {
        const bool speedIn = clip.getFadeInBehaviour() == AudioClipBase::speedRamp && fIn > 0_td;
        const bool speedOut = clip.getFadeOutBehaviour() == AudioClipBase::speedRamp && fOut > 0_td;

        // Resolve the clip range to wall-clock time for the fade boundaries
        auto pos = toTime (clipTimeRangeToUse, clip.edit.tempoSequence);
        node = makeNode<FadeInOutNode> (std::move (node), params.processState,
                                        speedIn ? TimeRange (pos.getStart(), pos.getStart() + juce::jmin (TimeDuration::fromSeconds (0.003), fIn))
                                                : TimeRange (pos.getStart(), pos.getStart() + fIn),
                                        speedOut ? TimeRange (pos.getEnd() - juce::jmin (TimeDuration::fromSeconds (0.003), fOut), pos.getEnd())
                                                 : TimeRange (pos.getEnd() - fOut, pos.getEnd()),
                                        clip.getFadeInType(), clip.getFadeOutType(),
                                        true);
    }

    return node;
}
402
403//==============================================================================
404std::unique_ptr<tracktion::graph::Node> createNodeForAudioClip (AudioClipBase& clip, EditItemID idToUse, EditTimeRange clipTimeRangeToUse,
405 bool includeMelodyne, const CreateNodeParams& params, ClipRole role)
406{
407 auto& playHeadState = params.processState.playHeadState;
408 const AudioFile playFile (clip.getPlaybackFile());
409
410 if (playFile.isNull())
411 return {};
412
413 // Check if ARA should be used
414 if (clip.setupARA (false))
415 {
416 jassert (clip.melodyneProxy != nullptr);
417
418 if (includeMelodyne)
419 return makeNode<MelodyneNode> (clip, playHeadState.playHead, params.forRendering);
420
421 return {}; // the ARA node creation will be handled by the track to allow live-play...
422 }
423
424 clip.melodyneProxy = nullptr;
425
426 // Otherwise use audio file
427 auto original = clip.getAudioFile();
428
429 // Trigger proxy render if it needs it
430 clip.beginRenderingNewProxyIfNeeded();
431
433
434 if (clip.canUseProxy())
435 {
436 assert (role != ClipRole::launcher);
437 assert (! clipTimeRangeToUse.isBeats());
438 TimeDuration nodeOffset;
439 double speed = 1.0;
440 TimeRange loopRange;
441
442 if (! clip.usesTimeStretchedProxy())
443 {
444 nodeOffset = clip.getPosition().getOffset();
445 loopRange = clip.getLoopRange();
446 speed = clip.getSpeedRatio();
447 }
448
449 if ((clip.getFadeInBehaviour() == AudioClipBase::speedRamp && clip.getFadeIn() != 0_td)
450 || (clip.getFadeOutBehaviour() == AudioClipBase::speedRamp && clip.getFadeOut() != 0_td))
451 {
453 const auto clipPos = clip.getPosition();
454
455 if (clip.getFadeInBehaviour() == AudioClipBase::speedRamp)
456 {
457 desc.inTimeRange = TimeRange (clipPos.getStart(), clip.getFadeIn());
458 desc.fadeInType = clip.getFadeInType();
459 }
460 else
461 {
462 desc.inTimeRange = TimeRange (clipPos.getStart(), TimeDuration());
463 }
464
465 if (clip.getFadeOutBehaviour() == AudioClipBase::speedRamp)
466 {
467 desc.outTimeRange = TimeRange (clipPos.getEnd() - clip.getFadeOut(), clip.getFadeOut());
468 desc.fadeOutType = clip.getFadeOutType();
469 }
470 else
471 {
472 desc.outTimeRange = TimeRange (clipPos.getEnd(), TimeDuration());
473 }
474
475 node = tracktion::graph::makeNode<SpeedRampWaveNode> (playFile,
476 toTime (clipTimeRangeToUse, clip.edit.tempoSequence),
477 nodeOffset,
478 loopRange,
479 clip.getLiveClipLevel(),
480 speed,
481 clip.getActiveChannels(),
482 juce::AudioChannelSet::canonicalChannelSet (std::max (2, clip.getActiveChannels().size())),
483 params.processState,
484 idToUse,
485 params.forRendering,
486 desc);
487 }
488 else
489 {
490 node = tracktion::graph::makeNode<WaveNode> (playFile,
491 toTime (clipTimeRangeToUse, clip.edit.tempoSequence),
492 nodeOffset,
493 loopRange,
494 clip.getLiveClipLevel(),
495 speed,
496 clip.getActiveChannels(),
497 juce::AudioChannelSet::canonicalChannelSet (std::max (2, clip.getActiveChannels().size())),
498 params.processState,
499 idToUse,
500 params.forRendering);
501 }
502 }
503 else
504 {
505 const auto timeStretcherMode = clip.getActualTimeStretchMode();
506 const auto timeStretcherOpts = clip.elastiqueProOptions.get();
507 const auto readAhead = params.readAheadTimeStretchNodes ? WaveNodeRealTime::ReadAhead::yes
508 : WaveNodeRealTime::ReadAhead::no;
509
510 const auto speedFadeDesc = getSpeedFadeDescription (clip);
511 auto warpMap = getWarpMap (clip);
512 std::optional<tempo::Sequence::Position> editTempoPosition (speedFadeDesc.isEmpty() ? std::optional<tempo::Sequence::Position>() : createPosition (clip.edit.tempoSequence));
513
514 if (clip.getAutoTempo() || clip.getAutoPitch() || role == ClipRole::launcher)
515 {
516 assert (clipTimeRangeToUse.isBeats());
520 auto syncTempo = WaveNodeRealTime::SyncTempo::no;
521 auto syncPitch = WaveNodeRealTime::SyncPitch::no;
522
523 auto wi = clip.getWaveInfo();
524 auto& li = clip.getLoopInfo();
525
526 if (clip.getAutoTempo() && li.getNumBeats() > 0 && wi.hashCode != 0)
527 {
528 tempos.push_back ({ 0_bp, li.getBpm (wi), 1.0 });
529 timeSigs.push_back ({ 0_bp, li.getNumerator(), li.getDenominator(), false });
530 syncTempo = WaveNodeRealTime::SyncTempo::yes;
531 }
532 else
533 {
534 tempos.push_back ({ 0_bp, 120.0, 0.0 });
535 timeSigs.push_back ({ 0_bp, 4, 4, false });
536 }
537
538 if (clip.getAutoPitch() && li.getRootNote() != -1)
539 {
540 keyChanges.push_back ({ 0_bp, { li.getRootNote(), 0 } });
541 syncPitch = WaveNodeRealTime::SyncPitch::yes;
542 }
543
544 tempo::Sequence seq (std::move (tempos),
545 std::move (timeSigs),
546 std::move (keyChanges),
547 clip.edit.engine.getEngineBehaviour().lengthOfOneBeatDependsOnTimeSignature() ? tempo::LengthOfOneBeat::dependsOnTimeSignature
549
550 if (role == ClipRole::launcher)
551 {
552 node = makeNode<WaveNodeRealTime> (playFile,
553 timeStretcherMode, timeStretcherOpts,
554 BeatRange (0_bp, BeatPosition::fromBeats (std::numeric_limits<double>::max())),
555 clip.getOffsetInBeats(),
556 clip.getLoopRangeBeats(),
557 clip.getLiveClipLevel(),
558 clip.getActiveChannels(),
559 juce::AudioChannelSet::canonicalChannelSet (std::max (2, clip.getActiveChannels().size())),
560 params.processState,
561 idToUse,
562 params.forRendering,
563 clip.getResamplingQuality(),
564 speedFadeDesc, std::move (editTempoPosition),
565 std::move (warpMap),
566 seq, syncTempo, syncPitch,
567 getChordTrackSequenceIfRequired (clip),
568 clip.getPitchChange(),
569 readAhead);
570 }
571 else
572 {
573 node = makeNode<WaveNodeRealTime> (playFile,
574 timeStretcherMode, timeStretcherOpts,
575 toBeats (clipTimeRangeToUse, clip.edit.tempoSequence),
576 clip.getOffsetInBeats(),
577 BeatRange (clip.getLoopStartBeats(), clip.getLoopLengthBeats()),
578 clip.getLiveClipLevel(),
579 clip.getActiveChannels(),
580 juce::AudioChannelSet::canonicalChannelSet (std::max (2, clip.getActiveChannels().size())),
581 params.processState,
582 idToUse,
583 params.forRendering,
584 clip.getResamplingQuality(),
585 speedFadeDesc, std::move (editTempoPosition),
586 std::move (warpMap),
587 seq, syncTempo, syncPitch,
588 getChordTrackSequenceIfRequired (clip),
589 clip.getPitchChange(),
590 readAhead);
591 }
592 }
593 else
594 {
595 assert (role != ClipRole::launcher);
596 assert (! clipTimeRangeToUse.isBeats());
597 node = makeNode<WaveNodeRealTime> (playFile,
598 toTime (clipTimeRangeToUse, clip.edit.tempoSequence),
599 clip.getPosition().getOffset(),
600 clip.getLoopRange(),
601 clip.getLiveClipLevel(),
602 clip.getSpeedRatio(),
603 clip.getActiveChannels(),
604 juce::AudioChannelSet::canonicalChannelSet (std::max (2, clip.getActiveChannels().size())),
605 params.processState,
606 idToUse,
607 params.forRendering,
608 clip.getResamplingQuality(),
609 speedFadeDesc, std::move (editTempoPosition),
610 timeStretcherMode, timeStretcherOpts,
611 clip.getPitchChange(),
612 readAhead);
613 }
614 }
615
616 // Plugins
617 if (params.includePlugins)
618 {
619 if (auto pluginList = clip.getPluginList())
620 {
621 for (auto p : *pluginList)
622 p->initialiseFully();
623
624 node = createPluginNodeForList (*pluginList, nullptr, std::move (node), playHeadState, params);
625 }
626 }
627
628 // Create FadeInOutNode
629 if (role != ClipRole::launcher)
630 node = createFadeNodeForClip (clip, clipTimeRangeToUse, std::move (node), params);
631
632 return node;
633}
634
635std::unique_ptr<tracktion::graph::Node> createNodeForAudioClip (AudioClipBase& clip, bool includeMelodyne,
636 const CreateNodeParams& params, ClipRole role)
637{
638 if (clip.canUseProxy())
639 {
640 assert (role == ClipRole::arranger);
641 return createNodeForAudioClip (clip, clip.itemID, clip.getEditTimeRange(), includeMelodyne, params, role);
642 }
643
644 if (clip.getAutoTempo() || clip.getAutoPitch() || role == ClipRole::launcher)
645 return createNodeForAudioClip (clip, clip.itemID, clip.getEditBeatRange(), includeMelodyne, params, role);
646
647 assert (role == ClipRole::arranger);
648 return createNodeForAudioClip (clip, clip.itemID, clip.getEditTimeRange(), includeMelodyne, params, role);
649}
650
651std::unique_ptr<tracktion::graph::Node> createNodeForMidiClip (MidiClip& clip, const TrackMuteState& trackMuteState,
652 const CreateNodeParams& params, ClipRole role)
653{
655 const bool generateMPE = clip.getMPEMode();
656 const auto timeBase = clip.canUseProxy() ? MidiList::TimeBase::seconds
658
659 const auto channels = generateMPE ? juce::Range<int> (2, 15)
660 : juce::Range<int>::withStartAndLength (clip.getMidiChannel().getChannelNumber(), 1);
661
662 if (timeBase == MidiList::TimeBase::beatsRaw)
663 {
665 sequences.emplace_back (clip.getSequence().exportToPlaybackMidiSequence (clip, timeBase, generateMPE));
666 const auto clipBeatRange = role == ClipRole::launcher ? BeatRange (0_bp, BeatPosition::fromBeats (std::numeric_limits<double>::max()))
667 : BeatRange (clip.getStartBeat(), clip.getEndBeat());
668
669 return graph::makeNode<LoopingMidiNode> (std::move (sequences),
670 channels,
671 generateMPE,
672 clipBeatRange,
673 clip.getLoopRangeBeats(),
674 clip.getOffsetInBeats(),
675 clip.getLiveClipLevel(),
676 params.processState,
677 clip.itemID,
678 clip.getQuantisation(),
679 clip.edit.engine.getGrooveTemplateManager().getTemplateByName (clip.getGrooveTemplate()),
680 clip.getGrooveStrength(),
681 [&trackMuteState]
682 {
683 if (! trackMuteState.shouldTrackBeAudible())
684 return ! trackMuteState.shouldTrackMidiBeProcessed();
685
686 return false;
687 });
688 }
689
690 // Use looped sequence in seconds time base
691 assert (role != ClipRole::launcher);
692 const auto clipTimeRange = clip.getEditTimeRange();
693 const juce::Range<double> editTimeRange { clipTimeRange.getStart().inSeconds(), clipTimeRange.getEnd().inSeconds() };
694
696 sequences.emplace_back (clip.getSequenceLooped().exportToPlaybackMidiSequence (clip, timeBase, generateMPE));
697
698 return graph::makeNode<MidiNode> (std::move (sequences),
699 timeBase,
700 channels,
701 generateMPE,
702 editTimeRange,
703 clip.getLiveClipLevel(),
704 params.processState,
705 clip.itemID,
706 [&trackMuteState]
707 {
708 if (! trackMuteState.shouldTrackBeAudible())
709 return ! trackMuteState.shouldTrackMidiBeProcessed();
710
711 return false;
712 });
713}
714
715std::unique_ptr<tracktion::graph::Node> createNodeForStepClip (StepClip& clip, const TrackMuteState& trackMuteState,
716 const CreateNodeParams& params, ClipRole role)
717{
719
721
722 if (role == ClipRole::launcher)
723 {
725
726 for (int i = clip.usesProbability() ? 64 : 1; --i >= 0;)
727 sequences.push_back (clip.generateMidiSequence (MidiList::TimeBase::beatsRaw));
728
729 const auto clipBeatRange = BeatRange (0_bp, BeatPosition::fromBeats (std::numeric_limits<double>::max()));
730 node = graph::makeNode<LoopingMidiNode> (std::move (sequences),
731 juce::Range<int> (1, 16),
732 false,
733 clipBeatRange,
734 clip.getLoopRangeBeats(),
735 clip.getOffsetInBeats(),
736 clip.getLiveClipLevel(),
737 params.processState,
738 clip.itemID,
740 nullptr,
741 0.0f,
742 [&trackMuteState]
743 {
744 if (! trackMuteState.shouldTrackBeAudible())
745 return ! trackMuteState.shouldTrackMidiBeProcessed();
746
747 return false;
748 });
749 }
750 else
751 {
753
754 for (int i = clip.usesProbability() ? 64 : 1; --i >= 0;)
755 {
757 clip.generateMidiSequence (sequence);
758 sequences.push_back (sequence);
759 }
760
761 const auto clipRange = clip.getEditTimeRange ();
762 const juce::Range<double> editTimeRange (clipRange.getStart ().inSeconds (), clipRange.getEnd ().inSeconds ());
763 node = graph::makeNode<MidiNode> (std::move (sequences),
764 MidiList::TimeBase::seconds,
765 juce::Range<int> (1, 16),
766 false,
767 editTimeRange,
768 clip.getLiveClipLevel(),
769 params.processState,
770 clip.itemID,
771 [&trackMuteState]
772 {
773 if (!trackMuteState.shouldTrackBeAudible ())
774 return !trackMuteState.shouldTrackMidiBeProcessed ();
775
776 return false;
777 });
778 }
779
780 if (node && ! clip.getListeners().isEmpty())
781 node = makeNode<LiveMidiOutputNode> (clip, std::move (node));
782
783 return node;
784}
785
786std::unique_ptr<tracktion::graph::Node> createNodeForContainerClip (ContainerClip& clip, [[ maybe_unused ]] const TrackMuteState& trackMuteState,
787 const CreateNodeParams& params, ClipRole role)
788{
790 const auto& clips = clip.getClips();
791
792 if (clips.isEmpty())
793 return {};
794
795 #if USE_DYNAMIC_OFFSET_CONTAINER_CLIP
797
798 {
800
801 for (auto c : clips)
802 {
803 if (auto acb = dynamic_cast<AudioClipBase*> (c))
804 {
805 assert (! acb->canUseProxy());
806 assert (acb->getAutoTempo());
807
808 if (auto clipNode = createNodeForAudioClip (*acb, false, params, ClipRole::arranger))
809 nodes.push_back (std::move (clipNode));
810 }
811 else
812 {
813 assert (false && "Only WaveAudioClips supported at the moment");
814 }
815 }
816
817 auto offsetNode = std::make_unique<DynamicOffsetNode> (params.processState,
818 clip.itemID,
819 role == ClipRole::launcher ? BeatRange (0_bp, BeatPosition::fromBeats (std::numeric_limits<double>::max()))
820 : clip.getEditBeatRange(),
821 clip.getOffsetInBeats(),
822 clip.getLoopRangeBeats(),
823 std::move (nodes));
824 node = std::move (offsetNode);
825 }
826 #else
827 // Combiner clip and the contained clips need their own, local PlayHeadState.
828 // This also needs to persist across graph rebuilds to maintain continuity.
829 // Once the ContainerClipNode has been initialised it will update it's children with its own ProcessState
830 auto node = makeNode<ContainerClipNode> (params.processState,
831 clip.itemID,
832 BeatRange (clip.getStartBeat(), clip.getEndBeat()),
833 clip.getOffsetInBeats(),
834 clip.getLoopRangeBeats(),
835 createNodeForClips (clip.itemID, clips, trackMuteState, params));
836 #endif
837
838 // Plugins
839 if (params.includePlugins)
840 {
841 if (auto pluginList = clip.getPluginList())
842 {
843 for (auto p : *pluginList)
844 p->initialiseFully();
845
846 node = createPluginNodeForList (*pluginList, nullptr, std::move (node), params.processState.playHeadState, params);
847 }
848 }
849
850 // Create FadeInOutNode
851 if (role != ClipRole::launcher)
852 return createFadeNodeForClip (clip, clip.getEditTimeRange(), std::move (node), params);
853
854 return node;
855}
856
857std::unique_ptr<tracktion::graph::Node> createNodeForClip (Clip& clip, const TrackMuteState& trackMuteState,
858 const CreateNodeParams& params, ClipRole role)
859{
860 if (clip.disabled)
861 return {};
862
863 // N.B. This must be checked first as a ContainerClip is an AudioClipBase
864 if (auto containerClip = dynamic_cast<ContainerClip*> (&clip))
865 return createNodeForContainerClip (*containerClip, trackMuteState, params, role);
866
867 if (auto audioClip = dynamic_cast<AudioClipBase*> (&clip))
868 return createNodeForAudioClip (*audioClip, false, params, role);
869
870 if (auto midiClip = dynamic_cast<MidiClip*> (&clip))
871 return createNodeForMidiClip (*midiClip, trackMuteState, params, role);
872
873 if (auto stepClip = dynamic_cast<StepClip*> (&clip))
874 return createNodeForStepClip (*stepClip, trackMuteState, params, role);
875
876 return {};
877}
878
/** Creates a single Node that plays back all the given clips on a track.

    Normally a CombiningNode is used (it only processes clips whose range is
    active), but when any allowed clip has a plugin reporting latency a
    SummingNode is used instead, since a CombiningNode wouldn't keep the latency
    FIFOs flushed. Clips excluded by params.allowedClips are skipped.
*/
std::unique_ptr<tracktion::graph::Node> createNodeForClips (EditItemID trackID, const juce::Array<Clip*>& clips,
                                                            const TrackMuteState& trackMuteState, const CreateNodeParams& params)
{
    // If there are no clips, we still need to send note-offs for clips that might have been deleted whilst still playing
    // In the future, this will be removed during the transform stage
    if (clips.size() == 0)
        return std::make_unique<CombiningNode> (trackID, params.processState);

    // True if any allowed clip has a plugin that reports latency
    const bool clipsHaveLatency = [&]
    {
        if (params.includePlugins)
            for (auto clip : clips)
                if (params.allowedClips == nullptr || params.allowedClips->contains (clip))
                    if (auto pluginList = clip->getPluginList())
                        for (auto p : *pluginList)
                            if (p->getLatencySeconds() > 0.0)
                                return true;

        return false;
    }();

    // If any of the clips have latency, it's impossible to use a CombiningNode as it doesn't
    // continuously process Nodes which means the latency FIFO doesn't get flushed. So just
    // use a normal SummingNode instead
    if (clipsHaveLatency)
    {
        auto combiner = std::make_unique<SummingNode>();

        for (auto clip : clips)
            if (params.allowedClips == nullptr || params.allowedClips->contains (clip))
                if (auto clipNode = createNodeForClip (*clip, trackMuteState, params, ClipRole::arranger))
                    combiner->addInput (std::move (clipNode));

        return combiner;
    }

    // Single-clip fast path (still wrapped in a CombiningNode)
    if (clips.size() == 1)
    {
        auto clip = clips.getFirst();

        if (params.allowedClips == nullptr || params.allowedClips->contains (clip))
        {
            auto combiner = std::make_unique<CombiningNode> (trackID, params.processState);

            if (auto clipNode = createNodeForClip (*clip, trackMuteState, params, ClipRole::arranger))
                combiner->addInput (std::move (clipNode), clip->getPosition().time);

            return combiner;
        }
    }

    auto combiner = std::make_unique<CombiningNode> (trackID, params.processState);

    // Use a CombiningNode for most clips
    for (auto clip : clips)
        if (params.allowedClips == nullptr || params.allowedClips->contains (clip))
            if (auto clipNode = createNodeForClip (*clip, trackMuteState, params, ClipRole::arranger))
                combiner->addInput (std::move (clipNode), clip->getPosition().time);

    return combiner;
}
940
941std::vector<std::unique_ptr<SlotControlNode>> createNodeForLauncherClips (const ClipSlotList& slotList,
942 const TrackMuteState& trackMuteState, const CreateNodeParams& params)
943{
945
946 for (auto slot : slotList.getClipSlots())
947 {
948 auto clip = slot->getClip();
949
950 if (! clip)
951 continue;
952
953 if (params.allowedClips == nullptr || params.allowedClips->contains (clip))
954 {
955 if (auto clipNode = createNodeForClip (*clip, trackMuteState, params, ClipRole::launcher))
956 {
958
959 if (auto acb = dynamic_cast<AudioClipBase*> (clip))
960 launchHandle = acb->getLaunchHandle();
961 else if (auto mc = dynamic_cast<MidiClip*> (clip))
962 launchHandle = mc->getLaunchHandle();
963 else if (auto sc = dynamic_cast<StepClip*> (clip))
964 launchHandle = sc->getLaunchHandle();
965 else
966 assert (false);
967
968 std::optional<BeatDuration> clipDuration = clip->isLooping() ? std::optional<BeatDuration>()
969 : clip->getLengthInBeats();
970
971 switch (clip->followActionDurationType.get())
972 {
974 if (auto afterBeats = clip->followActionBeats.get(); afterBeats > 0_bd)
975 clipDuration = afterBeats;
976
977 break;
979 if (clip->isLooping())
980 {
981 if (auto afterLoops = clip->followActionNumLoops.get(); afterLoops > 0.0)
982 clipDuration = (clip->getLoopLengthBeats() * afterLoops) - clip->getOffsetInBeats();
983 }
984 else
985 {
986 clipDuration = clip->getLengthInBeats() - clip->getOffsetInBeats();
987 }
988 break;
989 }
990
991 auto controlNode = std::make_unique<SlotControlNode> (params.processState,
992 std::move (launchHandle),
993 clipDuration,
994 createFollowAction (*clip),
995 slot->itemID,
996 std::move (clipNode));
997
998 nodes.push_back (std::move (controlNode));
999 }
1000 }
1001 }
1002
1003 return nodes;
1004}
1005
1006
1007//==============================================================================
1008std::unique_ptr<tracktion::graph::Node> createNodeForFrozenAudioTrack (AudioTrack& track, tracktion::graph::PlayHeadState& playHeadState, const CreateNodeParams& params)
1009{
1010 jassert (! params.forRendering);
1011
1012 const bool processMidiWhenMuted = track.state.getProperty (IDs::processMidiWhenMuted, false);
1013 auto trackMuteState = std::make_unique<TrackMuteState> (track, false, processMidiWhenMuted);
1014 auto node = tracktion::graph::makeNode<WaveNode> (AudioFile (track.edit.engine, TemporaryFileManager::getFreezeFileForTrack (track)),
1015 TimeRange (TimePosition(), track.getLengthIncludingInputTracks()),
1018 params.processState,
1019 track.itemID,
1020 params.forRendering);
1021
1022 // Plugins
1023 if (params.includePlugins)
1024 node = createPluginNodeForTrack (track, *trackMuteState, std::move (node), playHeadState, params);
1025
1026 if (isSidechainSource (track))
1027 node = makeNode<SendNode> (std::move (node), getSidechainBusID (track.itemID));
1028
1029 node = makeNode<TrackMutingNode> (std::move (trackMuteState), std::move (node), false);
1030
1031 return node;
1032}
1033
1034std::unique_ptr<tracktion::graph::Node> createARAClipsNode (const juce::Array<Clip*>& clips, const TrackMuteState&, const CreateNodeParams& params)
1035{
1037
1038 for (auto clip : clips)
1039 if (params.allowedClips == nullptr || params.allowedClips->contains (clip))
1040 if (auto acb = dynamic_cast<AudioClipBase*> (clip))
1041 if (acb->isUsingMelodyne() && acb->melodyneProxy != nullptr)
1042 araClips.add (acb);
1043
1044 if (araClips.size() == 0)
1045 return {};
1046
1048
1049 for (auto araClip : araClips)
1050 if (auto araNode = createNodeForAudioClip (*araClip, true, params, ClipRole::arranger))
1051 nodes.push_back (createFadeNodeForClip (*araClip, araClip->getEditTimeRange(), std::move (araNode), params));
1052
1053 if (nodes.size() == 1)
1054 return std::move (nodes.front());
1055
1056 return std::make_unique<SummingNode> (std::move (nodes));
1057}
1058
1059std::unique_ptr<tracktion::graph::Node> createClipsNode (AudioTrack& at, const TrackMuteState& trackMuteState,
1060 const CreateNodeParams& params)
1061{
1062 std::vector<std::unique_ptr<Node>> arrangerNodes;
1063 const auto trackID = at.itemID;
1064 const auto& clips = at.getClips();
1065
1066 if (auto clipsNode = createNodeForClips (trackID, clips, trackMuteState, params))
1067 arrangerNodes.push_back (std::move (clipsNode));
1068
1069 if (auto araNode = createARAClipsNode (clips, trackMuteState, params))
1070 arrangerNodes.push_back (std::move (araNode));
1071
1072 if (! params.allowClipSlots)
1073 {
1074 if (arrangerNodes.empty())
1075 return {};
1076
1077 if (arrangerNodes.size() == 1)
1078 return std::move (arrangerNodes.front());
1079
1080 return std::make_unique<SummingNode> (std::move (arrangerNodes));
1081 }
1082
1083 auto launcherNodes = createNodeForLauncherClips (at.getClipSlotList(), trackMuteState, params);
1084
1085 if (arrangerNodes.empty() && launcherNodes.empty())
1086 return {};
1087
1088 std::unique_ptr<Node> arrangerNode;
1089
1090 if (arrangerNodes.size() == 1)
1091 arrangerNode = std::move (arrangerNodes.front());
1092 else if (arrangerNodes.size() > 1)
1093 arrangerNode = std::make_unique<SummingNode> (std::move (arrangerNodes));
1094
1095 return makeNode<ArrangerLauncherSwitchingNode> (params.processState, at, std::move (arrangerNode), std::move (launcherNodes));
1096}
1097
1098std::unique_ptr<tracktion::graph::Node> createLiveInputNodeForDevice (InputDeviceInstance& inputDeviceInstance, tracktion::graph::PlayHeadState& playHeadState,
1099 const CreateNodeParams& params, EditItemID trackID)
1100{
1101 if (auto midiDevice = dynamic_cast<MidiInputDevice*> (&inputDeviceInstance.getInputDevice()))
1102 {
1103 if (midiDevice->isTrackDevice())
1104 if (auto sourceTrack = getTrackContainingTrackDevice (inputDeviceInstance.edit, *midiDevice))
1105 return makeNode<TrackMidiInputDeviceNode> (*midiDevice, makeNode<ReturnNode> (getMidiInputDeviceBusID (sourceTrack->itemID)), params.processState,
1106 shouldMonitorTrackDevice (inputDeviceInstance));
1107
1109 return makeNode<HostedMidiInputDeviceNode> (inputDeviceInstance, *midiDevice, midiDevice->getMPESourceID(), playHeadState, params.processState);
1110
1111 return makeNode<MidiInputDeviceNode> (inputDeviceInstance, *midiDevice, midiDevice->getMPESourceID(), playHeadState, trackID);
1112 }
1113 else if (auto waveDevice = dynamic_cast<WaveInputDevice*> (&inputDeviceInstance.getInputDevice()))
1114 {
1115 if (waveDevice->isTrackDevice())
1116 if (auto sourceTrack = getTrackContainingTrackDevice (inputDeviceInstance.edit, *waveDevice))
1117 return makeNode<TrackWaveInputDeviceNode> (params.processState,
1118 *waveDevice,
1119 makeNode<ReturnNode> (getWaveInputDeviceBusID (sourceTrack->itemID)),
1120 shouldMonitorTrackDevice (inputDeviceInstance));
1121
1122 // For legacy reasons, we always need a stereo output from our live inputs
1123 return makeNode<WaveInputDeviceNode> (inputDeviceInstance, *waveDevice,
1125 }
1126
1127 return {};
1128}
1129
1130std::unique_ptr<tracktion::graph::Node> createLiveInputsNode (AudioTrack& track, tracktion::graph::PlayHeadState& playHeadState, const CreateNodeParams& params)
1131{
1133
1134 if (! params.forRendering)
1135 if (auto context = track.edit.getCurrentPlaybackContext())
1136 for (auto in : context->getAllInputs())
1137 if ((in->isLivePlayEnabled (track) || in->getInputDevice().isTrackDevice()) && in->getTargets().contains (track.itemID))
1138 if (auto node = createLiveInputNodeForDevice (*in, playHeadState, params, track.itemID))
1139 nodes.push_back (std::move (node));
1140
1141 if (nodes.empty())
1142 return {};
1143
1144 if (nodes.size() == 1)
1145 return std::move (nodes.front());
1146
1147 return std::make_unique<SummingNode> (std::move (nodes));
1148}
1149
1150std::unique_ptr<tracktion::graph::Node> createSidechainInputNodeForPlugin (Plugin& plugin, std::unique_ptr<Node> node)
1151{
1152 const auto sidechainSourceID = plugin.getSidechainSourceID();
1153 const bool usesSidechain = ! plugin.isMissing() && sidechainSourceID.isValid();
1154
1155 if (! usesSidechain)
1156 return node;
1157
1158 // This is complicated because the first two source channels will always be the track the plugin is on
1159 // Any additional channels will be from the sidechain source track
1160 // So we really have two channel maps, one from the plugin's track to the plugin and one from the sidechain track to the plugin
1161 std::vector<std::pair<int /*source channel*/, int /*dest channel*/>> directChannelMap, sidechainChannelMap;
1162
1163 for (int i = 0; i < plugin.getNumWires(); ++i)
1164 {
1165 if (auto w = plugin.getWire (i))
1166 {
1167 const int sourceIndex = w->sourceChannelIndex;
1168 const int destIndex = w->destChannelIndex;
1169
1170 if (sourceIndex < getTrackNumChannels())
1171 directChannelMap.emplace_back (sourceIndex, destIndex);
1172 else
1173 sidechainChannelMap.emplace_back (sourceIndex - getTrackNumChannels(), destIndex);
1174 }
1175 }
1176
1177 if (directChannelMap.empty() && sidechainChannelMap.empty())
1178 return node;
1179
1180 auto directInput = std::move (node);
1181
1182 if (! isUnityChannelMap (directChannelMap))
1183 directInput = makeNode<ChannelRemappingNode> (std::move (directInput), directChannelMap, true);
1184
1185 auto sidechainInput = makeNode<ReturnNode> (getSidechainBusID (sidechainSourceID));
1186 sidechainInput = makeNode<ChannelRemappingNode> (std::move (sidechainInput), std::move (sidechainChannelMap), false);
1187
1188 if (directChannelMap.empty())
1189 return sidechainInput;
1190
1191 auto sumNode = makeSummingNode ({ directInput.release(), sidechainInput.release() });
1192
1193 return sumNode;
1194}
1195
1196std::unique_ptr<tracktion::graph::Node> createNodeForPlugin (Plugin& plugin, const TrackMuteState* trackMuteState, std::unique_ptr<Node> node,
1197 const CreateNodeParams& params)
1198{
1199 jassert (node != nullptr);
1200
1201 if (plugin.isDisabled())
1202 return node;
1203
1204 if (! plugin.isEnabled() && ! params.includeBypassedPlugins)
1205 return node;
1206
1207 int maxNumChannels = -1;
1208
1209 // If this plugin is on a track or clip and doesn't have a sidechain input we can limit the number of channels it uses
1210 if (plugin.getOwnerTrack() != nullptr || plugin.getOwnerClip() != nullptr)
1211 if (! plugin.getSidechainSourceID().isValid())
1212 maxNumChannels = 2;
1213
1214 node = createSidechainInputNodeForPlugin (plugin, std::move (node));
1215 node = tracktion::graph::makeNode<PluginNode> (std::move (node),
1216 plugin,
1217 params.sampleRate, params.blockSize,
1218 trackMuteState, params.processState,
1219 params.forRendering, params.includeBypassedPlugins,
1220 maxNumChannels);
1221
1222 return node;
1223}
1224
1225std::unique_ptr<tracktion::graph::Node> createNodeForRackInstance (RackInstance& rackInstance, std::unique_ptr<Node> node)
1226{
1227 jassert (node != nullptr);
1228
1229 if (! rackInstance.isEnabled())
1230 return node;
1231
1232 const auto rackInputID = getRackInputBusID (rackInstance.rackTypeID);
1233 const auto rackOutputID = getRackOutputBusID (rackInstance.rackTypeID);
1234
1235 // The input to the instance is referenced by the dry signal path
1236 auto* inputNode = node.get();
1237
1238 // Send
1239 // N.B. the channel indicies from the RackInstance start a 1 so we need to subtract this to get a 0-indexed channel
1240 RackInstanceNode::ChannelMap sendChannelMap;
1241 sendChannelMap[0] = { 0, rackInstance.leftInputGoesTo - 1, rackInstance.leftInDb };
1242 sendChannelMap[1] = { 1, rackInstance.rightInputGoesTo - 1, rackInstance.rightInDb };
1243 node = makeNode<RackInstanceNode> (std::move (node), std::move (sendChannelMap));
1244 node = makeNode<SendNode> (std::move (node), rackInputID);
1245 node = makeNode<ReturnNode> (makeNode<SinkNode> (std::move (node)), rackOutputID);
1246
1247 // Return
1248 RackInstanceNode::ChannelMap returnChannelMap;
1249 returnChannelMap[0] = { rackInstance.leftOutputComesFrom - 1, 0, rackInstance.leftOutDb };
1250 returnChannelMap[1] = { rackInstance.rightOutputComesFrom - 1, 1, rackInstance.rightOutDb };
1251 node = makeNode<RackInstanceNode> (std::move (node), std::move (returnChannelMap));
1252
1253 return makeNode<RackReturnNode> (std::move (node),
1254 [wetGain = rackInstance.wetGain] { return wetGain->getCurrentValue(); },
1255 inputNode,
1256 [dryGain = rackInstance.dryGain] { return dryGain->getCurrentValue(); });
1257}
1258
1259std::unique_ptr<tracktion::graph::Node> createPluginNodeForList (PluginList& list, const TrackMuteState* trackMuteState, std::unique_ptr<Node> node,
1260 tracktion::graph::PlayHeadState& playHeadState, const CreateNodeParams& params)
1261{
1262 for (auto p : list)
1263 {
1264 if (! params.forRendering && p->isFrozen())
1265 continue;
1266
1267 if (auto meterPlugin = dynamic_cast<LevelMeterPlugin*> (p))
1268 {
1269 node = makeNode<LevelMeasurerProcessingNode> (std::move (node), *meterPlugin);
1270 }
1271 else if (auto sendPlugin = dynamic_cast<AuxSendPlugin*> (p))
1272 {
1273 if (sendPlugin->isEnabled())
1274 node = makeNode<AuxSendNode> (std::move (node), sendPlugin->busNumber, *sendPlugin,
1275 playHeadState, trackMuteState,
1276 list.getEdit().engine.getEngineBehaviour().shouldProcessAuxSendWhenTrackIsMuted (*sendPlugin));
1277 }
1278 else if (auto returnPlugin = dynamic_cast<AuxReturnPlugin*> (p))
1279 {
1280 if (returnPlugin->isEnabled())
1281 node = makeNode<ReturnNode> (std::move (node), returnPlugin->busNumber);
1282 }
1283 else if (auto rackInstance = dynamic_cast<RackInstance*> (p))
1284 {
1285 node = createNodeForRackInstance (*rackInstance, std::move (node));
1286 }
1287 else if (auto insertPlugin = dynamic_cast<InsertPlugin*> (p))
1288 {
1289 if (! insertPlugin->isEnabled())
1290 continue;
1291
1292 if (auto insertReturnNode = createInsertReturnNode (*insertPlugin, playHeadState, params))
1293 node = makeNode<InsertNode> (std::move (node), *insertPlugin, std::move (insertReturnNode),
1294 SampleRateAndBlockSize { params.sampleRate, params.blockSize });
1295 }
1296 else
1297 {
1298 node = createNodeForPlugin (*p, trackMuteState, std::move (node), params);
1299 }
1300 }
1301
1302 return node;
1303}
1304
1305std::unique_ptr<tracktion::graph::Node> createModifierNodeForList (ModifierList* list,
1307 TrackMuteState* trackMuteState,
1309 tracktion::graph::PlayHeadState& playHeadState,
1310 const CreateNodeParams& params)
1311{
1312 if (list != nullptr)
1313 {
1314 for (auto& modifier : list->getModifiers())
1315 {
1316 if (modifier->getProcessingPosition() != position)
1317 continue;
1318
1319 node = makeNode<ModifierNode> (std::move (node), modifier, params.sampleRate, params.blockSize,
1320 trackMuteState, playHeadState, params.forRendering);
1321 }
1322 }
1323
1324 return node;
1325}
1326
1327std::unique_ptr<tracktion::graph::Node> createPluginNodeForTrack (Track& t,
1328 TrackMuteState& trackMuteState,
1330 tracktion::graph::PlayHeadState& playHeadState,
1331 const CreateNodeParams& params)
1332{
1333 node = createModifierNodeForList (t.getModifierList(), Modifier::ProcessingPosition::preFX,
1334 &trackMuteState, std::move (node), playHeadState, params);
1335
1336 if (params.includePlugins)
1337 node = createPluginNodeForList (t.pluginList, &trackMuteState, std::move (node), playHeadState, params);
1338
1339 node = createModifierNodeForList (t.getModifierList(), Modifier::ProcessingPosition::postFX,
1340 &trackMuteState, std::move (node), playHeadState, params);
1341
1342 return node;
1343}
1344
1345juce::Array<Track*> getDirectInputTracks (AudioTrack& at)
1346{
1347 juce::Array<Track*> inputTracks;
1348
1349 for (auto track : getAudioTracks (at.edit))
1350 if (! track->isPartOfSubmix() && track != &at && track->getOutput().outputsToDestTrack (at))
1351 inputTracks.add (track);
1352
1353 for (auto track : getTracksOfType<FolderTrack> (at.edit, true))
1354 if (! track->isPartOfSubmix() && track->getOutput() != nullptr && track->getOutput()->outputsToDestTrack (at))
1355 inputTracks.add (track);
1356
1357 return inputTracks;
1358}
1359
1361{
1362 if (at.getCompGroup() == -1)
1363 return node;
1364
1365 if (auto tc = at.edit.getTrackCompManager().getTrackComp (&at))
1366 {
1367 const auto crossfadeTimeMs = at.edit.engine.getPropertyStorage().getProperty (SettingID::compCrossfadeMs, 20.0);
1368 const auto crossfadeTime = TimeDuration::fromSeconds (static_cast<double> (crossfadeTimeMs) / 1000.0);
1369
1370 const auto nonMuteTimes = tc->getNonMuteTimes (at, crossfadeTime);
1371 const auto muteTimes = TrackCompManager::TrackComp::getMuteTimes (nonMuteTimes);
1372
1373 if (muteTimes.isEmpty())
1374 return node;
1375
1376 node = makeNode<TimedMutingNode> (std::move (node), std::move (muteTimes), params.processState.playHeadState);
1377
1378 for (auto r : nonMuteTimes)
1379 {
1380 auto fadeIn = r.withLength (crossfadeTime) - 0.0001s;
1381 auto fadeOut = fadeIn.movedToEndAt (r.getEnd() + 0.0001s);
1382
1383 if (! (fadeIn.isEmpty() && fadeOut.isEmpty()))
1384 node = makeNode<FadeInOutNode> (std::move (node),
1385 params.processState,
1386 TimeRange { fadeIn.getStart(), fadeIn.getEnd() },
1387 TimeRange { fadeOut.getStart(), fadeOut.getEnd() },
1388 AudioFadeCurve::convex,
1389 AudioFadeCurve::convex, false);
1390 }
1391 }
1392
1393 return node;
1394}
1395
1396std::unique_ptr<tracktion::graph::Node> createNodeForAudioTrack (AudioTrack& at, const CreateNodeParams& params)
1397{
1399 jassert (at.isProcessing (false));
1400 auto& playHeadState = params.processState.playHeadState;
1401
1403 return createNodeForFrozenAudioTrack (at, playHeadState, params);
1404
1405 auto inputTracks = getDirectInputTracks (at);
1406 const bool processMidiWhenMuted = at.state.getProperty (IDs::processMidiWhenMuted, false);
1407 auto clipsMuteState = std::make_unique<TrackMuteState> (at, true, processMidiWhenMuted);
1408 auto trackMuteState = std::make_unique<TrackMuteState> (at, false, processMidiWhenMuted);
1409
1410 std::unique_ptr<Node> node = createClipsNode (at, *clipsMuteState, params);
1411
1412 if (node)
1413 {
1414 // When recording, clips should be muted but the plugin should still be audible so use two muting Nodes
1415 node = makeNode<TrackMutingNode> (std::move (clipsMuteState), std::move (node), true);
1416
1417 node = createTrackCompNode (at, std::move (node), params);
1418 }
1419
1420 auto liveInputNode = createLiveInputsNode (at, playHeadState, params);
1421
1422 if (node && ! at.getListeners().isEmpty())
1423 node = makeNode<LiveMidiOutputNode> (at, std::move (node));
1424
1425 if (node)
1426 node = makeNode<LiveMidiInjectingNode> (at, std::move (node));
1427
1428 if (node == nullptr && inputTracks.isEmpty() && liveInputNode == nullptr)
1429 {
1430 // If there are synths on the track, create a stub Node to feed them
1431 for (auto plugin : at.pluginList)
1432 {
1433 if (plugin->producesAudioWhenNoAudioInput())
1434 {
1435 node = makeNode<SilentNode> (2);
1436 break;
1437 }
1438 }
1439
1440 if (! node)
1441 return {};
1442 }
1443
1444 if (liveInputNode)
1445 {
1446 if (node)
1447 {
1448 auto sumNode = std::make_unique<SummingNode>();
1449 sumNode->addInput (std::move (node));
1450 sumNode->addInput (std::move (liveInputNode));
1451 node = std::move (sumNode);
1452 }
1453 else
1454 {
1455 node = std::move (liveInputNode);
1456 }
1457 }
1458
1459 if (! inputTracks.isEmpty())
1460 {
1461 auto sumNode = std::make_unique<SummingNode>();
1462
1463 if (node)
1464 sumNode->addInput (std::move (node));
1465
1466 for (auto inputTrack : inputTracks)
1467 if (auto n = createNodeForTrack (*inputTrack, params))
1468 sumNode->addInput (std::move (n));
1469
1470 node = std::move (sumNode);
1471 }
1472
1473 node = createPluginNodeForTrack (at, *trackMuteState, std::move (node), playHeadState, params);
1474
1475 if (isSidechainSource (at))
1476 node = makeNode<SendNode> (std::move (node), getSidechainBusID (at.itemID));
1477
1478 node = makeNode<TrackMutingNode> (std::move (trackMuteState), std::move (node), false);
1479
1480 if (! params.forRendering)
1481 {
1482 if (at.getWaveInputDevice().isEnabled())
1483 node = makeNode<SendNode> (std::move (node), getWaveInputDeviceBusID (at.itemID));
1484
1485 if (at.getMidiInputDevice().isEnabled())
1486 node = makeNode<SendNode> (std::move (node), getMidiInputDeviceBusID (at.itemID));
1487 }
1488
1489 return node;
1490}
1491
1492//==============================================================================
1493std::unique_ptr<tracktion::graph::Node> createNodeForSubmixTrack (FolderTrack& submixTrack, const CreateNodeParams& params)
1494{
1496 jassert (submixTrack.isSubmixFolder());
1497
1498 juce::Array<AudioTrack*> subAudioTracks;
1499 juce::Array<FolderTrack*> subFolderTracks;
1500
1501 for (auto t : submixTrack.getAllSubTracks (false))
1502 {
1503 if (auto ft = dynamic_cast<AudioTrack*> (t))
1504 subAudioTracks.add (ft);
1505
1506 if (auto ft = dynamic_cast<FolderTrack*> (t))
1507 subFolderTracks.add (ft);
1508 }
1509
1510 if (subAudioTracks.isEmpty() && subFolderTracks.isEmpty())
1511 return {};
1512
1514 sumNode->setDoubleProcessingPrecision (submixTrack.edit.engine.getPropertyStorage().getProperty (SettingID::use64Bit, false));
1515
1516 // Create nodes for any submix tracks
1517 for (auto ft : subFolderTracks)
1518 {
1519 if (params.allowedTracks != nullptr && ! params.allowedTracks->contains (ft))
1520 continue;
1521
1522 if (! ft->isProcessing (true))
1523 continue;
1524
1525 if (ft->isSubmixFolder())
1526 {
1527 if (auto node = createNodeForSubmixTrack (*ft, params))
1528 sumNode->addInput (std::move (node));
1529 }
1530 else
1531 {
1532 for (auto at : ft->getAllAudioSubTracks (false))
1533 if (params.allowedTracks == nullptr || params.allowedTracks->contains (at))
1534 if (auto node = createNodeForAudioTrack (*at, params))
1535 sumNode->addInput (std::move (node));
1536 }
1537 }
1538
1539 // Then add any audio tracks
1540 for (auto at : subAudioTracks)
1541 if (params.allowedTracks == nullptr || params.allowedTracks->contains (at))
1542 if (at->isProcessing (true))
1543 if (auto node = createNodeForAudioTrack (*at, params))
1544 sumNode->addInput (std::move (node));
1545
1546 if (sumNode->getDirectInputNodes().empty())
1547 return {};
1548
1549 // Finally the effects
1550 std::unique_ptr<Node> node = std::move (sumNode);
1551 auto trackMuteState = std::make_unique<TrackMuteState> (submixTrack, false, false);
1552
1553 node = createPluginNodeForTrack (submixTrack, *trackMuteState, std::move (node), params.processState.playHeadState, params);
1554
1555 node = makeNode<TrackMutingNode> (std::move (trackMuteState), std::move (node), false);
1556
1557 return node;
1558}
1559
1560//==============================================================================
1561std::unique_ptr<tracktion::graph::Node> createNodeForTrack (Track& track, const CreateNodeParams& params)
1562{
1563 if (auto t = dynamic_cast<AudioTrack*> (&track))
1564 {
1565 if (! t->isProcessing (true))
1566 return {};
1567
1568 if (! t->createsOutput())
1569 return {};
1570
1571 if (t->isPartOfSubmix() && ! shouldRenderTrackInSubmix (*t, params))
1572 return {};
1573
1574 if (! params.forRendering && t->isFrozen (Track::groupFreeze))
1575 return {};
1576
1577 return createNodeForAudioTrack (*t, params);
1578 }
1579
1580 if (auto t = dynamic_cast<FolderTrack*> (&track))
1581 {
1582 if (! t->isSubmixFolder())
1583 return {};
1584
1585 if (t->isPartOfSubmix() && ! shouldRenderTrackInSubmix (*t, params))
1586 return {};
1587
1588 if (t->getOutput() == nullptr)
1589 return {};
1590
1591 return createNodeForSubmixTrack (*t, params);
1592 }
1593
1594 return {};
1595}
1596
1597//==============================================================================
1598std::unique_ptr<Node> createNodeForRackType (RackType& rackType, const CreateNodeParams& params)
1599{
1600 const auto rackInputID = getRackInputBusID (rackType.rackID);
1601 const auto rackOutputID = getRackOutputBusID (rackType.rackID);
1602
1603 auto rackInputNode = makeNode<ReturnNode> (rackInputID);
1604 auto rackNode = RackNodeBuilder::createRackNode (rackType, params.sampleRate, params.blockSize, std::move (rackInputNode),
1605 params.processState, params.forRendering);
1606 auto rackOutputNode = makeNode<SendNode> (std::move (rackNode), rackOutputID);
1607
1608 return makeNode<SinkNode> (std::move (rackOutputNode));
1609}
1610
1611std::vector<std::unique_ptr<Node>> createNodesForRacks (RackTypeList& rackTypeList,
1612 const CreateNodeParams& params)
1613{
1615
1616 for (auto rackType : rackTypeList.getTypes())
1617 if (getEnabledInstancesForRack (*rackType).size() > 0)
1618 if (auto rackNode = createNodeForRackType (*rackType, params))
1619 nodes.push_back (std::move (rackNode));
1620
1621 return nodes;
1622}
1623
1624std::unique_ptr<Node> createRackNode (std::unique_ptr<Node> input,
1625 RackTypeList& rackTypeList,
1626 const CreateNodeParams& params)
1627{
1628 // Finally add the RackType Nodes
1629 auto rackNodes = createNodesForRacks (rackTypeList, params);
1630
1631 if (rackNodes.empty())
1632 return input;
1633
1634 auto sumNode = std::make_unique<SummingNode> (std::move (rackNodes));
1635 sumNode->addInput (std::move (input));
1636 input = std::move (sumNode);
1637
1638 return input;
1639}
1640
1641//==============================================================================
1642std::unique_ptr<Node> createInsertReturnNode (InsertPlugin& insert,
1643 tracktion::graph::PlayHeadState& playHeadState,
1644 const CreateNodeParams& params)
1645{
1646 if (insert.getReturnDeviceType() != InsertPlugin::noDevice)
1647 for (auto i : insert.edit.getAllInputDevices())
1648 if (i->owner.getName() == insert.inputDevice)
1649 return createLiveInputNodeForDevice (*i, playHeadState, params, EditItemID());
1650
1651 return {};
1652}
1653
1654std::unique_ptr<Node> createInsertSendNode (InsertPlugin& insert, OutputDevice& device)
1655{
1656 if (insert.outputDevice != device.getName())
1657 return {};
1658
1659 return makeNode<InsertSendNode> (insert);
1660}
1661
1662//==============================================================================
1663std::unique_ptr<tracktion::graph::Node> createGroupFreezeNodeForDevice (Edit& edit,
1664 OutputDevice& device,
1665 ProcessState& processState)
1666{
1668
1669 for (auto& freezeFile : TemporaryFileManager::getFrozenTrackFiles (edit))
1670 {
1671 const auto outId = TemporaryFileManager::getDeviceIDFromFreezeFile (edit, freezeFile);
1672
1673 if (device.getDeviceID() == outId)
1674 {
1675 AudioFile af (edit.engine, freezeFile);
1676 const auto length = TimeDuration::fromSeconds (af.getLength());
1677
1678 if (length <= 0.0s)
1679 return {};
1680
1681 auto node = tracktion::graph::makeNode<WaveNode> (af, TimeRange (0.0s, length),
1682 0.0s, TimeRange(), LiveClipLevel(),
1683 1.0,
1686 processState,
1687 EditItemID::fromRawID ((uint64_t) device.getName().hash()),
1688 false);
1689
1690 return makeNode<TrackMutingNode> (std::make_unique<TrackMuteState> (edit), std::move (node), false);
1691 }
1692 }
1693
1694 return {};
1695}
1696
1697//==============================================================================
1699 OutputDevice& device,
1700 PlayHeadState& playHeadState,
1702{
1703 if (auto waveDevice = dynamic_cast<WaveOutputDevice*> (&device))
1704 {
1705 std::vector<std::pair<int /*source channel*/, int /*dest channel*/>> channelMap;
1706 int sourceIndex = 0;
1707
1708 for (const auto& channel : waveDevice->getChannels())
1709 {
1710 if (channel.indexInDevice != -1)
1711 channelMap.push_back (std::make_pair (sourceIndex, channel.indexInDevice));
1712
1713 ++sourceIndex;
1714 }
1715
1716 return tracktion::graph::makeNode<ChannelRemappingNode> (std::move (node), channelMap, false);
1717 }
1718 else if (auto midiInstance = dynamic_cast<MidiOutputDeviceInstance*> (epc.getOutputFor (&device)))
1719 {
1720 return tracktion::graph::makeNode<MidiOutputDeviceInstanceInjectingNode> (*midiInstance, std::move (node),
1721 playHeadState.playHead);
1722 }
1723
1724 return {};
1725}
1726
1727std::unique_ptr<tracktion::graph::Node> createMasterPluginsNode (Edit& edit,
1728 tracktion::graph::PlayHeadState& playHeadState,
1730 const CreateNodeParams& params)
1731{
1732 if (! params.includeMasterPlugins)
1733 return node;
1734
1735 auto tempoTrack = edit.getTempoTrack();
1736 auto tempoModList = tempoTrack != nullptr ? tempoTrack->getModifierList() : nullptr;
1737
1738 node = createModifierNodeForList (tempoModList, Modifier::ProcessingPosition::preFX,
1739 nullptr, std::move (node), playHeadState, params);
1740
1741 auto masterTrack = edit.getMasterTrack();
1742 auto masterModList = masterTrack != nullptr ? masterTrack->getModifierList() : nullptr;
1743
1744 node = createModifierNodeForList (masterModList, Modifier::ProcessingPosition::preFX,
1745 nullptr, std::move (node), playHeadState, params);
1746
1747 node = createPluginNodeForList (edit.getMasterPluginList(), nullptr, std::move (node), playHeadState, params);
1748
1749 node = createModifierNodeForList (tempoModList, Modifier::ProcessingPosition::postFX,
1750 nullptr, std::move (node), playHeadState, params);
1751 node = createModifierNodeForList (masterModList, Modifier::ProcessingPosition::postFX,
1752 nullptr, std::move (node), playHeadState, params);
1753
1754 if (auto masterVolPlugin = edit.getMasterVolumePlugin())
1755 node = createNodeForPlugin (*masterVolPlugin, nullptr, std::move (node), params);
1756
1757 return node;
1758}
1759
1760std::unique_ptr<tracktion::graph::Node> createMasterFadeInOutNode (Edit& edit,
1762 const CreateNodeParams& params)
1763{
1764 if (! params.includeMasterPlugins)
1765 return node;
1766
1767 if (edit.masterFadeIn > 0_td || edit.masterFadeOut > 0_td)
1768 {
1769 auto length = toPosition (edit.getLength());
1770 return makeNode<FadeInOutNode> (std::move (node), params.processState,
1771 TimeRange { 0_tp, edit.masterFadeIn },
1772 TimeRange { length - edit.masterFadeOut, length },
1773 edit.masterFadeInType.get(),
1774 edit.masterFadeOutType.get(),
1775 true);
1776 }
1777
1778 return node;
1779}
1780
1781}
1782
1783//==============================================================================
1785{
1786 Edit& edit = epc.edit;
1787 auto& playHeadState = params.processState.playHeadState;
1788 auto insertPlugins = getAllPluginsOfType<InsertPlugin> (edit);
1789
1792 std::vector<OutputDevice*> devicesWithFrozenNodes;
1793
1794 for (auto t : getAllTracks (edit))
1795 {
1796 if (params.allowedTracks != nullptr && ! params.allowedTracks->contains (t))
1797 continue;
1798
1799 if (auto output = getTrackOutput (*t))
1800 {
1801 if (auto device = output->getOutputDevice (false))
1802 {
1803 if (! device->isEnabled())
1804 continue;
1805
1806 if (! params.forRendering && t->isFrozen (Track::groupFreeze))
1807 {
1808 if (std::find (devicesWithFrozenNodes.begin(), devicesWithFrozenNodes.end(), device)
1809 != devicesWithFrozenNodes.end())
1810 continue;
1811
1812 if (auto node = createGroupFreezeNodeForDevice (edit, *device, params.processState))
1813 {
1814 deviceNodes[device].push_back (std::move (node));
1815 devicesWithFrozenNodes.push_back (device);
1816 }
1817 }
1818 else if (auto node = createNodeForTrack (*t, params))
1819 {
1820 deviceNodes[device].push_back (std::move (node));
1821 }
1822 }
1823 }
1824 }
1825
1826 // Add deviceNodes for any devices only being used by InsertPlugins
1827 for (auto ins : insertPlugins)
1828 {
1829 if (ins->getSendDeviceType() != InsertPlugin::noDevice)
1830 {
1831 if (auto device = edit.engine.getDeviceManager().findOutputDeviceWithName (ins->outputDevice))
1832 {
1833 auto& trackNodeVector = deviceNodes[device];
1834 juce::ignoreUnused (trackNodeVector);
1835 // We don't need to add anything to the vector, just ensure the device is in the map
1836 }
1837 }
1838 }
1839
1840 // Add deviceNodes for any devices only being used by the click track
1841 for (int i = edit.engine.getDeviceManager().getNumOutputDevices(); --i >= 0;)
1842 {
1843 if (auto device = edit.engine.getDeviceManager().getOutputDeviceAt (i))
1844 {
1845 if (! edit.isClickTrackDevice (*device))
1846 continue;
1847
1848 auto& trackNodeVector = deviceNodes[device];
1849 juce::ignoreUnused (trackNodeVector);
1850 // We don't need to add anything to the vector, just ensure the device is in the map
1851 }
1852 }
1853
1854 // Add deviceNodes for any devices only being used by the MIDI clock or MTC
1855 for (int i = edit.engine.getDeviceManager().getNumMidiOutDevices(); --i >= 0;)
1856 {
1857 if (auto device = edit.engine.getDeviceManager().getMidiOutDevice (i))
1858 {
1859 const bool isSendingMidi = device->isSendingClock()
1860 || device->isSendingTimecode()
1861 || device->isSendingControllerMidiClock();
1862
1863 if (! isSendingMidi)
1864 continue;
1865
1866 auto& trackNodeVector = deviceNodes[device];
1867 juce::ignoreUnused (trackNodeVector);
1868 // We don't need to add anything to the vector, just ensure the device is in the map
1869 }
1870 }
1871
1872
1874
1875 for (auto& deviceAndTrackNode : deviceNodes)
1876 {
1877 auto device = deviceAndTrackNode.first;
1878 jassert (device != nullptr);
1879 auto tracksVector = std::move (deviceAndTrackNode.second);
1880
1881 auto sumNode = std::make_unique<SummingNode> (std::move (tracksVector));
1882 sumNode->setDoubleProcessingPrecision (edit.engine.getPropertyStorage().getProperty (SettingID::use64Bit, false));
1883
1884 // Create nodes for any insert plugins
1885 bool deviceIsBeingUsedAsInsert = false;
1886
1887 for (auto ins : insertPlugins)
1888 {
1889 if (ins->isFrozen())
1890 continue;
1891
1892 if (ins->outputDevice != device->getName())
1893 continue;
1894
1895 if (auto sendNode = createInsertSendNode (*ins, *device))
1896 {
1897 sumNode->addInput (std::move (sendNode));
1898 deviceIsBeingUsedAsInsert = true;
1899 }
1900 }
1901
1902 std::unique_ptr<Node> node = std::move (sumNode);
1903
1904 if (! deviceIsBeingUsedAsInsert)
1905 {
1906 if (edit.engine.getDeviceManager().getDefaultWaveOutDeviceID() == device->getDeviceID())
1907 node = createMasterPluginsNode (edit, playHeadState, std::move (node), params);
1908
1909 node = createMasterFadeInOutNode (edit, std::move (node), params);
1910 node = EditNodeBuilder::insertOptionalLastStageNode (std::move (node));
1911
1912 if (edit.getIsPreviewEdit() && node != nullptr)
1913 if (auto previewMeasurer = edit.getPreviewLevelMeasurer())
1914 node = makeNode<SharedLevelMeasuringNode> (std::move (previewMeasurer), std::move (node));
1915 }
1916
1917 if (edit.isClickTrackDevice (*device))
1918 {
1919 auto clickAndTracksNode = makeSummingNode ({ node.release(),
1920 makeNode<ClickNode> (edit, getNumChannelsFromDevice (*device),
1921 device->isMidi(), playHeadState.playHead).release() });
1922 node = std::move (clickAndTracksNode);
1923 }
1924
1925 if (auto outputDeviceNode = createNodeForDevice (epc, *device, playHeadState, std::move (node)))
1926 outputNode->addInput (std::move (outputDeviceNode));
1927 }
1928
1929 std::unique_ptr<Node> finalNode (std::move (outputNode));
1930 finalNode = makeNode<LevelMeasuringNode> (std::move (finalNode), epc.masterLevels);
1931 finalNode = createRackNode (std::move (finalNode), edit.getRackList(), params);
1932 finalNode = makeNode<PlayHeadPositionNode> (params.processState, std::move (finalNode), audibleTimeToUpdate);
1933
1934 return finalNode;
1935}
1936
1938{
1940 auto params = originalParams;
1941 auto& playHeadState = params.processState.playHeadState;
1942
1943 if (params.implicitlyIncludeSubmixChildTracks && params.allowedTracks != nullptr)
1944 *params.allowedTracks = addImplicitSubmixChildTracks (*params.allowedTracks);
1945
1946 for (auto t : getAllTracks (edit))
1947 {
1948 if (params.allowedTracks != nullptr && ! params.allowedTracks->contains (t))
1949 continue;
1950
1951 // Skip tracks that don't output to a device or feed in to other tracks
1952 if (auto output = getTrackOutput (*t))
1953 {
1954 if (output->getDestinationTrack() != nullptr)
1955 continue;
1956 }
1957 else
1958 {
1959 continue;
1960 }
1961
1962 if (auto node = createNodeForTrack (*t, params))
1963 trackNodes.push_back (std::move (node));
1964 }
1965
1966 auto sumNode = std::make_unique<SummingNode> (std::move (trackNodes));
1967 sumNode->setDoubleProcessingPrecision (edit.engine.getPropertyStorage().getProperty (SettingID::use64Bit, false));
1968
1969 auto node = std::unique_ptr<Node> (std::move (sumNode));
1970 node = createMasterPluginsNode (edit, playHeadState, std::move (node), params);
1971 node = createMasterFadeInOutNode (edit, std::move (node), params);
1972 node = createRackNode (std::move (node), edit.getRackList(), params);
1973
1974 return node;
1975}
1976
1978 = [] (std::unique_ptr<tracktion::graph::Node> input) { return input; };
1979
1980}} // namespace tracktion { inline namespace engine
assert
T begin(T... args)
int removeIf(PredicateType &&predicate)
bool isEmpty() const noexcept
int size() const noexcept
ElementType getFirst() const noexcept
void add(const ElementType &newElement)
bool contains(ParameterType elementToLookFor) const
static AudioChannelSet JUCE_CALLTYPE stereo()
static AudioChannelSet JUCE_CALLTYPE canonicalChannelSet(int numChannels)
Type get() const noexcept
bool isValid() const noexcept
constexpr ValueType getStart() const noexcept
const var & getProperty(const Identifier &name) const noexcept
Base class for Clips that produce some kind of audio e.g.
@ speedRamp
Fade is a change of playback speed for tape start/stop effects.
@ chordTrackMono
Clip tracks the chord track with a monophonic pitch change.
ClipSlotList & getClipSlotList()
Returns the ClipSlotList for this track.
juce::ListenerList< Listener > & getListeners()
Returns the listener list so Nodes can manually call them.
bool isFrozen(FreezeType) const override
Returns true if this track is frozen using the given type.
const juce::Array< Clip * > & getClips() const
Returns the clips this owner contains.
A list of the ClipSlots on a Track.
A clip in an edit.
A clip that can contain multiple other clips and mix their output together.
const EditItemID itemID
Every EditItem has an ID which is unique within the edit.
The Tracktion Edit class!
VolumeAndPanPlugin::Ptr getMasterVolumePlugin() const
Returns the master VolumeAndPanPlugin.
juce::CachedValue< TimeDuration > masterFadeIn
The duration in seconds of the fade in.
TimeDuration getLength() const
Returns the end time of last clip.
TrackCompManager & getTrackCompManager() const noexcept
Returns the TrackCompManager for the Edit.
juce::CachedValue< TimeDuration > masterFadeOut
The duration in seconds of the fade out.
MasterTrack * getMasterTrack() const
Returns the global MasterTrack.
SharedLevelMeasurer::Ptr getPreviewLevelMeasurer()
Returns a previously set SharedLevelMeasurer.
TempoTrack * getTempoTrack() const
Returns the global TempoTrack.
juce::CachedValue< AudioFadeCurve::Type > masterFadeInType
The curve type of the fade in.
bool getIsPreviewEdit() const noexcept
Returns true if this Edit is a temporary Edit for previewing files/clips etc.
PluginList & getMasterPluginList() const noexcept
Returns the master PluginList.
bool isClickTrackDevice(OutputDevice &) const
Returns true if the given OutputDevice is being used as the click track output.
juce::CachedValue< AudioFadeCurve::Type > masterFadeOutType
The curve type of the fade out.
RackTypeList & getRackList() const noexcept
Returns the RackTypeList which contains all the RackTypes for the Edit.
Engine & engine
A reference to the Engine.
virtual bool shouldProcessAuxSendWhenTrackIsMuted(AuxSendPlugin &)
Whether or not to include muted track contents in aux send plugins.
PropertyStorage & getPropertyStorage() const
Returns the PropertyStorage user settings customisable XML file.
DeviceManager & getDeviceManager() const
Returns the DeviceManager instance for handling audio / MIDI devices.
EngineBehaviour & getEngineBehaviour() const
Returns the EngineBehaviour instance.
static bool isHostedMidiInputDevice(const MidiInputDevice &)
Returns true if the MidiInput device is a HostedMidiInputDevice.
An instance of an InputDevice that's available to an Edit.
virtual bool isRecordingActive() const
Returns true if recording is enabled and the input is connected to any target.
Edit & edit
The Edit this instance belongs to.
InputDevice & owner
The state of this instance.
InputDevice & getInputDevice() noexcept
Returns the InputDevice this instance belongs to.
@ automatic
Live input is audible when record is enabled.
@ off
Live input is never audible.
@ on
Live input is always audible.
@ beatsRaw
Event times will be in beats relative to the Edit timeline.
Holds a list of Modifiers that have been added to a Track.
Base class for audio or midi output devices, to which a track's output can be sent.
Holds a sequence of plugins.
virtual bool isDisabled()
Plugins can be disabled to avoid them crashing Edits.
virtual bool isMissing()
for things like VSTs where the DLL is missing.
Clip * getOwnerClip() const
Returns the clip if that's what it's in.
Track * getOwnerTrack() const
Returns the track if it's a track or clip plugin.
Holds info about where temp files should go, and tidies up old ones when needed.
Holds the state of a Track and if its contents/plugins should be played or not.
bool shouldTrackBeAudible() const
Returns true if the track's mix bus should be audible.
bool shouldTrackMidiBeProcessed() const
Returns true if the track's MIDI should be processed to avoid breaks in long notes.
Base class for tracks which contain clips and plugins and can be added to Edit[s].
bool isProcessing(bool includeParents) const
Returns true if this track should be included in playback.
@ individualFreeze
Freezes a track in to a single audio file.
@ groupFreeze
Freezes multiple tracks together in to a single file.
juce::ValueTree state
The state of this Track.
ModifierList * getModifierList() const
Returns the ModifierList for this track, if it has one.
A (virtual) audio input device.
A (virtual) audio output device.
Determines how this block relates to other previous render blocks and if the play head has jumped in...
T emplace_back(T... args)
T empty(T... args)
T end(T... args)
T find(T... args)
T front(T... args)
T get(T... args)
T is_pointer_v
#define jassert(expression)
typedef int
T make_pair(T... args)
T max(T... args)
T move(T... args)
constexpr Type jmin(Type a, Type b)
void ignoreUnused(Types &&...) noexcept
bool includePlugins
Whether to include track plugins.
TrackOutput * getTrackOutput(Track &track)
Returns the TrackOutput if the given track has one.
juce::Array< TrackType * > getTracksOfType(const Edit &, bool recursive)
Returns the tracks of a given type in an Edit.
juce::String getName(LaunchQType t)
Returns the name of a LaunchQType for display purposes.
ProcessState & processState
The process state of the graph.
bool includeBypassedPlugins
If false, bypassed plugins will be completely omitted from the graph.
juce::Array< Track * > getAllTracks(const Edit &edit)
Returns all the tracks in an Edit.
bool forRendering
If the node is for rendering or not.
juce::Array< AudioTrack * > getAudioTracks(const Edit &edit)
Returns all the AudioTracks in an Edit.
tempo::Sequence::Position createPosition(const TempoSequence &s)
Creates a Position to iterate over the given TempoSequence.
std::unique_ptr< tracktion::graph::Node > createNodeForEdit(EditPlaybackContext &epc, std::atomic< double > &audibleTimeToUpdate, const CreateNodeParams &params)
Creates a Node to play back an Edit with live inputs and outputs.
bool includeMasterPlugins
Whether to include master plugins, fades and volume.
BeatPosition toBeats(TimePosition tp, const TempoSequence &ts)
Converts a TimePosition to a BeatPosition given a TempoSequence.
juce::Array< Track * > * allowedTracks
The tracks to include.
const juce::Array< Clip * > * allowedClips
The clips to include.
bool readAheadTimeStretchNodes
TEMPORARY: If true, real-time time-stretch Nodes will use a larger buffer and background thread to re...
bool implicitlyIncludeSubmixChildTracks
If true, child track in submixes will be included regardless of the allowedTracks param.
TimePosition toTime(BeatPosition bp, const TempoSequence &ts)
Converts a BeatPosition to a TimePosition given a TempoSequence.
bool allowClipSlots
If true, track's clip slots will be included, set to false to disable these (which will use a slightl...
Contains options for Edit Node content creation.
RangeType< BeatPosition > BeatRange
A RangeType based on beats.
constexpr TimePosition toPosition(TimeDuration)
Converts a TimeDuration to a TimePosition.
RangeType< TimePosition > TimeRange
A RangeType based on real time (i.e.
size_t hash(size_t seed, const T &v)
Hashes a type with a given seed and returns the new hash value.
T push_back(T... args)
T release(T... args)
T size(T... args)
Represents a position in beats.
Represents a duration in real-life time.
Represents a position in real-life time.
ID for objects of type EditElement - e.g.
static std::function< std::unique_ptr< graph::Node >(std::unique_ptr< tracktion::graph::Node >)> insertOptionalLastStageNode
If set, this will be called to give an opportunity to add an additional final node which could be use...
Represents a time range in an Edit stored as either time or beats.
bool isBeats() const
Returns true if the time is stored as beats, false if stored as a TimePosition.
Provides a thread-safe way to share a clip's levels with an audio engine without worrying about the C...
ProcessingPosition
Determines the position in the FX chain where the modifier should be processed.
@ preFX
The Modifier is processed before the plugin chain.
@ postFX
The Modifier is processed after the plugin chain.
Holds the state of a process call.
Describes the time and type of the speed fade in/outs.
time
#define CRASH_TRACER
This macro adds the current location to a stack which gets logged if a crash happens.
LengthOfOneBeat
Used to determine the length of a beat in beat <-> time conversions.
@ isAlwaysACrotchet
Signifies the length of one beat always depends only the current BPM at that point in the edit,...