tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

« « « Anklang Documentation
Loading...
Searching...
No Matches
tracktion_AudioClipBase.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11namespace tracktion { inline namespace engine
12{
13
14//==============================================================================
19{
20public:
23 : ThreadPoolJobWithProgress (TRANS("Detecting tempo")),
24 engine (e), sourceFile (file)
25 {
26 }
27
29 float getBpm() { return bpm; }
30
32 bool isResultSensible() { return isSensible; }
33
34 //==============================================================================
36 JobStatus runJob() override
37 {
38 std::unique_ptr<juce::AudioFormatReader> reader (AudioFileUtils::createReaderFor (engine, sourceFile));
39
40 if (reader == nullptr)
41 return jobHasFinished;
42
43 auto numChannels = (int) reader->numChannels;
44 auto numSamples = reader->lengthInSamples;
45 auto sampleRate = reader->sampleRate;
46
47 if (numSamples <= 0)
48 return jobHasFinished;
49
50 // main detection loop
51 {
52 TempoDetect detector (numChannels, sampleRate);
53 const int blockSize = 65536;
54 const bool useRightChan = numChannels > 1;
55
56 // can't use an AudioScratchBuffer yet
57 juce::AudioBuffer<float> buffer (numChannels, blockSize);
58
59 auto numLeft = numSamples;
60 SampleCount startSample = 0;
61
62 while (numLeft > 0)
63 {
64 if (shouldExit())
65 return jobHasFinished;
66
67 auto numThisTime = (int) std::min ((SampleCount) numLeft, (SampleCount) blockSize);
68 reader->read (&buffer, 0, numThisTime, startSample, true, useRightChan);
69 detector.processSection (buffer, numThisTime);
70
71 startSample += numThisTime;
72 numLeft -= numThisTime;
73 progress = numLeft / (float) numSamples;
74 }
75
76 bpm = detector.finishAndDetect();
77 isSensible = bpm > 0;
78 }
79
80 return jobHasFinished;
81 }
82
84 float getCurrentTaskProgress() override { return progress; }
85
86private:
87 //==============================================================================
88 Engine& engine;
89 juce::File sourceFile;
90 float progress = 0;
91 bool isSensible = false;
92 float bpm = 12.0f;
93
95};
96
97
98//==============================================================================
100{
101public:
102 ProxyGeneratorJob (const AudioFile& o, const AudioFile& p,
103 AudioClipBase& acb, bool renderTimestretched)
104 : GeneratorJob (p), engine (acb.edit.engine), original (o)
105 {
106 setName (TRANS("Creating Proxy") + ": " + acb.getName());
107
108 if (renderTimestretched)
109 proxyInfo = acb.createProxyRenderingInfo();
110 }
111
112 ~ProxyGeneratorJob() override
113 {
115 }
116
117private:
118 Engine& engine;
119 AudioFile original;
121
122 bool render() override
123 {
125
126 AudioFile tempFile (engine, proxy.getFile()
128 .withFileExtension (proxy.getFile().getFileExtension()));
129
130 bool ok = render (tempFile);
131
132 if (ok)
133 {
134 ok = proxy.deleteFile();
135 (void) ok;
136 jassert (ok);
137 ok = tempFile.getFile().moveFileTo (proxy.getFile());
138 jassert (ok);
139 }
140
141 tempFile.deleteFile();
142
143 engine.getAudioFileManager().releaseFile (proxy);
144 return ok;
145 }
146
147 bool render (const AudioFile& tempFile)
148 {
150 AudioFileInfo sourceInfo (original.getInfo());
151
152 // need to strip AIFF metadata to write to wav files
153 if (sourceInfo.metadata.getValue ("MetaDataSource", "None") == "AIFF")
154 sourceInfo.metadata.clear();
155
156 AudioFileWriter writer (tempFile, engine.getAudioFileFormatManager().getWavFormat(),
157 sourceInfo.numChannels, sourceInfo.sampleRate,
158 std::max (16, sourceInfo.bitsPerSample),
159 sourceInfo.metadata, 0);
160
161 return writer.isOpen()
162 && (proxyInfo != nullptr ? proxyInfo->render (engine, original, writer, this, progress)
163 : renderNormalSpeed (writer));
164 }
165
166 bool renderNormalSpeed (AudioFileWriter& writer)
167 {
169 std::unique_ptr<juce::AudioFormatReader> reader (AudioFileUtils::createReaderFor (engine, original.getFile()));
170
171 if (reader == nullptr)
172 return false;
173
174 SampleCount sourceSample = 0;
175 auto samplesToDo = (SampleCount) reader->lengthInSamples;
176
177 while (! shouldExit())
178 {
179 auto numThisTime = (int) std::min (samplesToDo, (SampleCount) 65536);
180
181 if (numThisTime <= 0)
182 return true;
183
184 if (! writer.writeFromAudioReader (*reader, sourceSample, numThisTime))
185 break;
186
187 samplesToDo -= numThisTime;
188 sourceSample += numThisTime;
189
190 progress = juce::jlimit (0.0f, 1.0f, (float) (sourceSample / (double) reader->lengthInSamples));
191 }
192
193 return false;
194 }
195
197};
198
199//==============================================================================
201 : Clip (v, targetParent, id, t),
202 loopInfo (edit.engine, state.getOrCreateChildWithName (IDs::LOOPINFO, getUndoManager()), getUndoManager()),
203 pluginList (edit),
204 lastProxy (edit.engine)
205{
206 auto um = getUndoManager();
207
208 level->dbGain.referTo (state, IDs::gain, um);
209 level->pan.referTo (state, IDs::pan, um);
210 level->mute.referTo (state, IDs::mute, um);
211 channels.referTo (state, IDs::channels, um, juce::AudioChannelSet::stereo().getSpeakerArrangementAsString());
212
213 if (channels.get().isEmpty())
215
216 fadeIn.referTo (state, IDs::fadeIn, um);
217 fadeOut.referTo (state, IDs::fadeOut, um);
218
219 fadeInType.referTo (state, IDs::fadeInType, um, AudioFadeCurve::linear);
220 fadeOutType.referTo (state, IDs::fadeOutType, um, AudioFadeCurve::linear);
221 autoCrossfade.referTo (state, IDs::autoCrossfade, um);
222
223 fadeInBehaviour.referTo (state, IDs::fadeInBehaviour, um, gainFade);
224 fadeOutBehaviour.referTo (state, IDs::fadeOutBehaviour, um, gainFade);
225
226 loopStart.referTo (state, IDs::loopStart, um);
227 loopLength.referTo (state, IDs::loopLength, um);
228
229 loopStartBeats.referTo (state, IDs::loopStartBeats, um);
230 loopLengthBeats.referTo (state, IDs::loopLengthBeats, um);
231
232 resamplingQuality.referTo (state, IDs::resamplingQuality, um, ResamplingQuality::lagrange);
233 proxyAllowed.referTo (state, IDs::proxyAllowed, um, true);
234 transpose.referTo (state, IDs::transpose, um);
235 pitchChange.referTo (state, IDs::pitchChange, um);
236
237 beatSensitivity.referTo (state, IDs::beatSensitivity, um, 0.5f);
238
239 timeStretchMode.referTo (state, IDs::elastiqueMode, um);
240 elastiqueProOptions.referTo (state, IDs::elastiqueOptions, um);
241
242 // Keep this in to handle old edits..
243 if (state.getProperty (IDs::timeStretch))
244 timeStretchMode = juce::VariantConverter<TimeStretcher::Mode>::fromVar (state.getProperty (IDs::stretchMode));
245
246 timeStretchMode = TimeStretcher::checkModeIsAvailable (timeStretchMode);
247
248 autoPitch.referTo (state, IDs::autoPitch, um);
249 autoPitchMode.referTo (state, IDs::autoPitchMode, um);
250 autoTempo.referTo (state, IDs::autoTempo, um);
251 warpTime.referTo (state, IDs::warpTime, um);
252 isReversed.referTo (state, IDs::isReversed, um);
253 autoDetectBeats.referTo (state, IDs::autoDetectBeats, um);
254
255 level->pan = juce::jlimit (-1.0f, 1.0f, static_cast<float> (level->pan.get()));
257
258 useClipLaunchQuantisation.referTo (state, IDs::useClipLaunchQuantisation, um);
259
260 clipEffectsVisible.referTo (state, IDs::effectsVisible, nullptr);
261 updateClipEffectsState();
262
263 updateLeftRightChannelActivenessFlags();
264
265 pluginList.setTrackAndClip (getTrack(), this);
266 pluginList.initialise (state);
267
268 asyncFunctionCaller.addFunction (updateCrossfadesFlag, [this] { updateAutoCrossfades (false); });
269 asyncFunctionCaller.addFunction (updateCrossfadesOverlappedFlag, [this] { updateAutoCrossfades (true); });
270
271 auto pgen = state.getChildWithName (IDs::PATTERNGENERATOR);
272
273 if (pgen.isValid())
274 patternGenerator = std::make_unique<PatternGenerator> (*this, pgen);
275}
276
278{
279 melodyneProxy = nullptr;
280
281 if (renderJob != nullptr)
282 renderJob->removeListener (this);
283}
284
285//==============================================================================
287{
289
290 setCurrentSourceFile (sourceFileReference.getFile());
291
292 stopTimer(); // prevent proxy generation unless we're actually going to be played.
293
294 if (shouldAttemptRender())
295 {
296 auto audioFile = RenderManager::getAudioFileForHash (edit.engine, edit.getTempDirectory (false), getHash());
297
298 if (currentSourceFile != audioFile.getFile())
299 setCurrentSourceFile (audioFile.getFile());
300 }
301
302 if (! edit.getUndoManager().isPerformingUndoRedo())
303 callBlocking ([this] { setLoopDefaults(); });
304}
305
307{
308 if (auto other = dynamic_cast<AudioClipBase*> (c))
309 {
310 Clip::cloneFrom (other);
311
312 const bool wasLooping = loopLengthBeats.get() > BeatDuration() || loopLength.get() > TimeDuration();
313
314 level->dbGain .setValue (other->level->dbGain, nullptr);
315 level->pan .setValue (other->level->pan, nullptr);
316 level->mute .setValue (other->level->mute, nullptr);
317 channels .setValue (other->channels, nullptr);
318 fadeIn .setValue (other->fadeIn, nullptr);
319 fadeOut .setValue (other->fadeOut, nullptr);
320 fadeInType .setValue (other->fadeInType, nullptr);
321 fadeOutType .setValue (other->fadeOutType, nullptr);
322 autoCrossfade .setValue (other->autoCrossfade, nullptr);
323 fadeInBehaviour .setValue (other->fadeInBehaviour, nullptr);
324 fadeOutBehaviour .setValue (other->fadeOutBehaviour, nullptr);
325 loopStart .setValue (other->loopStart, nullptr);
326 loopLength .setValue (other->loopLength, nullptr);
327 loopStartBeats .setValue (other->loopStartBeats, nullptr);
328 loopLengthBeats .setValue (other->loopLengthBeats, nullptr);
329 transpose .setValue (other->transpose, nullptr);
330 pitchChange .setValue (other->pitchChange, nullptr);
331 beatSensitivity .setValue (other->beatSensitivity, nullptr);
332 timeStretchMode .setValue (other->timeStretchMode, nullptr);
333 elastiqueProOptions .setValue (other->elastiqueProOptions, nullptr);
334 autoPitch .setValue (other->autoPitch, nullptr);
335 autoPitchMode .setValue (other->autoPitchMode, nullptr);
336 autoTempo .setValue (other->autoTempo, nullptr);
337 isReversed .setValue (other->isReversed, nullptr);
338 autoDetectBeats .setValue (other->autoDetectBeats, nullptr);
339 warpTime .setValue (other->warpTime, nullptr);
340 proxyAllowed .setValue (other->proxyAllowed, nullptr);
341 resamplingQuality .setValue (other->resamplingQuality, nullptr);
342
343 copyValueTree (loopInfo.state, other->loopInfo.state, nullptr);
344
345 const bool isLooping = loopLengthBeats.get() > BeatDuration() || loopLength.get() > TimeDuration();
346
347 if (! isLooping && wasLooping)
349
351 }
352}
353
void AudioClipBase::updateLeftRightChannelActivenessFlags()
{
    // Rebuilds activeChannels from the "channels" property string.
    juce::String channelMask = channels;

    // An empty mask first resets the set...
    if (channelMask.isEmpty())
        activeChannels = juce::AudioChannelSet::disabled();

    // NOTE(review): the "r"/"l" branches *add* to the existing activeChannels
    // rather than replacing it, and when the mask is empty the final else
    // overwrites the disabled() reset above with the result of parsing an
    // empty string — confirm this fall-through is intentional.
    if (channels == "r") activeChannels.addChannel (juce::AudioChannelSet::right);
    else if (channels == "l") activeChannels.addChannel (juce::AudioChannelSet::left);
    else activeChannels = channelSetFromSpeakerArrangmentString (channelMask);
}
365
367{
369
370 if (clipEffects != nullptr)
371 clipEffects->flushStateToValueTree();
372}
373
375{
376 if (! state.getChildWithName (IDs::PATTERNGENERATOR).isValid())
377 state.addChild (juce::ValueTree (IDs::PATTERNGENERATOR), -1, &edit.getUndoManager());
378
379 jassert (patternGenerator != nullptr);
380 return patternGenerator.get();
381}
382
383//==============================================================================
void AudioClipBase::setParent (ClipOwner* co)
{
    // Forward the re-parenting to the base class first.
    Clip::setParent (co);

    // Re-bind the plugin list to this clip. NOTE(review): the track argument
    // is deliberately nullptr here (unlike the constructor, which passes
    // getTrack()) — presumably the track association is refreshed elsewhere
    // once the new parent is known; confirm against the callers.
    pluginList.setTrackAndClip (nullptr, this);
}
390
392{
393 return canContainAudio (co);
394}
395
397{
398 clearCachedAudioSegmentList();
400
402
403 if (melodyneProxy != nullptr)
404 melodyneProxy->sourceClipChanged();
405}
406
408{
409 return juce::Colours::red.withHue (0.0f);
410}
411
412//==============================================================================
414{
415 if (! isLooping())
416 {
417 if (getSourceLength() <= 0_td)
418 return 100000.0_td;
419
420 if (getAutoTempo())
421 return edit.tempoSequence.toTime (getStartBeat() + BeatDuration::fromBeats (loopInfo.getNumBeats()))
422 - getPosition().getStart();
423
424 return getSourceLength() / speedRatio;
425 }
426
427 return Edit::getMaximumLength();
428}
429
430//==============================================================================
432{
433 level->dbGain = juce::jlimit (-100.0f, 24.0f, g);
434}
435
437{
438 level->pan = std::abs (p) < 0.01 ? 0.0f
439 : juce::jlimit (-1.0f, 1.0f, p);
440}
441
442//==============================================================================
444{
445 if (isLeftChannelActive() != b)
446 {
447 auto set = activeChannels;
448
449 if (b)
450 {
452 }
453 else
454 {
455 set.removeChannel (juce::AudioChannelSet::left);
456
457 if (set.size() == 0)
458 set.addChannel (juce::AudioChannelSet::right);
459 }
460
461 channels = set.getSpeakerArrangementAsString();
462 }
463}
464
466{
467 return activeChannels.size() == 0 || activeChannels.getChannelIndexForType (juce::AudioChannelSet::left) != -1;
468}
469
471{
472 if (isRightChannelActive() != b)
473 {
474 auto set = activeChannels;
475
476 if (b)
477 {
479 }
480 else
481 {
482 set.removeChannel (juce::AudioChannelSet::right);
483
484 if (set.size() == 0)
485 set.addChannel (juce::AudioChannelSet::left);
486 }
487
488 channels = set.getSpeakerArrangementAsString();
489 }
490}
491
493{
494 return activeChannels.size() == 0 || activeChannels.getChannelIndexForType (juce::AudioChannelSet::right) != -1;
495}
496
497//==============================================================================
499{
500 auto len = getPosition().getLength();
501 in = juce::jlimit (TimeDuration(), len, in);
502
503 // check the fades don't overrun
504 if (in + fadeOut > len)
505 {
506 const double scale = len / (in + fadeOut);
507 fadeIn = in * scale;
508 fadeOut = fadeOut * scale;
509 }
510 else if (fadeIn != in)
511 {
512 fadeIn = in;
513 return false;
514 }
515
516 return false;
517}
518
520{
521 auto len = getPosition().getLength();
522 out = juce::jlimit (TimeDuration(), len, out);
523
524 if (fadeIn + out > len)
525 {
526 const double scale = len / (fadeIn + out);
527 fadeIn = fadeIn * scale;
528 fadeOut = out * scale;
529 }
530 else if (fadeOut != out)
531 {
532 fadeOut = out;
533 return false;
534 }
535
536 return false;
537}
538
540{
541 asyncFunctionCaller.handleUpdateNowIfNeeded();
542
543 if (autoCrossfade && getOverlappingClip (ClipDirection::previous) != nullptr)
544 return autoFadeIn;
545
546 auto len = getPosition().getLength();
547
548 if (fadeIn + fadeOut > len)
549 return TimeDuration::fromSeconds (fadeIn * len.inSeconds() / (fadeIn + fadeOut));
550
551 return fadeIn;
552}
553
555{
556 asyncFunctionCaller.handleUpdateNowIfNeeded();
557
558 if (autoCrossfade && getOverlappingClip (ClipDirection::next) != nullptr)
559 return autoFadeOut;
560
561 auto len = getPosition().getLength();
562
563 if (fadeIn + fadeOut > len)
564 return TimeDuration::fromSeconds (fadeOut * len.inSeconds() / (fadeIn + fadeOut));
565
566 return fadeOut;
567}
568
570{
571 if (t != fadeInType)
572 fadeInType = t;
573 else
574 changed(); // keep this, in case they press a fade button twice
575}
576
578{
579 if (t != fadeOutType)
580 fadeOutType = t;
581 else
582 changed(); // keep this, in case they press a fade button twice
583}
584
585//==============================================================================
587{
588 return timeStretchMode;
589}
590
592{
593 TimeStretcher::Mode ts = timeStretchMode;
594
595 if (ts == TimeStretcher::disabled && (getAutoPitch() || getAutoTempo() || getPitchChange() != 0.0f))
597
598 return ts;
599}
600
601//==============================================================================
603{
604 // Arrh! Another horrible hack! Because we need to reverse the loop points BEFORE we've actually
605 // generated the new source file we don't have a valid WaveInfo object. We therefore have to bodge
606 // this like the EditClips to find the number of samples. A better approach would be to have the
607 // LoopInfo use only times, then they would be file agnostic and could be manipulated at any point.
609
610 if (isReversed)
611 wi = AudioFile (edit.engine, sourceFileReference.getFile()).getInfo();
612
613 if (wi.lengthInSamples == 0)
614 return;
615
616 auto ratio = getSpeedRatio();
617 const bool beatBased = getAutoTempo();
618
619 if (beatBased)
620 {
621 auto bps = edit.tempoSequence.getBeatsPerSecondAt (getPosition().getStart());
622 ratio = bps / std::max (1.0, loopInfo.getBeatsPerSecond (wi));
623 }
624
625 jassert (ratio >= 0.1);
626
627 // To find the new offset we need to work out the time in the source file at the end of the clip.
628 // Then we need to reverse that and set it as the new offset. This is complicated by the fact that
629 // the loop length could be beat based.
630
631 if (isLooping())
632 {
633 // Reverse white loop points...
634 auto sourceEnd = toPosition (getSourceLength() / ratio);
635 auto o = getLoopRange();
636 auto n = TimeRange::between (toPosition (sourceEnd - o.getEnd()),
637 toPosition (sourceEnd - o.getStart()));
638 setLoopRange (n);
639
640 // then the offset
641 if (o.getLength() > TimeDuration())
642 {
643 auto clipOffset = std::fmod (getPosition().getOffset().inSeconds(), sourceEnd.inSeconds());
644 auto numLoops = getPosition().getLength() / o.getLength();
645 numLoops += clipOffset / o.getLength().inSeconds();
646 numLoops = numLoops - (int) numLoops;
647
648 auto posAtEnd = o.getStart() + (o.getLength() * numLoops);
649 auto newOffset = sourceEnd - (posAtEnd - n.getStart());
650
651 setOffset (toDuration (newOffset));
652 }
653 }
654 else
655 {
656 // reverse offset
657 auto sourceEnd = toPosition (getSourceLength() / ratio);
658 auto newOffset = sourceEnd - toPosition (getPosition().getLength()) - getPosition().getOffset();
659 setOffset (newOffset);
660 }
661
662 // red in/out markers
663 const SampleCount newIn = loopInfo.getOutMarker() > -1 ? (wi.lengthInSamples - loopInfo.getOutMarker()) : 0;
664 const SampleCount newOut = loopInfo.getInMarker() == 0 ? -1 : (wi.lengthInSamples - loopInfo.getInMarker());
665
666 loopInfo.setInMarker (newIn);
667 loopInfo.setOutMarker (newOut);
668
669 // beat loop points e.g. REX files
670 for (int i = loopInfo.getNumLoopPoints(); --i >= 0;)
671 {
672 const LoopInfo::LoopPoint p = loopInfo.getLoopPoint (i);
673 loopInfo.changeLoopPoint (i, wi.lengthInSamples - p.pos, p.type);
674 }
675}
676
678{
679 auto len = getPosition().getLength();
680
681 // check the fades don't overrun
682 if (fadeIn + fadeOut > len)
683 {
684 const double scale = len / (fadeIn + fadeOut);
685 fadeIn = fadeIn * scale;
686 fadeOut = fadeOut * scale;
687 }
688
689 // also check the auto fades
690 if (autoFadeIn + autoFadeOut > len)
691 {
692 const double scale = len / (autoFadeIn + autoFadeOut);
693 autoFadeIn = autoFadeIn * scale;
694 autoFadeOut = autoFadeOut * scale;
695 }
696}
697
699{
701
702 if (auto ct = getParent())
703 {
704 const auto& clips = ct->getClips();
705 auto ourIndex = clips.indexOf (const_cast<AudioClipBase*> (this));
706
707 if (direction == ClipDirection::next)
708 {
709 for (int i = ourIndex + 1; i < clips.size(); ++i)
710 if (auto c = dynamic_cast<AudioClipBase*> (clips[i]))
711 if (getPosition().time.contains (c->getPosition().getStart() + 0.001s)
712 && ! getPosition().time.contains (c->getPosition().getEnd()))
713 return c;
714 }
715 else if (direction == ClipDirection::previous)
716 {
717 for (int i = ourIndex; --i >= 0;)
718 if (auto c = dynamic_cast<AudioClipBase*> (clips[i]))
719 if (getPosition().time.contains (c->getPosition().getEnd() - 0.001s)
720 && ! getPosition().time.contains (c->getPosition().getStart()))
721 return c;
722 }
723 }
724
725 return {};
726}
727
729{
730 asyncFunctionCaller.updateAsync (updateOverlapped ? updateCrossfadesOverlappedFlag
731 : updateCrossfadesFlag);
732}
733
734void AudioClipBase::updateAutoCrossfades (bool updateOverlapped)
735{
737
738 auto prevClip = getOverlappingClip (ClipDirection::previous);
739 auto nextClip = getOverlappingClip (ClipDirection::next);
740
741 if (updateOverlapped)
742 {
743 if (prevClip != nullptr)
744 prevClip->updateAutoCrossfades (false);
745
746 if (nextClip != nullptr)
747 nextClip->updateAutoCrossfades (false);
748 }
749
750 if (autoCrossfade)
751 {
752 autoFadeIn = (prevClip != nullptr) ? (prevClip->getPosition().getEnd() - getPosition().getStart()) : fadeIn;
753 autoFadeOut = (nextClip != nullptr) ? (getPosition().getEnd() - nextClip->getPosition().getStart()) : fadeOut;
754
756 }
757}
758
760{
761 const auto fade = TimeDuration::fromSeconds (0.005);
762
763 if (fadeIn < fade) setFadeIn (fade);
764 if (fadeOut < fade) setFadeOut (fade);
765}
766
767void AudioClipBase::copyFadeToAutomation (bool useFadeIn, bool removeClipFade)
768{
770
771 TimeRange fadeTime (TimePosition(), useFadeIn ? getFadeIn() : getFadeOut());
772
773 if (useFadeIn)
774 fadeTime = fadeTime.movedToStartAt (getPosition().getStart());
775 else
776 fadeTime = fadeTime.movedToEndAt (getPosition().getEnd());
777
778 auto& ui = edit.engine.getUIBehaviour();
779
780 if (fadeTime.isEmpty())
781 {
782 ui.showWarningMessage (TRANS("Could not create automation.")
784 + TRANS("No fade found for this clip"));
785 return;
786 }
787
788 auto at = dynamic_cast<AudioTrack*> (getTrack());
789
790 if (at == nullptr)
791 return;
792
794
795 if (auto vol = at->getVolumePlugin())
796 param = vol->volParam;
797
798 if (param == nullptr)
799 {
800 ui.showWarningMessage (TRANS("Could not create automation.")
802 + TRANS("No volume plguin was found for this track, please insert one and try again"));
803 return;
804 }
805
806 auto& oldCurve = param->getCurve();
807
808 if (oldCurve.countPointsInRegion (fadeTime) > 0)
809 {
810 if (! ui.showOkCancelAlertBox (TRANS("Overwrite Existing Automation?"),
811 TRANS("There is already automation in this region, applying the curve will overwrite it. Is this OK?")))
812 return;
813 }
814
815 AutomationCurve curve;
816 curve.setOwnerParameter (param.get());
817
818 auto curveType = useFadeIn ? getFadeInType() : getFadeOutType();
819 auto startValue = useFadeIn ? 0.0f : oldCurve.getValueAt (fadeTime.getStart());
820 auto endValue = useFadeIn ? oldCurve.getValueAt (fadeTime.getEnd()) : 0.0f;
821 auto valueLimits = juce::Range<float>::between (startValue, endValue);
822
823 switch (curveType)
824 {
825 case AudioFadeCurve::convex:
826 case AudioFadeCurve::concave:
827 case AudioFadeCurve::sCurve:
828 {
829 for (int i = 0; i < 10; ++i)
830 {
831 auto alpha = i / 9.0f;
832 auto time = toPosition (fadeTime.getLength()) * alpha;
833
834 if (! useFadeIn)
835 alpha = 1.0f - alpha;
836
837 auto volCurveGain = AudioFadeCurve::alphaToGainForType (curveType, alpha);
838 auto value = valueLimits.getStart() + (volCurveGain * valueLimits.getLength());
839 curve.addPoint (time, (float) value, 0.0f);
840 }
841
842 break;
843 }
844
845 case AudioFadeCurve::linear:
846 default:
847 {
848 curve.addPoint (TimePosition(), useFadeIn ? valueLimits.getStart() : valueLimits.getLength(), 0.0f);
849 curve.addPoint (toPosition (fadeTime.getLength()), useFadeIn ? valueLimits.getLength() : valueLimits.getStart(), 0.0f);
850 break;
851 }
852 }
853
854 oldCurve.mergeOtherCurve (curve, fadeTime, TimePosition(), TimeDuration(), true, true);
855
856 // also need to remove the point just before the first one we added
857 if (useFadeIn && (oldCurve.countPointsInRegion ({ {}, fadeTime.getStart() + (fadeTime.getLength() * 0.09) }) == 2))
858 oldCurve.removePoint (0);
859
860 if (removeClipFade)
861 {
862 if (useFadeIn)
863 setFadeIn ({});
864 else
865 setFadeOut ({});
866 }
867
868 at->setCurrentlyShownAutoParam (param);
869}
870
void AudioClipBase::setLoopInfo (const LoopInfo& loopInfo_)
{
    // Replaces this clip's loop metadata wholesale with the supplied value.
    loopInfo = loopInfo_;
}
875
877{
878 if (! canLoop())
879 num = 0;
880
881 auto pos = getPosition();
882 auto len = std::min (getSourceLength() / speedRatio, pos.getLength());
883
884 if (len <= 0_td)
885 return;
886
887 if (autoTempo)
888 {
889 auto& ts = edit.tempoSequence;
890 auto newStart = BeatPosition::fromBeats (pos.getOffset().inSeconds() * ts.getBeatsPerSecondAt (pos.getStart()));
891 setLoopRangeBeats ({ newStart, newStart + getLengthInBeats() });
892 setLength (pos.getLength() * num, true);
893 }
894 else
895 {
896 setLoopRange ({ toPosition (pos.getOffset()), toPosition (pos.getOffset()) + len });
897 setLength (len * num, true);
898 }
899
900 setOffset ({});
901}
902
904{
905 auto pos = getPosition();
906
907 if (autoTempo)
908 {
909 pos.time = pos.time.withEnd (getTimeOfRelativeBeat (loopLengthBeats));
910 pos.offset = toDuration (getTimeOfRelativeBeat (toDuration (loopStartBeats.get())) - toDuration (pos.getStart()));
911 }
912 else
913 {
914 pos.time = pos.time.withEnd (pos.time.getStart() + loopLength.get());
915 pos.offset = toDuration (loopStart);
916 }
917
918 setLoopRange ({});
919 setPosition (pos);
920
921 if (getPosition().getLength() > getMaximumLength())
922 setLength (getMaximumLength(), true);
923}
924
926{
927 if (! beatBasedLooping())
928 return { loopStart, loopStart + loopLength };
929
930 auto bps = edit.tempoSequence.getBeatsPerSecondAt (getPosition().getStart());
931
932 return { TimePosition::fromSeconds (loopStartBeats.get().inBeats() / bps),
933 TimePosition::fromSeconds ((loopStartBeats + loopLengthBeats).inBeats() / bps) };
934}
935
937{
938 return isUsingMelodyne() ? false
939 : loopInfo.isLoopable();
940}
941
943{
944 if (! beatBasedLooping())
945 return loopStart;
946
947 return TimePosition::fromSeconds (loopStartBeats.get().inBeats() / edit.tempoSequence.getBeatsPerSecondAt (getPosition().getStart()));
948}
949
951{
952 if (! beatBasedLooping())
953 return loopLength;
954
955 return TimeDuration::fromSeconds (loopLengthBeats.get().inBeats() / edit.tempoSequence.getBeatsPerSecondAt (getPosition().getStart()));
956}
957
959{
960 if (beatBasedLooping())
961 return loopStartBeats;
962
963 return BeatPosition::fromBeats (loopStart.get().inSeconds() * edit.tempoSequence.getBeatsPerSecondAt (getPosition().getStart()));
964}
965
967{
968 if (beatBasedLooping())
969 return loopLengthBeats;
970
971 return BeatDuration::fromBeats (loopLength.get().inSeconds() * edit.tempoSequence.getBeatsPerSecondAt (getPosition().getStart()));
972}
973
974void AudioClipBase::setLoopRange (TimeRange newRange)
975{
976 if (autoTempo)
977 {
978 auto pos = getPosition();
979 auto& ts = edit.tempoSequence;
980 auto newStart = BeatPosition::fromBeats (newRange.getStart().inSeconds() * ts.getBeatsPerSecondAt (pos.getStart()));
981 auto newLength = ts.toBeats (pos.getStart() + newRange.getLength()) - ts.toBeats (pos.getStart());
982 setLoopRangeBeats ({ newStart, newStart + newLength });
983 }
984 else
985 {
986 auto sourceLen = getSourceLength();
987
988 if (sourceLen > 0s)
989 {
990 // limits the number of times longer than the source file length the loop length can be
991 const double maxMultiplesOfSourceLengthForLooping = 50.0;
992
993 auto newStart = juce::jlimit (0_tp, toPosition (sourceLen) / getSpeedRatio(), newRange.getStart());
994 auto newLength = juce::jlimit (0_td, sourceLen * maxMultiplesOfSourceLengthForLooping / getSpeedRatio(), newRange.getLength());
995
996 if (loopStart != newStart || loopLength != newLength)
997 {
998 loopStart = newStart;
999 loopLength = newLength;
1000 }
1001 }
1002 }
1003}
1004
1005void AudioClipBase::setLoopRangeBeats (BeatRange newRangeBeats)
1006{
1007 auto newStartBeat = juce::jlimit (0_bp, BeatPosition::fromBeats (loopInfo.getNumBeats()), newRangeBeats.getStart());
1008 auto newLengthBeat = juce::jlimit (0_bd, BeatDuration::fromBeats (loopInfo.getNumBeats() * 2), newRangeBeats.getLength());
1009
1010 if (loopStartBeats != newStartBeat || loopLengthBeats != newLengthBeat)
1011 {
1012 Clip::setSpeedRatio (1.0);
1013 setAutoTempo (true);
1014
1015 loopStartBeats = newStartBeat;
1016 loopLengthBeats = newLengthBeat;
1017 }
1018}
1019
void AudioClipBase::setAutoDetectBeats (bool b)
{
    // Toggles automatic beat-marker detection, then re-runs detection so the
    // loop info reflects the new setting at the current sensitivity.
    autoDetectBeats = b;
    setLoopInfo (autoDetectBeatMarkers (loopInfo, b, beatSensitivity));
}
1025
void AudioClipBase::setBeatSensitivity (float s)
{
    // Stores the new detection sensitivity and re-runs beat-marker detection
    // with the current auto-detect flag.
    beatSensitivity = s;
    setLoopInfo (autoDetectBeatMarkers (loopInfo, autoDetectBeats, s));
}
1031
1033{
1034 clearCachedAudioSegmentList();
1036
1037 if (melodyneProxy != nullptr)
1038 melodyneProxy->sourceClipChanged();
1039}
1040
void AudioClipBase::clearCachedAudioSegmentList()
{
    // Message-thread only, once the edit has finished loading.
    if (! edit.isLoading())
        TRACKTION_ASSERT_MESSAGE_THREAD

    // Drop the cache; getAudioSegmentList() rebuilds it lazily on next use.
    audioSegmentList.reset();
}
1048
const AudioSegmentList& AudioClipBase::getAudioSegmentList()
{
    // Message-thread only, once the edit has finished loading.
    if (! edit.isLoading())
        TRACKTION_ASSERT_MESSAGE_THREAD

    // Lazily build and cache the segment list on first request.
    if (audioSegmentList == nullptr)
        audioSegmentList = AudioSegmentList::create (*this, false, false);

    return *audioSegmentList;
}
1059
void AudioClipBase::setResamplingQuality (ResamplingQuality rq)
{
    // Plain property setter; resamplingQuality is a cached state value.
    resamplingQuality = rq;
}
1064
ResamplingQuality AudioClipBase::getResamplingQuality() const
{
    // Returns the clip's current resampling-quality property.
    return resamplingQuality;
}
1069
1070//==============================================================================
1072{
1073 if (! autoTempo)
1074 {
1075 auto factor = getSpeedRatio() / r;
1076 auto newLoopStart = getLoopStart() * factor;
1077 auto newLoopLen = getLoopLength() * factor;
1078
1079 Clip::setSpeedRatio (r);
1080 setLoopRange ({ newLoopStart, newLoopStart + newLoopLen });
1081 }
1082}
1083
bool AudioClipBase::isUsingMelodyne() const
{
    // True when the current time-stretch mode is one of the Melodyne modes.
    return TimeStretcher::isMelodyne (timeStretchMode);
}
1088
void AudioClipBase::loadMelodyneState()
{
    // Delegates to the ARA setup path. The meaning of the `true` flag isn't
    // visible from here — see setupARA's declaration before relying on it.
    setupARA (true);
}
1093
void AudioClipBase::showMelodyneWindow()
{
    // No-op unless the Melodyne proxy has been created.
    if (melodyneProxy != nullptr)
        melodyneProxy->showPluginWindow();
}
1099
void AudioClipBase::hideMelodyneWindow()
{
    // No-op unless the Melodyne proxy has been created.
    if (melodyneProxy != nullptr)
        melodyneProxy->hidePluginWindow();
}
1105
void AudioClipBase::melodyneConvertToMIDI()
{
    // Pulls the analysed note sequence out of the Melodyne proxy and inserts
    // it into this clip's track as a new MIDI clip covering the same range.
    if (melodyneProxy != nullptr)
    {
        juce::MidiMessageSequence m (melodyneProxy->getAnalysedMIDISequence());

        if (m.getNumEvents() > 0)
        {
            // The clip state is assembled off-line with no undo manager and
            // inserted in a single step at the end.
            juce::UndoManager* um = nullptr;

            juce::ValueTree midiClip (IDs::MIDICLIP);
            midiClip.setProperty (IDs::name, getName(), um);
            midiClip.setProperty (IDs::start, getPosition().getStart().inSeconds(), um);
            midiClip.setProperty (IDs::length, getPosition().getLength().inSeconds(), um);

            juce::ValueTree ms (IDs::SEQUENCE);
            ms.setProperty (IDs::ver, 1, um);
            ms.setProperty (IDs::channelNumber, 1, um);

            midiClip.addChild (ms, -1, um);

            auto& ts = edit.tempoSequence;

            for (int i = 0; i < m.getNumEvents(); ++i)
            {
                auto& e = *m.getEventPointer (i);

                // Only note-ons with a paired note-off become notes. The
                // second-based timestamps are converted to beats ("b" start,
                // "l" length) via the edit's tempo sequence.
                if (e.noteOffObject != nullptr)
                {
                    juce::ValueTree note (IDs::NOTE);
                    note.setProperty ("p", e.message.getNoteNumber(), um);
                    note.setProperty ("v", e.message.getVelocity(), um);
                    note.setProperty ("b", ts.toBeats (TimePosition::fromSeconds (e.message.getTimeStamp())).inBeats(), um);
                    note.setProperty ("l", (ts.toBeats (TimePosition::fromSeconds (e.noteOffObject->message.getTimeStamp()))
                                              - ts.toBeats (TimePosition::fromSeconds (e.message.getTimeStamp()))).inBeats(), um);

                    ms.addChild (note, -1, um);
                }
            }

            if (auto t = getClipTrack())
                t->insertClipWithState (midiClip, getName(), Type::midi,
                                        { getPosition().time, {} }, true, false);
        }
        else
        {
            edit.engine.getUIBehaviour().showWarningMessage (TRANS("No MIDI notes were found by the plugin!"));
        }
    }
}
1156
void AudioClipBase::setTimeStretchMode (TimeStretcher::Mode mode)
{
    // Clamp the requested mode to one that's actually available in this build.
    timeStretchMode = TimeStretcher::checkModeIsAvailable (mode);

    // NOTE(review): the statement guarded by this condition was elided from this
    // extract — presumably it disables looping when the new mode can't loop; confirm.
    if (isLooping() && ! canLoop())
}
1164
WarpTimeManager& AudioClipBase::getWarpTimeManager() const
{
    // Lazily fetch the shared WarpTimeManager for this clip from the factory.
    if (warpTimeManager == nullptr)
    {
        WarpTimeManager::Ptr ptr = edit.engine.getWarpTimeFactory().getWarpTimeManager (*this);
        warpTimeManager = dynamic_cast<WarpTimeManager*> (ptr.get());
        jassert (warpTimeManager != nullptr); // the factory should always return a manager
    }

    return *warpTimeManager;
}
1177
1178int AudioClipBase::getTransposeSemiTones (bool includeAutoPitch) const
1179{
1180 if (autoPitch && includeAutoPitch)
1181 {
1182 int pitch = edit.pitchSequence.getPitchAt (getPosition().getStart() + TimeDuration::fromSeconds (0.0001)).getPitch();
1183 int transposeBase = pitch - loopInfo.getRootNote();
1184
1185 while (transposeBase > 6) transposeBase -= 12;
1186 while (transposeBase < -6) transposeBase += 12;
1187
1188 return transpose + transposeBase;
1189 }
1190
1191 return transpose;
1192}
1193
LoopInfo AudioClipBase::autoDetectBeatMarkers (const LoopInfo& current, bool autoBeat, float sens) const
{
    LoopInfo res = current;

    // Strip any previously auto-generated loop points, keeping manual ones.
    for (int i = res.getNumLoopPoints(); --i >= 0;)
        if (res.getLoopPoint(i).isAutomatic())
            res.deleteLoopPoint (i);

    if (autoBeat)
    {
        if (auto reader = std::unique_ptr<juce::AudioFormatReader> (AudioFileUtils::createReaderFor (edit.engine, getCurrentSourceFile())))
        {
            // Analyse between the in/out markers; an out-marker of -1 means "end of file".
            const auto start = loopInfo.getInMarker();
            const auto end = (loopInfo.getOutMarker() == -1) ? reader->lengthInSamples
                                                             : loopInfo.getOutMarker();

            BeatDetect detect;
            detect.setSensitivity (sens);
            detect.setSampleRate (reader->sampleRate);

            // Only analyse if there's more than a second's worth of samples.
            if ((end - start) > reader->sampleRate)
            {
                auto blockLength = detect.getBlockSize();
                auto blockSize = choc::buffer::Size::create (reader->numChannels, blockLength);
                auto pos = start;

                choc::buffer::ChannelArrayBuffer<float> buffer (blockSize);

                // Feed the detector whole blocks; stop early on a read failure.
                while (pos + blockLength < end)
                {
                    if (! reader->read (buffer.getView().data.channels,
                                        (int) reader->numChannels, pos, (int) blockLength))
                        break;

                    detect.audioProcess (buffer);
                    pos += blockLength;
                }

                for (auto beat : detect.getBeats())
                    // NOTE(review): the loop body was elided from this extract —
                    // presumably each detected beat is added to `res` as an
                    // automatic loop point; confirm against the full file.
            }
        }
    }

    return res;
}
1240
1241bool AudioClipBase::performTempoDetect()
1242{
1243 TempoDetectTask tempoDetectTask (edit.engine, getCurrentSourceFile());
1244
1245 edit.engine.getUIBehaviour().runTaskWithProgressBar (tempoDetectTask);
1246
1247 if (! tempoDetectTask.isResultSensible())
1248 return false;
1249
1250 const AudioFileInfo wi = AudioFile (edit.engine, getCurrentSourceFile()).getInfo();
1251 loopInfo.setBpm (tempoDetectTask.getBpm(), wi);
1252
1253 return true;
1254}
1255
1256juce::StringArray AudioClipBase::getRootNoteChoices (Engine& e)
1257{
1259 s.add ("<" + TRANS("None") + ">");
1260
1261 for (int i = 0; i < 12; ++i)
1262 s.add (Pitch::getPitchAsString (e, i));
1263
1264 return s;
1265}
1266
1267juce::StringArray AudioClipBase::getPitchChoices()
1268{
1270
1271 const int numSemitones = isUsingMelodyne() ? 12 : 24;
1272
1273 if (loopInfo.getRootNote() == -1)
1274 {
1275 for (int i = numSemitones; i >= 1; i--)
1276 s.add ("+" + juce::String (i));
1277
1278 s.add ("0");
1279
1280 for (int i = 1; i <= numSemitones; ++i)
1281 s.add ("-" + juce::String (i));
1282 }
1283 else
1284 {
1285 const int base = autoPitch ? edit.pitchSequence.getPitchAt (getPosition().getStart()).getPitch()
1286 : loopInfo.getRootNote();
1287
1288 for (int i = numSemitones; i >= 1; i--)
1289 s.add ("+" + juce::String (i) + " : " + Pitch::getPitchAsString (edit.engine, base + i));
1290
1291 s.add ("0 : " + Pitch::getPitchAsString (edit.engine, base));
1292
1293 for (int i = 1; i <= numSemitones; ++i)
1294 s.add ("-" + juce::String (i) + " : " + Pitch::getPitchAsString (edit.engine, base - i));
1295 }
1296
1297 return s;
1298}
1299
1300void AudioClipBase::enableEffects (bool enable, bool warn)
1301{
1302 auto v = state.getChildWithName (IDs::EFFECTS);
1303 auto um = getUndoManager();
1304
1305 if (enable)
1306 {
1307 setUsesProxy (true);
1308 if (! v.isValid())
1309 {
1310 state.addChild (ClipEffects::create(), -1, um);
1311 clipEffectsVisible = true;
1312 }
1313 }
1314 else if (v.isValid())
1315 {
1316 if (! warn || edit.engine.getUIBehaviour().showOkCancelAlertBox (TRANS("Remove Clip Effects"),
1317 TRANS("Are you sure you want to remove all clip effects?")))
1318 {
1319 state.removeChild (v, um);
1320 state.removeProperty (IDs::effectsVisible, um);
1321 }
1322 }
1323}
1324
1325void AudioClipBase::addEffect (const juce::ValueTree& effectsTree)
1326{
1327 auto v = state.getChildWithName (IDs::EFFECTS);
1328 jassert (v.isValid());
1329
1330 if (v.isValid())
1331 v.addChild (effectsTree, -1, getUndoManager());
1332}
1333
1334//==============================================================================
TimePosition AudioClipBase::clipTimeToSourceFileTime (TimePosition t)
{
    // Maps a clip-relative time to the corresponding position in the source
    // file, accounting for auto-tempo, looping, offset and speed ratio.
    if (getAutoTempo())
    {
        if (isLooping())
        {
            // Work in beats: wrap into the loop length, then shift by offset/loop start.
            auto b = toPosition (getBeatOfRelativeTime (toDuration (t)) - getStartBeat());
            const auto end = toPosition (getLoopLengthBeats());

            while (b > end)
                b = b - getLoopLengthBeats();

            // NOTE(review): the offset's seconds value is reinterpreted as beats
            // here — looks deliberate, but worth confirming.
            b = b + BeatDuration::fromBeats (getPosition().getOffset().inSeconds()) + toDuration (getLoopStartBeats());
            return TimePosition::fromSeconds (b.inBeats() / loopInfo.getBeatsPerSecond (getAudioFile().getInfo()));
        }

        auto b = getBeatOfRelativeTime (toDuration (t)) - getStartBeat() + getOffsetInBeats();

        // Convert beats back to seconds at the file's tempo.
        return TimePosition::fromSeconds (b.inBeats() / loopInfo.getBeatsPerSecond (getAudioFile().getInfo()));
    }

    if (isLooping())
    {
        const auto end = toPosition (getLoopLength());

        // Wrap the time into the loop range before applying offset and speed.
        while (t > end)
            t = t - getLoopLength();

        return (t + getPosition().getOffset() + toDuration (getLoopStart())) * getSpeedRatio();
    }

    return (t + getPosition().getOffset()) * getSpeedRatio();
}
1368
1369void AudioClipBase::addMark (TimePosition relCursorPos)
1370{
1371 if (auto sourceItem = sourceFileReference.getSourceProjectItem())
1372 {
1373 auto marks = sourceItem->getMarkedPoints();
1374 marks.add (clipTimeToSourceFileTime (relCursorPos));
1375 sourceItem->setMarkedPoints (marks);
1376 }
1377}
1378
1379void AudioClipBase::moveMarkTo (TimePosition relCursorPos)
1380{
1381 if (auto sourceItem = sourceFileReference.getSourceProjectItem())
1382 {
1383 auto marks = sourceItem->getMarkedPoints();
1384
1386 juce::Array<int> index;
1387 getRescaledMarkPoints (rescaled, index);
1388
1389 int indexOfNearest = -1;
1390 auto nearestDiff = Edit::getMaximumEditEnd();
1391
1392 for (int i = rescaled.size(); --i >= 0;)
1393 {
1394 auto diff = TimePosition::fromSeconds (std::abs ((rescaled[i] - toDuration (relCursorPos)).inSeconds()));
1395
1396 if (diff < nearestDiff)
1397 {
1398 nearestDiff = diff;
1399 indexOfNearest = index[i];
1400 }
1401 }
1402
1403 if (indexOfNearest != -1)
1404 {
1405 marks.set (indexOfNearest, clipTimeToSourceFileTime (relCursorPos));
1406 sourceItem->setMarkedPoints (marks);
1407 }
1408 }
1409}
1410
1411void AudioClipBase::deleteMark (TimePosition relCursorPos)
1412{
1413 if (auto sourceItem = sourceFileReference.getSourceProjectItem())
1414 {
1415 auto marks = sourceItem->getMarkedPoints();
1416
1418 juce::Array<int> index;
1419 getRescaledMarkPoints (rescaled, index);
1420
1421 int indexOfNearest = -1;
1422 auto nearestDiff = Edit::getMaximumEditEnd();
1423
1424 for (int i = rescaled.size(); --i >= 0;)
1425 {
1426 auto diff = TimePosition::fromSeconds (std::abs ((rescaled[i] - toDuration (relCursorPos)).inSeconds()));
1427
1428 if (diff < nearestDiff)
1429 {
1430 nearestDiff = diff;
1431 indexOfNearest = index[i];
1432 }
1433 }
1434
1435 if (indexOfNearest != -1)
1436 {
1437 marks.remove (indexOfNearest);
1438 sourceItem->setMarkedPoints (marks);
1439 }
1440 }
1441}
1442
bool AudioClipBase::canSnapToOriginalBWavTime()
{
    // Only possible if the source file carries a BWAV time-reference metadata entry.
    return getAudioFile().getMetadata()[juce::WavAudioFormat::bwavTimeReference].isNotEmpty();
}
1447
1448void AudioClipBase::snapToOriginalBWavTime()
1449{
1450 auto f = getAudioFile();
1451 juce::String bwavTime (f.getMetadata()[juce::WavAudioFormat::bwavTimeReference]);
1452
1453 if (bwavTime.isNotEmpty())
1454 {
1455 auto t = TimePosition::fromSeconds (bwavTime.getLargeIntValue() / f.getSampleRate());
1456
1457 setStart (t + getPosition().getOffset(), false, true);
1458 }
1459}
1460
//==============================================================================
// NOTE(review): the function signature (getReferencedItems) and the declaration
// of the local `item` (a ReferencedItem) are not visible in this extract.
{
    auto results = Clip::getReferencedItems();

    item.firstTimeUsed = 0;
    item.lengthUsed = 0;

    // Without auto-tempo, the used region depends on the speed ratio and the
    // loop/offset settings.
    if (! getAutoTempo())
    {
        auto speed = getSpeedRatio();

        if (! isLooping())
        {
            item.firstTimeUsed = (getPosition().getOffset() * speed).inSeconds();
            item.lengthUsed = (getPosition().getLength() * speed).inSeconds();
        }
        else
        {
            item.firstTimeUsed = (getLoopStart() * speed).inSeconds();
            item.lengthUsed = (getLoopLength() * speed).inSeconds();
        }
    }

    // One result entry per take if takes exist, otherwise the single source item.
    if (hasAnyTakes())
    {
        for (auto takeID : getTakes())
        {
            item.itemID = takeID;
            results.add (item);
        }

        jassert (! results.isEmpty());
    }
    else
    {
        item.itemID = ProjectItemID (sourceFileReference.source.get());
        results.add (item);
    }

    // With auto-tempo, the whole source file is considered in use.
    if (getAutoTempo())
    {
        for (auto& ref : results)
        {
            auto wi = edit.engine.getAudioFileManager().getAudioFile (ref.itemID).getInfo();

            if (wi.sampleRate > 0)
            {
                ref.firstTimeUsed = 0;
                ref.lengthUsed = wi.getLengthInSeconds();
            }
        }
    }

    return results;
}
1518
void AudioClipBase::reassignReferencedItem (const ReferencedItem& item,
                                            ProjectItemID newItemID, double newStartTime)
{
    Clip::reassignReferencedItem (item, newItemID, newStartTime);

    // Only the simple single-source-item case is handled here.
    if (getReferencedItems().size() == 1 && item == getReferencedItems().getFirst())
    {
        sourceFileReference.setToProjectFileReference (newItemID);

        // Compensate the offset (or loop start when looping) so the audible
        // material stays put; newStartTime is scaled by the speed ratio.
        if (! isLooping())
            setOffset (getPosition().getOffset() - TimeDuration::fromSeconds ((newStartTime / getSpeedRatio())));
        else
            loopStart = loopStart - TimeDuration::fromSeconds ((newStartTime / getSpeedRatio()));
    }
    else
    {
        // NOTE(review): the statement in this branch was elided from this
        // extract (possibly an assertion for the unhandled case); confirm.
    }
}
1538
juce::Array<ProjectItemID> AudioClipBase::getTakes() const
{
    // Base implementation: no takes. The assertion suggests classes that do
    // support takes provide their own implementation — confirm against the header.
    jassert (! hasAnyTakes());
    return {};
}
1544
//==============================================================================
// NOTE(review): the signature line was elided from this extract — the body
// lazily creates and returns the clip's shared LaunchHandle.
{
    if (! launchHandle)
        launchHandle = std::make_shared<LaunchHandle>();

    return launchHandle;
}
1553
// NOTE(review): the signature line was elided from this extract — the body
// lazily creates and returns the clip's LaunchQuantisation object.
{
    if (! launchQuantisation)
        launchQuantisation = std::make_unique<LaunchQuantisation> (state, edit);

    return launchQuantisation.get();
}
1561
// NOTE(review): both the signature and the creation statement inside the `if`
// were elided from this extract — presumably this lazily constructs
// `followActions` before returning it; confirm against the full file.
{
    if (! followActions)

    return followActions.get();
}
1569
1570
1571//==============================================================================
1572juce::String AudioClipBase::canAddClipPlugin (const Plugin::Ptr& p) const
1573{
1574 if (p != nullptr)
1575 {
1576 if (! p->canBeAddedToClip())
1577 return TRANS("Can't add this kind of plugin to a clip!");
1578
1579 if (pluginList.size() >= edit.engine.getEngineBehaviour().getEditLimits().maxPluginsOnClip)
1580 return TRANS("Can't add any more plugins to this clip!");
1581 }
1582
1583 return {};
1584}
1585
// NOTE(review): the signature line was elided from this extract — this takes a
// Plugin::Ptr `p` and a SelectionManager `sm`, returning whether it was added.
{
    // Only insert if the plugin passes the validity/limit checks.
    if (canAddClipPlugin (p).isEmpty())
    {
        pluginList.insertPlugin (p, -1, &sm);
        return true;
    }

    return false;
}
1596
// NOTE(review): the signature line was elided from this extract — returns the
// clip's plugin list contents.
{
    return pluginList.getPlugins();
}
1601
// NOTE(review): the signature line was elided from this extract — forwards a
// mirror update for plugin `p` to every plugin on this clip.
{
    pluginList.sendMirrorUpdateToAllPlugins (p);
}
1606
1607//==============================================================================
bool AudioClipBase::setupARA (bool dontPopupErrorMessages)
{
    TRACKTION_ASSERT_MESSAGE_THREAD
    // Creating the MelodyneFileReader below can re-enter this function, so
    // guard against recursion with a scoped flag.
    static bool araReentrancyCheck = false;

    if (araReentrancyCheck)
        return true;

    const juce::ScopedValueSetter<bool> svs (araReentrancyCheck, true);

   #if TRACKTION_ENABLE_ARA
    if (isUsingMelodyne())
    {
        // Lazily create the ARA reader proxy.
        if (melodyneProxy == nullptr)
        {
            TRACKTION_LOG ("Created ARA reader!");
            melodyneProxy = new MelodyneFileReader (edit, *this);
        }

        if (melodyneProxy != nullptr && melodyneProxy->isValid())
            return true;

        // Setup failed — optionally explain why to the user.
        if (! dontPopupErrorMessages)
        {
            TRACKTION_LOG_ERROR ("Failed setting up ARA for audio clip!");

            if (TimeStretcher::isMelodyne (timeStretchMode)
                 && edit.engine.getPluginManager().getARACompatiblePlugDescriptions().size() <= 0)
            {
                TRACKTION_LOG_ERROR ("No ARA-compatible plugins were found!");

                edit.engine.getUIBehaviour().showWarningMessage (TRANS ("This audio clip is setup with Melodyne's time-stretching, but there aren't any ARA-compatible plugins available!")
                                                                   + "\n\n"
                                                                   + TRANS ("If you know you have ARA-compatible plugins installed, they must be scanned and part of the list of known plugins!"));
            }
        }
    }
   #endif

    juce::ignoreUnused (dontPopupErrorMessages);
    return false;
}
1650
LiveClipLevel AudioClipBase::getLiveClipLevel()
{
    // Wraps the clip's shared level state for use by the playback engine.
    return { level };
}
1655
1656//==============================================================================
void AudioClipBase::markAsDirty()
{
    // Clear any previous render failure and schedule a proxy rebuild.
    lastRenderJobFailed = false;
    createNewProxyAsync(); // Do this asynchronously to avoid recursion
}
1662
void AudioClipBase::updateSourceFile()
{
    // NOTE(review): a line was elided here in this extract (possibly a tracer macro).

    if (! isInitialised)
        return;

    TRACKTION_ASSERT_MESSAGE_THREAD

    // check to see if our source file already exists, it may have been created by another clip
    // if it does exist, we will just use that, otherwise we need to start our own render operation
    const AudioFile audioFile (RenderManager::getAudioFileForHash (edit.engine, edit.getTempDirectory (false), getHash()));

    if (getCurrentSourceFile() != audioFile.getFile())
        setCurrentSourceFile (audioFile.getFile());

    // An identical render is already in flight — let it finish.
    if (renderJob != nullptr && renderJob->proxy == audioFile && (! renderJob->shouldExit()))
        return;

    if (! (audioFile.getFile().existsAsFile() || audioFile.isValid()))
    {
        renderSource();
    }
    else if (renderJob != nullptr)
    {
        // File already usable — drop any stale render job.
        renderJob->removeListener (this);
        renderJob = nullptr;
    }
}
1692
void AudioClipBase::renderSource()
{
    TRACKTION_ASSERT_MESSAGE_THREAD
    jassert (isInitialised);

    const AudioFile audioFile (edit.engine, getCurrentSourceFile());
    const bool isValid = audioFile.isValid();

    // Nothing to do if the file already exists with valid info.
    if (audioFile.getFile().existsAsFile() && isValid)
        return;

    if (! isValid)
    {
        // need to render
        bool needsToChangeJob = renderJob == nullptr
                                  || (renderJob != nullptr && (renderJob->proxy != audioFile));

        if (needsToChangeJob)
        {
            // Swap listener registration from the old job to the new one.
            if (renderJob != nullptr)
                renderJob->removeListener (this);

            renderJob = getRenderJob (audioFile);

            if (renderJob != nullptr)
                renderJob->addListener (this);

            changed(); // updates the thumbnail progress
        }
    }
    else
    {
        // either finished or file exists
        if (renderJob != nullptr)
        {
            renderJob->removeListener (this);
            renderJob = nullptr;
        }

        // NOTE(review): a statement was elided here in this extract
        // (possibly a renderComplete() notification); confirm.
    }
}
1735
void AudioClipBase::renderComplete()
{
    // Called when a source/proxy render job for this clip finishes (see jobFinished).
    TRACKTION_ASSERT_MESSAGE_THREAD
    // Updates the thumbnail message
    changed();

    if (clipEffects != nullptr)
        clipEffects->notifyListenersOfRenderCompletion();
}
1745
//==============================================================================
// NOTE(review): the signature and the declaration of `rescaled` were elided
// from this extract — this overload returns just the rescaled mark times,
// discarding the index array the two-argument overload also fills.
{
    juce::Array<int> index;
    getRescaledMarkPoints (rescaled, index);
    return rescaled;
}
1754
// NOTE(review): the signature line was elided from this extract — this fills
// `times` with the source item's marked points rescaled into clip-relative
// time, and `index` with the corresponding indices into the mark list.
{
    if (auto sourceItem = sourceFileReference.getSourceProjectItem())
    {
        if (getAutoTempo())
        {
            // Convert the marks (seconds in the source file) into beats.
            auto beats = sourceItem->getMarkedPoints();
            auto afi = getAudioFile().getInfo();

            for (int i = 0; i < beats.size(); ++i)
                beats.set (i, beats[i] * loopInfo.getBeatsPerSecond (afi));

            if (isLooping())
            {
                auto loopLen = getLoopLengthBeats();
                auto clipLen = getLengthInBeats();
                auto b = loopLen - getOffsetInBeats();

                // First (possibly partial) loop iteration.
                for (int i = 0; i < beats.size(); ++i)
                {
                    auto newB = BeatDuration::fromBeats (beats[i].inSeconds() - (toPosition (getOffsetInBeats()) - getLoopStartBeats()).inBeats());

                    if (newB > BeatDuration() && newB < b)
                    {
                        times.add (getTimeOfRelativeBeat (newB));
                        index.add (i);
                    }
                }

                // Subsequent full loop iterations up to the end of the clip.
                while (b < clipLen)
                {
                    for (int i = 0; i < beats.size(); ++i)
                    {
                        auto newB = BeatDuration::fromBeats (beats[i].inSeconds() + (toPosition (b) - getLoopStartBeats()).inBeats());

                        if (newB >= b && newB < b + loopLen)
                        {
                            times.add (getTimeOfRelativeBeat (newB));
                            index.add (i);
                        }
                    }

                    b = b + loopLen;
                }
            }
            else
            {
                // Not looping: shift by the offset and keep marks inside the clip.
                for (int i = 0; i < beats.size(); ++i)
                {
                    auto newT = getTimeOfRelativeBeat (BeatPosition::fromBeats (beats[i].inSeconds()) - toPosition (getOffsetInBeats()));

                    if (newT >= TimePosition())
                    {
                        times.add (newT);
                        index.add (i);
                    }
                }
            }
        }
        else
        {
            if (isLooping())
            {
                // Rescale by the speed ratio and shift into loop-relative time.
                auto origTimes = sourceItem->getMarkedPoints();

                for (int i = origTimes.size(); --i >= 0;)
                    origTimes.set (i, origTimes[i] / speedRatio - getPosition().getOffset() - toDuration (getLoopStart()));

                const auto loopLen = getLoopLength();
                const auto clipLen = getPosition().getLength();
                auto t = loopLen - getPosition().getOffset();

                // First (possibly partial) loop iteration.
                for (int i = 0; i < origTimes.size(); ++i)
                {
                    if (origTimes[i] >= TimePosition() && origTimes[i] < toPosition (t))
                    {
                        times.add(origTimes[i]);
                        index.add(i);
                    }
                }

                // Subsequent full loop iterations up to the end of the clip.
                while (t < clipLen)
                {
                    for (int i = 0; i < origTimes.size(); ++i)
                    {
                        auto newT = toDuration (origTimes[i] + t + getPosition().getOffset());

                        if (newT >= t && newT < t + loopLen)
                        {
                            times.add(toPosition (newT));
                            index.add(i);
                        }
                    }

                    t = t + loopLen;
                }
            }
            else
            {
                // Simple case: just rescale and offset each mark.
                times = sourceItem->getMarkedPoints();

                for (int i = 0; i < times.size(); ++i)
                {
                    times.set(i, times[i] / speedRatio - getPosition().getOffset());
                    index.add(i);
                }
            }
        }
    }
}
1865
1866//==============================================================================
1867bool AudioClipBase::isUsingFile (const AudioFile& af)
1868{
1869 if (getPlaybackFile() == af || getAudioFile() == af)
1870 return true;
1871
1872 if (clipEffects != nullptr)
1873 return clipEffects->isUsingFile (af);
1874
1875 return false;
1876}
1877
void AudioClipBase::setUsesProxy (bool canUseProxy) noexcept
{
    proxyAllowed = canUseProxy;
    stopTimer(); // cancel any pending proxy-rebuild timer
}
1883
1884bool AudioClipBase::usesTimeStretchedProxy() const
1885{
1886 if (! proxyAllowed)
1887 return false;
1888
1889 return getAutoTempo() || getAutoPitch()
1890 || getPitchChange() != 0.0f
1891 || isUsingMelodyne()
1892 || (std::abs (getSpeedRatio() - 1.0) > 0.00001
1893 && TimeStretcher::canProcessFor (timeStretchMode));
1894}
1895
1898
1899AudioFile AudioClipBase::getProxyFileToCreate (bool renderTimestretched)
1900{
1901 if (renderTimestretched)
1902 return TemporaryFileManager::getFileForCachedClipRender (*this, getProxyHash());
1903
1904 return TemporaryFileManager::getFileForCachedFileRender (edit, getHash());
1905}
1906
1907//==============================================================================
1909{
1910 static constexpr int maxNumChannels = 8;
1911
    StretchSegment (Engine& engine, const AudioFile& file,
                    // NOTE(review): a parameter line was elided from this extract —
                    // the ProxyRenderingInfo& `info` referenced below.
                    double sampleRate, const AudioSegmentList::Segment& s)
        : segment (s),
          fileInfo (file.getInfo()),
          // Crossfade length converted from time to a sample count at the output rate.
          crossfadeSamples ((int) tracktion::toSamples (info.audioSegmentList->getCrossfadeLength(), sampleRate)),
          numChannelsToUse (juce::jlimit (1, maxNumChannels, fileInfo.numChannels))
    {
        reader = engine.getAudioFileManager().cache.createReader (file);

        if (reader != nullptr)
        {
            auto sampleRange = segment.getSampleRange();

            // If silence follows, read the range linearly; otherwise loop over it.
            if (segment.isFollowedBySilence())
            {
                reader->setReadPosition (sampleRange.getStart());
            }
            else
            {
                reader->setLoopRange (sampleRange);
                reader->setReadPosition (0);
            }

            timestretcher.initialise (fileInfo.sampleRate, outputBufferSize, numChannelsToUse,
                                      info.mode, info.options, false);
            jassert (timestretcher.isInitialised()); // Have you enabled a TimeStretcher mode?

            // The stretcher wants playback speed, so the stretch ratio is inverted.
            timestretcher.setSpeedAndPitch ((float) (1.0 / segment.getStretchRatio()),
                                            segment.getTranspose());
        }
    }
1945
    void renderNextBlock (juce::AudioBuffer<float>& buffer, TimeRange editTime, int numSamples)
    {
        // Mixes this segment's stretched output into `buffer` for the part of
        // `editTime` that overlaps the segment's own range.
        if (reader == nullptr)
            return;

        auto loopRange = segment.getRange();

        if (! editTime.overlaps (loopRange))
            return;

        int start = 0;

        // Trim the tail if the segment ends inside this block...
        if (loopRange.getEnd() < editTime.getEnd())
            numSamples = std::max (0, (int) (numSamples * (loopRange.getEnd() - editTime.getStart()).inSeconds()
                                               / editTime.getLength().inSeconds()));

        // ...and skip the head if the segment starts inside this block.
        if (loopRange.getStart() > editTime.getStart())
        {
            auto skip = juce::jlimit (0, numSamples, (int) (numSamples * (loopRange.getStart() - editTime.getStart()).inSeconds() / editTime.getLength().inSeconds()));
            start += skip;
            numSamples -= skip;
        }

        // Drain the fifo into the output, refilling from the time-stretcher as needed.
        while (numSamples > 0)
        {
            auto numReady = std::min (numSamples, readySamplesEnd - readySamplesStart);

            if (numReady > 0)
            {
                // Additive mix; a mono fifo feeds every output channel.
                for (int i = 0; i < buffer.getNumChannels(); ++i)
                    buffer.addFrom (i, start, fifo,
                                    std::min (i, fifo.getNumChannels() - 1),
                                    readySamplesStart, numReady);

                readySamplesStart += numReady;
                start += numReady;
                numSamples -= numReady;
            }
            else
            {
                auto blockSize = fillNextBlock();
                renderFades (blockSize);

                readySampleOutputPos += blockSize;
            }
        }
    }
1995
    int fillNextBlock()
    {
        // Feeds the time-stretcher the samples it asks for from the file reader
        // and leaves the produced output in the fifo. Returns the number of
        // output frames produced.
        float* outs[maxNumChannels] = {};

        for (int i = 0; i < numChannelsToUse; ++i)
            outs[i] = fifo.getWritePointer (i);

        const int needed = timestretcher.getFramesNeeded();
        int numRead = 0;

        if (needed >= 0)
        {
            AudioScratchBuffer scratch (numChannelsToUse, needed);
            scratch.buffer.clear();
            auto bufferChannels = juce::AudioChannelSet::canonicalChannelSet (numChannelsToUse);
            auto sourceChannelsToUse = bufferChannels;

            if (needed > 0)
            {
                // NB: the read call sits inside jassert in debug builds, so the
                // side effect is duplicated in the release branch below.
               #if JUCE_DEBUG
                jassert (reader->readSamples (needed, scratch.buffer, bufferChannels, 0, sourceChannelsToUse, 5000));
               #else
                reader->readSamples (needed, scratch.buffer, bufferChannels, 0, sourceChannelsToUse, 5000);
               #endif
            }

            const float* ins[maxNumChannels] = {};

            for (int i = 0; i < numChannelsToUse; ++i)
                ins[i] = scratch.buffer.getReadPointer (i);

            numRead = timestretcher.processData (ins, needed, outs);
        }
        else
        {
            // needed == -1 signals end of input: flush the stretcher's tail.
            jassert (needed == -1);
            numRead = timestretcher.flush (outs);
        }

        readySamplesStart = 0;
        readySamplesEnd = numRead;

        return numRead;
    }
2041
    void renderFades (int numSamples)
    {
        // Applies the segment's crossfade-in/out to the freshly filled fifo block.
        // NOTE(review): a line was elided here in this extract.
        auto renderedEnd = readySampleOutputPos + numSamples;

        // The fade-in covers the first crossfadeSamples of the segment.
        if (segment.hasFadeIn())
            if (readySampleOutputPos < crossfadeSamples)
                renderFade (0, crossfadeSamples, false, numSamples);

        if (segment.hasFadeOut())
        {
            // The fade-out starts crossfadeSamples before the (stretched) end of the segment.
            auto fadeOutStart = (SampleCount) (segment.getSampleRange().getLength() / segment.getStretchRatio()) - crossfadeSamples;

            if (renderedEnd > fadeOutStart)
                renderFade (fadeOutStart, fadeOutStart + crossfadeSamples + 2, true, numSamples);
        }
    }
2059
    void renderFade (SampleCount start, SampleCount end, bool isFadeOut, int numSamples)
    {
        // Applies the portion of a gain ramp [start, end) that overlaps the
        // samples just rendered into the fifo, clipping the ramp to the rendered
        // region and interpolating the endpoint gains accordingly.
        float alpha1 = 0.0f, alpha2 = 1.0f;
        auto renderedEnd = readySampleOutputPos + numSamples;

        // Clip the ramp to the end of the rendered region.
        if (end > renderedEnd)
        {
            alpha2 = (renderedEnd - start) / (float) (end - start);
            end = renderedEnd;
        }

        // Clip the ramp to the start of the rendered region.
        if (start < readySampleOutputPos)
        {
            alpha1 = alpha2 * (readySampleOutputPos - start) / (float) (end - start);
            start = readySampleOutputPos;
        }

        if (end > start)
        {
            // A fade-out is just the inverted ramp.
            if (isFadeOut)
            {
                alpha1 = 1.0f - alpha1;
                alpha2 = 1.0f - alpha2;
            }

            AudioFadeCurve::applyCrossfadeSection (fifo,
                                                   (int) (start - readySampleOutputPos),
                                                   (int) (end - start),
                                                   AudioFadeCurve::convex, alpha1, alpha2);
        }
    }
2091
2092 const AudioSegmentList::Segment& segment;
2093 TimeStretcher timestretcher;
2094
2095 AudioFileInfo fileInfo;
2097
2098 const int outputBufferSize = 1024;
2099 int readySamplesStart = 0, readySamplesEnd = 0;
2100 SampleCount readySampleOutputPos = 0;
2101 const int crossfadeSamples, numChannelsToUse;
2102 juce::AudioBuffer<float> fifo { numChannelsToUse, outputBufferSize };
2103
2105};
2106
2107//==============================================================================
2108std::unique_ptr<AudioClipBase::ProxyRenderingInfo> AudioClipBase::createProxyRenderingInfo()
2109{
2111 p->audioSegmentList = AudioSegmentList::create (*this, true, true);
2112 p->clipTime = getEditTimeRange();
2113 p->speedRatio = getSpeedRatio();
2114 p->mode = (timeStretchMode != TimeStretcher::disabled && timeStretchMode != TimeStretcher::melodyne)
2115 ? timeStretchMode
2116 : TimeStretcher::defaultMode;
2117 p->options = elastiqueProOptions;
2118
2119 return p;
2120}
2121
// NOTE(review): the first line of this function's signature was elided from this
// extract (bool AudioClipBase::ProxyRenderingInfo::render (...)) along with the
// declaration of the local `segments` array used below.
                                        juce::ThreadPoolJob* const& job, std::atomic<float>& progress) const
{
    if (audioSegmentList->getSegments().isEmpty() || ! sourceFile.isValid())
        return false;

    auto sampleRate = sourceFile.getSampleRate();

    // One StretchSegment per audio segment; each renders its own time range.
    for (auto& segment : audioSegmentList->getSegments())
        segments.add (new StretchSegment (engine, sourceFile, *this, sampleRate, segment));

    const int samplesPerBlock = 1024;
    juce::AudioBuffer<float> buffer (sourceFile.getNumChannels(), samplesPerBlock);
    double time = 0.0;

    auto numBlocks = 1 + (int) (clipTime.getLength().inSeconds() * sampleRate / samplesPerBlock);

    for (int i = 0; i < numBlocks; ++i)
    {
        // Bail out promptly if the owning pool job is being cancelled.
        if (job != nullptr && job->shouldExit())
            return false;

        buffer.clear();

        auto endTime = time + samplesPerBlock / sampleRate;
        const auto editTime = TimeRange (TimePosition::fromSeconds (time), TimePosition::fromSeconds (endTime));
        time = endTime;

        // Each segment adds its contribution to the block before it's written.
        for (auto s : segments)
            s->renderNextBlock (buffer, editTime, samplesPerBlock);

        if (! writer.appendBuffer (buffer, samplesPerBlock))
            return false;

        progress = i / (float) numBlocks;
    }

    return true;
}
2165
2166AudioFile AudioClipBase::getPlaybackFile()
2167{
2168 // this needs to return the same file right from the first call, if it's a rendered file then obviously it won't exist but we need to return it anyway
2169 const AudioFile af (getAudioFile());
2170
2171 if (canUseProxy() && ! af.isNull())
2172 {
2173 const bool timestretched = usesTimeStretchedProxy();
2174
2175 if (timestretched || af.getInfo().needsCachedProxy)
2176 return getProxyFileToCreate (timestretched);
2177 }
2178
2179 return af;
2180}
2181
AudioFileInfo AudioClipBase::getWaveInfo()
{
    // if the source needs to render we'll just have to bodge return a WaveInfo so the AudioSegmentList gives us a consistent hash
    // this is of course a massive hack because it assumes that the rendered file will have the same sample rate etc.

    if (needsRender())
        if (auto sourceItem = sourceFileReference.getSourceProjectItem())
            return AudioFile (edit.engine, sourceItem->getSourceFile()).getInfo();

    // Normal case: report the actual source file's info.
    return getAudioFile().getInfo();
}
2193
HashCode AudioClipBase::getProxyHash()
{
    auto clipPos = getPosition();

    // Combine every property that affects the proxy's rendered audio, so any
    // change produces a distinct cached-proxy file name.
    HashCode hash = getHash()
                     ^ static_cast<HashCode> (timeStretchMode.get())
                     ^ elastiqueProOptions.get().toString().hashCode64()
                     ^ (7342847 * static_cast<HashCode> (pitchChange * 199.0))
                     ^ static_cast<HashCode> (clipPos.getLength().inSeconds() * 10005.0)
                     ^ static_cast<HashCode> (clipPos.getOffset().inSeconds() * 9997.0)
                     ^ static_cast<HashCode> (getLoopStart().inSeconds() * 8971.0)
                     ^ static_cast<HashCode> (getLoopLength().inSeconds() * 7733.0)
                     ^ static_cast<HashCode> (getSpeedRatio() * 877.0);

    auto needsPlainStretch = [&]() { return std::abs (getSpeedRatio() - 1.0) > 0.00001 || (getPitchChange() != 0.0f); };

    // When time-stretching is in play, the segment breakdown also affects the output.
    if (getAutoTempo() || getAutoPitch() || needsPlainStretch())
    {
        auto& segmentList = getAudioSegmentList();
        int i = 0;

        for (auto& segment : segmentList.getSegments())
            hash ^= static_cast<HashCode> (segment.getHashCode() * (i++ + 0.1));
    }

    return hash;
}
2223
void AudioClipBase::beginRenderingNewProxyIfNeeded()
{
    if (! canUseProxy())
        return;

    // If the rebuild timer is already pending, just make it fire ASAP.
    if (isTimerRunning())
    {
        startTimer (1);
        return;
    }

    const AudioFile playFile (getPlaybackFile());

    if (playFile.isNull())
        return;

    auto original = getAudioFile();

    // NOTE(review): the statements guarded by the conditions below were elided
    // from this extract — presumably kicking off the source render and the
    // proxy generation respectively; confirm against the full file.
    if (shouldAttemptRender() && ! original.isValid())

    if (usesTimeStretchedProxy() || original.getInfo().needsCachedProxy)
        if (playFile.getSampleRate() <= 0.0)
}
2249
2250//==============================================================================
void AudioClipBase::jobFinished (RenderManager::Job& job, bool completedOk)
{
    TRACKTION_ASSERT_MESSAGE_THREAD

    // Ignore callbacks from superseded jobs — only the current render job matters.
    if (&job == renderJob.get())
    {
        lastRenderJobFailed = ! completedOk;
        renderJob->removeListener (this);
        renderJob = nullptr;

        renderComplete();
    }
}
2264
2265//==============================================================================
void AudioClipBase::createNewProxyAsync()
{
    // Defer proxy creation via the timer so rapid successive changes coalesce.
    if (canUseProxy())
        startTimer (600);
}
2271
void AudioClipBase::cancelCurrentRender()
{
    // Request cancellation of any in-progress source render job.
    if (renderJob != nullptr)
        renderJob->cancelJob();
}
2277
// NOTE(review): the signature line was elided from this extract — given the
// stopTimer()/startTimer usage elsewhere, this is the clip's timer callback
// that (re)generates the playback proxy; confirm against the full file.
{
    if (edit.isLoading()
        || ! edit.getTransport().isAllowedToReallocate())
        return;

    // if the source file hasn't been rendered yet we need to delay this
    if (shouldAttemptRender())
    {
        updateSourceFile();

        if (! getAudioFile().isValid())
        {
            createNewProxyAsync();
            return;
        }
    }

    stopTimer();

    if (! canUseProxy())
        return;

    const bool isTimeStretched = usesTimeStretchedProxy();

    const AudioFile originalFile (getAudioFile());
    const AudioFile newProxy (getPlaybackFile());

    const bool proxyChanged = lastProxy != newProxy;

    if (proxyChanged || ! newProxy.getFile().exists())
    {
        // Delete the previous proxy if nothing else in the edit still uses it
        // and it lives in the edit's temp directory.
        if (proxyChanged
            && lastProxy != originalFile
            && lastProxy.getFile().isAChildOf (edit.getTempDirectory (false))
            && ! edit.areAnyClipsUsingFile (lastProxy))
            edit.engine.getAudioFileManager().proxyGenerator.deleteProxy (lastProxy);

        lastProxy = newProxy;

        // Kick off generation of the new proxy if one is actually required.
        if (isTimeStretched || newProxy != originalFile)
        {
            edit.engine.getAudioFileManager().proxyGenerator
                .beginJob (new ProxyGeneratorJob (getAudioFile(), newProxy, *this, isTimeStretched));
        }

        if (proxyChanged || newProxy.getFile().exists())
        {
            Selectable::changed(); // force update of waveforms
            edit.restartPlayback();
        }
    }
}
2331
// NOTE(review): the signature line (source 2332) was elided by the documentation
// renderer. The (tree, id) parameters and the forwarding pattern below indicate
// this is AudioClipBase's valueTreePropertyChanged override — confirm against
// the header. Dispatches property changes on the clip's own state, on child
// WARPMARKER trees, and on the LOOPINFO child.
2333{
2334    if (tree == state)
2335    {
        // Properties that only require a change notification (plus warp-time
        // special handling below).
2336        if (id == IDs::fadeInType || id == IDs::fadeOutType
2337            || id == IDs::fadeInBehaviour || id == IDs::fadeOutBehaviour
2338            || id == IDs::fadeIn || id == IDs::fadeOut
2339            || id == IDs::loopStart || id == IDs::loopLength
2340            || id == IDs::loopStartBeats || id == IDs::loopLengthBeats
2341            || id == IDs::transpose || id == IDs::pitchChange
2342            || id == IDs::elastiqueMode || id == IDs::autoPitch
2343            || id == IDs::elastiqueOptions || id == IDs::warpTime
2344            || id == IDs::effectsVisible || id == IDs::autoPitchMode
2345            || id == IDs::resamplingQuality
2346            || id == IDs::launchQuantisation || id == IDs::useClipLaunchQuantisation)
2347        {
2348            if (id == IDs::warpTime)
2349            {
2350                warpTime.forceUpdateOfCachedValue();
2351
2352                if (! getWarpTime())
2353                {
                    // NOTE(review): the statement bodies of this if/else
                    // (source lines 2355 and 2357) were elided by the renderer
                    // — consult the original source file.
2354                    if (shouldAttemptRender())
2356                    else
2358                }
2359            }
2360
2361            changed();
2362        }
2363        else if (id == IDs::gain)
2364        {
2365            changed();
2366        }
2367        else if (id == IDs::pan || id == IDs::mute
2368                 || id == IDs::autoCrossfade)
2369        {
2370            changed();
2371
2372            if (id == IDs::mute)
2373            {
                // Muting can affect a parent folder track's summed clips.
2374                if (auto track = getTrack())
2375                    if (auto f = track->getParentFolderTrack())
2376                        f->setDirtyClips();
2377            }
2378            else if (id == IDs::autoCrossfade)
2379            {
                // NOTE(review): the body statement (source 2380) was elided
                // by the renderer.
2381            }
2382        }
2383        else if (id == IDs::autoTempo)
2384        {
2385            if (! getUndoManager()->isPerformingUndoRedo())
2386            {
2387                autoTempo.forceUpdateOfCachedValue();
2388                updateAutoTempoState();
2389            }
2390        }
2391        else if (id == IDs::isReversed)
2392        {
2393            isReversed.forceUpdateOfCachedValue();
2394            updateReversedState();
2395        }
2396        else if (id == IDs::channels)
2397        {
2398            channels.forceUpdateOfCachedValue();
2399            updateLeftRightChannelActivenessFlags();
2400            changed();
2401        }
2402        else if (id == IDs::proxyAllowed)
2403        {
2404            propertiesChanged();
2405        }
2406        else
2407        {
            // NOTE(review): the statement (source 2408) was elided by the
            // renderer — presumably forwards to the base class; verify.
2409        }
2410    }
2411    else if (tree.hasType (IDs::WARPMARKER))
2412    {
2413        if (id == IDs::warpTime || id == IDs::sourceTime)
2414            changed();
2415    }
2416    else if (tree.hasType (IDs::LOOPINFO))
2417    {
2418        if (isInitialised)
2419            changed();
2420    }
2421    else
2422    {
        // NOTE(review): the statement (source 2423) was elided by the
        // renderer — presumably forwards to the base class; verify.
2424    }
2425}
2426
// NOTE(review): the signature line (source 2427) was elided by the
// documentation renderer; the base-class call below shows this is
// AudioClipBase's valueTreeChildAdded override.
// Reacts to children added to the clip's state (plugins, effects, pattern
// generators, loop info) and to loop-info/warp-marker sub-trees.
2428{
2429    if (parentState == state)
2430    {
2431        if (child.hasType (IDs::PLUGIN))
2432            Selectable::changed();
2433        else if (child.hasType (IDs::EFFECTS))
2434            updateClipEffectsState();
2435        else if (child.hasType (IDs::PATTERNGENERATOR))
2436            patternGenerator = std::make_unique<PatternGenerator> (*this, child);
2437        else if (child.hasType (IDs::LOOPINFO) && isInitialised)
2438            loopInfo.state = child;
2439    }
2440    else if (parentState.hasType (IDs::LOOPINFO) || child.hasType (IDs::WARPMARKER))
2441    {
2442        changed();
2443    }
2444    else
2445    {
2446        Clip::valueTreeChildAdded (parentState, child);
2447    }
2448}
2449
2450void AudioClipBase::valueTreeChildRemoved (juce::ValueTree& parentState, juce::ValueTree& child, int oldIndex)
2451{
2452 if (parentState == state)
2453 {
2454 if (child.hasType (IDs::PLUGIN))
2455 Selectable::changed();
2456 else if (child.hasType (IDs::EFFECTS))
2457 updateClipEffectsState();
2458 else if (child.hasType (IDs::PATTERNGENERATOR))
2459 patternGenerator = nullptr;
2460 else if (child.hasType (IDs::LOOPINFO) && isInitialised)
2461 copyValueTree (loopInfo.state, LoopInfo (edit.engine).state, nullptr); // Resets to default
2462 }
2463 else if (parentState.hasType (IDs::LOOPINFO) || child.hasType (IDs::WARPMARKER))
2464 {
2465 changed();
2466 }
2467 else
2468 {
2469 Clip::valueTreeChildRemoved (parentState, child, oldIndex);
2470 }
2471}
2472
// Child reorders need no clip-specific handling; just forward to the base class.
2473void AudioClipBase::valueTreeChildOrderChanged (juce::ValueTree& parentState, int oldIndex, int newIndex)
2474{
2475    Clip::valueTreeChildOrderChanged (parentState, oldIndex, newIndex);
2476}
2477
// NOTE(review): this definition's signature (source 2478) and its body
// statements (sources 2480 and 2482) were elided by the documentation
// renderer — nothing can be documented from this fragment; consult the
// original source file.
2479{
2481
2483}
2484
// Responds to the isReversed property changing: notifies listeners and
// refreshes all property panels so UI reflects the new direction.
// NOTE(review): the statements at source lines 2487, 2490 and 2493 (the
// bodies following the two if-conditions below) were elided by the
// documentation renderer — consult the original source file before relying
// on this fragment.
2485void AudioClipBase::updateReversedState()
2486{
2488
2489    if (isReversed)
2491
2492    if (! getUndoManager()->isPerformingUndoRedo())
2494
2495    changed();
2496    SelectionManager::refreshAllPropertyPanels();
2497}
2498
2499void AudioClipBase::updateAutoTempoState()
2500{
2501 if (isLooping())
2502 {
2503 auto bps = edit.tempoSequence.getBeatsPerSecondAt (getPosition().getStart());
2504
2505 if (autoTempo)
2506 {
2507 // convert time based looping to beat based looping
2508 loopStartBeats = BeatPosition::fromBeats (loopStart.get().inSeconds() * bps);
2509 loopLengthBeats = BeatDuration::fromBeats (loopLength.get().inSeconds() * bps);
2510
2511 loopStart = 0_tp;
2512 loopLength = 0_td;
2513 }
2514 else
2515 {
2516 // convert beat based looping to time based looping
2517 loopStart = TimePosition::fromSeconds (loopStartBeats.get().inBeats() / bps);
2518 loopLength = TimeDuration::fromSeconds (loopLengthBeats.get().inBeats() / bps);
2519
2520 loopStartBeats = 0_bp;
2521 loopLengthBeats = 0_bd;
2522 }
2523
2524 changed();
2525 }
2526}
2527
2528void AudioClipBase::updateClipEffectsState()
2529{
2530 auto v = state.getChildWithName (IDs::EFFECTS);
2531
2532 if (v.isValid() && canHaveEffects())
2533 {
2534 if (clipEffects == nullptr)
2535 {
2536 clipEffects = std::make_unique<ClipEffects> (v, *this);
2537 changed();
2538 }
2539 }
2540 else if (clipEffects != nullptr)
2541 {
2542 clipEffects = nullptr;
2543
2544 if (auto sourceItem = sourceFileReference.getSourceProjectItem())
2545 setCurrentSourceFile (sourceItem->getSourceFile());
2546
2547 changed();
2548 }
2549
2550 markAsDirty();
2551}
2552
2553}} // namespace tracktion { inline namespace engine
int size() const noexcept
Type * getWritePointer(int channelNumber) noexcept
int getNumChannels() const noexcept
void clear() noexcept
void addFrom(int destChannel, int destStartSample, const AudioBuffer &source, int sourceChannel, int sourceStartSample, int numSamples, Type gainToApplyToSource=Type(1)) noexcept
const Type * getReadPointer(int channelNumber) const noexcept
int size() const noexcept
static AudioChannelSet JUCE_CALLTYPE disabled()
static AudioChannelSet JUCE_CALLTYPE stereo()
void addChannel(ChannelType newChannelType)
String getSpeakerArrangementAsString() const
static AudioChannelSet JUCE_CALLTYPE canonicalChannelSet(int numChannels)
int getChannelIndexForType(ChannelType type) const noexcept
void forceUpdateOfCachedValue()
void setValue(const Type &newValue, UndoManager *undoManagerToUse)
void referTo(ValueTree &tree, const Identifier &property, UndoManager *um)
Type get() const noexcept
Colour withHue(float newHue) const noexcept
String getFileExtension() const
File getSiblingFile(StringRef siblingFileName) const
File withFileExtension(StringRef newExtension) const
bool isAChildOf(const File &potentialParentDirectory) const
bool isValid() const noexcept
ObjectClass * add(ObjectClass *newObject)
int64 nextInt64() noexcept
static constexpr Range between(const ValueType position1, const ValueType position2) noexcept
constexpr ValueType getLength() const noexcept
ReferencedType * get() const noexcept
void add(String stringToAdd)
bool isEmpty() const noexcept
static String toHexString(IntegerType number)
int64 getLargeIntValue() const noexcept
bool isNotEmpty() const noexcept
bool shouldExit() const noexcept
void stopTimer() noexcept
bool isTimerRunning() const noexcept
void startTimer(int intervalInMilliseconds) noexcept
virtual void timerCallback()=0
virtual void valueTreeChildRemoved(ValueTree &parentTree, ValueTree &childWhichHasBeenRemoved, int indexFromWhichChildWasRemoved)
virtual void valueTreeChildOrderChanged(ValueTree &parentTreeWhoseChildrenHaveMoved, int oldIndex, int newIndex)
virtual void valueTreeParentChanged(ValueTree &treeWhoseParentHasChanged)
virtual void valueTreePropertyChanged(ValueTree &treeWhosePropertyHasChanged, const Identifier &property)
virtual void valueTreeChildAdded(ValueTree &parentTree, ValueTree &childWhichHasBeenAdded)
bool hasType(const Identifier &typeName) const noexcept
void removeChild(const ValueTree &child, UndoManager *undoManager)
bool isValid() const noexcept
ValueTree & setProperty(const Identifier &name, const var &newValue, UndoManager *undoManager)
void addChild(const ValueTree &child, int index, UndoManager *undoManager)
const var & getProperty(const Identifier &name) const noexcept
ValueTree getChildWithName(const Identifier &type) const
ValueTree getOrCreateChildWithName(const Identifier &type, UndoManager *undoManager)
void removeProperty(const Identifier &name, UndoManager *undoManager)
static const char *const bwavTimeReference
Performs a tempo detection task on a background thread.
TempoDetectTask(Engine &e, const juce::File &file)
Creates a task for a given file.
float getBpm()
Returns the bpm after a successful detection.
bool isResultSensible()
Returns true if the result was within a sensible range.
JobStatus runJob() override
Performs the actual detection.
float getCurrentTaskProgress() override
Returns the current progress.
Base class for Clips that produce some kind of audio e.g.
@ gainFade
Fade is a volume/gain ramp.
virtual bool needsRender() const
Subclasses should override this to return true if they need the rest of the render callbacks.
std::unique_ptr< ProxyRenderingInfo > createProxyRenderingInfo()
Creates a ProxyRenderingInfo object to describe the stretch segments of this clip.
void reverseLoopPoints()
Reverses the loop points to expose the same section of the source file but reversed.
virtual juce::File getOriginalFile() const =0
Must return the file that the source ProjectItemID refers to.
TimeDuration getFadeOut() const
Returns the fade out duration in seconds.
virtual TimeDuration getSourceLength() const =0
Must return the length in seconds of the source material e.g.
virtual bool canHaveEffects() const
Returns true if this clip can have ClipEffects added to it.
LoopInfo autoDetectBeatMarkers(const LoopInfo &current, bool autoBeat, float sensitivity) const
Scans the current source file for any beats and adds them to the LoopInfo returned.
void updateSourceFile()
Checks the current source file to see if it's up to date and then triggers a source render if needed.
virtual void renderComplete()
Callback to indicate that the render has completed.
juce::String canAddClipPlugin(const Plugin::Ptr &) const
Returns an empty string if this plugin can be added, otherwise an error message due to the clip plugi...
bool isRightChannelActive() const
Returns whether the right channel of the clip is enabled.
const AudioSegmentList & getAudioSegmentList()
Returns an AudioSegmentList describing this file if it is using auto-tempo.
juce::Colour getDefaultColour() const override
Returns the default colour for this clip.
void setLeftChannelActive(bool)
Enables the left channel of the clip.
void setGainDB(float dB)
Sets the gain of the clip in dB.
TimeDuration getFadeIn() const
Returns the fade in duration in seconds.
AudioClipBase * getOverlappingClip(ClipDirection) const
Returns the previous/next overlapping clip if one exists.
void updateAutoCrossfadesAsync(bool updateOverlapped)
Triggers an update of the auto-crossfades.
void setRightChannelActive(bool)
Enables the right channel of the clip.
void setPan(float pan)
Sets the pan of the clip.
virtual AudioFile getAudioFile() const
Returns the file used to play back the source and will get proxied etc.
TimeStretcher::Mode getActualTimeStretchMode() const noexcept
Returns the time-stretch mode that is in use.
bool setFadeIn(TimeDuration length)
Sets the fade in duration in seconds.
void disableLooping() override
Disables all looping.
virtual HashCode getHash() const =0
Must return a unique hash for this clip's source.
void setFadeInType(AudioFadeCurve::Type)
Sets the curve shape for the fade in to use.
void flushStateToValueTree() override
Can be overridden to ensure any state (e.g.
AudioFile getProxyFileToCreate(bool renderTimestretched)
Returns the AudioFile to create to play this clip back.
virtual void setLoopDefaults()=0
Override this to fill in the LoopInfo structure as best fits the source.
void setLoopRange(TimeRange) override
Sets the loop range the clip should use in seconds.
float getPitchChange() const
Returns the number of semitones to transpose the clip by.
bool getWarpTime() const
Returns true if warp time is enabled.
virtual juce::Array< ProjectItemID > getTakes() const
Returns the ProjectItemID of the clip's takes.
bool isLooping() const override
Returns true if this clip is currently looping.
bool getAutoTempo() const
Returns true if auto-tempo has been set.
bool isUsingMelodyne() const
Returns true if this clip is using Melodyne.
void setFadeOutType(AudioFadeCurve::Type newType)
Sets the curve shape for the fade out to use.
TimeRange getLoopRange() const
Returns the loop range in seconds.
void createNewProxyAsync()
Triggers a source or proxy render after a timeout.
void checkFadeLengthsForOverrun()
Trims the fade in out lengths to avoid any overlap between them.
TimeStretcher::Mode getTimeStretchMode() const noexcept
Returns the time-stretch mode that has been set.
virtual AudioFileInfo getWaveInfo()
Returns the WaveInfo for a clip.
AudioFadeCurve::Type getFadeOutType() const
Returns the curve shape for the fade out to use.
bool usesTimeStretchedProxy() const
Returns true if this clip uses a proxy file due to timestretching.
bool isLeftChannelActive() const
Returns whether the left channel of the clip is enabled.
void copyFadeToAutomation(bool fadeIn, bool removeClipFade)
Copies the fade in curve to a volume automation curve.
bool setFadeOut(TimeDuration length)
Sets the fade out duration in seconds.
void initialise() override
Initialises the Clip.
AudioFadeCurve::Type getFadeInType() const
Returns the curve shape for the fade in to use.
bool getAutoPitch() const
Returns true if auto-pitch has been set.
void markAsDirty()
Resets the dirty flag so that a new render will be attempted.
TimeDuration getMaximumLength() override
Returns the maximum length for this clip.
void setLoopInfo(const LoopInfo &)
Sets a LoopInfo to describe this clip's tempo, time sig etc.
void applyEdgeFades()
Sets the fade in/out lengths to be 0.03s to avoid any clicks at the start/end of the clip.
juce::ReferenceCountedObjectPtr< MelodyneFileReader > melodyneProxy
The MelodyneFileReader proxy if this clip is using Melodyne.
ClipDirection
Defines a prevous/next direction.
bool canUseProxy() const noexcept
Returns true if this clip can use a proxy file.
bool canBeAddedTo(ClipOwner &) override
Tests whether this clip can go on the given parent.
HashCode getProxyHash()
Returns a hash identifying the proxy settings.
void changed() override
This should be called to send a change notification to any SelectableListeners that are registered wi...
void cloneFrom(Clip *) override
Clones the given clip to this clip.
AudioClipBase(const juce::ValueTree &, EditItemID, Type, ClipOwner &)
Creates a basic AudioClip.
void setUsesProxy(bool canUseProxy) noexcept
Can be used to disable proxy file generation for this clip.
AudioFile getPlaybackFile()
Returns the current AudioFile being used by the Clip, either the original source or a proxy.
PatternGenerator * getPatternGenerator() override
Returns the PatternGenerator for this clip if it has one.
void setAutoTempo(bool shouldUseAutoTempo)
Enables/disables auto-tempo.
juce::CachedValue< TimeStretcher::ElastiqueProOptions > elastiqueProOptions
The ElastiqueProOptions for fine tuning Elastique (if available).
Reader::Ptr createReader(const AudioFile &)
Creates a Reader to read an AudioFile.
Smart wrapper for writing to an audio file.
bool appendBuffer(juce::AudioBuffer< float > &buffer, int numSamples)
Appends an AudioBuffer to the file.
bool writeFromAudioReader(juce::AudioFormatReader &, SampleCount startSample, SampleCount numSamples)
Appends a block of samples to the file from an audio format reader.
bool isOpen() const noexcept
Returns true if the file is open and ready to write to.
An audio scratch buffer that has pooled storage.
juce::AudioBuffer< float > & buffer
The buffer to use.
Holds a list of audio regions for playback of things like warp time.
Base class for items that can contain clips.
A clip in an edit.
virtual void disableLooping()
Disables all looping.
virtual juce::Array< TimePosition > getRescaledMarkPoints() const
Returns the mark points relative to the start of the clip, rescaled to the current speed.
virtual TimePosition getLoopStart() const
Returns the start time of the loop start point.
virtual Plugin::Array getAllPlugins()
Returns all the plugins on the clip.
juce::Array< ReferencedItem > getReferencedItems() override
Returns an array of any ReferencedItem[s] e.g.
virtual bool beatBasedLooping() const
Returns true if this clip's looping is based on beats or false if absolute time.
virtual void sendMirrorUpdateToAllPlugins(Plugin &) const
Sends an update to all plugins mirroring the one passed in.
ClipTrack * getClipTrack() const
Returns the parent ClipTrack this clip is on (if any).
virtual juce::String getName() const override
Returns the name of the clip.
void changed() override
This should be called to send a change notification to any SelectableListeners that are registered wi...
void setOffset(TimeDuration newOffset)
Sets the offset of the clip, i.e.
virtual bool hasAnyTakes() const
Returns true if this clip has any takes.
virtual bool isLooping() const
Returns true if this clip is currently looping.
virtual void pitchTempoTrackChanged()
Called when there are pitch or tempo changes made which might require clips to adjust timing informat...
juce::ValueTree state
The ValueTree of the Clip state.
virtual void flushStateToValueTree()
Can be overridden to ensure any state (e.g.
virtual TimeDuration getMaximumLength()
Returns the maximum length this clip can have.
void setStart(TimePosition newStart, bool preserveSync, bool keepLength)
Sets the start time of the clip.
Track * getTrack() const override
Returns the parent Track this clip is on (if any).
void reassignReferencedItem(const ReferencedItem &, ProjectItemID, double) override
Should be implemented to change the underlying source to a new ProjectItemID.
virtual void initialise()
Initialises the Clip.
ClipOwner * getParent() const
Returns the parent ClipOwner this clip is on.
virtual void setLoopRange(TimeRange)
Sets the loop range the clip should use in seconds.
virtual std::shared_ptr< LaunchHandle > getLaunchHandle()
Some clip types can be launched, if that's possible, this returns a handle to trigger starting/stoppi...
virtual bool canLoop() const
Returns true if this clip is capable of looping.
virtual void cloneFrom(Clip *)
Clones the given clip to this clip.
void setLength(TimeDuration newLength, bool preserveSync)
Sets the length of the clip.
virtual BeatPosition getLoopStartBeats() const
Returns the beat position of the loop start point.
double getSpeedRatio() const noexcept
Returns the speed ratio i.e.
virtual void setNumberOfLoops(int)
Sets the clip looping a number of times.
virtual TimeDuration getLoopLength() const
Returns the length of loop in seconds.
virtual void setLoopRangeBeats(BeatRange)
Sets the loop range the clip should use in beats.
virtual void setSpeedRatio(double)
Sets a speed ratio i.e.
TimeRange getLoopRange() const
Returns the loop range in seconds.
juce::File getCurrentSourceFile() const
Returns the current source file, this is different to the SourceFileReference as it could be a tempor...
virtual FollowActions * getFollowActions()
Some clip types can be launched, if that's possible, this can be used to determine the action to perf...
virtual LaunchQuantisation * getLaunchQuantisation()
Some clip types can be launched, if that's possible, this returns a quantisation that can be used for...
juce::UndoManager * getUndoManager() const
Returns the UndoManager.
ClipPosition getPosition() const override
Returns the ClipPosition on the parent Track.
virtual bool addClipPlugin(const Plugin::Ptr &, SelectionManager &)
Adds a plugin to the clip.
void setCurrentSourceFile(const juce::File &)
Sets a new source file for this clip.
virtual BeatDuration getLoopLengthBeats() const
Returns the length of loop in beats.
void setPosition(ClipPosition newPosition)
Sets the position of the clip.
static TimeDuration getMaximumLength()
Returns the maximum length an Edit can be.
Engine & engine
A reference to the Engine.
The Engine is the central class for all tracktion sessions.
AudioFileFormatManager & getAudioFileFormatManager() const
Returns the AudioFileFormatManager that maintains a list of available audio file formats.
AudioFileManager & getAudioFileManager() const
Returns the AudioFileManager instance.
Holds tempo/beat information about an audio file.
SampleCount getOutMarker() const
Returns the sample number used as the end point in the file.
void setInMarker(SampleCount)
Sets the sample number to be used as the start point in the file.
void setBpm(double newBpm, const AudioFileInfo &)
Sets the tempo of the object.
void addLoopPoint(SampleCount, LoopPointType)
Adds a loop point at the given position.
bool isLoopable() const
Returns true if this can be looped.
LoopPoint getLoopPoint(int index) const
Returns the loop points at the given index.
int getRootNote() const
Returns the root note of the object.
void changeLoopPoint(int index, SampleCount, LoopPointType)
Sets the loop point at the given index to a new position and type.
int getNumLoopPoints() const
Returns the number of loop points in the object.
SampleCount getInMarker() const
Returns the sample number used as the start point in the file.
double getNumBeats() const
Returns the number of beats.
void setOutMarker(SampleCount)
Sets the sample number to be used as the end point in the file.
void deleteLoopPoint(int index)
Removes the loop point at the given index.
double getBeatsPerSecond(const AudioFileInfo &) const
Returns the tempo of the object.
The base class that all generator jobs derive from.
static AudioFile getAudioFileForHash(Engine &, const juce::File &directory, HashCode hash)
Returns the AudioFile for a particular hash.
virtual void changed()
This should be called to send a change notification to any SelectableListeners that are registered wi...
Manages a list of items that are currently selected.
void setToProjectFileReference(const juce::File &, bool updateProjectItem)
Points this source at a new file via a project item.
Uses the SoundTouch BPMDetect class to guess the tempo of some audio.
float finishAndDetect()
Completes the detection process and returns the BPM.
void processSection(juce::AudioBuffer< float > &buffer, int numSamplesToProcess)
Processes a non-interleaved buffer section.
void prepareForJobDeletion()
Call this in your sub-class destructor to remove it from the manager queue before this class's des
void setName(const juce::String &newName)
Sets the job's name but also updates the manager so the list will reflect it.
Handles time/pitch stretching using various supported libraries.
void initialise(double sourceSampleRate, int samplesPerBlock, int numChannels, Mode, ElastiqueProOptions, bool realtime)
Initialises the TimeStretcher ready to perform timestretching.
int getFramesNeeded() const
Returns the expected number of frames required to generate some output.
bool isInitialised() const
Returns true if this has been fully initialised.
int processData(const float *const *inChannels, int numSamples, float *const *outChannels)
Processes some input frames and fills some output frames with the applied speed ratio and pitch shift...
int flush(float *const *outChannels)
Flushes the end of the stream when input data is exhausted but there is still output data available.
static Mode checkModeIsAvailable(Mode)
Checks if the given mode is available for use.
bool setSpeedAndPitch(float speedRatio, float semitones)
Sets the timestretch speed ratio and semitones pitch shift.
Mode
Holds the various algorithms to which can be used (if enabled).
Type
Defines the types of item that can live on Track[s].
BeatPosition getStartBeat() const
Returns the start beat in the Edit of this item.
BeatDuration getLengthInBeats() const
Returns the duration in beats the of this item.
TimePosition getTimeOfRelativeBeat(BeatDuration) const
Returns an Edit time point for a given number of beats from the start of this item.
BeatDuration getOffsetInBeats() const
Returns an the offset of this item in beats.
TimeRange getEditTimeRange() const
Returns the time range of this item.
BeatPosition getBeatOfRelativeTime(TimeDuration) const
Returns an Edit beat point for a given number of seconds from the start of this item.
A WarpTimeManager contains a list of WarpMarkers and some source material and maps times from a linea...
T fmod(T... args)
T get(T... args)
T is_pointer_v
#define TRANS(stringLiteral)
#define jassert(expression)
#define JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR(className)
#define jassertfalse
typedef int
typedef float
T max(T... args)
T min(T... args)
NewLine newLine
Type jlimit(Type lowerLimit, Type upperLimit, Type valueToConstrain) noexcept
void ignoreUnused(Types &&...) noexcept
bool canContainAudio(const ClipOwner &co)
Returns true if this Track can contain WaveAudioClip[s].
ResamplingQuality
Specifies a number of resampling qualities that can be used.
@ lagrange
Lagrange interpolation.
juce::AudioChannelSet channelSetFromSpeakerArrangmentString(const juce::String &arrangement)
Creates an AudioChannelSet from a list of abbreviated channel names.
constexpr TimeDuration toDuration(TimePosition)
Converts a TimePosition to a TimeDuration.
T ref(T... args)
T size(T... args)
Represents a duration in beats.
Represents a position in beats.
Represents a duration in real-life time.
constexpr double inSeconds() const
Returns the TimeDuration as a number of seconds.
Represents a position in real-life time.
constexpr double inSeconds() const
Returns the TimePosition as a number of seconds.
void updateAsync(int functionID)
Triggers an asynchronous call to one of the functions.
void handleUpdateNowIfNeeded()
If an update has been triggered and is pending, this will invoke it synchronously.
void addFunction(int functionID, const std::function< void()> &f)
Adds a function and associates a functionID with it.
Holds information about how to render a proxy for this clip.
bool render(Engine &, const AudioFile &, AudioFileWriter &, juce::ThreadPoolJob *const &, std::atomic< float > &progress) const
Renders this audio segment list to an AudioFile.
Type
A enumeration of the curve classes available.
static float alphaToGainForType(Type type, float alpha) noexcept
Converts an alpha position along the curve (0 to 1.0) into the gain at that point.
TimePosition getEnd() const
Returns the end time.
TimePosition getStart() const
Returns the start time.
TimeRange time
The TimeRange this ClipPosition occupies.
TimeDuration getOffset() const
Returns the offset.
TimeDuration getLength() const
Returns the length.
ID for objects of type EditElement - e.g.
Provides a thread-safe way to share a clip's levels with an audio engine without worrying about the C...
bool isAutomatic() const
Returns true if this is an automatic loop point.
time
times
#define CRASH_TRACER
This macro adds the current location to a stack which gets logged if a crash happens.