tracktion-engine 3.0-10-g034fdde4aa5
Tracktion Engine — High level data model for audio applications

Tracktion Engine Documentation
tracktion_WaveNode.cpp
Go to the documentation of this file.
1 /*
2 ,--. ,--. ,--. ,--.
3 ,-' '-.,--.--.,--,--.,---.| |,-.,-' '-.`--' ,---. ,--,--, Copyright 2024
4 '-. .-'| .--' ,-. | .--'| /'-. .-',--.| .-. || \ Tracktion Software
5 | | | | \ '-' \ `--.| \ \ | | | |' '-' '| || | Corporation
6 `---' `--' `--`--'`---'`--'`--' `---' `--' `---' `--''--' www.tracktion.com
7
8 Tracktion Engine uses a GPL/commercial licence - see LICENCE.md for details.
9*/
10
11#ifndef REPLACE_ELASTIQUE_WITH_DIRECT_MODE
12 #define REPLACE_ELASTIQUE_WITH_DIRECT_MODE 1
13#endif
14
15namespace tracktion { inline namespace engine
16{
17
18//==============================================================================
19//==============================================================================
20namespace utils
21{
22 inline void zeroSamplesOutsideClipRange (choc::buffer::ChannelArrayView<float> buffer,
23 BeatRange editBeatRange,
24 BeatRange clipBeatRange)
25 {
26 if (editBeatRange.isEmpty())
27 return;
28
29 using choc::buffer::FrameCount;
30 const auto beatsToClearAtStart = clipBeatRange.getStart() - editBeatRange.getStart();
31 const auto beatsToClearAtEnd = editBeatRange.getEnd() - clipBeatRange.getEnd();
32
33 const auto editBeatRangeLength = editBeatRange.getLength();
34 const auto numFrames = buffer.getNumFrames();
35
36 if (beatsToClearAtStart > 0_bd)
37 {
38 const auto numSamplesToClearAtStart = std::min (numFrames,
39 static_cast<FrameCount> (numFrames * (beatsToClearAtStart / editBeatRangeLength) + 0.5));
40
41 if (numSamplesToClearAtStart > 0)
42 buffer.getStart (numSamplesToClearAtStart).clear();
43 }
44
45 if (beatsToClearAtEnd > 0_bd)
46 {
47 const auto numSamplesToClearAtEnd = std::min (numFrames,
48 static_cast<FrameCount> (numFrames * (beatsToClearAtEnd / editBeatRangeLength) + 0.5));
49
50 if (numSamplesToClearAtEnd)
51 buffer.getEnd (numSamplesToClearAtEnd).clear();
52 }
53 }
54
    /** Optionally substitutes an Elastique time-stretch mode for direct playback,
        controlled by the REPLACE_ELASTIQUE_WITH_DIRECT_MODE macro (defaults to 1).
        NOTE(review): the substitution logic inside the #if block is not visible in
        this listing (source lines elided) — as shown here the function returns the
        mode unchanged; confirm against the full source.
    */
    inline TimeStretcher::Mode replaceElastiqueWithDirectMode (TimeStretcher::Mode m)
    {
       #if REPLACE_ELASTIQUE_WITH_DIRECT_MODE
       #endif

        return m;
    }
65
66 inline TimeStretcher::Mode replaceElastiqueWithDirectModeIfNotRendering (TimeStretcher::Mode m, bool isRendering)
67 {
68 return isRendering ? m: replaceElastiqueWithDirectMode (m);
69 }
70}
71
72//==============================================================================
73//==============================================================================
75{
76public:
78 const juce::AudioChannelSet& destBufferChannels,
79 const juce::AudioChannelSet& sourceBufferChannels)
80 : reader (std::move (ptr)), timeoutMs ((int) std::lround (timeout.inSeconds() * 1000.0)),
81 destChannelSet (destBufferChannels), sourceChannelSet (sourceBufferChannels)
82 {
83 }
84
85 choc::buffer::ChannelCount getNumChannels() override
86 {
87 return numChannels;
88 }
89
90 SampleCount getPosition() override
91 {
92 return reader->getReadPosition();
93 }
94
95 void setPosition (SampleCount t) override
96 {
97 reader->setReadPosition (t);
98 }
99
100 void setPosition (TimePosition t) override
101 {
102 setPosition (toSamples (t, getSampleRate()));
103 }
104
105 void setLoopRange (TimeRange loopRange)
106 {
107 reader->setLoopRange (toSamples (loopRange, getSampleRate()));
108 }
109
110 void reset() override
111 {}
112
113 double getSampleRate() override
114 {
115 return reader->getSampleRate();
116 }
117
    /** Reads the next block of frames from the wrapped cache reader into
        destBuffer, remapping sourceChannelSet to destChannelSet.
        Returns whatever the underlying reader reports (false on failure/timeout).
        NOTE(review): timeoutMs bounds how long the cache reader may block —
        confirm the reader's semantics for a zero timeout.
    */
    bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
    {
        // Wrap the choc view in a juce::AudioBuffer facade for the cache reader
        auto buffer = toAudioBuffer (destBuffer);
        return reader->readSamples ((int) destBuffer.getNumFrames(),
                                    buffer,
                                    destChannelSet,
                                    0, // start offset within the destination buffer
                                    sourceChannelSet,
                                    timeoutMs);
    }
128
130 int timeoutMs;
131 const juce::AudioChannelSet destChannelSet;
132 const juce::AudioChannelSet sourceChannelSet;
133 const choc::buffer::ChannelCount numChannels { static_cast<choc::buffer::ChannelCount> (destChannelSet.size()) };
134};
135
136//==============================================================================
138{
139public:
141 : source (std::move (input))
142 {
143 }
144
145 choc::buffer::ChannelCount getNumChannels() override { return source->getNumChannels(); }
146 SampleCount getPosition() override { return source->getPosition(); }
147 void setPosition (SampleCount t) override { source->setPosition (t); }
148 void setPosition (TimePosition t) override { source->setPosition (t); }
149 void reset() override { source->reset(); }
150 double getSampleRate() override { return source->getSampleRate(); }
151
152 bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
153 {
154 return source->readSamples (destBuffer);
155 }
156
158};
159
160//==============================================================================
162{
163public:
165 : SingleInputAudioReader (std::move (input)), loopRange (loopRangeToUse)
166 {
167 }
168
169 LoopReader (std::unique_ptr<AudioReader> input, TimeRange loopRangeToUse)
170 : SingleInputAudioReader (std::move (input)), loopRange (toSamples (loopRangeToUse, source->getSampleRate()))
171 {
172 }
173
    /** Sets the source read position, wrapping t into the loop range.
        With a non-empty loop, positions are mapped into
        [loopStart, loopStart + loopLength); negative positions wrap back from
        the loop end. A zero-length loop disables wrapping entirely.
    */
    void setPosition (SampleCount t) override
    {
        const auto loopStart = loopRange.getStart();
        const auto loopLength = loopRange.getLength();

        if (loopLength > 0)
        {
            if (t >= 0)
                t = loopStart + (t % loopLength); // plain modulo is fine for non-negative t
            else
                t = loopStart + juce::negativeAwareModulo (t, loopLength); // keeps the result in [0, loopLength)
        }

        source->setPosition (t);
    }
189
190 void setPosition (TimePosition t) override
191 {
192 setPosition (toSamples (t, getSampleRate()));
193 }
194
    /** Fills destBuffer, wrapping reads around the loop range.
        With no loop set (loopLength == 0) this is a plain pass-through read.
        Otherwise the destination is filled in segments, seeking back to the
        loop start each time the read position reaches the loop end.
        @returns true only if every segment read succeeded (all segments are
                 still attempted after a failure).
    */
    bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
    {
        using choc::buffer::FrameCount;

        const auto loopStart = loopRange.getStart();
        const auto loopLength = loopRange.getLength();

        if (loopLength == 0)
            return source->readSamples (destBuffer);

        const auto numFrames = static_cast<SampleCount> (destBuffer.getNumFrames());

        auto readPos = source->getPosition();

        // If the source position has drifted past the loop end, wrap it back first
        if (readPos >= loopStart + loopLength)
            readPos -= loopLength;

        int numSamplesToDo = (int) numFrames;
        SampleCount startOffsetInDestBuffer = 0;
        bool allOk = true;

        while (numSamplesToDo > 0)
        {
            jassert (juce::isPositiveAndBelow (readPos - loopStart, loopLength));

            // Read up to the loop end, or the rest of the destination, whichever is smaller
            const auto numToRead = std::min ((SampleCount) numSamplesToDo, loopStart + loopLength - readPos);

            source->setPosition (readPos);
            auto destSubsection = destBuffer.getFrameRange ({ (FrameCount) startOffsetInDestBuffer, (FrameCount) (startOffsetInDestBuffer + numToRead) });
            allOk = source->readSamples (destSubsection) && allOk;

            readPos += numToRead;

            if (readPos >= loopStart + loopLength)
                readPos -= loopLength;

            startOffsetInDestBuffer += numToRead;
            numSamplesToDo -= (int) numToRead;
        }

        return allOk;
    }
237
238 const SampleRange loopRange;
239};
240
241
242//==============================================================================
244{
245public:
247 : SingleInputAudioReader (std::move (input))
248 {
249 }
250
252 virtual void setSpeedRatio (double newSpeedRatio) = 0;
253
255 virtual void setGains (float leftGain, float rightGain) = 0;
256};
257
258
260{
261public:
262 LagrangeResamplerReader (std::unique_ptr<AudioReader> input, double sampleRateToConvertTo)
263 : ResamplerReader (std::move (input)), destSampleRate (sampleRateToConvertTo)
264 {
265 for (int i = (int) source->getNumChannels(); --i >= 0;)
266 {
267 resamplers.emplace_back();
268 resamplers.back().reset();
269 }
270 }
271
272 void setPosition (SampleCount t) override
273 {
274 setPosition (TimePosition::fromSamples (t, destSampleRate));
275 }
276
277 void setPosition (TimePosition t) override
278 {
279 source->setPosition (t);
280 }
281
283 void setSpeedRatio (double newSpeedRatio) override
284 {
285 assert (newSpeedRatio > 0);
286 speedRatio = newSpeedRatio;
287 }
288
290 void setGains (float leftGain, float rightGain) override
291 {
292 gains[0] = leftGain;
293 gains[1] = rightGain;
294 }
295
296 double getSampleRate() override
297 {
298 return destSampleRate;
299 }
300
301 bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
302 {
303 const auto numChannels = destBuffer.getNumChannels();
304 assert (numChannels <= (choc::buffer::ChannelCount) resamplers.size());
305 assert (destBuffer.getNumChannels() == numChannels);
306
307 const auto ratio = sampleRatio * speedRatio;
308 const auto numFrames = destBuffer.getNumFrames();
309 const int numSourceFramesToRead = static_cast<int> ((numFrames * ratio) + 0.5);
310 AudioScratchBuffer fileData ((int) numChannels, numSourceFramesToRead);
311 auto fileDataView = toBufferView (fileData.buffer);
312 const bool ok = source->readSamples (fileDataView);
313
314 const auto resamplerRatio = static_cast<double> (numSourceFramesToRead) / numFrames;
315
316 for (choc::buffer::ChannelCount channel = 0; channel < numChannels; ++channel)
317 {
318 if (channel < (choc::buffer::ChannelCount) resamplers.size())
319 {
320 const auto src = fileData.buffer.getReadPointer ((int) channel);
321 const auto dest = destBuffer.getIterator (channel).sample;
322
323 auto& resampler = resamplers[(size_t) channel];
324 resampler.processAdding (resamplerRatio, src, dest, (int) numFrames, gains[channel & 1]);
325 }
326 else
327 {
328 destBuffer.getChannel (channel).clear();
329 }
330 }
331
332 return ok;
333 }
334
335 const double destSampleRate;
336 const double sourceSampleRate { source->getSampleRate() };
337 const double sampleRatio { sourceSampleRate / destSampleRate };
338 double speedRatio = 1.0;
340 float gains[2] = { 1.0f, 1.0f };
341};
342
343
345{
346public:
348 double sampleRateToConvertTo, ResamplingQuality resamplingQuality)
349 : ResamplerReader (std::move (input)),
350 numChannels ((int) source->getNumChannels()),
351 destSampleRate (sampleRateToConvertTo)
352 {
353 const int converterType = [&]
354 {
355 switch (resamplingQuality)
356 {
357 case ResamplingQuality::sincFast: return src::SRC_SINC_FASTEST;
358 case ResamplingQuality::sincMedium: return src::SRC_SINC_MEDIUM_QUALITY;
359 case ResamplingQuality::sincBest: return src::SRC_SINC_BEST_QUALITY;
360 case ResamplingQuality::lagrange: [[ fallthrough ]];
361 default: assert (false); return src::SRC_SINC_FASTEST;
362 }
363 }();
364
365 int error = 0;
366 src_state = src::src_callback_new (srcReadCallback,
367 converterType, numChannels, &error, this);
368 assert (error == 0);
369 }
370
372 {
373 src_delete (src_state);
374 }
375
376 SampleCount getPosition() override
377 {
378 return getReadPosition();
379 }
380
    /** Seeks to t (in destination-rate samples).
        Seeks within +/-1 sample of the current position are ignored to avoid
        needlessly resetting the resampler state.
    */
    void setPosition (SampleCount t) override
    {
        if (std::abs (t - getReadPosition()) <= 1)
            return;

        readPosition = (double) t;

        // The source is addressed in time, converted at the destination rate
        source->setPosition (TimePosition::fromSamples (t, destSampleRate));

        // Drop any buffered resampler history so the seek doesn't smear old audio
        src_reset (src_state);
    }
392
393 void setPosition (TimePosition t) override
394 {
395 setPosition (toSamples (t, destSampleRate));
396 }
397
398 double getSampleRate() override
399 {
400 return destSampleRate;
401 }
402
404 void setSpeedRatio (double newSpeedRatio) override
405 {
406 assert (newSpeedRatio > 0);
407 speedRatio = newSpeedRatio;
408 }
409
411 void setGains (float leftGain, float rightGain) override
412 {
413 gains[0] = leftGain;
414 gains[1] = rightGain;
415 }
416
417 void reset() override
418 {
419 }
420
    /** Fills destBuffer by resampling in fixed-size chunks (chunkSize frames),
        then applies the left/right gains if they aren't unity.
        @returns false as soon as any chunk fails to read.
        NOTE(review): a zero-frame destBuffer would call readChunk with an empty
        range (which asserts) — confirm callers never pass one.
    */
    bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
    {
        const auto numFramesToDo = destBuffer.getNumFrames();
        choc::buffer::FrameCount startFrame = 0;
        auto framesRemaining = numFramesToDo;

        for (;;)
        {
            const auto numFramesThisChunk = std::min (framesRemaining, chunkSize);

            if (! readChunk (destBuffer.getFrameRange ({ startFrame, startFrame + numFramesThisChunk })))
                return false;

            startFrame += numFramesThisChunk;
            framesRemaining -= numFramesThisChunk;

            if (startFrame == numFramesToDo)
                break;
        }

        // Only the first two channels have gain applied: gains[0] = left, gains[1] = right
        if (gains[0] != 1.0f || gains[1] != 1.0f)
        {
            choc::buffer::applyGain (destBuffer.getChannel (0), gains[0]);

            if (destBuffer.getNumChannels() > 1)
                choc::buffer::applyGain (destBuffer.getChannel (1), gains[1]);
        }

        return true;
    }
451
452 static constexpr choc::buffer::FrameCount chunkSize = 256;
453 const int numChannels;
454 const double destSampleRate;
455 const double sourceSampleRate { source->getSampleRate() };
456 const double sampleRatio { sourceSampleRate / destSampleRate };
457
458 src::SRC_STATE* src_state = nullptr;
459
460 choc::buffer::ChannelArrayBuffer<float> scratchBuffer { choc::buffer::createChannelArrayBuffer (numChannels, chunkSize, [] { return 0.0f; }) };
461 choc::buffer::InterleavedBuffer<float> interleavedInputScratchBuffer { choc::buffer::createInterleavedBuffer (numChannels, chunkSize, [] { return 0.0f; }) };
462 choc::buffer::InterleavedBuffer<float> interleavedOutputScratchBuffer { interleavedInputScratchBuffer };
463
464 double speedRatio = 1.0, readPosition = 0.0;
465 float gains[2] = { 1.0f, 1.0f };
466 bool failedToRead = false;
467
468 SampleCount getReadPosition() const
469 {
470 return static_cast<SampleCount> (readPosition + 0.5);
471 }
472
473 static long srcReadCallback (void* data, float** destInterleavedSampleData)
474 {
475 return static_cast<HighQualityResamplerReader*> (data)->srcReadCallback (destInterleavedSampleData);
476 }
477
    /** libsamplerate pull callback: reads one scratch-buffer's worth of source
        frames, interleaves them and hands SRC the interleaved pointer.
        A failed source read clears the scratch and is remembered in
        failedToRead — returning 0 frames here would make SRC hang, so a full
        (silent) buffer is always reported.
    */
    long srcReadCallback (float** destInterleavedSampleData)
    {
        const auto numFramesToRead = interleavedInputScratchBuffer.getSize().numFrames;

        // Read data in to temp buffer
        auto scratchView = scratchBuffer.getView();

        // If the reader fails, we can't bail out or SRC will hang so just give it an empty buffer and pass the fail via a flag


        if (failedToRead = ! source->readSamples (scratchView); failedToRead)
            scratchView.clear();

        // Interleave
        choc::buffer::copy (interleavedInputScratchBuffer, scratchBuffer);
        *destInterleavedSampleData = interleavedInputScratchBuffer.getView().data.data;

        return static_cast<long> (numFramesToRead);
    }
497
    /** Resamples exactly destBuffer.getNumFrames() frames (1..chunkSize) out of
        the SRC state into destBuffer and advances the nominal read position.
        On failure (the pull callback failed, or SRC returned a short count)
        the destination is cleared and false is returned.
    */
    bool readChunk (const choc::buffer::ChannelArrayView<float>& destBuffer)
    {
        assert (destBuffer.getNumFrames() > 0);
        assert (destBuffer.getNumFrames() <= chunkSize);
        assert (numChannels == (int) destBuffer.getNumChannels());
        const auto numFramesToDo = destBuffer.getNumFrames();
        const auto ratio = sampleRatio * speedRatio;
        assert (ratio > 0.0);

        // src_callback_read takes the output/input ratio, hence the inversion
        const auto numRead = src_callback_read (src_state, 1.0 / ratio,
                                                static_cast<long> (numFramesToDo),
                                                interleavedOutputScratchBuffer.getView().data.data);

        if (failedToRead
            || static_cast<decltype (numFramesToDo)> (numRead) != numFramesToDo)
        {
            destBuffer.clear();
            return false;
        }

        // De-interleave into the destination
        choc::buffer::copy (destBuffer, interleavedOutputScratchBuffer.getStart (numFramesToDo));
        readPosition += numFramesToDo;

        return true;
    }
523};
524
526
527{
528public:
530 : SingleInputAudioReader (std::move (input))
531 {
532 }
533
534 virtual ~TimeStretchReaderBase() = default;
535 virtual void setSpeed (double speedRatio) = 0;
536 virtual void setPitch (double semitones) = 0;
537};
538
540{
541public:
544 TimeStretcher::ElastiqueProOptions elastiqueProOptions)
545 : TimeStretchReaderBase (std::move (input)), numChannels ((int) source->getNumChannels())
546 {
547 timeStretcher.initialise (source->getSampleRate(), chunkSize, numChannels,
548 mode, elastiqueProOptions, true);
549 inputFifo.setSize (numChannels, timeStretcher.getMaxFramesNeeded());
550 outputFifo.setSize (numChannels, timeStretcher.getMaxFramesNeeded());
551
552 source->setPosition (getReadPosition());
553 timeStretcher.reset();
554 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
555 inputFifo.reset();
556 outputFifo.reset();
557 }
558
559 SampleCount getPosition() override
560 {
561 return getReadPosition();
562 }
563
564 void setPosition (SampleCount t) override
565 {
566 if (std::abs (t - getReadPosition()) <= 10)
567 return;
568
569 readPosition = (double) t;
570
571 source->setPosition (t);
572 timeStretcher.reset();
573 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
574 inputFifo.reset();
575 outputFifo.reset();
576 }
577
578 void setPosition (TimePosition t) override
579 {
580 setPosition (toSamples (t, getSampleRate()));
581 }
582
583 void reset() override
584 {
585 }
586
587 void setSpeed (double speedRatio) override
588 {
589 if (playbackSpeedRatio == speedRatio)
590 return;
591
592 playbackSpeedRatio = speedRatio;
593 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
594 }
595
596 void setPitch (double semitones) override
597 {
598 if (semitonesShift == semitones)
599 return;
600
601 semitonesShift = semitones;
602 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
603 }
604
605 void setSpeedAndPitch (double speedRatio, double semitones)
606 {
607 playbackSpeedRatio = speedRatio;
608 semitonesShift = semitones;
609
610 [[ maybe_unused ]] const bool ok = timeStretcher.setSpeedAndPitch ((float) (1.0 / speedRatio), (float) semitonesShift);
611 assert (ok);
612 }
613
    /** Fills destBuffer with time-stretched output.
        Loop: feed the stretcher however many source frames it asks for,
        process into the output fifo, and break once the fifo holds a full
        block. The nominal read position advances by numFrames * speed so
        getPosition() tracks the source timeline.
        @returns false if the source read fails.
    */
    bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
    {
        assert (numChannels == (int) destBuffer.getNumChannels());
        const auto numFramesToDo = destBuffer.getNumFrames();

        for (;;)
        {
            // If there are enough output samples in the fifo, read them out
            if (outputFifo.getNumReady() >= int (numFramesToDo))
            {
                auto destAudioBuffer = toAudioBuffer (destBuffer);
                outputFifo.read (destAudioBuffer, 0);
                break;
            }

            const auto numThisTime = timeStretcher.getFramesNeeded();

            if (numThisTime > 0)
            {
                // Read samples from source and push to fifo
                AudioScratchBuffer scratchBuffer (numChannels, numThisTime);
                scratchBuffer.buffer.clear();
                auto scratchView = toBufferView (scratchBuffer.buffer);

                if (! source->readSamples (scratchView))
                    return false;

                inputFifo.write (scratchBuffer.buffer);
            }

            // Push into time-stretcher and read output samples to out fifo
            assert (inputFifo.getNumReady() >= numThisTime);
            assert (outputFifo.getFreeSpace() >= numThisTime);
            assert (outputFifo.getFreeSpace() >= chunkSize);
            timeStretcher.processData (inputFifo, numThisTime, outputFifo);
        }

        readPosition += numFramesToDo * playbackSpeedRatio;

        return true;
    }
655
656 static constexpr int chunkSize = 256;
657 const int numChannels;
658 TimeStretcher timeStretcher;
659 AudioFifo inputFifo { numChannels, chunkSize }, outputFifo { numChannels, chunkSize };
660 double playbackSpeedRatio = 1.0, semitonesShift = 0.0, readPosition = 0.0;
661
662 SampleCount getReadPosition() const
663 {
664 return static_cast<SampleCount> (readPosition + 0.5);
665 }
666};
667
669{
670public:
673 TimeStretcher::ElastiqueProOptions elastiqueProOptions,
674 int blockSize)
675 : TimeStretchReaderBase (std::move (input)), numChannels ((int) source->getNumChannels()),
676 chunkSize (std::min (blockSize, 1024)) // Elastique can only support up to 1024
677 {
678 timeStretcher.initialise (source->getSampleRate(), chunkSize, numChannels,
679 mode, elastiqueProOptions, true);
680
681 source->setPosition (getReadPosition());
682 timeStretcher.reset();
683 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
684 }
685
686 SampleCount getPosition() override
687 {
688 return getReadPosition();
689 }
690
691 void setPosition (SampleCount t) override
692 {
693 if (std::abs (t - getReadPosition()) <= 10)
694 return;
695
696 readPosition = (double) t;
697
698 source->setPosition (t);
699 timeStretcher.reset();
700 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
701 }
702
703 void setPosition (TimePosition t) override
704 {
705 setPosition (toSamples (t, getSampleRate()));
706 }
707
708 void reset() override
709 {
710 }
711
712 void setSpeed (double speedRatio) override
713 {
714 if (playbackSpeedRatio == speedRatio)
715 return;
716
717 playbackSpeedRatio = speedRatio;
718 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
719 }
720
721 void setPitch (double semitones) override
722 {
723 if (semitonesShift == semitones)
724 return;
725
726 semitonesShift = semitones;
727 setSpeedAndPitch (playbackSpeedRatio, semitonesShift);
728 }
729
730 void setSpeedAndPitch (double speedRatio, double semitones)
731 {
732 playbackSpeedRatio = speedRatio;
733 semitonesShift = semitones;
734
735 [[ maybe_unused ]] const bool ok = timeStretcher.setSpeedAndPitch ((float) (1.0 / speedRatio), (float) semitonesShift);
736 assert (ok);
737 }
738
    /** Fills destBuffer from the read-ahead stretcher, popping output in
        chunkSize blocks and pushing more source frames whenever the stretcher
        recommends it. ("getFramesRecomended" is the stretcher API's own spelling.)
        @returns false if a pop yields nothing (destination is cleared) or a
                 source read fails.
    */
    bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
    {
        assert (numChannels == (int) destBuffer.getNumChannels());
        const auto numFramesToDo = static_cast<int> (destBuffer.getNumFrames());
        int startFrame = 0;

        // Always push one block to keep the input fifo stoked but don't do this for every chunk unless required
        if (const auto numToPush = timeStretcher.getFramesRecomended(); numToPush > 0)
            if (! readSourceAndPushFrames (numToPush))
                return false;

        for (auto numFramesLeft = numFramesToDo; numFramesLeft > 0;)
        {
            const auto maxNumFramesThisTime = std::min (chunkSize, numFramesLeft);

            // Read the number of samples we need
            {
                auto destAudioBuffer = toAudioBuffer (destBuffer.getFrameRange (createFrameRange (startFrame, startFrame + maxNumFramesThisTime)));
                const int numRead = timeStretcher.popData (destAudioBuffer.getArrayOfWritePointers(), maxNumFramesThisTime);

                if (numRead == 0)
                {
                    destBuffer.clear();
                    return false;
                }

                startFrame += numRead;
                numFramesLeft -= numRead;
            }

            // Top up the stretcher's input if it still needs frames to finish this block
            if (numFramesLeft > 0 && timeStretcher.requiresMoreFrames())
                if (const auto numToPush = timeStretcher.getFramesRecomended(); numToPush > 0)
                    if (! readSourceAndPushFrames (numToPush))
                        return false;
        }

        readPosition += numFramesToDo * playbackSpeedRatio;

        return true;
    }
779
780 const int numChannels, chunkSize = 1024;
781 ReadAheadTimeStretcher timeStretcher { 3 };
782 double playbackSpeedRatio = 1.0, semitonesShift = 0.0, readPosition = 0.0;
783
784 SampleCount getReadPosition() const
785 {
786 return static_cast<SampleCount> (readPosition + 0.5);
787 }
788
789 bool readSourceAndPushFrames (int numSourceFrames)
790 {
791 if (numSourceFrames <= 0)
792 return true;
793
794 // Read samples from source and push to stretcher if capacity
795 AudioScratchBuffer scratchBuffer (numChannels, numSourceFrames);
796 scratchBuffer.buffer.clear();
797 auto scratchView = toBufferView (scratchBuffer.buffer);
798
799 if (! source->readSamples (scratchView))
800 return false;
801
802 return timeStretcher.pushData (scratchBuffer.buffer.getArrayOfReadPointers(), numSourceFrames) == numSourceFrames;
803 }
804};
805
807{
808 TimePosition position;
809 double stretchRatio = 1.0;
810};
811
/** Maps an un-warped time to the equivalent source time plus the local stretch
    ratio between the surrounding warp markers.
    - Empty map: identity mapping, ratio 1.
    - Before/at the first marker and after the last: linear extension, ratio 1.
    - Otherwise: linear interpolation between the two bracketing markers.
    NOTE(review): when time <= first.warpTime the result is -(time - first.warpTime),
    i.e. it ignores first.sourceTime — confirm the first marker is always anchored
    at source time 0.
    NOTE(review): coincident markers (warpedDuration == 0) return position 0
    rather than the marker's source time — verify this is intended.
*/
inline WarpedTime warpTime (const WarpMap& map, TimePosition time)
{
    if (map.empty())
        return { time, 1.0 };

    // A non-empty map is expected to hold at least two markers
    assert (map.size() > (size_t) 1);
    WarpPoint startMarker, endMarker;

    auto first = map.front();
    auto last = map.back();

    if (time <= first.warpTime) //below or on the 1st marker
    {
        const auto durationBefore = time - first.warpTime;
        return { toPosition (-durationBefore), 1.0 };
    }
    else if (time > last.warpTime) // after the last marker
    {
        const auto durationBeyondEnd = time - last.warpTime;
        return { last.sourceTime + durationBeyondEnd, 1.0 };
    }
    else
    {
        // Find the first marker at or after the requested time; the loop can't
        // run off the end because time <= last.warpTime here
        size_t index = 0;
        auto numMarkers = map.size();

        while (index < numMarkers && map[index].warpTime < time)
            index++;

        if (index > 0)
            startMarker = map[index - 1];

        endMarker = map[index];
    }

    const auto sourceDuration = endMarker.sourceTime - startMarker.sourceTime;
    const auto warpedDuration = endMarker.warpTime - startMarker.warpTime;

    TimePosition sourcePosition;

    if (warpedDuration == 0.0s)
        return { 0s, 1.0 };

    // Linear interpolation between the two markers
    const double warpProportion = (time - startMarker.warpTime) / warpedDuration;
    sourcePosition = startMarker.sourceTime + (sourceDuration * warpProportion);
    const double ratio = sourceDuration / warpedDuration;

    return { sourcePosition, ratio };
}
861
862//==============================================================================
864{
865public:
867 WarpMap warpMap,
870 : SingleInputAudioReader (std::make_unique<TimeStretchReader> (std::move (input), mode, options)),
871 reader (static_cast<TimeStretchReader*> (source.get())), map (std::move (warpMap))
872 {
873 }
874
875 SampleCount getPosition() override
876 {
877 return readPosition;
878 }
879
880 void setPosition (TimePosition t) override
881 {
882 setPosition (toSamples (t, getSampleRate()));
883 }
884
885 void setPosition (SampleCount t) override
886 {
887 if (t == readPosition)
888 return;
889
890 readPosition = t;
891 setSourcePosition (t);
892 }
893
    /** Reads a block, first setting the stretcher speed to the warp ratio at
        the current (un-warped) position, then advancing the nominal position
        by the block length.
    */
    bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
    {
        const auto unwarpedStartTime = TimePosition::fromSamples (readPosition, getSampleRate());
        const auto ratio = warpTime (map, unwarpedStartTime).stretchRatio;

        reader->setSpeed (ratio);
        readPosition += (SampleCount) destBuffer.getNumFrames();

        return reader->readSamples (destBuffer);
    }
904
905private:
906 TimeStretchReader* reader = nullptr;
907 WarpMap map;
908 SampleCount readPosition = 0;
909
910 void setSourcePosition (SampleCount pos)
911 {
912 const auto sampleRate = getSampleRate();
913 const auto sourceTime = TimePosition::fromSamples (pos, sampleRate);
914 const auto warpedTime = warpTime (map, sourceTime).position;
915 const auto warpedSamplePos = toSamples (warpedTime, sampleRate);
916 source->setPosition (warpedSamplePos);
917 }
918};
919
920
921//==============================================================================
923{
924public:
926 TimeStretchReaderBase* timeStretcher,
927 const tempo::Sequence& fileTempoSequence)
928 : SingleInputAudioReader (std::move (input)),
929 timeStretchSource (timeStretcher),
930 rootPitch (fileTempoSequence.getKeyAt (0s).pitch),
931 syncToKey (true)
932 {
933 assert (timeStretchSource != nullptr);
934 }
935
937 TimeStretchReaderBase* timeStretcher,
938 float numSemitones)
939 : SingleInputAudioReader (std::move (input)),
940 timeStretchSource (timeStretcher),
941 numSemitonesShift (numSemitones)
942 {
943 assert (timeStretchSource != nullptr);
944 }
945
946 void setKey (tempo::Key newKey)
947 {
948 key = newKey;
949 }
950
951 bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
952 {
953 if (syncToKey)
954 {
955 int pitch = key.pitch;
956 int transposeBase = pitch - rootPitch;
957
958 while (transposeBase > 6) transposeBase -= 12;
959 while (transposeBase < -6) transposeBase += 12;
960
961 timeStretchSource->setPitch (static_cast<double> (transposeBase));
962 }
963 else
964 {
965 timeStretchSource->setPitch (static_cast<double> (numSemitonesShift));
966 }
967
968 return SingleInputAudioReader::readSamples (destBuffer);
969 }
970
971 TimeStretchReaderBase* timeStretchSource;
972 const int rootPitch = 60;
973 tempo::Key key;
974
975 bool syncToKey = false;
976 float numSemitonesShift = 0.0f;
977};
978
979
980//==============================================================================
982{
983public:
985 : SingleInputAudioReader (std::move (input)),
986 resamplerReader (static_cast<ResamplerReader*> (source.get()))
987 {
988 }
989
991 : SingleInputAudioReader (std::move (input)),
992 timeStretchSource (static_cast<TimeStretchReaderBase*> (source.get()))
993 {
994 }
995
997 TimeStretchReaderBase* timeStretcher)
998 : SingleInputAudioReader (std::move (input)),
999 timeStretchSource (timeStretcher)
1000 {
1001 assert (timeStretcher != nullptr);
1002 }
1003
    /** Reads a source time range, first configuring either the time-stretcher
        or the resampler with the block's effective speed ratio
        (block length / edit duration, scaled by playbackSpeedRatio).
        A zero-length range just clears the destination and succeeds.
        Non-contiguous blocks reset the chain before reading so stale state
        isn't smeared across a jump.
    */
    bool read (TimeRange tr,
               choc::buffer::ChannelArrayView<float>& destBuffer,
               TimeDuration editDuration,
               bool isContiguous,
               double playbackSpeedRatio)
    {
        const auto numSourceSamples = toSamples (tr.getLength(), getSampleRate());

        if (numSourceSamples == 0)
        {
            destBuffer.clear();
            return true;
        }

        const auto blockSpeedRatio = (tr.getLength() / editDuration) * playbackSpeedRatio;

        // Exactly one of these is non-null, chosen by whichever constructor was used
        if (timeStretchSource)
            timeStretchSource->setSpeed (blockSpeedRatio);
        else
            resamplerReader->setSpeedRatio (blockSpeedRatio);

        setPosition (tr.getStart());

        if (! isContiguous)
            reset();

        return readSamples (destBuffer);
    }
1032
1033 ResamplerReader* resamplerReader = nullptr;
1034 TimeStretchReaderBase* timeStretchSource = nullptr;
1035};
1036
1037
1038//==============================================================================
1040{
1041public:
1043 TimeRange sourceTimeRange, TimeDuration offsetTime,
1044 double speedRatioToUse)
1045 : source (std::move (input)),
1046 clipPosition (sourceTimeRange), offset (offsetTime),
1047 speedRatio (speedRatioToUse)
1048 {
1049 }
1050
1051 bool read (TimeRange editTimeRange,
1052 choc::buffer::ChannelArrayView<float>& destBuffer,
1053 TimeDuration editDuration,
1054 bool isContiguous,
1055 double playbackSpeedRatio)
1056 {
1057 const auto clipStartOffset = toDuration (clipPosition.getStart());
1058 TimeRange tr ((editTimeRange.getStart() - clipStartOffset) * speedRatio,
1059 (editTimeRange.getEnd() - clipStartOffset) * speedRatio);
1060 tr = tr + (offset * speedRatio);
1061 const auto readOk = source->read (tr, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1062
1063 // Clear samples outside of clip position
1064 // N.B. this shouldn't happen when using a clip combiner as the times should be clipped correctly
1065 if (! tr.isEmpty())
1066 {
1067 using choc::buffer::FrameCount;
1068 const auto timeToClearAtStart = editTimeRange.contains (clipPosition.getStart()) ? clipPosition.getStart() - editTimeRange.getStart() : 0_td;
1069 const auto timeToClearAtEnd = editTimeRange.contains (clipPosition.getEnd()) ? editTimeRange.getEnd() - clipPosition.getEnd() : 0_td;
1070
1071 const auto editTimeRangeLength = editTimeRange.getLength();
1072 const auto numFrames = destBuffer.getNumFrames();
1073
1074 if (timeToClearAtStart > 0_td)
1075 {
1076 const auto numSamplesToClearAtStart = static_cast<FrameCount> (numFrames * (timeToClearAtStart / editTimeRangeLength) + 0.5);
1077 destBuffer.getStart (numSamplesToClearAtStart).clear();
1078 }
1079
1080 if (timeToClearAtEnd > 0_td)
1081 {
1082 const auto numSamplesToClearAtEnd = static_cast<FrameCount> (numFrames * (timeToClearAtEnd / editTimeRangeLength) + 0.5);
1083 destBuffer.getEnd (numSamplesToClearAtEnd).clear();
1084 }
1085 }
1086
1087 return readOk;
1088 }
1089
1090 choc::buffer::ChannelCount getNumChannels() override { return source->getNumChannels(); }
1091 SampleCount getPosition() override { return source->getPosition(); }
1092 void setPosition (SampleCount t) override { source->setPosition (t); }
1093 void setPosition (TimePosition t) override { source->setPosition (t); }
1094 void reset() override { source->reset(); }
1095 double getSampleRate() override { return source->getSampleRate(); }
1096
1097 bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
1098 {
1099 assert (false && "Use the other read method that takes a time range");
1100 return source->readSamples (destBuffer);
1101 }
1102
1104 const TimeRange clipPosition;
1105 const TimeDuration offset;
1106 const double speedRatio;
1107};
1108
1109
1110//==============================================================================
1114class BeatRangeReader final : public AudioReader
1115{
1116public:
1118 BeatRange loopRange_,
1119 BeatDuration offset_,
1120 std::shared_ptr<BeatDuration> dynamicOffset_,
1121 tempo::Sequence::Position sourceSequencePosition_)
1122 : source (std::move (input)),
1123 loopRange (loopRange_), offset (offset_),
1124 dynamicOffset (std::move (dynamicOffset_)),
1125 sourceSequencePosition (sourceSequencePosition_)
1126 {
1127 assert (dynamicOffset);
1128 }
1129
1130 bool read (BeatRange br,
1131 choc::buffer::ChannelArrayView<float>& destBuffer,
1132 TimeDuration editDuration,
1133 bool isContiguous,
1134 double playbackSpeedRatio)
1135 {
1136 // Apply offset first
1137 const auto beatRangeToRead = br + offset - *dynamicOffset;
1138
1139 return readLoopedBeatRange (beatRangeToRead, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1140 }
1141
// Boilerplate AudioReader overrides: queries and positioning are forwarded
// directly to the wrapped source reader.
1142 choc::buffer::ChannelCount getNumChannels() override { return source->getNumChannels(); }
1143 SampleCount getPosition() override { return source->getPosition(); }
1144 void setPosition (SampleCount t) override { source->setPosition (t); }
1145 void setPosition (TimePosition t) override { source->setPosition (t); }
1146 void reset() override { source->reset(); }
1147 double getSampleRate() override { return source->getSampleRate(); }
1148
// Raw sample reads pass straight through — no beat-to-time mapping applied.
1149 bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
1150 {
1151 return source->readSamples (destBuffer);
1152 }
1153
1154private:
1156 const BeatRange loopRange;
1157 const BeatDuration offset;
1158 std::shared_ptr<BeatDuration> dynamicOffset;
1159 tempo::Sequence::Position sourceSequencePosition;
1160
// Maps a linear beat range into the loop section and reads it, splitting the
// read into two sub-reads when the requested range wraps around the loop
// boundary.
1161 bool readLoopedBeatRange (BeatRange br,
1162 choc::buffer::ChannelArrayView<float>& destBuffer,
1163 TimeDuration editDuration,
1164 bool isContiguous,
1165 double playbackSpeedRatio)
1166 {
1167 using choc::buffer::FrameCount;
1168
// No loop section set: read the range as-is.
1169 if (loopRange.isEmpty())
1170 return readBeatRange (br, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1171
// Wrap both ends of the range into the loop section independently.
1172 const auto s = linearPositionToLoopPosition (br.getStart(), loopRange);
1173 const auto e = linearPositionToLoopPosition (br.getEnd(), loopRange);
1174
// s > e means the range crosses the loop end and wraps back to the start.
1175 if (s > e)
1176 {
// Degenerate wrap cases: one of the two ends is pinned to a loop boundary,
// so the whole read collapses to a single contiguous range.
1177 if (s >= loopRange.getEnd())
1178 return readBeatRange ({ loopRange.getStart(), e }, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1179
1180 if (e <= loopRange.getStart())
1181 return readBeatRange ({ s, loopRange.getEnd() }, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1182
1183 // Otherwise range is split
// Proportion of the read that lies before the loop point determines how
// many destination frames (and how much edit time) each half receives.
1184 const BeatRange br1 (s, loopRange.getEnd());
1185 const BeatRange br2 (loopRange.getStart(), e);
1186 const auto prop1 = br1.getLength() / br.getLength();
1187 const auto prop2 = 1.0 - prop1;
1188
1189 const auto numFrames = destBuffer.getNumFrames();
1190 const auto numFrames1 = static_cast<FrameCount> (std::llround (numFrames * prop1));
1191
1192 auto buffer1 = destBuffer.getStart (numFrames1);
1193 auto buffer2 = destBuffer.getFrameRange ({ numFrames1, numFrames });
// The second half starts at the loop point, so it can never be contiguous
// with the previous block — hence isContiguous is forced to false for it.
1195 return readBeatRange (br1, buffer1, editDuration * prop1, isContiguous, playbackSpeedRatio)
1196 && readBeatRange (br2, buffer2, editDuration * prop2, false, playbackSpeedRatio);
1197 }
1198
1199 return readBeatRange ({ s, e }, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1200 }
1201
1202 bool readBeatRange (BeatRange br,
1203 choc::buffer::ChannelArrayView<float>& destBuffer,
1204 TimeDuration editDuration,
1205 bool isContiguous,
1206 double playbackSpeedRatio)
1207 {
1208 // Convert source beat range to source time range
1209 sourceSequencePosition.set (br.getStart());
1210 const auto startTime = (sourceSequencePosition.getTime());
1211 sourceSequencePosition.set (br.getEnd());
1212 const auto endTime = (sourceSequencePosition.getTime());
1213
1214 return source->read ({ startTime, endTime }, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1215 }
1216
// Wraps an absolute linear beat position into the loop section.
// NOTE(review): the modulo is taken on the absolute position rather than on
// (position - loopRange.getStart()), so this presumably assumes loops that
// effectively begin at a position commensurate with their length — verify if
// loops with arbitrary start points are ever passed in. Callers are expected
// to guard against an empty loopRange (readLoopedBeatRange does); fmod with a
// zero divisor would yield NaN here.
1217 static inline BeatPosition linearPositionToLoopPosition (BeatPosition position, BeatRange loopRange)
1218 {
1219 return loopRange.getStart() + BeatDuration::fromBeats (std::fmod (position.inBeats(), loopRange.getLength().inBeats()));
1220 }
1221};
1222
1223//==============================================================================
1225{
1226public:
// Takes ownership of the beat-range reader, recording where the clip sits in
// the edit (in beats) and sharing the dynamic beat offset.
// dynamicOffset must be non-null (asserted).
1227 EditToClipBeatReader (std::unique_ptr<BeatRangeReader> input, BeatRange clipPosition_,
1228 std::shared_ptr<BeatDuration> dynamicOffset_)
1229 : source (std::move (input)), clipPosition (clipPosition_),
1230 dynamicOffset (std::move (dynamicOffset_))
1231 {
1232 assert (dynamicOffset);
1233 }
1234
1235 bool read (BeatRange editBeatRange,
1236 choc::buffer::ChannelArrayView<float>& destBuffer,
1237 TimeDuration editDuration,
1238 bool isContiguous,
1239 double playbackSpeedRatio)
1240 {
1241 const auto clipBeatRange = editBeatRange - toDuration (clipPosition.getStart());
1242 const auto readOk = source->read (clipBeatRange, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1243
1244 utils::zeroSamplesOutsideClipRange (destBuffer, editBeatRange, clipPosition + *dynamicOffset);
1245
1246 return readOk;
1247 }
1248
// Boilerplate AudioReader overrides forwarded to the wrapped beat-range reader.
1249 choc::buffer::ChannelCount getNumChannels() override { return source->getNumChannels(); }
1250 SampleCount getPosition() override { return source->getPosition(); }
1251 void setPosition (SampleCount t) override { source->setPosition (t); }
1252 void setPosition (TimePosition t) override { source->setPosition (t); }
1253 void reset() override { source->reset(); }
1254 double getSampleRate() override { return source->getSampleRate(); }
1255
1256 bool readSamples (choc::buffer::ChannelArrayView<float>& destBuffer) override
1257 {
// NOTE(review): source line 1258 is absent from this extract — presumably an
// assertion discouraging direct sample reads, as in EditToClipTimeReader;
// confirm against the original file.
1259 return source->readSamples (destBuffer);
1260 }
1261
1262private:
1264 const BeatRange clipPosition;
1265 std::shared_ptr<BeatDuration> dynamicOffset;
1266};
1267
1268
1269//==============================================================================
1271{
1272public:
1275 : editToClipBeatReader (std::move (beatReader)),
1276 editToClipTimeReader (std::move (timeReader))
1277 {
1278 assert ((editToClipBeatReader || editToClipTimeReader) && "Must supply one valid reader");
1279 }
1280
// Exactly one of the two wrapped readers is expected to be non-null
// (asserted in the constructor), so these are mutually exclusive.
1281 bool isBeatBased() const { return editToClipBeatReader != nullptr; }
1282 bool isTimeBased() const { return editToClipTimeReader != nullptr; }
1283
1284 choc::buffer::ChannelCount getNumChannels() const
1285 {
1286 return editToClipBeatReader ? editToClipBeatReader->getNumChannels()
1287 : editToClipTimeReader->getNumChannels();
1288 }
1289
1290 bool read (BeatRange editBeatRange,
1291 TimeRange editTimeRange,
1292 choc::buffer::ChannelArrayView<float>& destBuffer,
1293 bool isContiguous,
1294 double playbackSpeedRatio)
1295 {
1296 const auto editDuration = editTimeRange.getLength();
1297
1298 if (editToClipBeatReader)
1299 return editToClipBeatReader->read (editBeatRange, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1300
1301 if (editToClipTimeReader)
1302 return editToClipTimeReader->read (editTimeRange, destBuffer, editDuration, isContiguous, playbackSpeedRatio);
1303
1304 return false;
1305 }
1306
1307private:
1308 std::unique_ptr<EditToClipBeatReader> editToClipBeatReader;
1309 std::unique_ptr<EditToClipTimeReader> editToClipTimeReader;
1310};
1311
1312//==============================================================================
1314{
1315public:
1317 SpeedFadeDescription speedFadeDesc,
1319 : reader (std::move (editReader)),
1320 fadeDesc (speedFadeDesc),
1321 tempoPosition (std::move (editTempoPosition))
1322 {
1323 for (int i = (int) getNumChannels(); --i >= 0;)
1324 {
1326 resamplers.back().reset();
1327 }
1328 }
1329
// Simple forwarding queries on the wrapped EditReader.
1330 bool isBeatBased() const { return reader->isBeatBased(); }
1331 bool isTimeBased() const { return reader->isTimeBased(); }
1332
1333 choc::buffer::ChannelCount getNumChannels() const
1334 {
1335 return reader->getNumChannels();
1336 }
1337
// Reads a block, applying speed-fade warping when active: the requested time
// range is remapped through the fade curves, the warped (longer/shorter)
// range is read into a scratch buffer, and the result is resampled back to
// the destination's frame count to produce the speed-up/slow-down effect.
1338 bool read (BeatRange editBeatRange,
1339 TimeRange editTimeRange,
1340 choc::buffer::ChannelArrayView<float>& destBuffer,
1341 bool isContiguous,
1342 double playbackSpeedRatio)
1343 {
// Fast path: no fade configured (or nothing to read) — pass straight through.
1344 if (! shouldWarp() || editTimeRange.isEmpty())
1345 return reader->read (editBeatRange, editTimeRange, destBuffer, isContiguous, playbackSpeedRatio);
1346
1347 const auto originalDuration = editTimeRange.getLength();
1348 std::tie (editBeatRange, editTimeRange) = warpTimeRanges (editBeatRange, editTimeRange);
1349
// ratio > 1 means the warped range is longer than requested: more source
// material is read and then compressed into the destination block.
1350 const auto editDuration = editTimeRange.getLength();
1351 const auto ratio = editDuration / originalDuration;
1352
1353 const auto numChannels = destBuffer.getNumChannels();
1354 assert (numChannels <= (choc::buffer::ChannelCount) resamplers.size());
// NOTE(review): this second assert restates numChannels' own definition and
// is therefore always true — possibly a leftover; confirm intent.
1355 assert (destBuffer.getNumChannels() == numChannels);
1356
// Round to the nearest whole source frame for the warped read.
1357 const auto numFrames = destBuffer.getNumFrames();
1358 const int numSourceFramesToRead = static_cast<int> ((numFrames * ratio) + 0.5);
1359 AudioScratchBuffer sourceData ((int) numChannels, numSourceFramesToRead);
1360 auto sourceDataView = toBufferView (sourceData.buffer);
1361 sourceDataView.clear();
1362 const bool ok = reader->read (editBeatRange, editTimeRange, sourceDataView, isContiguous, playbackSpeedRatio);
1363
// Resample each channel from the scratch buffer back to the block size;
// channels with no resampler state are silenced instead.
1364 const auto resamplerRatio = static_cast<double> (numSourceFramesToRead) / numFrames;
1365
1366 for (choc::buffer::ChannelCount channel = 0; channel < numChannels; ++channel)
1367 {
1368 if (channel < (choc::buffer::ChannelCount) resamplers.size())
1369 {
1370 const auto src = sourceData.buffer.getReadPointer ((int) channel);
1371 const auto dest = destBuffer.getIterator (channel).sample;
1372
1373 auto& resampler = resamplers[(size_t) channel];
1374 resampler.process (resamplerRatio, src, dest, (int) numFrames);
1375 }
1376 else
1377 {
1378 destBuffer.getChannel (channel).clear();
1379 }
1380 }
1381
1382 return ok;
1383 }
1384
1385private:
1387
1388 SpeedFadeDescription fadeDesc;
1391
1392 bool shouldWarp() const
1393 {
1394 return tempoPosition && ! fadeDesc.isEmpty();
1395 }
1396
1397 std::pair<BeatRange, TimeRange> warpTimeRanges (BeatRange br, TimeRange tr)
1398 {
1399 assert (shouldWarp());
1400 tr = { warpTimePosition (tr.getStart()),
1401 warpTimePosition (tr.getEnd()) };
1402
1403 tempoPosition->set (tr.getStart());
1404 const auto startBeat = tempoPosition->getBeats();
1405 tempoPosition->set (tr.getEnd());
1406 const auto endBeat = tempoPosition->getBeats();
1407 br = { startBeat, endBeat };
1408
1409 return { br, tr };
1410 }
1411
1412 TimePosition warpTimePosition (TimePosition tp)
1413 {
1414 if (! fadeDesc.inTimeRange.isEmpty()
1415 && fadeDesc.inTimeRange.containsInclusive (tp))
1416 {
1417 const auto prop = (tp - fadeDesc.inTimeRange.getStart()) / fadeDesc.inTimeRange.getLength();
1418 const auto newProp = rescale (fadeDesc.fadeInType, prop, true);
1419 return fadeDesc.inTimeRange.getStart() + (fadeDesc.inTimeRange.getLength() * newProp);
1420 }
1421 else if (! fadeDesc.outTimeRange.isEmpty()
1422 && fadeDesc.outTimeRange.containsInclusive (tp))
1423 {
1424 const auto prop = (tp - fadeDesc.outTimeRange.getStart()) / fadeDesc.outTimeRange.getLength();
1425 const auto newProp = rescale (fadeDesc.fadeOutType, prop, false);
1426 return fadeDesc.outTimeRange.getStart() + (fadeDesc.outTimeRange.getLength() * newProp);
1427 }
1428
1429 return tp;
1430 }
1431
// Remaps a 0..1 position proportion through the chosen fade curve for
// speed-fade warping. rampUp selects the fade-in (speeding up) or fade-out
// (slowing down) direction of each curve.
// NOTE(review): these expressions appear to be the integrals of the
// corresponding speed curves (a position warp is the integral of a speed
// ramp), which would explain why the 'linear' case is quadratic — presumably
// intentional; verify against the original derivation.
1432 static double rescale (AudioFadeCurve::Type t, double proportion, bool rampUp)
1433 {
1434 switch (t)
1435 {
1436 case AudioFadeCurve::convex:
1437 return rampUp ? (-2.0 * std::cos ((juce::MathConstants<double>::pi * proportion) / 2.0)) / juce::MathConstants<double>::pi + 1.0
1438 : 1.0 - ((-2.0 * std::cos ((juce::MathConstants<double>::pi * (proportion - 1.0)) / 2.0)) / juce::MathConstants<double>::pi + 1.0);
1439
1440 case AudioFadeCurve::concave:
1441 return rampUp ? proportion - (2.0 * std::sin ((juce::MathConstants<double>::pi * proportion) / 2.0)) / juce::MathConstants<double>::pi + (2.0 / juce::MathConstants<double>::pi)
1442 : ((2.0 * std::sin ((juce::MathConstants<double>::pi * (proportion + 1.0)) / 2.0)) / juce::MathConstants<double>::pi) + proportion - (2.0 / juce::MathConstants<double>::pi);
1443
1444 case AudioFadeCurve::sCurve:
1445 return rampUp ? (proportion / 2.0) - (std::sin (juce::MathConstants<double>::pi * proportion) / (2.0 * juce::MathConstants<double>::pi)) + 0.5
1446 : std::sin (juce::MathConstants<double>::pi * proportion) / (2.0 * juce::MathConstants<double>::pi) + (proportion / 2.0);
1447
// Unknown curve types deliberately fall through to the linear mapping.
1448 case AudioFadeCurve::linear:
1449 default:
1450 return rampUp ? (juce::square (proportion) * 0.5) + 0.5
1451 : ((-juce::square (proportion - 1.0)) * 0.5) + 0.5;
1452 }
1453 }
1454};
1455
1456
1457//==============================================================================
1458//==============================================================================
1460{
1461 PerChannelState() { resampler.reset(); }
1462
1464 float lastSample = 0;
1465};
1466
1467
1468//==============================================================================
1470 TimeRange editTime,
1471 TimeDuration off,
1472 TimeRange loop,
1473 LiveClipLevel level,
1474 double speed,
1475 const juce::AudioChannelSet& channelSetToUse,
1476 const juce::AudioChannelSet& destChannelsToFill,
1477 ProcessState& ps,
1478 EditItemID itemIDToUse,
1479 bool isRendering)
1480 : TracktionEngineNode (ps),
1481 editPosition (editTime),
1482 loopSection (TimePosition::fromSeconds (loop.getStart().inSeconds() * speed),
1483 TimePosition::fromSeconds (loop.getEnd().inSeconds() * speed)),
1484 offset (off),
1485 originalSpeedRatio (speed),
1486 editItemID (itemIDToUse),
1487 isOfflineRender (isRendering),
1488 audioFile (af),
1489 clipLevel (level),
1490 channelsToUse (channelSetToUse),
1491 destChannels (destChannelsToFill)
1492{
1493}
1494
1496{
1498 props.hasAudio = true;
1499 props.hasMidi = false;
1500 props.numberOfChannels = destChannels.size();
1501 props.nodeID = (size_t) editItemID.getRawID();
1502
1503 return props;
1504}
1505
1507{
1508 reader = audioFile.engine->getAudioFileManager().cache.createReader (audioFile);
1509 outputSampleRate = info.sampleRate;
1510 editPositionInSamples = tracktion::toSamples ({ editPosition.getStart(), editPosition.getEnd() }, outputSampleRate);
1511 updateFileSampleRate();
1512
1513 const int numChannelsToUse = std::max (channelsToUse.size(), reader != nullptr ? reader->getNumChannels() : 0);
1514 replaceChannelStateIfPossible (info.nodeGraphToReplace, numChannelsToUse);
1515
1516 if (! channelState)
1517 {
1519
1520 if (reader != nullptr)
1521 for (int i = numChannelsToUse; --i >= 0;)
1522 channelState->add (new PerChannelState());
1523 }
1524}
1525
1527{
1528 // Only check this whilst rendering or it will block whilst the proxies are being created
1529 if (! isOfflineRender)
1530 return true;
1531
1532 // If the hash is 0 it means an empty file path which means a missing file so
1533 // this will never return a valid reader and we should just bail
1534 if (audioFile.isNull())
1535 return true;
1536
1537 if (reader == nullptr)
1538 {
1539 reader = audioFile.engine->getAudioFileManager().cache.createReader (audioFile);
1540
1541 if (reader == nullptr)
1542 return false;
1543 }
1544
1545 if (audioFileSampleRate == 0.0 && ! updateFileSampleRate())
1546 return false;
1547
1548 return true;
1549}
1550
1552{
1553 SCOPED_REALTIME_CHECK
1554 assert (outputSampleRate == getSampleRate());
1555
1556 //TODO: Might get a performance boost by pre-setting the file position in prepareForNextBlock
1557 processSection (pc, getTimelineSampleRange());
1558}
1559
1560//==============================================================================
1561int64_t WaveNode::editPositionToFileSample (int64_t timelinePosition) const noexcept
1562{
1563 // Convert timelinePosition in samples to edit time
1564 return editTimeToFileSample (TimePosition::fromSamples (timelinePosition, outputSampleRate));
1565}
1566
1567int64_t WaveNode::editTimeToFileSample (TimePosition editTime) const noexcept
1568{
1569 return (int64_t) ((editTime - toDuration (editPosition.getStart() - offset)).inSeconds()
1570 * originalSpeedRatio * audioFileSampleRate + 0.5);
1571}
1572
1573bool WaveNode::updateFileSampleRate()
1574{
1575 using namespace tracktion::graph;
1576
1577 if (reader == nullptr)
1578 return false;
1579
1580 audioFileSampleRate = reader->getSampleRate();
1581
1582 if (audioFileSampleRate <= 0)
1583 return false;
1584
1585 if (! loopSection.isEmpty())
1586 reader->setLoopRange ({ tracktion::toSamples (loopSection.getStart(), audioFileSampleRate),
1587 tracktion::toSamples (loopSection.getEnd(), audioFileSampleRate) });
1588
1589 return true;
1590}
1591
1592void WaveNode::replaceChannelStateIfPossible (NodeGraph* nodeGraphToReplace, int numChannelsToUse)
1593{
1594 const auto nodeID = (size_t) editItemID.getRawID();
1595 assert (getNodeProperties().nodeID == nodeID);
1596
1597 if (auto oldWaveNode = findNodeWithIDIfNonZero<WaveNode> (nodeGraphToReplace, nodeID))
1598 replaceChannelStateIfPossible (*oldWaveNode, numChannelsToUse);
1599}
1600
1601void WaveNode::replaceChannelStateIfPossible (WaveNode& other, int numChannelsToUse)
1602{
1603 if (other.editItemID != editItemID)
1604 return;
1605
1606 if (! other.channelState)
1607 return;
1608
1609 if (other.channelState->size() == numChannelsToUse)
1610 channelState = other.channelState;
1611}
1612
// Renders one block: reads the relevant file section, resamples it into the
// destination buffer with clip gain/pan applied, performs short cross-fades
// at discontinuities, and silences frames outside the clip's edit range.
1613void WaveNode::processSection (ProcessContext& pc, juce::Range<int64_t> timelineRange)
1614{
1615 const auto sectionEditTime = tracktion::timeRangeFromSamples (timelineRange, outputSampleRate);
1616
// Nothing to do if there's no reader or this block doesn't intersect the clip.
1617 if (reader == nullptr
1618 || sectionEditTime.getEnd() <= editPosition.getStart()
1619 || sectionEditTime.getStart() >= editPosition.getEnd())
1620 return;
1621
1622 SCOPED_REALTIME_CHECK
1623
1624 if (audioFileSampleRate == 0.0 && ! updateFileSampleRate())
1625 return;
1626
// Work out which file samples correspond to this block of edit time.
1627 const auto fileStart = editTimeToFileSample (sectionEditTime.getStart());
1628 const auto fileEnd = editTimeToFileSample (sectionEditTime.getEnd());
1629 const auto numFileSamples = (int) (fileEnd - fileStart);
1630
1631 reader->setReadPosition (fileStart);
1632
1633 auto destBuffer = pc.buffers.audio;
1634 auto numFrames = destBuffer.getNumFrames();
1635 const auto destBufferChannels = juce::AudioChannelSet::canonicalChannelSet ((int) destBuffer.getNumChannels());
1636 auto numChannels = (choc::buffer::ChannelCount) destBufferChannels.size();
1637 assert (pc.buffers.audio.getNumChannels() == numChannels);
1638
// +2 frames of headroom for the interpolator's look-ahead.
1639 AudioScratchBuffer fileData ((int) numChannels, numFileSamples + 2);
1640
1641 uint32_t lastSampleFadeLength = 0;
1642
1643 if (numFileSamples > 0)
1644 {
1645 SCOPED_REALTIME_CHECK
1646
// Offline renders are allowed to block much longer (5000ms) waiting for the
// cache; real-time playback gives up after 3ms and fades out instead.
1647 if (reader->readSamples (numFileSamples + 2, fileData.buffer, destBufferChannels, 0,
1648 channelsToUse,
1649 isOfflineRender ? 5000 : 3))
1650 {
// After a playhead jump (non-contiguous block), fade from the previous
// block's last sample over up to 40 frames to avoid a click.
1651 if (! getPlayHeadState().isContiguousWithPreviousBlock() && ! getPlayHeadState().isFirstBlockOfLoop())
1652 lastSampleFadeLength = std::min (numFrames, 40u);
1653 }
1654 else
1655 {
// Read failed/timed out: output silence but still fade from the last sample.
1656 lastSampleFadeLength = std::min (numFrames, 40u);
1657 fileData.buffer.clear();
1658 }
1659 }
1660 else
1661 {
// No file samples for this block: just ramp the previous output down to
// silence, reset the per-channel resampler state and bail out.
1662 lastSampleFadeLength = std::min (numFrames, 40u);
1663
1664 for (choc::buffer::ChannelCount channel = 0; channel < numChannels; ++channel)
1665 {
1666 if (channel >= (choc::buffer::ChannelCount) channelState->size())
1667 continue;
1668
1669 auto& state = *channelState->getUnchecked ((int) channel);
1670
// NOTE(review): exact float compare — fine as a "nothing to fade" check
// since the value is explicitly set to 0.0 below, but worth confirming.
1671 if (state.lastSample == 0.0)
1672 continue;
1673
1674 const auto dest = destBuffer.getIterator (channel).sample;
1675
1676 for (uint32_t i = 0; i < lastSampleFadeLength; ++i)
1677 {
1678 auto alpha = i / (float) lastSampleFadeLength;
1679 dest[i] = state.lastSample * (1.0f - alpha);
1680 }
1681 }
1682
1683 for (auto state : *channelState)
1684 {
1685 state->resampler.reset();
1686 state->lastSample = 0.0;
1687 }
1688
1689 return;
1690 }
1691
// File-to-output resampling ratio for this block.
1692 auto ratio = numFileSamples / (double) numFrames;
1693
1694 if (ratio <= 0.0)
1695 return;
1696
1697 float gains[2];
1698
1699 // For stereo, use the pan, otherwise ignore it
1700 if (numChannels == 2)
1701 clipLevel.getLeftAndRightGains (gains[0], gains[1]);
1702 else
1703 gains[0] = gains[1] = clipLevel.getGainIncludingMute();
1704
// NOTE(review): the guard condition on source line 1705 is missing from this
// extract — presumably a scrub/drag check that attenuates output; confirm
// against the original file.
1706 {
1707 gains[0] *= 0.4f;
1708 gains[1] *= 0.4f;
1709 }
1710
1711 jassert (numChannels <= (choc::buffer::ChannelCount) channelState->size()); // this should always have been made big enough
1712
// Resample each channel into the destination, adding with per-channel gain,
// then apply the discontinuity cross-fade and remember the final sample.
1713 for (choc::buffer::ChannelCount channel = 0; channel < numChannels; ++channel)
1714 {
1715 if (channel < (choc::buffer::ChannelCount) channelState->size())
1716 {
1717 const auto src = fileData.buffer.getReadPointer ((int) channel);
1718 const auto dest = destBuffer.getIterator (channel).sample;
1719
1720 auto& state = *channelState->getUnchecked ((int) channel);
1721 state.resampler.processAdding (ratio, src, dest, (int) numFrames, gains[channel & 1]);
1722
1723 if (lastSampleFadeLength > 0)
1724 {
1725 for (uint32_t i = 0; i < lastSampleFadeLength; ++i)
1726 {
1727 auto alpha = i / (float) lastSampleFadeLength;
1728 dest[i] = alpha * dest[i] + state.lastSample * (1.0f - alpha);
1729 }
1730 }
1731
1732 state.lastSample = dest[numFrames - 1];
1733 }
1734 else
1735 {
// Channels beyond the tracked state count are silenced.
1736 destBuffer.getChannel (channel).clear();
1737 }
1738 }
1739
1740 // Silence any samples before or after our edit time range
1741 // N.B. this shouldn't happen when using a clip combiner as the times should be clipped correctly
1742 {
1743 auto numSamplesToClearAtStart = std::min (editPositionInSamples.getStart() - timelineRange.getStart(), (SampleCount) destBuffer.getNumFrames());
1744 auto numSamplesToClearAtEnd = std::min (timelineRange.getEnd() - editPositionInSamples.getEnd(), (SampleCount) destBuffer.getNumFrames());
1745
1746 if (numSamplesToClearAtStart > 0)
1747 destBuffer.getStart ((choc::buffer::FrameCount) numSamplesToClearAtStart).clear();
1748
1749 if (numSamplesToClearAtEnd > 0)
1750 destBuffer.getEnd ((choc::buffer::FrameCount) numSamplesToClearAtEnd).clear();
1751 }
1752}
1753
1754
1755//==============================================================================
1756//==============================================================================
1758 TimeRange editTime,
1759 TimeDuration off,
1760 TimeRange loop,
1761 LiveClipLevel level,
1762 double speed,
1763 const juce::AudioChannelSet& channelSetToUse,
1764 const juce::AudioChannelSet& destChannelsToFill,
1765 ProcessState& ps,
1766 EditItemID itemIDToUse,
1767 bool isRendering,
1768 ResamplingQuality resamplingQualityToUse,
1769 SpeedFadeDescription speedDesc,
1773 float pitchChange,
1774 ReadAhead readAhead_)
1775 : TracktionEngineNode (ps),
1776 editPositionTime (editTime),
1777 loopSectionTime (loop.rescaled (loop.getStart(), speed)),
1778 offsetTime (off),
1779 speedRatio (speed),
1780 editItemID (itemIDToUse),
1781 isOfflineRender (isRendering),
1782 resamplingQuality (resamplingQualityToUse),
1783 audioFile (af),
1784 speedFadeDescription (std::move (speedDesc)),
1785 editTempoSequence (std::move (editTempoSeq)),
1786 timeStretcherMode (utils::replaceElastiqueWithDirectModeIfNotRendering (mode, isRendering)),
1787 elastiqueProOptions (options),
1788 clipLevel (level),
1789 channelsToUse (channelSetToUse),
1790 destChannels (destChannelsToFill),
1791 pitchChangeSemitones (pitchChange),
1792 readAhead (readAhead_)
1793{
1794 // This won't work with invalid or non-existent files!
1795 jassert (! audioFile.isNull());
1796
1797 auto removeRoundingError = [] (auto d) { return static_cast<float> (d.inSeconds()); };
1798 hash_combine (stateHash, removeRoundingError (editPositionTime.getStart()));
1799 hash_combine (stateHash, removeRoundingError (editPositionTime.getEnd()));
1800 hash_combine (stateHash, removeRoundingError (loopSectionTime.getStart()));
1801 hash_combine (stateHash, removeRoundingError (loopSectionTime.getEnd()));
1802 hash_combine (stateHash, removeRoundingError (offsetTime));
1803 hash_combine (stateHash, speedRatio);
1804 hash_combine (stateHash, editItemID.getRawID());
1805 hash_combine (stateHash, channelsToUse.size());
1806 hash_combine (stateHash, destChannels.size());
1807 hash_combine (stateHash, audioFile.getHash());
1808 hash_combine (stateHash, resamplingQualityToUse);
1809 hash_combine (stateHash, speedFadeDescription);
1810 hash_combine (stateHash, static_cast<int> (timeStretcherMode));
1811 hash_combine (stateHash, elastiqueProOptions.toString().hashCode());
1812 hash_combine (stateHash, pitchChangeSemitones);
1813}
1814
1818 BeatRange editTime,
1819 BeatDuration off,
1820 BeatRange loop,
1821 LiveClipLevel level,
1822 const juce::AudioChannelSet& channelSetToUse,
1823 const juce::AudioChannelSet& destChannelsToFill,
1824 ProcessState& ps,
1825 EditItemID itemIDToUse,
1826 bool isRendering,
1827 ResamplingQuality resamplingQualityToUse,
1828 SpeedFadeDescription speedDesc,
1831 tempo::Sequence sourceFileTempoMap,
1832 SyncTempo syncTempo_,
1833 SyncPitch syncPitch_,
1834 std::optional<tempo::Sequence> chordPitchSequence_,
1835 float pitchChange,
1836 ReadAhead readAhead_)
1837 : TracktionEngineNode (ps),
1838 editPositionBeats (editTime),
1839 loopSectionBeats (loop),
1840 offsetBeats (off),
1841 editItemID (itemIDToUse),
1842 isOfflineRender (isRendering),
1843 resamplingQuality (resamplingQualityToUse),
1844 audioFile (af),
1845 speedFadeDescription (std::move (speedDesc)),
1846 editTempoSequence (std::move (editTempoSeq)),
1847 warpMap (std::move (warp)),
1848 timeStretcherMode (utils::replaceElastiqueWithDirectModeIfNotRendering (mode, isRendering)),
1849 elastiqueProOptions (options),
1850 clipLevel (level),
1851 channelsToUse (channelSetToUse),
1852 destChannels (destChannelsToFill),
1853 pitchChangeSemitones (pitchChange),
1854 readAhead (readAhead_)
1855{
1856 syncTempo = syncTempo_;
1857 syncPitch = syncPitch_;
1858
1859 fileTempoSequence = std::make_shared<tempo::Sequence> (std::move (sourceFileTempoMap));
1860 fileTempoPosition = std::make_shared<tempo::Sequence::Position> (*fileTempoSequence);
1861
1862 if (chordPitchSequence_)
1863 {
1864 chordPitchSequence = std::make_shared<tempo::Sequence> (*chordPitchSequence_);
1865 chordPitchPosition = std::make_shared<tempo::Sequence::Position> (*chordPitchSequence);
1866 }
1867
1868 // This won't work with invalid or non-existent files!
1869 jassert (! audioFile.isNull());
1870
1871 auto removeRoundingError = [] (auto d) { return static_cast<float> (d.inBeats()); };
1872 hash_combine (stateHash, removeRoundingError (editPositionBeats.getStart()));
1873 hash_combine (stateHash, removeRoundingError (editPositionBeats.getEnd()));
1874 hash_combine (stateHash, removeRoundingError (loopSectionBeats.getStart()));
1875 hash_combine (stateHash, removeRoundingError (loopSectionBeats.getEnd()));
1876 hash_combine (stateHash, removeRoundingError (offsetBeats));
1877 hash_combine (stateHash, editItemID.getRawID());
1878 hash_combine (stateHash, channelsToUse.size());
1879 hash_combine (stateHash, destChannels.size());
1880 hash_combine (stateHash, audioFile.getHash());
1881 hash_combine (stateHash, resamplingQualityToUse);
1882 hash_combine (stateHash, speedFadeDescription);
1883
1884 if (warpMap)
1885 hash_combine (stateHash, *warpMap);
1886
1887 hash_combine (stateHash, static_cast<int> (timeStretcherMode));
1888 hash_combine (stateHash, elastiqueProOptions.toString().hashCode());
1889
1890 hash_combine (stateHash, fileTempoSequence->hash());
1891 hash_combine (stateHash, syncTempo);
1892 hash_combine (stateHash, syncPitch);
1893
1894 if (chordPitchSequence)
1895 hash_combine (stateHash, chordPitchSequence->hash());
1896
1897 hash_combine (stateHash, pitchChangeSemitones);
1898}
1899
1900//==============================================================================
1902{
1903 if (juce::approximatelyEqual (dynamicOffsetBeats->inBeats(), newOffset.inBeats()))
1904 return;
1905
1906 (*dynamicOffsetBeats) = newOffset;
1907 isFirstBlock = true;
1908}
1909
1910//==============================================================================
1912{
1914 props.hasAudio = true;
1915 props.hasMidi = false;
1916 props.numberOfChannels = destChannels.size();
1917 props.nodeID = (size_t) editItemID.getRawID();
1918
1919 return props;
1920}
1921
1923{
1924 outputSampleRate = info.sampleRate;
1925 outputBlockSize = info.blockSize;
1926
1927 replaceStateIfPossible (info.nodeGraphToReplace);
1928 buildAudioReaderGraph();
1929}
1930
1932{
1933 // Only check this whilst rendering or it will block whilst the proxies are being created
1934 if (! isOfflineRender)
1935 return true;
1936
1937 // If the hash is 0 it means an empty file path which means a missing file so
1938 // this will never return a valid reader and we should just bail
1939 if (audioFile.isNull())
1940 return true;
1941
1942 return buildAudioReaderGraph();
1943}
1944
1946{
1947 SCOPED_REALTIME_CHECK
1948 assert (outputSampleRate == getSampleRate());
1949
1950 //TODO: Might get a performance boost by pre-setting the file position in prepareForNextBlock
1951 processSection (pc);
1952}
1953
1954//==============================================================================
1955bool WaveNodeRealTime::buildAudioReaderGraph()
1956{
1957 if (editReader)
1958 return true;
1959
1960 AudioFileCache::Reader::Ptr fileCacheReader;
1961
1962 // Try creating a MemoryMappedFileReader first for compressed formats
1963 if (audioFile.getInfo().needsCachedProxy)
1964 {
1965 if (auto bufferedFileReader = audioFile.engine->getBufferedAudioFileManager().get (audioFile.getFile()))
1966 {
1967 fileCacheReader = audioFile.engine->getAudioFileManager().cache.createFallbackReader ([&bufferedFileReader] (juce::TimeSliceThread&, int) mutable -> std::unique_ptr<FallbackReader>
1968 {
1969 return std::make_unique<BufferedFileReaderWrapper> (std::move (bufferedFileReader));
1970 });
1971 }
1972 }
1973
1974 if (! fileCacheReader)
1975 fileCacheReader = audioFile.engine->getAudioFileManager().cache.createReader (audioFile);
1976
1977 if (fileCacheReader == nullptr || fileCacheReader->getSampleRate() == 0.0)
1978 return false;
1979
1980 auto audioFileCacheReader = std::make_unique<AudioFileCacheReader> (std::move (fileCacheReader), isOfflineRender ? 5s : 0ms,
1981 destChannels, channelsToUse);
1983
1984 if (warpMap)
1985 {
1986 // If we're using a warp map, the looping as to be applied above the warp so the loop times don't get warped
1987 // This can have performance hits though
1988 loopReader = std::make_unique<WarpReader> (std::move (audioFileCacheReader), std::move (*warpMap), timeStretcherMode, elastiqueProOptions);
1989
1990 if (! loopSectionTime.isEmpty())
1991 loopReader = std::make_unique<LoopReader> (std::move (loopReader), loopSectionTime);
1992 }
1993 else
1994 {
1995 audioFileCacheReader->setLoopRange (loopSectionTime);
1996 loopReader = std::move (audioFileCacheReader);
1997 }
1998
1999 const bool timestretchDisabled = timeStretcherMode == TimeStretcher::Mode::disabled;
2000 std::unique_ptr<ResamplerReader> resamplerAudioReader;
2001
2002 if (resamplingQuality == ResamplingQuality::lagrange)
2003 resamplerAudioReader = std::make_unique<LagrangeResamplerReader> (std::move (loopReader), outputSampleRate);
2004 else
2005 resamplerAudioReader = std::make_unique<HighQualityResamplerReader> (std::move (loopReader), outputSampleRate, resamplingQuality);
2006
2007 resamplerReader = resamplerAudioReader.get();
2009
2010 if (! timestretchDisabled)
2011 {
2012 if (readAhead == ReadAhead::no)
2013 timeStretchReader = std::make_unique<TimeStretchReader> (std::move (resamplerAudioReader), timeStretcherMode, elastiqueProOptions);
2014 else
2015 timeStretchReader = std::make_unique<ReadAheadTimeStretchReader> (std::move (resamplerAudioReader), timeStretcherMode, elastiqueProOptions, outputBlockSize);
2016 }
2017
2018 auto timeStretcher = timeStretchReader.get();
2019 std::unique_ptr<TimeRangeReader> timeRangeReader;
2020 std::unique_ptr<EditReader> basicEditReader;
2021
2022 if (syncPitch == SyncPitch::yes)
2023 {
2024 assert (fileTempoSequence);
2025 auto pitchAdjuster = std::make_unique<PitchAdjustReader> (std::move (timeStretchReader), timeStretchReader.get(), *fileTempoSequence);
2026 pitchAdjustReader = pitchAdjuster.get();
2027 timeRangeReader = std::make_unique<TimeRangeReader> (std::move (pitchAdjuster), timeStretcher);
2028 }
2029 else
2030 {
2031 if (timestretchDisabled)
2032 {
2033 timeRangeReader = std::make_unique<TimeRangeReader> (std::move (resamplerAudioReader));
2034 }
2035 else
2036 {
2037 if (pitchChangeSemitones != 0.0f)
2038 {
2039 auto pitchAdjuster = std::make_unique<PitchAdjustReader> (std::move (timeStretchReader), timeStretchReader.get(), pitchChangeSemitones);
2040 pitchAdjustReader = pitchAdjuster.get();
2041 timeRangeReader = std::make_unique<TimeRangeReader> (std::move (pitchAdjuster), timeStretcher);
2042 }
2043 else
2044 {
2045 timeRangeReader = std::make_unique<TimeRangeReader> (std::move (timeStretchReader), timeStretcher);
2046 }
2047 }
2048 }
2049
2050 if (syncTempo == SyncTempo::yes || syncPitch == SyncPitch::yes)
2051 {
2052 assert (fileTempoSequence);
2053 auto beatRangeReader = std::make_unique<BeatRangeReader> (std::move (timeRangeReader),
2054 loopSectionBeats, offsetBeats, dynamicOffsetBeats, *fileTempoPosition);
2055 auto editToClipBeatReader = std::make_unique<EditToClipBeatReader> (std::move (beatRangeReader), editPositionBeats, dynamicOffsetBeats);
2056 basicEditReader = std::make_unique<EditReader> (std::move (editToClipBeatReader), nullptr);
2057 }
2058 else
2059 {
2060 auto editToClipTimeReader = std::make_unique<EditToClipTimeReader> (std::move (timeRangeReader), editPositionTime, offsetTime, speedRatio);
2061 basicEditReader = std::make_unique<EditReader> (nullptr, std::move (editToClipTimeReader));
2062 }
2063
2064 editReader = std::make_shared<SpeedFadeEditReader> (std::move (basicEditReader), speedFadeDescription, editTempoSequence);
2065
2066 if (! channelState)
2067 {
2068 channelState = std::make_shared<std::vector<float>>();
2069 const int numChannelsToUse = std::max (channelsToUse.size(), (int) (editReader->getNumChannels()));
2070
2071 for (int i = numChannelsToUse; --i >= 0;)
2072 channelState->emplace_back (0.0f);
2073 }
2074
2075 // If we've just created a new reader, this will be the first
2076 // block with it in so we need to fade the block in (unless we're rendering)
2077 isFirstBlock = ! isOfflineRender;
2078
2079 return true;
2080}
2081
2082void WaveNodeRealTime::replaceStateIfPossible (NodeGraph* nodeGraphToReplace)
2083{
2084 if (nodeGraphToReplace == nullptr)
2085 return;
2086
2087 if (stateHash == 0)
2088 return;
2089
2090 assert (getNodeProperties().nodeID == (size_t) editItemID.getRawID());
2091
2092 if (auto oldWaveNode = findNodeWithID<WaveNodeRealTime> (*nodeGraphToReplace, (size_t) editItemID.getRawID()))
2093 replaceStateIfPossible (*oldWaveNode);
2094}
2095
2096void WaveNodeRealTime::replaceStateIfPossible (WaveNodeRealTime& other)
2097{
2098 if (other.editItemID != editItemID)
2099 return;
2100
2101 // This will be used to fade out the last block if the state hash is different
2102 channelState = other.channelState;
2103
2104 if (other.stateHash != stateHash)
2105 return;
2106
2107 fileTempoSequence = other.fileTempoSequence;
2108 fileTempoPosition = other.fileTempoPosition;
2109 resamplerReader = other.resamplerReader;
2110 editReader = other.editReader;
2111 pitchAdjustReader = other.pitchAdjustReader;
2112 dynamicOffsetBeats = other.dynamicOffsetBeats;
2113}
2114
/** Renders one block of audio for this clip: reads through the reader stack in
    to the destination buffer, applies clip gain/pan, and crossfades block
    boundaries (first block, playhead jumps, failed reads) to avoid clicks.
*/
void WaveNodeRealTime::processSection (ProcessContext& pc)
{
    const auto sectionEditBeats = getEditBeatRange();
    const auto sectionEditTime = getEditTimeRange();

    // Check that the number of channels requested matches the destination buffer num channels
    assert (destChannels.size() == (int) pc.buffers.audio.getNumChannels());

    if (editReader == nullptr)
        return;

    // Early-out if this block lies entirely outside the clip's edit position
    // (time-based readers compare times, beat-based readers compare beats
    // shifted by the dynamic offset)
    if (editReader->isTimeBased()
        && (sectionEditTime.getEnd() <= editPositionTime.getStart()
            || sectionEditTime.getStart() >= editPositionTime.getEnd()))
        return;

    if (editReader->isBeatBased()
        && (sectionEditBeats.getEnd() <= (editPositionBeats.getStart() + *dynamicOffsetBeats)
            || sectionEditBeats.getStart() >= (editPositionBeats.getEnd() + *dynamicOffsetBeats)))
        return;

    auto destBuffer = pc.buffers.audio;
    const auto numFrames = destBuffer.getNumFrames();
    const auto numChannels = destBuffer.getNumChannels();

    // Calculate gains
    float gains[2];

    // For stereo, use the pan, otherwise ignore it
    if (numChannels == 2)
        clipLevel.getLeftAndRightGains (gains[0], gains[1]);
    else
        gains[0] = gains[1] = clipLevel.getGainIncludingMute();

    // Duck the level while the user is scrubbing/dragging the playhead
    if (getPlayHead().isUserDragging())
    {
        gains[0] *= 0.4f;
        gains[1] *= 0.4f;
    }

    if (resamplerReader != nullptr)
        resamplerReader->setGains (gains[0], gains[1]);

    // Re-sync the pitch-adjust reader to the key at the start of this block
    if (pitchAdjustReader != nullptr)
        pitchAdjustReader->setKey (getKeyToSyncTo (sectionEditTime.getStart()));

    // Read through the audio stack.
    // A short fade-in (up to 10 frames) is applied on the first block with a
    // freshly created reader; a longer one (up to 40 frames) after a playhead
    // jump or a failed read.
    const auto isContiguous = getPlayHeadState().isContiguousWithPreviousBlock();
    uint32_t lastSampleFadeLength = isFirstBlock ? std::min (numFrames, 10u) : 0;
    isFirstBlock = false;

    if (editReader->read (sectionEditBeats, sectionEditTime, pc.buffers.audio, isContiguous, getPlaybackSpeedRatio()))
    {
        if (! isContiguous && ! getPlayHeadState().isFirstBlockOfLoop())
            lastSampleFadeLength = std::min (numFrames, 40u);
    }
    else
    {
        lastSampleFadeLength = std::min (numFrames, 40u);
        isFirstBlock = true; // Fade in the next block to avoid clicks
    }

    // Crossfade if a fade needs to be applied
    jassert (numChannels <= (choc::buffer::ChannelCount) channelState->size()); // this should always have been made big enough

    for (choc::buffer::ChannelCount channel = 0; channel < numChannels; ++channel)
    {
        if (channel < (choc::buffer::ChannelCount) channelState->size())
        {
            const auto dest = pc.buffers.audio.getIterator (channel).sample;

            // Last sample of the previous block for this channel, used as the
            // crossfade source
            auto& lastSample = (*channelState)[(size_t) channel];

            if (lastSampleFadeLength > 0)
            {
                // Linear ramp from the previous block's last sample in to the
                // freshly read data
                for (uint32_t i = 0; i < lastSampleFadeLength; ++i)
                {
                    auto alpha = i / (float) lastSampleFadeLength;
                    dest[i] = alpha * dest[i] + lastSample * (1.0f - alpha);
                }
            }

            // NOTE(review): assumes numFrames > 0 — dest[numFrames - 1] would
            // read out of bounds for an empty block; confirm callers never
            // process zero-frame sections
            lastSample = dest[numFrames - 1];
        }
        else
        {
            // No saved state for this channel, so just silence it
            destBuffer.getChannel (channel).clear();
        }
    }
}
2205
2206tempo::Key WaveNodeRealTime::getKeyToSyncTo (TimePosition editPosition) const
2207{
2208 if (chordPitchPosition)
2209 {
2210 chordPitchPosition->set (editPosition);
2211 return chordPitchPosition->getKey();
2212 }
2213
2214 return getKey();
2215}
2216
2217}} // namespace tracktion { inline namespace engine
assert
T back(T... args)
void clear() noexcept
const Type * getReadPointer(int channelNumber) const noexcept
const Type *const * getArrayOfReadPointers() const noexcept
int size() const noexcept
static AudioChannelSet JUCE_CALLTYPE canonicalChannelSet(int numChannels)
constexpr ValueType getStart() const noexcept
constexpr ValueType getEnd() const noexcept
constexpr ValueType getLength() const noexcept
int hashCode() const noexcept
Base class for audio based readers that can be chained together.
Reader::Ptr createReader(const AudioFile &)
Creates a Reader to read an AudioFile.
An audio scratch buffer that has pooled storage.
juce::AudioBuffer< float > & buffer
The buffer to use.
AudioFileManager & getAudioFileManager() const
Returns the AudioFileManager instance.
BufferedAudioFileManager & getBufferedAudioFileManager()
Returns the BufferedAudioFileManager instance.
void setSpeedRatio(double newSpeedRatio) override
Sets a ratio to increase or decrease playback speed.
void setGains(float leftGain, float rightGain) override
Sets a l/r gain to apply to channels.
void setGains(float leftGain, float rightGain) override
Sets a l/r gain to apply to channels.
void setSpeedRatio(double newSpeedRatio) override
Sets a ratio to increase or decrease playback speed.
virtual void setGains(float leftGain, float rightGain)=0
Sets a l/r gain to apply to channels.
virtual void setSpeedRatio(double newSpeedRatio)=0
Sets a ratio to increase or decrease playback speed.
Handles time/pitch stretching using various supported libraries.
void initialise(double sourceSampleRate, int samplesPerBlock, int numChannels, Mode, ElastiqueProOptions, bool realtime)
Initialises the TimeStretcher ready to perform timestretching.
int getFramesNeeded() const
Returns the expected number of frames required to generate some output.
void reset()
Resets the TimeStretcher ready for a new set of audio data, maintains mode, speed and pitch ratios.
int getMaxFramesNeeded() const
Returns the maximum number of frames that will ever be returned by getFramesNeeded.
int processData(const float *const *inChannels, int numSamples, float *const *outChannels)
Processes some input frames and fills some output frames with the applied speed ratio and pitch shift...
bool setSpeedAndPitch(float speedRatio, float semitones)
Sets the timestretch speed ratio and semitones pitch shift.
Mode
Holds the various algorithms to which can be used (if enabled).
@ elastiqueDirectPro
Elastique Direct Pro good all round (.
@ elastiquePro
Elastique Pro good all round (.
@ elastiqueDirectEfficient
Elastique Direct lower quality and lower CPU usage.
@ elastiqueDirectMobile
Elastique Direct lower quality and lower CPU usage, optimised for mobile.
@ elastiqueEfficient
Elastique lower quality and lower CPU usage.
@ elastiqueMobile
Elastique lower quality and lower CPU usage, optimised for mobile.
Base class for Nodes that provides information about the current process call.
TimeRange getEditTimeRange() const
Returns the edit time range of the current process block.
double getPlaybackSpeedRatio() const
Returns the playback speed ratio of the current process block.
BeatRange getEditBeatRange() const
Returns the edit beat range of the current process block.
tempo::Key getKey() const
Returns the key of the current process block.
juce::Range< int64_t > getTimelineSampleRange() const
Returns the timeline sample range of the current process block.
double getSampleRate() const
Returns the sample rate of the current process block.
tracktion::graph::PlayHeadState & getPlayHeadState()
Returns the PlayHeadState in use.
tracktion::graph::PlayHead & getPlayHead()
Returns the PlayHead in use.
An Node that plays back a wave file.
bool isReadyToProcess() override
Should return true when this node is ready to be processed.
SyncTempo
Represets whether the file should try and match Edit tempo changes.
graph::NodeProperties getNodeProperties() override
Should return the properties of the node.
void prepareToPlay(const graph::PlaybackInitialisationInfo &) override
Called once before playback begins for each node.
void setDynamicOffsetBeats(BeatDuration) override
Sets an offset to be applied to all times in this node, effectively shifting it forwards or backwards...
SyncPitch
Represets whether the file should try and match Edit pitch changes.
WaveNodeRealTime(const AudioFile &, TimeRange editTime, TimeDuration offset, TimeRange loopSection, LiveClipLevel, double speedRatio, const juce::AudioChannelSet &sourceChannelsToUse, const juce::AudioChannelSet &destChannelsToFill, ProcessState &, EditItemID, bool isOfflineRender, ResamplingQuality=ResamplingQuality::lagrange, SpeedFadeDescription={}, std::optional< tempo::Sequence::Position > editTempoSequence={}, TimeStretcher::Mode=TimeStretcher::Mode::defaultMode, TimeStretcher::ElastiqueProOptions={}, float pitchChangeSemitones=0.0f, ReadAhead=ReadAhead::no)
offset is a time added to the start of the file, e.g.
void process(ProcessContext &) override
Called when the node is to be processed.
ReadAhead
Whether or not to use a background thread to read ahead the time-stretch buffer.
An Node that plays back a wave file.
tracktion::graph::NodeProperties getNodeProperties() override
Should return the properties of the node.
void process(ProcessContext &) override
Called when the node is to be processed.
void prepareToPlay(const tracktion::graph::PlaybackInitialisationInfo &) override
Called once before playback begins for each node.
bool isReadyToProcess() override
Should return true when this node is ready to be processed.
WaveNode(const AudioFile &, TimeRange editTime, TimeDuration offset, TimeRange loopSection, LiveClipLevel, double speedRatio, const juce::AudioChannelSet &sourceChannelsToUse, const juce::AudioChannelSet &destChannelsToFill, ProcessState &, EditItemID, bool isOfflineRender)
offset is a time added to the start of the file, e.g.
Struct to describe a single iteration of a process call.
bool isContiguousWithPreviousBlock() noexcept
Returns true if the play head did not jump and this block is contiguous with the previous block.
bool isUserDragging() const
Returns true if the user is dragging.
T clear(T... args)
T cos(T... args)
T data(T... args)
T emplace_back(T... args)
T empty(T... args)
T fmod(T... args)
T front(T... args)
T get(T... args)
T is_pointer_v
#define jassert(expression)
#define jassertfalse
typedef int
typedef double
T max(T... args)
T min(T... args)
constexpr bool approximatelyEqual(Type a, Type b, Tolerance< Type > tolerance=Tolerance< Type >{} .withAbsolute(std::numeric_limits< Type >::min()) .withRelative(std::numeric_limits< Type >::epsilon()))
constexpr NumericType square(NumericType n) noexcept
bool isPositiveAndBelow(Type1 valueToTest, Type2 upperLimit) noexcept
IntegerType negativeAwareModulo(IntegerType dividend, const IntegerType divisor) noexcept
Interpolators::Lagrange LagrangeInterpolator
choc::buffer::BufferView< SampleType, choc::buffer::SeparateChannelLayout > toBufferView(juce::AudioBuffer< SampleType > &buffer)
Converts a juce::AudioBuffer<SampleType> to a choc::buffer::BufferView.
ResamplingQuality
Specifies a number of resampling qualities that can be used.
@ lagrange
Lagrange interpolation.
@ sincBest
Best quality sinc interpolation provided by libsamplerate.
@ sincFast
Fast sinc interpolation provided by libsamplerate.
@ sincMedium
Medium quality sinc interpolation provided by libsamplerate.
juce::AudioBuffer< float > toAudioBuffer(choc::buffer::ChannelArrayView< float > view)
Creates a juce::AudioBuffer from a choc::buffer::BufferView.
choc::buffer::FrameRange createFrameRange(std::integral auto start, std::integral auto end)
Creates a FrameRange from any integral type.
TimeRange timeRangeFromSamples(juce::Range< int64_t > sampleRange, double sampleRate)
Creates a TimeRange from a range of samples.
T lround(T... args)
T sin(T... args)
T size(T... args)
typedef int64_t
Represents a duration in beats.
constexpr double inBeats() const
Returns the position as a number of beats.
Represents a position in beats.
Represents a duration in real-life time.
constexpr double inSeconds() const
Returns the TimeDuration as a number of seconds.
Represents a position in real-life time.
A Sequence::Position is an iterator through a Sequence.
TimePosition getTime() const
Returns the current time of the Position.
void set(TimePosition)
Sets the Position to a new time.
Type
A enumeration of the curve classes available.
ID for objects of type EditElement - e.g.
Provides a thread-safe way to share a clip's levels with an audio engine without worrying about the C...
float getGainIncludingMute() const noexcept
Returns the clip's gain if the clip is not muted.
void getLeftAndRightGains(float &left, float &right) const noexcept
Reutrns the left and right gains taking in to account mute and pan values.
Holds the state of a process call.
Describes the time and type of the speed fade in/outs.
A set of options that can be used in conjunction with the elastiquePro Mode to fine tune the algorith...
juce::String toString() const
Save the current options as a string.
Holds a graph in an order ready for processing and a sorted map for quick lookups.
Holds some really basic properties of a node.
Passed into Nodes when they are being initialised, to give them useful contextual information that th...
typedef size_t
T tie(T... args)
time