diff --git a/src/UI/AudioVisualizer.cc b/src/UI/AudioVisualizer.cc
index d5e6aa70ed0871a158827401ffa2197d4fafb196..d32a2d5fbe2259312ac03a8df572c49e668158bf 100644
--- a/src/UI/AudioVisualizer.cc
+++ b/src/UI/AudioVisualizer.cc
@@ -10,9 +10,77 @@
 
 using namespace Vivy;
 
-AudioVisualizer::AudioVisualizer(QWidget *parent) noexcept
+#define MAXPIXVALUE 7 // Some magix AV magic stuff
+
+#define CAP_VALUE(_value, _lower, _upper) \
+    { \
+        if (_value > _upper) { \
+            _value = _upper; \
+        } else if (_value < _lower) { \
+            _value = _lower; \
+        } \
+    }
+
+AudioVisualizer::AudioVisualizer(AudioContext::StreamPtr stream, QWidget *parent)
     : QWidget(parent)
 {
+    if (!stream->isDecoded()) {
+        qDebug() << "Need to decode data for stream" << stream->getStreamIndex();
+        stream->decodeData();
+    }
+
+    double *data = stream->getDecodedData();
+    if (data == nullptr)
+        throw std::logic_error("the passed stream is not decoded");
+
+    const size_t size = stream->getDecodedDataSize();
+    const size_t height = stream->getDecodedChunkSize();
+    const size_t decalage = stream->getDecodedDecalage();
+    const size_t width = (size - height) / decalage;
+    uchar *pixels = new uchar[static_cast<size_t>(width * height / 2)]();
+
+    FFTSamplePtr chunkData(
+        reinterpret_cast<FFTSample *>(av_malloc_array(2 * height, sizeof(FFTSample))),
+        fftSampleDeleter);
+    RDFTContextPtr ctx(av_rdft_init((static_cast<int>(log2(static_cast<int>(height)))), DFT_R2C),
+                       rdftContextDeleter);
+
+    if (!pixels) {
+        throw std::runtime_error("out of memory");
+    } else if (!(chunkData && ctx)) {
+        delete[] pixels;
+        throw std::runtime_error("out of memory");
+    }
+
+    /* Compute the image data */
+
+    for (size_t x = 0, i = 0; i < size - height; i += decalage, ++x) {
+#pragma omp parallel for
+        for (size_t j = 0; j < height; j++) {
+            const double curr_dat = data[i + j];
+            const double window_modifier =
+                (1 - cos(2 * M_PI * static_cast<double>(j) / static_cast<double>(height - 1))) / 2;
+            float value = static_cast<float>(window_modifier * curr_dat);
+            CAP_VALUE(value, -1.0f, 1.0f);
+            chunkData[j] = value;
+        }
+
+        av_rdft_calc(ctx.get(), chunkData.get());
+
+#pragma omp parallel for
+        for (size_t j = 0; j < height / 2; j++) {
+            const float im = chunkData[j * 2];
+            const float re = chunkData[j * 2 + 1];
+            const float mag = sqrtf(im * im + re * re);
+            const size_t index = static_cast<size_t>(j * static_cast<ulong>(width) + x);
+            pixels[index] = (unsigned char)(mag)*MAXPIXVALUE;
+        }
+    }
+
+    QImage img = QImage(pixels, static_cast<int>(width), static_cast<int>(height / 2),
+                        static_cast<int>(width), QImage::Format_Grayscale8, pixelsDeleter, pixels)
+                     .mirrored(false, true);
+    printSpectrum(img);
 }
 
 void
@@ -24,20 +92,3 @@ AudioVisualizer::printSpectrum(QImage pixmap) noexcept
     layout->addWidget(timer);
     setLayout(layout);
 }
-
-AudioVisualizer *
-AudioVisualizer::fromFile(const QString &filename)
-{
-    if (filename.isEmpty())
-        return nullptr;
-
-    Utils::RawImageData rawImage(Utils::DecoderOption{}, filename);
-    QImage img = QImage(rawImage.releasePixels(), rawImage.getWidth(), rawImage.getHeight(),
-                        rawImage.getWidth(), QImage::Format_Grayscale8,
-                        Utils::RawImageData::pixelsDeleter, rawImage.releasePixels())
-                     .mirrored(false, true);
-
-    auto *audioVisualizer = new AudioVisualizer;
-    audioVisualizer->printSpectrum(img);
-    return audioVisualizer;
-}
diff --git a/src/UI/AudioVisualizer.hh b/src/UI/AudioVisualizer.hh
index 8a267183e353dca5756b1a215336db8c239fa6b7..3273d285bba08de8865e08be657cf62ba414a483 100644
--- a/src/UI/AudioVisualizer.hh
+++ b/src/UI/AudioVisualizer.hh
@@ -6,6 +6,7 @@
 #endif
 
 #include "TimingView.hh"
+#include "../Lib/Audio.hh"
 
 #include <QWidget>
 #include <QString>
@@ -14,12 +15,26 @@ namespace Vivy
 {
 class AudioVisualizer final : public QWidget {
     Q_OBJECT
+private:
+    static constexpr inline auto fftSampleDeleter = [](FFTSample *ptr) noexcept -> void {
+        if (ptr)
+            av_free(ptr);
+    };
+    static constexpr inline auto rdftContextDeleter = [](RDFTContext *ptr) noexcept -> void {
+        if (ptr)
+            av_rdft_end(ptr);
+    };
+    static constexpr inline auto pixelsDeleter = [](void *ptr) noexcept -> void {
+        if (ptr)
+            delete[](reinterpret_cast<uchar *>(ptr));
+    };
+    using FFTSamplePtr = std::unique_ptr<FFTSample[], decltype(fftSampleDeleter)>;
+    using RDFTContextPtr = std::unique_ptr<RDFTContext, decltype(rdftContextDeleter)>;
+
 public:
-    explicit AudioVisualizer(QWidget *parent = nullptr) noexcept;
+    explicit AudioVisualizer(AudioContext::StreamPtr, QWidget *parent = nullptr);
     ~AudioVisualizer() noexcept = default;
 
-    [[nodiscard("allocated")]] static AudioVisualizer *fromFile(const QString &);
-
 public slots:
     void printSpectrum(QImage) noexcept;
 };
diff --git a/src/UI/VivyDocumentView.cc b/src/UI/VivyDocumentView.cc
index d9a0cc4c8e8ee992d688d4fc8dec50b198d0c294..bd79dc1a7d402303406f2711d998ed41e8387e32 100644
--- a/src/UI/VivyDocumentView.cc
+++ b/src/UI/VivyDocumentView.cc
@@ -53,19 +53,20 @@ VivyDocumentView::loadAudioView() noexcept
         std::shared_ptr<AudioSubDocument> audioDocument = document->getAudioSubDocument();
         qDebug() << "Create an audio vizualizer for the audio sub document"
                  << audioDocument->getFilePath();
-        visualizer = AudioVisualizer::fromFile(audioDocument->getFilePath());
-        if (visualizer == nullptr) {
+
+        if (AudioContext::StreamPtr stream = audioDocument->getDefaultStream();
+            (stream == nullptr) || (visualizer = new AudioVisualizer(stream)) == nullptr) {
             qCritical() << "Failed to create visualizer for" << audioDocument->getFilePath();
             return;
         }
 
         QVBoxLayout *layout = new QVBoxLayout(this);
-        if (visualizer != nullptr) {
-            qDebug() << "Add visualizer to the view";
-            layout->addWidget(visualizer);
-        }
+        layout->addWidget(visualizer);
+
         setLayout(layout);
-    } else {
+    }
+
+    else {
         qDebug() << "The document" << document->getName() << "is not AudioAble";
     }
 }
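
Note (not part of the patch): the new AudioVisualizer constructor builds the spectrogram column by column with FFmpeg's real DFT. For readers unfamiliar with the avfft API, the sketch below condenses that per-column computation into a standalone function. It assumes a decoded buffer of doubles and a power-of-two chunk size; the function name spectrumColumn and its parameters are illustrative only and do not exist in the Vivy code base.

// Standalone sketch of the per-column spectrum computation shown in the diff.
extern "C" {
#include <libavcodec/avfft.h>
#include <libavutil/mem.h>
}

#include <cmath>
#include <cstdint>
#include <vector>

// Turn one chunk of decoded samples into a column of grayscale pixel values,
// mirroring what AudioVisualizer's constructor does for every chunk.
// `samples` must hold at least `chunkSize` doubles; `chunkSize` must be a power of two.
std::vector<uint8_t> spectrumColumn(const double *samples, size_t chunkSize)
{
    RDFTContext *ctx = av_rdft_init(static_cast<int>(std::log2(chunkSize)), DFT_R2C);
    FFTSample *buf   = static_cast<FFTSample *>(av_malloc_array(chunkSize, sizeof(FFTSample)));

    // Hann window, as in the patch's window_modifier.
    for (size_t j = 0; j < chunkSize; ++j) {
        const double hann = (1 - std::cos(2 * M_PI * double(j) / double(chunkSize - 1))) / 2;
        buf[j]            = static_cast<FFTSample>(hann * samples[j]);
    }

    // In-place real-to-complex transform.
    av_rdft_calc(ctx, buf);

    // Magnitude of each frequency bin, scaled like `(unsigned char)(mag) * MAXPIXVALUE`.
    std::vector<uint8_t> column(chunkSize / 2);
    for (size_t j = 0; j < chunkSize / 2; ++j) {
        const float mag = std::hypot(buf[j * 2], buf[j * 2 + 1]);
        column[j]       = static_cast<uint8_t>(static_cast<unsigned char>(mag) * 7 /* MAXPIXVALUE */);
    }

    av_free(buf);
    av_rdft_end(ctx);
    return column;
}

The constructor in the diff slides this computation across the decoded stream, stepping by the decoded "decalage" (hop size) for each column and writing the resulting columns into a Grayscale8 QImage that is handed to printSpectrum().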