#include "AudioVisualizer.hh"
#include "../../Lib/Audio.hh"

using namespace Vivy;

#define MAXPIXVALUE 7 // Magic scaling factor applied to the FFT magnitude to get a pixel value

AudioVisualizer::AudioVisualizer(AudioContext::StreamPtr stream, QWidget *parent)
    : QWidget(parent)
    , audioStream(stream)
{
    if (!audioStream->isDecoded()) {
        qDebug() << "Need to decode data for stream" << audioStream->getStreamIndex();
        audioStream->decodeData();
    }

    double *decodedData = audioStream->getDecodedData();
    if (decodedData == nullptr)
        throw std::logic_error("the passed stream is not decoded");

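    // One spectrogram column is computed per chunk of `height` samples; consecutive
    // chunks are shifted by `decalage` samples (the hop size), and only the lower
    // half of the FFT bins is kept, hence a width * height / 2 pixel buffer.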
    const size_t size     = audioStream->getDecodedDataSize();
    const size_t height   = audioStream->getDecodedChunkSize();
    const size_t decalage = audioStream->getDecodedDecalage();
    const size_t width    = (size - height) / decalage;
    uchar *pixels         = new uchar[static_cast<size_t>(width * height / 2)]();

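    // chunkData receives one chunk of real samples and is transformed in place by
    // the RDFT; av_rdft_init() takes the transform size as a power of two, hence
    // log2(height) bits.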
    FFTSamplePtr chunkData(
        reinterpret_cast<FFTSample *>(av_malloc_array(2 * height, sizeof(FFTSample))),
        fftSampleDeleter);
    RDFTContextPtr ctx(av_rdft_init(static_cast<int>(log2(static_cast<int>(height))), DFT_R2C),
                       rdftContextDeleter);

    if (!(chunkData && ctx)) {
        delete[] pixels;
        throw std::runtime_error("out of memory");
    }

    /* Compute the image data */

    for (size_t x = 0, i = 0; i < size - height; i += decalage, ++x) {
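        // Apply a Hann window to the chunk to reduce spectral leakage before the FFT.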
        parallel_for (size_t j = 0; j < height; j++) {
            const double curr_dat = decodedData[i + j];
            const double window_modifier =
                (1 - cos(2 * M_PI * static_cast<double>(j) / static_cast<double>(height - 1))) / 2;
            const float value =
                std::clamp(static_cast<float>(window_modifier * curr_dat), -1.0f, 1.0f);
            chunkData[j] = value;
        }

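        // In-place real-to-complex FFT over the windowed chunk.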
        av_rdft_calc(ctx.get(), chunkData.get());

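        // Convert each complex bin to its magnitude and store it as one grayscale
        // pixel in column x of the spectrogram.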
        parallel_for (size_t j = 0; j < height / 2; j++) {
            const float re     = chunkData[j * 2];
            const float im     = chunkData[j * 2 + 1];
            const float mag    = sqrtf(im * im + re * re);
            const size_t index = j * width + x;
            pixels[index]      = static_cast<uchar>(mag * MAXPIXVALUE);
        }
    }

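    // Hand the pixel buffer over to the QImage (freed through pixelsDeleter) and
    // mirror it vertically so the low frequencies end up at the bottom of the image.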
    QImage img = QImage(pixels, static_cast<int>(width), static_cast<int>(height / 2),
                        static_cast<int>(width), QImage::Format_Grayscale8, pixelsDeleter, pixels)
                     .mirrored(false, true);
    printSpectrum(img, audioStream);
}

void
AudioVisualizer::printSpectrum(QImage pixmap, AudioContext::StreamPtr stream) noexcept
{
    TimingScene *timingScene = new TimingScene(pixmap, stream, this);
    TimingView *timingView   = new TimingView(timingScene, pixmap, stream, this);
    TimingParams *params     = new TimingParams(this);

    // The only widget that should stretch to take all the available space is the timing view
    QGridLayout *layout = new QGridLayout;
    layout->addWidget(timingView, 1, 0);
    layout->setColumnStretch(0, 10);
    layout->addWidget(params, 1, 1);
    layout->setColumnStretch(1, 0);
    setLayout(layout);

    connect(params->getZoomSlider(), &QSlider::valueChanged, timingScene->getAxis(),
            &TimingAxis::refreshTicks);
    connect(params->getRebuildSceneButton(), &QPushButton::released, timingScene,
            &TimingScene::rebuildScene);
}