Verified commit dee52c94 authored by Kubat

MISC: Remove old big auto-magic functions for decoding audio

parent 6b874c0d
No related branches found
No related tags found
1 merge request: !5 Add AudioContext to AudioSubDocument
@@ -77,9 +77,6 @@ target_precompile_headers(Vivy PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/src/Ass/AssFactory.hh"
"${CMAKE_CURRENT_SOURCE_DIR}/src/Ass/Char.hh"
    # Utils & Misc, will disappear
"${CMAKE_CURRENT_SOURCE_DIR}/src/AudioUtils.hh"
# Libs
"${CMAKE_CURRENT_SOURCE_DIR}/src/Lib/Utils.hh"
"${CMAKE_CURRENT_SOURCE_DIR}/src/Lib/Audio.hh"
......
#include "AudioUtils.hh"
#define MAXPIXVALUE 7 /* Some magic AV stuff */
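/* Clamp _value into the [_lower, _upper] range in place. */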
#define CAP_VALUE(_value, _lower, _upper) \
{ \
if (_value > _upper) { \
_value = _upper; \
} else if (_value < _lower) { \
_value = _lower; \
} \
}
using namespace Vivy::Utils;
uint
DecoderOption::getSampleRate() const noexcept
{
return sampleRate;
}
uint
DecoderOption::getChunkSize() const noexcept
{
return chunkSize;
}
uint
DecoderOption::getOverlap() const noexcept
{
return overlap;
}
uint
DecoderOption::getDecalage() const noexcept
{
return decalage;
}
RawImageData::RawImageData(const DecoderOption &opt, const QString &path)
: chunkData(nullptr, fftSampleDeleter)
, ctx(nullptr, rdftContextDeleter)
{
auto avFrameDeleter = [](AVFrame *ptr) noexcept -> void {
if (ptr)
av_frame_free(&ptr);
};
    auto swrContextDeleter = [](SwrContext *swr) noexcept -> void {
if (swr)
swr_free(&swr);
};
auto codecCtxDeleter = [](AVCodecContext *ptr) noexcept -> void {
if (ptr)
avcodec_free_context(&ptr);
};
AVPacket packet;
AVFormatContext *format = avformat_alloc_context();
    std::unique_ptr<SwrContext, decltype(swrContextDeleter)> swr(swr_alloc(), swrContextDeleter);
std::unique_ptr<AVFrame, decltype(avFrameDeleter)> frame(av_frame_alloc(), avFrameDeleter);
ssize_t stream_index = -1;
AVStream *stream = nullptr;
AVCodecContext *codec_ctx = nullptr;
AVCodecParameters *codec_par = nullptr;
AVCodec *codec = nullptr;
double *data = nullptr;
size_t size = 0;
if (!frame) {
qCritical() << "Error allocating the frame";
throw std::runtime_error("out of memory: failed to allocate the frame");
}
/* Decode audio */
/* Get format from audio file */
{
if (avformat_open_input(&format, path.toStdString().c_str(), NULL, NULL) != 0) {
qCritical() << "Could not open the file" << path;
throw std::runtime_error("failed to open file");
}
qDebug() << "{ Format name:" << format->iformat->name << ", duration:" << format->duration
<< ", bit rate:" << format->bit_rate
<< ", extensions:" << format->iformat->extensions << "}";
if (avformat_find_stream_info(format, NULL) < 0) {
qCritical() << "Could not retrieve stream info from file" << path;
throw std::runtime_error("failed to get audio stream info");
}
}
/* Find the index of the first audio stream */
{
qDebug() << "Got" << format->nb_streams << "streams for file" << path;
for (unsigned int i = 0; i < format->nb_streams; i++) {
/* Logging */
codec_par = format->streams[i]->codecpar;
qDebug() << "AVStream->time_base before open coded" << format->streams[i]->time_base.num
<< "/" << format->streams[i]->time_base.den;
qDebug() << "AVStream->r_frame_rate before open coded"
<< format->streams[i]->r_frame_rate.num << "/"
<< format->streams[i]->r_frame_rate.den;
qDebug() << "AVStream->start_time" << format->streams[i]->start_time;
qDebug() << "AVStream->duration" << format->streams[i]->duration;
if (codec_par->codec_type == AVMEDIA_TYPE_AUDIO) {
stream_index = i;
break;
}
}
if (stream_index == -1) {
qCritical() << "Could not retrieve audio stream from file" << path;
throw std::runtime_error("failed to get autio stream from file");
}
stream = format->streams[stream_index];
}
/* Find & open codec */
codec_par = stream->codecpar;
codec = avcodec_find_decoder(codec_par->codec_id);
if (codec == NULL) {
qCritical() << "Failed to find decoded for stream" << stream_index << "in file" << path;
throw std::runtime_error("failed to find decoder for an audio stream");
}
std::unique_ptr<AVCodecContext, decltype(codecCtxDeleter)> codecCtxHolder(
avcodec_alloc_context3(codec), codecCtxDeleter);
codec_ctx = codecCtxHolder.get();
if ((codec_ctx == nullptr) || (avcodec_parameters_to_context(codec_ctx, codec_par) < 0)) {
qCritical() << "Failed to copy codec params to codec context";
throw std::runtime_error("failed to copy codec params to codec context");
}
if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
qCritical() << "Failed to open decoder for stream" << stream_index << "in file" << path;
throw std::runtime_error("failed to open audio decoder for a stream");
}
qDebug() << "channels:" << codec_par->channels;
qDebug() << "sample_rate:" << codec_par->sample_rate;
qDebug() << "select codec" << codec->name << "(id:" << codec->id << ") and bit_rate"
<< codec_par->bit_rate;
/* Prepare resampler */
{
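        /* Configure the resampler to downmix to mono, convert samples to
         * double precision (AV_SAMPLE_FMT_DBL) and resample to the rate
         * requested by the decoder options. */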
SwrContext *s = swr.get();
av_opt_set_int(s, "in_channel_count", codec_ctx->channels, 0);
av_opt_set_int(s, "out_channel_count", 1, 0);
av_opt_set_int(s, "in_channel_layout", static_cast<int64_t>(codec_ctx->channel_layout), 0);
av_opt_set_int(s, "out_channel_layout", AV_CH_LAYOUT_MONO, 0);
av_opt_set_int(s, "in_sample_rate", codec_ctx->sample_rate, 0);
av_opt_set_int(s, "out_sample_rate", opt.getSampleRate(), 0);
av_opt_set_sample_fmt(s, "in_sample_fmt", codec_ctx->sample_fmt, 0);
av_opt_set_sample_fmt(s, "out_sample_fmt", AV_SAMPLE_FMT_DBL, 0);
swr_init(s);
if (!swr_is_initialized(s)) {
qCritical() << "Resampler has not been properly initialized";
throw std::runtime_error("failed to initialize resampler");
}
}
/* Prepare to read data */
av_init_packet(&packet);
/* Iterate through frames */
while (av_read_frame(format, &packet) >= 0) {
if (packet.stream_index == stream_index) {
/* Decode one frame */
int response = avcodec_send_packet(codec_ctx, &packet);
if (response < 0) [[unlikely]] {
qCritical() << "Error while sending a packet to the decoder";
throw std::runtime_error("error while sending a packet to the decoder");
}
double *buffer = nullptr;
int old_frame_nb_samples = 0;
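/* Drain every frame the decoder produced for this packet; EAGAIN or EOF
 * simply terminates the inner loop. */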
while (response >= 0) {
response = avcodec_receive_frame(codec_ctx, frame.get());
if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) [[unlikely]] {
break;
} else if (response < 0) [[unlikely]] {
qCritical() << "Error while receiving a frame from the decoder";
throw std::runtime_error("error while receiving a frame from the decoder");
}
/* Resample frames */
if (old_frame_nb_samples < frame->nb_samples) {
if (nullptr != buffer)
av_free(buffer);
old_frame_nb_samples = frame->nb_samples;
av_samples_alloc(reinterpret_cast<uint8_t **>(&buffer), nullptr, 1,
frame->nb_samples, AV_SAMPLE_FMT_DBL, 0);
}
if (const int frame_count_int = swr_convert(
swr.get(), reinterpret_cast<uint8_t **>(&buffer), frame->nb_samples,
(const uint8_t **)frame->data, frame->nb_samples);
frame_count_int < 0) [[unlikely]] {
throw std::runtime_error("error on frame count, is negative but should not be");
}
else [[likely]] {
const size_t frame_count = static_cast<size_t>(frame_count_int);
/* Append resampled frames to data */
data = (double *)realloc(data,
(size + (size_t)frame->nb_samples) * sizeof(double));
memcpy(data + size, buffer, frame_count * sizeof(double));
size += frame_count;
}
}
if (buffer)
av_free(buffer);
}
av_packet_unref(&packet);
}
    /* Clean resources */
{
if (codec_ctx != NULL)
avcodec_close(codec_ctx);
if (format != NULL)
avformat_free_context(format);
}
auto dataHolder = std::shared_ptr<double>(data, free);
/* Constructor */
{
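        /* The image has one column per analysis hop and one row per FFT bin;
         * only the first chunkSize / 2 bins hold magnitudes, hence the halved
         * pixel buffer (see getHeight()). */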
width = (static_cast<int>((size - opt.getChunkSize()) / opt.getDecalage()));
height = (static_cast<int>(opt.getChunkSize()));
pixels = std::unique_ptr<uchar[]>(new uchar[static_cast<size_t>(width * height / 2)]());
chunkData.reset(reinterpret_cast<FFTSample *>(
av_malloc_array(2 * opt.getChunkSize(), sizeof(FFTSample))));
ctx.reset(
av_rdft_init((static_cast<int>(log2(static_cast<int>(opt.getChunkSize())))), DFT_R2C));
}
if (!(pixels && chunkData && ctx))
throw std::runtime_error("can't create fft related structures or allocated memory");
/* Compute the image data */
for (size_t x = 0, i = 0; i < size - opt.getChunkSize(); i += opt.getDecalage(), ++x) {
#pragma omp parallel for
for (size_t j = 0; j < opt.getChunkSize(); j++) {
const double curr_dat = data[i + j];
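            /* Hann window: taper the chunk to reduce spectral leakage. */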
const double window_modifier =
0.5 * (1 - cos(2 * M_PI * ((int)j) / (opt.getChunkSize() - 1)));
float value = (float)(window_modifier * curr_dat);
CAP_VALUE(value, -1.0f, 1.0f);
chunkData[j] = value;
}
av_rdft_calc(ctx.get(), chunkData.get());
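        /* The real FFT stores the half-spectrum as interleaved pairs; the
         * magnitude of each bin becomes one pixel of the current column. */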
#pragma omp parallel for
for (size_t j = 0; j < opt.getChunkSize() / 2; j++) {
const float im = chunkData[j * 2];
const float re = chunkData[j * 2 + 1];
const float mag = sqrtf(im * im + re * re);
const size_t index = static_cast<size_t>(j * static_cast<ulong>(width) + x);
pixels[index] = (unsigned char)(mag)*MAXPIXVALUE;
}
}
}
int
RawImageData::getWidth() const noexcept
{
return width;
}
int
RawImageData::getHeight() const noexcept
{
return height / 2;
}
uchar *
RawImageData::releasePixels() noexcept
{
return pixels.release();
}
#ifndef VIVY_AUDIOUTILS_H
#define VIVY_AUDIOUTILS_H
#ifndef __cplusplus
#error "This is a C++ header"
#endif
extern "C" {
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswresample/swresample.h>
#include <libavcodec/avfft.h>
}
#include <QtGlobal>
#include <memory>
namespace Vivy::Utils
{
/* A simple decoder option, see if we need to overload it for different audio
* file types. */
class DecoderOption {
private:
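    /* Fixed analysis parameters: 512-sample chunks overlapping by 128 samples;
     * "decalage" (offset) is the hop between two successive chunks. */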
static constexpr uint chunkSize = 512;
static constexpr uint overlap = 128;
static constexpr uint decalage = chunkSize - overlap;
static constexpr uint sampleRate = 44100;
public:
uint getSampleRate() const noexcept;
uint getChunkSize() const noexcept;
uint getOverlap() const noexcept;
uint getDecalage() const noexcept;
};
class RawImageData final {
private:
int width, height;
std::unique_ptr<uchar[]> pixels;
static constexpr inline auto fftSampleDeleter = [](FFTSample *ptr) noexcept -> void {
if (ptr)
av_free(ptr);
};
static constexpr inline auto rdftContextDeleter = [](RDFTContext *ptr) noexcept -> void {
if (ptr)
av_rdft_end(ptr);
};
using FFTSamplePtr = std::unique_ptr<FFTSample[], decltype(fftSampleDeleter)>;
using RDFTContextPtr = std::unique_ptr<RDFTContext, decltype(rdftContextDeleter)>;
FFTSamplePtr chunkData;
RDFTContextPtr ctx;
public:
explicit RawImageData(const DecoderOption &opt, const QString &path);
int getWidth() const noexcept;
int getHeight() const noexcept;
[[nodiscard("allocated")]] uchar *releasePixels() noexcept;
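    /* Ownership of the pixel buffer is transferred to the caller; pixelsDeleter
     * matches the void(void *) shape of a QImage cleanup function, presumably
     * its intended use. */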
static constexpr inline auto pixelsDeleter = [](void *ptr) noexcept -> void {
if (ptr)
delete[](reinterpret_cast<uchar *>(ptr));
};
};
}
#endif // VIVY_AUDIOUTILS_H
#include "AudioVisualizer.hh"
#include "../AudioUtils.hh"
#include "../Lib/Audio.hh"
#include <QGraphicsPixmapItem>
#include <QLabel>
......