Feature: remove audio_player.cpp

pull/221/head
xufuji456 2 years ago
parent 638ccffda0
commit 31d19d6d0f
  1. 1
      app/src/main/cpp/CMakeLists.txt
  2. 280
      app/src/main/cpp/audio_player.cpp
  3. 17
      app/src/main/cpp/audio_player_jni.cpp
  4. 163
      app/src/main/cpp/ff_audio_player.cpp
  5. 33
      app/src/main/cpp/ff_audio_player.h
  6. 4
      app/src/main/cpp/visualizer/frank_visualizer.cpp
  7. 32
      app/src/main/java/com/frank/ffmpeg/AudioPlayer.java

@@ -25,7 +25,6 @@ add_library( # Sets the name of the library.
ffmpeg_cmd.c
ffmpeg/ffprobe.c
ffmpeg/ffmpeg_hw.c
audio_player.cpp
video_player.cpp
ffmpeg_pusher.cpp
video_filter.c

@@ -1,280 +0,0 @@
//
// Created by frank on 2018/2/1.
//
#include <jni.h>
#include <cstdlib>
#include <unistd.h>
#include "visualizer/frank_visualizer.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavfilter/avfiltergraph.h"
#include "libavutil/opt.h"
#include "ffmpeg_jni_define.h"
#ifdef __cplusplus
}
#endif
#define TAG "AudioPlayer"
#define SLEEP_TIME (1000 * 16)
#define MAX_AUDIO_FRAME_SIZE (48000 * 4)
int filter_again = 0;
int filter_release = 0;
const char *filter_desc = "superequalizer=6b=4:8b=5:10b=5";
FrankVisualizer *mVisualizer;
void fft_callback(JNIEnv *jniEnv, jobject thiz, jmethodID fft_method, int8_t* arg, int samples);
int init_equalizer_filter(const char *filter_description, AVCodecContext *codecCtx, AVFilterGraph **graph,
AVFilterContext **src, AVFilterContext **sink) {
int ret = 0;
char args[512];
AVFilterContext *buffersrc_ctx;
AVFilterContext *buffersink_ctx;
AVRational time_base = codecCtx->time_base;
AVFilterInOut *inputs = avfilter_inout_alloc();
AVFilterInOut *outputs = avfilter_inout_alloc();
const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
AVFilterGraph *filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (!codecCtx->channel_layout)
codecCtx->channel_layout = static_cast<uint64_t>(av_get_default_channel_layout(
codecCtx->channels));
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64 "",
time_base.num, time_base.den, codecCtx->sample_rate,
av_get_sample_fmt_name(codecCtx->sample_fmt), codecCtx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, nullptr, filter_graph);
if (ret < 0) {
LOGE(TAG, "Cannot create buffer source:%d", ret);
goto end;
}
/* buffer audio sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
nullptr, nullptr, filter_graph);
if (ret < 0) {
LOGE(TAG, "Cannot create buffer sink:%d", ret);
goto end;
}
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = nullptr;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = nullptr;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_description,
&inputs, &outputs, nullptr)) < 0) {
LOGE(TAG, "avfilter_graph_parse_ptr error:%d", ret);
goto end;
}
if ((ret = avfilter_graph_config(filter_graph, nullptr)) < 0) {
LOGE(TAG, "avfilter_graph_config error:%d", ret);
goto end;
}
*graph = filter_graph;
*src = buffersrc_ctx;
*sink = buffersink_ctx;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
/**
 * Decode an audio file, run each frame through the equalizer filter graph,
 * hand the converted PCM to an android.media.AudioTrack obtained via JNI
 * reflection, and push FFT data back to Java for the visualizer.
 * Loops until EOF or until release111() sets the global filter_release flag;
 * again111() sets filter_again to hot-swap the filter description mid-play.
 * NOTE(review): relies on deprecated FFmpeg APIs (streams[i]->codec,
 * avcodec_decode_audio4) — presumably FFmpeg 3.x era; confirm before porting.
 */
AUDIO_PLAYER_FUNC(void, play111, jstring input_jstr, jstring filter_jstr) {
    int got_frame = 0, ret;
    AVPacket packet;
    AVFilterGraph *audioFilterGraph;
    AVFilterContext *audioSrcContext;
    AVFilterContext *audioSinkContext;
    const char *input_cstr = env->GetStringUTFChars(input_jstr, nullptr);
    LOGI(TAG, "input url=%s", input_cstr);
    // Overwrites the global default descriptor; released at `end:` below.
    filter_desc = env->GetStringUTFChars(filter_jstr, nullptr);
    LOGE(TAG, "filter_desc=%s", filter_desc);
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    // NOTE(review): the early returns below leak input_cstr/filter_desc and
    // pFormatCtx — they never reach the cleanup at `end:`.
    if (avformat_open_input(&pFormatCtx, input_cstr, nullptr, nullptr) != 0) {
        LOGE(TAG, "Couldn't open the audio file!");
        return;
    }
    if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
        LOGE(TAG, "Couldn't find stream info!");
        return;
    }
    // Locate the first audio stream.
    int i = 0, audio_stream_idx = -1;
    for (; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_idx = i;
            break;
        }
    }
    // NOTE(review): audio_stream_idx can still be -1 here (file without audio);
    // the next line would then index streams[] out of bounds.
    AVCodecContext *codecCtx = pFormatCtx->streams[audio_stream_idx]->codec;
    AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);
    if (codec == nullptr) {
        LOGE(TAG, "Couldn't find audio decoder!");
        return;
    }
    if (avcodec_open2(codecCtx, codec, nullptr) < 0) {
        LOGE(TAG, "Couldn't open audio decoder");
        return;
    }
    AVFrame *frame = av_frame_alloc();
    // Resampler: source format -> signed 16-bit stereo at the source rate,
    // which is what the Java AudioTrack is created for below.
    SwrContext *swrCtx = swr_alloc();
    enum AVSampleFormat in_sample_fmt = codecCtx->sample_fmt;
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    int in_sample_rate = codecCtx->sample_rate;
    int out_sample_rate = in_sample_rate;
    uint64_t in_ch_layout = codecCtx->channel_layout;
    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    swr_alloc_set_opts(swrCtx, (int64_t)out_ch_layout, out_sample_fmt, out_sample_rate,
                       (int64_t)in_ch_layout, in_sample_fmt, in_sample_rate, 0, nullptr);
    swr_init(swrCtx);
    int out_channel = av_get_channel_layout_nb_channels(out_ch_layout);
    jclass player_class = env->GetObjectClass(thiz);
    if (!player_class) {
        LOGE(TAG, "player_class not found...");
    }
    //get AudioTrack by reflection
    jmethodID audio_track_method = env->GetMethodID(player_class, "createAudioTrack",
                                                    "(II)Landroid/media/AudioTrack;");
    if (!audio_track_method) {
        LOGE(TAG, "audio_track_method not found...");
    }
    jobject audio_track = env->CallObjectMethod(thiz, audio_track_method, out_sample_rate, out_channel);
    //call play method
    jclass audio_track_class = env->GetObjectClass(audio_track);
    jmethodID audio_track_play_mid = env->GetMethodID(audio_track_class, "play", "()V");
    env->CallVoidMethod(audio_track, audio_track_play_mid);
    //get write method
    jmethodID audio_track_write_mid = env->GetMethodID(audio_track_class, "write", "([BII)I");
    auto *out_buffer = (uint8_t *) av_malloc(MAX_AUDIO_FRAME_SIZE);
    /* Set up the filter graph. */
    AVFrame *filter_frame = av_frame_alloc();
    ret = init_equalizer_filter(filter_desc, codecCtx, &audioFilterGraph, &audioSrcContext, &audioSinkContext);
    if (ret < 0) {
        LOGE(TAG, "Unable to init filter graph:%d", ret);
    }
    jmethodID fft_method = env->GetMethodID(player_class, "fftCallbackFromJNI", "([B)V");
    mVisualizer = new FrankVisualizer();
    mVisualizer->init_visualizer();
    //read audio frame
    while (av_read_frame(pFormatCtx, &packet) >= 0 && !filter_release) {
        if (packet.stream_index != audio_stream_idx) {
            av_packet_unref(&packet);
            continue;
        }
        // Hot-swap the equalizer when again111() raised the flag: tear down
        // the old graph and rebuild it from the (updated) global filter_desc.
        if (filter_again) {
            filter_again = 0;
            avfilter_graph_free(&audioFilterGraph);
            if (init_equalizer_filter(filter_desc, codecCtx, &audioFilterGraph, &audioSrcContext, &audioSinkContext) < 0) {
                LOGE(TAG, "init_filter error, ret=%d\n", ret);
                goto end;
            }
            LOGE(TAG, "play again,filter_descr=_=%s", filter_desc);
        }
        ret = avcodec_decode_audio4(codecCtx, frame, &got_frame, &packet);
        if (ret < 0) {
            break;
        }
        if (got_frame > 0) {
            // Feed (a clamped number of) raw samples to the FFT and notify Java.
            int nb_samples = frame->nb_samples < MAX_FFT_SIZE ? frame->nb_samples : MAX_FFT_SIZE;
            if (nb_samples >= MIN_FFT_SIZE) {
                int8_t *output_data = mVisualizer->fft_run(frame->data[0], nb_samples);
                fft_callback(env, thiz, fft_method, output_data, mVisualizer->getOutputSample());
            }
            ret = av_buffersrc_add_frame(audioSrcContext, frame);
            if (ret < 0) {
                LOGE(TAG, "Error add the frame to the filter graph:%d", ret);
            }
            /* Get all the filtered output that is available. */
            while (true) {
                ret = av_buffersink_get_frame(audioSinkContext, filter_frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                if (ret < 0) {
                    LOGE(TAG, "Error get the frame from the filter graph:%d", ret);
                    goto end;
                }
                //convert audio format
                // NOTE(review): swr_convert's third argument is a sample count
                // per channel, but MAX_AUDIO_FRAME_SIZE looks like a byte size
                // — harmless as an upper bound, but confirm intent.
                swr_convert(swrCtx, &out_buffer, MAX_AUDIO_FRAME_SIZE,
                            (const uint8_t **) /*frame*/filter_frame->data, /*frame*/filter_frame->nb_samples);
                int out_buffer_size = av_samples_get_buffer_size(nullptr, out_channel,
                        /*frame*/filter_frame->nb_samples, out_sample_fmt, 1);
                // Copy the PCM into a fresh Java byte[] and hand it to AudioTrack.write().
                jbyteArray audio_sample_array = env->NewByteArray(out_buffer_size);
                jbyte *sample_byte_array = env->GetByteArrayElements(audio_sample_array, nullptr);
                memcpy(sample_byte_array, out_buffer, (size_t) out_buffer_size);
                env->ReleaseByteArrayElements(audio_sample_array, sample_byte_array, 0);
                //call write method to play
                env->CallIntMethod(audio_track, audio_track_write_mid,
                                   audio_sample_array, 0, out_buffer_size);
                env->DeleteLocalRef(audio_sample_array);
                av_frame_unref(filter_frame);
                // Crude pacing: ~16 ms per chunk (SLEEP_TIME is in microseconds).
                usleep(SLEEP_TIME);
            }
        }
        av_packet_unref(&packet);
    }
end:
    // Cleanup for the normal-exit / mid-loop-failure paths (not the early returns).
    av_free(out_buffer);
    swr_free(&swrCtx);
    avfilter_graph_free(&audioFilterGraph);
//    avcodec_free_context(&codecCtx);
    avformat_close_input(&pFormatCtx);
    av_frame_free(&frame);
    av_frame_free(&filter_frame);
    env->ReleaseStringUTFChars(input_jstr, input_cstr);
    // NOTE(review): if again111() replaced filter_desc since we started, this
    // releases filter_jstr with a pointer obtained from a DIFFERENT jstring.
    env->ReleaseStringUTFChars(filter_jstr, filter_desc);
    // Ask Java to release the AudioTrack created above, then reset control flags.
    jmethodID releaseMethod = env->GetMethodID(player_class, "releaseAudioTrack", "()V");
    env->CallVoidMethod(thiz, releaseMethod);
    filter_again = 0;
    filter_release = 0;
    mVisualizer->release_visualizer();
    LOGE(TAG, "audio release...");
}
/**
 * Request a filter change from the Java side: store the new filter description
 * and flag the playback loop (play111) to rebuild its equalizer graph.
 * No-op when filter_jstr is null.
 */
AUDIO_PLAYER_FUNC(void, again111, jstring filter_jstr) {
    if (!filter_jstr) return;
    // Publish the new descriptor BEFORE raising the flag: the playback thread
    // polls filter_again each packet and previously could observe the flag
    // while filter_desc still pointed at the old string.
    // NOTE(review): the previous filter_desc UTF chars are never released here
    // — pre-existing leak, left unchanged (play111 releases only the current one).
    filter_desc = env->GetStringUTFChars(filter_jstr, nullptr);
    filter_again = 1;
}
/**
 * Ask the playback loop in play111 to stop: the loop tests this flag before
 * each packet, exits, and then performs its own cleanup (which also resets
 * the flag back to 0).
 */
AUDIO_PLAYER_FUNC(void, release111) {
    filter_release = 1;
}
/**
 * Deliver one block of FFT output to Java: copy the samples into a fresh
 * byte[], invoke the player's callback method with it, and drop the local
 * reference so the loop in play111 cannot exhaust the JNI local-ref table.
 */
void fft_callback(JNIEnv *jniEnv, jobject thiz, jmethodID fft_method, int8_t * arg, int samples) {
    jbyteArray fft_bytes = jniEnv->NewByteArray(samples);
    jniEnv->SetByteArrayRegion(fft_bytes, 0, samples, arg);
    jniEnv->CallVoidMethod(thiz, fft_method, fft_bytes);
    jniEnv->DeleteLocalRef(fft_bytes);
}

@@ -8,8 +8,6 @@
#define SLEEP_TIME (16000)
FFAudioPlayer *audioPlayer;
void fftCallback(JNIEnv *env, jobject thiz, jmethodID fft_method, int8_t *data, int size) {
jbyteArray dataArray = env->NewByteArray(size);
env->SetByteArrayRegion(dataArray, 0, size, data);
@@ -17,13 +15,18 @@ void fftCallback(JNIEnv *env, jobject thiz, jmethodID fft_method, int8_t *data,
env->DeleteLocalRef(dataArray);
}
AUDIO_PLAYER_FUNC(void, play, jstring path) {
AUDIO_PLAYER_FUNC(long, native_1init) {
auto *audioPlayer = new FFAudioPlayer();
return (long)audioPlayer;
}
AUDIO_PLAYER_FUNC(void, native_1play, long context, jstring path, jstring filter) {
if (path == nullptr)
return;
int result = 0;
const char* native_path = env->GetStringUTFChars(path, JNI_FALSE);
audioPlayer = new FFAudioPlayer();
auto *audioPlayer = (FFAudioPlayer*) context;
// open stream, and init work
audioPlayer->open(native_path);
// init AudioTrack
@@ -73,14 +76,16 @@ AUDIO_PLAYER_FUNC(void, play, jstring path) {
delete audioPlayer;
}
AUDIO_PLAYER_FUNC(void, again, jstring filter_jstr) {
AUDIO_PLAYER_FUNC(void, native_1again, long context, jstring filter_jstr) {
if (!filter_jstr) return;
auto *audioPlayer = (FFAudioPlayer*) context;
audioPlayer->setFilterAgain(true);
const char *desc = env->GetStringUTFChars(filter_jstr, nullptr);
audioPlayer->setFilterDesc(desc);
}
AUDIO_PLAYER_FUNC(void, release) {
AUDIO_PLAYER_FUNC(void, native_1release, long context) {
auto *audioPlayer = (FFAudioPlayer*) context;
audioPlayer->setExit(true);
}

@@ -77,96 +77,105 @@ end:
return ret;
}
FFAudioPlayer::FFAudioPlayer() {
m_state = new AudioPlayerState();
m_visualizer = nullptr;
}
FFAudioPlayer::~FFAudioPlayer() {
delete m_state;
}
int FFAudioPlayer::open(const char *path) {
if (!path)
return -1;
int ret;
const AVCodec *codec;
inputFrame = av_frame_alloc();
packet = av_packet_alloc();
out_buffer = new uint8_t [BUFFER_SIZE];
m_state->inputFrame = av_frame_alloc();
m_state->packet = av_packet_alloc();
m_state->outBuffer = new uint8_t [BUFFER_SIZE];
// open input stream
ret = avformat_open_input(&formatContext, path, nullptr, nullptr);
ret = avformat_open_input(&m_state->formatContext, path, nullptr, nullptr);
if (ret < 0) {
LOGE(AUDIO_TAG, "avformat_open_input error=%s", av_err2str(ret));
return ret;
}
// (if need)find info: width、height、sample_rate、duration
avformat_find_stream_info(formatContext, nullptr);
avformat_find_stream_info(m_state->formatContext, nullptr);
// find audio index
for (int i=0; i<formatContext->nb_streams; i++) {
if (AVMEDIA_TYPE_AUDIO == formatContext->streams[i]->codecpar->codec_type) {
audio_index = i;
for (int i=0; i<m_state->formatContext->nb_streams; i++) {
if (AVMEDIA_TYPE_AUDIO == m_state->formatContext->streams[i]->codecpar->codec_type) {
m_state->audioIndex = i;
break;
}
}
if (audio_index == -1) {
if (m_state->audioIndex == -1) {
return -1;
}
// find audio decoder
codec = avcodec_find_decoder(formatContext->streams[audio_index]->codecpar->codec_id);
codecContext = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(codecContext, formatContext->streams[audio_index]->codecpar);
codec = avcodec_find_decoder(m_state->formatContext->streams[m_state->audioIndex]->codecpar->codec_id);
m_state->codecContext = avcodec_alloc_context3(codec);
avcodec_parameters_to_context(m_state->codecContext, m_state->formatContext->streams[m_state->audioIndex]->codecpar);
// open decoder
ret = avcodec_open2(codecContext, codec, nullptr);
ret = avcodec_open2(m_state->codecContext, codec, nullptr);
if (ret < 0) {
LOGE(AUDIO_TAG, "avcodec_open2 error=%s", av_err2str(ret));
return ret;
}
// input and output params
int in_sample_rate = codecContext->sample_rate;
auto in_sample_fmt = codecContext->sample_fmt;
int in_ch_layout = (int)codecContext->channel_layout;
out_sample_rate = in_sample_rate;
out_sample_fmt = AV_SAMPLE_FMT_S16;
out_ch_layout = AV_CH_LAYOUT_STEREO;
out_channel = codecContext->channels;
int in_sample_rate = m_state->codecContext->sample_rate;
auto in_sample_fmt = m_state->codecContext->sample_fmt;
int in_ch_layout = (int)m_state->codecContext->channel_layout;
m_state->out_sample_rate = in_sample_rate;
m_state->out_sample_fmt = AV_SAMPLE_FMT_S16;
m_state->out_ch_layout = AV_CH_LAYOUT_STEREO;
m_state->out_channel = m_state->codecContext->channels;
// init resample context
swrContext = swr_alloc();
swr_alloc_set_opts(swrContext, out_ch_layout, out_sample_fmt, out_sample_rate,
m_state->swrContext = swr_alloc();
swr_alloc_set_opts(m_state->swrContext, m_state->out_ch_layout, m_state->out_sample_fmt, m_state->out_sample_rate,
in_ch_layout, in_sample_fmt, in_sample_rate, 0, nullptr);
swr_init(swrContext);
swr_init(m_state->swrContext);
// init filter graph
filterFrame = av_frame_alloc();
initFilter(FILTER_DESC, codecContext, &audioFilterGraph,
&audioSrcContext, &audioSinkContext);
m_state->filterFrame = av_frame_alloc();
initFilter(FILTER_DESC, m_state->codecContext, &m_state->audioFilterGraph,
&m_state->audioSrcContext, &m_state->audioSinkContext);
// init visualizer
mVisualizer = new FrankVisualizer();
mVisualizer->init_visualizer();
m_visualizer = new FrankVisualizer();
m_visualizer->init_visualizer();
return 0;
}
int FFAudioPlayer::getChannel() const {
return out_channel;
return m_state->out_channel;
}
int FFAudioPlayer::getSampleRate() const {
return out_sample_rate;
return m_state->out_sample_rate;
}
int FFAudioPlayer::decodeAudio() {
int ret;
if (exitPlaying.load()) {
if (m_state->exitPlaying.load()) {
return -1;
}
// demux: read a frame(should be demux thread)
ret = av_read_frame(formatContext, packet);
ret = av_read_frame(m_state->formatContext, m_state->packet);
if (ret < 0) {
return ret;
}
// see if audio packet
if (packet->stream_index != audio_index) {
if (m_state->packet->stream_index != m_state->audioIndex) {
return 0;
}
// decode audio frame(should be decode thread)
ret = avcodec_send_packet(codecContext, packet);
ret = avcodec_send_packet(m_state->codecContext, m_state->packet);
if (ret < 0) {
LOGE(AUDIO_TAG, "avcodec_send_packet=%s", av_err2str(ret));
}
ret = avcodec_receive_frame(codecContext, inputFrame);
ret = avcodec_receive_frame(m_state->codecContext, m_state->inputFrame);
if (ret < 0) {
if (ret == AVERROR(EAGAIN)) {
return 0;
@@ -176,29 +185,29 @@ int FFAudioPlayer::decodeAudio() {
}
// visualizer: do fft
int nb_samples = inputFrame->nb_samples < MAX_FFT_SIZE ? inputFrame->nb_samples : MAX_FFT_SIZE;
int nb_samples = m_state->inputFrame->nb_samples < MAX_FFT_SIZE ? m_state->inputFrame->nb_samples : MAX_FFT_SIZE;
if (m_enableVisualizer && nb_samples >= MIN_FFT_SIZE) {
mVisualizer->fft_run(inputFrame->data[0], nb_samples);
m_visualizer->fft_run(m_state->inputFrame->data[0], nb_samples);
}
// change filter
if (filterAgain) {
filterAgain = false;
avfilter_graph_free(&audioFilterGraph);
if ((ret = initFilter(filterDesc, codecContext, &audioFilterGraph, &audioSrcContext, &audioSinkContext)) < 0) {
if (m_state->filterAgain) {
m_state->filterAgain = false;
avfilter_graph_free(&m_state->audioFilterGraph);
if ((ret = initFilter(m_state->filterDesc, m_state->codecContext, &m_state->audioFilterGraph,
&m_state->audioSrcContext, &m_state->audioSinkContext)) < 0) {
LOGE(AUDIO_TAG, "init_filter error, ret=%d\n", ret);
return ret;
}
LOGE(AUDIO_TAG, "play again,filter_descr=_=%s", filterDesc);
}
// put into filter
ret = av_buffersrc_add_frame(audioSrcContext, inputFrame);
ret = av_buffersrc_add_frame(m_state->audioSrcContext, m_state->inputFrame);
if (ret < 0) {
LOGE(AUDIO_TAG, "av_buffersrc_add_frame error=%s", av_err2str(ret));
}
// drain from filter
ret = av_buffersink_get_frame(audioSinkContext, filterFrame);
ret = av_buffersink_get_frame(m_state->audioSinkContext, m_state->filterFrame);
if (ret == AVERROR(EAGAIN)) {
return 0;
} else if (ret == AVERROR_EOF) {
@@ -210,20 +219,20 @@ int FFAudioPlayer::decodeAudio() {
}
// convert audio format and sample_rate
swr_convert(swrContext, &out_buffer, BUFFER_SIZE,
(const uint8_t **)(filterFrame->data), filterFrame->nb_samples);
swr_convert(m_state->swrContext, &m_state->outBuffer, BUFFER_SIZE,
(const uint8_t **)(m_state->filterFrame->data), m_state->filterFrame->nb_samples);
// get buffer size after converting
int buffer_size = av_samples_get_buffer_size(nullptr, out_channel,
filterFrame->nb_samples, out_sample_fmt, 1);
int buffer_size = av_samples_get_buffer_size(nullptr, m_state->out_channel,
m_state->filterFrame->nb_samples, m_state->out_sample_fmt, 1);
av_frame_unref(inputFrame);
av_frame_unref(filterFrame);
av_packet_unref(packet);
av_frame_unref(m_state->inputFrame);
av_frame_unref(m_state->filterFrame);
av_packet_unref(m_state->packet);
return buffer_size;
}
uint8_t *FFAudioPlayer::getDecodeFrame() const {
return out_buffer;
return m_state->outBuffer;
}
void FFAudioPlayer::setEnableVisualizer(bool enable) {
@@ -235,52 +244,54 @@ bool FFAudioPlayer::enableVisualizer() const {
}
int8_t* FFAudioPlayer::getFFTData() const {
if (!mVisualizer)
if (!m_visualizer)
return nullptr;
return mVisualizer->getFFTData();
return m_visualizer->getFFTData();
}
int FFAudioPlayer::getFFTSize() const {
if (!mVisualizer)
if (!m_visualizer)
return 0;
return mVisualizer->getOutputSample();
return m_visualizer->getOutputSample();
}
void FFAudioPlayer::setFilterAgain(bool again) {
filterAgain = again;
m_state->filterAgain = again;
}
void FFAudioPlayer::setFilterDesc(const char *filterDescription) {
filterDesc = filterDescription;
m_state->filterDesc = filterDescription;
}
void FFAudioPlayer::setExit(bool exit) {
exitPlaying = exit;
m_state->exitPlaying = exit;
}
void FFAudioPlayer::close() {
if (formatContext) {
avformat_close_input(&formatContext);
if (!m_state)
return;
if (m_state->formatContext) {
avformat_close_input(&m_state->formatContext);
}
if (codecContext) {
avcodec_free_context(&codecContext);
if (m_state->codecContext) {
avcodec_free_context(&m_state->codecContext);
}
if (packet) {
av_packet_free(&packet);
if (m_state->packet) {
av_packet_free(&m_state->packet);
}
if (inputFrame) {
av_frame_free(&inputFrame);
if (m_state->inputFrame) {
av_frame_free(&m_state->inputFrame);
}
if (swrContext) {
swr_close(swrContext);
if (m_state->swrContext) {
swr_close(m_state->swrContext);
}
avfilter_free(audioSrcContext);
avfilter_free(audioSinkContext);
if (audioFilterGraph) {
avfilter_graph_free(&audioFilterGraph);
avfilter_free(m_state->audioSrcContext);
avfilter_free(m_state->audioSinkContext);
if (m_state->audioFilterGraph) {
avfilter_graph_free(&m_state->audioFilterGraph);
}
delete[] out_buffer;
if (mVisualizer) {
mVisualizer->release_visualizer();
delete[] m_state->outBuffer;
if (m_visualizer) {
m_visualizer->release_visualizer();
}
}

@@ -23,21 +23,21 @@ extern "C" {
}
#endif
class FFAudioPlayer {
private:
AVFormatContext *formatContext;
AVCodecContext *codecContext;
int audio_index = -1;
SwrContext *swrContext;
int out_sample_rate;
int out_ch_layout;
struct AudioPlayerState {
int out_channel;
int out_ch_layout;
int out_sample_rate;
enum AVSampleFormat out_sample_fmt;
AVPacket *packet;
AVFrame *inputFrame;
AVFrame *filterFrame;
uint8_t *out_buffer;
int audioIndex = -1;
uint8_t *outBuffer;
SwrContext *swrContext;
AVFormatContext *formatContext;
AVCodecContext *codecContext;
const char *filterDesc;
std::atomic_bool filterAgain;
@@ -46,11 +46,22 @@ private:
AVFilterGraph *audioFilterGraph;
AVFilterContext *audioSrcContext;
AVFilterContext *audioSinkContext;
};
class FFAudioPlayer {
private:
AudioPlayerState *m_state;
bool m_enableVisualizer = false;
FrankVisualizer *mVisualizer;
FrankVisualizer *m_visualizer;
public:
FFAudioPlayer();
~FFAudioPlayer();
int open(const char* path);
int getSampleRate() const;

@@ -151,11 +151,11 @@ release:
}
FrankVisualizer::FrankVisualizer() {
LOGE("FrankVisualizer init...");
}
FrankVisualizer::~FrankVisualizer() {
LOGE("FrankVisualizer release...");
}
int8_t* FrankVisualizer::fft_run(uint8_t *input_buffer, int nb_samples) {

@@ -14,14 +14,38 @@ public class AudioPlayer {
System.loadLibrary("media-handle");
}
private long audioContext = 0;
private AudioTrack mAudioTrack;
//using AudioTrack to play
public native void play(String audioPath, String filterDesc);
private native long native_init();
private native void native_play(long context, String audioPath, String filter);
private native void native_again(long context, String filterDesc);
private native void native_release(long context);
public void play(String audioPath, String filter) {
audioContext = native_init();
native_play(audioContext, audioPath, filter);
}
public native void again(String filterDesc);
public void again(String filterDesc) {
if (audioContext == 0) {
return;
}
native_again(audioContext, filterDesc);
}
public void release() {
if (audioContext == 0) {
return;
}
native_release(audioContext);
audioContext = 0;
}
public native void release();
/**
* Create an AudioTrack instance for JNI calling

Loading…
Cancel
Save