parent
06c1251b60
commit
84e64c159b
@ -0,0 +1,66 @@ |
||||
/*
|
||||
* Copyright (C) 2007 The Android Open Source Project |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
*/ |
||||
|
||||
#ifndef MUTEX_H |
||||
#define MUTEX_H |
||||
|
||||
#include <stdint.h> |
||||
#include <sys/types.h> |
||||
#include <time.h> |
||||
# include <pthread.h> |
||||
|
||||
|
||||
// Thin non-recursive mutex wrapping a pthread_mutex_t.
// Copying is disabled (copy ctor/assignment declared private, never defined).
class Mutex {
public:
    Mutex();
    ~Mutex();

    // Blocks until the lock is acquired.
    // Returns 0 on success, or the negated pthread error code on failure.
    int32_t lock();
    void unlock();

    // RAII scoped lock: acquires the mutex on construction,
    // releases it on destruction.
    class Autolock {
    public:
        inline Autolock(Mutex& mutex) : mLock(mutex)  { mLock.lock(); }
        inline Autolock(Mutex* mutex) : mLock(*mutex) { mLock.lock(); }
        inline ~Autolock() { mLock.unlock(); }
    private:
        Mutex& mLock;
    };

private:
    // Non-copyable: declared but intentionally not defined.
    Mutex(const Mutex&);
    Mutex& operator = (const Mutex&);

    pthread_mutex_t mMutex;
};
||||
|
||||
// Initialize the underlying pthread mutex with default attributes.
inline Mutex::Mutex() {
    pthread_mutex_init(&mMutex, NULL);
}
||||
|
||||
// Destroy the pthread mutex. The mutex must not be held at this point.
inline Mutex::~Mutex() {
    pthread_mutex_destroy(&mMutex);
}
||||
|
||||
// Acquire the mutex. Returns 0 on success, negated pthread error code otherwise.
inline int32_t Mutex::lock() {
    return -pthread_mutex_lock(&mMutex);
}
||||
|
||||
// Release the mutex. Return value of pthread_mutex_unlock is ignored.
inline void Mutex::unlock() {
    pthread_mutex_unlock(&mMutex);
}
||||
|
||||
#endif // MUTEX_H
|
@ -0,0 +1,755 @@ |
||||
/*
|
||||
* Created by frank on 2022/2/23 |
||||
* |
||||
* part of code from William Seemann |
||||
*/ |
||||
|
||||
#include <ffmpeg_media_retriever.h> |
||||
#include <stdio.h> |
||||
#include <unistd.h> |
||||
#include <android/log.h> |
||||
#include <metadata_util.h> |
||||
#include <libswscale/swscale.h> |
||||
#include <libavutil/imgutils.h> |
||||
|
||||
#define TAG "ffmpeg_retriever" |
||||
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__) |
||||
|
||||
const int TARGET_IMAGE_FORMAT = AV_PIX_FMT_RGBA; |
||||
const int TARGET_IMAGE_CODEC = AV_CODEC_ID_PNG; |
||||
|
||||
|
||||
int is_supported_format(int codec_id, int pix_fmt) { |
||||
if ((codec_id == AV_CODEC_ID_PNG || |
||||
codec_id == AV_CODEC_ID_MJPEG || |
||||
codec_id == AV_CODEC_ID_BMP) && |
||||
pix_fmt == AV_PIX_FMT_RGBA) { |
||||
return 1; |
||||
} |
||||
|
||||
return 0; |
||||
} |
||||
|
||||
/*
 * Lazily create the encoder context (PNG over RGBA) and the swscale context
 * used to produce frames scaled to width x height; results are stored in
 * s->scaled_codecCtx and s->scaled_sws_ctx (freed later by release()).
 * Returns SUCCESS or FAILURE.
 *
 * NOTE: pCodecCtx is kept for interface compatibility; source parameters
 * are actually read from s->video_st.
 */
int get_scaled_context(State *s, AVCodecContext *pCodecCtx, int width, int height) {
    AVCodec *targetCodec = avcodec_find_encoder(TARGET_IMAGE_CODEC);
    if (!targetCodec) {
        /* fixed log text: this is an encoder lookup, not a decoder lookup */
        LOGE("avcodec_find_encoder() failed to find encoder\n");
        return FAILURE;
    }

    s->scaled_codecCtx = avcodec_alloc_context3(targetCodec);
    if (!s->scaled_codecCtx) {
        LOGE("avcodec_alloc_context3 failed\n");
        return FAILURE;
    }

    AVCodecParameters *codecP = s->video_st->codecpar;
    s->scaled_codecCtx->width = width;
    s->scaled_codecCtx->height = height;
    s->scaled_codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
    s->scaled_codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    s->scaled_codecCtx->bit_rate = codecP->bit_rate;
    s->scaled_codecCtx->time_base.num = s->video_st->codec->time_base.num;
    s->scaled_codecCtx->time_base.den = s->video_st->codec->time_base.den;

    /* targetCodec was already null-checked above; redundant re-check removed */
    if (avcodec_open2(s->scaled_codecCtx, targetCodec, NULL) < 0) {
        LOGE("avcodec_open2() failed\n");
        return FAILURE;
    }

    if (codecP->width > 0 && codecP->height > 0 && codecP->format != AV_PIX_FMT_NONE && width > 0 && height > 0) {
        s->scaled_sws_ctx = sws_getContext(codecP->width,
                                           codecP->height,
                                           codecP->format,
                                           width,
                                           height,
                                           TARGET_IMAGE_FORMAT,
                                           SWS_BILINEAR,
                                           NULL,
                                           NULL,
                                           NULL);
    }

    return SUCCESS;
}
||||
|
||||
/*
 * Open the decoder for the stream at stream_index and record it in the State.
 * For video streams, additionally prepare a PNG encoder context and a swscale
 * context (native resolution) used later for frame extraction.
 * Returns SUCCESS or FAILURE.
 */
int stream_component_open(State *s, int stream_index) {
    AVFormatContext *pFormatCtx = s->pFormatCtx;
    AVCodecContext *codecCtx;
    AVCodec *codec;

    if (stream_index < 0 || stream_index >= (int) pFormatCtx->nb_streams) {
        return FAILURE;
    }

    codecCtx = pFormatCtx->streams[stream_index]->codec;
    codec = avcodec_find_decoder(codecCtx->codec_id);
    if (codec == NULL) {
        LOGE("avcodec_find_decoder() failed to find decoder=%d", codecCtx->codec_id);
        return FAILURE;
    }
    /* codec was already null-checked above; redundant !codec test removed */
    if (avcodec_open2(codecCtx, codec, NULL) < 0) {
        LOGE("avcodec_open2() failed\n");
        return FAILURE;
    }

    switch (codecCtx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            s->audio_stream = stream_index;
            s->audio_st = pFormatCtx->streams[stream_index];
            break;
        case AVMEDIA_TYPE_VIDEO: {
            /* braces added: C does not allow a declaration directly after a
             * case label (targetCodec below), so the case body is scoped */
            s->video_stream = stream_index;
            s->video_st = pFormatCtx->streams[stream_index];
            AVCodec *targetCodec = avcodec_find_encoder(AV_CODEC_ID_PNG);
            if (!targetCodec) {
                /* fixed log text: encoder lookup, not decoder */
                LOGE("avcodec_find_encoder() failed to find encoder\n");
                return FAILURE;
            }
            s->codecCtx = avcodec_alloc_context3(targetCodec);
            if (!s->codecCtx) {
                LOGE("avcodec_alloc_context3 failed\n");
                return FAILURE;
            }

            AVCodecParameters *codecP = s->video_st->codecpar;
            s->codecCtx->width = codecP->width;
            s->codecCtx->height = codecP->height;
            s->codecCtx->bit_rate = codecP->bit_rate;
            s->codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
            s->codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
            s->codecCtx->time_base.num = s->video_st->codec->time_base.num;
            s->codecCtx->time_base.den = s->video_st->codec->time_base.den;

            if (avcodec_open2(s->codecCtx, targetCodec, NULL) < 0) {
                LOGE("avcodec_open2() failed\n");
                return FAILURE;
            }

            if (codecP->width > 0 && codecP->height > 0 && codecP->format != AV_PIX_FMT_NONE) {
                s->sws_ctx = sws_getContext(codecP->width,
                                            codecP->height,
                                            codecP->format,
                                            codecP->width,
                                            codecP->height,
                                            TARGET_IMAGE_FORMAT,
                                            SWS_BILINEAR,
                                            NULL,
                                            NULL,
                                            NULL);
            }
            break;
        }
        default:
            break;
    }

    return SUCCESS;
}
||||
|
||||
/*
 * Open the media at `path`, probe its streams, open the first audio/video
 * stream, and populate metadata (duration, mime type, file size, resolution,
 * rotation, frame rate) on the format context.
 * On failure *ps is set to NULL. Returns SUCCESS or FAILURE.
 */
int set_data_source_l(State **ps, const char* path) {
    int i;
    int audio_index = -1;
    int video_index = -1;
    State *state = *ps;

    AVDictionary *options = NULL;
    av_dict_set(&options, "icy", "1", 0);
    av_dict_set(&options, "user-agent", "FFmpegMetadataRetriever", 0);

    state->pFormatCtx = avformat_alloc_context();
    if (state->offset > 0) {
        /* fd-based sources may start at a byte offset into the descriptor */
        state->pFormatCtx->skip_initial_bytes = state->offset;
    }

    if (avformat_open_input(&state->pFormatCtx, path, NULL, &options) != 0) {
        LOGE("Metadata could not be retrieved\n");
        av_dict_free(&options);   /* fixed: options dictionary was leaked */
        *ps = NULL;
        return FAILURE;
    }
    /* avformat_open_input leaves unconsumed entries in options; free them */
    av_dict_free(&options);

    if (avformat_find_stream_info(state->pFormatCtx, NULL) < 0) {
        LOGE("Metadata could not be retrieved\n");
        avformat_close_input(&state->pFormatCtx);
        *ps = NULL;
        return FAILURE;
    }

    set_duration(state->pFormatCtx);

    // Find the first audio and video stream
    for (i = 0; i < (int) state->pFormatCtx->nb_streams; i++) {
        if (state->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) {
            video_index = i;
        }

        if (state->pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) {
            audio_index = i;
        }

        set_codec(state->pFormatCtx, i);
    }

    if (audio_index >= 0) {
        stream_component_open(state, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(state, video_index);
    }

    set_mimetype(state->pFormatCtx);
    set_filesize(state->pFormatCtx);
    set_video_resolution(state->pFormatCtx, state->video_st);
    set_rotation(state->pFormatCtx, state->audio_st, state->video_st);
    set_framerate(state->pFormatCtx, state->audio_st, state->video_st);

    *ps = state;
    return SUCCESS;
}
||||
|
||||
/*
 * Reset *ps to a pristine State: close any previously opened input and
 * descriptor, allocate the State on first use, and zero all fields.
 */
void init_ffmpeg(State **ps) {
    State *state = *ps;

    if (state) {
        /* Reusing an existing State: tear down any open resources first. */
        if (state->pFormatCtx) {
            avformat_close_input(&state->pFormatCtx);
        }
        if (state->fd != -1) {
            close(state->fd);
        }
    } else {
        state = av_mallocz(sizeof(State));
    }

    state->pFormatCtx = NULL;
    state->audio_stream = -1;
    state->video_stream = -1;
    state->audio_st = NULL;
    state->video_st = NULL;
    state->fd = -1;
    state->offset = 0;
    state->headers = NULL;

    *ps = state;
}
||||
|
||||
/*
 * Public entry: (re)initialize state and open the media at `path`.
 * The native window attached to a previous state, if any, is carried over.
 * Returns SUCCESS or FAILURE.
 */
int set_data_source(State **ps, const char* path) {
    State *state = *ps;

    /* Preserve an already-attached preview surface across re-initialization. */
    ANativeWindow *native_window =
            (state && state->native_window) ? state->native_window : NULL;

    init_ffmpeg(&state);
    state->native_window = native_window;
    *ps = state;

    return set_data_source_l(ps, path);
}
||||
|
||||
/*
 * Public entry: open media from a file descriptor via FFmpeg's "pipe:" proto.
 * The descriptor is dup()'ed so the caller keeps ownership of fd; the dup is
 * stored in state->fd and closed by init_ffmpeg()/release().
 * `length` is accepted for API symmetry but currently unused.
 * Returns SUCCESS or FAILURE.
 */
int set_data_source_fd(State **ps, int fd, int64_t offset, int64_t length) {
    char path[256];
    State *state = *ps;

    /* Preserve an already-attached preview surface across re-initialization. */
    ANativeWindow *native_window = NULL;
    if (state && state->native_window) {
        native_window = state->native_window;
    }
    init_ffmpeg(&state);
    state->native_window = native_window;

    int myfd = dup(fd);
    if (myfd < 0) {
        /* fixed: dup() failure previously produced a bogus "pipe:-1" URL */
        *ps = state;
        return FAILURE;
    }
    /* snprintf replaces the sprintf-into-small-buffer + strcat combination */
    snprintf(path, sizeof(path), "pipe:%d", myfd);
    state->fd = myfd;
    state->offset = offset;
    *ps = state;

    return set_data_source_l(ps, path);
}
||||
|
||||
/*
 * Look up a single metadata value by key.
 * Returns NULL when no source is open or the key is absent.
 */
const char* extract_metadata(State **ps, const char* key) {
    State *state = *ps;

    if (state == NULL || state->pFormatCtx == NULL) {
        return NULL;
    }

    return extract_metadata_internal(state->pFormatCtx, state->audio_st, state->video_st, key);
}
||||
|
||||
/*
 * Copy all metadata of the open source into *metadata.
 * Returns FAILURE when no source is open, SUCCESS otherwise.
 */
int get_metadata(State **ps, AVDictionary **metadata) {
    State *state = *ps;

    if (state == NULL || state->pFormatCtx == NULL) {
        return FAILURE;
    }

    get_metadata_internal(state->pFormatCtx, metadata);
    return SUCCESS;
}
||||
|
||||
/*
 * Build a one-in/one-out video filter graph described by filters_descr
 * (used for rotating frames via "transpose=..."). The buffer source is
 * configured from dec_ctx and the first video stream's time base; the sink
 * is constrained to RGBA output.
 * On success the src/sink/graph are stored into state; on failure the
 * partially-built graph is freed. Returns 0 or a negative AVERROR code.
 */
int init_ffmpeg_filters(State *state, const char *filters_descr, AVFormatContext *fmt_ctx, AVCodecContext *dec_ctx) {
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    int i;
    int video_stream_index = 0;
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;

    /* Locate the first video stream; its time_base feeds the buffer source. */
    for (i = 0; i < (int) fmt_ctx->nb_streams; i++) {
        if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            break;
        }
    }
    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };

    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             time_base.num, time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        goto end;
    }

    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        goto end;
    }

    /* Endpoint naming convention for avfilter_graph_parse_ptr: "in" is the
     * graph's input (our source), "out" its output (our sink). */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    state->buffersrc_ctx = buffersrc_ctx;
    state->buffersink_ctx = buffersink_ctx;
    state->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    if (ret < 0 && filter_graph) {
        /* fixed: the graph (and any filters created in it) leaked on every
         * error path; it is only kept when stored into state above */
        avfilter_graph_free(&filter_graph);
    }

    return ret;
}
||||
|
||||
/*
 * Convert/scale the decoded frame pFrame to RGBA, optionally rotate it per
 * the stream's "rotate" metadata, encode it as PNG into avpkt, and — when a
 * native window is attached — blit the RGBA pixels to that window.
 *
 * width/height == -1 means "use the source dimensions" (native contexts);
 * otherwise the lazily-created scaled contexts are used.
 * *got_packet_ptr is set by the encoder and forced back to 0 on any failure;
 * avpkt is unreffed when no packet was produced.
 */
void convert_image(State *state, AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt,
                   int *got_packet_ptr, int width, int height) {

    AVFrame *frame;
    *got_packet_ptr = 0;
    int rotateDegree = 0;
    AVCodecContext *codecCtx;
    struct SwsContext *scaleCtx;

    if (width != -1 && height != -1) {
        /* Scaled output requested: build the scaled contexts on first use. */
        if (state->scaled_codecCtx == NULL ||
            state->scaled_sws_ctx == NULL) {
            get_scaled_context(state, pCodecCtx, width, height);
        }

        codecCtx = state->scaled_codecCtx;
        scaleCtx = state->scaled_sws_ctx;
    } else {
        codecCtx = state->codecCtx;
        scaleCtx = state->sws_ctx;
    }

    if (!scaleCtx) {
        LOGE("scale context is null!");
        return;
    }
    if (width == -1) {
        width = pCodecCtx->width;
    }
    if (height == -1) {
        height = pCodecCtx->height;
    }

    frame = av_frame_alloc();
    if (!frame) {                       /* fixed: allocation was unchecked */
        return;
    }

    // Determine required buffer size and allocate buffer
    int numBytes = av_image_get_buffer_size(TARGET_IMAGE_FORMAT, codecCtx->width, codecCtx->height, 1);
    /* uint8_t* (was void*): avoids non-standard void* arithmetic below */
    uint8_t *buffer = av_malloc((size_t) numBytes);
    if (!buffer) {                      /* fixed: allocation was unchecked */
        av_frame_free(&frame);
        return;
    }

    frame->format = TARGET_IMAGE_FORMAT;
    frame->width = codecCtx->width;
    frame->height = codecCtx->height;

    av_image_fill_arrays(frame->data,
                         frame->linesize,
                         buffer,
                         TARGET_IMAGE_FORMAT,
                         codecCtx->width,
                         codecCtx->height,
                         1);

    sws_scale(scaleCtx,
              (const uint8_t * const *) pFrame->data,
              pFrame->linesize,
              0,
              pFrame->height,
              frame->data,
              frame->linesize);

    /* Rotation angle comes from the video stream's "rotate" metadata tag. */
    if (state->video_st) {
        AVDictionaryEntry *entry = av_dict_get(state->video_st->metadata, ROTATE, NULL, AV_DICT_MATCH_CASE);
        if (entry && entry->value) {
            rotateDegree = atoi(entry->value);
        }
    }
    if (rotateDegree == 90 || rotateDegree == 270) {
        /* Build the transpose filter graph lazily, once per State. */
        if (!state->buffersrc_ctx || !state->buffersink_ctx || !state->filter_graph) {
            const char* filter_str = "transpose=clock";
            if (rotateDegree == 270) {
                filter_str = "transpose=cclock";
            }
            init_ffmpeg_filters(state, filter_str, state->pFormatCtx, codecCtx);
        }

        if (state->buffersrc_ctx && state->buffersink_ctx && state->filter_graph) {
            int filter_ret = av_buffersrc_add_frame_flags(state->buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
            if (filter_ret >= 0) {
                AVFrame *dst_frame = av_frame_alloc();
                filter_ret = av_buffersink_get_frame(state->buffersink_ctx, dst_frame);
                if (filter_ret >= 0) {
                    /* The encoder must see the transposed dimensions. */
                    codecCtx->width = dst_frame->width;
                    codecCtx->height = dst_frame->height;
                    av_frame_free(&frame);
                    frame = dst_frame;
                } else {
                    av_frame_free(&dst_frame);   /* fixed: leaked on failure */
                }
            }
        }
    }

    /* NOTE(review): avcodec_encode_video2() is deprecated in newer FFmpeg;
     * migrate to avcodec_send_frame()/avcodec_receive_packet() eventually. */
    int ret = avcodec_encode_video2(codecCtx, avpkt, frame, got_packet_ptr);

    if (rotateDegree == 90 || rotateDegree == 270) {
        /* Restore the un-transposed dimensions for subsequent calls. */
        codecCtx->width = frame->height;
        codecCtx->height = frame->width;
    }

    if (ret >= 0 && state->native_window) {
        ANativeWindow_setBuffersGeometry(state->native_window, width, height, WINDOW_FORMAT_RGBA_8888);
        ANativeWindow_Buffer windowBuffer;

        if (ANativeWindow_lock(state->native_window, &windowBuffer, NULL) == 0) {
            /* Row-by-row copy; window stride is in pixels, 4 bytes each. */
            for (int h = 0; h < height; h++) {
                memcpy((uint8_t *) windowBuffer.bits + h * windowBuffer.stride * 4,
                       buffer + h * frame->linesize[0],
                       width * 4);
            }

            ANativeWindow_unlockAndPost(state->native_window);
        }
    }

    if (ret < 0) {
        *got_packet_ptr = 0;
    }
    av_frame_free(&frame);
    /* fixed: buffer comes from av_malloc and must be released with av_free,
     * not free() — mixing allocators is undefined behavior */
    av_free(buffer);
    if (ret < 0 || !*got_packet_ptr) {
        av_packet_unref(avpkt);
    }
}
||||
|
||||
/*
 * Copy the first attached picture (cover art) found in the open source into
 * pkt. If the picture is not already PNG/MJPEG/BMP-with-RGBA, it is decoded
 * and re-encoded to PNG via convert_image().
 * Returns SUCCESS when a packet was produced, FAILURE otherwise.
 */
int get_embedded_picture(State **ps, AVPacket *pkt) {
    int i = 0;
    int got_packet = 0;
    AVFrame *frame = NULL;

    State *state = *ps;

    if (!state || !state->pFormatCtx) {
        return FAILURE;
    }

    // find the first attached picture, if available
    for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
        if (state->pFormatCtx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
            if (pkt) {
                av_packet_unref(pkt);
                av_init_packet(pkt);
            }
            /* NOTE(review): pkt is dereferenced unconditionally below even
             * though it is null-checked just above — callers are evidently
             * assumed to always pass a non-NULL packet; confirm. */
            av_packet_ref(pkt, &state->pFormatCtx->streams[i]->attached_pic);
            got_packet = 1;

            if (pkt->stream_index == state->video_stream) {
                int codec_id = state->video_st->codecpar->codec_id;
                int pix_fmt = state->video_st->codecpar->format;

                /* Unsupported storage format: decode + re-encode to PNG. */
                if (!is_supported_format(codec_id, pix_fmt)) {
                    int got_frame = 0;

                    frame = av_frame_alloc();

                    if (!frame) {
                        break;
                    }

                    if (avcodec_decode_video2(state->video_st->codec, frame, &got_frame, pkt) <= 0) {
                        break;
                    }

                    if (got_frame) {
                        AVPacket convertedPkt;
                        av_init_packet(&convertedPkt);
                        convertedPkt.size = 0;
                        convertedPkt.data = NULL;

                        /* -1/-1: keep the source dimensions */
                        convert_image(state, state->video_st->codec, frame, &convertedPkt, &got_packet, -1, -1);

                        /* Replace pkt's contents with the converted PNG. */
                        av_packet_unref(pkt);
                        av_init_packet(pkt);
                        av_packet_ref(pkt, &convertedPkt);

                        av_packet_unref(&convertedPkt);

                        break;
                    }
                } else {
                    /* Already in a supported format: re-ref the attached
                     * picture directly (refreshing the ref taken above). */
                    av_packet_unref(pkt);
                    av_init_packet(pkt);
                    av_packet_ref(pkt, &state->pFormatCtx->streams[i]->attached_pic);

                    got_packet = 1;
                    break;
                }
            }
        }
    }

    av_frame_free(&frame);

    if (got_packet) {
        return SUCCESS;
    } else {
        return FAILURE;
    }
}
||||
|
||||
/*
 * Read packets from the demuxer until a video frame is obtained, then convert
 * it (scaled to width x height, or source size when -1) into pkt via
 * convert_image(). When desired_frame_number >= 0, frames with a smaller pts
 * are skipped until one at/after that timestamp is decoded.
 * *got_frame reports whether pkt holds a converted frame on return.
 */
void decode_frame(State *state, AVPacket *pkt, int *got_frame, int64_t desired_frame_number, int width, int height) {
    *got_frame = 0;
    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        return;
    }

    while (av_read_frame(state->pFormatCtx, pkt) >= 0) {

        if (pkt->stream_index == state->video_stream) {
            int codec_id = state->video_st->codecpar->codec_id;
            int pix_fmt = state->video_st->codecpar->format;

            if (!is_supported_format(codec_id, pix_fmt)) {
                *got_frame = 0;

                if (avcodec_decode_video2(state->video_st->codec, frame, got_frame, pkt) <= 0) {
                    *got_frame = 0;
                    break;
                }

                if (*got_frame) {
                    /* Accept the frame only once we reach the target pts
                     * (or immediately when no target was requested). */
                    if (desired_frame_number == -1 ||
                        (desired_frame_number != -1 && frame->pts >= desired_frame_number)) {
                        if (pkt->data) {
                            av_packet_unref(pkt);
                        }
                        av_init_packet(pkt);
                        /* Re-uses pkt as the output packet for the PNG. */
                        convert_image(state, state->video_st->codec, frame, pkt, got_frame, width, height);
                        break;
                    }
                }
            } else {
                /* Stream already stores frames in a usable format: the raw
                 * packet just read is returned as-is. */
                *got_frame = 1;
                break;
            }
        }
    }

    av_frame_free(&frame);
}
||||
|
||||
/* Convenience wrapper: fetch the frame at timeUs without scaling. */
int get_frame_at_time(State **ps, int64_t timeUs, int option, AVPacket *pkt) {
    const int keep_source_size = -1;
    return get_scaled_frame_at_time(ps, timeUs, option, pkt,
                                    keep_source_size, keep_source_size);
}
||||
|
||||
/*
 * Seek to timeUs (microseconds; -1 = current position) and extract one frame,
 * converted to PNG and scaled to width x height (-1 = source size), into pkt.
 * `option` follows the Options enum: OPTION_CLOSEST decodes forward from the
 * previous keyframe to the exact timestamp; the *_SYNC options stop at a
 * keyframe. Returns SUCCESS when a frame was produced, FAILURE otherwise.
 */
int get_scaled_frame_at_time(State **ps, int64_t timeUs, int option, AVPacket *pkt, int width, int height) {
    int flags = 0;
    int ret = -1;
    int got_packet = 0;
    State *state = *ps;
    Options opt = option;
    int64_t desired_frame_number = -1;

    if (!state || !state->pFormatCtx || state->video_stream < 0) {
        return FAILURE;
    }

    if (timeUs > -1) {
        int stream_index = state->video_stream;
        /* Convert from AV_TIME_BASE (microseconds) to the stream time base. */
        int64_t seek_time = av_rescale_q(timeUs, AV_TIME_BASE_Q, state->pFormatCtx->streams[stream_index]->time_base);
        int64_t seek_stream_duration = state->pFormatCtx->streams[stream_index]->duration;

        /* Clamp to the stream's end (duration may be unset/negative). */
        if (seek_stream_duration > 0 && seek_time > seek_stream_duration) {
            seek_time = seek_stream_duration;
        }
        if (seek_time < 0) {
            return FAILURE;
        }

        if (opt == OPTION_CLOSEST) {
            /* Seek to the keyframe before the target, then decode forward
             * to the exact timestamp inside decode_frame(). */
            desired_frame_number = seek_time;
            flags = AVSEEK_FLAG_BACKWARD;
        } else if (opt == OPTION_CLOSEST_SYNC) {
            flags = 0;
        } else if (opt == OPTION_NEXT_SYNC) {
            flags = 0;
        } else if (opt == OPTION_PREVIOUS_SYNC) {
            flags = AVSEEK_FLAG_BACKWARD;
        }

        ret = av_seek_frame(state->pFormatCtx, stream_index, seek_time, flags);

        if (ret < 0) {
            return FAILURE;
        } else {
            /* Drop any buffered frames that predate the seek point. */
            if (state->audio_stream >= 0) {
                avcodec_flush_buffers(state->audio_st->codec);
            }

            if (state->video_stream >= 0) {
                avcodec_flush_buffers(state->video_st->codec);
            }
        }
    }

    decode_frame(state, pkt, &got_packet, desired_frame_number, width, height);

    if (got_packet) {
        return SUCCESS;
    } else {
        return FAILURE;
    }
}
||||
|
||||
/*
 * Attach a native window (preview surface) to the retriever, creating the
 * State lazily if necessary. Returns FAILURE for a NULL window.
 */
int set_native_window(State **ps, ANativeWindow* native_window) {
    if (native_window == NULL) {
        return FAILURE;
    }

    State *state = *ps;
    if (state == NULL) {
        init_ffmpeg(&state);
    }

    state->native_window = native_window;
    *ps = state;

    return SUCCESS;
}
||||
|
||||
/*
 * Tear down everything owned by *ps: codecs, demuxer, file descriptor,
 * swscale contexts, encoder contexts, native window and filter graph,
 * then free the State itself (av_freep sets *ps to NULL).
 * Safe to call with *ps == NULL.
 */
void release(State **ps) {

    State *state = *ps;

    if (state) {
        if (state->audio_st && state->audio_st->codec) {
            avcodec_close(state->audio_st->codec);
        }

        if (state->video_st && state->video_st->codec) {
            avcodec_close(state->video_st->codec);
        }

        if (state->pFormatCtx) {
            avformat_close_input(&state->pFormatCtx);
        }

        /* fd is the dup()'ed descriptor created in set_data_source_fd */
        if (state->fd != -1) {
            close(state->fd);
        }

        if (state->sws_ctx) {
            sws_freeContext(state->sws_ctx);
            state->sws_ctx = NULL;
        }

        if (state->codecCtx) {
            avcodec_close(state->codecCtx);
            av_free(state->codecCtx);
        }

        if (state->scaled_codecCtx) {
            avcodec_close(state->scaled_codecCtx);
            av_free(state->scaled_codecCtx);
        }

        if (state->scaled_sws_ctx) {
            sws_freeContext(state->scaled_sws_ctx);
        }

        if (state->native_window != NULL) {
            ANativeWindow_release(state->native_window);
            state->native_window = NULL;
        }

        /* Free the individual filter contexts before the graph.
         * NOTE(review): avfilter_graph_free also frees contained filters —
         * confirm this does not double-free buffersrc/buffersink. */
        if (state->buffersrc_ctx) {
            avfilter_free(state->buffersrc_ctx);
        }
        if (state->buffersink_ctx) {
            avfilter_free(state->buffersink_ctx);
        }
        if (state->filter_graph) {
            avfilter_graph_free(&state->filter_graph);
        }

        av_freep(&state);
    }
}
@ -0,0 +1,60 @@ |
||||
/*
|
||||
* Created by frank on 2022/2/23 |
||||
* |
||||
* part of code from William Seemann |
||||
*/ |
||||
|
||||
#ifndef FFMPEG_MEDIA_RETRIEVER_H_ |
||||
#define FFMPEG_MEDIA_RETRIEVER_H_ |
||||
|
||||
#include <libavcodec/avcodec.h> |
||||
#include <libavformat/avformat.h> |
||||
#include <libavutil/dict.h> |
||||
|
||||
#include <android/native_window_jni.h> |
||||
#include <libavfilter/avfilter.h> |
||||
#include <libavfilter/buffersrc.h> |
||||
#include <libavfilter/buffersink.h> |
||||
|
||||
/*
 * Frame-seek modes for get_frame_at_time()/get_scaled_frame_at_time().
 * NOTE(review): values appear to mirror Android MediaMetadataRetriever's
 * OPTION_* constants — confirm against the Java-side definitions.
 */
typedef enum {
    OPTION_PREVIOUS_SYNC = 0,   // keyframe at or before the requested time
    OPTION_NEXT_SYNC = 1,       // keyframe after the requested time
    OPTION_CLOSEST_SYNC = 2,    // nearest keyframe
    OPTION_CLOSEST = 3,         // exact frame (decode forward from keyframe)
} Options;
||||
|
||||
/* Everything the retriever holds for one open media source. */
typedef struct State {
    AVFormatContext *pFormatCtx;      // demuxer context; NULL when nothing open
    int audio_stream;                 // selected audio stream index, -1 if none
    int video_stream;                 // selected video stream index, -1 if none
    AVStream *audio_st;
    AVStream *video_st;
    int fd;                           // dup()'ed descriptor for fd sources, -1 otherwise
    int64_t offset;                   // initial byte offset to skip for fd sources
    const char *headers;
    AVCodecContext *codecCtx;         // PNG encoder at native resolution
    AVCodecContext *scaled_codecCtx;  // PNG encoder at caller-requested resolution

    ANativeWindow *native_window;     // optional surface for preview rendering

    // Lazily-built rotation ("transpose") filter graph.
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;

    struct SwsContext *sws_ctx;         // RGBA converter at native resolution
    struct SwsContext *scaled_sws_ctx;  // RGBA converter at requested resolution
} State;
||||
|
||||
/*
 * WARNING: this mirrors FFmpeg's *private* definition of AVDictionary
 * (libavutil/dict.c) so its fields can be accessed directly.
 * NOTE(review): fragile — breaks silently if the linked FFmpeg version
 * changes this layout; confirm it matches the bundled FFmpeg.
 */
struct AVDictionary {
    int count;
    AVDictionaryEntry *elems;
};
||||
|
||||
int set_data_source(State **ps, const char* path); |
||||
int set_data_source_fd(State **ps, int fd, int64_t offset, int64_t length); |
||||
const char* extract_metadata(State **ps, const char* key); |
||||
int get_frame_at_time(State **ps, int64_t timeUs, int option, AVPacket *pkt); |
||||
int get_scaled_frame_at_time(State **ps, int64_t timeUs, int option, AVPacket *pkt, int width, int height); |
||||
int set_native_window(State **ps, ANativeWindow* native_window); |
||||
void release(State **ps); |
||||
|
||||
#endif /*FFMPEG_MEDIA_RETRIEVER_H_*/ |
@ -0,0 +1,54 @@ |
||||
/*
|
||||
* Created by frank on 2022/2/23 |
||||
* |
||||
* part of code from William Seemann |
||||
*/ |
||||
|
||||
#include <media_retriever.h> |
||||
|
||||
// Construct with no media source; the native State is created lazily
// by the first set_data_source*/set_native_window call.
MediaRetriever::MediaRetriever() : state(nullptr) {
}
||||
|
||||
// Release all native resources (demuxer, codecs, window, filter graph)
// under the lock; ::release also nulls out `state`.
MediaRetriever::~MediaRetriever() {
    Mutex::Autolock _l(mLock);
    ::release(&state);
}
||||
|
||||
int MediaRetriever::setDataSource(const char *srcUrl) |
||||
{ |
||||
Mutex::Autolock _l(mLock); |
||||
return ::set_data_source(&state, srcUrl); |
||||
} |
||||
|
||||
int MediaRetriever::setDataSource(int fd, int64_t offset, int64_t length) |
||||
{ |
||||
Mutex::Autolock _l(mLock); |
||||
return ::set_data_source_fd(&state, fd, offset, length); |
||||
} |
||||
|
||||
const char* MediaRetriever::extractMetadata(const char *key) |
||||
{ |
||||
Mutex::Autolock _l(mLock); |
||||
return ::extract_metadata(&state, key); |
||||
} |
||||
|
||||
int MediaRetriever::getFrameAtTime(int64_t timeUs, int option, AVPacket *pkt) |
||||
{ |
||||
Mutex::Autolock _l(mLock); |
||||
return ::get_frame_at_time(&state, timeUs, option, pkt); |
||||
} |
||||
|
||||
int MediaRetriever::getScaledFrameAtTime(int64_t timeUs, int option, AVPacket *pkt, int width, int height) |
||||
{ |
||||
Mutex::Autolock _l(mLock); |
||||
return ::get_scaled_frame_at_time(&state, timeUs, option, pkt, width, height); |
||||
} |
||||
|
||||
// Attach a native window used for frame preview rendering.
// Thread-safe via mLock.
int MediaRetriever::setNativeWindow(ANativeWindow* native_window) {
    Mutex::Autolock _l(mLock);
    return ::set_native_window(&state, native_window);
}
@ -0,0 +1,39 @@ |
||||
/*
|
||||
* Created by frank on 2022/2/23 |
||||
* |
||||
* part of code from William Seemann |
||||
*/ |
||||
|
||||
#ifndef MEDIA_RETRIEVER_H |
||||
#define MEDIA_RETRIEVER_H |
||||
|
||||
#include <Mutex.h> |
||||
|
||||
#ifdef __cplusplus |
||||
extern "C" { |
||||
#endif |
||||
|
||||
#include "ffmpeg_media_retriever.h" |
||||
|
||||
#ifdef __cplusplus |
||||
} |
||||
#endif |
||||
|
||||
// C++ facade over the C retriever (ffmpeg_media_retriever.h): owns one
// State* and serializes every native call behind a single mutex.
class MediaRetriever
{
    State* state;   // native retriever state; created lazily, freed in dtor
public:
    MediaRetriever();
    ~MediaRetriever();
    int setDataSource(const char* dataSourceUrl);
    int setDataSource(int fd, int64_t offset, int64_t length);
    const char* extractMetadata(const char* key);
    int getFrameAtTime(int64_t timeUs, int option, AVPacket *pkt);
    int getScaledFrameAtTime(int64_t timeUs, int option, AVPacket *pkt, int width, int height);
    int setNativeWindow(ANativeWindow* native_window);

private:
    Mutex mLock;    // guards all access to `state`
};
||||
|
||||
#endif // MEDIA_RETRIEVER_H
|
@ -0,0 +1,283 @@ |
||||
/*
|
||||
* Created by frank on 2022/2/23 |
||||
* |
||||
* part of code from William Seemann |
||||
*/ |
||||
|
||||
#include <android/log.h> |
||||
#include <media_retriever.h> |
||||
#include "jni.h" |
||||
|
||||
#include <android/bitmap.h> |
||||
|
||||
#define LOG_TAG "FFmpegMediaRetrieverJNI" |
||||
|
||||
extern "C" { |
||||
#include "ffmpeg_media_retriever.h" |
||||
} |
||||
|
||||
using namespace std; |
||||
|
||||
struct fields_t { |
||||
jfieldID context; |
||||
}; |
||||
|
||||
static fields_t fields; |
||||
static ANativeWindow* theNativeWindow; |
||||
static const char* kClassPathName = "com/frank/ffmpeg/metadata/FFmpegMediaRetriever"; |
||||
|
||||
/*
 * Build a java.lang.String from raw bytes via the String(byte[], String)
 * constructor with "UTF-8" — safer than JNI NewStringUTF for data that may
 * not be valid modified-UTF-8. Returns nullptr on allocation failure
 * (with a Java OutOfMemoryError pending).
 */
static jstring NewStringUTF(JNIEnv* env, const char * data) {
    jstring str = nullptr;
    int size = (int) strlen(data);
    jbyteArray array = env->NewByteArray(size);
    if (!array) {
        __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "convertString: OutOfMemoryError is thrown.");
        /* fixed: previously fell through to DeleteLocalRef(nullptr) */
        return nullptr;
    }

    jbyte* bytes = env->GetByteArrayElements(array, nullptr);
    if (bytes != nullptr) {
        memcpy(bytes, data, size);
        env->ReleaseByteArrayElements(array, bytes, 0);

        jclass string_Clazz = env->FindClass("java/lang/String");
        jmethodID string_initMethodID = env->GetMethodID(string_Clazz, "<init>", "([BLjava/lang/String;)V");
        jstring utf = env->NewStringUTF("UTF-8");
        str = (jstring) env->NewObject(string_Clazz, string_initMethodID, array, utf);

        env->DeleteLocalRef(utf);
        env->DeleteLocalRef(string_Clazz);
    }
    env->DeleteLocalRef(array);

    return str;
}
||||
|
||||
/*
 * Throw a Java exception of the given class with the given message.
 * msg may be nullptr (the exception's default message is used).
 */
void jniThrowException(JNIEnv* env, const char* className,
                       const char* msg) {
    jclass exception = env->FindClass(className);
    if (exception == nullptr) {
        /* fixed: ThrowNew on a null class is undefined; FindClass has
         * already raised NoClassDefFoundError, so just propagate that */
        return;
    }
    env->ThrowNew(exception, msg);
}
||||
|
||||
/*
 * Translate a native retriever status into a Java exception:
 *   -2 -> IllegalStateException, -1 -> `exception` with `message` (plus the
 * status code appended when it fits). Other statuses are treated as success.
 */
static void process_retriever_call(JNIEnv *env, int opStatus, const char* exception, const char *message)
{
    if (opStatus == -2) {
        jniThrowException(env, "java/lang/IllegalStateException", nullptr);
    } else if (opStatus == -1) {
        if (message == nullptr || strlen(message) > 520) {
            jniThrowException(env, exception, message);
        } else {
            char msg[256];
            /* fixed: sprintf could overflow msg[256] for messages between
             * ~230 and 520 bytes; snprintf truncates safely */
            snprintf(msg, sizeof(msg), "%s: status = 0x%X", message, opStatus);
            jniThrowException(env, exception, msg);
        }
    }
}
||||
|
||||
// Read the native MediaRetriever* previously stored in the Java object's
// mNativeContext field (see setRetriever).
static MediaRetriever* getRetriever(JNIEnv* env, jobject thiz)
{
    return (MediaRetriever*) env->GetLongField(thiz, fields.context);
}
||||
|
||||
/**
 * Store the native MediaRetriever pointer into the Java object's
 * mNativeContext (long) field. Passing 0 clears it.
 * The previous value is NOT freed here; ownership stays with the caller
 * (see native_release). The original fetched the old pointer into an unused
 * local, which has been removed.
 */
static void setRetriever(JNIEnv* env, jobject thiz, long retriever)
{
    env->SetLongField(thiz, fields.context, retriever);
}
||||
|
||||
/**
 * JNI: allocate the native MediaRetriever and stash its address in the
 * Java object. Freed later by native_release().
 */
static void native_setup(JNIEnv *env, jobject thiz)
{
    setRetriever(env, thiz, (long) new MediaRetriever());
}
||||
|
||||
/**
 * JNI: one-time initialization. Caches the field ID of the Java-side
 * mNativeContext (long) holder, then registers FFmpeg demuxers/codecs and
 * enables network protocols. Silently returns if the class or field cannot
 * be resolved (a Java exception is then pending).
 */
static void native_init(JNIEnv *env, jobject thiz)
{
    jclass clazz = env->FindClass(kClassPathName);
    if (clazz == nullptr) {
        return;
    }

    fields.context = env->GetFieldID(clazz, "mNativeContext", "J");
    if (!fields.context) {
        return;
    }

    av_register_all();
    avformat_network_init();
}
||||
|
||||
/**
 * JNI: set the data source from a file path / URL string.
 * Throws IllegalStateException when no native retriever exists,
 * IllegalArgumentException for a null path or a failing setDataSource().
 */
static void native_set_dataSource(JNIEnv *env, jobject thiz, jstring path) {
    MediaRetriever* retriever = getRetriever(env, thiz);
    if (!retriever) {
        jniThrowException(env, "java/lang/IllegalStateException", "No retriever available");
        return;
    }
    if (path == nullptr) {
        jniThrowException(env, "java/lang/IllegalArgumentException", "Null pointer");
        return;
    }

    const char *pathStr = env->GetStringUTFChars(path, nullptr);
    if (pathStr == nullptr) {
        // GetStringUTFChars already threw OutOfMemoryError.
        return;
    }

    int status = retriever->setDataSource(pathStr);
    env->ReleaseStringUTFChars(path, pathStr);

    process_retriever_call(
            env,
            status,
            "java/lang/IllegalArgumentException",
            "setDataSource failed");
}
||||
|
||||
/**
 * Extract the raw UNIX fd from a java.io.FileDescriptor by reading its
 * private "descriptor" int field. Returns -1 when the class, field, or
 * object is unavailable.
 */
static int jniGetFDFromFileDescriptor(JNIEnv * env, jobject fileDescriptor) {
    jclass fdClass = env->FindClass("java/io/FileDescriptor");
    if (fdClass == nullptr) {
        return -1;
    }

    jfieldID descriptorField = env->GetFieldID(fdClass, "descriptor", "I");
    if (descriptorField == nullptr || fileDescriptor == nullptr) {
        return -1;
    }

    return env->GetIntField(fileDescriptor, descriptorField);
}
||||
|
||||
/**
 * JNI: set the data source from an open file descriptor plus byte range.
 * Validates fd/offset/length, throwing IllegalArgumentException on bad input
 * and RuntimeException when the native setDataSource fails.
 */
static void native_set_dataSourceFD(JNIEnv *env, jobject thiz, jobject fileDescriptor, jlong offset, jlong length)
{
    MediaRetriever* retriever = getRetriever(env, thiz);
    if (retriever == nullptr) {
        jniThrowException(env, "java/lang/IllegalStateException", "No retriever available");
        return;
    }
    if (!fileDescriptor) {
        jniThrowException(env, "java/lang/IllegalArgumentException", nullptr);
        return;
    }
    int fd = jniGetFDFromFileDescriptor(env, fileDescriptor);
    if (offset < 0 || length < 0 || fd < 0) {
        // jlong is 64-bit (long long on Android); the original logged it with
        // %ld, which is undefined behavior on 32-bit ABIs.
        if (offset < 0) {
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "negative offset (%lld)", (long long) offset);
        }
        if (length < 0) {
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "negative length (%lld)", (long long) length);
        }
        if (fd < 0) {
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "invalid file descriptor");
        }
        jniThrowException(env, "java/lang/IllegalArgumentException", nullptr);
        return;
    }
    process_retriever_call(env, retriever->setDataSource(fd, offset, length), "java/lang/RuntimeException", "setDataSource failed");
}
||||
|
||||
/**
 * JNI: attach an android.view.Surface for frame rendering. The ANativeWindow
 * acquired from the surface is kept in the file-scope global theNativeWindow.
 */
static void native_set_surface(JNIEnv *env, jclass thiz, jobject surface)
{
    MediaRetriever* retriever = getRetriever(env, thiz);
    if (retriever == nullptr) {
        jniThrowException(env, "java/lang/IllegalStateException", "No retriever available");
        return;
    }

    // ANativeWindow_fromSurface acquires a new reference on every call; the
    // original overwrote the global without releasing the old window, leaking
    // one reference per call.
    if (theNativeWindow != nullptr) {
        ANativeWindow_release(theNativeWindow);
        theNativeWindow = nullptr;
    }

    theNativeWindow = ANativeWindow_fromSurface(env, surface);

    if (theNativeWindow != nullptr) {
        retriever->setNativeWindow(theNativeWindow);
    }
}
||||
|
||||
/**
 * JNI: look up a metadata value by key and return it as a Java String,
 * or null when the key is absent. Throws IllegalStateException /
 * IllegalArgumentException for a missing retriever / null key.
 */
static jobject native_extract_metadata(JNIEnv *env, jobject thiz, jstring jkey)
{
    MediaRetriever* retriever = getRetriever(env, thiz);
    if (retriever == nullptr) {
        jniThrowException(env, "java/lang/IllegalStateException", "No retriever available");
        return nullptr;
    }
    if (!jkey) {
        jniThrowException(env, "java/lang/IllegalArgumentException", "Null pointer");
        return nullptr;
    }
    const char *key = env->GetStringUTFChars(jkey, nullptr);
    if (!key) {
        return nullptr;
    }
    const char* value = retriever->extractMetadata(key);
    // Release unconditionally: the original skipped this on the !value path,
    // leaking the UTF chars for every miss.
    env->ReleaseStringUTFChars(jkey, key);
    if (!value) {
        return nullptr;
    }
    return NewStringUTF(env, value);
}
||||
|
||||
/**
 * JNI: grab the frame nearest timeUs (seek behavior controlled by `option`,
 * forwarded to MediaRetriever::getFrameAtTime) and return its bytes as a
 * Java byte[], or null on failure / OOM.
 */
static jbyteArray native_get_frameAtTime(JNIEnv *env, jobject thiz, jlong timeUs, jint option)
{
    MediaRetriever* retriever = getRetriever(env, thiz);
    if (retriever == nullptr) {
        jniThrowException(env, "java/lang/IllegalStateException", "No retriever available");
        return nullptr;
    }

    // The packet's data buffer is filled by the retriever; unref'd below on
    // all paths so the FFmpeg-owned buffer is never leaked.
    AVPacket packet;
    av_init_packet(&packet);
    jbyteArray array = nullptr;

    if (retriever->getFrameAtTime(timeUs, option, &packet) == 0) {
        int size = packet.size;
        uint8_t* data = packet.data;
        array = env->NewByteArray(size);
        if (!array) {
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "getFrameAtTime: OutOfMemoryError is thrown.");
        } else {
            jbyte* bytes = env->GetByteArrayElements(array, nullptr);
            if (bytes != nullptr) {
                // Copy frame bytes into the Java array and commit with mode 0.
                memcpy(bytes, data, size);
                env->ReleaseByteArrayElements(array, bytes, 0);
            }
        }
    }

    av_packet_unref(&packet);

    return array;
}
||||
|
||||
/**
 * JNI: grab the frame nearest timeUs, scaled to width x height, and return
 * its bytes as a Java byte[], or null on failure / OOM. Mirrors
 * native_get_frameAtTime with scaling.
 */
static jbyteArray native_get_scaleFrameAtTime(JNIEnv *env, jobject thiz, jlong timeUs, jint option,
                                              jint width, jint height)
{
    MediaRetriever* retriever = getRetriever(env, thiz);
    if (retriever == nullptr) {
        jniThrowException(env, "java/lang/IllegalStateException", "No retriever available");
        return nullptr;
    }

    AVPacket packet;
    av_init_packet(&packet);
    jbyteArray array = nullptr;

    if (retriever->getScaledFrameAtTime(timeUs, option, &packet, width, height) == 0) {
        int size = packet.size;
        uint8_t* data = packet.data;
        array = env->NewByteArray(size);
        if (!array) {
            // Log message previously said "getFrameAtTime" (copy-paste),
            // pointing debuggers at the wrong function.
            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "getScaledFrameAtTime: OutOfMemoryError is thrown.");
        } else {
            jbyte* bytes = env->GetByteArrayElements(array, nullptr);
            if (bytes != nullptr) {
                memcpy(bytes, data, size);
                env->ReleaseByteArrayElements(array, bytes, 0);
            }
        }
    }

    av_packet_unref(&packet);

    return array;
}
||||
|
||||
/**
 * JNI: destroy the native MediaRetriever and clear the Java-side pointer.
 * Safe to call when no retriever was set (delete on nullptr is a no-op).
 */
static void native_release(JNIEnv *env, jobject thiz)
{
    MediaRetriever* instance = getRetriever(env, thiz);
    setRetriever(env, thiz, 0);
    delete instance;
}
@ -0,0 +1,125 @@ |
||||
/*
|
||||
* Created by frank on 2022/2/23 |
||||
* |
||||
* part of code from William Seemann |
||||
*/ |
||||
|
||||
#include "metadata_util.h"

#include <inttypes.h>
#include <stdio.h>
||||
|
||||
/* Store the container duration, in milliseconds, under the DURATION key.
 * Writes "0" when the demuxer reported no duration. */
void set_duration(AVFormatContext *ic) {
    char value[30] = "0";
    int64_t duration = 0;

    /* The original guarded the read with `if (ic)` but then dereferenced
     * ic->metadata unconditionally — a null-pointer crash for ic == NULL. */
    if (!ic) {
        return;
    }

    if (ic->duration != AV_NOPTS_VALUE) {
        /* int64_t throughout: the old `long` truncated on 32-bit ABIs. */
        duration = (ic->duration / AV_TIME_BASE) * 1000;
    }

    snprintf(value, sizeof(value), "%" PRId64, duration);
    av_dict_set(&ic->metadata, DURATION, value, 0);
}
||||
|
||||
/* Store the media file size (bytes) under the FILESIZE key; "-1" when the
 * I/O context is missing or the size is unknown. */
void set_filesize(AVFormatContext *ic) {
    char value[30] = "0";

    /* Guard added for consistency with set_duration/set_mimetype; the
     * original dereferenced ic->pb with no null check on ic. */
    if (!ic) {
        return;
    }

    int64_t size = ic->pb ? avio_size(ic->pb) : -1;
    /* snprintf bounds the write (an int64 always fits in 30 bytes, but
     * bounded formatting is the house rule). */
    snprintf(value, sizeof(value), "%" PRId64, size);
    av_dict_set(&ic->metadata, FILESIZE, value, 0);
}
||||
|
||||
/* Record the demuxer's short name (e.g. "mov,mp4,m4a,...") under MIME_TYPE.
 * No-op when the input format was not identified. */
void set_mimetype(AVFormatContext *ic) {
    if (ic->iformat == NULL || ic->iformat->name == NULL) {
        return;
    }
    av_dict_set(&ic->metadata, MIME_TYPE, ic->iformat->name, 0);
}
||||
|
||||
/* Record the codec name of stream index i under AUDIO_CODEC or VIDEO_CODEC,
 * depending on its media type. Streams of other types are ignored.
 * Caller is responsible for i being a valid stream index. */
void set_codec(AVFormatContext *ic, int i) {
    AVCodecParameters *par = ic->streams[i]->codecpar;

    const char *media_type = av_get_media_type_string(par->codec_type);
    if (media_type == NULL) {
        return;
    }

    const char *name = avcodec_get_name(par->codec_id);

    if (strcmp(media_type, "audio") == 0) {
        av_dict_set(&ic->metadata, AUDIO_CODEC, name, 0);
    } else if (strcmp(media_type, "video") == 0) {
        av_dict_set(&ic->metadata, VIDEO_CODEC, name, 0);
    }
}
||||
|
||||
/* Record the video stream's pixel dimensions under VIDEO_WIDTH and
 * VIDEO_HEIGHT. No-op when there is no video stream. */
void set_video_resolution(AVFormatContext *ic, AVStream *video_st) {
    char value[30] = "0";

    if (!video_st) {
        return;
    }

    sprintf(value, "%d", video_st->codecpar->width);
    av_dict_set(&ic->metadata, VIDEO_WIDTH, value, 0);

    sprintf(value, "%d", video_st->codecpar->height);
    av_dict_set(&ic->metadata, VIDEO_HEIGHT, value, 0);
}
||||
|
||||
/* Copy the container-level metadata dictionary into *metadata.
 * Returns SUCCESS (0), or FAILURE (-1) when ic is NULL. */
int get_metadata_internal(AVFormatContext *ic, AVDictionary **metadata) {
    if (ic == NULL) {
        return FAILURE;
    }

    av_dict_copy(metadata, ic->metadata, 0);
    return SUCCESS;
}
||||
|
||||
/* Ensure a ROTATE entry exists in the container metadata. If no rotate value
 * is discoverable via the generic lookup, copy the video stream's own rotate
 * tag (defaulting to "0") into ic->metadata. */
void set_rotation(AVFormatContext *ic, AVStream *audio_st, AVStream *video_st) {
    if (extract_metadata_internal(ic, audio_st, video_st, ROTATE)) {
        return; /* already resolvable; nothing to do */
    }
    if (!video_st || !video_st->metadata) {
        return;
    }

    AVDictionaryEntry *entry = av_dict_get(video_st->metadata, ROTATE, NULL, AV_DICT_MATCH_CASE);
    const char *rotation = (entry && entry->value) ? entry->value : "0";
    av_dict_set(&ic->metadata, ROTATE, rotation, 0);
}
||||
|
||||
/* Record the video stream's average frame rate under FRAMERATE, using the
 * same pretty-printing scheme as FFmpeg's stream dump: two decimals when the
 * rate is fractional (e.g. "29.97"), a bare integer otherwise, and a "k"
 * suffix for very large rates. No-op when there is no usable video stream.
 * audio_st is unused here; it is part of the signature shared with the
 * other set_* helpers. */
void set_framerate(AVFormatContext *ic, AVStream *audio_st, AVStream *video_st) {
    char value[30] = "0";

    if (video_st && video_st->avg_frame_rate.den && video_st->avg_frame_rate.num) {
        double d = av_q2d(video_st->avg_frame_rate);
        /* Scale by 100 to test for a fractional part in the hundredths. */
        uint64_t v = lrintf((float)d * 100);
        if (v % 100) {
            sprintf(value, "%3.2f", d);
        } else if (v % (100 * 1000)) {
            sprintf(value, "%1.0f", d);
        } else {
            sprintf(value, "%1.0fk", d / 1000);
        }

        av_dict_set(&ic->metadata, FRAMERATE, value, 0);
    }
}
||||
|
||||
/* Look up `key` in the container metadata first, then in the audio stream's
 * metadata, then the video stream's. Returns the stored value (owned by the
 * dictionary — do not free) or NULL when absent.
 * Each dictionary is now queried once; the original performed every
 * av_dict_get twice (once in the condition, once for the value). */
const char* extract_metadata_internal(AVFormatContext *ic, AVStream *audio_st, AVStream *video_st, const char* key) {
    if (!ic || !key) {
        return NULL;
    }

    AVDictionaryEntry *entry = av_dict_get(ic->metadata, key, NULL, AV_DICT_MATCH_CASE);
    if (!entry && audio_st) {
        entry = av_dict_get(audio_st->metadata, key, NULL, AV_DICT_MATCH_CASE);
    }
    if (!entry && video_st) {
        entry = av_dict_get(video_st->metadata, key, NULL, AV_DICT_MATCH_CASE);
    }

    return entry ? entry->value : NULL;
}
@ -0,0 +1,37 @@ |
||||
/*
|
||||
* Created by frank on 2022/2/23 |
||||
* |
||||
* part of code from William Seemann |
||||
*/ |
||||
|
||||
#ifndef METADATA_UTIL_H_
#define METADATA_UTIL_H_

#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"

/* Keys written into AVFormatContext::metadata by the set_* helpers below.
 * NOTE(review): `static const char *` in a header gives every translation
 * unit its own copy of each pointer; harmless for these lookup keys, but
 * confirm this duplication is intentional. */
static const char *DURATION = "duration";
static const char *AUDIO_CODEC = "audio_codec";
static const char *VIDEO_CODEC = "video_codec";
static const char *ROTATE = "rotate";
static const char *FRAMERATE = "framerate";
static const char *FILESIZE = "filesize";
static const char *VIDEO_WIDTH = "video_width";
static const char *VIDEO_HEIGHT = "video_height";
static const char *MIME_TYPE = "mime_type";

/* Return codes used by get_metadata_internal(). */
static const int SUCCESS = 0;
static const int FAILURE = -1;

/* Each set_* helper computes one property and stores it in ic->metadata
 * under the matching key above. */
void set_duration(AVFormatContext *ic);
void set_filesize(AVFormatContext *ic);
void set_mimetype(AVFormatContext *ic);
void set_codec(AVFormatContext *ic, int i);
void set_video_resolution(AVFormatContext *ic, AVStream *video_st);
/* Copies ic->metadata into *metadata; returns SUCCESS or FAILURE. */
int get_metadata_internal(AVFormatContext *ic, AVDictionary **metadata);
void set_rotation(AVFormatContext *ic, AVStream *audio_st, AVStream *video_st);
void set_framerate(AVFormatContext *ic, AVStream *audio_st, AVStream *video_st);
/* Looks up `key` in container, then audio, then video metadata; NULL if absent. */
const char* extract_metadata_internal(AVFormatContext *ic, AVStream *audio_st, AVStream *video_st, const char* key);

#endif /*METADATA_UTIL_H_*/
Loading…
Reference in new issue