summaryrefslogtreecommitdiff
path: root/D139699.diff
diff options
context:
space:
mode:
Diffstat (limited to 'D139699.diff')
-rw-r--r--  D139699.diff  127
1 file changed, 127 insertions, 0 deletions
diff --git a/D139699.diff b/D139699.diff
new file mode 100644
index 0000000..a4be4c5
--- /dev/null
+++ b/D139699.diff
@@ -0,0 +1,127 @@
+diff --git a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+--- a/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
++++ b/dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
+@@ -206,15 +206,49 @@
+ media::TimeUnit pts = aSample->mTime;
+
+ while (packet.size > 0) {
+- int decoded;
+- int bytesConsumed =
++ int decoded = false;
++ int bytesConsumed = -1;
++#if LIBAVCODEC_VERSION_MAJOR < 59
++ bytesConsumed =
+ mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
+-
+ if (bytesConsumed < 0) {
+ NS_WARNING("FFmpeg audio decoder error.");
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+ RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
+ }
++#else
++# define AVERROR_OK 0
++
++ int ret = mLib->avcodec_receive_frame(mCodecContext, mFrame);
++ switch (ret) {
++ case AVERROR_OK:
++ decoded = true;
++ break;
++ case AVERROR(EAGAIN):
++ break;
++ case int(AVERROR_EOF): {
++ FFMPEG_LOG(" End of stream.");
++ return MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
++ RESULT_DETAIL("End of stream"));
++ }
++ }
++ ret = mLib->avcodec_send_packet(mCodecContext, &packet);
++ switch (ret) {
++ case AVERROR_OK:
++ bytesConsumed = packet.size;
++ break;
++ case AVERROR(EAGAIN):
++ break;
++ case int(AVERROR_EOF):
++ FFMPEG_LOG(" End of stream.");
++ return MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
++ RESULT_DETAIL("End of stream"));
++ default:
++ NS_WARNING("FFmpeg audio decoder error.");
++ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
++ RESULT_DETAIL("FFmpeg audio error"));
++ }
++#endif
+
+ if (decoded) {
+ if (mFrame->format != AV_SAMPLE_FMT_FLT &&
+diff --git a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
++++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+@@ -39,6 +39,9 @@
+ # define AV_PIX_FMT_YUV444P10LE PIX_FMT_YUV444P10LE
+ # define AV_PIX_FMT_NONE PIX_FMT_NONE
+ #endif
++#if LIBAVCODEC_VERSION_MAJOR > 58
++# define AV_PIX_FMT_VAAPI_VLD AV_PIX_FMT_VAAPI
++#endif
+ #include "mozilla/PodOperations.h"
+ #include "mozilla/StaticPrefs_media.h"
+ #include "mozilla/TaskQueue.h"
+@@ -766,6 +769,14 @@
+ }
+ #endif
+
++static int64_t GetFramePts(AVFrame* aFrame) {
++#if LIBAVCODEC_VERSION_MAJOR > 58
++ return aFrame->pts;
++#else
++ return aFrame->pkt_pts;
++#endif
++}
++
+ MediaResult FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
+ MediaRawData* aSample, uint8_t* aData, int aSize, bool* aGotFrame,
+ MediaDataDecoder::DecodedData& aResults) {
+@@ -831,7 +842,7 @@
+ MediaResult rv;
+ # ifdef MOZ_WAYLAND_USE_VAAPI
+ if (IsHardwareAccelerated()) {
+- rv = CreateImageVAAPI(mFrame->pkt_pos, mFrame->pkt_pts,
++ rv = CreateImageVAAPI(mFrame->pkt_pos, GetFramePts(mFrame),
+ mFrame->pkt_duration, aResults);
+ // If VA-API playback failed, just quit. Decoder is going to be restarted
+ // without VA-API.
+@@ -844,8 +855,8 @@
+ } else
+ # endif
+ {
+- rv = CreateImage(mFrame->pkt_pos, mFrame->pkt_pts, mFrame->pkt_duration,
+- aResults);
++ rv = CreateImage(mFrame->pkt_pos, GetFramePts(mFrame),
++ mFrame->pkt_duration, aResults);
+ }
+ if (NS_FAILED(rv)) {
+ return rv;
+@@ -879,9 +890,9 @@
+ "DoDecodeFrame:decode_video: rv=%d decoded=%d "
+ "(Input: pts(%" PRId64 ") dts(%" PRId64 ") Output: pts(%" PRId64
+ ") "
+- "opaque(%" PRId64 ") pkt_pts(%" PRId64 ") pkt_dts(%" PRId64 "))",
++ "opaque(%" PRId64 ") pts(%" PRId64 ") pkt_dts(%" PRId64 "))",
+ bytesConsumed, decoded, packet.pts, packet.dts, mFrame->pts,
+- mFrame->reordered_opaque, mFrame->pkt_pts, mFrame->pkt_dts);
++ mFrame->reordered_opaque, mFrame->pts, mFrame->pkt_dts);
+
+ if (bytesConsumed < 0) {
+ return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
+@@ -896,7 +907,8 @@
+ }
+
+ // If we've decoded a frame then we need to output it
+- int64_t pts = mPtsContext.GuessCorrectPts(mFrame->pkt_pts, mFrame->pkt_dts);
++ int64_t pts =
++ mPtsContext.GuessCorrectPts(GetFramePts(mFrame), mFrame->pkt_dts);
+ // Retrieve duration from dts.
+ // We use the first entry found matching this dts (this is done to
+ // handle damaged file with multiple frames with the same dts)
+
bgstack15