Fancy code 2024-07-11 14:23:09 +08:00
parent 4c6cf898cf
commit 19aa5e1599
7 changed files with 54 additions and 17 deletions

View File

@@ -18,8 +18,11 @@
 #include "MediaBase.h"
 #include "StatusCode.h"
 #include <memory>
+#include <stdio.h>
+#include <time.h>
 CameraHalTest::CameraHalTest(const CameraType &cameraType)
-    : mCameraType(cameraType), mReadH264File(nullptr), mReadG711aFile(nullptr), mTaskRuning(false)
+    : mCameraType(cameraType), mReadH264File(nullptr), mReadG711aFile(nullptr), mTaskRuning(false),
+      mVideoTimeStamp_us(0), mAudioTimeStamp_us(0)
 {
 }
 void CameraHalTest::Init(void)
@@ -128,11 +131,17 @@ void CameraHalTest::MockReportMediaStream(void)
 }
 void CameraHalTest::ReadDataFromH264File(const void *stream, const unsigned int length)
 {
-    GetVideoStream(stream, length, 0);
+    struct timespec ts;
+    clock_gettime(CLOCK_REALTIME, &ts);
+    long microseconds = ts.tv_sec * 1000000L + ts.tv_nsec / 1000;
+    GetVideoStream(stream, length, microseconds);
 }
 void CameraHalTest::ReadDataFromG711aFile(const void *stream, const unsigned int length)
 {
-    GetAudioStream(stream, length, 0);
+    struct timespec ts;
+    clock_gettime(CLOCK_REALTIME, &ts);
+    long microseconds = ts.tv_sec * 1000000L + ts.tv_nsec / 1000;
+    GetAudioStream(stream, length, microseconds);
 }
 CameraHalMock::CameraHalMock(const CameraType &cameraType) : CameraHalTest(cameraType)
 {
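
The two hunks above stamp each mock capture with a CLOCK_REALTIME reading. One caveat: the wall clock can jump backwards (NTP corrections, manual changes), which would produce non-monotonic stream timestamps. A minimal sketch of the same microsecond helper on CLOCK_MONOTONIC instead; the function name is illustrative, not part of this commit:

#include <time.h>

// Hypothetical helper: microsecond timestamps from the monotonic clock,
// which, unlike CLOCK_REALTIME, never jumps backwards while the system runs.
static long long MonotonicMicroseconds(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return static_cast<long long>(ts.tv_sec) * 1000000LL + ts.tv_nsec / 1000;
}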

View File

@@ -50,6 +50,8 @@ protected:
     std::condition_variable mCv;
     bool mTaskRuning;
     std::thread mTaskTimerThread;
+    unsigned long long mVideoTimeStamp_us;
+    unsigned long long mAudioTimeStamp_us;
 };
 class CameraHalMock : public CameraHalTest
 {

View File

@@ -63,12 +63,15 @@ bool FfmpegDecoder::Init(void)
     /* check that the encoder supports s16 pcm input */
     mCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
     if (!check_sample_fmt(mCodec, mCodecCtx->sample_fmt)) {
-        LogError("Encoder does not support sample format %s", av_get_sample_fmt_name(mCodecCtx->sample_fmt));
+        LogError("decoder does not support sample format %s", av_get_sample_fmt_name(mCodecCtx->sample_fmt));
         return false;
     }
     /* select other audio parameters supported by the encoder */
     mCodecCtx->sample_rate = select_sample_rate(mCodec);
+    LogInfo("decoder sample_rate:%d\n", mCodecCtx->sample_rate);
+    // const AVChannelLayout src = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
+    // av_channel_layout_copy(&mCodecCtx->ch_layout, &src);
     ret = select_channel_layout(mCodec, &(mCodecCtx->ch_layout));
     if (ret < 0) {
         LogError("Could not set channel layout\n");
@@ -96,8 +99,8 @@ bool FfmpegDecoder::Init(void)
         return false;
     }
     if (AVMEDIA_TYPE_AUDIO == mCodec->type) {
-        mFrame->nb_samples = mCodecCtx->frame_size;
-        mFrame->format = mCodecCtx->sample_fmt;
+        // mFrame->nb_samples = mCodecCtx->frame_size;
+        // mFrame->format = mCodecCtx->sample_fmt;
         ret = av_channel_layout_copy(&(mFrame->ch_layout), &(mCodecCtx->ch_layout));
         if (ret < 0) {
             LogError("Could not copy channel layout\n");
@@ -124,14 +127,16 @@ bool FfmpegDecoder::UnInit(void)
     }
     return true;
 }
-void FfmpegDecoder::DecodeData(const void *data, const size_t &size, std::function<void(AVFrame *frame)> callback)
+void FfmpegDecoder::DecodeData(const void *data, const size_t &size, const unsigned long long &pts,
+                               std::function<void(AVFrame *frame)> callback)
 {
     if (nullptr == mParser) {
         mPacket->data = (uint8_t *)data;
         mPacket->size = size;
         // mPacket->stream_index = 0;
-        mPacket->pts = AV_NOPTS_VALUE;
-        mPacket->dts = AV_NOPTS_VALUE;
+        mPacket->pts = pts;
+        mPacket->dts = mPacket->pts;
+        LogInfo("source data mPacket->pts:%d\n", mPacket->pts);
         AVDecodeData(mPacket, callback);
         return;
     }
@@ -209,6 +214,7 @@ void inline FfmpegDecoder::AVDecodeData(AVPacket *pkt, std::function<void(AVFrame *frame)> callback)
             // // fwrite(frame->data[ch] + data_size * i, 1, data_size, outfile);
             // save_code_stream_file(mFrame->data[ch] + data_size * i, data_size);
             // save_code_stream_file(mFrame->data[0], mFrame->linesize[0]);
+            LogInfo("decode frame pts = %llu, nb_samples = %d\n", mFrame->pts, mFrame->nb_samples);
             callback(mFrame);
         }
         break;
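
For context on how the pts assigned to mPacket reaches the decoded frames: FFmpeg's standard send/receive decode loop carries packet timestamps through to AVFrame::pts. A self-contained sketch of that loop (function name is illustrative):

extern "C" {
#include <libavcodec/avcodec.h>
}

// Sketch of the standard FFmpeg decode loop: the pts set on the packet
// before avcodec_send_packet() is propagated to frame->pts by the codec.
static int DecodeOnePacket(AVCodecContext *ctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0) {
        return ret; // error submitting the packet
    }
    while (ret >= 0) {
        ret = avcodec_receive_frame(ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return 0; // decoder needs more input, or is fully drained
        }
        if (ret < 0) {
            return ret; // decoding error
        }
        // frame->pts now carries the timestamp fed in via pkt->pts;
        // hand the frame to the consumer here, then release the reference.
        av_frame_unref(frame);
    }
    return 0;
}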

View File

@@ -40,7 +40,8 @@ public:
     virtual ~FfmpegDecoder() = default;
     bool Init(void);
     bool UnInit(void);
-    void DecodeData(const void *data, const size_t &size, std::function<void(AVFrame *frame)> callback);
+    void DecodeData(const void *data, const size_t &size, const unsigned long long &pts,
+                    std::function<void(AVFrame *frame)> callback);

 private:
     void AVParseData(const void *data, const size_t &size, std::function<void(AVFrame *frame)> callback);

View File

@@ -68,7 +68,7 @@ bool FfmpegEncoder::Init(int &outputFlags)
         LogError("Could not alloc an encoding context\n");
         return false;
     }
-    const AVChannelLayout src = (AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO;
+    const AVChannelLayout src = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
     switch (mCodec->type) {
     case AVMEDIA_TYPE_AUDIO:
         mCodecCtx->sample_fmt = mCodec->sample_fmts ? mCodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
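
The AV_CHANNEL_LAYOUT_MONO macro here belongs to the AVChannelLayout API (FFmpeg 5.1+). It expands to a compound initializer, so it is typically materialized in a local and then deep-copied into the codec context. A brief sketch of that pattern, assuming a valid codecCtx (the helper name is illustrative):

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
}

// Sketch: apply a mono layout to a codec context; av_channel_layout_copy()
// performs a deep copy and returns 0 on success.
static bool SetMonoLayout(AVCodecContext *codecCtx)
{
    const AVChannelLayout mono = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
    return av_channel_layout_copy(&codecCtx->ch_layout, &mono) == 0;
}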
@@ -220,7 +220,12 @@ int FfmpegEncoder::EncodeData(AVFrame *frame, AVStream *stream, std::function<vo
     /* rescale output packet timestamp values from codec to stream timebase */
     av_packet_rescale_ts(mTmpPkt, mCodecCtx->time_base, stream->time_base);
+    // LogInfo("Write mCodecCtx->time_base.num: %d\n", mCodecCtx->time_base.num);
+    // LogInfo("Write mCodecCtx->time_base.den: %d\n", mCodecCtx->time_base.den);
+    // LogInfo("Write stream->time_base.num: %d\n", stream->time_base.num);
+    // LogInfo("Write stream->time_base.den: %d\n", stream->time_base.den);
     mTmpPkt->stream_index = stream->index;
+    LogInfo("Write frame mTmpPkt->pts: %llu\n", mTmpPkt->pts);

     if (callback) {
         // save_code_stream_file(mTmpPkt->data, mTmpPkt->size);
@@ -320,8 +325,9 @@ AVFrame *FfmpegEncoder::ConvertAudioFrame(AVFrame *decodeFrame, struct SwrContext
         LogError("decodeFrame is null\n");
         return nullptr;
     }
-    decodeFrame->pts = next_pts;
-    next_pts += decodeFrame->nb_samples;
+    LogInfo("decodeFrame->pts = %d\n", decodeFrame->pts);
+    // decodeFrame->pts = next_pts;
+    // next_pts += decodeFrame->nb_samples;
     int ret = 0;
     int dst_nb_samples = 0;
     /* convert samples from native format to destination codec format, using the resampler */
@@ -350,7 +356,9 @@ AVFrame *FfmpegEncoder::ConvertAudioFrame(AVFrame *decodeFrame, struct SwrContext
         return nullptr;
     }
-    mFrame->pts = av_rescale_q(mSamplesCount, (AVRational){1, mCodecCtx->sample_rate}, mCodecCtx->time_base);
+    mFrame->pts = av_rescale_q(decodeFrame->pts, (AVRational){1, SOURCE_AUDIO_SAMPEL_RATE}, mCodecCtx->time_base);
+    LogInfo("decodeFrame->pts = %d\n", decodeFrame->pts);
+    LogInfo("mFrame->pts = %d\n", mFrame->pts);
     mSamplesCount += dst_nb_samples;
     return mFrame;
 }
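
The rescale above maps a pts counted in source audio samples into the encoder's time_base: av_rescale_q(a, bq, cq) computes a * bq / cq with rounding. A quick worked example, assuming an 8 kHz source purely for illustration:

extern "C" {
#include <libavutil/mathematics.h>
}
#include <cstdio>

int main(void)
{
    // 1600 samples at 1/8000 s per tick = 0.2 s; expressed in a
    // microsecond time_base (1/1000000) that is 200000 ticks.
    int64_t us = av_rescale_q(1600, (AVRational){1, 8000}, (AVRational){1, 1000000});
    std::printf("%lld\n", static_cast<long long>(us)); // prints 200000
    return 0;
}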

View File

@@ -64,7 +64,11 @@ bool FfmpegOutputStream::Init(AVFormatContext *outputFormat)
         mEncoder->OpenEncoder(nullptr, mStream);
     }
     else {
-        mStream->time_base = (AVRational){1, 15};
+        /**
+         * @brief No need to set time_base here; ffmpeg corrects it automatically
+         * (the muxer may overwrite it when the header is written).
+         */
+        // mStream->time_base = (AVRational){1, 15};
         mStream->codecpar->codec_id = AV_CODEC_ID_H264;
         mStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
         mStream->codecpar->width = 1920;
@@ -92,7 +96,7 @@ void FfmpegOutputStream::UnInit(void)
 void FfmpegOutputStream::WriteSourceData(const void *data, const size_t &size, const unsigned long long &pts)
 {
     if (mDecoder) {
-        mDecoder->DecodeData(data, size, mDecodeCallback);
+        mDecoder->DecodeData(data, size, pts, mDecodeCallback);
         return;
     }
     AVPacket *tmpPkt = av_packet_alloc();
@@ -105,7 +109,9 @@ void FfmpegOutputStream::WriteSourceData(const void *data, const size_t &size, const unsigned long long &pts)
     constexpr int64_t ZERO_MEANS_UNKNOWN = 0;
     tmpPkt->duration = ZERO_MEANS_UNKNOWN;
     // tmpPkt->pts = u64Interval * 1000; // convert to us
-    tmpPkt->pts = av_rescale_q(u64Interval, (AVRational){1, 15}, mStream->time_base);
+    tmpPkt->pts = av_rescale_q(pts, (AVRational){1, 15}, mStream->time_base);
+    // LogInfo("pts:%llu, duration:%d\n", tmpPkt->pts, tmpPkt->duration);
+    // tmpPkt->pts = pts;
     u64Interval++;
     tmpPkt->dts = tmpPkt->pts;
     /* copy packet */
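
The commented-out time_base assignment in the hunk at -64 relies on the behaviour sketched below: avformat_write_header() lets the muxer replace AVStream::time_base, so any value set before the header is written is only a hint, and later av_packet_rescale_ts() calls must target the stream's final time_base. A sketch under that assumption (helper name and out-parameter are illustrative):

extern "C" {
#include <libavformat/avformat.h>
}

// Sketch: write the container header and report whether the muxer
// replaced the caller's time_base hint. Returns false on write failure.
static bool WriteHeaderChecked(AVFormatContext *fmtCtx, AVStream *stream, bool *corrected)
{
    const AVRational requested = stream->time_base; // hint set by the caller
    if (avformat_write_header(fmtCtx, nullptr) < 0) {
        return false;
    }
    // If the muxer corrected the time_base, subsequent packet rescaling
    // must use stream->time_base, not the requested value.
    *corrected = (av_cmp_q(requested, stream->time_base) != 0);
    return true;
}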

View File

@@ -123,8 +123,13 @@ void FfmpegReadFile::ReadFileThread(AVFormatContext *pFormatCtx, int mediaStreamIndex)
         if (packet.stream_index == mediaStreamIndex) {
             playTimeMs = (packet.duration * pFormatCtx->streams[mediaStreamIndex]->time_base.num * 1000) /
                          pFormatCtx->streams[mediaStreamIndex]->time_base.den;
+            // AVRational time_base = pFormatCtx->streams[mediaStreamIndex]->time_base;
+            // int64_t duration_ms = av_rescale_q(packet.duration, time_base, {1, AV_TIME_BASE}) * 1000;
             // LogInfo("Frame data address: %p, length: %zu\n", packet.data, packet.size);
             // LogInfo("Play time ms:%d\n", playTimeMs);
+            // static unsigned long long timeAmout = 0;
+            // timeAmout += playTimeMs;
+            // LogInfo("Time amout: %llu\n", timeAmout);
             // LogInfo("time base: num = %d, den = %d\n",
             //         pFormatCtx->streams[mediaStreamIndex]->time_base.num,
             //         pFormatCtx->streams[mediaStreamIndex]->time_base.den);
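
A note on the commented-out alternative above: rescaling to {1, AV_TIME_BASE} yields microseconds, so the extra * 1000 would overshoot a millisecond value. Rescaling straight into a millisecond tick is simpler; a sketch (helper name is illustrative):

extern "C" {
#include <libavutil/mathematics.h>
}

// Sketch: convert a packet duration from the stream time_base directly to
// milliseconds; (AVRational){1, 1000} means "ticks of one millisecond".
static int64_t DurationMs(int64_t packetDuration, AVRational streamTimeBase)
{
    return av_rescale_q(packetDuration, streamTimeBase, (AVRational){1, 1000});
}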