Backup:use hardware jpeg encode.
This commit is contained in:
parent
64f7777c5e
commit
fefc76b1cd
|
@ -76,21 +76,13 @@ StatusCode MainThread::Init(void)
|
|||
}
|
||||
StatusCode MainThread::UnInit(void)
|
||||
{
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.1\n");
|
||||
IMissionManager::GetInstance()->UnInit();
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.2\n");
|
||||
IMediaManager::GetInstance()->UnInit();
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.3\n");
|
||||
IIpcConfig::GetInstance()->UnInit();
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.4\n");
|
||||
IStorageManager::GetInstance()->UnInit();
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.5\n");
|
||||
IMcuManager::GetInstance()->UnInit();
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.6\n");
|
||||
IDeviceManager::GetInstance()->UnInit();
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.7\n");
|
||||
IHalCpp::GetInstance()->UnInit();
|
||||
LogInfo("uuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu.8\n");
|
||||
DestoryAllModules();
|
||||
ILogUnInit();
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
|
|
|
@ -67,6 +67,8 @@ MediaTaskType MediaTaskHandle::WorkModeConvert(const WorkMode &mode)
|
|||
switch (mode) {
|
||||
case WorkMode::MODE_PIC:
|
||||
return MediaTaskType::TAKE_PICTURE;
|
||||
case WorkMode::MODE_VIDEO:
|
||||
return MediaTaskType::TAKE_VIDEO;
|
||||
case WorkMode::MODE_PIC_VIDEO:
|
||||
return MediaTaskType::TAKE_PICTURE_AND_VIDEO;
|
||||
|
||||
|
|
|
@ -72,7 +72,7 @@ StatusCode VCameraTaskContext::TaskFinished(void)
|
|||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION.\n");
|
||||
return CreateStatusCode(STATUS_CODE_VIRTUAL_FUNCTION);
|
||||
}
|
||||
camera_task_param::camera_task_param(const CameraTaskType &cameraTask) : mCameraTask(cameraTask)
|
||||
camera_task_param::camera_task_param(const CameraTaskType &cameraTask) : mTaskType(cameraTask)
|
||||
{
|
||||
mVideoRecordingTimeMs = DEFAULT_VIDEO_RECORDING_TIME_MS;
|
||||
}
|
||||
|
@ -100,6 +100,11 @@ StatusCode VCameraHal::SetVideoStreamCallback(VideoStreamCallback callback)
|
|||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION.\n");
|
||||
return CreateStatusCode(STATUS_CODE_VIRTUAL_FUNCTION);
|
||||
}
|
||||
StatusCode VCameraHal::SetJpegEncodeCallback(JpegEncodeCallback callback)
|
||||
{
|
||||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION.\n");
|
||||
return CreateStatusCode(STATUS_CODE_VIRTUAL_FUNCTION);
|
||||
}
|
||||
void VSdCardHalMonitor::ReportEvent(const SdCardHalStatus &status)
|
||||
{
|
||||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION.\n");
|
||||
|
|
|
@ -118,12 +118,13 @@ constexpr int DEFAULT_VIDEO_RECORDING_TIME_MS = 10 * 1000;
|
|||
typedef struct camera_task_param
|
||||
{
|
||||
camera_task_param(const CameraTaskType &cameraTask);
|
||||
const CameraTaskType mCameraTask;
|
||||
const CameraTaskType mTaskType;
|
||||
unsigned int mVideoRecordingTimeMs; // TODO: delete?
|
||||
std::shared_ptr<VCameraTaskContext> mCtx;
|
||||
} CameraTaskParam;
|
||||
using AudioStreamCallback = std::function<void(const void *, const unsigned int &, const unsigned long long &)>;
|
||||
using VideoStreamCallback = std::function<void(const void *, const unsigned int &, const unsigned long long &)>;
|
||||
using JpegEncodeCallback = std::function<void(const void *, const unsigned int &, const unsigned long long &)>;
|
||||
class VCameraHal
|
||||
{
|
||||
public:
|
||||
|
@ -134,6 +135,7 @@ public:
|
|||
virtual StatusCode StopTask(void);
|
||||
virtual StatusCode SetAudioStreamCallback(AudioStreamCallback callback);
|
||||
virtual StatusCode SetVideoStreamCallback(VideoStreamCallback callback);
|
||||
virtual StatusCode SetJpegEncodeCallback(JpegEncodeCallback callback);
|
||||
};
|
||||
class VSdCardHalMonitor
|
||||
{
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
#include <stdio.h>
|
||||
CameraHal::CameraHal()
|
||||
: mTaskRuning(false), mAudioStreamCallback(nullptr), mVideoStreamCallback(nullptr), mVideoFile(nullptr),
|
||||
mAudioFile(nullptr)
|
||||
mAudioFile(nullptr), mTaskType(CameraTaskType::END)
|
||||
{
|
||||
}
|
||||
void CameraHal::Init(void)
|
||||
|
@ -32,6 +32,7 @@ void CameraHal::UnInit(void)
|
|||
StatusCode CameraHal::StartSingleTask(const CameraTaskParam ¶m)
|
||||
{
|
||||
LogInfo("StartSingleTask.\n");
|
||||
// mTaskType = param.mTaskType;
|
||||
mTaskRuning = true;
|
||||
fx_system_v2("rm -f " SD_CARD_MOUNT_PATH "/chip.g711a");
|
||||
fx_system_v2("rm -f " SD_CARD_MOUNT_PATH "/chip.h264");
|
||||
|
@ -50,6 +51,7 @@ StatusCode CameraHal::StopTask(void)
|
|||
fclose(mVideoFile);
|
||||
mVideoFile = nullptr;
|
||||
}
|
||||
// mTaskType = CameraTaskType::END;
|
||||
fx_system_v2("sync");
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
}
|
||||
|
@ -63,6 +65,11 @@ StatusCode CameraHal::SetVideoStreamCallback(VideoStreamCallback callback)
|
|||
mVideoStreamCallback = callback;
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
}
|
||||
StatusCode CameraHal::SetJpegEncodeCallback(JpegEncodeCallback callback)
|
||||
{
|
||||
mJpegEncodeCallback = callback;
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
}
|
||||
void CameraHal::GetAudioStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
if (mTaskRuning && nullptr != mAudioStreamCallback) {
|
||||
|
@ -77,6 +84,12 @@ void CameraHal::GetVideoStream(const void *stream, const unsigned int &length, c
|
|||
mVideoStreamCallback(stream, length, timeStamp);
|
||||
}
|
||||
}
|
||||
void CameraHal::GetJpegData(const void *stream, const unsigned int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
if (mTaskRuning && nullptr != mJpegEncodeCallback) {
|
||||
mJpegEncodeCallback(stream, length, timeStamp);
|
||||
}
|
||||
}
|
||||
void CameraHal::SaveChipStream(const ChipStreamType &streamType, const void *stream, const unsigned int &length)
|
||||
{
|
||||
FILE *file = nullptr;
|
||||
|
|
|
@ -37,10 +37,12 @@ protected:
|
|||
StatusCode StopTask(void) override;
|
||||
StatusCode SetAudioStreamCallback(AudioStreamCallback callback) override;
|
||||
StatusCode SetVideoStreamCallback(VideoStreamCallback callback) override;
|
||||
StatusCode SetJpegEncodeCallback(JpegEncodeCallback callback) override;
|
||||
|
||||
protected:
|
||||
void GetAudioStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp);
|
||||
void GetVideoStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp);
|
||||
void GetJpegData(const void *stream, const unsigned int &length, const unsigned long long &timeStamp);
|
||||
void SaveChipStream(const ChipStreamType &streamType, const void *stream, const unsigned int &length);
|
||||
|
||||
private:
|
||||
|
@ -48,10 +50,12 @@ private:
|
|||
bool mTaskRuning;
|
||||
AudioStreamCallback mAudioStreamCallback;
|
||||
VideoStreamCallback mVideoStreamCallback;
|
||||
JpegEncodeCallback mJpegEncodeCallback;
|
||||
/**
|
||||
* @brief Each time a media task is executed, the original data stream of the chip is saved to the SD card.
|
||||
*/
|
||||
FILE *mVideoFile; ///< The original video stream data is saved.
|
||||
FILE *mAudioFile; ///< The original audio stream data is saved.
|
||||
CameraTaskType mTaskType;
|
||||
};
|
||||
#endif
|
|
@ -46,6 +46,7 @@ enum class IpcConfigKey
|
|||
enum class WorkMode
|
||||
{
|
||||
MODE_PIC = 0,
|
||||
MODE_VIDEO,
|
||||
MODE_PIC_VIDEO,
|
||||
END,
|
||||
};
|
||||
|
|
|
@ -72,18 +72,14 @@ const StatusCode McuManagerImpl::Init(void)
|
|||
}
|
||||
const StatusCode McuManagerImpl::UnInit(void)
|
||||
{
|
||||
LogInfo("=============================== 00\n");
|
||||
McuDevice::UnInit();
|
||||
LogInfo("=============================== 11\n");
|
||||
McuProtocol::UnInit();
|
||||
LogInfo("=============================== 22\n");
|
||||
mMcuAskList.clear();
|
||||
mMonitor.reset();
|
||||
mMutex.lock();
|
||||
mWatchDogRuning = false;
|
||||
mCv.notify_one();
|
||||
mMutex.unlock();
|
||||
LogInfo("=============================== 33\n");
|
||||
if (mWatchDogThread.joinable()) {
|
||||
mWatchDogThread.join();
|
||||
}
|
||||
|
|
|
@ -40,14 +40,14 @@ MediaHandle::MediaHandle(const MediaChannel &mediaChannel, const std::shared_ptr
|
|||
}
|
||||
void MediaHandle::Init(void)
|
||||
{
|
||||
if (mCameraHal == nullptr) {
|
||||
LogError("CameraHal is null.\n");
|
||||
return;
|
||||
}
|
||||
auto audioFunc = std::bind(&MediaHandle::GetAudioStreamCallback, this, _1, _2, _3);
|
||||
mCameraHal->SetAudioStreamCallback(audioFunc);
|
||||
auto videoFunc = std::bind(&MediaHandle::GetVideoStreamCallback, this, _1, _2, _3);
|
||||
mCameraHal->SetVideoStreamCallback(videoFunc);
|
||||
// if (mCameraHal == nullptr) {
|
||||
// LogError("CameraHal is null.\n");
|
||||
// return;
|
||||
// }
|
||||
// auto audioFunc = std::bind(&MediaHandle::GetAudioStreamCallback, this, _1, _2, _3);
|
||||
// mCameraHal->SetAudioStreamCallback(audioFunc);
|
||||
// auto videoFunc = std::bind(&MediaHandle::GetVideoStreamCallback, this, _1, _2, _3);
|
||||
// mCameraHal->SetVideoStreamCallback(videoFunc);
|
||||
}
|
||||
void MediaHandle::UnInit(void)
|
||||
{
|
||||
|
@ -67,6 +67,7 @@ void MediaHandle::UnInit(void)
|
|||
*/
|
||||
mCameraHal->SetAudioStreamCallback(nullptr);
|
||||
mCameraHal->SetVideoStreamCallback(nullptr);
|
||||
mCameraHal->SetJpegEncodeCallback(nullptr);
|
||||
}
|
||||
}
|
||||
StatusCode MediaHandle::ExecuteTask(std::shared_ptr<VMediaTask> &task)
|
||||
|
@ -94,7 +95,7 @@ StatusCode MediaHandle::ExecuteTask(std::shared_ptr<VMediaTask> &task)
|
|||
}
|
||||
CameraTaskType taskType = TaskTypeConvert(task->GetTaskType());
|
||||
CameraTaskParam data(taskType);
|
||||
auto code = mCameraHal->StartSingleTask(data);
|
||||
auto code = StartMediaTask(data);
|
||||
if (IsCodeOK(code)) {
|
||||
mCurrentTask = task;
|
||||
StartTaskTimer();
|
||||
|
@ -212,6 +213,9 @@ void inline MediaHandle::HandleListFrame(void)
|
|||
else if (FrameType::AUDIO == frontFrame.mType) {
|
||||
mStreamHandle->GetAudioStream(frontFrame.mData, frontFrame.mLength, frontFrame.mTimeStamp_us);
|
||||
}
|
||||
else if (FrameType::JPEG == frontFrame.mType) {
|
||||
mStreamHandle->GetJpegData(frontFrame.mData, frontFrame.mLength, frontFrame.mTimeStamp_us);
|
||||
}
|
||||
free(frontFrame.mData);
|
||||
frontFrame.mData = nullptr;
|
||||
// mFrameList.pop_front();
|
||||
|
@ -230,7 +234,18 @@ void inline MediaHandle::HandleListFrame(void)
|
|||
}
|
||||
CameraTaskType MediaHandle::TaskTypeConvert(const MediaTaskType &type)
|
||||
{
|
||||
return CameraTaskType::END;
|
||||
switch (type) {
|
||||
case MediaTaskType::TAKE_PICTURE:
|
||||
return CameraTaskType::PICTURE;
|
||||
case MediaTaskType::TAKE_VIDEO:
|
||||
return CameraTaskType::VIDEO;
|
||||
case MediaTaskType::TAKE_PICTURE_AND_VIDEO:
|
||||
return CameraTaskType::PICTURE_AND_VIDEO;
|
||||
|
||||
default:
|
||||
LogError("TaskTypeConvert error.\n");
|
||||
return CameraTaskType::END;
|
||||
}
|
||||
}
|
||||
void MediaHandle::GetVideoStreamCallback(const void *stream, const int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
|
@ -240,6 +255,16 @@ void MediaHandle::GetAudioStreamCallback(const void *stream, const int &length,
|
|||
{
|
||||
GetAVStream(FrameType::AUDIO, stream, length, timeStamp);
|
||||
}
|
||||
void MediaHandle::GetJpegDataCallback(const void *stream, const int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
/**
|
||||
* @brief If it is a screenshot, each task only needs to process one piece of image data.
|
||||
*
|
||||
*/
|
||||
if (MEDIA_TASK_NOT_START == mFirstFrameTimeStamp_us) {
|
||||
GetAVStream(FrameType::JPEG, stream, length, timeStamp);
|
||||
}
|
||||
}
|
||||
void MediaHandle::GetAVStream(const FrameType &type, const void *stream, const int &length,
|
||||
const unsigned long long &timeStamp_us)
|
||||
{
|
||||
|
@ -283,4 +308,32 @@ void MediaHandle::DeleteFrame(const OneFrameStream &frame)
|
|||
return KEEP_THE_FRAME;
|
||||
};
|
||||
mFrameList.remove_if(searchMcuAsk);
|
||||
}
|
||||
StatusCode MediaHandle::StartMediaTask(const CameraTaskParam ¶m)
|
||||
{
|
||||
if (mCameraHal == nullptr) {
|
||||
LogError("CameraHal is null.\n");
|
||||
return CreateStatusCode(STATUS_CODE_NOT_OK);
|
||||
}
|
||||
mCameraHal->SetAudioStreamCallback(nullptr);
|
||||
mCameraHal->SetVideoStreamCallback(nullptr);
|
||||
mCameraHal->SetJpegEncodeCallback(nullptr);
|
||||
auto audioFunc = std::bind(&MediaHandle::GetAudioStreamCallback, this, _1, _2, _3);
|
||||
auto videoFunc = std::bind(&MediaHandle::GetVideoStreamCallback, this, _1, _2, _3);
|
||||
auto jpegFunc = std::bind(&MediaHandle::GetJpegDataCallback, this, _1, _2, _3);
|
||||
switch (param.mTaskType) {
|
||||
case CameraTaskType::PICTURE:
|
||||
mCameraHal->SetJpegEncodeCallback(jpegFunc);
|
||||
break;
|
||||
case CameraTaskType::VIDEO:
|
||||
mCameraHal->SetAudioStreamCallback(audioFunc);
|
||||
mCameraHal->SetVideoStreamCallback(videoFunc);
|
||||
break;
|
||||
case CameraTaskType::PICTURE_AND_VIDEO:
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return mCameraHal->StartSingleTask(param);
|
||||
}
|
|
@ -28,6 +28,7 @@ enum class FrameType
|
|||
{
|
||||
VIDEO,
|
||||
AUDIO,
|
||||
JPEG,
|
||||
END
|
||||
};
|
||||
typedef struct one_frame_stream
|
||||
|
@ -61,6 +62,7 @@ private:
|
|||
CameraTaskType TaskTypeConvert(const MediaTaskType &type);
|
||||
void GetVideoStreamCallback(const void *stream, const int &length, const unsigned long long &timeStamp);
|
||||
void GetAudioStreamCallback(const void *stream, const int &length, const unsigned long long &timeStamp);
|
||||
void GetJpegDataCallback(const void *stream, const int &length, const unsigned long long &timeStamp);
|
||||
/**
|
||||
* @brief
|
||||
*
|
||||
|
@ -73,6 +75,7 @@ private:
|
|||
const unsigned long long &timeStamp_us);
|
||||
void ClearFrameList(void);
|
||||
void DeleteFrame(const OneFrameStream &frame);
|
||||
StatusCode StartMediaTask(const CameraTaskParam ¶m);
|
||||
|
||||
private:
|
||||
std::mutex mMutex;
|
||||
|
|
|
@ -73,7 +73,8 @@ void TakePicture::StopHandleStream(void)
|
|||
std::lock_guard<std::mutex> locker(mMutex);
|
||||
if (mTakePictureObject) {
|
||||
// OutputFileInfo finalFile = IGetOutputFileInfo(mTakePictureObject);
|
||||
MediaTaskResponse response("finalFile.mFileName", 0);
|
||||
std::string picturePath = mPictureTask->GetTargetNameForSaving();
|
||||
MediaTaskResponse response(picturePath, 0);
|
||||
mTaskResponse.push_back(response);
|
||||
ICloseJpegFile(mTakePictureObject);
|
||||
IMediaBaseFree(mTakePictureObject);
|
||||
|
@ -85,7 +86,16 @@ void TakePicture::GetVideoStream(const void *stream, const unsigned int &length,
|
|||
{
|
||||
std::lock_guard<std::mutex> locker(mMutex);
|
||||
if (mTakePictureObject) {
|
||||
IWriteJpegData(mTakePictureObject, stream, length);
|
||||
StreamInfo info = {.mType = STREAM_TYPE_VIDEO_H264, .mTimeStamp_us = timeStamp};
|
||||
IWriteJpegData(mTakePictureObject, stream, length, info);
|
||||
}
|
||||
}
|
||||
void TakePicture::GetJpegData(const void *stream, const unsigned int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
std::lock_guard<std::mutex> locker(mMutex);
|
||||
if (mTakePictureObject) {
|
||||
StreamInfo info = {.mType = STREAM_TYPE_JPEG, .mTimeStamp_us = timeStamp};
|
||||
IWriteJpegData(mTakePictureObject, stream, length, info);
|
||||
}
|
||||
}
|
||||
StatusCode TakePicture::GetAllFiles(std::vector<MediaTaskResponse> &files)
|
||||
|
|
|
@ -28,6 +28,7 @@ public:
|
|||
StatusCode UnInit(void) override;
|
||||
void StopHandleStream(void) override;
|
||||
void GetVideoStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp) override;
|
||||
void GetJpegData(const void *stream, const unsigned int &length, const unsigned long long &timeStamp) override;
|
||||
StatusCode GetAllFiles(std::vector<MediaTaskResponse> &files) override;
|
||||
bool HandleFinished(void) override;
|
||||
|
||||
|
|
|
@ -33,12 +33,16 @@ void VStreamHandle::StopHandleStream(void)
|
|||
}
|
||||
void VStreamHandle::GetVideoStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION\n");
|
||||
// LogWarning("STATUS_CODE_VIRTUAL_FUNCTION\n");
|
||||
}
|
||||
void VStreamHandle::GetAudioStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
// LogWarning("STATUS_CODE_VIRTUAL_FUNCTION\n");
|
||||
}
|
||||
void VStreamHandle::GetJpegData(const void *stream, const unsigned int &length, const unsigned long long &timeStamp)
|
||||
{
|
||||
// LogWarning("STATUS_CODE_VIRTUAL_FUNCTION\n");
|
||||
}
|
||||
StatusCode VStreamHandle::GetAllFiles(std::vector<MediaTaskResponse> &files)
|
||||
{
|
||||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION\n");
|
||||
|
|
|
@ -27,6 +27,7 @@ public:
|
|||
virtual void StopHandleStream(void);
|
||||
virtual void GetVideoStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp);
|
||||
virtual void GetAudioStream(const void *stream, const unsigned int &length, const unsigned long long &timeStamp);
|
||||
virtual void GetJpegData(const void *stream, const unsigned int &length, const unsigned long long &timeStamp);
|
||||
virtual StatusCode GetAllFiles(std::vector<MediaTaskResponse> &files);
|
||||
virtual bool HandleFinished(void);
|
||||
};
|
||||
|
|
|
@ -13,6 +13,7 @@ include_directories(
|
|||
${MIDDLEWARE_SOURCE_PATH}/McuManager/src
|
||||
${MIDDLEWARE_SOURCE_PATH}/DeviceManager/include
|
||||
${MIDDLEWARE_SOURCE_PATH}/DeviceManager/src
|
||||
${MIDDLEWARE_SOURCE_PATH}/IpcConfig/include
|
||||
${UTILS_SOURCE_PATH}/McuProtocol/include
|
||||
${UTILS_SOURCE_PATH}/UartDevice/include
|
||||
${UTILS_SOURCE_PATH}/LedControl/include
|
||||
|
@ -46,6 +47,7 @@ endif()
|
|||
set(TARGET_NAME HuntingCameraTest)
|
||||
add_executable(${TARGET_NAME} ${SRC_FILES_MAIN} ${SRC_FILES})
|
||||
target_link_libraries(${TARGET_NAME}# -Wl,--start-group
|
||||
IpcConfig
|
||||
HuntingMainLib MissionManagerTestTool McuManagerTestTool McuAskBaseTestTool
|
||||
AppManagerTestTool HalTestTool DeviceManagerTestTool IpcConfigTestTool TestManager
|
||||
# -Wl,--end-group
|
||||
|
|
|
@ -71,6 +71,7 @@ void HuntingCameraTest::TearDown()
|
|||
std::shared_ptr<LinuxApiMock> test = std::make_shared<LinuxApiMock>();
|
||||
LinuxApiMock::GetInstance(&test);
|
||||
McuManagerTestTool::UnInit();
|
||||
IpcConfigTestTool::UnInit();
|
||||
DeviceManagerTestTool::UnInit();
|
||||
DestroyAllCamerasMock();
|
||||
DestroyAllKeysMock();
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#include "GtestUsing.h"
|
||||
#include "HalTestTool.h"
|
||||
#include "HuntingCameraTest.h"
|
||||
#include "IIpcConfig.h"
|
||||
#include "ILog.h"
|
||||
#include "MainThread.h"
|
||||
#include "McuManagerTestTool.h"
|
||||
|
@ -61,6 +62,7 @@ TEST_F(HuntingCameraTest, INTEGRATION_HunttingCamera_EXAMPLE_MediaTask)
|
|||
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||
MainThread::GetInstance()->Init();
|
||||
TestManager::ResetTimeOut(1000 * 15);
|
||||
IIpcConfig::GetInstance()->SetWorkMode(WorkMode::MODE_VIDEO);
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||
HalTestTool::MockKeyClick("reset", 200); // Simulate pressing a button.
|
||||
MainThread::GetInstance()->Runing();
|
||||
|
@ -74,6 +76,7 @@ TEST_F(HuntingCameraTest, INTEGRATION_HunttingCamera_EXAMPLE_MediaTask_Twice)
|
|||
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||
MainThread::GetInstance()->Init();
|
||||
TestManager::ResetTimeOut(1000 * 25);
|
||||
IIpcConfig::GetInstance()->SetWorkMode(WorkMode::MODE_VIDEO);
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
||||
HalTestTool::MockKeyClick("reset", 200); // Simulate pressing a button.
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1000 * 15));
|
||||
|
|
|
@ -19,10 +19,11 @@
|
|||
#include "StatusCode.h"
|
||||
#include <memory>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
CameraHalTest::CameraHalTest(const CameraType &cameraType)
|
||||
: mCameraType(cameraType), mReadH264File(nullptr), mReadG711aFile(nullptr), mTaskRuning(false),
|
||||
mVideoTimeStamp_us(0), mAudioTimeStamp_us(0)
|
||||
mVideoTimeStamp_us(0), mAudioTimeStamp_us(0), mJpegData(nullptr), mJpegSize(0)
|
||||
{
|
||||
}
|
||||
void CameraHalTest::Init(void)
|
||||
|
@ -39,6 +40,10 @@ void CameraHalTest::UnInit(void)
|
|||
mTaskTimerThread.join();
|
||||
}
|
||||
ReadFileUnInit();
|
||||
if (mJpegData) {
|
||||
free(mJpegData);
|
||||
mJpegData = nullptr;
|
||||
}
|
||||
}
|
||||
void CameraHalTest::SetCameraMonitor(std::shared_ptr<VCameraHalMonitor> &monitor)
|
||||
{
|
||||
|
@ -98,17 +103,29 @@ void CameraHalTest::MockReportMediaStream(void)
|
|||
if (nullptr != mReadG711aFile) {
|
||||
IStartReadFile(mReadG711aFile, TEST_SOURCE_PATH "/support_test/chip.g711a");
|
||||
}
|
||||
ReadJpegFileForMockData();
|
||||
constexpr int TIME_OUT_MS = 100;
|
||||
while (mTaskRuning) {
|
||||
if (mJpegData) {
|
||||
GetJpegData(mJpegData, mJpegSize, 1000);
|
||||
}
|
||||
std::unique_lock<std::mutex> lock(mMutex);
|
||||
mCv.wait(lock, [&] {
|
||||
// mCv.wait(lock, [&] {
|
||||
// return !mTaskRuning;
|
||||
// });
|
||||
mCv.wait_for(lock, std::chrono::milliseconds(TIME_OUT_MS), [&] {
|
||||
return !mTaskRuning;
|
||||
});
|
||||
/**
|
||||
* @brief If the recording time is over, you need to stop the recording timer here.
|
||||
*/
|
||||
mTaskRuning = false;
|
||||
// mTaskRuning = false;
|
||||
}
|
||||
ReadFileUnInit();
|
||||
if (mJpegData) {
|
||||
free(mJpegData);
|
||||
mJpegData = nullptr;
|
||||
}
|
||||
}
|
||||
void CameraHalTest::ReadDataFromH264File(const void *stream, const unsigned int length,
|
||||
const unsigned long long duration_us)
|
||||
|
@ -168,6 +185,35 @@ void CameraHalTest::ReadFileUnInit(void)
|
|||
mReadG711aFile = nullptr;
|
||||
}
|
||||
}
|
||||
int CameraHalTest::ReadJpegFileForMockData(void)
|
||||
{
|
||||
const char *filename = TEST_SOURCE_PATH "/support_test/rv1106_jpeg_encoder.jpg";
|
||||
|
||||
FILE *file = fopen(filename, "rb");
|
||||
if (!file) {
|
||||
LogError("Error opening file: %s\n", filename);
|
||||
return 1;
|
||||
}
|
||||
|
||||
fseek(file, 0, SEEK_END);
|
||||
mJpegSize = ftell(file);
|
||||
fseek(file, 0, SEEK_SET);
|
||||
|
||||
mJpegData = (unsigned char *)malloc(mJpegSize);
|
||||
if (!mJpegData) {
|
||||
LogError("Error allocating memory\n");
|
||||
fclose(file);
|
||||
return 1;
|
||||
}
|
||||
size_t read_size = fread(mJpegData, 1, mJpegSize, file);
|
||||
if (read_size != mJpegSize) {
|
||||
LogError("Error reading file\n");
|
||||
fclose(file);
|
||||
return 1;
|
||||
}
|
||||
fclose(file);
|
||||
return 0;
|
||||
}
|
||||
CameraHalMock::CameraHalMock(const CameraType &cameraType) : CameraHalTest(cameraType)
|
||||
{
|
||||
}
|
||||
|
|
|
@ -41,6 +41,7 @@ private:
|
|||
void ReadDataFromG711aFile(const void *stream, const unsigned int length, const unsigned long long duration_us);
|
||||
void ReadFileInit(void);
|
||||
void ReadFileUnInit(void);
|
||||
int ReadJpegFileForMockData(void);
|
||||
|
||||
protected:
|
||||
const CameraType mCameraType;
|
||||
|
@ -54,6 +55,8 @@ protected:
|
|||
std::thread mTaskTimerThread;
|
||||
unsigned long long mVideoTimeStamp_us;
|
||||
unsigned long long mAudioTimeStamp_us;
|
||||
unsigned char *mJpegData;
|
||||
unsigned int mJpegSize;
|
||||
};
|
||||
class CameraHalMock : public CameraHalTest
|
||||
{
|
||||
|
|
BIN
test/support_test/rv1106_jpeg_encoder.jpg
Executable file
BIN
test/support_test/rv1106_jpeg_encoder.jpg
Executable file
Binary file not shown.
After Width: | Height: | Size: 94 KiB |
|
@ -31,6 +31,7 @@ enum StreamType
|
|||
{
|
||||
STREAM_TYPE_VIDEO_H264 = 0,
|
||||
STREAM_TYPE_AUDIO_G711A,
|
||||
STREAM_TYPE_JPEG,
|
||||
STREAM_TYPE_END
|
||||
};
|
||||
/**
|
||||
|
@ -82,7 +83,7 @@ OutputFileInfo IGetOutputFileInfo(void *object);
|
|||
|
||||
StatusCode IOpenJpegFile(void *object, const OutputFileInfo *info);
|
||||
StatusCode ICloseJpegFile(void *object);
|
||||
StatusCode IWriteJpegData(void *object, const void *data, const size_t size);
|
||||
StatusCode IWriteJpegData(void *object, const void *data, const size_t size, const StreamInfo streamInfo);
|
||||
|
||||
void IMediaBaseFree(void *object);
|
||||
#ifdef __cplusplus
|
||||
|
|
294
utils/MediaBase/src/FfmpegDecoderV2.cpp
Normal file
294
utils/MediaBase/src/FfmpegDecoderV2.cpp
Normal file
|
@ -0,0 +1,294 @@
|
|||
/*
|
||||
* Copyright (c) 2023 Fancy Code.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "FfmpegDecoderV2.h"
|
||||
#include "ILog.h"
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavcodec/codec.h>
|
||||
#include <libavcodec/codec_id.h>
|
||||
#include <libavcodec/packet.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/error.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavutil/pixfmt.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#include <cstddef>
|
||||
#include <cstdlib>
|
||||
#include <errno.h>
|
||||
#include <functional>
|
||||
#include <stdint.h>
|
||||
FfmpegDecoderV2::FfmpegDecoderV2(const enum AVCodecID &codecId, const AVPixelFormat &decodePixelFormat,
|
||||
const int &width, const int &height)
|
||||
: mCodecId(codecId), mCodec(nullptr), mCodecCtx(nullptr), mFrame(nullptr), mPacket(nullptr), mParser(nullptr),
|
||||
mVideoWidth(width), mVideoHeight(height), mDecodePixelFormat(decodePixelFormat)
|
||||
{
|
||||
}
|
||||
bool FfmpegDecoderV2::Init(void)
|
||||
{
|
||||
int ret = 0;
|
||||
LogInfo("find decoder : %s\n", avcodec_get_name(mCodecId));
|
||||
mCodec = (AVCodec *)avcodec_find_decoder(mCodecId);
|
||||
// mCodec = (AVCodec *)avcodec_find_decoder_by_name("libfdk_aac");
|
||||
if (!(mCodec)) {
|
||||
LogError("decoder not found:%s\n", avcodec_get_name(mCodecId));
|
||||
return false;
|
||||
}
|
||||
mCodecCtx = avcodec_alloc_context3((const AVCodec *)(mCodec));
|
||||
if (!(mCodecCtx)) {
|
||||
LogError("Could not allocate codec context\n");
|
||||
return false;
|
||||
}
|
||||
if (AVMEDIA_TYPE_AUDIO == mCodec->type) {
|
||||
LogInfo("Audio decoder.\n");
|
||||
/* put sample parameters */
|
||||
mCodecCtx->bit_rate = 64000;
|
||||
// mCodecCtx->bit_rate = 352800;
|
||||
// mCodecCtx->sample_rate = 8000;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
mCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(mCodec, mCodecCtx->sample_fmt)) {
|
||||
LogError("decoder does not support sample format %s", av_get_sample_fmt_name(mCodecCtx->sample_fmt));
|
||||
return false;
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
mCodecCtx->sample_rate = select_sample_rate(mCodec);
|
||||
LogInfo("decoder sample_rate:%d\n", mCodecCtx->sample_rate);
|
||||
// const AVChannelLayout src = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
|
||||
// av_channel_layout_copy(&mCodecCtx->ch_layout, &src);
|
||||
ret = select_channel_layout(mCodec, &(mCodecCtx->ch_layout));
|
||||
if (ret < 0) {
|
||||
LogError("Could not set channel layout\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else {
|
||||
mCodecCtx->pix_fmt = mDecodePixelFormat;
|
||||
mCodecCtx->width = mVideoWidth;
|
||||
mCodecCtx->height = mVideoHeight;
|
||||
}
|
||||
if ((ret = avcodec_open2(mCodecCtx, mCodec, nullptr)) < 0) {
|
||||
char error_str[AV_ERROR_MAX_STRING_SIZE] = {0};
|
||||
LogError("Could not open codec:%s\n", av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret));
|
||||
return false;
|
||||
}
|
||||
mFrame = av_frame_alloc();
|
||||
if (!mFrame) {
|
||||
LogError("Could not allocate video frame\n");
|
||||
return false;
|
||||
}
|
||||
mPacket = av_packet_alloc();
|
||||
if (!mPacket) {
|
||||
LogError("Could not allocate video frame\n");
|
||||
return false;
|
||||
}
|
||||
// mParser = av_parser_init(mCodec->id);
|
||||
// if (!mParser) {
|
||||
// LogError("mParser not found : %s\n", avcodec_get_name(mCodec->id));
|
||||
// return false;
|
||||
// }
|
||||
if (AVMEDIA_TYPE_AUDIO == mCodec->type) {
|
||||
// mFrame->nb_samples = mCodecCtx->frame_size;
|
||||
// mFrame->format = mCodecCtx->sample_fmt;
|
||||
ret = av_channel_layout_copy(&(mFrame->ch_layout), &(mCodecCtx->ch_layout));
|
||||
if (ret < 0) {
|
||||
LogError("Could not copy channel layout\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
LogInfo("init success pix_fmt = %d\n", mCodecCtx->pix_fmt);
|
||||
return true;
|
||||
}
|
||||
bool FfmpegDecoderV2::UnInit(void)
|
||||
{
|
||||
LogInfo("uninit %s\n", avcodec_get_name(mCodecId));
|
||||
av_packet_free(&mPacket);
|
||||
mPacket = nullptr;
|
||||
if (mParser) {
|
||||
av_parser_close(mParser);
|
||||
mParser = nullptr;
|
||||
}
|
||||
avcodec_free_context(&mCodecCtx);
|
||||
mCodecCtx = nullptr;
|
||||
if (mFrame) {
|
||||
av_frame_free(&mFrame);
|
||||
mFrame = nullptr;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
void FfmpegDecoderV2::DecodeData(const void *data, const size_t &size, const unsigned long long &pts,
|
||||
std::function<void(AVFrame *frame)> callback)
|
||||
{
|
||||
if (nullptr == mParser) {
|
||||
mPacket->data = (uint8_t *)data;
|
||||
mPacket->size = size;
|
||||
mPacket->pts = pts;
|
||||
mPacket->dts = mPacket->pts;
|
||||
// LogInfo("source data mPacket->pts:%d\n", mPacket->pts);
|
||||
AVDecodeData(mPacket, callback);
|
||||
return;
|
||||
}
|
||||
AVParseData(data, size, callback);
|
||||
}
|
||||
void inline FfmpegDecoderV2::AVParseData(const void *data, const size_t &size,
|
||||
std::function<void(AVFrame *frame)> callback)
|
||||
{
|
||||
if (nullptr == data) {
|
||||
LogError("data is null\n");
|
||||
return;
|
||||
}
|
||||
uint8_t *frameData = (uint8_t *)data;
|
||||
size_t data_size = size;
|
||||
while (data_size > 0) {
|
||||
int ret = av_parser_parse2(mParser,
|
||||
mCodecCtx,
|
||||
&mPacket->data,
|
||||
&mPacket->size,
|
||||
frameData,
|
||||
data_size,
|
||||
AV_NOPTS_VALUE,
|
||||
AV_NOPTS_VALUE,
|
||||
0);
|
||||
if (ret < 0) {
|
||||
LogError("av_parse_frame failed\n");
|
||||
break;
|
||||
}
|
||||
frameData += ret;
|
||||
data_size -= ret;
|
||||
if (mPacket->size) {
|
||||
AVDecodeData(mPacket, callback);
|
||||
}
|
||||
}
|
||||
}
|
||||
// static void save_code_stream_file(const void *data, const size_t &size)
|
||||
// {
|
||||
// char OutPath[128] = {0};
|
||||
// const void *pData = data;
|
||||
// FILE *file = NULL;
|
||||
// LogInfo("save_code_stream_file size = %d\n", size);
|
||||
// sprintf(OutPath, "./test.yuv");
|
||||
// file = fopen(OutPath, "a+");
|
||||
|
||||
// if (file) { // TODO: Don't open very time.
|
||||
// fwrite(pData, 1, size, file);
|
||||
// fflush(file);
|
||||
// }
|
||||
|
||||
// if (file)
|
||||
// fclose(file);
|
||||
// }
|
||||
// static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, char *filename)
|
||||
// {
|
||||
// FILE *f;
|
||||
// int i;
|
||||
|
||||
// f = fopen(filename, "wb");
|
||||
// fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
|
||||
// for (i = 0; i < ysize; i++)
|
||||
// fwrite(buf + i * wrap, 1, xsize, f);
|
||||
// fclose(f);
|
||||
// }
|
||||
void inline FfmpegDecoderV2::AVDecodeData(AVPacket *pkt, std::function<void(AVFrame *frame)> callback)
{
    // Send one packet to the decoder and deliver the first decoded frame (if
    // any) to the caller through `callback`. The packet is always unreferenced
    // before returning.
    int ret = avcodec_send_packet(mCodecCtx, pkt);
    if (ret < 0) {
        LogError("Error sending a packet for decoding\n");
        av_packet_unref(pkt);
        return;
    }
    while (ret >= 0) {
        ret = avcodec_receive_frame(mCodecCtx, mFrame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // Decoder needs more input, or the stream has ended.
            break;
        }
        if (ret < 0) {
            LogError("Error during decoding\n");
            break;
        }
        if (callback) {
            callback(mFrame);
        }
        // NOTE(review): unconditional break — only the first frame produced by
        // this packet is delivered; any additional frames stay buffered inside
        // the decoder until the next call. Confirm this is intentional.
        break;
    }
    av_packet_unref(pkt);
}
|
||||
/* just pick the highest supported samplerate */
|
||||
int FfmpegDecoderV2::select_sample_rate(const AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
if (!best_samplerate || abs(44100 - *p) < abs(44100 - best_samplerate))
|
||||
best_samplerate = *p;
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
/* select layout with the highest channel count */
|
||||
int FfmpegDecoderV2::select_channel_layout(const AVCodec *codec, AVChannelLayout *dst)
|
||||
{
|
||||
const AVChannelLayout *p, *best_ch_layout = nullptr;
|
||||
int best_nb_channels = 0;
|
||||
AVChannelLayout channelLayout = AV_CHANNEL_LAYOUT_STEREO;
|
||||
if (!codec->ch_layouts)
|
||||
return av_channel_layout_copy(dst, &channelLayout);
|
||||
|
||||
p = codec->ch_layouts;
|
||||
while (p->nb_channels) {
|
||||
int nb_channels = p->nb_channels;
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return av_channel_layout_copy(dst, best_ch_layout);
|
||||
}
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
int FfmpegDecoderV2::check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
74
utils/MediaBase/src/FfmpegDecoderV2.h
Normal file
74
utils/MediaBase/src/FfmpegDecoderV2.h
Normal file
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
* Copyright (c) 2023 Fancy Code.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef FFMPEG_DECODER_V2_H
|
||||
#define FFMPEG_DECODER_V2_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavcodec/packet.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libswresample/swresample.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#include <functional>
|
||||
// constexpr int DECODER_UNSUPORTED = 0;
|
||||
/**
 * @brief Wraps the FFmpeg decode pipeline (parser + codec context + reusable
 * frame/packet) for a single elementary stream. Not thread-safe; one instance
 * per stream.
 */
class FfmpegDecoderV2
{
public:
    /**
     * @brief When decoding a video stream, you need to use this constructor to provide the required parameters.
     * @param codecId Codec of the incoming stream.
     * @param decodePixelFormat Pixel format requested for decoded frames.
     * @param width Video width.
     * @param height Video height.
     */
    FfmpegDecoderV2(const enum AVCodecID &codecId, const AVPixelFormat &decodePixelFormat, const int &width,
                    const int &height);
    virtual ~FfmpegDecoderV2() = default;
    bool Init(void);
    bool UnInit(void);
    /**
     * @brief Feed one chunk of encoded data; decoded frames are delivered
     * synchronously through `callback` before this call returns.
     * @param data Encoded input bytes.
     * @param size Number of bytes in `data`.
     * @param pts Presentation timestamp applied to the input packet.
     * @param callback Invoked once per decoded AVFrame.
     */
    void DecodeData(const void *data, const size_t &size, const unsigned long long &pts,
                    std::function<void(AVFrame *frame)> callback);

private:
    // Split a raw buffer into packets via the parser, decoding each one.
    void AVParseData(const void *data, const size_t &size, std::function<void(AVFrame *frame)> callback);
    // Decode a single packet and deliver the resulting frame to `callback`.
    void AVDecodeData(AVPacket *pkt, std::function<void(AVFrame *frame)> callback);

private:
    static int select_sample_rate(const AVCodec *codec);
    static int select_channel_layout(const AVCodec *codec, AVChannelLayout *dst);
    static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt);

private:
    const enum AVCodecID mCodecId;          // codec of the incoming stream
    AVCodec *mCodec;
    AVCodecContext *mCodecCtx;
    AVFrame *mFrame;                        // reusable decode output frame
    AVPacket *mPacket;                      // reusable input packet
    AVCodecParserContext *mParser;
    const int mVideoWidth;
    const int mVideoHeight;
    const AVPixelFormat mDecodePixelFormat; // format requested for output frames
};
|
||||
#endif
|
418
utils/MediaBase/src/FfmpegEncoderV2.cpp
Normal file
418
utils/MediaBase/src/FfmpegEncoderV2.cpp
Normal file
|
@ -0,0 +1,418 @@
|
|||
/*
|
||||
* Copyright (c) 2023 Fancy Code.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "FfmpegEncoderV2.h"
|
||||
#include "ILog.h"
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavcodec/codec.h>
|
||||
#include <libavcodec/codec_id.h>
|
||||
#include <libavcodec/packet.h>
|
||||
#include <libavformat/avformat.h>
|
||||
// #include <libavutil/avassert.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/dict.h>
|
||||
#include <libavutil/error.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/pixfmt.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#include <cstdint>
|
||||
#include <errno.h>
|
||||
#include <functional>
|
||||
constexpr long SOURCE_AUDIO_SAMPEL_RATE = 8000;
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
// Stores the stream parameters only; no FFmpeg resources are allocated until
// Init() is called.
FfmpegEncoderV2::FfmpegEncoderV2(const enum AVCodecID &codecId, const AVPixelFormat &encodePixelFormat,
                                 const int &width, const int &height)
    : mCodecId(codecId), mCodecCtx(nullptr), mCodec(nullptr), mFrame(nullptr), mTmpFrame(nullptr), mTmpPkt(nullptr),
      mSamplesCount(0), mSwrCtx(nullptr), next_pts(0), mVideoWidth(width), mVideoHeight(height),
      mEncodePixelFormat(encodePixelFormat)
{
}
|
||||
// Allocates the packet, finds the encoder for mCodecId and configures the
// codec context (audio or video branch). Does NOT open the codec — that
// happens in OpenEncoder(). Returns false on any allocation/lookup failure.
// @param outputFlags The muxer's oformat flags (checked for AVFMT_GLOBALHEADER).
bool FfmpegEncoderV2::Init(const int &outputFlags)
{
    mTmpPkt = av_packet_alloc();
    if (!mTmpPkt) {
        LogError("Could not allocate AVPacket\n");
        return false;
    }
    LogInfo("find encoder : %s\n", avcodec_get_name(mCodecId));
    int i = 0;
    /* find the encoder */
    mCodec = (AVCodec *)avcodec_find_encoder(mCodecId);
    if (!mCodec) {
        LogError("Could not find encoder for '%s'\n", avcodec_get_name(mCodecId));
        return false;
    }
    mCodecCtx = avcodec_alloc_context3(mCodec);
    if (!mCodecCtx) {
        LogError("Could not alloc an encoding context\n");
        return false;
    }
    const AVChannelLayout src = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
    switch (mCodec->type) {
        case AVMEDIA_TYPE_AUDIO:
            // Prefer the codec's first advertised sample format, else planar float.
            mCodecCtx->sample_fmt = mCodec->sample_fmts ? mCodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
            mCodecCtx->bit_rate = 64000;
            mCodecCtx->sample_rate = 44100;
            if (mCodec->supported_samplerates) {
                mCodecCtx->sample_rate = mCodec->supported_samplerates[0];
                for (i = 0; mCodec->supported_samplerates[i]; i++) {
                    if (mCodec->supported_samplerates[i] == 44100)
                        mCodecCtx->sample_rate = 44100;
                }
            }
            // NOTE(review): this unconditionally overrides the 44100 selection
            // logic above, making it dead code — presumably matched to the
            // 8 kHz/16 kHz capture hardware. Confirm and remove the dead logic.
            mCodecCtx->sample_rate = 16000;
            // Mono input layout (see `src` above).
            av_channel_layout_copy(&mCodecCtx->ch_layout, &src);
            break;

        case AVMEDIA_TYPE_VIDEO:
            mCodecCtx->codec_id = mCodecId;

            mCodecCtx->bit_rate = 300000;
            /* Resolution must be a multiple of two. */
            mCodecCtx->width = mVideoWidth;
            mCodecCtx->height = mVideoHeight;
            /* timebase: This is the fundamental unit of time (in seconds) in terms
             * of which frame timestamps are represented. For fixed-fps content,
             * timebase should be 1/framerate and timestamp increments should be
             * identical to 1. */
            mCodecCtx->time_base = (AVRational){1, STREAM_FRAME_RATE};

            mCodecCtx->gop_size = 12; /* emit one intra frame every twelve frames at most */
            mCodecCtx->pix_fmt = mEncodePixelFormat;
            if (mCodecCtx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                /* just for testing, we also add B-frames */
                mCodecCtx->max_b_frames = 2;
            }
            if (mCodecCtx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                /* Needed to avoid using macroblocks in which some coeffs overflow.
                 * This does not happen with normal video, it just happens here as
                 * the motion of the chroma plane does not match the luma plane. */
                mCodecCtx->mb_decision = 2;
            }
            break;

        default:
            break;
    }
    /* Some formats want stream headers to be separate. */
    if (outputFlags & AVFMT_GLOBALHEADER) {
        mCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    return true;
}
|
||||
// Releases everything Init()/OpenEncoder() allocated. Safe to call even if
// some members were never allocated: av_frame_free/avcodec_free_context are
// guarded here, and av_packet_free/swr_free tolerate pointers to nullptr.
void FfmpegEncoderV2::UnInit(void)
{
    if (mFrame) {
        av_frame_free(&mFrame);
        mFrame = nullptr;
    }
    if (mTmpFrame) {
        av_frame_free(&mTmpFrame);
        mTmpFrame = nullptr;
    }
    if (mCodecCtx) {
        avcodec_free_context(&mCodecCtx);
        mCodecCtx = nullptr;
    }
    av_packet_free(&mTmpPkt);
    swr_free(&mSwrCtx);
}
|
||||
// Time base the muxer should use for this encoder's stream:
// audio counts in samples (1/sample_rate), video uses the codec context's own
// time base; anything else yields the invalid rational {0, -1}.
AVRational FfmpegEncoderV2::GetTimeBase(void)
{
    const AVMediaType mediaType = mCodec->type;
    if (AVMEDIA_TYPE_AUDIO == mediaType) {
        return (AVRational){1, mCodecCtx->sample_rate};
    }
    if (AVMEDIA_TYPE_VIDEO == mediaType) {
        return mCodecCtx->time_base;
    }
    LogError("Unsupported media type.\n");
    return (AVRational){0, -1};
}
|
||||
// Opens the codec and binds it to `stream`, dispatching to the audio or video
// open routine depending on the encoder's media type. Returns false for any
// other media type or when the underlying open fails.
bool FfmpegEncoderV2::OpenEncoder(AVDictionary *optArg, AVStream *stream)
{
    const AVMediaType mediaType = mCodec->type;
    if (AVMEDIA_TYPE_AUDIO == mediaType) {
        return OpenAudio(optArg, stream);
    }
    if (AVMEDIA_TYPE_VIDEO == mediaType) {
        return OpenVideo(optArg, stream);
    }
    LogError("Unsupported media type.\n");
    return false;
}
|
||||
// static void save_code_stream_file(const void *data, const size_t &size)
|
||||
// {
|
||||
// char OutPath[16];
|
||||
// const void *pData = data;
|
||||
// FILE *file = NULL;
|
||||
// LogInfo("save_code_stream_file: %d\n", size);
|
||||
// sprintf(OutPath, "./test.jpg");
|
||||
// file = fopen(OutPath, "a+");
|
||||
|
||||
// if (file) { // TODO: Don't open very time.
|
||||
// fwrite(pData, 1, size, file);
|
||||
// fflush(file);
|
||||
// }
|
||||
|
||||
// if (file)
|
||||
// fclose(file);
|
||||
// }
|
||||
// Encodes one frame and delivers every resulting packet — rescaled from the
// codec time base to `stream`'s time base and tagged with the stream index —
// through `callback`.
// @return 1 when the encoder signalled EOF, 0 on success, AVERROR_EXIT on error.
int FfmpegEncoderV2::EncodeData(AVFrame *frame, AVStream *stream, std::function<void(AVPacket *pkt)> callback)
{
    int ret = 0;
    AVFrame *tmpFrame = frame;
    if (AVMEDIA_TYPE_AUDIO == mCodec->type) {
        // Audio input is resampled to the encoder's format/rate first.
        tmpFrame = ConvertAudioFrame(frame, mSwrCtx);
    }
    if (!tmpFrame) {
        LogError("Could not convert audio frame.\n");
        return AVERROR_EXIT;
    }
    // send the frame to the encoder
    ret = avcodec_send_frame(mCodecCtx, tmpFrame);
    if (ret < 0) {
        char error_str[AV_ERROR_MAX_STRING_SIZE] = {0};
        LogInfo("Error sending a frame to the encoder: %s\n",
                av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret));
        return AVERROR_EXIT;
    }

    // Drain all packets the encoder produced for this frame.
    while (ret >= 0) {
        ret = avcodec_receive_packet(mCodecCtx, mTmpPkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            // Encoder needs more input, or has been fully flushed.
            break;
        }
        if (ret < 0) {
            char error_str[AV_ERROR_MAX_STRING_SIZE] = {0};
            LogInfo("Error encoding a frame: %s\n", av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret));
            return AVERROR_EXIT;
        }

        /* rescale output packet timestamp values from codec to stream timebase */
        av_packet_rescale_ts(mTmpPkt, mCodecCtx->time_base, stream->time_base);
        mTmpPkt->stream_index = stream->index;

        if (callback) {
            callback(mTmpPkt);
        }
    }

    return ret == AVERROR_EOF ? 1 : 0;
}
|
||||
// Opens the video codec, allocates the reusable output frame (and a YUV420P
// staging frame when the encoder format differs), and copies the codec
// parameters into `stream`. Returns false on any failure.
bool FfmpegEncoderV2::OpenVideo(AVDictionary *optArg, AVStream *stream)
{
    int ret = 0;
    AVDictionary *opt = nullptr;
    // Work on a copy so the caller's dictionary is left untouched.
    av_dict_copy(&opt, optArg, 0);
    // Relax standards compliance so the MJPEG encoder accepts non-standard
    // pixel formats (needed for jpeg output).
    av_opt_set(mCodecCtx, "strict", "unofficial", 0); // Add for jpeg
    /* open the codec */
    ret = avcodec_open2(mCodecCtx, mCodec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        char error_str[AV_ERROR_MAX_STRING_SIZE] = {0};
        LogError("Could not open video codec: %s\n", av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret));
        return false;
    }
    /* allocate and init a re-usable frame */
    mFrame = alloc_frame(mCodecCtx->pix_fmt, mCodecCtx->width, mCodecCtx->height);
    if (!mFrame) {
        LogError("Could not allocate video frame\n");
        return false;
    }
    if (mCodecCtx->pix_fmt != AV_PIX_FMT_YUV420P) {
        // Staging frame for converting YUV420P input into the encoder's format.
        mTmpFrame = alloc_frame(AV_PIX_FMT_YUV420P, mCodecCtx->width, mCodecCtx->height);
        if (!mTmpFrame) {
            LogError("Could not allocate temporary video frame\n");
            return false;
        }
    }
    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(stream->codecpar, mCodecCtx);
    if (ret < 0) {
        LogError("Could not copy the stream parameters\n");
        return false;
    }
    LogInfo(" Open video success, mCodecCtx->pix_fmt = %d\n", mCodecCtx->pix_fmt);
    return true;
}
|
||||
// Opens the audio codec, allocates the reusable output frame, copies codec
// parameters to `stream`, and builds the swresample context that converts the
// mono 8 kHz S16 capture input into the encoder's format/rate.
// Returns false on any failure.
bool FfmpegEncoderV2::OpenAudio(AVDictionary *optArg, AVStream *stream)
{
    int nb_samples = 0;
    int ret = 0;
    AVDictionary *opt = nullptr;
    // Work on a copy so the caller's dictionary is left untouched.
    av_dict_copy(&opt, optArg, 0);
    /* open it */
    ret = avcodec_open2(mCodecCtx, mCodec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        char error_str[AV_ERROR_MAX_STRING_SIZE] = {0};
        LogError("Could not open audio codec: %s\n", av_make_error_string(error_str, AV_ERROR_MAX_STRING_SIZE, ret));
        return false;
    }
    // Codecs with a fixed frame size dictate nb_samples; otherwise pick a
    // generous buffer.
    if (mCodecCtx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = mCodecCtx->frame_size;
    mFrame = alloc_audio_frame(mCodecCtx->sample_fmt, &mCodecCtx->ch_layout, mCodecCtx->sample_rate, nb_samples);
    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(stream->codecpar, mCodecCtx);
    if (ret < 0) {
        LogError("Could not copy the stream parameters\n");
        return false;
    }
    /* create resampler context */
    mSwrCtx = swr_alloc();
    if (!mSwrCtx) {
        LogError("Could not allocate resampler context\n");
        return false;
    }
    // Input side: mono, 8 kHz, signed 16-bit — the capture source format
    // (see SOURCE_AUDIO_SAMPEL_RATE above).
    const AVChannelLayout src = (AVChannelLayout)AV_CHANNEL_LAYOUT_MONO;
    AVChannelLayout ch_layout;
    av_channel_layout_copy(&ch_layout, &src);
    /* set options */
    av_opt_set_chlayout(mSwrCtx, "in_chlayout", &ch_layout, 0);
    av_opt_set_int(mSwrCtx, "in_sample_rate", SOURCE_AUDIO_SAMPEL_RATE, 0);
    av_opt_set_sample_fmt(mSwrCtx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
    // Output side: whatever the encoder was configured with in Init().
    av_opt_set_chlayout(mSwrCtx, "out_chlayout", &mCodecCtx->ch_layout, 0);
    av_opt_set_int(mSwrCtx, "out_sample_rate", mCodecCtx->sample_rate, 0);
    av_opt_set_sample_fmt(mSwrCtx, "out_sample_fmt", mCodecCtx->sample_fmt, 0);
    /* initialize the resampling context */
    if ((ret = swr_init(mSwrCtx)) < 0) {
        LogError("Failed to initialize the resampling context\n");
        return false;
    }
    return true;
}
|
||||
// Resamples `decodeFrame` (capture format, SOURCE_AUDIO_SAMPEL_RATE) into the
// reusable member frame `mFrame` using `swr_ctx`, and rescales its pts into
// the codec time base.
// @return mFrame on success, nullptr on any failure. The returned frame is
// owned by this object and overwritten on the next call.
AVFrame *FfmpegEncoderV2::ConvertAudioFrame(AVFrame *decodeFrame, struct SwrContext *swr_ctx)
{
    if (nullptr == decodeFrame) {
        LogError("decodeFrame is null\n");
        return nullptr;
    }
    int ret = 0;
    int dst_nb_samples = 0;
    /* convert samples from native format to destination codec format, using the resampler */
    /* compute destination number of samples */
    // Include the resampler's internal delay so no samples are dropped.
    dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, mCodecCtx->sample_rate) + decodeFrame->nb_samples,
                                    mCodecCtx->sample_rate,
                                    SOURCE_AUDIO_SAMPEL_RATE,
                                    AV_ROUND_UP);

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally;
     * make sure we do not overwrite it here
     */
    ret = av_frame_make_writable(mFrame);
    if (ret < 0) {
        LogError("av_frame_make_writable failed\n");
        return nullptr;
    }

    /* convert to destination format */
    ret = swr_convert(
        swr_ctx, mFrame->data, dst_nb_samples, (const uint8_t **)decodeFrame->data, decodeFrame->nb_samples);
    if (ret < 0) {
        LogError("Error while converting\n");
        return nullptr;
    }
    // Rescale the incoming pts into the codec time base.
    // NOTE(review): the {1, 1000000} source base assumes the capture pts is in
    // microseconds — confirm against the upstream producer.
    mFrame->pts = av_rescale_q(decodeFrame->pts, (AVRational){1, 1000000}, mCodecCtx->time_base);
    // Running total of samples produced (kept for pts bookkeeping).
    mSamplesCount += dst_nb_samples;
    return mFrame;
}
|
||||
// Allocates a video AVFrame of the given format/size with its data buffers.
// @return the frame on success, nullptr on failure (nothing is leaked).
AVFrame *FfmpegEncoderV2::alloc_frame(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *frame;
    int ret;

    frame = av_frame_alloc();
    if (!frame)
        return nullptr;

    frame->format = pix_fmt;
    frame->width = width;
    frame->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        LogInfo("Could not allocate frame data.\n");
        // BUGFIX: the original returned without freeing `frame`, leaking the
        // AVFrame whenever the buffer allocation failed.
        av_frame_free(&frame);
        return nullptr;
    }

    return frame;
}
|
||||
// Allocates an audio AVFrame with the given format, layout, rate and sample
// count; buffers are allocated only when nb_samples > 0.
// @return the frame on success, nullptr on failure (nothing is leaked).
AVFrame *FfmpegEncoderV2::alloc_audio_frame(enum AVSampleFormat sample_fmt, const AVChannelLayout *channel_layout,
                                            int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        LogError("Error allocating an audio frame\n");
        return nullptr;
    }

    frame->format = sample_fmt;
    av_channel_layout_copy(&frame->ch_layout, channel_layout);
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;

    if (nb_samples) {
        if (av_frame_get_buffer(frame, 0) < 0) {
            LogError("Error allocating an audio buffer\n");
            // BUGFIX: the original returned without freeing `frame`, leaking
            // the AVFrame whenever the buffer allocation failed.
            av_frame_free(&frame);
            return nullptr;
        }
    }

    return frame;
}
|
79
utils/MediaBase/src/FfmpegEncoderV2.h
Normal file
79
utils/MediaBase/src/FfmpegEncoderV2.h
Normal file
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
* Copyright (c) 2023 Fancy Code.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef FFMPEG_ENCODER_V2_H
|
||||
#define FFMPEG_ENCODER_V2_H
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavcodec/packet.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libswresample/swresample.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#include <functional>
|
||||
// constexpr int ENCODER_UNSUPORTED = 0;
|
||||
/**
 * @brief Wraps the FFmpeg encode pipeline (codec context + reusable frames,
 * packet, and optional audio resampler) for a single output stream.
 * Typical sequence: Init() -> OpenEncoder() -> EncodeData()* -> UnInit().
 * Not thread-safe; one instance per stream.
 */
class FfmpegEncoderV2
{
public:
    /**
     * @brief When encoding a video stream, you need to use this constructor to provide the required parameters.
     * @param codecId Video stream format.
     * @param encodePixelFormat Pixel format the encoder will be fed with.
     * @param width Video width.
     * @param height Video height.
     */
    FfmpegEncoderV2(const enum AVCodecID &codecId, const AVPixelFormat &encodePixelFormat, const int &width,
                    const int &height);
    virtual ~FfmpegEncoderV2() = default;
    // Finds the encoder and configures the codec context; does not open it.
    bool Init(const int &outputFlags);
    // Releases all FFmpeg resources; safe to call after a partial Init().
    void UnInit(void);
    // Time base the muxer should use for this encoder's stream.
    AVRational GetTimeBase(void);
    // Opens the codec and binds it to `stream` (audio or video path).
    bool OpenEncoder(AVDictionary *optArg, AVStream *stream);
    // Encodes one frame; resulting packets are delivered through `callback`.
    int EncodeData(AVFrame *frame, AVStream *stream, std::function<void(AVPacket *pkt)> callback);

private:
    bool OpenVideo(AVDictionary *optArg, AVStream *stream);
    bool OpenAudio(AVDictionary *optArg, AVStream *stream);
    // Resamples a capture-format audio frame into the encoder's format.
    AVFrame *ConvertAudioFrame(AVFrame *decodeFrame, struct SwrContext *swr_ctx);

private:
    static AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, int width, int height);
    static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt, const AVChannelLayout *channel_layout,
                                      int sample_rate, int nb_samples);

private:
    const enum AVCodecID mCodecId;          // target codec
    AVCodecContext *mCodecCtx;
    AVCodec *mCodec;
    AVFrame *mFrame;                        // reusable encode input frame
    AVFrame *mTmpFrame;                     // staging frame for pixel conversion
    AVPacket *mTmpPkt;                      // reusable encode output packet
    int mSamplesCount;                      // running audio sample total
    struct SwrContext *mSwrCtx;             // audio resampler (audio path only)
    int64_t next_pts;
    const int mVideoWidth;
    const int mVideoHeight;
    const AVPixelFormat mEncodePixelFormat; // format the encoder is fed with
};
|
||||
#endif
|
|
@ -42,6 +42,7 @@ FfmpegReadFile::FfmpegReadFile()
|
|||
}
|
||||
StatusCode FfmpegReadFile::StartReadFile(const std::string &path)
|
||||
{
|
||||
mFilePath = path;
|
||||
InitFfmpeg();
|
||||
int result = 0;
|
||||
const AVInputFormat *iformat = av_find_input_format(FfmpegBase::InputFormat(mType));
|
||||
|
@ -81,6 +82,7 @@ StatusCode FfmpegReadFile::StartReadFile(const std::string &path)
|
|||
}
|
||||
StatusCode FfmpegReadFile::StopReadFile(void)
|
||||
{
|
||||
std::lock_guard<std::mutex> locker(mMutex);
|
||||
mTaskRuning = false;
|
||||
if (mTaskTimerThread.joinable()) {
|
||||
mTaskTimerThread.join();
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
#define FFMPEG_READ_FILE_H
|
||||
#include "FfmpegBase.h"
|
||||
#include "MediaBase.h"
|
||||
#include <mutex>
|
||||
class FfmpegReadFile : virtual public FfmpegBase
|
||||
{
|
||||
public:
|
||||
|
@ -33,9 +34,11 @@ private:
|
|||
void ReadFrame(AVPacket *packet, const unsigned int duration_us);
|
||||
|
||||
private:
|
||||
std::mutex mMutex;
|
||||
ReadVideoFileCallback mReadVideoCallback;
|
||||
void *mReadVideoCallbackContext;
|
||||
ReadVideoFileCallback mReadAudioCallback;
|
||||
void *mReadAudioCallbackContext;
|
||||
std::string mFilePath;
|
||||
};
|
||||
#endif
|
|
@ -15,6 +15,7 @@
|
|||
#include "FfmpegTakePicture.h"
|
||||
#include "FfmpegBase.h"
|
||||
#include "FfmpegOriginalPicture.h"
|
||||
#include "FfmpegThumbnailV2.h"
|
||||
#include "ILog.h"
|
||||
#include "MediaBase.h"
|
||||
#include "StatusCode.h"
|
||||
|
@ -51,24 +52,33 @@ StatusCode FfmpegTakePicture::CloseJpegFile(void)
|
|||
}
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
}
|
||||
StatusCode FfmpegTakePicture::WriteJpegData(const void *data, const size_t &size)
|
||||
StatusCode FfmpegTakePicture::WriteJpegData(const void *data, const size_t &size, const StreamInfo &streamInfo)
|
||||
{
|
||||
if (!mOutputFileInfo) {
|
||||
LogError("mOutputFileInfoCit is null\n");
|
||||
return CreateStatusCode(STATUS_CODE_NOT_OK);
|
||||
}
|
||||
char *pData = (char *)data;
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
if ((0x00 == pData[i]) && (0x00 == pData[i + 1]) && (0x00 == pData[i + 2]) && (0x01 == pData[i + 3]) &&
|
||||
(0x5 == (pData[i + 4] & 0x1F))) {
|
||||
LogInfo("Found extradata\n");
|
||||
CreateJpegFile(data, size);
|
||||
if (mOutputFileInfo->mFinished) {
|
||||
*(mOutputFileInfo->mFinished) = static_cast<int>(OUTPUT_FILE_STATUS_FINISHED);
|
||||
if (STREAM_TYPE_VIDEO_H264 == streamInfo.mType) {
|
||||
char *pData = (char *)data;
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
if ((0x00 == pData[i]) && (0x00 == pData[i + 1]) && (0x00 == pData[i + 2]) && (0x01 == pData[i + 3]) &&
|
||||
(0x5 == (pData[i + 4] & 0x1F))) {
|
||||
LogInfo("Found extradata\n");
|
||||
CreateJpegFile(data, size);
|
||||
if (mOutputFileInfo->mFinished) {
|
||||
*(mOutputFileInfo->mFinished) = static_cast<int>(OUTPUT_FILE_STATUS_FINISHED);
|
||||
}
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
}
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
}
|
||||
}
|
||||
if (STREAM_TYPE_JPEG == streamInfo.mType) {
|
||||
CreateJpegFile2(data, size);
|
||||
if (mOutputFileInfo->mFinished) {
|
||||
*(mOutputFileInfo->mFinished) = static_cast<int>(OUTPUT_FILE_STATUS_FINISHED);
|
||||
}
|
||||
return CreateStatusCode(STATUS_CODE_OK);
|
||||
}
|
||||
return CreateStatusCode(STATUS_CODE_NOT_OK);
|
||||
}
|
||||
void inline FfmpegTakePicture::CreateJpegFile(const void *data, const size_t &size)
|
||||
|
@ -94,4 +104,24 @@ void FfmpegTakePicture::CreateJpegFileThread(const void *data, const size_t &siz
|
|||
picture.Init(info);
|
||||
picture.CreateOriginalPicture(mOutputFileInfo->mFileName, data, size);
|
||||
picture.UnInit();
|
||||
}
|
||||
// Copies the already-encoded JPEG frame and hands it to a worker thread that
// writes it to the configured output file.
void inline FfmpegTakePicture::CreateJpegFile2(const void *data, const size_t &size)
{
    mFrameData = (char *)malloc(size);
    if (!mFrameData) {
        LogError("malloc failed\n");
        return;
    }
    memcpy(mFrameData, data, size);
    // NOTE(review): mFrameData is never free()d in this chunk, and mCodecThread
    // is assigned without joining any previous thread — confirm both are
    // handled elsewhere (e.g. destructor), otherwise this leaks per picture.
    auto codecThread = [](std::shared_ptr<FfmpegTakePicture> output, const void *frameData, const size_t dataSize) {
        LogInfo("CreateJpegFile start.\n");
        output->CreateJpegFileThread2(frameData, dataSize);
    };
    // Capture a shared_ptr to `this` so the object outlives the worker thread.
    std::shared_ptr<FfmpegTakePicture> impl =
        std::dynamic_pointer_cast<FfmpegTakePicture>(FfmpegBase::shared_from_this());
    mCodecThread = std::thread(codecThread, impl, mFrameData, size);
}
|
||||
// Worker-thread body: writes the JPEG bytes straight to the output file name
// recorded in mOutputFileInfo.
void FfmpegTakePicture::CreateJpegFileThread2(const void *data, const size_t &size)
{
    FfmpegThumbnailV2::SavePicture(mOutputFileInfo->mFileName, data, size);
}
|
|
@ -50,11 +50,13 @@ public:
|
|||
* @param size
|
||||
* @return StatusCode
|
||||
*/
|
||||
StatusCode WriteJpegData(const void *data, const size_t &size) override;
|
||||
StatusCode WriteJpegData(const void *data, const size_t &size, const StreamInfo &streamInfo) override;
|
||||
|
||||
private:
|
||||
void CreateJpegFile(const void *data, const size_t &size);
|
||||
void CreateJpegFileThread(const void *data, const size_t &size);
|
||||
void CreateJpegFile2(const void *data, const size_t &size);
|
||||
void CreateJpegFileThread2(const void *data, const size_t &size);
|
||||
|
||||
private:
|
||||
std::shared_ptr<OutputFileInfo> mOutputFileInfo;
|
||||
|
|
215
utils/MediaBase/src/FfmpegThumbnailV2.cpp
Normal file
215
utils/MediaBase/src/FfmpegThumbnailV2.cpp
Normal file
|
@ -0,0 +1,215 @@
|
|||
/*
|
||||
* Copyright (c) 2023 Fancy Code.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "FfmpegThumbnailV2.h"
|
||||
#include "FfmpegDecoderV2.h"
|
||||
#include "FfmpegEncoderV2.h"
|
||||
#include "ILog.h"
|
||||
#include "LinuxApi.h"
|
||||
#include <stdio.h>
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#include <libavcodec/codec_id.h>
|
||||
#include <libavcodec/packet.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/pixfmt.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#include <cstdlib>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <stdint.h>
|
||||
#include <string>
|
||||
/**
 * @brief Construct an immutable thumbnail geometry description.
 * @param sourceWidth  Width of the decoded source frame, in pixels.
 * @param sourceHeight Height of the decoded source frame, in pixels.
 * @param targetWidth  Width of the thumbnail to generate, in pixels.
 * @param targetHeight Height of the thumbnail to generate, in pixels.
 */
thumbnail_info_v2::thumbnail_info_v2(const int &sourceWidth, const int &sourceHeight, const int &targetWidth,
                                     const int &targetHeight)
    : mSourceWidth(sourceWidth), mSourceHeight(sourceHeight), mTargetWidth(targetWidth), mTargetHeight(targetHeight)
{
}
|
||||
/**
 * @brief Store the codec pair and null out all raw FFmpeg pointers; the real
 *        setup (decoder/encoder construction) is deferred to Init().
 * @param encodecId Codec used to encode the produced thumbnail picture.
 * @param decodecId Codec of the incoming encoded frame data.
 *
 * NOTE(review): AV_PIX_FMT_RGB4 is packed 4-bit RGB (1:2:1) in FFmpeg —
 * confirm this is intended and not a typo for AV_PIX_FMT_RGB24.
 * NOTE(review): mSourceWidth/mSourceHeight/mTargetWidth/mTargetHeight are not
 * initialized here; they hold indeterminate values until Init() is called.
 */
FfmpegThumbnailV2::FfmpegThumbnailV2(const AVCodecID &encodecId, const AVCodecID &decodecId)
    : mOutputFormat(nullptr), mStream(nullptr), mSwsCtx(nullptr), mEncodecId(encodecId), mDecodecId(decodecId),
      mDecodePixelFormat(AV_PIX_FMT_RGB4)
{
}
|
||||
void FfmpegThumbnailV2::Init(const ThumbnailInfoV2 &thumbnailInfo)
|
||||
{
|
||||
LogInfo("FfmpegThumbnailV2 Init\n");
|
||||
mSourceWidth = thumbnailInfo.mSourceWidth;
|
||||
mSourceHeight = thumbnailInfo.mSourceHeight;
|
||||
mTargetWidth = thumbnailInfo.mTargetWidth;
|
||||
mTargetHeight = thumbnailInfo.mTargetHeight;
|
||||
mDecoder = std::make_shared<FfmpegDecoderV2>(mDecodecId, mDecodePixelFormat, mSourceWidth, mSourceHeight);
|
||||
if (!mDecoder) {
|
||||
LogError("mDecoder = nullptr.\n");
|
||||
}
|
||||
mEncoder = std::make_shared<FfmpegEncoderV2>(mEncodecId, mDecodePixelFormat, mTargetWidth, mTargetHeight);
|
||||
if (!mEncoder) {
|
||||
LogError("mEncoder = nullptr.\n");
|
||||
}
|
||||
}
|
||||
void FfmpegThumbnailV2::UnInit(void)
|
||||
{
|
||||
if (mOutputFormat && mOutputFormat->pb) {
|
||||
av_write_trailer(mOutputFormat);
|
||||
}
|
||||
if (mEncoder) {
|
||||
mEncoder->UnInit();
|
||||
mEncoder.reset();
|
||||
}
|
||||
if (mDecoder) {
|
||||
mDecoder->UnInit();
|
||||
mDecoder.reset();
|
||||
}
|
||||
if (nullptr == mOutputFormat) {
|
||||
return;
|
||||
}
|
||||
if (!(mOutputFormat->oformat->flags & AVFMT_NOFILE)) {
|
||||
/* Close the output file. */
|
||||
avio_closep(&mOutputFormat->pb);
|
||||
}
|
||||
avformat_free_context(mOutputFormat);
|
||||
fx_system_v2("sync");
|
||||
if (mSwsCtx) {
|
||||
sws_freeContext(mSwsCtx);
|
||||
mSwsCtx = nullptr;
|
||||
}
|
||||
}
|
||||
bool FfmpegThumbnailV2::CreateThumbnail(const std::string &outputFile, const void *data, const size_t &size)
|
||||
{
|
||||
if (!mDecoder) {
|
||||
LogError("CreateThumbnail mDecoder && mDecodeCallback\n");
|
||||
return true;
|
||||
}
|
||||
mDecodeCallback = std::bind(&FfmpegThumbnailV2::GetDecodeDataCallback, this, std::placeholders::_1);
|
||||
mEncodeCallback = std::bind(&FfmpegThumbnailV2::GetEncodeDataCallback, this, std::placeholders::_1, outputFile);
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&mOutputFormat, nullptr, "image2", outputFile.c_str());
|
||||
if (!mOutputFormat) {
|
||||
LogError("Could not deduce output format from file.\n");
|
||||
return false;
|
||||
}
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (mOutputFormat->oformat->video_codec != AV_CODEC_ID_NONE) {
|
||||
/**
|
||||
* @brief Maybe there is no need to use avformat_alloc_output_context2 function to create ffmpeg container.
|
||||
* TODO: if mOutputFormat can be deleted here?
|
||||
*/
|
||||
mStream = avformat_new_stream(mOutputFormat, nullptr);
|
||||
if (!mStream) {
|
||||
LogError("Could not allocate stream\n");
|
||||
return false;
|
||||
}
|
||||
mStream->id = mOutputFormat->nb_streams - 1;
|
||||
LogInfo("Create video stream\n");
|
||||
}
|
||||
mDecoder->Init();
|
||||
constexpr int NO_FLAGS = 0;
|
||||
mEncoder->Init(NO_FLAGS);
|
||||
mStream->time_base = mEncoder->GetTimeBase();
|
||||
mEncoder->OpenEncoder(nullptr, mStream);
|
||||
LogInfo("Start to decode data\n");
|
||||
mDecoder->DecodeData(data, size, AV_NOPTS_VALUE, mDecodeCallback);
|
||||
LogInfo("Decode data end\n");
|
||||
return false;
|
||||
}
|
||||
/**
 * @brief Decoder output hook: receives each decoded frame and forwards it to
 *        the scaling/encoding stage.
 * @param frame Decoded frame owned by the decoder; not retained here.
 */
void FfmpegThumbnailV2::GetDecodeDataCallback(AVFrame *frame)
{
    EncodeDataToPicture(frame);
}
|
||||
/**
 * @brief Encoder output hook: persists the encoded packet payload to disk.
 * @param pkt Encoded packet produced by the encoder; only data/size are read.
 * @param fileName Destination picture file path (bound in CreateThumbnail).
 */
void FfmpegThumbnailV2::GetEncodeDataCallback(AVPacket *pkt, const std::string &fileName)
{
    SavePicture(fileName, pkt->data, pkt->size);
}
|
||||
/**
 * @brief Scale one decoded frame to the target thumbnail size and hand it to
 *        the encoder; the encoder's callback (GetEncodeDataCallback) then
 *        writes the picture to disk.
 * @param frame Decoded source frame; its pixel format drives the conversion.
 */
void FfmpegThumbnailV2::EncodeDataToPicture(AVFrame *frame)
{
    LogInfo("Decode frame->width = %d, frame->height=%d\n", frame->width, frame->height);
    /* Declared before the first goto so no initialization is jumped over. */
    uint8_t *jpegBuf = nullptr;
    int jpegBufSize = 0;
    AVFrame *thumbnailFrame = av_frame_alloc();
    if (!thumbnailFrame) {
        LogError("thumbnailFrame = nullptr.\n");
        return;
    }
    thumbnailFrame->format = mDecodePixelFormat;
    thumbnailFrame->width = mTargetWidth;
    thumbnailFrame->height = mTargetHeight;

    /* Buffer sized for the SOURCE dimensions so it is large enough for the
     * conversion. NOTE(review): av_image_fill_arrays() below is given
     * frame->width/height (source) while thumbnailFrame claims the target
     * size — verify this mismatch is intentional. */
    jpegBufSize = av_image_get_buffer_size(mDecodePixelFormat, mSourceWidth, mSourceHeight, 1);
    LogInfo("jpegBufSize: %d\n", jpegBufSize);
    jpegBuf = (uint8_t *)av_malloc(jpegBufSize);
    if (!jpegBuf) {
        LogError("jpegBuf = nullptr.\n");
        goto END;
    }
    av_image_fill_arrays(
        thumbnailFrame->data, thumbnailFrame->linesize, jpegBuf, mDecodePixelFormat, frame->width, frame->height, 1);

    if (mSwsCtx) {
        /* BUGFIX: a context left over from a previous frame leaked here;
         * release it before creating a new one. */
        sws_freeContext(mSwsCtx);
        mSwsCtx = nullptr;
    }
    mSwsCtx = sws_getContext(mSourceWidth,
                             mSourceHeight,
                             static_cast<AVPixelFormat>(frame->format),
                             thumbnailFrame->width,
                             thumbnailFrame->height,
                             mDecodePixelFormat,
                             SWS_BILINEAR,
                             nullptr,
                             nullptr,
                             nullptr);
    if (!mSwsCtx) {
        LogError("mSwsCtx = nullptr.\n");
        goto END;
    }
    fx_system_v2("echo 1 > /proc/sys/vm/drop_caches");
    /**
     * @brief Perform pixel format conversion.
     * NOTE: This function will crash when the chip does not have enough memory.
     */
    sws_scale(mSwsCtx, frame->data, frame->linesize, 0, frame->height, thumbnailFrame->data, thumbnailFrame->linesize);

    if (mEncoder) {
        mEncoder->EncodeData(thumbnailFrame, mStream, mEncodeCallback);
    }
END:
    if (thumbnailFrame) {
        av_frame_free(&thumbnailFrame);
    }
    if (jpegBuf) {
        av_free(jpegBuf);
    }
    return;
}
|
||||
bool FfmpegThumbnailV2::SavePicture(const std::string &fileName, const void *data, const size_t &size)
|
||||
{
|
||||
FILE *file = nullptr;
|
||||
if (!data) {
|
||||
LogError("SavePicture:%s failed, data is nullptr.\n", fileName.c_str());
|
||||
return false;
|
||||
}
|
||||
LogInfo("SavePicture:%s, size = %u\n", fileName.c_str(), size);
|
||||
file = fopen(fileName.c_str(), "a+");
|
||||
if (!file) {
|
||||
LogError("fopen failed.\n");
|
||||
return false;
|
||||
}
|
||||
fwrite(data, 1, size, file);
|
||||
fflush(file);
|
||||
fclose(file);
|
||||
// system("sync");
|
||||
return true;
|
||||
}
|
84
utils/MediaBase/src/FfmpegThumbnailV2.h
Normal file
84
utils/MediaBase/src/FfmpegThumbnailV2.h
Normal file
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
* Copyright (c) 2023 Fancy Code.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#ifndef FFMPEG_THUMBNAIL_V2_H
|
||||
#define FFMPEG_THUMBNAIL_V2_H
|
||||
#include "FfmpegDecoderV2.h"
|
||||
#include "FfmpegEncoderV2.h"
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavcodec/packet.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/avutil.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libswresample/swresample.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
/**
 * @brief Immutable description of one thumbnail conversion: the decoded
 *        source frame size and the target (scaled-down) picture size,
 *        in pixels.
 */
typedef struct thumbnail_info_v2
{
    thumbnail_info_v2(const int &sourceWidth, const int &sourceHeight, const int &targetWidth, const int &targetHeight);
    const int mSourceWidth;   ///< Width of the decoded source frame, in pixels.
    const int mSourceHeight;  ///< Height of the decoded source frame, in pixels.
    const int mTargetWidth;   ///< Width of the thumbnail to generate, in pixels.
    const int mTargetHeight;  ///< Height of the thumbnail to generate, in pixels.
} ThumbnailInfoV2;
|
||||
/**
 * @brief Generates a still-picture thumbnail from a single encoded video
 *        frame: decode -> scale (libswscale) -> re-encode -> write to disk.
 *        Usage: Init() once, CreateThumbnail() per picture, UnInit() last.
 *        Not thread-safe: callbacks and FFmpeg contexts are shared members.
 */
class FfmpegThumbnailV2
{
public:
    /// @param encodecId Codec used to encode the produced thumbnail.
    /// @param decodecId Codec of the incoming encoded frame data.
    FfmpegThumbnailV2(const AVCodecID &encodecId, const AVCodecID &decodecId);
    virtual ~FfmpegThumbnailV2() = default;
    /// Cache geometry from @p thumbnailInfo and build the decoder/encoder pair.
    void Init(const ThumbnailInfoV2 &thumbnailInfo);
    /// Release encoder, decoder, output container and scaling context.
    void UnInit(void);
    /// Decode @p data (@p size bytes) and write the thumbnail to @p outputFile.
    bool CreateThumbnail(const std::string &outputFile, const void *data, const size_t &size);

private:
    /// Decoder output hook: forwards each decoded frame to EncodeDataToPicture().
    void GetDecodeDataCallback(AVFrame *frame);
    /// Encoder output hook: writes the encoded packet payload to @p fileName.
    void GetEncodeDataCallback(AVPacket *pkt, const std::string &fileName);

protected:
    /// Scale one decoded frame to the target size and feed it to the encoder.
    virtual void EncodeDataToPicture(AVFrame *frame);

public:
    /// Append @p size bytes at @p data to the file @p fileName.
    static bool SavePicture(const std::string &fileName, const void *data, const size_t &size);

private:
    std::shared_ptr<FfmpegEncoderV2> mEncoder;       // Built in Init(); encodes the scaled frame.
    std::shared_ptr<FfmpegDecoderV2> mDecoder;       // Built in Init(); decodes the input frame.
    std::function<void(AVFrame *)> mDecodeCallback;  // Bound in CreateThumbnail().
    std::function<void(AVPacket *)> mEncodeCallback; // Bound in CreateThumbnail() with the output path.

protected:
    AVFormatContext *mOutputFormat;  // "image2" output container; owned here.
    AVStream *mStream;               // Video stream inside mOutputFormat (owned by it).
    struct SwsContext *mSwsCtx;      // Scaling context; created per frame in EncodeDataToPicture().
    int mSourceWidth;                // Source frame width, set in Init().
    int mSourceHeight;               // Source frame height, set in Init().
    int mTargetWidth;                // Thumbnail width, set in Init().
    int mTargetHeight;               // Thumbnail height, set in Init().
    const AVCodecID mEncodecId;      // Thumbnail (output) codec id.
    const AVCodecID mDecodecId;      // Input frame codec id.
    AVPixelFormat mDecodePixelFormat; // Intermediate pixel format shared by both sides.
};
|
||||
#endif
|
|
@ -70,7 +70,7 @@ StatusCode IMediaBase::CloseJpegFile(void)
|
|||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION\n");
|
||||
return CreateStatusCode(STATUS_CODE_VIRTUAL_FUNCTION);
|
||||
}
|
||||
StatusCode IMediaBase::WriteJpegData(const void *data, const size_t &size)
|
||||
StatusCode IMediaBase::WriteJpegData(const void *data, const size_t &size, const StreamInfo &streamInfo)
|
||||
{
|
||||
LogWarning("STATUS_CODE_VIRTUAL_FUNCTION\n");
|
||||
return CreateStatusCode(STATUS_CODE_VIRTUAL_FUNCTION);
|
||||
|
|
|
@ -37,7 +37,7 @@ public: // About combine file.
|
|||
public: // About take picture.
|
||||
virtual StatusCode OpenJpegFile(const OutputFileInfo &fileInfo);
|
||||
virtual StatusCode CloseJpegFile(void);
|
||||
virtual StatusCode WriteJpegData(const void *data, const size_t &size);
|
||||
virtual StatusCode WriteJpegData(const void *data, const size_t &size, const StreamInfo &streamInfo);
|
||||
};
|
||||
typedef struct media_base_header
|
||||
{
|
||||
|
|
|
@ -106,10 +106,10 @@ StatusCode ICloseJpegFile(void *object)
|
|||
}
|
||||
return CreateStatusCode(STATUS_CODE_NOT_OK);
|
||||
}
|
||||
StatusCode IWriteJpegData(void *object, const void *data, const size_t size)
|
||||
StatusCode IWriteJpegData(void *object, const void *data, const size_t size, const StreamInfo streamInfo)
|
||||
{
|
||||
if (ObjectCheck(object) == true) {
|
||||
return (*(std::shared_ptr<IMediaBase> *)object)->WriteJpegData(data, size);
|
||||
return (*(std::shared_ptr<IMediaBase> *)object)->WriteJpegData(data, size, streamInfo);
|
||||
}
|
||||
return CreateStatusCode(STATUS_CODE_NOT_OK);
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user