Compare commits


2 Commits

Author SHA1 Message Date
zhangpeng
bba581e510 Push the stream in YUYV format 2025-06-30 17:02:22 +08:00
zhangpeng
15599a6ee4 Output the video stream in YUYV format 2025-06-30 17:01:41 +08:00
10 changed files with 251 additions and 435 deletions

View File

@@ -60,6 +60,7 @@
         "stop_token": "cpp",
         "streambuf": "cpp",
         "thread": "cpp",
-        "typeinfo": "cpp"
+        "typeinfo": "cpp",
+        "chrono": "cpp"
     }
 }

View File

@@ -2,7 +2,7 @@ PROJECT_NAME = AsynchronousGrabQt
 PROJECT_DIR = ../..
 EXAMPLES_DIR = $(PROJECT_DIR)/../..
 VIMBASDK_DIR = $(EXAMPLES_DIR)/../..
 MAKE_INCLUDE_DIR = $(CURDIR)/$(EXAMPLES_DIR)/Build/Make
 include $(MAKE_INCLUDE_DIR)/Common.mk
@@ -13,55 +13,47 @@ BIN_DIR = binary/$(CONFIG_DIR)
 OBJ_DIR = object/$(CONFIG_DIR)
 BIN_PATH = $(BIN_DIR)/$(BIN_FILE)
-# Qt tool paths
-MOC = /usr/lib/aarch64-linux-gnu/qt5/bin/moc
-UIC = /usr/lib/aarch64-linux-gnu/qt5/bin/uic
-RCC = /usr/lib/aarch64-linux-gnu/qt5/bin/rcc
-# Get the Qt compile flags via pkg-config
-QT_CFLAGS := $(shell pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)
-QT_LIBS := $(shell pkg-config --libs Qt5Core Qt5Gui Qt5Widgets)
 all: $(BIN_PATH)
 include $(MAKE_INCLUDE_DIR)/VimbaCPP.mk
 include $(MAKE_INCLUDE_DIR)/VimbaImageTransform.mk
+include $(MAKE_INCLUDE_DIR)/Qt.mk
 SOURCE_DIR = $(PROJECT_DIR)/Source
 INCLUDE_DIRS = -I$(SOURCE_DIR) \
                -I$(EXAMPLES_DIR) \
-               -I$(OBJ_DIR) \
-               -I/usr/include/aarch64-linux-gnu/qt5 \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtCore \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtGui \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtWidgets
+               -I$(OBJ_DIR)
 LIBS = $(VIMBACPP_LIBS) \
        $(VIMBAIMAGETRANSFORM_LIBS) \
-       $(QT_LIBS)
+       $(QTCORE_LIBS) \
+       $(QTGUI_LIBS)
 DEFINES =
 CFLAGS = $(COMMON_CFLAGS) \
          $(VIMBACPP_CFLAGS) \
          $(VIMBAIMAGETRANSFORM_CFLAGS) \
-         $(QT_CFLAGS)
+         $(QTCORE_CFLAGS) \
+         $(QTGUI_CFLAGS)
 OBJ_FILES = $(OBJ_DIR)/ApiController.o \
             $(OBJ_DIR)/AsynchronousGrab.o \
             $(OBJ_DIR)/CameraObserver.o \
             $(OBJ_DIR)/FrameObserver.o \
             $(OBJ_DIR)/main.o \
             $(OBJ_DIR)/moc_AsynchronousGrab.o \
             $(OBJ_DIR)/moc_CameraObserver.o \
             $(OBJ_DIR)/moc_FrameObserver.o \
             $(OBJ_DIR)/qrc_AsynchronousGrab.o
 GEN_HEADERS = $(OBJ_DIR)/ui_AsynchronousGrab.h
 DEPENDENCIES = VimbaCPP \
-               VimbaImageTransform
+               VimbaImageTransform \
+               QtCore \
+               QtGui
 $(OBJ_DIR)/moc_%.cpp: $(SOURCE_DIR)/%.h $(OBJ_DIR)
 	$(MOC) -o $@ $<
@@ -89,6 +81,4 @@ $(OBJ_DIR):
 	$(MKDIR) -p $(OBJ_DIR)
 $(BIN_DIR):
 	$(MKDIR) -p $(BIN_DIR)
-.PHONY: all clean
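Note: after this change the Makefile relies on the SDK's Qt.mk to define MOC and the QTCORE_*/QTGUI_* variables that replace the hand-rolled pkg-config block above. As a rough sketch of what such an include is expected to provide (an assumption; the actual Vimba Qt.mk may locate Qt differently):

    # Hypothetical Qt.mk stand-in, resolving Qt5 via pkg-config
    QTCORE_CFLAGS := $(shell pkg-config --cflags Qt5Core)
    QTCORE_LIBS   := $(shell pkg-config --libs Qt5Core)
    QTGUI_CFLAGS  := $(shell pkg-config --cflags Qt5Gui Qt5Widgets)
    QTGUI_LIBS    := $(shell pkg-config --libs Qt5Gui Qt5Widgets)
    MOC           := $(shell pkg-config --variable=host_bins Qt5Core)/moc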

View File

@@ -1,94 +0,0 @@
-PROJECT_NAME = AsynchronousGrabQt
-PROJECT_DIR = ../..
-EXAMPLES_DIR = $(PROJECT_DIR)/../..
-VIMBASDK_DIR = $(EXAMPLES_DIR)/../..
-MAKE_INCLUDE_DIR = $(CURDIR)/$(EXAMPLES_DIR)/Build/Make
-include $(MAKE_INCLUDE_DIR)/Common.mk
-CONFIG_DIR = $(ARCH)_$(WORDSIZE)bit
-BIN_FILE = $(PROJECT_NAME)
-BIN_DIR = binary/$(CONFIG_DIR)
-OBJ_DIR = object/$(CONFIG_DIR)
-BIN_PATH = $(BIN_DIR)/$(BIN_FILE)
-# Qt tool paths
-MOC = /usr/lib/aarch64-linux-gnu/qt5/bin/moc
-UIC = /usr/lib/aarch64-linux-gnu/qt5/bin/uic
-RCC = /usr/lib/aarch64-linux-gnu/qt5/bin/rcc
-# Get the Qt compile flags via pkg-config
-QT_CFLAGS := $(shell pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)
-QT_LIBS := $(shell pkg-config --libs Qt5Core Qt5Gui Qt5Widgets)
-all: $(BIN_PATH)
-include $(MAKE_INCLUDE_DIR)/VimbaCPP.mk
-include $(MAKE_INCLUDE_DIR)/VimbaImageTransform.mk
-SOURCE_DIR = $(PROJECT_DIR)/Source
-INCLUDE_DIRS = -I$(SOURCE_DIR) \
-               -I$(EXAMPLES_DIR) \
-               -I$(OBJ_DIR) \
-               -I/usr/include/aarch64-linux-gnu/qt5 \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtCore \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtGui \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtWidgets
-LIBS = $(VIMBACPP_LIBS) \
-       $(VIMBAIMAGETRANSFORM_LIBS) \
-       $(QT_LIBS)
-DEFINES =
-CFLAGS = $(COMMON_CFLAGS) \
-         $(VIMBACPP_CFLAGS) \
-         $(VIMBAIMAGETRANSFORM_CFLAGS) \
-         $(QT_CFLAGS)
-OBJ_FILES = $(OBJ_DIR)/ApiController.o \
-            $(OBJ_DIR)/AsynchronousGrab.o \
-            $(OBJ_DIR)/CameraObserver.o \
-            $(OBJ_DIR)/FrameObserver.o \
-            $(OBJ_DIR)/main.o \
-            $(OBJ_DIR)/moc_AsynchronousGrab.o \
-            $(OBJ_DIR)/moc_CameraObserver.o \
-            $(OBJ_DIR)/moc_FrameObserver.o \
-            $(OBJ_DIR)/qrc_AsynchronousGrab.o
-GEN_HEADERS = $(OBJ_DIR)/ui_AsynchronousGrab.h
-DEPENDENCIES = VimbaCPP \
-               VimbaImageTransform
-$(OBJ_DIR)/moc_%.cpp: $(SOURCE_DIR)/%.h $(OBJ_DIR)
-	$(MOC) -o $@ $<
-$(OBJ_DIR)/ui_%.h: $(SOURCE_DIR)/res/%.ui $(OBJ_DIR)
-	$(UIC) -o $@ $<
-$(OBJ_DIR)/qrc_%.cpp: $(SOURCE_DIR)/res/%.qrc $(OBJ_DIR)
-	$(RCC) -o $@ $<
-$(OBJ_DIR)/%.o: $(SOURCE_DIR)/%.cpp $(OBJ_DIR) $(GEN_HEADERS)
-	$(CXX) -c $(INCLUDE_DIRS) $(DEFINES) $(CFLAGS) -o $@ $<
-$(OBJ_DIR)/%.o: $(OBJ_DIR)/%.cpp $(OBJ_DIR) $(GEN_HEADERS)
-	$(CXX) -c $(INCLUDE_DIRS) $(DEFINES) $(CFLAGS) -o $@ $<
-$(BIN_PATH): $(DEPENDENCIES) $(OBJ_FILES) $(BIN_DIR)
-	$(CXX) $(ARCH_CFLAGS) -o $(BIN_PATH) $(OBJ_FILES) $(LIBS) -Wl,-rpath,'$$ORIGIN'
-clean:
-	$(RM) binary -r -f
-	$(RM) object -r -f
-$(OBJ_DIR):
-	$(MKDIR) -p $(OBJ_DIR)
-$(BIN_DIR):
-	$(MKDIR) -p $(BIN_DIR)
-.PHONY: all clean

View File

@@ -34,7 +34,8 @@ DEFINES =
 CFLAGS = $(COMMON_CFLAGS) \
          $(VIMBACPP_CFLAGS) \
-         $(VIMBAIMAGETRANSFORM_CFLAGS)
+         $(VIMBAIMAGETRANSFORM_CFLAGS) \
 OBJ_FILES = $(OBJ_DIR)/ApiController.o \
             $(OBJ_DIR)/FrameObserver.o \
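Note: as captured here, the new CFLAGS block ends in a line-continuation backslash immediately before OBJ_FILES. In GNU Make a trailing backslash splices the next line into the same assignment, so unless another flags line follows in the actual file, the OBJ_FILES assignment would be absorbed into CFLAGS. A minimal reproduction of the pitfall:

    # With a dangling backslash, the next assignment is spliced in:
    VAR = a \
    NEXT = b
    # VAR now expands to "a NEXT = b"; NEXT is never defined.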

View File

@@ -4,6 +4,8 @@
 #include <sys/ioctl.h>
 #include <unistd.h>
 #include <chrono>
+#include <vector>
+#include <iostream>
 namespace AVT {
 namespace VmbAPI {
@@ -36,8 +38,9 @@ bool FrameObserver::SetupVideoDevice()
     m_vfmt.fmt.pix.width = 640;
     m_vfmt.fmt.pix.height = 480;
-    m_vfmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
+    m_vfmt.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24; // Changed to RGB24 format
     m_vfmt.fmt.pix.field = V4L2_FIELD_NONE;
+    m_vfmt.fmt.pix.sizeimage = 640 * 480 * 3; // RGB24 is 3 bytes per pixel
     if (ioctl(m_videoFd, VIDIOC_S_FMT, &m_vfmt) < 0) {
         perror("Set video format");
@@ -76,8 +79,8 @@ bool FrameObserver::UpdateVideoFormat(VmbUint32_t width, VmbUint32_t height, Vmb
         m_vfmt.fmt.pix.sizeimage = width * height;
         break;
     case VmbPixelFormatBayerRG8:
-        m_vfmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SRGGB8;
-        m_vfmt.fmt.pix.sizeimage = width * height;
+        m_vfmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; // We convert Bayer to YUYV
+        m_vfmt.fmt.pix.sizeimage = width * height * 2;
         break;
     default:
         m_vfmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
@@ -92,40 +95,125 @@ bool FrameObserver::UpdateVideoFormat(VmbUint32_t width, VmbUint32_t height, Vmb
     return true;
 }
+// Convert Bayer RGGB to RGB (simplified demosaicing)
+void FrameObserver::ConvertBayerRGGBToRGB(const VmbUchar_t* bayerData, VmbUchar_t* rgbData, VmbUint32_t width, VmbUint32_t height) {
+    // Only process interior pixels (avoids out-of-bounds access at the borders)
+    for (VmbUint32_t y = 1; y < height - 1; y++) {
+        for (VmbUint32_t x = 1; x < width - 1; x++) {
+            VmbUint32_t idx = y * width + x;
+            VmbUchar_t r, g, b;
+            if (y % 2 == 0) { // Even row (RGRG...)
+                if (x % 2 == 0) { // R
+                    r = bayerData[idx];
+                    g = (bayerData[idx - 1] + bayerData[idx + 1] + bayerData[idx - width] + bayerData[idx + width]) / 4;
+                    b = (bayerData[idx - width - 1] + bayerData[idx - width + 1] + bayerData[idx + width - 1] + bayerData[idx + width + 1]) / 4;
+                } else { // G
+                    r = (bayerData[idx - 1] + bayerData[idx + 1]) / 2;
+                    g = bayerData[idx];
+                    b = (bayerData[idx - width] + bayerData[idx + width]) / 2;
+                }
+            } else { // Odd row (GBGB...)
+                if (x % 2 == 0) { // G
+                    r = (bayerData[idx - width] + bayerData[idx + width]) / 2;
+                    g = bayerData[idx];
+                    b = (bayerData[idx - 1] + bayerData[idx + 1]) / 2;
+                } else { // B
+                    r = (bayerData[idx - width - 1] + bayerData[idx - width + 1] + bayerData[idx + width - 1] + bayerData[idx + width + 1]) / 4;
+                    g = (bayerData[idx - 1] + bayerData[idx + 1] + bayerData[idx - width] + bayerData[idx + width]) / 4;
+                    b = bayerData[idx];
+                }
+            }
+            rgbData[idx * 3] = r;
+            rgbData[idx * 3 + 1] = g;
+            rgbData[idx * 3 + 2] = b;
+        }
+    }
+}
+// Convert RGB to YUYV (4:2:2)
+void FrameObserver::ConvertRGBToYUYV(const VmbUchar_t* rgbData, VmbUchar_t* yuyvData, VmbUint32_t width, VmbUint32_t height) {
+    for (VmbUint32_t i = 0; i < width * height; i += 2) {
+        VmbUchar_t r1 = rgbData[i * 3], g1 = rgbData[i * 3 + 1], b1 = rgbData[i * 3 + 2];
+        VmbUchar_t r2 = rgbData[(i + 1) * 3], g2 = rgbData[(i + 1) * 3 + 1], b2 = rgbData[(i + 1) * 3 + 2];
+        // Simplified BT.601 YUV conversion
+        VmbUchar_t y1 = 0.299f * r1 + 0.587f * g1 + 0.114f * b1;
+        VmbUchar_t u1 = -0.147f * r1 - 0.289f * g1 + 0.436f * b1 + 128;
+        VmbUchar_t v1 = 0.615f * r1 - 0.515f * g1 - 0.100f * b1 + 128;
+        VmbUchar_t y2 = 0.299f * r2 + 0.587f * g2 + 0.114f * b2;
+        yuyvData[i * 2] = y1;
+        yuyvData[i * 2 + 1] = u1;
+        yuyvData[i * 2 + 2] = y2;
+        yuyvData[i * 2 + 3] = v1;
+    }
+}
-void FrameObserver::FrameReceived(const FramePtr pFrame)
-{
+void FrameObserver::FrameReceived(const FramePtr pFrame) {
     auto startTime = std::chrono::high_resolution_clock::now();
+    // 1. Get the frame data
     VmbUchar_t* pBuffer = nullptr;
     VmbUint32_t nSize = 0;
     VmbUint32_t width = 0, height = 0;
     VmbPixelFormatType pixelFormat;
     if (pFrame->GetImage(pBuffer) != VmbErrorSuccess ||
         pFrame->GetImageSize(nSize) != VmbErrorSuccess ||
         pFrame->GetWidth(width) != VmbErrorSuccess ||
         pFrame->GetHeight(height) != VmbErrorSuccess ||
         pFrame->GetPixelFormat(pixelFormat) != VmbErrorSuccess) {
+        std::cerr << "Failed to get frame data!" << std::endl;
         return;
     }
+    // 2. Validate the data
+    if (!pBuffer || width == 0 || height == 0) {
+        std::cerr << "Invalid frame data: width=" << width << ", height=" << height << std::endl;
+        return;
+    }
+    // 3. Update the V4L2 device format (make sure the current resolution/pixel format is supported)
     if (!UpdateVideoFormat(width, height, pixelFormat)) {
+        std::cerr << "Failed to update video format!" << std::endl;
         return;
     }
-    ssize_t written = write(m_videoFd, pBuffer, nSize);
-    if (written != (ssize_t)nSize) {
-        perror("Write to video device");
+    // 4. Bayer RGGB frames: convert to YUYV
+    if (pixelFormat == VmbPixelFormatBayerRG8) {
+        // 4.1 Allocate buffers
+        std::vector<VmbUchar_t> rgbData(width * height * 3);  // RGB24: width * height * 3
+        std::vector<VmbUchar_t> yuyvData(width * height * 2); // YUYV: width * height * 2
+        // 4.2 Bayer RGGB -> RGB
+        ConvertBayerRGGBToRGB(pBuffer, rgbData.data(), width, height);
+        // 4.3 RGB -> YUYV
+        ConvertRGBToYUYV(rgbData.data(), yuyvData.data(), width, height);
+        // 4.4 Write to the V4L2 device
+        ssize_t written = write(m_videoFd, yuyvData.data(), yuyvData.size());
+        if (written != (ssize_t)yuyvData.size()) {
+            perror("Write YUYV to video device failed");
+        }
+    }
+    // 5. Non-Bayer formats: write through directly
+    else {
+        ssize_t written = write(m_videoFd, pBuffer, nSize);
+        if (written != (ssize_t)nSize) {
+            perror("Write raw data to video device failed");
+        }
     }
+    // 6. Performance statistics
     auto endTime = std::chrono::high_resolution_clock::now();
     double latency = std::chrono::duration<double, std::milli>(endTime - startTime).count();
     m_totalLatency.store(m_totalLatency.load() + latency);
     m_frameCount++;
+    // 7. Re-queue the frame
     m_pCamera->QueueFrame(pFrame);
 }
 double FrameObserver::GetAverageLatency() const {
     return m_frameCount > 0 ? m_totalLatency.load() / m_frameCount : 0.0;
 }
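Reviewer note on the new ConvertRGBToYUYV: the BT.601 expressions are computed in float and narrowed straight to VmbUchar_t, and the V term (0.615*R - 0.515*G - 0.100*B + 128) can leave the 0..255 range for saturated colors, where the narrowing wraps instead of saturating. A minimal clamped variant (hypothetical helper, not part of this commit):

    static inline VmbUchar_t ClampToByte(float v)
    {
        // Saturate to [0, 255] before narrowing to an unsigned byte
        return static_cast<VmbUchar_t>(v < 0.0f ? 0.0f : (v > 255.0f ? 255.0f : v));
    }

    // e.g. inside ConvertRGBToYUYV:
    //   VmbUchar_t v1 = ClampToByte(0.615f * r1 - 0.515f * g1 - 0.100f * b1 + 128);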

View File

@@ -8,6 +8,7 @@
 #include <mutex>
 #include <chrono>
 #include <atomic>
+#include <vector> // Add for buffer storage
 namespace AVT {
 namespace VmbAPI {
@@ -29,6 +30,11 @@ private:
     void CloseVideoDevice();
     bool UpdateVideoFormat(VmbUint32_t width, VmbUint32_t height, VmbPixelFormatType pixelFormat);
+    // Bayer to RGB conversion
+    void ConvertBayerRGGBToRGB(const VmbUchar_t* bayerData, VmbUchar_t* rgbData, VmbUint32_t width, VmbUint32_t height);
+    // RGB to YUYV conversion
+    void ConvertRGBToYUYV(const VmbUchar_t* rgbData, VmbUchar_t* yuyvData, VmbUint32_t width, VmbUint32_t height);
     int m_videoFd;
     struct v4l2_format m_vfmt;
     std::mutex m_deviceMutex;

View File

@@ -12,50 +12,40 @@ BIN_FILE = $(PROJECT_NAME)
 BIN_DIR = binary/$(CONFIG_DIR)
 OBJ_DIR = object/$(CONFIG_DIR)
 BIN_PATH = $(BIN_DIR)/$(BIN_FILE)
-# Qt tool paths
-MOC = /usr/lib/aarch64-linux-gnu/qt5/bin/moc
-UIC = /usr/lib/aarch64-linux-gnu/qt5/bin/uic
-RCC = /usr/lib/aarch64-linux-gnu/qt5/bin/rcc
-# Get the Qt compile flags via pkg-config
-QT_CFLAGS := $(shell pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)
-QT_LIBS := $(shell pkg-config --libs Qt5Core Qt5Gui Qt5Widgets)
 all: $(BIN_PATH)
 include $(MAKE_INCLUDE_DIR)/VimbaCPP.mk
 include $(MAKE_INCLUDE_DIR)/VimbaImageTransform.mk
+include $(MAKE_INCLUDE_DIR)/Qt.mk
 SOURCE_DIR = $(PROJECT_DIR)/Source
 INCLUDE_DIRS = -I$(SOURCE_DIR) \
                -I$(EXAMPLES_DIR) \
-               -I$(OBJ_DIR) \
-               -I/usr/include/aarch64-linux-gnu/qt5 \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtCore \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtGui \
-               -I/usr/include/aarch64-linux-gnu/qt5/QtWidgets
+               -I$(OBJ_DIR)
-OPENCV_CFLAGS = $(shell $(PKGCFG) --cflags opencv4)
-OPENCV_LIBS = $(shell $(PKGCFG) --libs opencv4)
+OPENCV_CFLAGS = $(shell $(PKGCFG) --cflags opencv)
+OPENCV_LIBS = $(shell $(PKGCFG) --libs opencv)
 LIBS = $(VIMBACPP_LIBS) \
        $(VIMBAIMAGETRANSFORM_LIBS) \
-       $(QT_LIBS) \
+       $(QTCORE_LIBS) \
+       $(QTGUI_LIBS) \
        $(OPENCV_LIBS)
 DEFINES =
 CFLAGS = $(COMMON_CFLAGS) \
          $(VIMBACPP_CFLAGS) \
          $(VIMBAIMAGETRANSFORM_CFLAGS) \
-         $(QT_CFLAGS) \
+         $(QTCORE_CFLAGS) \
+         $(QTGUI_CFLAGS) \
          $(OPENCV_CFLAGS)
 OBJ_FILES = $(OBJ_DIR)/ApiController.o \
@@ -72,7 +62,9 @@ OBJ_FILES = $(OBJ_DIR)/ApiController.o \
 GEN_HEADERS = $(OBJ_DIR)/ui_AsynchronousOpenCVRecorder.h
 DEPENDENCIES = VimbaCPP \
-               VimbaImageTransform
+               VimbaImageTransform \
+               QtCore \
+               QtGui
 $(OBJ_DIR)/moc_%.cpp: $(SOURCE_DIR)/%.h $(OBJ_DIR)
 	$(MOC) -o $@ $<
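Note: this hunk also switches the pkg-config module from opencv4 back to opencv. OpenCV 2.x/3.x install opencv.pc, while OpenCV 4 installs opencv4.pc (and only when built with OPENCV_GENERATE_PKGCONFIG=ON), so which name resolves depends on the OpenCV installed on the target. A quick check on the build machine:

    pkg-config --exists opencv && echo "opencv.pc found"
    pkg-config --exists opencv4 && echo "opencv4.pc found"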

View File

@@ -90,18 +90,7 @@ the use of this software, even if advised of the possibility of such damage.
 #include "VmbTransform.h"
 #include <VimbaCPP/Include/VimbaCPP.h>
-#include <opencv2/videoio.hpp> // videoio must be included to use VideoWriter
-#include <opencv2/opencv.hpp>
-// Replacements for the original enum definition
-static const int FOURCC_USER_SELECT = -1; // replaces CV_FOURCC_PROMPT
-static const int FOURCC_DEFAULT = cv::VideoWriter::fourcc('I','Y','U','V');
-static const int FOURCC_MPEG1 = cv::VideoWriter::fourcc('P','I','M','1');
-static const int FOURCC_MJPEG = cv::VideoWriter::fourcc('M','J','P','G');
-static const int FOURCC_MPEG42 = cv::VideoWriter::fourcc('M','P','4','2');
-static const int FOURCC_MPEG43 = cv::VideoWriter::fourcc('M','P','4','3');
-static const int FOURCC_DIVX = cv::VideoWriter::fourcc('D','I','V','X');
-static const int FOURCC_X264 = cv::VideoWriter::fourcc('X','2','6','4');
 //
 // Base exception
 //
@@ -141,6 +130,17 @@ class OpenCVRecorder: public QThread
 // Example FOURCC codes that can be used with the OpenCVRecorder
 //
 VmbUint32_t maxQueueElements() const { return 3; }
+enum
+{
+    FOURCC_USER_SELECT = CV_FOURCC_PROMPT,
+    FOURCC_DEFAULT = CV_FOURCC_MACRO('I','Y','U','V'),
+    FOURCC_MPEG1 = CV_FOURCC_MACRO('P','I','M','1'),
+    FOURCC_MJPEG = CV_FOURCC_MACRO('M','J','P','G'),
+    FOURCC_MPEG42 = CV_FOURCC_MACRO('M','P','4','2'),
+    FOURCC_MPEG43 = CV_FOURCC_MACRO('M','P','4','3'),
+    FOURCC_DIVX = CV_FOURCC_MACRO('D','I','V','X'),
+    FOURCC_X264 = CV_FOURCC_MACRO('X','2','6','4'),
+};
 //
 // frame data temporary storage
 //
@@ -305,10 +305,10 @@ class OpenCVRecorder: public QThread
 public:
     OpenCVRecorder(const QString &fileName, VmbFloat_t fps, VmbUint32_t Width, VmbUint32_t Height)
         : m_StopThread( false )
-#ifdef _MSC_VER // manual codec selection supported on Windows
-        , m_VideoWriter(fileName.toStdString(), FOURCC_USER_SELECT, fps, cv::Size(Width, Height), true)
+#ifdef _MSC_VER // codec selection only supported by Windows
+        , m_VideoWriter(fileName.toStdString(), FOURCC_USER_SELECT, fps, cv::Size(Width,Height),true )
 #else
-        , m_VideoWriter(fileName.toStdString(), FOURCC_X264, fps, cv::Size(Width, Height), true)
+        , m_VideoWriter(fileName.toStdString(), FOURCC_X264, fps, cv::Size(Width,Height),true )
 #endif
         , m_ConvertImage( Height, Width, CV_8UC3)
     {
@@ -398,20 +398,4 @@
     }
 };
-class RTSPStreamer {
-public:
-    RTSPStreamer(const std::string& rtsp_url, int width, int height, int fps);
-    ~RTSPStreamer();
-    bool pushFrame(const cv::Mat& frame);
-    bool isConnected() const { return m_connected; }
-private:
-    std::string m_rtspUrl;
-    AVFormatContext* m_fmtCtx = nullptr;
-    AVCodecContext* m_codecCtx = nullptr;
-    AVStream* m_stream = nullptr;
-    SwsContext* m_swsCtx = nullptr;
-    std::atomic<bool> m_connected{false};
-    int m_width, m_height, m_fps;
-};
 #endif
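Note: restoring the CV_FOURCC_PROMPT / CV_FOURCC_MACRO enum matches the switch back to the opencv pkg-config module above; those macros come from the pre-4.x OpenCV C API, whereas cv::VideoWriter::fourcc() is the OpenCV 4 replacement the removed block used. If this header ever has to build against both, a small guard is one option (a sketch, assuming OpenCV's version macros are visible):

    #if defined(CV_VERSION_MAJOR) && CV_VERSION_MAJOR >= 4
      #define CV_FOURCC_MACRO(a, b, c, d) cv::VideoWriter::fourcc((a), (b), (c), (d))
      #define CV_FOURCC_PROMPT (-1) // value the removed OpenCV 4 block used for FOURCC_USER_SELECT
    #endif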

View File

@@ -1,104 +0,0 @@
-RTSPStreamer::RTSPStreamer(const std::string& rtsp_url, int width, int height, int fps)
-    : m_rtspUrl(rtsp_url), m_width(width), m_height(height), m_fps(fps) {
-    // Initialize FFmpeg
-    avformat_network_init();
-    // Create the output context (RTSP)
-    avformat_alloc_output_context2(&m_fmtCtx, nullptr, "rtsp", rtsp_url.c_str());
-    if (!m_fmtCtx) {
-        std::cerr << "Failed to create RTSP output context" << std::endl;
-        return;
-    }
-    // Find the H.264 encoder
-    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
-    if (!codec) {
-        std::cerr << "H.264 encoder not found" << std::endl;
-        return;
-    }
-    // Create the encoder context
-    m_codecCtx = avcodec_alloc_context3(codec);
-    m_codecCtx->width = width;
-    m_codecCtx->height = height;
-    m_codecCtx->time_base = {1, fps};
-    m_codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
-    // Open the encoder
-    if (avcodec_open2(m_codecCtx, codec, nullptr) < 0) {
-        std::cerr << "Failed to open H.264 encoder" << std::endl;
-        return;
-    }
-    // Create the video stream
-    m_stream = avformat_new_stream(m_fmtCtx, codec);
-    avcodec_parameters_from_context(m_stream->codecpar, m_codecCtx);
-    // Open the RTSP output
-    if (avio_open(&m_fmtCtx->pb, rtsp_url.c_str(), AVIO_FLAG_WRITE) < 0) {
-        std::cerr << "Failed to open RTSP output" << std::endl;
-        return;
-    }
-    // Write the stream header
-    if (avformat_write_header(m_fmtCtx, nullptr) < 0) {
-        std::cerr << "Failed to write RTSP header" << std::endl;
-        return;
-    }
-    m_connected = true;
-}
-bool RTSPStreamer::pushFrame(const cv::Mat& frame) {
-    if (!m_connected) return false;
-    // Convert BGR to YUV420
-    AVFrame* avFrame = av_frame_alloc();
-    avFrame->format = AV_PIX_FMT_YUV420P;
-    avFrame->width = m_width;
-    avFrame->height = m_height;
-    av_frame_get_buffer(avFrame, 0);
-    SwsContext* swsCtx = sws_getContext(
-        frame.cols, frame.rows, AV_PIX_FMT_BGR24,
-        m_width, m_height, AV_PIX_FMT_YUV420P,
-        SWS_BILINEAR, nullptr, nullptr, nullptr);
-    const uint8_t* srcData[1] = {frame.data};
-    int srcLinesize[1] = {frame.step};
-    sws_scale(swsCtx, srcData, srcLinesize, 0, frame.rows,
-              avFrame->data, avFrame->linesize);
-    sws_freeContext(swsCtx);
-    // Encode and send
-    AVPacket pkt;
-    av_init_packet(&pkt);
-    pkt.data = nullptr;
-    pkt.size = 0;
-    if (avcodec_send_frame(m_codecCtx, avFrame) < 0) {
-        av_frame_free(&avFrame);
-        return false;
-    }
-    while (avcodec_receive_packet(m_codecCtx, &pkt) == 0) {
-        av_packet_rescale_ts(&pkt, m_codecCtx->time_base, m_stream->time_base);
-        pkt.stream_index = m_stream->index;
-        av_interleaved_write_frame(m_fmtCtx, &pkt);
-        av_packet_unref(&pkt);
-    }
-    av_frame_free(&avFrame);
-    return true;
-}
-RTSPStreamer::~RTSPStreamer() {
-    if (m_fmtCtx) {
-        av_write_trailer(m_fmtCtx);
-        avio_close(m_fmtCtx->pb);
-        avformat_free_context(m_fmtCtx);
-    }
-    if (m_codecCtx) avcodec_free_context(&m_codecCtx);
-    avformat_network_deinit();
-}

View File

@@ -18,243 +18,194 @@
 #include "mk_mediakit.h"
-typedef struct
-{
+typedef struct {
     MppEncoder *encoder;
     mk_media media;
     mk_pusher pusher;
     const char *push_url;
-    int video_type=264;
+    int video_type = 264;
     int push_rtsp_port;
     std::string push_path_first;
     std::string push_path_second;
 } rknn_app_context_t;
-void release_media(mk_media *ptr)
-{
-    if (ptr && *ptr)
-    {
+void release_media(mk_media *ptr) {
+    if (ptr && *ptr) {
         mk_media_release(*ptr);
         *ptr = NULL;
     }
 }
-void release_pusher(mk_pusher *ptr)
-{
-    if (ptr && *ptr)
-    {
+void release_pusher(mk_pusher *ptr) {
+    if (ptr && *ptr) {
         mk_pusher_release(*ptr);
         *ptr = NULL;
     }
 }
-void release_track(mk_track *ptr)
-{
-    if (ptr && *ptr)
-    {
+void release_track(mk_track *ptr) {
+    if (ptr && *ptr) {
         mk_track_unref(*ptr);
         *ptr = NULL;
     }
 }
-// Function definition
 int padToMultipleOf16(int number) {
-    // If number is already a multiple of 16, return it directly
     if (number % 16 == 0) {
         return number;
     }
-    // Otherwise compute the amount to add, i.e. 16 - (number % 16)
-    // This is equivalent to the smallest multiple of 16 greater than number, minus number
     int extra = 16 - (number % 16);
-    // Return the padded number
     return number + extra;
 }
-void API_CALL on_mk_push_event_func(void *user_data, int err_code, const char *err_msg)
-{
+void API_CALL on_mk_push_event_func(void *user_data, int err_code, const char *err_msg) {
     rknn_app_context_t *ctx = (rknn_app_context_t *)user_data;
-    if (err_code == 0)
-    {
-        // push success
+    if (err_code == 0) {
         log_info("push %s success!", ctx->push_url);
         printf("push %s success!\n", ctx->push_url);
-    }
-    else
-    {
+    } else {
         log_warn("push %s failed:%d %s", ctx->push_url, err_code, err_msg);
         printf("push %s failed:%d %s\n", ctx->push_url, err_code, err_msg);
         release_pusher(&(ctx->pusher));
     }
 }
-void API_CALL on_mk_media_source_regist_func(void *user_data, mk_media_source sender, int regist)
-{
+void API_CALL on_mk_media_source_regist_func(void *user_data, mk_media_source sender, int regist) {
     rknn_app_context_t *ctx = (rknn_app_context_t *)user_data;
     const char *schema = mk_media_source_get_schema(sender);
-    if (strncmp(schema, ctx->push_url, strlen(schema)) == 0)
-    {
+    if (strncmp(schema, ctx->push_url, strlen(schema)) == 0) {
         release_pusher(&(ctx->pusher));
-        if (regist)
-        {
+        if (regist) {
             ctx->pusher = mk_pusher_create_src(sender);
             mk_pusher_set_on_result(ctx->pusher, on_mk_push_event_func, ctx);
             mk_pusher_set_on_shutdown(ctx->pusher, on_mk_push_event_func, ctx);
             log_info("push started!");
             printf("push started!\n");
-        }
-        else
-        {
+        } else {
             log_info("push stoped!");
             printf("push stoped!\n");
         }
         printf("push_url:%s\n", ctx->push_url);
-    }
-    else
-    {
+    } else {
         printf("unknown schema:%s\n", schema);
     }
 }
-void API_CALL on_mk_shutdown_func(void *user_data, int err_code, const char *err_msg, mk_track tracks[], int track_count)
-{
-    printf("play interrupted: %d %s", err_code, err_msg);
-}
-int process_video_file(rknn_app_context_t *ctx, const char *video_path)
-{
-    // Read the video
-    cv::VideoCapture cap(video_path);
-    if (!cap.isOpened())
-    {
-        printf("Failed to open video file: %s", video_path);
-    }
-    // Before use, check the device's supported formats with v4l2-ctl --device=/dev/video0 --list-formats-ext
-    cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('M', 'J', 'P', 'G'));
-    // set width
-    cap.set(cv::CAP_PROP_FRAME_WIDTH, 1920);
-    // set height
-    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 1080);
-    // set fps
-    cap.set(cv::CAP_PROP_FPS,30);
-    // Get the video size and frame rate
+int process_camera_stream(rknn_app_context_t *ctx) {
+    // Open camera device
+    cv::VideoCapture cap("/dev/video61");
+    if (!cap.isOpened()) {
+        printf("Failed to open camera device: /dev/video61\n");
+        return -1;
+    }
+    // Set camera parameters based on v4l2-ctl output
+    cap.set(cv::CAP_PROP_FOURCC, cv::VideoWriter::fourcc('Y', 'U', 'Y', 'V'));
+    cap.set(cv::CAP_PROP_FRAME_WIDTH, 2452);
+    cap.set(cv::CAP_PROP_FRAME_HEIGHT, 2056);
+    cap.set(cv::CAP_PROP_FPS, 30);
+    // Get actual parameters
     int cap_width = cap.get(cv::CAP_PROP_FRAME_WIDTH);
     int cap_height = cap.get(cv::CAP_PROP_FRAME_HEIGHT);
     int fps = cap.get(cv::CAP_PROP_FPS);
+    printf("Camera opened with resolution: %dx%d, FPS: %d\n", cap_width, cap_height, fps);
+    // RTSP stream setup
     ctx->push_url = "rtsp://localhost/live/stream";
-    ctx->media = mk_media_create("__defaultVhost__", ctx->push_path_first.c_str(), ctx->push_path_second.c_str(), 0, 0, 0);
+    ctx->media = mk_media_create("__defaultVhost__", ctx->push_path_first.c_str(),
+                                 ctx->push_path_second.c_str(), 0, 0, 0);
     codec_args v_args = {0};
     mk_track v_track = mk_track_create(MKCodecH264, &v_args);
     mk_media_init_track(ctx->media, v_track);
     mk_media_init_complete(ctx->media);
     mk_media_set_on_regist(ctx->media, on_mk_media_source_regist_func, ctx);
-    // Initialize the encoder
+    // Initialize encoder
     MppEncoder *mpp_encoder = new MppEncoder();
     MppEncoderParams enc_params;
     memset(&enc_params, 0, sizeof(MppEncoderParams));
     enc_params.width = cap_width;
     enc_params.height = cap_height;
-    enc_params.fmt = MPP_FMT_YUV420SP;
+    enc_params.fmt = MPP_FMT_YUV420SP; // We'll convert from YUYV to YUV420SP
     enc_params.type = MPP_VIDEO_CodingAVC;
     mpp_encoder->Init(enc_params, ctx);
     ctx->encoder = mpp_encoder;
-    // MPP encoder configuration
+    // Variables for frame processing
     void *mpp_frame = NULL;
     int mpp_frame_fd = 0;
     void *mpp_frame_addr = NULL;
     int enc_data_size;
-    int frame_index = 0;
-    int ret = 0;
-    // Current picture
-    cv::Mat img;
-    while (true)
-    {
-        // Read a video frame
-        cap >> img;
-        if (img.empty())
-        {
-            printf("Video end.");
+    cv::Mat frame, yuv_frame;
+    while (true) {
+        // Read frame from camera
+        if (!cap.read(frame)) {
+            printf("Failed to read frame from camera\n");
             break;
         }
-        frame_index++;
-        // Stop timing
-        auto end_time = std::chrono::high_resolution_clock::now();
-        // Convert the current time point into a millisecond timestamp
-        auto millis = std::chrono::time_point_cast<std::chrono::milliseconds>(end_time).time_since_epoch().count();
-        // Get the decoded frame
+        if (frame.empty()) {
+            printf("Empty frame received\n");
+            continue;
+        }
+        // Get current timestamp
+        auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(
+            std::chrono::system_clock::now().time_since_epoch()).count();
+        // Get encoder input buffer
         mpp_frame = ctx->encoder->GetInputFrameBuffer();
-        // Get the decoded frame's fd
         mpp_frame_fd = ctx->encoder->GetInputFrameBufferFd(mpp_frame);
-        // Get the decoded frame's address
         mpp_frame_addr = ctx->encoder->GetInputFrameBufferAddr(mpp_frame);
-        rga_buffer_t src = wrapbuffer_fd(mpp_frame_fd, cap_width, cap_height, RK_FORMAT_YCbCr_420_SP,padToMultipleOf16(cap_width),padToMultipleOf16(cap_height));
+        // Convert YUYV to YUV420SP (NV12) using RGA
+        rga_buffer_t src = wrapbuffer_virtualaddr((void *)frame.data,
+                                                  cap_width, cap_height,
+                                                  RK_FORMAT_YUYV_422);
+        rga_buffer_t dst = wrapbuffer_fd(mpp_frame_fd,
+                                         cap_width, cap_height,
+                                         RK_FORMAT_YCbCr_420_SP,
+                                         padToMultipleOf16(cap_width),
+                                         padToMultipleOf16(cap_height));
+        // Perform color space conversion
+        imcvtcolor(src, dst, RK_FORMAT_YUYV_422, RK_FORMAT_YCbCr_420_SP);
+        // Encode frame
         int enc_buf_size = ctx->encoder->GetFrameSize();
         char *enc_data = (char *)malloc(enc_buf_size);
-        rga_buffer_t rgb_img = wrapbuffer_virtualaddr((void *)img.data, cap_width, cap_height, RK_FORMAT_BGR_888);
-        // Copy the RGB image into src
-        imcopy(rgb_img, src);
-        if (frame_index == 1)
-        {
-            enc_data_size = ctx->encoder->GetHeader(enc_data, enc_buf_size);
-        }
-        // Zero the buffer
         memset(enc_data, 0, enc_buf_size);
         enc_data_size = ctx->encoder->Encode(mpp_frame, enc_data, enc_buf_size);
-        ret = mk_media_input_h264(ctx->media, enc_data, enc_data_size, millis, millis);
-        if (ret != 1)
-        {
+        // Push to RTSP
+        int ret = mk_media_input_h264(ctx->media, enc_data, enc_data_size, millis, millis);
+        if (ret != 1) {
             printf("mk_media_input_frame failed\n");
         }
-        if (enc_data != nullptr)
-        {
+        if (enc_data != nullptr) {
             free(enc_data);
         }
+        // Small delay to maintain frame rate
+        usleep(1000000 / fps);
     }
     // Release resources
     cap.release();
     release_track(&v_track);
     release_media(&ctx->media);
+    return 0;
 }
-int main(int argc, char **argv)
-{
-    int status = 0;
-    int ret;
-    if (argc != 2)
-    {
-        printf("Usage: %s<video_path>\n", argv[0]);
-        return -1;
-    }
-    char *stream_url = argv[1]; // video stream URL
-    int video_type = 264;
-    // Initialize the streaming media kit
+int main(int argc, char **argv) {
+    // Initialize media kit
     mk_config config;
     memset(&config, 0, sizeof(mk_config));
     config.log_mask = LOG_CONSOLE;
@@ -262,19 +213,20 @@ int main(int argc, char **argv)
     mk_env_init(&config);
     mk_rtsp_server_start(3554, 0);
-    rknn_app_context_t app_ctx; // create the context
-    memset(&app_ctx, 0, sizeof(rknn_app_context_t)); // initialize the context
-    app_ctx.video_type = video_type;
-    app_ctx.push_path_first = "yunyan-live";
-    app_ctx.push_path_second = "test";
+    // Initialize application context
+    rknn_app_context_t app_ctx;
+    memset(&app_ctx, 0, sizeof(rknn_app_context_t));
+    app_ctx.video_type = 264; // H.264
+    app_ctx.push_path_first = "live";
+    app_ctx.push_path_second = "stream";
-    process_video_file(&app_ctx, stream_url);
+    // Start processing camera stream
+    process_camera_stream(&app_ctx);
-    printf("waiting finish\n");
+    printf("Streaming finished\n");
     usleep(3 * 1000 * 1000);
-    if (app_ctx.encoder != nullptr)
-    {
+    if (app_ctx.encoder != nullptr) {
         delete (app_ctx.encoder);
         app_ctx.encoder = nullptr;
     }
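To sanity-check the new camera pipeline end to end: the code starts an RTSP server on port 3554 and registers the media under the "live"/"stream" path, so the published stream should be playable with a stock client, e.g. (board address is a placeholder):

    ffplay rtsp://<board-ip>:3554/live/stream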