A Qt6-Based Conveyor-Belt Board Dimension Measurement and 3D Modeling System
Building on the core algorithms from the earlier optimization plan, this article implements a complete Qt6 application covering image acquisition, processing, measurement, and 3D modeling.
Complete System Implementation
1. CameraThread.cpp
#include "CameraThread.h"
#include <QDebug>
CameraThread::CameraThread(QObject *parent)
: QThread(parent), m_stop(false), m_calibrated(false)
{
}
CameraThread::~CameraThread() {
stopCapture();
wait(); // make sure run() has finished before releasing the camera
if (m_cap.isOpened()) {
m_cap.release();
}
}
void CameraThread::startCapture(int cameraIndex) {
QMutexLocker locker(&m_mutex);
if (!m_cap.isOpened()) {
m_cap.open(cameraIndex);
if (!m_cap.isOpened()) {
emit errorOccurred("Failed to open the camera");
return;
}
m_cap.set(cv::CAP_PROP_FRAME_WIDTH, 1280);
m_cap.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
m_cap.set(cv::CAP_PROP_FPS, 30);
}
m_stop = false;
if (!isRunning()) {
start();
}
}
void CameraThread::stopCapture() {
QMutexLocker locker(&m_mutex);
m_stop = true;
}
void CameraThread::setCalibrationParams(const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs) {
QMutexLocker locker(&m_mutex);
m_cameraMatrix = cameraMatrix.clone();
m_distCoeffs = distCoeffs.clone();
m_calibrated = true;
}
void CameraThread::run() {
cv::Mat frame;
while (!m_stop) {
{
QMutexLocker locker(&m_mutex);
if (!m_cap.read(frame) || frame.empty()) {
msleep(10);
continue;
}
}
cv::Mat processedFrame;
if (m_calibrated) {
cv::undistort(frame, processedFrame, m_cameraMatrix, m_distCoeffs);
} else {
processedFrame = frame.clone();
}
emit frameCaptured(processedFrame);
// Throttle the capture loop
msleep(10);
}
}
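The header files are not listed in the original; for completeness, here is a minimal CameraThread.h sketch reconstructed from the implementation above. The real header may differ in details such as signal signatures, and the other classes' headers follow the same pattern.
#ifndef CAMERATHREAD_H
#define CAMERATHREAD_H

#include <QThread>
#include <QMutex>
#include <opencv2/opencv.hpp>

class CameraThread : public QThread
{
    Q_OBJECT
public:
    explicit CameraThread(QObject *parent = nullptr);
    ~CameraThread() override;

    void startCapture(int cameraIndex);
    void stopCapture();
    void setCalibrationParams(const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs);

signals:
    void frameCaptured(const cv::Mat &frame);
    void errorOccurred(const QString &message);

protected:
    void run() override;

private:
    cv::VideoCapture m_cap;   // camera handle
    QMutex m_mutex;           // guards the capture state
    bool m_stop;              // set by stopCapture() to end run()
    bool m_calibrated;        // true once calibration parameters are loaded
    cv::Mat m_cameraMatrix;
    cv::Mat m_distCoeffs;
};

#endif // CAMERATHREAD_H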
2. ProcessingThread.cpp
#include "ProcessingThread.h"
#include <QDebug>
#include <pcl/surface/convex_hull.h>
ProcessingThread::ProcessingThread(QObject *parent)
: QThread(parent), m_frameAvailable(false), m_stop(false)
{
}
ProcessingThread::~ProcessingThread() {
stopProcessing();
wait();
}
void ProcessingThread::processFrame(const cv::Mat &frame) {
QMutexLocker locker(&m_mutex);
m_currentFrame = frame.clone();
m_frameAvailable = true;
m_condition.wakeOne();
}
void ProcessingThread::setParameters(double conveyorSpeed, double cameraHeight, int fps, int threshold) {
QMutexLocker locker(&m_mutex);
m_conveyorSpeed = conveyorSpeed;
m_cameraHeight = cameraHeight;
m_fps = fps;
m_threshold = threshold;
}
void ProcessingThread::reset() {
QMutexLocker locker(&m_mutex);
m_canvas = cv::Mat();
m_canvasPos = 0;
m_boardDetected = false;
m_background = cv::Mat();
m_frameAvailable = false;
m_stop = false; // allow the thread to be restarted after stopProcessing()
}
void ProcessingThread::stopProcessing() {
QMutexLocker locker(&m_mutex);
m_stop = true;
m_condition.wakeOne();
}
void ProcessingThread::setCalibrationParams(const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs) {
QMutexLocker locker(&m_mutex);
m_cameraMatrix = cameraMatrix.clone();
m_distCoeffs = distCoeffs.clone();
}
void ProcessingThread::run() {
while (!m_stop) {
cv::Mat frame;
{
QMutexLocker locker(&m_mutex);
while (!m_frameAvailable && !m_stop) {
m_condition.wait(&m_mutex);
}
if (m_stop) break;
frame = m_currentFrame.clone();
m_frameAvailable = false;
}
// First frame: build the background model and the stitching canvas
if (m_background.empty()) {
initializeProcessing();
emit statusChanged("Initialization complete, waiting for board...");
}
// Process the current frame
processBoardDetection(frame);
}
}
void ProcessingThread::initializeProcessing() {
// Ground-sample distance: physical size of one pixel on the conveyor plane
double fx = m_cameraMatrix.at<double>(0,0);
m_pixelSize = (m_cameraHeight * 1000.0) / fx; // mm/pixel
// Pixel displacement of the board between consecutive frames
double pixel_per_second = m_conveyorSpeed * 1000.0 / m_pixelSize;
m_pixelPerFrame = pixel_per_second / m_fps;
// Stitching canvas for the full board silhouette
m_canvas = cv::Mat::zeros(cv::Size(5000, m_currentFrame.rows), CV_8UC1);
m_canvasPos = 0;
m_boardDetected = false;
// Use the first (board-free) frame as the background model
cv::cvtColor(m_currentFrame, m_background, cv::COLOR_BGR2GRAY);
}
void ProcessingThread::processBoardDetection(const cv::Mat &frame) {
cv::Mat gray, diff;
cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
// Background subtraction to isolate the moving board
cv::absdiff(gray, m_background, diff);
cv::threshold(diff, diff, m_threshold, 255, cv::THRESH_BINARY);
cv::morphologyEx(diff, diff, cv::MORPH_OPEN,
cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5,5)));
// Publish the binarized frame for display
emit processedFrame(diff);
// Find foreground contours
std::vector<std::vector<cv::Point>> contours;
cv::findContours(diff, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
if (!contours.empty()) {
// Keep the largest contour as the board candidate
auto max_it = std::max_element(contours.begin(), contours.end(),
[](const std::vector<cv::Point>& a, const std::vector<cv::Point>& b) {
return cv::contourArea(a) < cv::contourArea(b);
});
std::vector<cv::Point> board_contour = *max_it;
cv::Rect bbox = cv::boundingRect(board_contour);
// Ignore small blobs; only accept a plausible board region
if (bbox.area() > 1000) {
if (!m_boardDetected) {
emit statusChanged("Board detected, measuring...");
m_boardDetected = true;
}
// Extract the ROI of the board in this frame
cv::Mat roi = diff(bbox);
// Stitch the ROI into the canvas, advancing by the per-frame displacement
if (m_canvasPos + roi.cols < m_canvas.cols) {
roi.copyTo(m_canvas(cv::Rect(m_canvasPos, 0, roi.cols, roi.rows)));
m_canvasPos += m_pixelPerFrame;
// Back-project the contour points into millimetres and add them to the point cloud
for (auto& pt : board_contour) {
float x = (pt.x - m_cameraMatrix.at<double>(0,2)) * m_pixelSize;
float y = (pt.y - m_cameraMatrix.at<double>(1,2)) * m_pixelSize;
m_currentCloud.push_back(pcl::PointXYZ(x, y, 0));
}
}
}
}
else if (m_boardDetected) {
// The board has fully left the field of view: finish the measurement
finishMeasurement();
reset();
}
}
void ProcessingThread::finishMeasurement() {
// Work on the stitched image covering the whole board
cv::Mat full_board = m_canvas(cv::Rect(0, 0, m_canvasPos, m_canvas.rows));
std::vector<std::vector<cv::Point>> full_contours;
cv::findContours(full_board, full_contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
if (!full_contours.empty()) {
// Use the largest contour in the stitched image (contour order is not guaranteed)
auto max_it = std::max_element(full_contours.begin(), full_contours.end(),
[](const std::vector<cv::Point>& a, const std::vector<cv::Point>& b) {
return cv::contourArea(a) < cv::contourArea(b);
});
std::vector<cv::Point> full_contour = *max_it;
// Convert the minimum-area rectangle from pixels to millimetres
cv::RotatedRect rect = cv::minAreaRect(full_contour);
float width = rect.size.width * m_pixelSize;
float height = rect.size.height * m_pixelSize;
double area = width * height / 1000000.0; // square metres
emit measurementComplete(width, height, area);
emit statusChanged(QString("Measurement complete: %1 mm x %2 mm").arg(width).arg(height));
// Build the point-cloud model
generatePointCloud();
}
}
void ProcessingThread::generatePointCloud() {
if (m_currentCloud.empty()) return;
// Compute the convex hull of the accumulated contour points
pcl::ConvexHull<pcl::PointXYZ> hull;
hull.setInputCloud(m_currentCloud.makeShared());
hull.setDimension(2); // planar (2D) hull
pcl::PointCloud<pcl::PointXYZ>::Ptr hull_cloud(new pcl::PointCloud<pcl::PointXYZ>);
hull.reconstruct(*hull_cloud);
// Publish the hull as the board model
emit pointCloudGenerated(hull_cloud);
emit statusChanged("3D model generated");
// Clear the accumulated points for the next board
m_currentCloud.clear();
}
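Likewise, a ProcessingThread.h sketch reconstructed from the implementation above and from how MainWindow connects to it; the member defaults mirror the values restored by MainWindow::loadSettings and are assumptions.
#ifndef PROCESSINGTHREAD_H
#define PROCESSINGTHREAD_H

#include <QThread>
#include <QMutex>
#include <QWaitCondition>
#include <opencv2/opencv.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

class ProcessingThread : public QThread
{
    Q_OBJECT
public:
    explicit ProcessingThread(QObject *parent = nullptr);
    ~ProcessingThread() override;

    void processFrame(const cv::Mat &frame);
    void setParameters(double conveyorSpeed, double cameraHeight, int fps, int threshold);
    void setCalibrationParams(const cv::Mat &cameraMatrix, const cv::Mat &distCoeffs);
    void reset();
    void stopProcessing();

signals:
    void processedFrame(const cv::Mat &frame);
    void measurementComplete(double length, double width, double area);
    void pointCloudGenerated(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud);
    void statusChanged(const QString &status);

protected:
    void run() override;

private:
    void initializeProcessing();
    void processBoardDetection(const cv::Mat &frame);
    void finishMeasurement();
    void generatePointCloud();

    QMutex m_mutex;
    QWaitCondition m_condition;     // wakes run() when a new frame arrives
    cv::Mat m_currentFrame;
    bool m_frameAvailable;
    bool m_stop;

    double m_conveyorSpeed = 0.5;   // m/s (assumed default)
    double m_cameraHeight = 2.0;    // m   (assumed default)
    int m_fps = 30;
    int m_threshold = 40;

    cv::Mat m_cameraMatrix, m_distCoeffs;
    double m_pixelSize = 0.0;       // mm per pixel
    double m_pixelPerFrame = 0.0;   // pixels of board motion per frame

    cv::Mat m_background;           // board-free background frame
    cv::Mat m_canvas;               // stitching canvas
    int m_canvasPos = 0;
    bool m_boardDetected = false;

    pcl::PointCloud<pcl::PointXYZ> m_currentCloud;
};

#endif // PROCESSINGTHREAD_H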
3. PointCloudViewer.cpp (enhanced version)
#include "PointCloudViewer.h"
#include <vtkRenderWindowInteractor.h>
#include <vtkInteractorStyleTrackballCamera.h>
#include <vtkVertexGlyphFilter.h>
#include <vtkPolyDataMapper.h>
#include <vtkProperty.h>
#include <vtkPointData.h>
#include <vtkFloatArray.h>
#include <vtkCellArray.h>
#include <vtkTriangle.h>
#include <vtkPolygon.h>
#include <vtkMath.h>
#include <vtkGenericOpenGLRenderWindow.h>
#include <vtkAxesActor.h>
#include <vtkLight.h>
#include <vtkCamera.h>
PointCloudViewer::PointCloudViewer(QWidget *parent)
: QVTKOpenGLNativeWidget(parent)
{
setupRenderer();
}
void PointCloudViewer::setPointCloud(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud)
{
if (!cloud || cloud->empty()) return;
vtkNew<vtkPoints> points;
vtkNew<vtkCellArray> polygons;
// Add the hull vertices
for (const auto& pt : *cloud) {
points->InsertNextPoint(pt.x, pt.y, pt.z);
}
// Build a single polygon from the (ordered) convex-hull vertices
vtkNew<vtkPolygon> polygon;
polygon->GetPointIds()->SetNumberOfIds(cloud->size());
for (size_t i = 0; i < cloud->size(); ++i) {
polygon->GetPointIds()->SetId(i, i);
}
polygons->InsertNextCell(polygon);
// Assemble the polygon into a vtkPolyData
vtkNew<vtkPolyData> polyData;
polyData->SetPoints(points);
polyData->SetPolys(polygons);
// Mapper and actor for the filled board polygon
vtkNew<vtkPolyDataMapper> mapper;
mapper->SetInputData(polyData);
vtkNew<vtkActor> actor;
actor->SetMapper(mapper);
actor->GetProperty()->SetColor(0.2, 0.6, 1.0); // blue fill
actor->GetProperty()->SetOpacity(0.7);
actor->GetProperty()->SetEdgeColor(0.9, 0.9, 0.9); // light grey edges
actor->GetProperty()->SetEdgeVisibility(1);
actor->GetProperty()->SetLineWidth(2.0);
// Render the hull vertices as points on top of the polygon
vtkNew<vtkPolyData> pointPolyData;
pointPolyData->SetPoints(points);
vtkNew<vtkVertexGlyphFilter> vertexFilter;
vertexFilter->SetInputData(pointPolyData);
vertexFilter->Update();
vtkNew<vtkPolyDataMapper> pointMapper;
pointMapper->SetInputConnection(vertexFilter->GetOutputPort());
vtkNew<vtkActor> pointActor;
pointActor->SetMapper(pointMapper);
pointActor->GetProperty()->SetColor(1.0, 0.0, 0.0); // red
pointActor->GetProperty()->SetPointSize(5);
// Replace the previous scene content
m_renderer->RemoveAllViewProps();
m_renderer->AddActor(actor);
m_renderer->AddActor(pointActor);
// Coordinate axes for scale reference
vtkNew<vtkAxesActor> axes;
axes->SetTotalLength(100, 100, 100); // axis length in mm
m_renderer->AddActor(axes);
// Reset the camera and render
m_renderer->ResetCamera();
this->renderWindow()->Render();
}
void PointCloudViewer::clear() {
m_renderer->RemoveAllViewProps();
this->renderWindow()->Render();
}
void PointCloudViewer::setupRenderer() {
// QVTKOpenGLNativeWidget (VTK 9) expects a vtkGenericOpenGLRenderWindow
vtkNew<vtkGenericOpenGLRenderWindow> renderWindow;
this->setRenderWindow(renderWindow);
m_renderer = vtkSmartPointer<vtkRenderer>::New();
renderWindow->AddRenderer(m_renderer);
// Use the widget's own interactor with a trackball-camera style
vtkNew<vtkInteractorStyleTrackballCamera> style;
this->interactor()->SetInteractorStyle(style);
// Background color
m_renderer->SetBackground(0.1, 0.1, 0.15); // dark blue background
// Lighting
vtkNew<vtkLight> light;
light->SetPosition(0, 0, 1000);
light->SetFocalPoint(0, 0, 0);
light->SetColor(1, 1, 1);
light->SetIntensity(1.0);
m_renderer->AddLight(light);
// Initial camera pose
m_renderer->GetActiveCamera()->SetPosition(0, -1000, 500);
m_renderer->GetActiveCamera()->SetFocalPoint(0, 0, 0);
m_renderer->GetActiveCamera()->SetViewUp(0, 0, 1);
m_renderer->ResetCamera();
}
4. MainWindow.cpp (complete implementation)
#include "MainWindow.h"
#include "ui_MainWindow.h"
#include <QGraphicsPixmapItem>
#include <QFileDialog>
#include <QMessageBox>
#include <QSettings>
#include <QDateTime>
#include <QFile>
#include <QTextStream>
#include <opencv2/opencv.hpp>
#include <pcl/io/ply_io.h>
MainWindow::MainWindow(QWidget *parent)
: QMainWindow(parent)
, ui(new Ui::MainWindow)
, m_cameraThread(new CameraThread(this))
, m_processingThread(new ProcessingThread(this))
{
ui->setupUi(this);
// Embed the VTK point-cloud viewer
m_pointCloudViewer = new PointCloudViewer(ui->vtkWidget);
QVBoxLayout *layout = new QVBoxLayout(ui->vtkWidget);
layout->addWidget(m_pointCloudViewer);
layout->setContentsMargins(0, 0, 0, 0);
// Graphics scenes for the raw and processed video
m_rawScene = new QGraphicsScene(this);
m_processedScene = new QGraphicsScene(this);
ui->videoView->setScene(m_rawScene);
ui->processedView->setScene(m_processedScene);
// Wire up signals and slots
connect(ui->startButton, &QPushButton::clicked, this, &MainWindow::onStartButtonClicked);
connect(ui->stopButton, &QPushButton::clicked, this, &MainWindow::onStopButtonClicked);
connect(ui->calibrateButton, &QPushButton::clicked, this, &MainWindow::onCalibrateButtonClicked);
connect(ui->saveButton, &QPushButton::clicked, this, &MainWindow::onSaveButtonClicked);
connect(m_cameraThread, &CameraThread::frameCaptured, this, &MainWindow::updateRawVideo);
connect(m_cameraThread, &CameraThread::errorOccurred, this, [this](const QString &msg){
ui->statusValue->setText("Error: " + msg);
});
connect(m_processingThread, &ProcessingThread::processedFrame, this, &MainWindow::updateProcessedVideo);
connect(m_processingThread, &ProcessingThread::measurementComplete, this, &MainWindow::updateResults);
connect(m_processingThread, &ProcessingThread::pointCloudGenerated, this, &MainWindow::updatePointCloud);
connect(m_processingThread, &ProcessingThread::statusChanged, this, &MainWindow::updateStatus);
// Load a previously saved camera calibration, if any
loadCalibrationData();
// Restore the last-used parameters
loadSettings();
}
MainWindow::~MainWindow()
{
saveSettings();
m_cameraThread->stopCapture();
m_cameraThread->wait();
m_processingThread->stopProcessing(); // quit() alone would not wake the worker blocked on the wait condition
m_processingThread->wait();
delete ui;
}
void MainWindow::onStartButtonClicked()
{
double speed = ui->speedInput->value();
double height = ui->heightInput->value();
int fps = ui->fpsInput->value();
int threshold = ui->threshSlider->value();
m_processingThread->setParameters(speed, height, fps, threshold);
m_processingThread->reset();
m_cameraThread->startCapture(0);
m_processingThread->start();
ui->startButton->setEnabled(false);
ui->stopButton->setEnabled(true);
ui->statusValue->setText("System started, waiting for board...");
}
void MainWindow::onStopButtonClicked()
{
m_cameraThread->stopCapture();
m_processingThread->stopProcessing(); // wakes the worker so run() can exit
m_processingThread->wait();
ui->startButton->setEnabled(true);
ui->stopButton->setEnabled(false);
ui->statusValue->setText("Measurement stopped");
}
void MainWindow::onCalibrateButtonClicked()
{
// Intrinsic calibration with a chessboard target.
// Note: this routine opens the camera directly and uses OpenCV HighGUI windows,
// so stop live capture first; the Qt event loop is blocked while it runs.
cv::Size boardSize(9, 6); // inner corners of the chessboard
std::vector<std::vector<cv::Point2f>> imagePoints;
cv::VideoCapture cap(0);
if (!cap.isOpened()) {
QMessageBox::critical(this, "Error", "Cannot open the camera for calibration");
return;
}
QMessageBox::information(this, "Camera Calibration",
"Place the chessboard target in front of the camera and press Space to capture an image (15 views from different angles will be collected)");
int count = 0;
while (count < 15) {
cv::Mat frame;
cap >> frame;
if (frame.empty()) continue;
cv::Mat gray;
cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
std::vector<cv::Point2f> corners;
bool found = cv::findChessboardCorners(gray, boardSize, corners);
if (found) {
cv::cornerSubPix(gray, corners, cv::Size(11,11), cv::Size(-1,-1),
cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
cv::drawChessboardCorners(frame, boardSize, corners, found);
cv::imshow("Calibration", frame);
int key = cv::waitKey(10);
if (key == ' ') {
imagePoints.push_back(corners);
count++;
qDebug() << "Captured calibration image:" << count;
}
}
else {
cv::imshow("Calibration", frame);
cv::waitKey(10);
}
}
cv::destroyWindow("Calibration");
cap.release();
// Object points: chessboard corner positions in board coordinates (unit square size)
std::vector<std::vector<cv::Point3f>> objectPoints(1);
for (int j = 0; j < boardSize.height; j++) {
for (int k = 0; k < boardSize.width; k++) {
objectPoints[0].push_back(cv::Point3f(k, j, 0));
}
}
objectPoints.resize(imagePoints.size(), objectPoints[0]);
cv::Mat cameraMatrix, distCoeffs;
std::vector<cv::Mat> rvecs, tvecs;
double rms = cv::calibrateCamera(objectPoints, imagePoints, cv::Size(1280,720),
cameraMatrix, distCoeffs, rvecs, tvecs);
// Save the calibration result
cv::FileStorage fs("calibration.yml", cv::FileStorage::WRITE);
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;
fs << "rms" << rms;
fs.release();
// Apply the calibration to both worker threads
m_cameraThread->setCalibrationParams(cameraMatrix, distCoeffs);
m_processingThread->setCalibrationParams(cameraMatrix, distCoeffs);
QMessageBox::information(this, "Calibration Complete",
QString("Camera calibration finished, RMS reprojection error: %1").arg(rms));
}
void MainWindow::onSaveButtonClicked()
{
if (m_currentCloud.empty()) {
QMessageBox::warning(this, "Warning", "No point cloud data to save");
return;
}
QString fileName = QFileDialog::getSaveFileName(this, "Save Results",
QDateTime::currentDateTime().toString("yyyyMMdd-hhmmss"),
"PLY files (*.ply);;All files (*)");
if (!fileName.isEmpty()) {
if (!fileName.endsWith(".ply", Qt::CaseInsensitive)) {
fileName += ".ply";
}
// Save the point cloud
pcl::io::savePLYFileBinary(fileName.toStdString(), m_currentCloud);
// Save the measurement report next to it (work on a copy: QString::replace modifies in place)
QString resultFile = fileName;
resultFile.replace(".ply", ".txt");
QFile file(resultFile);
if (file.open(QIODevice::WriteOnly | QIODevice::Text)) {
QTextStream out(&file);
out << "Board Dimension Measurement Report\n";
out << "Measured at: " << QDateTime::currentDateTime().toString() << "\n";
out << "Length: " << ui->lengthValue->text() << " mm\n";
out << "Width: " << ui->widthValue->text() << " mm\n";
out << "Area: " << ui->areaValue->text() << " m²\n";
out << "Conveyor speed: " << ui->speedInput->value() << " m/s\n";
out << "Camera height: " << ui->heightInput->value() << " m\n";
file.close();
}
QMessageBox::information(this, "Saved", "Results saved to:\n" + fileName);
}
}
void MainWindow::updateRawVideo(const cv::Mat &frame)
{
cv::Mat rgbFrame;
cv::cvtColor(frame, rgbFrame, cv::COLOR_BGR2RGB);
QImage img(rgbFrame.data, rgbFrame.cols, rgbFrame.rows,
rgbFrame.step, QImage::Format_RGB888);
m_rawScene->clear();
m_rawScene->addPixmap(QPixmap::fromImage(img));
ui->videoView->fitInView(m_rawScene->sceneRect(), Qt::KeepAspectRatio);
// Hand the frame to the processing thread
m_processingThread->processFrame(frame);
}
void MainWindow::updateProcessedVideo(const cv::Mat &frame)
{
QImage img(frame.data, frame.cols, frame.rows,
frame.step, QImage::Format_Grayscale8);
m_processedScene->clear();
m_processedScene->addPixmap(QPixmap::fromImage(img));
ui->processedView->fitInView(m_processedScene->sceneRect(), Qt::KeepAspectRatio);
}
void MainWindow::updateResults(double length, double width, double area)
{
ui->lengthValue->setText(QString::number(length, 'f', 2));
ui->widthValue->setText(QString::number(width, 'f', 2));
ui->areaValue->setText(QString::number(area, 'f', 4));
}
void MainWindow::updatePointCloud(pcl::PointCloud<pcl::PointXYZ>::Ptr cloud)
{
m_currentCloud = *cloud;
m_pointCloudViewer->setPointCloud(cloud);
}
void MainWindow::updateStatus(const QString &status)
{
ui->statusValue->setText(status);
}
void MainWindow::loadCalibrationData()
{
cv::FileStorage fs("calibration.yml", cv::FileStorage::READ);
if (fs.isOpened()) {
cv::Mat cameraMatrix, distCoeffs;
fs["camera_matrix"] >> cameraMatrix;
fs["distortion_coefficients"] >> distCoeffs;
m_cameraThread->setCalibrationParams(cameraMatrix, distCoeffs);
m_processingThread->setCalibrationParams(cameraMatrix, distCoeffs);
double rms = 0;
fs["rms"] >> rms;
ui->statusValue->setText(QString("Calibration data loaded (RMS: %1)").arg(rms));
} else {
ui->statusValue->setText("No calibration data found, please calibrate the camera first");
}
}
void MainWindow::loadSettings()
{
QSettings settings("PlateMeasurement", "Settings");
ui->speedInput->setValue(settings.value("speed", 0.5).toDouble());
ui->heightInput->setValue(settings.value("height", 2.0).toDouble());
ui->fpsInput->setValue(settings.value("fps", 30).toInt());
ui->threshSlider->setValue(settings.value("threshold", 40).toInt());
}
void MainWindow::saveSettings()
{
QSettings settings("PlateMeasurement", "Settings");
settings.setValue("speed", ui->speedInput->value());
settings.setValue("height", ui->heightInput->value());
settings.setValue("fps", ui->fpsInput->value());
settings.setValue("threshold", ui->threshSlider->value());
}
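The listings above pass cv::Mat and pcl::PointCloud<pcl::PointXYZ>::Ptr through cross-thread (queued) signal/slot connections, which requires registering those types with Qt's meta-type system; VTK's Qt widget also expects a suitable default OpenGL surface format. A minimal main.cpp sketch covering both points follows — the project's actual main.cpp is not shown above, so treat the placement of these declarations as an assumption.
#include <QApplication>
#include <QSurfaceFormat>
#include <QVTKOpenGLNativeWidget.h>
#include <opencv2/opencv.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include "MainWindow.h"

// Allow these types to travel through queued (cross-thread) connections
Q_DECLARE_METATYPE(cv::Mat)
Q_DECLARE_METATYPE(pcl::PointCloud<pcl::PointXYZ>::Ptr)

int main(int argc, char *argv[])
{
    // Must be set before QApplication is constructed for QVTKOpenGLNativeWidget
    QSurfaceFormat::setDefaultFormat(QVTKOpenGLNativeWidget::defaultFormat());
    QApplication app(argc, argv);

    qRegisterMetaType<cv::Mat>("cv::Mat");
    qRegisterMetaType<pcl::PointCloud<pcl::PointXYZ>::Ptr>("pcl::PointCloud<pcl::PointXYZ>::Ptr");

    MainWindow window;
    window.show();
    return app.exec();
}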
System Functionality
1. Core Algorithms
- Camera calibration: chessboard images are used to estimate the intrinsic matrix and distortion coefficients
- Background modeling: a frame captured without a board serves as the background for differencing
- Motion compensation: the per-frame pixel displacement is derived from the conveyor speed (a numeric sketch follows this list)
- Image stitching: successive frames are stitched into a single view of the whole board
- Dimension calculation: the pinhole camera model converts pixel measurements into real-world sizes
- 3D modeling: a convex-hull model of the board is generated and visualized
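For illustration, a small standalone sketch of the motion-compensation arithmetic performed in ProcessingThread::initializeProcessing, using assumed example values (fx = 1000 px, camera height 2 m, conveyor speed 0.5 m/s, 30 fps) rather than real calibration data:
#include <cstdio>

int main() {
    // Assumed example values, not taken from a real calibration
    const double fx = 1000.0;          // focal length in pixels (camera matrix entry (0,0))
    const double cameraHeight = 2.0;   // metres above the conveyor plane
    const double conveyorSpeed = 0.5;  // metres per second
    const int fps = 30;

    // Ground-sample distance on the conveyor plane: millimetres per pixel
    const double pixelSize = (cameraHeight * 1000.0) / fx;                    // 2.0 mm/px
    // Board displacement between consecutive frames, in pixels
    const double pixelPerFrame = (conveyorSpeed * 1000.0 / pixelSize) / fps;  // ~8.33 px/frame

    std::printf("pixelSize = %.2f mm/px, pixelPerFrame = %.2f px\n", pixelSize, pixelPerFrame);
    return 0;
}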
2. System Workflow
- Initialization:
  - Load the camera calibration parameters
  - Set the default measurement parameters
  - Initialize the video views and the 3D preview
- Start measurement: pass the parameters to the processing thread, reset its state, and start camera capture
- Real-time processing:
  - Capture video frames and undistort them
  - Extract the board region by background differencing
  - Apply motion compensation and stitch the frames
  - Extract the contour and compute the dimensions
- Result display: length, width, and area are shown and the 3D model is rendered in the preview
- End measurement: once the board has fully passed, the result is finalized; capture and processing can then be stopped
3. User Interface
- Video display: shows the raw video and the processed result in real time
- Control panel: start/stop measurement, camera calibration, save results
- Parameter settings: conveyor speed, camera height, frame rate, threshold sensitivity
- Measurement results: board length, width, and area
- 3D preview: interactive display of the board model
- Status display: real-time system status messages
System Strengths
- High-accuracy measurement: motion compensation and image stitching target an accuracy of roughly ±0.5 mm on a moving conveyor (the achievable accuracy depends on calibration quality and lighting)
- Real-time processing: the multi-threaded architecture is designed to sustain 30 fps
- Industrial applicability: suited to conveyor environments and continuous measurement
- Intuitive visualization: integrated 2D/3D views show processing results live
- User friendly: a clean interface with convenient parameter adjustment
- Data persistence: settings and calibration parameters are saved automatically
Deployment Notes
- System requirements:
  - Ubuntu 24.04
  - Qt 6.9.1
  - OpenCV 4.x
  - PCL 1.12
  - VTK 9.x
- Build and run (a possible CMakeLists.txt is sketched after this list):
mkdir build
cd build
cmake ..
make -j4
./PlateMeasurement
- Hardware setup:
  - MindVision MV-UB300 camera
  - USB 3.0 interface
  - Backlight illumination
  - Conveyor control system (provides the speed signal)
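For reference, a minimal CMakeLists.txt sketch matching the build commands above; the package components and source list are assumptions and may need adjusting to the actual project layout:
cmake_minimum_required(VERSION 3.16)
project(PlateMeasurement LANGUAGES CXX)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_AUTOMOC ON)   # Q_OBJECT classes
set(CMAKE_AUTOUIC ON)   # MainWindow.ui
set(CMAKE_AUTORCC ON)

find_package(Qt6 REQUIRED COMPONENTS Widgets OpenGLWidgets)
find_package(OpenCV REQUIRED)
find_package(PCL REQUIRED COMPONENTS common io surface)
find_package(VTK REQUIRED COMPONENTS
    GUISupportQt RenderingOpenGL2 RenderingAnnotation InteractionStyle)

add_executable(PlateMeasurement
    main.cpp
    MainWindow.cpp
    CameraThread.cpp
    ProcessingThread.cpp
    PointCloudViewer.cpp
)

target_include_directories(PlateMeasurement PRIVATE ${PCL_INCLUDE_DIRS})
target_link_libraries(PlateMeasurement PRIVATE
    Qt6::Widgets Qt6::OpenGLWidgets
    ${OpenCV_LIBS}
    ${PCL_LIBRARIES}
    ${VTK_LIBRARIES}
)

# VTK 9 requires module auto-initialization for the linked components
vtk_module_autoinit(TARGETS PlateMeasurement MODULES ${VTK_LIBRARIES})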
By exploiting the characteristics of the conveyor scenario and combining the backlit setup with the known belt motion, the system performs accurate and efficient board dimension measurement and modeling, making it well suited to industrial automated inspection.