Camera Calibration: Monocular and Stereo

Kelli · Updated: 2024-11-13

Stereo Calibration

Reference: https://www.cnblogs.com/polly333/p/5013505.html
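
The listing below is essentially OpenCV's stereo_calib.cpp sample. Its main() reads the input from an XML/YAML image list (stereo_calib.xml by default, or a file passed on the command line) whose entries alternate the left and right image of each pair; readStringList() loads it as a single top-level string sequence. A minimal sketch of writing such a list with cv::FileStorage follows; the node name "imagelist" and the image file names are placeholders only.

// Minimal sketch: generate the image-list file the stereo sample expects.
// Node and file names are examples; the list must alternate left/right images of each pair.
#include <opencv2/core/core.hpp>

int main()
{
    cv::FileStorage fs("stereo_calib.xml", cv::FileStorage::WRITE);
    fs << "imagelist" << "[";              // readStringList() takes the first top-level sequence
    fs << "left01.jpg" << "right01.jpg";   // pair 1
    fs << "left02.jpg" << "right02.jpg";   // pair 2, and so on
    fs << "]";
    fs.release();
    return 0;
}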

// Essentially OpenCV's stereo_calib.cpp sample, written against the 2.4-era API
// (CV_CALIB_* / CV_TERMCRIT_* constants, TermCriteria passed before the flags).
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

using namespace cv;
using namespace std;

static int print_help()
{
    cout << " Usage: stereo_calib -w board_width -h board_height [-nr] <image list XML/YML file>\n" << endl;
    return 0;
}

static void StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }

    bool displayCorners = false;//true;
    const int maxScale = 2;
    const float squareSize = 1.f;  // Set this to your actual square size
    // ARRAY AND VECTOR STORAGE:
    // containers for the image-plane corners and their world coordinates
    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size()/2;
    // number of stereo pairs: e.g. 10 images give 5 left and 5 right views
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;

    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            // read the images one by one
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            // reference to the corner buffer of the current (left or right) view
            vector<Point2f>& corners = imagePoints[k][j];
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                found = findChessboardCorners(timg, boardSize, corners,
                    CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 640./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found )
                break;
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 0.01));
        }
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too few pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    // world coordinates of the chessboard corners
    objectPoints.resize(nimages);

    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )
            for( k = 0; k < boardSize.width; k++ )
                // (row, column) coordinates in units of the square size, z = 0
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }

    cout << "Running stereo calibration ...\n";
    // intrinsic matrices and distortion coefficients of the two cameras
    Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    Mat R, T, E, F;
    // run the stereo calibration
    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                    CV_CALIB_FIX_ASPECT_RATIO +
                    CV_CALIB_ZERO_TANGENT_DIST +
                    CV_CALIB_SAME_FOCAL_LENGTH +
                    CV_CALIB_RATIONAL_MODEL +
                    CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
    cout << "done with RMS error=" << rms << endl;

    // CALIBRATION QUALITY CHECK
    // because the output fundamental matrix implicitly
    // includes all the output information,
    // we can check the quality of calibration using the
    // epipolar geometry constraint: m2^t*F*m1=0
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            // undistort the image points
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            // compute the corresponding epipolar lines
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        // distance of each point from the epipolar line of its correspondence
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " << err/npoints << endl;

    // save intrinsic parameters
    FileStorage fs("intrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
              "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];
    // stereo rectification from the intrinsics and the inter-camera rotation and translation
    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);

    fs.open("extrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the extrinsic parameters\n";

    // OpenCV can handle left-right
    // or up-down camera arrangements
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

    // COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;

    Mat rmap[2][2];
    // IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // R1, R2, P1, P2 were already computed by stereoRectify above
    }
    // OR ELSE HARTLEY'S METHOD
    else
        // use intrinsic parameters of each camera, but
        // compute the rectification transformation directly
        // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);

        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
    }

    // Precompute maps for cv::remap()
    // rectification maps built from the per-camera rotations and projection matrices
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }

    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
            if( useCalibrated )
            {
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
            }
        }

        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}

static bool readStringList( const string& filename, vector<string>& l )
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;
    FileNode n = fs.getFirstTopLevelNode();
    if( n.type() != FileNode::SEQ )
        return false;
    FileNodeIterator it = n.begin(), it_end = n.end();
    for( ; it != it_end; ++it )
        l.push_back((string)*it);
    return true;
}

int main(int argc, char** argv)
{
    Size boardSize;
    string imagelistfn;
    bool showRectified = true;

    for( int i = 1; i < argc; i++ )
    {
        if( string(argv[i]) == "-w" )
        {
            if( sscanf(argv[++i], "%d", &boardSize.width) != 1 || boardSize.width <= 0 )
            {
                cout << "invalid board width" << endl;
                return print_help();
            }
        }
        else if( string(argv[i]) == "-h" )
        {
            if( sscanf(argv[++i], "%d", &boardSize.height) != 1 || boardSize.height <= 0 )
            {
                cout << "invalid board height" << endl;
                return print_help();
            }
        }
        else if( string(argv[i]) == "-nr" )
            showRectified = false;
        else if( string(argv[i]) == "--help" )
            return print_help();
        else if( argv[i][0] == '-' )
        {
            cout << "invalid option " << argv[i] << endl;
            return 0;
        }
        else
            imagelistfn = argv[i];
    }

    if( imagelistfn == "" )
    {
        imagelistfn = "stereo_calib.xml";
        boardSize = Size(9, 6);
    }
    else if( boardSize.width <= 0 || boardSize.height <= 0 )
    {
        cout << "if you specified XML file with chessboards, you should also specify the board width and height (-w and -h options)" << endl;
        return 0;
    }

    vector<string> imagelist;
    bool ok = readStringList(imagelistfn, imagelist);
    if(!ok || imagelist.empty())
    {
        cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
        return print_help();
    }

    StereoCalib(imagelist, boardSize, true, showRectified);
    return 0;
}

Monocular Calibration

Reference: https://www.cnblogs.com/zyly/p/9366080.html
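
The monocular sample below reads its image paths from a plain text list, one path per line (sample/left/filename.txt in the code). With OpenCV 3.x or newer, such a list can be generated with cv::glob, as in the sketch below; the *.jpg pattern is an assumption about the data set, so adjust it to your images.

// Minimal sketch: build the one-path-per-line image list the monocular sample reads.
// The directory comes from the sample's own paths; the *.jpg pattern is an assumption.
#include <fstream>
#include <vector>
#include <opencv2/core/utility.hpp>

int main()
{
    std::vector<cv::String> images;
    cv::glob("sample/left/*.jpg", images);          // collect the calibration images

    std::ofstream list("sample/left/filename.txt"); // the file the sample opens as infilename
    for (size_t i = 0; i < images.size(); i++)
        list << images[i] << "\n";                  // one image path per line
    return 0;
}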

/*************************************************************************************
 *
 * Description: camera calibration (Zhang's method), monocular calibration
 * Author     : JNU
 * Date       : 2018.7.22
 *
 *************************************************************************************/
#include <iostream>
#include <fstream>
#include <vector>
#include <cstdlib>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;
using namespace std;

int main()
{
    // file names of the calibration images
    std::vector<std::string> filenames;

    // parameters that need to be adapted:
    // left-camera calibration -- path of the image list and of the result file
    // (for the right camera, change "left" to "right")
    string infilename = "sample/left/filename.txt";
    string outfilename = "sample/left/caliberation_result.txt";

    // the image list holds one image path per line (ifstream: disk -> memory)
    ifstream fin(infilename);
    // the calibration result is written back to disk (ofstream: memory -> disk)
    ofstream fout(outfilename);
    if (!fin.is_open())
    {
        std::cout << "Can not open the image list " << infilename << std::endl;
        return -1;
    }

    /*
    1. Read every image, extract the chessboard corners and refine them to sub-pixel
       accuracy. The pixel coordinate origin is the top-left corner of the image.
    */
    std::cout << "Extracting corners......" << std::endl;
    // number of images
    int imageCount = 0;
    // image size
    cv::Size imageSize;
    // number of inner corners per row and per column of the board
    cv::Size boardSize = cv::Size(9, 6);
    // corners detected in the current image
    std::vector<cv::Point2f> imagePointsBuf;
    // corners of all images
    std::vector<std::vector<cv::Point2f> > imagePointsSeq;
    char filename[100];
    // read the list line by line
    while (fin.getline(filename, sizeof(filename) / sizeof(char)))
    {
        // skip empty lines (e.g. a trailing newline at the end of the list)
        if (filename[0] == '\0')
            continue;
        // read the image
        Mat imageInput = cv::imread(filename);
        if (imageInput.empty())
        {
            std::cout << "Can not read image " << filename << std::endl;
            continue;
        }
        // keep the file name for the undistortion display later on
        filenames.push_back(filename);
        // take the image size from the first image
        if (imageCount == 0)
        {
            imageSize.width = imageInput.cols;
            imageSize.height = imageInput.rows;
            std::cout << "imageSize.width = " << imageSize.width << std::endl;
            std::cout << "imageSize.height = " << imageSize.height << std::endl;
        }
        std::cout << "imageCount = " << imageCount << std::endl;
        imageCount++;

        // extract the corners of this image
        if (cv::findChessboardCorners(imageInput, boardSize, imagePointsBuf) == 0)
        {
            // no corners found
            std::cout << "Can not find chessboard corners!" << std::endl;
            return -1;
        }
        else
        {
            Mat viewGray;
            // convert to grayscale
            cv::cvtColor(imageInput, viewGray, cv::COLOR_BGR2GRAY);
            // refine the coarse corner locations to sub-pixel accuracy
            cv::find4QuadCornerSubpix(viewGray, imagePointsBuf, cv::Size(5, 5));
            // store the refined corners
            imagePointsSeq.push_back(imagePointsBuf);
            // draw the corner positions for inspection
            cv::drawChessboardCorners(viewGray, boardSize, imagePointsBuf, true);
            //cv::imshow("Camera Calibration", viewGray);
            cv::imwrite("test.jpg", viewGray);
            //waitKey(500);
        }
    }

    // number of corners per image (9 x 6 = 54)
    int cornerNum = boardSize.width * boardSize.height;
    // total number of corners
    int total = (int)imagePointsSeq.size() * cornerNum;
    std::cout << "total = " << total << std::endl;
    for (int i = 0; i < total; i++)
    {
        int num = i / cornerNum;
        int p = i % cornerNum;
        // print a header for every image (debug output only)
        if (p == 0)
        {
            std::cout << "\ncorners of image " << num + 1 << " --> :" << std::endl;
        }
        // print every corner
        std::cout << p + 1 << ":(" << imagePointsSeq[num][p].x << ","
                  << imagePointsSeq[num][p].y << ")\t";
        if ((p + 1) % 3 == 0)
        {
            std::cout << std::endl;
        }
    }
    std::cout << "Corner extraction finished!" << std::endl;

    /*
    2. Camera calibration. The world coordinate origin is the top-left corner of the
       board (the top-left corner of the first square).
    */
    std::cout << "Starting calibration" << std::endl;
    // board geometry in the world frame:
    // physical size of one chessboard square, measured on the actual board
    cv::Size squareSize = cv::Size(26, 26);
    // number of corners in each image
    std::vector<int> pointCounts;
    // 3-D coordinates of the board corners
    std::vector<std::vector<cv::Point3f> > objectPoints;
    // camera intrinsic matrix M = [fx γ u0; 0 fy v0; 0 0 1]
    cv::Mat cameraMatrix = cv::Mat(3, 3, CV_32FC1, Scalar::all(0));
    // the 5 distortion coefficients k1, k2, p1, p2, k3
    cv::Mat distCoeffs = cv::Mat(1, 5, CV_32FC1, Scalar::all(0));
    // rotation vector of each image
    std::vector<cv::Mat> rvecsMat;
    // translation vector of each image
    std::vector<cv::Mat> tvecsMat;

    // initialise the 3-D coordinates of the board corners
    int i, j, t;
    for (t = 0; t < imageCount; t++)
    {
        std::vector<cv::Point3f> tempPointSet;
        // rows
        for (i = 0; i < boardSize.height; i++)
        {
            // columns
            for (j = 0; j < boardSize.width; j++)
            {
                cv::Point3f realPoint;
                // the board is assumed to lie in the z = 0 plane of the world frame
                realPoint.x = i * squareSize.width;
                realPoint.y = j * squareSize.height;
                realPoint.z = 0;
                tempPointSet.push_back(realPoint);
            }
        }
        objectPoints.push_back(tempPointSet);
    }
    // assume the complete board is visible in every image
    for (i = 0; i < imageCount; i++)
    {
        pointCounts.push_back(boardSize.width * boardSize.height);
    }
    // run the calibration
    cv::calibrateCamera(objectPoints, imagePointsSeq, imageSize, cameraMatrix, distCoeffs, rvecsMat, tvecsMat);
    std::cout << "Calibration finished" << std::endl;

    // evaluate the calibration result
    std::cout << "Evaluating the calibration result......" << std::endl;
    // sum of the per-image mean errors
    double totalErr = 0.0;
    // mean error of one image
    double err = 0.0;
    // re-projected image points
    std::vector<cv::Point2f> imagePoints2;
    std::cout << "Calibration error of each image:" << std::endl;
    fout << "Calibration error of each image:" << std::endl;
    for (i = 0; i < imageCount; i++)
    {
        std::vector<cv::Point3f> tempPointSet = objectPoints[i];
        // re-project the 3-D points with the estimated intrinsic and extrinsic
        // parameters to obtain new pixel coordinates imagePoints2
        cv::projectPoints(tempPointSet, rvecsMat[i], tvecsMat[i], cameraMatrix, distCoeffs, imagePoints2);
        // compare the re-projected points with the detected corners
        std::vector<cv::Point2f> tempImagePoint = imagePointsSeq[i];
        cv::Mat tempImagePointMat = cv::Mat(1, (int)tempImagePoint.size(), CV_32FC2);
        cv::Mat imagePoints2Mat = cv::Mat(1, (int)imagePoints2.size(), CV_32FC2);
        for (int k = 0; k < (int)tempImagePoint.size(); k++)
        {
            imagePoints2Mat.at<cv::Vec2f>(0, k) = cv::Vec2f(imagePoints2[k].x, imagePoints2[k].y);
            tempImagePointMat.at<cv::Vec2f>(0, k) = cv::Vec2f(tempImagePoint[k].x, tempImagePoint[k].y);
        }
        // calculates an absolute difference norm or a relative difference norm
        err = cv::norm(imagePoints2Mat, tempImagePointMat, NORM_L2);
        totalErr += err /= pointCounts[i];
        std::cout << "mean error of image " << i + 1 << ": " << err << " pixels" << endl;
        fout << "mean error of image " << i + 1 << ": " << err << " pixels" << endl;
    }
    // mean error over all images
    std::cout << "overall mean error: " << totalErr / imageCount << " pixels" << std::endl;
    fout << "overall mean error: " << totalErr / imageCount << " pixels" << std::endl;
    std::cout << "Evaluation finished!" << std::endl;

    // save the calibration result
    std::cout << "Saving the calibration result....." << std::endl;
    // rotation matrix of each image
    cv::Mat rotationMatrix = cv::Mat(3, 3, CV_32FC1, Scalar::all(0));
    fout << "camera intrinsic matrix:" << std::endl;
    fout << cameraMatrix << std::endl << std::endl;
    fout << "distortion coefficients:" << std::endl;
    fout << distCoeffs << std::endl << std::endl;
    for (int n = 0; n < imageCount; n++)
    {
        fout << "rotation vector of image " << n + 1 << ":" << std::endl;
        fout << rvecsMat[n] << std::endl;
        // convert the rotation vector into the corresponding rotation matrix
        cv::Rodrigues(rvecsMat[n], rotationMatrix);
        fout << "rotation matrix of image " << n + 1 << ":" << std::endl;
        fout << rotationMatrix << std::endl;
        fout << "translation vector of image " << n + 1 << ":" << std::endl;
        fout << tvecsMat[n] << std::endl;
    }
    std::cout << "Saving finished" << std::endl;

    /************************************************************************
    Display the undistorted images
    *************************************************************************/
    cv::Mat mapx = cv::Mat(imageSize, CV_32FC1);
    cv::Mat mapy = cv::Mat(imageSize, CV_32FC1);
    cv::Mat R = cv::Mat::eye(3, 3, CV_32F);
    std::cout << "Displaying the undistorted images" << endl;
    for (int n = 0; n != imageCount; n++)
    {
        std::cout << "Frame #" << n + 1 << "..." << endl;
        // compute the undistortion maps mapx/mapy (no stereo rectification here;
        // that requires a second camera)
        initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, imageSize, CV_32FC1, mapx, mapy);
        // read the original image and undistort it
        Mat imageSource = imread(filenames[n]);
        Mat newimage = imageSource.clone();
        // alternative that needs no precomputed maps:
        // undistort(imageSource, newimage, cameraMatrix, distCoeffs);
        remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);
        imshow("original image", imageSource);
        imshow("undistorted image", newimage);
        waitKey();
    }

    // release resources
    fin.close();
    fout.close();
    system("pause");
    return 0;
}
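
The sample above writes its results to a plain text file, which is easy to read but awkward to parse back. A common follow-up (not part of the original post) is to also store cameraMatrix and distCoeffs in a YAML file with cv::FileStorage, so they can be reloaded and applied to new frames without re-running the calibration. A minimal sketch, assuming such a YAML file exists and uses the node names "cameraMatrix" and "distCoeffs":

// Minimal sketch: reload saved intrinsics and undistort one image.
// The YAML node names are example choices and must match whatever was used
// when the calibration result was written.
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>

int main(int argc, char** argv)
{
    if (argc < 3)
        return 0;                       // usage: undistort_demo intrinsics.yml image.jpg

    cv::FileStorage fs(argv[1], cv::FileStorage::READ);
    cv::Mat cameraMatrix, distCoeffs;
    fs["cameraMatrix"] >> cameraMatrix;
    fs["distCoeffs"] >> distCoeffs;

    cv::Mat frame = cv::imread(argv[2]);
    cv::Mat undistorted;
    // one-shot correction; for a video stream, precompute the maps once with
    // initUndistortRectifyMap() and call remap() per frame instead
    cv::undistort(frame, undistorted, cameraMatrix, distCoeffs);

    cv::imshow("undistorted", undistorted);
    cv::waitKey();
    return 0;
}
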
Author: 雪天枫


