K.Hartmann
12.03.2010, 19:16
/* This is sample from the OpenCV book. The copyright notice is below */
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warranty, support or any guarantee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at:
http://sourceforge.net/projects/opencvlibrary/
* The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back):
http://opencvlibrary.sourceforge.net/
* An active user group is at:
http://tech.groups.yahoo.com/group/OpenCV/
* The minutes of weekly OpenCV development meetings are at:
http://pr.willowgarage.com/wiki/OpenCV
************************************************** */
#include "cv.h"
#include "cxmisc.h"
#include "highgui.h"
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
using namespace std;
//
// Given a list of chessboard images, the number of interior corners (nx, ny)
// on the chessboards, and a flag useUncalibrated: 0 for calibrated
// (Bouguet's) rectification, 1 for uncalibrated (Hartley's) rectification
// using the fundamental matrix from cvStereoCalibrate(), or 2 to compute
// the fundamental matrix separately. Calibrate the cameras and display the
// rectified results along with the computed disparity images.
//
static void
StereoCalib(const char* imageList, int useUncalibrated)
{
int nx = 0, ny = 0;
int displayCorners = 0;
int showUndistorted = 1;
bool isVerticalStereo = false;//OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = 1.f; //Set this to your actual square size
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes, n, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<int> npoints;
vector<uchar> active[2];
vector<CvPoint2D32f> temp;
CvSize imageSize = {0,0};
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
CvMat _M1 = cvMat(3, 3, CV_64F, M1 );
CvMat _M2 = cvMat(3, 3, CV_64F, M2 );
CvMat _D1 = cvMat(1, 5, CV_64F, D1 );
CvMat _D2 = cvMat(1, 5, CV_64F, D2 );
CvMat _R = cvMat(3, 3, CV_64F, R );
CvMat _T = cvMat(3, 1, CV_64F, T );
CvMat _E = cvMat(3, 3, CV_64F, E );
CvMat _F = cvMat(3, 3, CV_64F, F );
char buf[1024];
if( displayCorners )
cvNamedWindow( "corners", 1 );
// READ IN THE LIST OF CHESSBOARDS:
if( !f )
{
fprintf(stderr, "can not open file %s\n", imageList );
return;
}
if( !fgets(buf, sizeof(buf)-3, f) || sscanf(buf, "%d%d", &nx, &ny) != 2 )
{
fprintf(stderr, "can not read the board size from %s\n", imageList );
fclose(f);
return;
}
n = nx*ny;
temp.resize(n);
for(i=0;;i++)
{
int count = 0, result=0;
lr = i % 2;
vector<CvPoint2D32f>& pts = points[lr];
if( !fgets( buf, sizeof(buf)-3, f ))
break;
size_t len = strlen(buf);
while( len > 0 && isspace(buf[len-1]))
buf[--len] = '\0';
if( buf[0] == '#')
continue;
IplImage* img = cvLoadImage( buf, 0 );
if( !img )
break;
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for( int s = 1; s <= maxScale; s++ )
{
IplImage* timg = img;
if( s > 1 )
{
timg = cvCreateImage(cvSize(img->width*s,img->height*s),
img->depth, img->nChannels );
cvResize( img, timg, CV_INTER_CUBIC );
}
result = cvFindChessboardCorners( timg, cvSize(nx, ny),
&temp[0], &count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE);
if( timg != img )
cvReleaseImage( &timg );
if( result || s == maxScale )
for( j = 0; j < count; j++ )
{
temp[j].x /= s;
temp[j].y /= s;
}
if( result )
break;
}
if( displayCorners )
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage( imageSize, 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
cvDrawChessboardCorners( cimg, cvSize(nx, ny), &temp[0],
count, result );
cvShowImage( "corners", cimg );
cvReleaseImage( &cimg );
int c = cvWaitKey(1000);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
N = pts.size();
pts.resize(N + n, cvPoint2D32f(0,0));
active[lr].push_back((uchar)result);
//assert( result != 0 );
if( result )
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix( img, &temp[0], count,
cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
30, 0.01) );
copy( temp.begin(), temp.end(), pts.begin() + N );
}
cvReleaseImage( &img );
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
nframes = active[0].size();//Number of good chessboards found
objectPoints.resize(nframes*n);
for( i = 0; i < ny; i++ )
for( j = 0; j < nx; j++ )
objectPoints[i*nx + j] =
cvPoint3D32f(i*squareSize, j*squareSize, 0);
for( i = 1; i < nframes; i++ )
copy( objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n );
npoints.resize(nframes,n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
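// CV_CALIB_FIX_ASPECT_RATIO keeps the fx/fy ratio fixed,
// CV_CALIB_ZERO_TANGENT_DIST forces the tangential distortion
// coefficients to zero, and CV_CALIB_SAME_FOCAL_LENGTH solves for a
// single focal length shared by both cameras.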
cvStereoCalibrate( &_objectPoints, &_imagePoints1,
&_imagePoints2, &_npoints,
&_M1, &_D1, &_M2, &_D2,
imageSize, &_R, &_T, &_E, &_F,
cvTermCriteria(CV_TERMCRIT_ITER+
CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH );
printf(" done\n");
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
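// cvComputeCorrespondEpilines() returns each line as (a,b,c) normalized
// so that a^2 + b^2 = 1, hence |a*x + b*y + c| below is the pixel distance
// from a point to the epipolar line of its counterpart.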
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints( &_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1 );
cvUndistortPoints( &_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2 );
cvComputeCorrespondEpilines( &_imagePoints1, 1, &_F, &_L1 );
cvComputeCorrespondEpilines( &_imagePoints2, 2, &_F, &_L2 );
double avgErr = 0;
for( i = 0; i < N; i++ )
{
double err = fabs(points[0][i].x*lines[1][i].x +
points[0][i].y*lines[1][i].y + lines[1][i].z)
+ fabs(points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z);
avgErr += err;
}
printf( "avg err = %g\n", avgErr/(nframes*n) );
//COMPUTE AND DISPLAY RECTIFICATION
if( showUndistorted )
{
CvMat* mx1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* mx2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* img1r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* img2r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* disp = cvCreateMat( imageSize.height,
imageSize.width, CV_16S );
CvMat* vdisp = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* pair;
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF CALIBRATED (BOUGUET'S METHOD)
if( useUncalibrated == 0 )
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify( &_M1, &_M2, &_D1, &_D2, imageSize,
&_R, &_T,
&_R1, &_R2, &_P1, &_P2, 0,
0/*CV_CALIB_ZERO_DISPARITY*/ );
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
//Precompute maps for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_P1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_P2,mx2,my2);
}
//OR ELSE HARTLEY'S METHOD
else if( useUncalibrated == 1 || useUncalibrated == 2 )
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
//Just to show you could have independently used F
if( useUncalibrated == 2 )
cvFindFundamentalMat( &_imagePoints1,
&_imagePoints2, &_F);
cvStereoRectifyUncalibrated( &_imagePoints1,
&_imagePoints2, &_F,
imageSize,
&_H1, &_H2, 3);
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);
}
else
assert(0);
cvNamedWindow( "rectified", 1 );
// RECTIFY THE IMAGES AND FIND DISPARITY MAPS
if( !isVerticalStereo )
pair = cvCreateMat( imageSize.height, imageSize.width*2,
CV_8UC3 );
else
pair = cvCreateMat( imageSize.height*2, imageSize.width,
CV_8UC3 );
//Setup for finding stereo correspondences
CvStereoBMState *BMState = cvCreateStereoBMState();
assert(BMState != 0);
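// preFilterSize and preFilterCap control the prefiltering stage,
// SADWindowSize is the (odd-sized) block-matching window,
// numberOfDisparities must be a multiple of 16, and
// textureThreshold / uniquenessRatio discard low-confidence matches.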
BMState->preFilterSize=33;
BMState->preFilterCap=33;
BMState->SADWindowSize=33;
if( useUncalibrated )
{
BMState->minDisparity=-64;
BMState->numberOfDisparities=128;
}
else
{
BMState->minDisparity=-32;
BMState->numberOfDisparities=192;
}
BMState->textureThreshold=10;
BMState->uniquenessRatio=15;
for( i = 0; i < nframes; i++ )
{
IplImage* img1=cvLoadImage(imageNames[0][i].c_str(),0);
IplImage* img2=cvLoadImage(imageNames[1][i].c_str(),0);
if( img1 && img2 )
{
CvMat part;
cvRemap( img1, img1r, mx1, my1 );
cvRemap( img2, img2r, mx2, my2 );
if( !isVerticalStereo || useUncalibrated != 0 )
{
// When the stereo camera is oriented vertically,
// useUncalibrated==0 does not transpose the
// image, so the epipolar lines in the rectified
// images are vertical. The stereo correspondence
// function does not support such a case.
cvFindStereoCorrespondenceBM( img1r, img2r, disp,
BMState);
cvNormalize( disp, vdisp, 0, 256, CV_MINMAX );
cvNamedWindow( "disparity" );
cvShowImage( "disparity", vdisp );
}
if( !isVerticalStereo )
{
cvGetCols( pair, &part, 0, imageSize.width );
cvCvtColor( img1r, &part, CV_GRAY2BGR );
cvGetCols( pair, &part, imageSize.width,
imageSize.width*2 );
cvCvtColor( img2r, &part, CV_GRAY2BGR );
for( j = 0; j < imageSize.height; j += 16 )
cvLine( pair, cvPoint(0,j),
cvPoint(imageSize.width*2,j),
CV_RGB(0,255,0));
}
else
{
cvGetRows( pair, &part, 0, imageSize.height );
cvCvtColor( img1r, &part, CV_GRAY2BGR );
cvGetRows( pair, &part, imageSize.height,
imageSize.height*2 );
cvCvtColor( img2r, &part, CV_GRAY2BGR );
for( j = 0; j < imageSize.width; j += 16 )
cvLine( pair, cvPoint(j,0),
cvPoint(j,imageSize.height*2),
CV_RGB(0,255,0));
}
cvShowImage( "rectified", pair );
if( cvWaitKey() == 27 )
break;
}
cvReleaseImage( &img1 );
cvReleaseImage( &img2 );
}
cvReleaseStereoBMState(&BMState);
cvReleaseMat( &mx1 );
cvReleaseMat( &my1 );
cvReleaseMat( &mx2 );
cvReleaseMat( &my2 );
cvReleaseMat( &img1r );
cvReleaseMat( &img2r );
cvReleaseMat( &disp );
cvReleaseMat( &vdisp );
cvReleaseMat( &pair );
}
}
int main(int argc, char** argv)
{
StereoCalib(argc > 1 ? argv[1] : "stereo_calib.txt", 1);
return 0;
}
K.Hartmann
16.03.2010, 14:07
I'm back in the country. Yesterday I also had a dentist appointment that flattened me somewhat, which is why I'm only getting back to you now.
I have bodged together a little "snapshot" program.
This program shows you the live images from both cameras, and each time you press the space bar it saves the two images as grayscale JPEGs. So far, so good. The resulting files, however, are about twice the size of the image files from the demo, so the obvious suspicion is that my images are already wrong. Attempts with the cvResize function only produced error messages when the program ran (see the sketch after the listing below). I'd best just post the messy little source code of my snapshot program.
#include <stdio.h>
#include <string>
#include <sstream>
#include <highgui.h>
#include <stdlib.h>
#include <cv.h>
#include <cvaux.h>
#include <math.h>
using namespace std;
int main(int argc, char ** argv)
{
int n = 1;
char buffer[1024];
CvCapture* rightCapture = cvCaptureFromCAM(1);
if( !rightCapture ) {
fprintf( stderr, "ERROR: rightCapture is NULL \n" );
getchar();
return -1;
}
CvCapture* leftCapture = cvCaptureFromCAM(0);
if( !leftCapture ) {
fprintf( stderr, "ERROR: leftCapture is NULL \n" );
getchar();
return -1;
}
// Create a window in which the captured images will be presented
cvNamedWindow( "rightCamWindow", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "leftCamWindow", CV_WINDOW_AUTOSIZE );
cvMoveWindow("rightCamWindow",0,0);
cvMoveWindow("leftCamWindow",650,0);
// Show the image captured from the camera in the window and repeat
while( n<15 ) {
// Get one frame
IplImage* rightFrame = cvQueryFrame( rightCapture );
IplImage* leftFrame = cvQueryFrame( leftCapture );
if( !rightFrame ) {
fprintf( stderr, "ERROR: rightFrame is null...\n" );
getchar();
break;
}
if( !leftFrame ) {
fprintf( stderr, "ERROR: leftFrame is null...\n" );
getchar();
break;
}
cvShowImage( "rightCamWindow", rightFrame );
// Do not release the frame!
cvShowImage( "leftCamWindow", leftFrame );
// Do not release the frame!
int cc = 0;
while(cc==0){
if( (cvWaitKey(10) & 255) == 32 ){
cc = 1;
cvShowImage( "rightCamWindow", rightFrame );
// Do not release the frame!
cvShowImage( "leftCamWindow", leftFrame );
// Do not release the frame!
}
IplImage* rightFrame = cvQueryFrame( rightCapture );
if( !rightFrame ) {
fprintf( stderr, "ERROR: rightFrame is null...\n" );
getchar();
break;
}
IplImage* leftFrame = cvQueryFrame( leftCapture );
if( !leftFrame ) {
fprintf( stderr, "ERROR: leftFrame is null...\n" );
getchar();
break;
}
cvShowImage( "rightCamWindow", rightFrame );
// Do not release the frame!
cvShowImage( "leftCamWindow", leftFrame );
// Do not release the frame!
}
ostringstream outStream;
outStream << n;
string sn = outStream.str();
string s0 = "1";
string s1 = "/home/xyz/workspace/stereo-3d/r";
string s2 = "/home/xyz/workspace/stereo-3d/l";
if(n<10) {
s1 = "/home/xyz/workspace/stereo-3d/r0";
s2 = "/home/xyz/workspace/stereo-3d/l0";
}
int l;
if(rightFrame) {
s0 = s1 + outStream.str();
s0 += ".jpg#";
for (int i=0;i<=100; i++)
{
if (s0[i] != '#') {
buffer[i] = s0[i];
buffer[i+1] = '\0';
}
else break;
}
IplImage* tmpImgGray = cvCreateImage(cvGetSize(rightFrame), IPL_DEPTH_8U,1);
cvCvtColor(rightFrame, tmpImgGray, CV_BGR2GRAY ); // captured frames are BGR, not RGB
cvSaveImage(buffer ,tmpImgGray);
cvReleaseImage(&tmpImgGray);
}
if(leftFrame) {
s0 = s2 + outStream.str();
s0 += ".jpg#";
for (int i=0;i<=100; i++)
{
if (s0[i] != '#') {
buffer[i] = s0[i];
buffer[i+1] = '\0';
}
else break;
}
IplImage* tmpImgGray = cvCreateImage(cvGetSize(leftFrame), IPL_DEPTH_8U,1);
cvCvtColor(leftFrame, tmpImgGray, CV_BGR2GRAY ); // captured frames are BGR, not RGB
cvSaveImage(buffer ,tmpImgGray);
cvReleaseImage(&tmpImgGray);
}
//If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
//remove higher bits using AND operator
if( (cvWaitKey(10) & 255) == 27 ) break;
n++;
}
// Release the capture device housekeeping
cvReleaseCapture( &rightCapture );
cvDestroyWindow( "rightCamWindow" );
cvReleaseCapture( &leftCapture );
cvDestroyWindow( "leftCamWindow" );
return 0;
}
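Regarding the cvResize errors mentioned above: as far as I can tell, cvResize() does not allocate its destination, so you first have to create an image of the target size with the same depth and channel count and pass that in; otherwise the call fails at run time. A minimal sketch of how I understand it is meant to be used (the file names are only placeholders):
#include <cv.h>
#include <highgui.h>
int main()
{
IplImage* src = cvLoadImage( "r01.jpg", 0 ); // placeholder name, loaded as grayscale
if( !src )
return -1;
// the destination must already exist at the target size
IplImage* dst = cvCreateImage( cvSize(src->width/2, src->height/2),
src->depth, src->nChannels );
cvResize( src, dst, CV_INTER_AREA ); // CV_INTER_AREA works well for shrinking
cvSaveImage( "r01_small.jpg", dst );
cvReleaseImage( &dst );
cvReleaseImage( &src );
return 0;
}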
Don't despair! I haven't done anything with C in almost 20 years, so please forgive my incompetence.
Klaus
K.Hartmann
16.03.2010, 19:43
Well, that was quite the farce.
So: first of all I rewrote the code from quick 'n' dirty into something a little more polished.
#include <stdio.h>
#include <string.h>
#include <string>
#include <sstream>
#include <highgui.h>
#include <stdlib.h>
#include <cv.h>
#include <cvaux.h>
#include <math.h>
using namespace std;
int main(int argc, char ** argv)
{
int n = 1;
char buffer[1024];
IplImage* rightFrame;
IplImage* leftFrame;
CvCapture* rightCapture = cvCaptureFromCAM(1);
CvCapture* leftCapture = cvCaptureFromCAM(0);
// Create a window in which the captured images will be presented
cvNamedWindow( "rightCamWindow", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "leftCamWindow", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "rightCaptureWindow", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "leftCaptureWindow", CV_WINDOW_AUTOSIZE );
cvMoveWindow("rightCamWindow",0,0);
cvMoveWindow("leftCamWindow",650,0);
cvMoveWindow("rightCaptureWindow",0,490);
cvMoveWindow("leftCaptureWindow",650,490);
// Show the image captured from the camera in the window and repeat
while( n<15 ) {
rightFrame = cvQueryFrame( rightCapture );
leftFrame = cvQueryFrame( leftCapture );
cvShowImage( "rightCamWindow", rightFrame );
cvShowImage( "leftCamWindow", leftFrame );
if( (cvWaitKey(10) & 255) == 32 ){
ostringstream outStream;
outStream << n;
string sn = outStream.str();
string s0 = "1";
string s1 = "/home/klaus/workspace/stereo-3d/r";
string s2 = "/home/klaus/workspace/stereo-3d/l";
if(n<10) {
s1 = "/home/klaus/workspace/stereo-3d/r0";
s2 = "/home/klaus/workspace/stereo-3d/l0";
}
s0 = s1 + outStream.str();
s0 += ".jpg";
strcpy (buffer,s0.c_str());
IplImage* rightImgGray = cvCreateImage(cvGetSize(rightFrame), IPL_DEPTH_8U,1);
cvCvtColor(rightFrame, rightImgGray, CV_BGR2GRAY ); // captured frames are BGR, not RGB
cvShowImage( "rightCaptureWindow", rightImgGray );
cvSaveImage(buffer ,rightImgGray);
cvReleaseImage(&rightImgGray);
s0 = s2 + outStream.str();
s0 += ".jpg";
strcpy (buffer,s0.c_str());
IplImage* leftImgGray = cvCreateImage(cvGetSize(leftFrame), IPL_DEPTH_8U,1);
cvCvtColor(leftFrame, leftImgGray, CV_BGR2GRAY ); // captured frames are BGR, not RGB
cvShowImage( "leftCaptureWindow", leftImgGray );
cvSaveImage(buffer ,leftImgGray);
cvReleaseImage(&leftImgGray);
n++;
}
rightFrame = cvQueryFrame( rightCapture );
leftFrame = cvQueryFrame( leftCapture );
//If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
//remove higher bits using AND operator
if( (cvWaitKey(10) & 255) == 27 ) break;
}
// Release the capture device housekeeping
cvReleaseCapture( &rightCapture );
cvReleaseCapture( &leftCapture );
cvDestroyWindow( "rightCamWindow" );
cvDestroyWindow( "leftCamWindow" );
cvDestroyWindow( "rightCaptureWindow" );
cvDestroyWindow( "leftCaptureWindow" );
// frames returned by cvQueryFrame() are owned by the capture and must not be released
return 0;
}
That could no doubt still be improved, but for someone who briefly looked at C more than 20 years ago and is suddenly messing about with C++, the new code is surely acceptable?
Still, absolutely nothing worked. Then it occurred to me to take another close look at the file stereo_calib.txt. Really daft of me.
I have a 9 x 7 chessboard. For reasons that at first seemed inscrutable, I have to enter 8 6 in the first line of the text file, always one less in each direction: cvFindChessboardCorners() counts interior corners, not squares, and a board of 9 x 7 squares has 8 x 6 interior corners. OK, I thought that was exactly what I had entered. In fact it said 9 6.
No sooner had I changed that than everything ran as it should.
To sum up:
1. The "chessboard" used MUST NOT be square. It must have an UNEQUAL number of squares in the two directions.
2. The images MUST be saved as grayscale images.
3. The text file MUST contain the number of squares along the length and width of the chessboard, each reduced by 1, i.e. the number of interior corners (a sample list file follows below).
4. It is recommended to capture at least 15 image pairs in order to obtain a usable calibration.
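For reference, the list file now looks roughly like this on my machine: the first line holds the two interior corner counts, and after that the image file names simply alternate between the two cameras, pair by pair (whichever camera is listed first in each pair becomes camera 1 in the demo):
8 6
/home/klaus/workspace/stereo-3d/l01.jpg
/home/klaus/workspace/stereo-3d/r01.jpg
/home/klaus/workspace/stereo-3d/l02.jpg
/home/klaus/workspace/stereo-3d/r02.jpg
and so on, one left/right pair per snapshot, in my case through l14.jpg / r14.jpg.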
Right. In any case it works now, and I can get on with cobbling together the rs232 / i2s adapter.
Many thanks
Klaus