Implement approach previously tested in spielwiese
commit 8c53f49935
parent 01f29ca9cd
@@ -24,19 +24,37 @@ void Processing::processImage(Mat& inputPicture, int thresholdBinary, int gaussK
    // And one (the other one) to segment the lines.
    // No return value here as the input is passed by reference -> directly modified.
    cvtColor(inputPicture, inputPicture, COLOR_BGR2GRAY);
    threshold(inputPicture, inputPicture, thresholdBinary, 255, THRESH_BINARY);
    GaussianBlur(inputPicture, inputPicture, Size(gaussKernelSize, gaussKernelSize), 0);
    Canny(inputPicture, inputPicture, thresholdCanny1, thresholdCanny2, apertureSizeCanny);
    threshold(inputPicture, inputPicture, thresholdBinary, 255, THRESH_BINARY);

    // Perform an opening to remove small artifacts left after edge detection.
    Mat kernel(5, 5, CV_8UC1, 1);
    morphologyEx(inputPicture, inputPicture, 2, kernel);   // op code 2 == MORPH_OPEN
}

std::vector<Vec4i> Processing::calculateLineSegments(const Mat& inputPicture)
FrameData Processing::calculateLineSegments(const Mat& inputPicture, const cv::Rect& roi)
{
    // See the following link:
    // https://stackoverflow.com/questions/45322630/how-to-detect-lines-in-opencv
    vector<Vec4i> lines;
    VectorOfLines linesInVectors;
    HoughLinesP(inputPicture, lines, 1, CV_PI/360, 150, 0, 250);
    //lines = linesInVectors.findMiddleLine(lines);
    FrameData data;
    cv::findContours(inputPicture, data.contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

    return lines;
    // Delete the contours whose area is too small.
    auto iterator = data.contours.begin();
    while (iterator != data.contours.end())
    {
        if (contourArea(*iterator) < 3500)
        {
            iterator = data.contours.erase(iterator);
        }
        else
        {
            // Shift the bounding box from ROI coordinates back into full-frame coordinates.
            Rect boundingBox = boundingRect(*iterator);
            boundingBox.x += roi.x;
            boundingBox.y += roi.y;
            data.boundingBoxes.push_back(boundingBox);
            data.middlePoints.push_back(Point(boundingBox.x + boundingBox.width/2, boundingBox.y + boundingBox.height/2));
            data.leftEdges.push_back(Point(boundingBox.x, boundingBox.y + boundingBox.height/2));
            ++iterator;
        }
    }
    return data;
}
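As a reference for the reworked pair of functions above, here is a minimal standalone sketch of how they could be exercised together. It is not part of the commit: the file names and threshold values are placeholders, and it assumes the frame is cropped to the ROI before segmentation so that the roi.x/roi.y offsets in calculateLineSegments map the boxes back to full-frame coordinates.

// Hypothetical usage sketch; Processing/FrameData come from this commit, everything else is assumed.
#include <opencv2/opencv.hpp>
#include "processing.h"   // assumed header name

int main()
{
    cv::Mat frame = cv::imread("track.png");                         // any test frame
    if (frame.empty()) return 1;
    cv::Rect roi(0, static_cast<int>(frame.rows * 7.5 / 12), frame.cols, frame.rows / 12);

    Processing processing;
    cv::Mat cropped = frame(roi).clone();                            // work on the ROI strip only
    processing.processImage(cropped, 100, 5, 50, 150, 3);            // placeholder thresholds / kernel sizes
    FrameData data = processing.calculateLineSegments(cropped, roi);

    for (const cv::Rect& box : data.boundingBoxes)                   // boxes are already in full-frame coordinates
        cv::rectangle(frame, box, cv::Scalar(0, 255, 0), 2);
    cv::imwrite("annotated.png", frame);
    return 0;
}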
@@ -21,5 +21,5 @@ public:

    void processImage(Mat& inputPicture, int thresholdBinary, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny);

    std::vector<Vec4i> calculateLineSegments(const Mat& inputPicture);
    FrameData calculateLineSegments(const Mat& inputPicture, const cv::Rect& roi);
};
@@ -61,3 +61,14 @@ class VectorOfLines{
    vector<Vec4i> findMiddleLine(vector<Vec4i> &lines);

};

class FrameData
{
public:
    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Rect> boundingBoxes;
    std::vector<cv::Point> leftEdges;
    std::vector<cv::Point> middlePoints;

    FrameData() : contours(), boundingBoxes(), leftEdges(), middlePoints() {}
};
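The three geometry vectors in FrameData are filled in lockstep by calculateLineSegments, so index i of boundingBoxes, middlePoints and leftEdges describes the same surviving contour. A hypothetical consumer (not part of the commit) could iterate them like this:

// Hypothetical consumer of FrameData; assumes it was filled by calculateLineSegments.
#include <iostream>
#include <opencv2/core.hpp>

void printDetections(const FrameData& data)
{
    for (size_t i = 0; i < data.boundingBoxes.size(); ++i)
    {
        // The same index addresses the same contour in all three vectors.
        std::cout << "box " << data.boundingBoxes[i]
                  << " middle " << data.middlePoints[i]
                  << " left edge " << data.leftEdges[i] << std::endl;
    }
}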
@@ -2,7 +2,7 @@

LFR::LFR(int videoHeight, int videoWidth, int thresholdBinary, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny)
    : iAmLooping(false), input(videoHeight, videoWidth), processing(), controlModule(), interpreter(), intersectionHandler()
    : iAmLooping(false), input(videoHeight, videoWidth), processing(), controlModule(), interpreter(), intersectionHandler(), roi()
{
    this->iAmLooping = false;
    this->thresholdBinary = thresholdBinary;
@@ -14,6 +14,10 @@ LFR::LFR(int videoHeight, int videoWidth, int thresholdBinary, int gaussKernelSi
    this->videoFlag = false;
    this->saveOutputFlag = false;
    this->outputFileName = "";

    cv::Point roiOrigin(0, videoHeight*(7.5/12.0));
    roi = Rect(roiOrigin.x, roiOrigin.y, videoWidth, videoHeight/12);
}

LFR::~LFR()
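As a quick sanity check of the new ROI arithmetic above (a 640x480 frame is assumed for illustration; the commit itself does not fix a resolution): roiOrigin evaluates to (0, 480 * 7.5/12) = (0, 300), and the ROI becomes Rect(0, 300, 640, 480/12) = Rect(0, 300, 640, 40), i.e. a 40-pixel-high strip spanning the full width in the lower part of the frame.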
@@ -29,23 +33,11 @@ void LFR::loop()
    if(this->videoFlag) {namedWindow("Display window");}
    while(iAmLooping)
    {
        Mat image = input.readWebcam();
        processing.processImage(image, this->thresholdBinary, this->gaussKernelSize, this->thresholdCanny1, thresholdCanny2, this->apertureSizeCanny);
        std::vector<Vec4i> lines = processing.calculateLineSegments(image);
        for( size_t i = 0; i < lines.size(); i++ )
        {
            line( image, Point(lines[i][0], lines[i][1]),
                  Point( lines[i][2], lines[i][3]), (0,0,255), 1, 8 );
        }
        if(this->videoFlag)
        {
            imshow("Display window", image);
            char c = (char)waitKey(1);
        }
        if (this->saveOutputFlag && !(this->outputFileName.empty()))
        {
            imwrite(this->outputFileName, image);
        }
        Mat originalImage = input.readWebcam();
        Mat processedImage = originalImage;
        processing.processImage(processedImage, this->thresholdBinary, this->gaussKernelSize, this->thresholdCanny1, thresholdCanny2, this->apertureSizeCanny);
        FrameData data = processing.calculateLineSegments(processedImage, this->roi);
        this->provideOutput(processedImage);
    }
    if(this->videoFlag) {destroyWindow("Display window");}
    input.freeWebcam();
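One OpenCV detail worth keeping in mind for the new loop body above: Mat processedImage = originalImage; copies only the Mat header, so both variables share the same pixel buffer and processImage modifies originalImage as well; an independent copy would need clone(). A minimal standalone illustration (not part of the commit):

#include <opencv2/core.hpp>

// Demonstrates shallow vs. deep Mat copies.
int main()
{
    cv::Mat original = cv::Mat::zeros(4, 4, CV_8UC1);
    cv::Mat shared = original;           // header copy: shares the pixel buffer
    cv::Mat deep   = original.clone();   // deep copy: independent buffer

    shared.at<uchar>(0, 0) = 255;
    // original.at<uchar>(0, 0) is now 255 as well; deep.at<uchar>(0, 0) is still 0.
    return 0;
}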
@@ -63,3 +55,16 @@ void LFR::endLoop()
    this->loopThread.join();
    return;
}

void LFR::provideOutput(const Mat& image)
{
    if(this->videoFlag)
    {
        imshow("Display window", image);
        char c = (char)waitKey(1);
    }
    if (this->saveOutputFlag && !(this->outputFileName.empty()))
    {
        imwrite(this->outputFileName, image);
    }
}
@@ -30,6 +30,9 @@ class LFR
    int thresholdCanny1;
    int thresholdCanny2;
    int apertureSizeCanny;
    cv::Rect roi;

    void provideOutput(const Mat& image);

    public: