Merge branch 'neuer_ansatz_bildverarbeitung' of yasarba71520/Line-Following-Robot into master

commit bb16b602a7
Tim Zeuner, 2022-12-01 08:17:29 +00:00, committed by Gitea
8 changed files with 179 additions and 49 deletions

View File

@@ -15,14 +15,14 @@ Mat Input::readFile(String filePath)
 {
     std::srand(std::time(0));
     // Read all .jpg files from the specified folder
-    std::string folder = filePath;
-    std::vector<std::string> filenames;
+    cv::String folder = filePath;
+    std::vector<cv::String> filenames;
     cv::glob(folder, filenames);
     // Random shuffle
     std::random_shuffle(filenames.begin(), filenames.end());
-    Mat image = imread(filenames[0], IMREAD_COLOR);
+    Mat image = imread(filePath, IMREAD_COLOR);
     if(image.empty())
     {
@@ -39,16 +39,19 @@ Mat Input::readWebcam()
     Mat image;
     if(!cap.isOpened()) {
-        cout << "Fehler";
+        cout << "Video capture not opened" << std::endl;
         return Mat();
     }
-    cap.read(image);
+    if(!cap.grab()) {
+        cout << "Could not grab frame from camera" << std::endl;
+        return Mat();
+    }
+    cap.retrieve(image);
     return image;
 }
 void Input::freeWebcam()
 {
     this->cap.release();
 }
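
The grab()/retrieve() split above separates frame acquisition from decoding, so a failed grab can be reported instead of silently reusing a stale frame. A minimal standalone sketch of the same pattern, assuming OpenCV 4.x and a free function readFrame instead of the Input class:

    #include <opencv2/opencv.hpp>
    #include <iostream>

    // Returns an empty Mat on any failure so callers can test frame.empty().
    cv::Mat readFrame(cv::VideoCapture& cap)
    {
        cv::Mat image;
        if (!cap.isOpened()) {
            std::cout << "Video capture not opened" << std::endl;
            return cv::Mat();
        }
        if (!cap.grab()) {            // grab() only acquires the raw frame
            std::cout << "Could not grab frame from camera" << std::endl;
            return cv::Mat();
        }
        cap.retrieve(image);          // retrieve() decodes it into image
        return image;
    }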

View File

@@ -24,19 +24,37 @@ void Processing::processImage(Mat& inputPicture, int thresholdBinary, int gaussK
     // And one (the other one) to segment the lines.
     // No return value here as the input is passed by reference -> directly modified.
     cvtColor(inputPicture, inputPicture, COLOR_BGR2GRAY);
-    threshold(inputPicture, inputPicture, thresholdBinary, 255, THRESH_BINARY);
     GaussianBlur(inputPicture, inputPicture, Size(gaussKernelSize, gaussKernelSize), 0);
-    Canny(inputPicture, inputPicture, thresholdCanny1, thresholdCanny2, apertureSizeCanny);
+    threshold(inputPicture, inputPicture, thresholdBinary, 255, THRESH_BINARY);
+    // Perform an opening
+    Mat kernel(5, 5, CV_8UC1, 1);
+    morphologyEx(inputPicture, inputPicture, 2, kernel);
 }
-std::vector<Vec4i> Processing::calculateLineSegments(const Mat& inputPicture)
+FrameData Processing::calculateLineSegments(const Mat& inputPicture, const cv::Rect& roi)
 {
-    //See following link
-    //https://stackoverflow.com/questions/45322630/how-to-detect-lines-in-opencv
-    vector<Vec4i> lines;
-    VectorOfLines linesInVectors;
-    HoughLinesP(inputPicture, lines, 1, CV_PI/360, 150, 0, 250);
-    //lines = linesInVectors.findMiddleLine(lines);
-    return lines;
+    FrameData data;
+    cv::findContours(inputPicture, data.contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
+    // Delete the areas that are too small
+    auto iterator = data.contours.begin();
+    while(iterator != data.contours.end())
+    {
+        if (contourArea(*iterator) < 3500)
+        {
+            iterator = data.contours.erase(iterator);
+        }
+        else
+        {
+            Rect boundingBox = boundingRect(*iterator);
+            boundingBox.x += roi.x;
+            boundingBox.y += roi.y;
+            data.boundingBoxes.push_back(boundingBox);
+            data.middlePoints.push_back(Point(boundingBox.x + boundingBox.width/2, boundingBox.y + boundingBox.height/2));
+            data.leftEdges.push_back(Point(boundingBox.x, boundingBox.y + boundingBox.height/2));
+            ++iterator;
+        }
+    }
+    return data;
 }
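
The new segmentation path replaces Hough line detection with contour analysis: contours below a fixed area (3500 px) are discarded, and each surviving contour is reduced to a bounding box, a mid-point, and a left edge, all shifted back into full-frame coordinates by the ROI origin. A compressed sketch of that filtering idea, using a hypothetical helper filterContours and treating minArea as a tuning knob:

    #include <opencv2/opencv.hpp>
    #include <vector>

    // Hypothetical helper mirroring the area filter above.
    std::vector<cv::Rect> filterContours(const std::vector<std::vector<cv::Point>>& contours,
                                         const cv::Rect& roi, double minArea = 3500.0)
    {
        std::vector<cv::Rect> boxes;
        for (const auto& contour : contours)
        {
            if (cv::contourArea(contour) < minArea)
                continue;                              // too small, treat as noise
            cv::Rect box = cv::boundingRect(contour);
            box.x += roi.x;                            // map ROI coordinates back
            box.y += roi.y;                            // into the full frame
            boxes.push_back(box);
        }
        return boxes;
    }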

View File

@@ -21,5 +21,5 @@ public:
     void processImage(Mat& inputPicture, int thresholdBinary, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny);
-    std::vector<Vec4i> calculateLineSegments(const Mat& inputPicture);
+    FrameData calculateLineSegments(const Mat& inputPicture, const cv::Rect& roi);
 };

View File

@@ -1,7 +1,8 @@
-#include <opencv2/core/utils/logger.hpp>
+//#include <opencv2/core/utils/logger.hpp>
 #include <opencv2/opencv.hpp>
 #include <iostream>
+#include <direct.h>
 #include <input.h>
 #include <processing.h>
@@ -34,27 +35,97 @@ void sweep_em_all(int thresholdBinary, int videoHeight, int videoWidth, int gaus
 void in_depth_processing_chain(int thresholdBinary, int videoHeight, int videoWidth, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny)
 {
-    std::string outputFolder = "C:\\Users\\User\\Desktop\\temp";
     Input input(videoHeight, videoWidth);
-    Mat image = input.readFile("C:\\Users\\User\\Desktop\\Studium\\02_Master_MSY\\2. Semester Winter 22 23\\Projekt\\Line-Following-Robot\\Test_data");
-    imwrite(outputFolder + "\\01_input.jpg", image);
-    cvtColor(image, image, COLOR_BGR2GRAY);
-    imwrite(outputFolder + "\\02_color_convert.jpg", image);
-    GaussianBlur(image, image, Size(gaussKernelSize, gaussKernelSize), 0);
-    imwrite(outputFolder + "\\03_gauss.jpg", image);
-    threshold(image, image, thresholdBinary, 255, THRESH_BINARY);
-    imwrite(outputFolder + "\\04_threshold.jpg", image);
-    Canny(image, image, thresholdCanny1, thresholdCanny2, apertureSizeCanny);
-    imwrite(outputFolder + "\\05_canny.jpg", image);
+    cv::String outputFolder = "C:\\Users\\tim-z\\Desktop\\temp";
+    cv::String inputFolder = "C:\\Users\\tim-z\\Desktop\\Studium\\02_Master MSY\\2. Semester Winter 2022 2023\\Projekt\\Repo\\Line-Following-Robot\\AutonomousMode\\Test_data";
+    std::vector<cv::String> filenames;
+    cv::glob(inputFolder, filenames);
+    //filenames.begin(), filenames.end()
+    int i = 0;
+    for(std::vector<cv::String>::iterator it = filenames.begin(); it != filenames.end(); it++)
+    {
+        std::string current_output = outputFolder + "\\" + to_string(i);
+        std::cout << current_output << std::endl;
+        const char* current_output_char = current_output.c_str();
+        _mkdir(current_output_char);
+        std::string inputFile = inputFolder + "\\image" + to_string(i+1) + ".jpeg";
+        Mat original_image = input.readFile(inputFile);
+        imwrite(current_output + "\\00_input.jpg", original_image);
+        Point roiOrigin(0, original_image.rows*(7.5/12.0));
+        Rect roi = Rect(roiOrigin.x, roiOrigin.y, original_image.cols, original_image.rows/12);
+        Mat image = original_image(roi);
+        imwrite(current_output + "\\01_roi.jpg", image);
+        cvtColor(image, image, COLOR_BGR2GRAY);
+        imwrite(current_output + "\\02_color_convert.jpg", image);
+        GaussianBlur(image, image, Size(gaussKernelSize, gaussKernelSize), 0);
+        imwrite(current_output + "\\03_gauss.jpg", image);
+        threshold(image, image, thresholdBinary, 255, THRESH_BINARY);
+        imwrite(current_output + "\\04_threshold.jpg", image);
+        // Opening (reduces noise)
+        Mat kernel(5, 5, CV_8UC1, 1);
+        morphologyEx(image, image, 2, kernel);
+        imwrite(current_output + "\\05_opening.jpg", image);
+        //Canny(image, image, thresholdCanny1, thresholdCanny2, apertureSizeCanny);
+        //imwrite(outputFolder + "\\06_canny.jpg", image);
+        vector<vector<Point> > contours;
+        vector<Vec4i> hierarchy;
+        vector<Rect> vectorOfRects;
+        vector<Point> vectorOfLeftEdges;
+        findContours(image, contours, hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE);
+        int amountOfValidRects = 0;
+        for( int i = 0; i < contours.size(); i++ ) // iterate through each contour
+        {
+            double a = contourArea(contours[i], false); // Find the area of the contour
+            if(a > 3500)
+            {
+                drawContours(original_image, contours, i, Scalar(0,255,255), 1, 8, hierarchy, 0, roiOrigin);
+                Rect currentBoundingRect = boundingRect(contours[i]);
+                //Handle roi offset:
+                currentBoundingRect.x += roiOrigin.x;
+                currentBoundingRect.y += roiOrigin.y;
+                vectorOfRects.push_back(currentBoundingRect);
+                rectangle(original_image, currentBoundingRect, Scalar(0,255,0));
+                // get mid-point of rect
+                Point midRect = Point(currentBoundingRect.x+currentBoundingRect.width/2, currentBoundingRect.y+currentBoundingRect.height/2);
+                // Draw the middle as a small rect instead of a circle because for whatever reason drawing a circle doesn't work.
+                Rect testRect(Point(midRect.x-2, midRect.y-2), Point(midRect.x+2, midRect.y+2));
+                rectangle(original_image, testRect, Scalar(0,0,255));
+                // get the left edge of rect
+                // used as offset as raspicam is not
+                // mounted on mid of regbot
+                Point leftEdge(currentBoundingRect.x, currentBoundingRect.y+currentBoundingRect.height/2);
+                vectorOfLeftEdges.push_back(leftEdge);
+                testRect = Rect(Point(leftEdge.x-2, leftEdge.y-2), Point(leftEdge.x+2, leftEdge.y+2));
+                rectangle(original_image, testRect, Scalar(0,0,255));
+                amountOfValidRects++;
+            }
+        }
+        imwrite(current_output + "\\06_contours.jpg", original_image);
+        i++;
+    }
 }
 int main(void)
 {
     //Disable opencv logging messages
-    cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_WARNING);
+    //cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_WARNING);
     const int thresholdBinary = 140;
     const int videoHeight = 720;
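
The per-image debug chain above introduces the ROI crop that the LFR constructor later reuses: only a horizontal band of the frame is processed. The band starts at 7.5/12 of the image height and is 1/12 of the height tall, so for the 720x1280 frames used here that is roughly y = 450 with a height of 60 pixels over the full width. A minimal sketch of that crop, assuming the same proportions and a hypothetical helper cropScanBand:

    #include <opencv2/opencv.hpp>

    // Crop the horizontal scan band used for line detection.
    // The 7.5/12 offset and 1/12 height mirror the values in this commit.
    cv::Mat cropScanBand(const cv::Mat& frame, cv::Rect& roiOut)
    {
        int y = static_cast<int>(frame.rows * (7.5 / 12.0));   // e.g. 450 for 720 rows
        int h = frame.rows / 12;                               // e.g. 60 for 720 rows
        roiOut = cv::Rect(0, y, frame.cols, h);
        return frame(roiOut);   // header-only view, no pixel copy
    }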

View File

@@ -60,4 +60,15 @@ class VectorOfLines{
     static double calcDistance(Point p0, Point p1);
     vector<Vec4i> findMiddleLine(vector<Vec4i> &lines);
+};
+
+class FrameData
+{
+    public:
+    std::vector<std::vector<cv::Point>> contours;
+    std::vector<cv::Rect> boundingBoxes;
+    std::vector<cv::Point> leftEdges;
+    std::vector<cv::Point> middlePoints;
+    FrameData(): contours(), boundingBoxes(), leftEdges(), middlePoints() {}
 };
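
FrameData bundles everything one processed frame yields (the raw contours plus per-contour bounding boxes, mid-points, and left edges), so downstream modules receive a single object instead of separately passed parallel vectors. A hedged usage sketch with a hypothetical consumer pickClosestMidPoint, assuming the FrameData definition above is in an included header:

    #include <opencv2/opencv.hpp>
    #include <cstdlib>
    #include <limits>
    // (assumes the header that defines FrameData is included as well)

    // Hypothetical consumer: pick the detected segment closest to the image centre.
    cv::Point pickClosestMidPoint(const FrameData& data, int frameWidth)
    {
        cv::Point best(-1, -1);
        int bestDist = std::numeric_limits<int>::max();
        for (const cv::Point& mid : data.middlePoints)
        {
            int dist = std::abs(mid.x - frameWidth / 2);   // horizontal offset from centre
            if (dist < bestDist) { bestDist = dist; best = mid; }
        }
        return best;   // (-1,-1) when no contour survived the area filter
    }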

View File

@@ -7,17 +7,19 @@ int main(void)
     cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_WARNING);
     const int thresholdBinary = 140;
-    const int videoHeight = 240;
-    const int videoWidth = 320;
+    const int videoHeight = 720;
+    const int videoWidth = 1280;
     const int gaussKernelSize = 21;
     const int thresholdCanny1 = 50;
     const int thresholdCanny2 = 100;
     const int apertureSizeCanny = 3;
     LFR lfr(videoHeight, videoWidth, thresholdBinary, gaussKernelSize, thresholdCanny1, thresholdCanny2, apertureSizeCanny);
+    lfr.saveOutputFlag = true;
+    lfr.outputFileName = "/home/pi/Line-Following-Robot/AutonomousMode/tmp/test.jpg";
     lfr.startLoop();
     //To end the video stream, write any char in the console.
     char a;
     std::cin >> a;
     lfr.endLoop();
 }

View File

@@ -2,7 +2,7 @@
 LFR::LFR(int videoHeight, int videoWidth, int thresholdBinary, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny)
-: iAmLooping(false), input(videoHeight, videoWidth), processing(), controlModule(), interpreter(), intersectionHandler()
+: iAmLooping(false), input(videoHeight, videoWidth), processing(), controlModule(), interpreter(), intersectionHandler(), roi()
 {
     this->iAmLooping = false;
     this->thresholdBinary = thresholdBinary;
@@ -10,6 +10,14 @@ LFR::LFR(int videoHeight, int videoWidth, int thresholdBinary, int gaussKernelSi
     this->thresholdCanny1 = thresholdCanny1;
     this->thresholdCanny2 = thresholdCanny2;
     this->apertureSizeCanny = apertureSizeCanny;
+    this->videoFlag = false;
+    this->saveOutputFlag = false;
+    this->outputFileName = "";
+    cv::Point roiOrigin(0, videoHeight*(7.5/12.0));
+    roi = Rect(roiOrigin.x, roiOrigin.y, videoWidth, videoHeight/12);
 }
 LFR::~LFR()
@@ -22,21 +30,16 @@ LFR::~LFR()
 void LFR::loop()
 {
-    namedWindow("Display window");
+    if(this->videoFlag) {namedWindow("Display window");}
     while(iAmLooping)
     {
-        Mat image = input.readWebcam();
-        processing.processImage(image, this->thresholdBinary, this->gaussKernelSize, this->thresholdCanny1, thresholdCanny2, this->apertureSizeCanny);
-        std::vector<Vec4i> lines = processing.calculateLineSegments(image);
-        for( size_t i = 0; i < lines.size(); i++ )
-        {
-            line( image, Point(lines[i][0], lines[i][1]),
-                Point( lines[i][2], lines[i][3]), (0,0,255), 1, 8 );
-        }
-        imshow("Display window", image);
-        char c = (char)waitKey(1);
+        Mat originalImage = input.readWebcam();
+        Mat processedImage = originalImage;
+        processing.processImage(processedImage, this->thresholdBinary, this->gaussKernelSize, this->thresholdCanny1, thresholdCanny2, this->apertureSizeCanny);
+        FrameData data = processing.calculateLineSegments(processedImage, this->roi);
+        this->provideOutput(processedImage);
     }
-    destroyWindow("Display window");
+    if(this->videoFlag) {destroyWindow("Display window");}
     input.freeWebcam();
 }
@@ -51,4 +54,17 @@ void LFR::endLoop()
     iAmLooping = false;
     this->loopThread.join();
     return;
 }
+void LFR::provideOutput(const Mat& image)
+{
+    if(this->videoFlag)
+    {
+        imshow("Display window", image);
+        char c = (char)waitKey(1);
+    }
+    if (this->saveOutputFlag && !(this->outputFileName.empty()))
+    {
+        imwrite(this->outputFileName, image);
+    }
+}
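
With provideOutput gated by videoFlag and saveOutputFlag, the loop can run headless on the robot and only write a debug image when asked to, which is what the Raspberry Pi main above relies on. A small sketch of how the flags might be driven, assuming the public members added in this commit; the header name lfr.h and the output path are placeholders:

    #include <lfr.h>   // assumed header name for the LFR class

    int main()
    {
        const int videoHeight = 720, videoWidth = 1280;
        LFR lfr(videoHeight, videoWidth, /*thresholdBinary*/140, /*gaussKernelSize*/21,
                /*thresholdCanny1*/50, /*thresholdCanny2*/100, /*apertureSizeCanny*/3);
        lfr.videoFlag = false;                      // headless: no namedWindow/imshow
        lfr.saveOutputFlag = true;                  // but keep writing a debug frame
        lfr.outputFileName = "/tmp/lfr_debug.jpg";  // hypothetical path
        lfr.startLoop();                            // loop runs in its own thread
        // ... robot drives here ...
        lfr.endLoop();                              // clears iAmLooping and joins
        return 0;
    }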

View File

@@ -30,6 +30,9 @@ class LFR
     int thresholdCanny1;
     int thresholdCanny2;
     int apertureSizeCanny;
+    cv::Rect roi;
+    void provideOutput(const Mat& image);
 public:
@@ -39,5 +42,11 @@
     void startLoop();
     void endLoop();
+    bool videoFlag;
+    bool saveOutputFlag;
+    std::string outputFileName;
 };