{
std::srand(std::time(0));
// Read all .jpg files from the specified folder
cv::String folder = filePath;
std::vector<cv::String> filenames;
cv::glob(folder, filenames);
// Random shuffle
std::random_shuffle(filenames.begin(), filenames.end());
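// Note: std::random_shuffle was deprecated in C++14 and removed in C++17; on newer
// toolchains std::shuffle with a seeded std::mt19937 is the drop-in replacement.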
Mat image = imread(filePath, IMREAD_COLOR);
if(image.empty())
{
Mat image;
if(!cap.isOpened()) {
cout << "Video capture not opened" << std::endl;
return Mat();
}
if(!cap.grab()) {
cout << "Could not grab frame from camera" << std::endl;
return Mat();
}
cap.retrieve(image);
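// (grab() only pulls the raw frame from the capture device and retrieve() then decodes it,
//  so the error check above runs before any decoding work is done.)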
return image;
}
void Input::freeWebcam()
{
this->cap.release();
}
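// Minimal usage sketch for this Input class (constructor arguments follow the call in
// main(); an empty Mat returned by readWebcam() signals a capture error):
//   Input input(videoHeight, videoWidth);
//   Mat frame = input.readWebcam();
//   if(!frame.empty()) { /* hand the frame to the processing stage */ }
//   input.freeWebcam();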
// And another one to segment the lines.
// No return value here: the input is passed by reference and modified directly.
cvtColor(inputPicture, inputPicture, COLOR_BGR2GRAY);
GaussianBlur(inputPicture, inputPicture, Size(gaussKernelSize, gaussKernelSize), 0);
threshold(inputPicture, inputPicture, thresholdBinary, 255, THRESH_BINARY);
// Perform an opening (erosion followed by dilation) to remove small specks of noise
Mat kernel = Mat::ones(5, 5, CV_8UC1);
morphologyEx(inputPicture, inputPicture, MORPH_OPEN, kernel);
}
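// Usage sketch: the image is processed in place, so the caller passes its own Mat
// (parameter names as used in main()):
//   Mat frame = input.readWebcam();
//   processing.processImage(frame, thresholdBinary, gaussKernelSize,
//                           thresholdCanny1, thresholdCanny2, apertureSizeCanny);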
FrameData Processing::calculateLineSegments(const Mat& inputPicture, const cv::Rect& roi)
{
FrameData data;
cv::findContours(inputPicture, data.contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
// Delete the contours whose area is too small to belong to the line
auto iterator = data.contours.begin();
while(iterator != data.contours.end())
{
if (contourArea(*iterator) < 3500)
{
iterator = data.contours.erase(iterator);
}
else
{
Rect boundingBox = boundingRect(*iterator);
// Shift the box from ROI coordinates back into full-image coordinates
boundingBox.x += roi.x;
boundingBox.y += roi.y;
data.boundingBoxes.push_back(boundingBox);
data.middlePoints.push_back(Point(boundingBox.x + boundingBox.width/2, boundingBox.y + boundingBox.height/2));
data.leftEdges.push_back(Point(boundingBox.x, boundingBox.y + boundingBox.height/2));
++iterator;
}
}
return data;
}
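// Sketch of how a caller can consume the returned FrameData (illustrative only; the
// index-0 access assumes at least one contour survived the area filter):
//   FrameData data = processing.calculateLineSegments(processedImage, roi);
//   if(!data.middlePoints.empty())
//   {
//       Point target = data.middlePoints[0];  // centre of the first detected segment
//       Point edge   = data.leftEdges[0];     // left edge, compensates the camera mounting offset
//   }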
void processImage(Mat& inputPicture, int thresholdBinary, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny);
FrameData calculateLineSegments(const Mat& inputPicture, const cv::Rect& roi);
};
//#include <opencv2/core/utils/logger.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <direct.h>
#include <input.h>
#include <processing.h>
void in_depth_processing_chain(int thresholdBinary, int videoHeight, int videoWidth, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny)
{
cv::String outputFolder = "C:\\Users\\tim-z\\Desktop\\temp";
cv::String inputFolder = "C:\\Users\\tim-z\\Desktop\\Studium\\02_Master MSY\\2. Semester Winter 2022 2023\\Projekt\\Repo\\Line-Following-Robot\\AutonomousMode\\Test_data";
std::vector<cv::String> filenames;
cv::glob(inputFolder, filenames);
// Process every globbed image; the input files are expected to be named image1.jpeg, image2.jpeg, ...
for(size_t i = 0; i < filenames.size(); i++)
{
std::string current_output = outputFolder + "\\" + to_string(i);
std::cout << current_output << std::endl;
const char* current_output_char = current_output.c_str();
_mkdir(current_output_char);
std::string inputFile = inputFolder + "\\image" + to_string(i+1) + ".jpeg";
Mat original_image = input.readFile(inputFile);
imwrite(current_output + "\\00_input.jpg", original_image);
// Restrict processing to a narrow horizontal band of the frame
Point roiOrigin(0, original_image.rows*(7.5/12.0));
Rect roi = Rect(roiOrigin.x, roiOrigin.y, original_image.cols, original_image.rows/12);
Mat image = original_image(roi);
imwrite(current_output + "\\01_roi.jpg", image);
cvtColor(image, image, COLOR_BGR2GRAY);
imwrite(current_output + "\\02_color_convert.jpg", image);
GaussianBlur(image, image, Size(gaussKernelSize, gaussKernelSize), 0);
imwrite(current_output + "\\03_gauss.jpg", image);
threshold(image, image, thresholdBinary, 255, THRESH_BINARY);
imwrite(current_output + "\\04_threshold.jpg", image);
// Opening (reduces noise)
Mat kernel = Mat::ones(5, 5, CV_8UC1);
morphologyEx(image, image, MORPH_OPEN, kernel);
imwrite(current_output + "\\05_opening.jpg", image);
//Canny(image, image, thresholdCanny1, thresholdCanny2, apertureSizeCanny);
//imwrite(outputFolder + "\\06_canny.jpg", image);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
vector<Rect> vectorOfRects;
vector<Point> vectorOfLeftEdges;
findContours(image, contours, hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE);
int amountOfValidRects = 0;
for(size_t c = 0; c < contours.size(); c++) // iterate through each contour
{
double area = contourArea(contours[c], false); // find the area of the contour
if(area > 3500)
{
drawContours(original_image, contours, static_cast<int>(c), Scalar(0,255,255), 1, 8, hierarchy, 0, roiOrigin);
Rect currentBoundingRect = boundingRect(contours[c]);
// Handle the ROI offset: shift the box back into full-image coordinates
currentBoundingRect.x += roiOrigin.x;
currentBoundingRect.y += roiOrigin.y;
vectorOfRects.push_back(currentBoundingRect);
rectangle(original_image, currentBoundingRect, Scalar(0,255,0));
// Get the mid-point of the rect
Point midRect = Point(currentBoundingRect.x + currentBoundingRect.width/2, currentBoundingRect.y + currentBoundingRect.height/2);
// Draw the middle as a small rect instead of a circle because, for whatever reason, drawing a circle doesn't work.
Rect testRect(Point(midRect.x-2, midRect.y-2), Point(midRect.x+2, midRect.y+2));
rectangle(original_image, testRect, Scalar(0,0,255));
// Get the left edge of the rect; it is used as an offset because the Raspicam
// is not mounted at the middle of the Regbot
Point leftEdge(currentBoundingRect.x, currentBoundingRect.y + currentBoundingRect.height/2);
vectorOfLeftEdges.push_back(leftEdge);
testRect = Rect(Point(leftEdge.x-2, leftEdge.y-2), Point(leftEdge.x+2, leftEdge.y+2));
rectangle(original_image, testRect, Scalar(0,0,255));
amountOfValidRects++;
}
}
imwrite(current_output + "\\06_contours.jpg", original_image);
}
}
int main(void)
{
//Disable opencv logging messages
//cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_WARNING);
const int thresholdBinary = 140;
const int videoHeight = 720;
static double calcDistance(Point p0, Point p1);
vector<Vec4i> findMiddleLine(vector<Vec4i> &lines);
};
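// FrameData bundles the per-frame results of Processing::calculateLineSegments(): the
// surviving contours plus, for each of them, its bounding box (already shifted into
// full-image coordinates), its centre point and its left edge point.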
class FrameData
{
public:
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Rect> boundingBoxes;
std::vector<cv::Point> leftEdges;
std::vector<cv::Point> middlePoints;
FrameData(): contours(), boundingBoxes(), leftEdges(), middlePoints() {}
};
cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_WARNING);
const int thresholdBinary = 140;
const int videoHeight = 720;
const int videoWidth = 1280;
const int gaussKernelSize = 21;
const int thresholdCanny1 = 50;
const int thresholdCanny2 = 100;
const int apertureSizeCanny = 3;
LFR lfr(videoHeight, videoWidth, thresholdBinary, gaussKernelSize, thresholdCanny1, thresholdCanny2, apertureSizeCanny);
lfr.saveOutputFlag = true;
lfr.outputFileName = "/home/pi/Line-Following-Robot/AutonomousMode/tmp/test.jpg";
lfr.startLoop();
//To end the video stream, write any char in the console.
char a;
std::cin >> a;
lfr.endLoop();
}
LFR::LFR(int videoHeight, int videoWidth, int thresholdBinary, int gaussKernelSize, int thresholdCanny1, int thresholdCanny2, int apertureSizeCanny)
: iAmLooping(false), input(videoHeight, videoWidth), processing(), controlModule(), interpreter(), intersectionHandler(), roi()
{
this->iAmLooping = false;
this->thresholdBinary = thresholdBinary;
this->thresholdCanny1 = thresholdCanny1;
this->thresholdCanny2 = thresholdCanny2;
this->apertureSizeCanny = apertureSizeCanny;
this->videoFlag = false;
this->saveOutputFlag = false;
this->outputFileName = "";
cv::Point roiOrigin(0, videoHeight*(7.5/12.0));
roi = Rect(roiOrigin.x, roiOrigin.y, videoWidth, videoHeight/12);
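// Example: for a 1280x720 capture this puts the ROI origin at y = 720 * 7.5/12 = 450
// with a height of 720/12 = 60 pixels, i.e. roi = Rect(0, 450, 1280, 60).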
}
LFR::~LFR()
void LFR::loop()
{
if(this->videoFlag) {namedWindow("Display window");}
while(iAmLooping)
{
Mat originalImage = input.readWebcam();
// Clone so the processing steps do not overwrite the original frame
// (a plain Mat assignment would only copy the header and share the pixel data)
Mat processedImage = originalImage.clone();
processing.processImage(processedImage, this->thresholdBinary, this->gaussKernelSize, this->thresholdCanny1, thresholdCanny2, this->apertureSizeCanny);
FrameData data = processing.calculateLineSegments(processedImage, this->roi);
this->provideOutput(processedImage);
}
if(this->videoFlag) {destroyWindow("Display window");}
input.freeWebcam();
}
iAmLooping = false;
this->loopThread.join();
return;
}
void LFR::provideOutput(const Mat& image)
{
if(this->videoFlag)
{
imshow("Display window", image);
char c = (char)waitKey(1);
}
if (this->saveOutputFlag && !(this->outputFileName.empty()))
{
imwrite(this->outputFileName, image);
}
}
int thresholdCanny1;
int thresholdCanny2;
int apertureSizeCanny;
cv::Rect roi;
void provideOutput(const Mat& image);
public:
void startLoop();
void endLoop();
bool videoFlag;
bool saveOutputFlag;
std::string outputFileName;
};