// opencv3.2学习 (OpenCV 3.2 study notes) — a collection of small demo
// programs pasted from a blog post; each section is a standalone program.

#include”opencv2/highgui/highgui.hpp”%一个小demo
#include”opencv2/imgproc/imgproc.hpp”
using namespace cv;
int main()
{
Mat picture = imread(“1.jpg”);//图片必须添加到工程目录下
//也就是和test.cpp文件放在一个文件夹下!!!
imshow(“测试程序”, picture);
Mat ppp;
blur(picture, ppp, Size(7, 7));%要注意Size的大小写
imshow(“模糊后后”,ppp);
waitKey(20150901);
}

//人脸识别

//main.cpp

#include “FaceRecognition.h”
int main()
{
char* path = “1.png”;
cv::Mat img = cv::imread(path);
FaceRecognition* recog = new FaceRecognition();
recog->setImg(img);
recog->recognition();

cvWaitKey();
return 0;
}

//FaceRecognition.h

#ifndef FACERECOGNITION_H_INCLUDED
#define FACERECOGNITION_H_INCLUDED

#include <opencv2/opencv.hpp>

// Detects faces in an image with a Haar cascade and draws a magenta
// ellipse around each detection, then shows the annotated image.
// Header-only: the member functions below are marked `inline` so this
// header can be included from several translation units without ODR
// violations (the original defined them non-inline in the header).
class FaceRecognition
{
private:
    cv::Mat m_mImg;   // image to process; set via setImg()
    // Cascade file, resolved relative to the working directory.
    // NOTE: points at a string literal — it must never be delete'd.
    const char* face_cascade_name = "haarcascade_frontalface_alt.xml";
    // const char* eyes_cascade_name = "haarcascade_eye.xml";
    cv::CascadeClassifier face_cascade;
    // cv::CascadeClassifier eyes_cascade;

public:
    FaceRecognition();
    ~FaceRecognition();
    void recognition();
    void setImg(cv::Mat mat);
};

inline FaceRecognition::FaceRecognition() {}

// Nothing to release: face_cascade_name is a string literal, and cv::Mat /
// cv::CascadeClassifier clean up after themselves.  (The original called
// `delete` on the literal, which is undefined behaviour.)
inline FaceRecognition::~FaceRecognition() {}

// Store the image to process (cv::Mat copies share the pixel buffer).
inline void FaceRecognition::setImg(cv::Mat mat)
{
    this->m_mImg = mat;
}

// Load the cascade, detect faces on an equalized grayscale copy,
// draw one ellipse per face, then display the result once.
inline void FaceRecognition::recognition()
{
    //-- 1. Load the cascades
    if (!face_cascade.load(face_cascade_name))
    {
        printf("--(!)Error loading face cascade\n");
        return;
    }
    // if (!eyes_cascade.load(eyes_cascade_name)) { printf("--(!)Error loading eyes cascade\n"); return; }

    std::vector<cv::Rect> faces;
    cv::Mat img_gray;

    cvtColor(m_mImg, img_gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(img_gray, img_gray);   // improve contrast before detection

    //-- 2. Detect faces
    face_cascade.detectMultiScale(img_gray, faces, 1.1, 2,
                                  0 | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));

    for (size_t i = 0; i < faces.size(); i++)   // size_t: no signed/unsigned warning
    {
        cv::Point center(faces[i].x + faces[i].width / 2,
                         faces[i].y + faces[i].height / 2);
        ellipse(m_mImg, center,
                cv::Size(faces[i].width / 2, faces[i].height / 2),
                0, 0, 360, cv::Scalar(255, 0, 255), 4, 8, 0);

        // Eye detection inside each face ROI was disabled in the original;
        // the unused `faceROI` local has been removed along with it.
        // cv::Mat faceROI = img_gray(faces[i]);
    }

    //-- 3. Show result — once, after the loop.  In the original, a closing
    //      brace swallowed by the /* ... */ comment left imshow inside the
    //      per-face loop.
    cv::imshow("Taylor Swift Face Recognitio", m_mImg);
}

#endif // FACERECOGNITION_H_INCLUDED

// 打开摄像头 (open the camera)

// Open the default camera and show a live preview.
#include "opencv2/opencv.hpp"
// #include <cv.h>   // legacy C header; not shipped with OpenCV 3.x — not needed
using namespace cv;

int main(int argc, char** argv)
{
    VideoCapture capture(0);          // device 0 = default webcam
    if (!capture.isOpened())          // guard: no camera attached
        return -1;

    for (;;)
    {
        Mat frame;
        capture >> frame;
        if (frame.empty())            // camera unplugged / stream ended
            break;
        imshow("读取视频", frame);
        if (waitKey(30) == 27)        // ~33 fps; ESC quits
            break;
    }
    return 0;
}

//打开摄像头人脸识别

// Live face recognition: run the Haar detector on every camera frame.
#include "FaceRecognition.h"
#include "opencv2/opencv.hpp"
// #include <cv.h>   // legacy C header; not needed with OpenCV 3.x
using namespace cv;

int main()
{
    VideoCapture capture(0);
    if (!capture.isOpened())
        return -1;

    // One detector for the whole session.  The original did
    // `new FaceRecognition()` on EVERY frame and never deleted it,
    // leaking memory at frame rate.
    FaceRecognition recog;

    for (;;)
    {
        Mat frame;
        capture >> frame;
        if (frame.empty())
            break;
        recog.setImg(frame);
        recog.recognition();           // detects, draws, and shows the frame
        if (waitKey(3) == 27)          // ESC quits
            break;
    }
    return 0;
}

// 点追踪 (point tracking — Lucas-Kanade optical flow demo)

#include “opencv2/video/tracking.hpp”
#include “opencv2/imgproc.hpp”
#include “opencv2/videoio.hpp”
#include “opencv2/highgui.hpp”

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

static void help()
{
// print a welcome message, and the OpenCV version
cout << “\nThis is a demo of Lukas-Kanade optical flow lkdemo(),\n”
“Using OpenCV version ” << CV_VERSION << endl;
cout << “\nIt uses camera by default, but you can provide a path to video as an argument.\n”;
cout << “\nHot keys: \n”
“\tESC – quit the program\n”
“\tr – auto-initialize tracking\n”
“\tc – delete all the points\n”
“\tn – switch the \”night\” mode on/off\n”
“To add/remove a feature point click it\n” << endl;
}

Point2f point;
bool addRemovePt = false;

static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ )
{
if( event == EVENT_LBUTTONDOWN )
{
point = Point2f((float)x, (float)y);
addRemovePt = true;
}
}

int main( int argc, char** argv )
{
VideoCapture cap;
TermCriteria termcrit(TermCriteria::COUNT|TermCriteria::EPS,20,0.03);
Size subPixWinSize(10,10), winSize(31,31);

const int MAX_COUNT = 500;
bool needToInit = false;
bool nightMode = false;

cv::CommandLineParser parser(argc, argv, “{@input||}{help h||}”);
string input = parser.get<string>(“@input”);
if (parser.has(“help”))
{
help();
return 0;
}
if( input.empty() )
cap.open(0);
else if( input.size() == 1 && isdigit(input[0]) )
cap.open(input[0] – ‘0’);
else
cap.open(input);

if( !cap.isOpened() )
{
cout << “Could not initialize capturing…\n”;
return 0;
}

namedWindow( “LK Demo”, 1 );
setMouseCallback( “LK Demo”, onMouse, 0 );

Mat gray, prevGray, image, frame;
vector<Point2f> points[2];

for(;;)
{
cap >> frame;
if( frame.empty() )
break;

frame.copyTo(image);
cvtColor(image, gray, COLOR_BGR2GRAY);

if( nightMode )
image = Scalar::all(0);

if( needToInit )
{
// automatic initialization
goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
cornerSubPix(gray, points[1], subPixWinSize, Size(-1,-1), termcrit);
addRemovePt = false;
}
else if( !points[0].empty() )
{
vector<uchar> status;
vector<float> err;
if(prevGray.empty())
gray.copyTo(prevGray);
calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize,
3, termcrit, 0, 0.001);
size_t i, k;
for( i = k = 0; i < points[1].size(); i++ )
{
if( addRemovePt )
{
if( norm(point – points[1][i]) <= 5 )
{
addRemovePt = false;
continue;
}
}

if( !status[i] )
continue;

points[1][k++] = points[1][i];
circle( image, points[1][i], 3, Scalar(0,255,0), -1, 8);
}
points[1].resize(k);
}

if( addRemovePt && points[1].size() < (size_t)MAX_COUNT )
{
vector<Point2f> tmp;
tmp.push_back(point);
cornerSubPix( gray, tmp, winSize, Size(-1,-1), termcrit);
points[1].push_back(tmp[0]);
addRemovePt = false;
}

needToInit = false;
imshow(“LK Demo”, image);

char c = (char)waitKey(10);
if( c == 27 )
break;
switch( c )
{
case ‘r’:
needToInit = true;
break;
case ‘c’:
points[0].clear();
points[1].clear();
break;
case ‘n’:
nightMode = !nightMode;
break;
}

std::swap(points[1], points[0]);
cv::swap(prevGray, gray);
}

return 0;
}

//光流法 (optical flow; the original "流光法" was a typo)

// Farneback dense optical flow calculate and show in Munsell system of colors
// Author : Zouxy
// Date : 2013-3-15
// HomePage : http://blog.csdn.net/zouxy09
// Email : zouxy09@qq.com

// API calcOpticalFlowFarneback() comes from OpenCV, and this
// 2D dense optical flow algorithm from the following paper:
// Gunnar Farneback. “Two-Frame Motion Estimation Based on Polynomial Expansion”.
// And the OpenCV source code locate in ..\opencv2.4.3\modules\video\src\optflowgf.cpp

#include <iostream>
#include “opencv2/opencv.hpp”

using namespace cv;
using namespace std;

#define UNKNOWN_FLOW_THRESH 1e9

// Color encoding of flow vectors from:
// http://members.shaw.ca/quadibloc/other/colint.htm
// This code is modified from:
// http://vision.middlebury.edu/flow/data/
// Build the 55-entry Middlebury colour wheel used to map flow direction
// to a colour.  The wheel is six ramps between adjacent primaries:
// Red->Yellow, Yellow->Green, Green->Cyan, Cyan->Blue, Blue->Magenta,
// Magenta->Red.  Entries are appended to `colorwheel`.
void makecolorwheel(vector<Scalar> &colorwheel)
{
    // Segment lengths (number of interpolation steps per ramp).
    const int RY = 15, YG = 6, GC = 4, CB = 11, BM = 13, MR = 6;

    for (int k = 0; k < RY; ++k) colorwheel.push_back(Scalar(255, 255*k/RY, 0));
    for (int k = 0; k < YG; ++k) colorwheel.push_back(Scalar(255 - 255*k/YG, 255, 0));
    for (int k = 0; k < GC; ++k) colorwheel.push_back(Scalar(0, 255, 255*k/GC));
    for (int k = 0; k < CB; ++k) colorwheel.push_back(Scalar(0, 255 - 255*k/CB, 255));
    for (int k = 0; k < BM; ++k) colorwheel.push_back(Scalar(255*k/BM, 0, 255));
    for (int k = 0; k < MR; ++k) colorwheel.push_back(Scalar(255, 0, 255 - 255*k/MR));
}

// Convert a dense flow field (CV_32FC2) to a BGR visualisation using the
// Middlebury colour wheel: direction selects the hue, magnitude (relative
// to the largest magnitude in the field) controls the saturation.
void motionToColor(Mat flow, Mat &color)
{
    if (color.empty())
        color.create(flow.rows, flow.cols, CV_8UC3);

    static vector<Scalar> colorwheel; // built once, reused across calls
    if (colorwheel.empty())
        makecolorwheel(colorwheel);

    // determine motion range: find max flow magnitude to normalize fx and fy
    float maxrad = -1;
    for (int i = 0; i < flow.rows; ++i)
    {
        for (int j = 0; j < flow.cols; ++j)
        {
            Vec2f flow_at_point = flow.at<Vec2f>(i, j);
            float fx = flow_at_point[0];
            float fy = flow_at_point[1];
            if ((fabs(fx) > UNKNOWN_FLOW_THRESH) || (fabs(fy) > UNKNOWN_FLOW_THRESH))
                continue;   // ignore invalid vectors
            float rad = sqrt(fx * fx + fy * fy);
            maxrad = maxrad > rad ? maxrad : rad;
        }
    }
    if (maxrad <= 0)   // all-zero (or all-invalid) flow: avoid division by zero
        maxrad = 1;

    for (int i = 0; i < flow.rows; ++i)
    {
        for (int j = 0; j < flow.cols; ++j)
        {
            uchar *data = color.data + color.step[0] * i + color.step[1] * j;
            Vec2f flow_at_point = flow.at<Vec2f>(i, j);

            // BUG FIX: test validity on the RAW components.  The original
            // tested the maxrad-normalised values, which are always <= 1
            // and could never exceed UNKNOWN_FLOW_THRESH.
            if ((fabs(flow_at_point[0]) > UNKNOWN_FLOW_THRESH) ||
                (fabs(flow_at_point[1]) > UNKNOWN_FLOW_THRESH))
            {
                data[0] = data[1] = data[2] = 0;   // paint invalid pixels black
                continue;
            }
            float fx = flow_at_point[0] / maxrad;
            float fy = flow_at_point[1] / maxrad;
            float rad = sqrt(fx * fx + fy * fy);

            // Angle -> fractional index into the colour wheel.
            float angle = atan2(-fy, -fx) / CV_PI;
            float fk = (angle + 1.0) / 2.0 * (colorwheel.size() - 1);
            int k0 = (int)fk;
            int k1 = (k0 + 1) % colorwheel.size();
            float f = fk - k0;
            //f = 0; // uncomment to see original color wheel

            for (int b = 0; b < 3; b++)
            {
                float col0 = colorwheel[k0][b] / 255.0;
                float col1 = colorwheel[k1][b] / 255.0;
                float col = (1 - f) * col0 + f * col1;
                if (rad <= 1)
                    col = 1 - rad * (1 - col); // increase saturation with radius
                else
                    col *= .75; // out of range
                data[2 - b] = (int)(255.0 * col);   // write in BGR order
            }
        }
    }
}

int main(int, char**)
{
VideoCapture cap;
cap.open(0);
//cap.open(“test_02.wmv”);

// if( !cap.isOpened() )
// return -1;

Mat prevgray, gray, flow, cflow, frame;
namedWindow(“flow”, 1);

Mat motion2color;

for(;;)
{
double t = (double)cvGetTickCount();

cap >> frame;
cvtColor(frame, gray, CV_BGR2GRAY);
imshow(“original”, frame);

if( prevgray.data )
{
calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
motionToColor(flow, motion2color);
imshow(“flow”, motion2color);
}
//if(waitKey(10)>=0)
// break;
std::swap(prevgray, gray);

//t = (double)cvGetTickCount() – t;
//cout << “cost time: ” << t / ((double)cvGetTickFrequency()*1000.) << endl;
waitKey(3);
}
return 0;
}

//播放视频

VideoCapture videocapture;
videocapture.open(“1.mp4”);
while(1)
{
Mat frame;
videocapture>>frame;
imshow(“video”,frame);
waitKey(30);
}

//跟踪物体

#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

Mat image;                      // current frame, annotated in place

bool backprojMode = false;      // show back projection instead of the frame
bool selectObject = false;      // true while the user is dragging a box
int trackObject = 0;            // 0: idle, -1: (re)initialise, 1: tracking
bool showHist = true;
Point origin;                   // corner where the drag started
Rect selection;                 // user-selected region of interest
int vmin = 10, vmax = 256, smin = 30;

// User draws box around object to track. This triggers CAMShift to start tracking
static void onMouse( int event, int x, int y, int, void* )
{
    if( selectObject )
    {
        // Normalise the drag rectangle and clip it to the image.
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);

        selection &= Rect(0, 0, image.cols, image.rows);
    }

    switch( event )
    {
    case EVENT_LBUTTONDOWN:
        origin = Point(x,y);
        selection = Rect(x,y,0,0);
        selectObject = true;
        break;
    case EVENT_LBUTTONUP:
        selectObject = false;
        if( selection.width > 0 && selection.height > 0 )
            trackObject = -1; // Set up CAMShift properties in main() loop
        break;
    }
}

string hot_keys =
    "\n\nHot keys: \n"
    "\tESC - quit the program\n"
    "\tc - stop the tracking\n"
    "\tb - switch to/from backprojection view\n"
    "\th - show/hide object histogram\n"
    "\tp - pause video\n"
    "To initialize tracking, select the object with mouse\n";

static void help()
{
    cout << "\nThis is a demo that shows mean-shift based tracking\n"
            "You select a color objects such as your face and it tracks it.\n"
            "This reads from video camera (0 by default, or the camera number the user enters\n"
            "Usage: \n"
            " ./camshiftdemo [camera number]\n";
    cout << hot_keys;
}

const char* keys =
{
    "{help h | | show help message}{@camera_number| 0 | camera number}"
};

int main( int argc, const char** argv )
{
    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;              // number of hue histogram bins
    float hranges[] = {0,180};   // hue range in OpenCV's HSV space
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    int camNum = parser.get<int>(0);
    cap.open(camNum);

    if( !cap.isOpened() )
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printMessage();
        return -1;
    }
    cout << hot_keys;
    namedWindow( "Histogram", 0 );
    namedWindow( "CamShift Demo", 0 );
    setMouseCallback( "CamShift Demo", onMouse, 0 );
    createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    bool paused = false;

    for(;;)
    {
        if( !paused )
        {
            cap >> frame;
            if( frame.empty() )
                break;
        }

        frame.copyTo(image);

        if( !paused )
        {
            cvtColor(image, hsv, COLOR_BGR2HSV);

            if( trackObject )
            {
                int _vmin = vmin, _vmax = vmax;

                // Mask out pixels outside the saturation/value thresholds.
                inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = {0, 0};
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);   // extract the hue plane

                if( trackObject < 0 )
                {
                    // Object has been selected by user, set up CAMShift search properties once
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, NORM_MINMAX);

                    trackWindow = selection;
                    trackObject = 1; // Don't set up again, unless user selects new ROI

                    // Render the hue histogram, one coloured bar per bin.
                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for( int i = 0; i < hsize; i++ )
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                    cvtColor(buf, buf, COLOR_HSV2BGR);

                    for( int i = 0; i < hsize; i++ )
                    {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                        rectangle( histimg, Point(i*binW,histimg.rows),
                                   Point((i+1)*binW,histimg.rows - val),
                                   Scalar(buf.at<Vec3b>(i)), -1, 8 );
                    }
                }

                // Perform CAMShift
                calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                backproj &= mask;
                RotatedRect trackBox = CamShift(backproj, trackWindow,
                                    TermCriteria( TermCriteria::EPS | TermCriteria::COUNT, 10, 1 ));
                if( trackWindow.area() <= 1 )
                {
                    // Track window collapsed: re-seed it around its last position.
                    int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                                       trackWindow.x + r, trackWindow.y + r) &
                                  Rect(0, 0, cols, rows);
                }

                if( backprojMode )
                    cvtColor( backproj, image, COLOR_GRAY2BGR );
                ellipse( image, trackBox, Scalar(0,0,255), 3, LINE_AA );
            }
        }
        else if( trackObject < 0 )
            paused = false;

        // Invert the selection rectangle while dragging, as visual feedback.
        if( selectObject && selection.width > 0 && selection.height > 0 )
        {
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }

        imshow( "CamShift Demo", image );
        imshow( "Histogram", histimg );

        char c = (char)waitKey(10);
        if( c == 27 )   // ESC
            break;
        switch(c)
        {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if( !showHist )
                destroyWindow( "Histogram" );
            else
                namedWindow( "Histogram", 1 );
            break;
        case 'p':
            paused = !paused;
            break;
        default:
            ;
        }
    }

    return 0;
}

//shi-tomasi角点检测

//————————————–【程序说明】——————————————-
// 程序说明:《OpenCV3编程入门》OpenCV3版书本配套示例程序87
// 程序描述:Shi-Tomasi角点检测示例
// 开发测试所用操作系统: Windows 7 64bit
// 开发测试所用IDE版本:Visual Studio 2010
// 开发测试所用OpenCV版本: 3.0 beta
// 2014年11月 Created by @浅墨_毛星云
// 2014年12月 Revised by @浅墨_毛星云
//————————————————————————————————

//———————————【头文件、命名空间包含部分】—————————-
// 描述:包含程序所使用的头文件和命名空间
//————————————————————————————————
#include “opencv2/highgui/highgui.hpp”
#include “opencv2/imgproc/imgproc.hpp”
#include <iostream>
using namespace cv;
using namespace std;

//———————————–【宏定义部分】——————————————–
// 描述:定义一些辅助宏
//———————————————————————————————-
#define WINDOW_NAME “【Shi-Tomasi角点检测】” //为窗口标题定义的宏

//———————————–【全局变量声明部分】————————————–
// 描述:全局变量声明
//———————————————————————————————–
Mat g_srcImage, g_grayImage;
int g_maxCornerNumber = 33;
int g_maxTrackbarNumber = 500;
RNG g_rng(12345);//初始化随机数生成器
//—————————–【on_GoodFeaturesToTrack( )函数】—————————-
// 描述:响应滑动条移动消息的回调函数
//———————————————————————————————-
void on_GoodFeaturesToTrack( int, void* )
{
//【1】对变量小于等于1时的处理
if( g_maxCornerNumber <= 1 ) { g_maxCornerNumber = 1; }

//【2】Shi-Tomasi算法(goodFeaturesToTrack函数)的参数准备
vector<Point2f> corners;
double qualityLevel = 0.01;//角点检测可接受的最小特征值
double minDistance = 10;//角点之间的最小距离
int blockSize = 3;//计算导数自相关矩阵时指定的邻域范围
double k = 0.04;//权重系数
Mat copy = g_srcImage.clone(); //复制源图像到一个临时变量中,作为感兴趣区域

//【3】进行Shi-Tomasi角点检测
goodFeaturesToTrack( g_grayImage,//输入图像
corners,//检测到的角点的输出向量
g_maxCornerNumber,//角点的最大数量
qualityLevel,//角点检测可接受的最小特征值
minDistance,//角点之间的最小距离
Mat(),//感兴趣区域
blockSize,//计算导数自相关矩阵时指定的邻域范围
false,//不使用Harris角点检测
k );//权重系数
//【4】输出文字信息
cout<<“\t>此次检测到的角点数量为:”<<corners.size()<<endl;

//【5】绘制检测到的角点
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{
//以随机的颜色绘制出角点
circle( copy, corners[i], r, Scalar(g_rng.uniform(0,255), g_rng.uniform(0,255),
g_rng.uniform(0,255)), -1, 8, 0 );
}

//【6】显示(更新)窗口
imshow( WINDOW_NAME, copy );
}
//———————————–【ShowHelpText( )函数】———————————-
// 描述:输出一些帮助信息
//———————————————————————————————-
static void ShowHelpText( )
{
//输出欢迎信息和OpenCV版本
printf(“\n\n\t\t\t非常感谢购买《OpenCV3编程入门》一书!\n”);
printf(“\n\n\t\t\t此为本书OpenCV3版的第87个配套示例程序\n”);
printf(“\n\n\t\t\t 当前使用的OpenCV版本为:” CV_VERSION );
printf(“\n\n —————————————————————————-\n”);
//输出一些帮助信息
printf(“\n\n\n\t欢迎来到【Shi-Tomasi角点检测】示例程序\n”);
printf(“\n\t请调整滑动条观察图像效果\n\n”);

}
//————————————–【main( )函数】—————————————–
// 描述:控制台应用程序的入口函数,我们的程序从这里开始执行
//———————————————————————————————–
int main( )
{
//【0】改变console字体颜色
system(“color 2F”);

//【0】显示帮助文字
ShowHelpText();

//【1】载入源图像并将其转换为灰度图
g_srcImage = imread(“1.png”, 1 );
cvtColor( g_srcImage, g_grayImage, COLOR_BGR2GRAY );

//【2】创建窗口和滑动条,并进行显示和回调函数初始化
namedWindow( WINDOW_NAME, WINDOW_AUTOSIZE );
createTrackbar( “最大角点数”, WINDOW_NAME, &g_maxCornerNumber, g_maxTrackbarNumber, on_GoodFeaturesToTrack );
imshow( WINDOW_NAME, g_srcImage );
on_GoodFeaturesToTrack( 0, 0 );

waitKey(0);
return(0);
}

//均值(盒式)滤波演示 — note: the code below uses blur(), a box filter, not a median filter

#include “opencv2/highgui/highgui.hpp”
#include “opencv2/imgproc/imgproc.hpp”
using namespace cv;
int main()
{
VideoCapture vv;
vv.open(0);
while(1)
{
Mat src;
vv>>src;
//Mat src=imread(“1.png”);
Mat dd;
blur(src,dd,Size(7,7));
imshow(“before blur”,src);
imshow(“blur”,dd);
waitKey(30);
}
}

//连续自适应的meanshift算法

// NOTE: this is a verbatim duplicate of the camshift demo earlier in this
// pasted file; it cannot coexist with it in one translation unit.
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

Mat image;                      // current frame, annotated in place

bool backprojMode = false;      // show back projection instead of the frame
bool selectObject = false;      // true while the user is dragging a box
int trackObject = 0;            // 0: idle, -1: (re)initialise, 1: tracking
bool showHist = true;
Point origin;                   // corner where the drag started
Rect selection;                 // user-selected region of interest
int vmin = 10, vmax = 256, smin = 30;

// User draws box around object to track. This triggers CAMShift to start tracking
static void onMouse( int event, int x, int y, int, void* )
{
    if( selectObject )
    {
        // Normalise the drag rectangle and clip it to the image.
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);

        selection &= Rect(0, 0, image.cols, image.rows);
    }

    switch( event )
    {
    case EVENT_LBUTTONDOWN:
        origin = Point(x,y);
        selection = Rect(x,y,0,0);
        selectObject = true;
        break;
    case EVENT_LBUTTONUP:
        selectObject = false;
        if( selection.width > 0 && selection.height > 0 )
            trackObject = -1; // Set up CAMShift properties in main() loop
        break;
    }
}

string hot_keys =
    "\n\nHot keys: \n"
    "\tESC - quit the program\n"
    "\tc - stop the tracking\n"
    "\tb - switch to/from backprojection view\n"
    "\th - show/hide object histogram\n"
    "\tp - pause video\n"
    "To initialize tracking, select the object with mouse\n";

static void help()
{
    cout << "\nThis is a demo that shows mean-shift based tracking\n"
            "You select a color objects such as your face and it tracks it.\n"
            "This reads from video camera (0 by default, or the camera number the user enters\n"
            "Usage: \n"
            " ./camshiftdemo [camera number]\n";
    cout << hot_keys;
}

const char* keys =
{
    "{help h | | show help message}{@camera_number| 0 | camera number}"
};

int main( int argc, const char** argv )
{
    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;              // number of hue histogram bins
    float hranges[] = {0,180};   // hue range in OpenCV's HSV space
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    int camNum = parser.get<int>(0);
    cap.open(camNum);

    if( !cap.isOpened() )
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printMessage();
        return -1;
    }
    cout << hot_keys;
    namedWindow( "Histogram", 0 );
    namedWindow( "CamShift Demo", 0 );
    setMouseCallback( "CamShift Demo", onMouse, 0 );
    createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    bool paused = false;

    for(;;)
    {
        if( !paused )
        {
            cap >> frame;
            if( frame.empty() )
                break;
        }

        frame.copyTo(image);

        if( !paused )
        {
            cvtColor(image, hsv, COLOR_BGR2HSV);

            if( trackObject )
            {
                int _vmin = vmin, _vmax = vmax;

                // Mask out pixels outside the saturation/value thresholds.
                inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = {0, 0};
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);   // extract the hue plane

                if( trackObject < 0 )
                {
                    // Object has been selected by user, set up CAMShift search properties once
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, NORM_MINMAX);

                    trackWindow = selection;
                    trackObject = 1; // Don't set up again, unless user selects new ROI

                    // Render the hue histogram, one coloured bar per bin.
                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for( int i = 0; i < hsize; i++ )
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                    cvtColor(buf, buf, COLOR_HSV2BGR);

                    for( int i = 0; i < hsize; i++ )
                    {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                        rectangle( histimg, Point(i*binW,histimg.rows),
                                   Point((i+1)*binW,histimg.rows - val),
                                   Scalar(buf.at<Vec3b>(i)), -1, 8 );
                    }
                }

                // Perform CAMShift
                calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                backproj &= mask;
                RotatedRect trackBox = CamShift(backproj, trackWindow,
                                    TermCriteria( TermCriteria::EPS | TermCriteria::COUNT, 10, 1 ));
                if( trackWindow.area() <= 1 )
                {
                    // Track window collapsed: re-seed it around its last position.
                    int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                                       trackWindow.x + r, trackWindow.y + r) &
                                  Rect(0, 0, cols, rows);
                }

                if( backprojMode )
                    cvtColor( backproj, image, COLOR_GRAY2BGR );
                ellipse( image, trackBox, Scalar(0,0,255), 3, LINE_AA );
            }
        }
        else if( trackObject < 0 )
            paused = false;

        // Invert the selection rectangle while dragging, as visual feedback.
        if( selectObject && selection.width > 0 && selection.height > 0 )
        {
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }

        imshow( "CamShift Demo", image );
        imshow( "Histogram", histimg );

        char c = (char)waitKey(10);
        if( c == 27 )   // ESC
            break;
        switch(c)
        {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if( !showHist )
                destroyWindow( "Histogram" );
            else
                namedWindow( "Histogram", 1 );
            break;
        case 'p':
            paused = !paused;
            break;
        default:
            ;
        }
    }

    return 0;
}

//图像分割meanshift
//————————————–【程序说明】——————————————-
// 程序说明:《OpenCV3编程入门》OpenCV2版书本附赠示例程序09
// 程序描述:MeanShift图像分割
// 测试所用操作系统: Windows 7 64bit
// 测试所用IDE版本:Visual Studio 2010
// 测试所用OpenCV版本: 2.4.9
// 2014年11月 Revised by @浅墨_毛星云
//————————————————————————————————

//———————————【头文件、命名空间包含部分】—————————-
// 描述:包含程序所使用的头文件和命名空间
//————————————————————————————————
#include “opencv2/highgui/highgui.hpp”
#include “opencv2/core/core.hpp”
#include “opencv2/imgproc/imgproc.hpp”
#include <iostream>
using namespace cv;
using namespace std;
//————————————–【help( )函数】————————————–
// 描述:输出一些帮助信息
//———————————————————————————————-
static void help()
{
cout << “\n\t此程序演示了OpenCV中MeanShift图像分割的使用。\n”
<< “\n\t程序运行后我们可以通过3个滑动条调节分割效果。调节滑动条后可能会有些许卡顿,请耐心等待\n”
<< “\n\t3个滑动条代表的参数分别为空间窗的半径 (spatialRad)、色彩窗的半径(colorRad)、最大图像金字塔级别(maxPyrLevel)\n”
<< endl;
}
//This colors the segmentations
static void floodFillPostprocess( Mat& img, const Scalar& colorDiff=Scalar::all(1) )
{
CV_Assert( !img.empty() );
RNG rng = theRNG();
Mat mask( img.rows+2, img.cols+2, CV_8UC1, Scalar::all(0) );
for( int y = 0; y < img.rows; y++ )
{
for( int x = 0; x < img.cols; x++ )
{
if( mask.at<uchar>(y+1, x+1) == 0 )
{
Scalar newVal( rng(256), rng(256), rng(256) );
floodFill( img, mask, Point(x,y), newVal, 0, colorDiff, colorDiff );
}
}
}
}

string winName = “meanshift”;
int spatialRad, colorRad, maxPyrLevel;
Mat img, res;

static void meanShiftSegmentation( int, void* )
{
cout << “spatialRad=” << spatialRad << “; ”
<< “colorRad=” << colorRad << “; ”
<< “maxPyrLevel=” << maxPyrLevel << endl;
pyrMeanShiftFiltering( img, res, spatialRad, colorRad, maxPyrLevel );
floodFillPostprocess( res, Scalar::all(2) );
imshow( winName, res );
}

//———————————–【main( )函数】——————————————–
// 描述:控制台应用程序的入口函数,我们的程序从这里开始
//————————————————————————————————-
int main(int argc, char** argv)
{

help();

img = imread( “1.jpg” );
if( img.empty() )
return -1;
imshow(“原始图”,img);
spatialRad = 10;
colorRad = 10;
maxPyrLevel = 1;

namedWindow( winName, WINDOW_AUTOSIZE );

createTrackbar( “spatialRad”, winName, &spatialRad, 80, meanShiftSegmentation );
createTrackbar( “colorRad”, winName, &colorRad, 60, meanShiftSegmentation );
createTrackbar( “maxPyrLevel”, winName, &maxPyrLevel, 5, meanShiftSegmentation );

meanShiftSegmentation(0, 0);
waitKey();
return 0;
}

/* Blog-page residue (not code), fenced in a comment so the file can parse:

   18 thoughts on "opencv3.2学习"

     1. [off-topic spam comment about the "shale revolution" omitted]

   发表评论 (leave a reply)
*/