-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcvcam.cpp
More file actions
173 lines (141 loc) · 5.24 KB
/
cvcam.cpp
File metadata and controls
173 lines (141 loc) · 5.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/videodev2.h>

#include <cerrno>
#include <cstring> // strerror, memset
#include <iostream>
#include <string>

#include <opencv2/opencv.hpp>
#include <opencv2/core/utility.hpp> // CommandLineParser
#define VID_WIDTH 640
#define VID_HEIGHT 480
// Debug helper: dump a matrix' geometry (cols*rows*bytes-per-element)
// and its OpenCV type id to stdout, prefixed with a caller-chosen label.
void printSize(const std::string &name, const cv::Mat &m) {
    const int cols = m.cols;
    const int rows = m.rows;
    std::cout << name << " size: " << cols << "*" << rows << "*"
              << m.elemSize() << " type " << m.type() << "\n";
}
int
main(int argc, char *argv[]) {
using namespace cv;
const char* param_spec =
"{ help h | | Print usage }"
"{ input | /dev/video0 | Video device for primary video stream input }"
"{ output | /dev/video5 | Video device for the output stream. Can be created with the v4l2loopback kernel module. }"
"{ image | | Image that replaces the background removed from the input stream }"
"{ learningSecs | 5 | Number of seconds learning the background }"
;
CommandLineParser params{argc, argv, param_spec};
params.about("This tool replaces the static background from a video stream with an image.\n");
if (params.has("help")) {
params.printMessage();
return 0;
}
std::string inputFile = params.get<String>("input");
if (inputFile.size() == 0) {
std::cerr << "Missing parameter: input device\n";
return 1;
}
std::string outputFile = params.get<String>("output");
if (outputFile.size() == 0) {
std::cerr << "Missing parameter: output device\n";
return 2;
}
std::cout << "Opening input stream: " << inputFile << "\n";
VideoCapture cam(inputFile.c_str());
if (!cam.isOpened()) {
std::cerr << "ERROR: Could not open input stream " << inputFile << ".\n";
return 3;
}
//cam.set(CAP_PROP_POS_FRAMES, 0); // Set index to 0 (start frame, just in case it is a file)
cam.set(CAP_PROP_FRAME_WIDTH, VID_WIDTH);
cam.set(CAP_PROP_FRAME_HEIGHT, VID_HEIGHT);
cam.set(CAP_PROP_AUTO_WB, false);
// open output device
std::cout << "Opening output stream: " << outputFile << "\n";
int output = open(outputFile.c_str(), O_RDWR);
if(output < 0) {
std::cerr << "ERROR: Could not open output stream " << outputFile << ": " << strerror(errno) << "\n";
return 4;
}
size_t framesize = VID_WIDTH * VID_HEIGHT * 3;
struct v4l2_format vid_format;
memset(&vid_format, 0, sizeof(vid_format));
vid_format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
if (ioctl(output, VIDIOC_G_FMT, &vid_format) < 0) {
std::cerr << "ERROR: Unable to get video format: " << strerror(errno) << "\n";
return 5;
}
// configure desired video format on device
vid_format.fmt.pix.width = cam.get(CAP_PROP_FRAME_WIDTH);
vid_format.fmt.pix.height = cam.get(CAP_PROP_FRAME_HEIGHT);
vid_format.fmt.pix.pixelformat = V4L2_PIX_FMT_RGB24;
vid_format.fmt.pix.sizeimage = framesize;
vid_format.fmt.pix.field = V4L2_FIELD_NONE;
if (ioctl(output, VIDIOC_S_FMT, &vid_format) < 0) {
std::cerr << "ERROR: Unable to set video format: " << strerror(errno) << "\n";
return 6;
}
// prepare virtual background
Mat bgImage{VID_HEIGHT, VID_WIDTH, CV_8UC3, {0,255,0}}; // green background
std::string imageFile = params.get<String>("image");
if (imageFile.size()) {
std::cerr << "Loading image: " << imageFile << "\n";
Size size = bgImage.size();
bgImage = imread(imageFile);
if (bgImage.empty()) {
std::cerr << "Unable to read image from " << imageFile << "\n";
return 10;
}
resize(bgImage, bgImage, size, 0, 0, INTER_LINEAR);
}
Ptr<BackgroundSubtractor> pBackSub = createBackgroundSubtractorMOG2(1, 16, true);
Mat mask;
struct timeval tv{0, 0};
gettimeofday(&tv, NULL);
long sec = tv.tv_sec;
unsigned long frames = 0;
double learningRate = 0.2;
unsigned long learningSecs = params.get<unsigned long>("learningSecs");
for (int key = 0; key != 27 /* ESC */; key = waitKey(10)) {
Mat frame;
cam >> frame;
if (frame.empty()) {
std::cerr << "Empty frame.\n";
break;
}
++frames;
/*
if (frame.size() != bgImage.size()) {
resize(frame, frame, bgImage.size(), 0, 0, INTER_LINEAR);
}
*/
pBackSub->apply(frame, mask, learningRate);
//printSize("frame ", frame);
//printSize("mask ", mask);
Mat bwMask;
threshold(mask, bwMask, 204, 255, THRESH_BINARY);
//printSize("bwMask ", bwMask);
//printSize("bgImage ", bgImage);
Mat outFrame;
bgImage.copyTo(outFrame);
copyTo(frame, outFrame, bwMask);
imshow("outFrame", outFrame);
size_t written = write(output, outFrame.data, framesize);
if (written < 0) {
close(output);
std::cerr << "ERROR: Could not write to output device: " << strerror(errno) << "\n";
return 7;
}
gettimeofday(&tv, NULL);
long sec2 = tv.tv_sec;
if (sec2 > sec) {
std::cout << "\rFPS: " << frames << " " << std::flush;
frames = 0;
sec = sec2;
if (learningSecs > 0) {
--learningSecs;
} else {
learningRate = 0.0;
}
}
} // main loop
return 0;
}