Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions Examples/Monocular-Inertial/gopro_slam.cc
Original file line number Diff line number Diff line change
Expand Up @@ -226,9 +226,9 @@ int main(int argc, char **argv) {
}

// apply mask image if loaded
if (!mask_img.empty()) {
im_track.setTo(cv::Scalar(0,0,0), mask_img);
}
// if (!mask_img.empty()) {
// im_track.setTo(cv::Scalar(0,0,0), mask_img);
// }

// gather imu measurements between frames
// Load imu measurements from previous frame
Expand All @@ -245,8 +245,8 @@ int main(int argc, char **argv) {
std::chrono::steady_clock::now();

// Pass the image to the SLAM system
auto result = SLAM.LocalizeMonocular(im_track, tframe, vImuMeas);

// auto result = SLAM.LocalizeMonocular(im_track, tframe, vImuMeas);
auto result = SLAM.LocalizeMonocular(im_track, mask_img, tframe, vImuMeas);
// check lost frames
if (! result.second){
n_lost_frames += 1;
Expand Down
2 changes: 2 additions & 0 deletions include/Frame.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ class Frame
// Constructor for Monocular cameras.
Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, GeometricCamera* pCamera, cv::Mat &distCoef, const float &bf, const float &thDepth, Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib(), cv::Ptr<cv::aruco::Dictionary> aruco_dict=cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50));

Frame(const cv::Mat &imGray,const cv::Mat& mask, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, GeometricCamera* pCamera, cv::Mat &distCoef, const float &bf, const float &thDepth, Frame* pPrevF = static_cast<Frame*>(NULL), const IMU::Calib &ImuCalib = IMU::Calib(), cv::Ptr<cv::aruco::Dictionary> aruco_dict=cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50));

// Destructor
// ~Frame();

Expand Down
2 changes: 2 additions & 0 deletions include/System.h
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,8 @@ class System
// Returns the camera pose (empty if tracking fails).
std::pair<Sophus::SE3f, bool> LocalizeMonocular(const cv::Mat &im, const double &timestamp, const vector<IMU::Point>& vImuMeas = vector<IMU::Point>(), string filename="");

std::pair<Sophus::SE3f, bool> LocalizeMonocular(const cv::Mat &im,const cv::Mat &mask, const double &timestamp, const vector<IMU::Point>& vImuMeas= vector<IMU::Point>(), string filename="");

// This stops local mapping thread (map building) and performs only camera tracking.
void ActivateLocalizationMode();
// This resumes local mapping thread and performs SLAM again.
Expand Down
1 change: 1 addition & 0 deletions include/Tracking.h
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ class Tracking
Sophus::SE3f GrabImageStereo(const cv::Mat &imRectLeft,const cv::Mat &imRectRight, const double &timestamp, string filename);
Sophus::SE3f GrabImageRGBD(const cv::Mat &imRGB,const cv::Mat &imD, const double &timestamp, string filename);
Sophus::SE3f GrabImageMonocular(const cv::Mat &im, const double &timestamp, string filename);
Sophus::SE3f GrabImageMonocular(const cv::Mat &im, const cv::Mat &mask, const double &timestamp, string filename);

void GrabImuData(const IMU::Point &imuMeasurement);

Expand Down
111 changes: 111 additions & 0 deletions src/Frame.cc
Original file line number Diff line number Diff line change
Expand Up @@ -342,6 +342,117 @@ Frame::Frame(const cv::Mat &imGray, const double &timeStamp, ORBextractor* extra
}


// Monocular(-inertial) constructor with feature masking: after ORB
// extraction, keypoints that fall on mask pixels equal to 255 are discarded
// so that features on masked regions (e.g. the capturing vehicle, lens
// artifacts) never enter tracking.
// NOTE(review): assumes `mask` (when non-empty) is a CV_8UC1 image with the
// same resolution as `imGray` — confirm against the caller.
Frame::Frame(const cv::Mat &imGray,const cv::Mat& mask, const double &timeStamp, ORBextractor* extractor,ORBVocabulary* voc, GeometricCamera* pCamera, cv::Mat &distCoef, const float &bf, const float &thDepth, Frame* pPrevF, const IMU::Calib &ImuCalib, cv::Ptr<cv::aruco::Dictionary> aruco_dict)
    :mpcpi(NULL),mpORBvocabulary(voc),mpORBextractorLeft(extractor),mpORBextractorRight(static_cast<ORBextractor*>(NULL)),
     mTimeStamp(timeStamp), mK(static_cast<Pinhole*>(pCamera)->toK()), mK_(static_cast<Pinhole*>(pCamera)->toK_()), mDistCoef(distCoef.clone()), mbf(bf), mThDepth(thDepth),
     mImuCalib(ImuCalib), mpImuPreintegrated(NULL),mpPrevFrame(pPrevF),mpImuPreintegratedFrame(NULL), mpReferenceKF(static_cast<KeyFrame*>(NULL)), mbIsSet(false), mbImuPreintegrated(false), mpCamera(pCamera),
     mpCamera2(nullptr), mbHasPose(false), mbHasVelocity(false)
{
    // Frame ID
    mnId=nNextId++;

    // Scale Level Info
    mnScaleLevels = mpORBextractorLeft->GetLevels();
    mfScaleFactor = mpORBextractorLeft->GetScaleFactor();
    mfLogScaleFactor = log(mfScaleFactor);
    mvScaleFactors = mpORBextractorLeft->GetScaleFactors();
    mvInvScaleFactors = mpORBextractorLeft->GetInverseScaleFactors();
    mvLevelSigma2 = mpORBextractorLeft->GetScaleSigmaSquares();
    mvInvLevelSigma2 = mpORBextractorLeft->GetInverseScaleSigmaSquares();

    // ArUcO detection (markers are detected on the full, unmasked image)
    cv::Ptr<cv::aruco::DetectorParameters> parameters = cv::aruco::DetectorParameters::create();
    cv::aruco::detectMarkers(imGray, aruco_dict, markerCorners, markerIds, parameters, rejectedCandidates);

    // ORB extraction
    ExtractORB(0,imGray,0,1000);

    N = mvKeys.size();
    if(mvKeys.empty())
        return;

    // Drop keypoints lying on masked-out (value 255) pixels.
    // Fix: guard against an empty mask — cv::Mat::at on an empty Mat is
    // undefined behavior; with no mask, all keypoints are kept.
    if(!mask.empty())
    {
        std::vector<cv::KeyPoint> vKeptKeys;
        cv::Mat keptDescriptors;
        vKeptKeys.reserve(mvKeys.size());

        for (size_t i = 0; i < mvKeys.size(); ++i)
        {
            // Truncate the (sub-pixel) keypoint position to the containing
            // mask pixel, matching the original implicit float->int cast.
            const int row = static_cast<int>(mvKeys[i].pt.y);
            const int col = static_cast<int>(mvKeys[i].pt.x);
            if(static_cast<int>(mask.at<uchar>(row,col)) != 255)
            {
                vKeptKeys.push_back(mvKeys[i]);
                keptDescriptors.push_back(mDescriptors.row(static_cast<int>(i)));
            }
        }

        mvKeys = vKeptKeys;
        mDescriptors = keptDescriptors;
    }

    N = mvKeys.size();

    if(mvKeys.empty())
        return;

    UndistortKeyPoints();

    // Set no stereo information
    mvuRight = vector<float>(N,-1);
    mvDepth = vector<float>(N,-1);
    mnCloseMPs = 0;

    mvpMapPoints = vector<MapPoint*>(N,static_cast<MapPoint*>(NULL));

    mmProjectPoints.clear();
    mmMatchedInImage.clear();

    mvbOutlier = vector<bool>(N,false);

    // This is done only for the first Frame (or after a change in the calibration)
    if(mbInitialComputations)
    {
        ComputeImageBounds(imGray);

        mfGridElementWidthInv=static_cast<float>(FRAME_GRID_COLS)/static_cast<float>(mnMaxX-mnMinX);
        mfGridElementHeightInv=static_cast<float>(FRAME_GRID_ROWS)/static_cast<float>(mnMaxY-mnMinY);

        fx = static_cast<Pinhole*>(mpCamera)->toK().at<float>(0,0);
        fy = static_cast<Pinhole*>(mpCamera)->toK().at<float>(1,1);
        cx = static_cast<Pinhole*>(mpCamera)->toK().at<float>(0,2);
        cy = static_cast<Pinhole*>(mpCamera)->toK().at<float>(1,2);
        invfx = 1.0f/fx;
        invfy = 1.0f/fy;

        mbInitialComputations=false;
    }

    mb = mbf/fx;

    //Set no stereo fisheye information
    Nleft = -1;
    Nright = -1;
    mvLeftToRightMatch = vector<int>(0);
    mvRightToLeftMatch = vector<int>(0);
    mvStereo3Dpoints = vector<Eigen::Vector3f>(0);
    monoLeft = -1;
    monoRight = -1;

    AssignFeaturesToGrid();

    // Propagate the previous frame's velocity estimate when available.
    if(pPrevF)
    {
        if(pPrevF->HasVelocity())
        {
            SetVelocity(pPrevF->GetVelocity());
        }
    }
    else
    {
        mVw.setZero();
    }

    mpMutexImu = new std::mutex();
}

void Frame::AssignFeaturesToGrid()
{
// Fill matrix with points
Expand Down
80 changes: 80 additions & 0 deletions src/System.cc
Original file line number Diff line number Diff line change
Expand Up @@ -518,6 +518,86 @@ std::pair<Sophus::SE3f, bool> System::LocalizeMonocular(const cv::Mat &im, const
return std::make_pair(Tcw, has_tracking);
}

// Localize a monocular(-inertial) frame while suppressing ORB features in
// masked image regions.
// @param im        input image (color or gray).
// @param mask      feature-suppression mask forwarded to the tracker
//                  (pixels == 255 are excluded from feature extraction).
// @param timestamp frame timestamp in seconds.
// @param vImuMeas  IMU measurements since the previous frame (IMU mode only).
// @param filename  optional source-image name stored in the frame.
// @return camera pose and a flag that is true while tracking is OK or
//         RECENTLY_LOST.
std::pair<Sophus::SE3f, bool> System::LocalizeMonocular(const cv::Mat &im,const cv::Mat &mask, const double &timestamp, const vector<IMU::Point>& vImuMeas, string filename)
{

    {
        unique_lock<mutex> lock(mMutexReset);
        if(mbShutDown)
            return std::make_pair(Sophus::SE3f(), false);
    }

    if(mSensor!=MONOCULAR && mSensor!=IMU_MONOCULAR)
    {
        // Fix: the message previously named TrackMonocular, which is not the
        // function the user called.
        cerr << "ERROR: you called LocalizeMonocular but input sensor was not set to Monocular nor Monocular-Inertial." << endl;
        exit(-1);
    }

    cv::Mat imToFeed = im.clone();
    if(settings_ && settings_->needToResize()){
        cv::Mat resizedIm;
        cv::resize(im,resizedIm,settings_->newImSize());
        imToFeed = resizedIm;
    }

    // Check mode change
    {
        unique_lock<mutex> lock(mMutexMode);
        if(mbActivateLocalizationMode)
        {
            mpLocalMapper->RequestStop();

            // Wait until Local Mapping has effectively stopped
            while(!mpLocalMapper->isStopped())
            {
                usleep(1000);
            }

            mpTracker->InformOnlyTracking(true);
            mbActivateLocalizationMode = false;
        }
        if(mbDeactivateLocalizationMode)
        {
            mpTracker->InformOnlyTracking(false);
            mpLocalMapper->Release();
            mbDeactivateLocalizationMode = false;
        }
    }

    // Check reset
    {
        unique_lock<mutex> lock(mMutexReset);
        if(mbReset)
        {
            mpTracker->Reset();
            mbReset = false;
            mbResetActiveMap = false;
        }
        else if(mbResetActiveMap)
        {
            cout << "SYSTEM-> Reseting active map in monocular case" << endl;
            mpTracker->ResetActiveMap();
            mbResetActiveMap = false;
        }
    }

    // Feed buffered IMU measurements to the tracker before the image.
    if (mSensor == System::IMU_MONOCULAR)
        for(size_t i_imu = 0; i_imu < vImuMeas.size(); i_imu++)
            mpTracker->GrabImuData(vImuMeas[i_imu]);

    Sophus::SE3f Tcw = mpTracker->GrabImageMonocular(imToFeed,mask,timestamp,filename);
    int trackingState = mpTracker->mState;
    // has tracking for OK (2) and RECENTLY_LOST (3) states.
    bool has_tracking = (trackingState == 2) || (trackingState == 3);

    unique_lock<mutex> lock2(mMutexState);
    mTrackingState = mpTracker->mState;
    mTrackedMapPoints = mpTracker->mCurrentFrame.mvpMapPoints;
    mTrackedKeyPointsUn = mpTracker->mCurrentFrame.mvKeysUn;

    return std::make_pair(Tcw, has_tracking);
}

void System::ActivateLocalizationMode()
{
Expand Down
50 changes: 50 additions & 0 deletions src/Tracking.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1202,6 +1202,56 @@ Sophus::SE3f Tracking::GrabImageMonocular(const cv::Mat &im, const double &times
return mCurrentFrame.GetPose();
}

// Monocular frame grabber that additionally takes a feature-suppression
// mask. Converts the input to grayscale, builds the current Frame with
// masked ORB extraction, and runs the main Track() step.
// @return the estimated camera pose of the current frame.
Sophus::SE3f Tracking::GrabImageMonocular(const cv::Mat &im,const cv::Mat &mask, const double &timestamp, string filename)
{
    mImGray = im;

    // Normalize the input to single-channel grayscale, honouring the
    // configured RGB/BGR channel order.
    if(mImGray.channels()==3)
    {
        if(mbRGB)
            cvtColor(mImGray,mImGray,cv::COLOR_RGB2GRAY);
        else
            cvtColor(mImGray,mImGray,cv::COLOR_BGR2GRAY);
    }
    else if(mImGray.channels()==4)
    {
        if(mbRGB)
            cvtColor(mImGray,mImGray,cv::COLOR_RGBA2GRAY);
        else
            cvtColor(mImGray,mImGray,cv::COLOR_BGRA2GRAY);
    }

    if (mSensor == System::MONOCULAR)
    {
        // Fix: the mask was previously ignored in the pure-monocular path;
        // forward it so feature masking behaves the same as in the
        // monocular-inertial path below.
        if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET ||(lastID - initID) < mMaxFrames)
            mCurrentFrame = Frame(mImGray,mask,timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
        else
            mCurrentFrame = Frame(mImGray,mask,timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth);
    }
    else if(mSensor == System::IMU_MONOCULAR)
    {
        // Use the richer initial extractor until the system is initialized.
        if(mState==NOT_INITIALIZED || mState==NO_IMAGES_YET)
        {
            mCurrentFrame = Frame(mImGray,mask,timestamp,mpIniORBextractor,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib, maruco_dict);
        }
        else{
            mCurrentFrame = Frame(mImGray,mask,timestamp,mpORBextractorLeft,mpORBVocabulary,mpCamera,mDistCoef,mbf,mThDepth,&mLastFrame,*mpImuCalib, maruco_dict);
        }
    }

    // Remember when the very first image arrived (IMU initialization origin).
    if (mState==NO_IMAGES_YET)
        t0=timestamp;

    mCurrentFrame.mNameFile = filename;
    mCurrentFrame.mnDataset = mnNumDataset;

    lastID = mCurrentFrame.mnId;

    Track();

    return mCurrentFrame.GetPose();
}

void Tracking::GrabImuData(const IMU::Point &imuMeasurement)
{
Expand Down