diff --git a/Demo.cpp b/Demo.cpp
index 152a01458862e77ce8628dc162676eefefcde86b..61561be797917b2b8dffb8f637f679df8c6e1c26 100644
--- a/Demo.cpp
+++ b/Demo.cpp
@@ -65,6 +65,9 @@ along with BGSLibrary.  If not, see <http://www.gnu.org/licenses/>.
 #include "package_bgs/sjn/SJN_MultiCueBGS.h"
 #include "package_bgs/bl/SigmaDeltaBGS.h"
 
+#include "package_bgs/pl/SuBSENSE.h"
+#include "package_bgs/pl/LOBSTER.h"
+
 int main(int argc, char **argv)
 {
   std::cout << "Using OpenCV " << CV_MAJOR_VERSION << "." << CV_MINOR_VERSION << "." << CV_SUBMINOR_VERSION << std::endl;
@@ -156,6 +159,10 @@ int main(int argc, char **argv)
   /*** BL Package (thanks to Benjamin Laugraud) ***/
   //bgs = new SigmaDeltaBGS;
 
+  /*** PL Package (thanks to Pierre-Luc) ***/
+  //bgs = new SuBSENSEBGS();
+  //bgs = new LOBSTERBGS();
+
   int key = 0;
   while(key != 'q')
   {
diff --git a/package_bgs/pl/BackgroundSubtractorLBSP.cpp b/package_bgs/pl/BackgroundSubtractorLBSP.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..642a8a5bee20f3b53177c479fa73482f4fd4bfd7
--- /dev/null
+++ b/package_bgs/pl/BackgroundSubtractorLBSP.cpp
@@ -0,0 +1,69 @@
+#include "BackgroundSubtractorLBSP.h"
+#include "DistanceUtils.h"
+#include "RandUtils.h"
+#include <iostream>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <iomanip>
+#include <exception>
+
+#ifndef SIZE_MAX
+// Fallback for pre-C++11 toolchains: __WORDSIZE is glibc-only, so also probe _WIN64 for MSVC;
+// the 64-bit literal needs the ULL suffix ('unsigned long' is 32-bit on Win64).
+# if defined(_WIN64) || (defined(__WORDSIZE) && __WORDSIZE == 64)
+#  define SIZE_MAX		(18446744073709551615ULL)
+# else
+#  define SIZE_MAX		(4294967295U)
+# endif
+#endif
+
+// local define used to determine the default median blur kernel size
+#define DEFAULT_MEDIAN_BLUR_KERNEL_SIZE (9)
+
+BackgroundSubtractorLBSP::BackgroundSubtractorLBSP(float fRelLBSPThreshold, size_t nLBSPThresholdOffset) // ctor: stores LBSP threshold params only; model buffers are allocated later in initialize()
+	:	 m_nImgChannels(0)
+		,m_nImgType(0)
+		,m_nLBSPThresholdOffset(nLBSPThresholdOffset)
+		,m_fRelLBSPThreshold(fRelLBSPThreshold)
+		,m_nTotPxCount(0)
+		,m_nTotRelevantPxCount(0)
+		,m_nFrameIndex(SIZE_MAX) // sentinel: "no frame processed yet"
+		,m_nFramesSinceLastReset(0)
+		,m_nModelResetCooldown(0)
+		,m_aPxIdxLUT(nullptr) // pixel LUTs are allocated by the derived class during initialize()
+		,m_aPxInfoLUT(nullptr)
+		,m_nDefaultMedianBlurKernelSize(DEFAULT_MEDIAN_BLUR_KERNEL_SIZE)
+		,m_bInitialized(false)
+		,m_bAutoModelResetEnabled(true)
+		,m_bUsingMovingCamera(false)
+		,nDebugCoordX(0),nDebugCoordY(0) {
+	CV_Assert(m_fRelLBSPThreshold>=0); // a negative relative threshold makes no sense
+}
+
+BackgroundSubtractorLBSP::~BackgroundSubtractorLBSP() {}
+
+void BackgroundSubtractorLBSP::initialize(const cv::Mat& oInitImg) {
+	this->initialize(oInitImg,cv::Mat());
+}
+
+cv::AlgorithmInfo* BackgroundSubtractorLBSP::info() const {
+	return nullptr;
+}
+
+cv::Mat BackgroundSubtractorLBSP::getROICopy() const {
+	return m_oROI.clone();
+}
+
+void BackgroundSubtractorLBSP::setROI(cv::Mat& oROI) {
+	LBSP::validateROI(oROI);
+	CV_Assert(cv::countNonZero(oROI)>0);
+	if(m_bInitialized) {
+		cv::Mat oLatestBackgroundImage;
+		getBackgroundImage(oLatestBackgroundImage);
+		initialize(oLatestBackgroundImage,oROI);
+	}
+	else
+		m_oROI = oROI.clone();
+}
+
+void BackgroundSubtractorLBSP::setAutomaticModelReset(bool bVal) {
+	m_bAutoModelResetEnabled = bVal;
+}
diff --git a/package_bgs/pl/BackgroundSubtractorLBSP.h b/package_bgs/pl/BackgroundSubtractorLBSP.h
new file mode 100644
index 0000000000000000000000000000000000000000..4e4290d16de78478c72b606b0010e36dbe493e97
--- /dev/null
+++ b/package_bgs/pl/BackgroundSubtractorLBSP.h
@@ -0,0 +1,85 @@
+#pragma once
+
+#include <opencv2/features2d/features2d.hpp>
+#include <opencv2/video/background_segm.hpp>
+#include "LBSP.h"
+
+/*!
+	Local Binary Similarity Pattern (LBSP)-based change detection algorithm (abstract version/base class).
+
+	For more details on the different parameters, see P.-L. St-Charles and G.-A. Bilodeau, "Improving Background
+	Subtraction using Local Binary Similarity Patterns", in WACV 2014, or G.-A. Bilodeau et al, "Change Detection
+	in Feature Space Using Local Binary Similarity Patterns", in CRV 2013.
+
+	This algorithm is currently NOT thread-safe.
+ */
+class BackgroundSubtractorLBSP : public cv::BackgroundSubtractor {
+public:
+	//! full constructor
+	BackgroundSubtractorLBSP(float fRelLBSPThreshold, size_t nLBSPThresholdOffset=0);
+	//! default destructor
+	virtual ~BackgroundSubtractorLBSP();
+	//! (re)initiaization method; needs to be called before starting background subtraction
+	virtual void initialize(const cv::Mat& oInitImg);
+	//! (re)initiaization method; needs to be called before starting background subtraction
+	virtual void initialize(const cv::Mat& oInitImg, const cv::Mat& oROI)=0;
+	//! primary model update function; the learning param is used to override the internal learning speed (ignored when <= 0)
+	virtual void operator()(cv::InputArray image, cv::OutputArray fgmask, double learningRate=0)=0;
+	//! unused, always returns nullptr
+	virtual cv::AlgorithmInfo* info() const;
+	//! returns a copy of the ROI used for descriptor extraction
+	virtual cv::Mat getROICopy() const;
+	//! sets the ROI to be used for descriptor extraction (note: this function will reinit the model and return the usable ROI)
+	virtual void setROI(cv::Mat& oROI);
+	//! turns automatic model reset on or off
+	void setAutomaticModelReset(bool);
+
+protected:
+	struct PxInfoBase {
+		int nImgCoord_Y;
+		int nImgCoord_X;
+		size_t nModelIdx;
+	};
+	//! background model ROI used for LBSP descriptor extraction (specific to the input image size)
+	cv::Mat m_oROI;
+	//! input image size
+	cv::Size m_oImgSize;
+	//! input image channel size
+	size_t m_nImgChannels;
+	//! input image type
+	int m_nImgType;
+	//! LBSP internal threshold offset value, used to reduce texture noise in dark regions
+	const size_t m_nLBSPThresholdOffset;
+	//! LBSP relative internal threshold (kept here since we don't keep an LBSP object)
+	const float m_fRelLBSPThreshold;
+	//! total number of pixels (depends on the input frame size) & total number of relevant pixels
+	size_t m_nTotPxCount, m_nTotRelevantPxCount;
+	//! current frame index, frame count since last model reset & model reset cooldown counters
+	size_t m_nFrameIndex, m_nFramesSinceLastReset, m_nModelResetCooldown;
+	//! pre-allocated internal LBSP threshold values LUT for all possible 8-bit intensities
+	size_t m_anLBSPThreshold_8bitLUT[UCHAR_MAX+1];
+	//! internal pixel index LUT for all relevant analysis regions (based on the provided ROI)
+	size_t* m_aPxIdxLUT;
+	//! internal pixel info LUT for all possible pixel indexes
+	PxInfoBase* m_aPxInfoLUT;
+	//! default kernel size for median blur post-proc filtering
+	const int m_nDefaultMedianBlurKernelSize;
+	//! specifies whether the algorithm is fully initialized or not
+	bool m_bInitialized;
+	//! specifies whether automatic model resets are enabled or not
+	bool m_bAutoModelResetEnabled;
+	//! specifies whether the camera is considered moving or not
+	bool m_bUsingMovingCamera;
+	//! copy of latest pixel intensities (used when refreshing model)
+	cv::Mat m_oLastColorFrame;
+	//! copy of latest descriptors (used when refreshing model)
+	cv::Mat m_oLastDescFrame;
+	//! the foreground mask generated by the method at [t-1]
+	cv::Mat m_oLastFGMask;
+
+public:
+	// ######## DEBUG PURPOSES ONLY ##########
+	int nDebugCoordX, nDebugCoordY;
+	std::string sDebugName;
+};
+
diff --git a/package_bgs/pl/BackgroundSubtractorLOBSTER.cpp b/package_bgs/pl/BackgroundSubtractorLOBSTER.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..78938ad347805703f1c9a975c886e32b0467eaed
--- /dev/null
+++ b/package_bgs/pl/BackgroundSubtractorLOBSTER.cpp
@@ -0,0 +1,326 @@
+#include "BackgroundSubtractorLOBSTER.h"
+#include "DistanceUtils.h"
+#include "RandUtils.h"
+#include <iostream>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <iomanip>
+
+BackgroundSubtractorLOBSTER::BackgroundSubtractorLOBSTER(	 float fRelLBSPThreshold
+															,size_t nLBSPThresholdOffset
+															,size_t nDescDistThreshold
+															,size_t nColorDistThreshold
+															,size_t nBGSamples
+															,size_t nRequiredBGSamples)
+	:	 BackgroundSubtractorLBSP(fRelLBSPThreshold,nLBSPThresholdOffset)
+		,m_nColorDistThreshold(nColorDistThreshold)
+		,m_nDescDistThreshold(nDescDistThreshold)
+		,m_nBGSamples(nBGSamples)
+		,m_nRequiredBGSamples(nRequiredBGSamples) {
+	CV_Assert(m_nRequiredBGSamples<=m_nBGSamples);
+	m_bAutoModelResetEnabled = false; // @@@@@@ not supported here for now
+}
+
+BackgroundSubtractorLOBSTER::~BackgroundSubtractorLOBSTER() { // releases the pixel LUTs allocated in initialize()
+	if(m_aPxIdxLUT) // null checks are redundant (delete[] on nullptr is a no-op) but harmless
+		delete[] m_aPxIdxLUT;
+	if(m_aPxInfoLUT)
+	    delete[] m_aPxInfoLUT;
+}
+
+void BackgroundSubtractorLOBSTER::initialize(const cv::Mat& oInitImg, const cv::Mat& oROI) { // (re)builds all model state for the given frame size/type and optional ROI
+	CV_Assert(!oInitImg.empty() && oInitImg.cols>0 && oInitImg.rows>0);
+	CV_Assert(oInitImg.isContinuous()); // raw data[] indexing below requires a continuous buffer
+	CV_Assert(oInitImg.type()==CV_8UC1 || oInitImg.type()==CV_8UC3);
+	if(oInitImg.type()==CV_8UC3) {
+		std::vector<cv::Mat> voInitImgChannels;
+		cv::split(oInitImg,voInitImgChannels);
+		if(!cv::countNonZero((voInitImgChannels[0]!=voInitImgChannels[1])|(voInitImgChannels[2]!=voInitImgChannels[1])))
+			std::cout << std::endl << "\tBackgroundSubtractorLOBSTER : Warning, grayscale images should always be passed in CV_8UC1 format for optimal performance." << std::endl;
+	}
+	cv::Mat oNewBGROI;
+	if(oROI.empty() && (m_oROI.empty() || m_oROI.size()!=oInitImg.size())) { // fix: test the STORED ROI's size -- oROI is known empty here, so 'oROI.size()!=oInitImg.size()' was always true and a previously-set ROI was silently discarded
+		oNewBGROI.create(oInitImg.size(),CV_8UC1);
+		oNewBGROI = cv::Scalar_<uchar>(UCHAR_MAX); // no ROI anywhere: default to full-frame analysis
+	}
+	else if(oROI.empty())
+		oNewBGROI = m_oROI; // reuse the ROI previously set via setROI()
+	else {
+		CV_Assert(oROI.size()==oInitImg.size() && oROI.type()==CV_8UC1);
+		CV_Assert(cv::countNonZero((oROI<UCHAR_MAX)&(oROI>0))==0); // ROI must be strictly binary (0 or 255)
+		oNewBGROI = oROI.clone();
+	}
+	LBSP::validateROI(oNewBGROI); // zeroes out border pixels where the LBSP patch would fall outside the frame
+	const size_t nROIPxCount = (size_t)cv::countNonZero(oNewBGROI);
+	CV_Assert(nROIPxCount>0);
+	m_oROI = oNewBGROI;
+	m_oImgSize = oInitImg.size();
+	m_nImgType = oInitImg.type();
+	m_nImgChannels = oInitImg.channels();
+	m_nTotPxCount = m_oImgSize.area();
+	m_nTotRelevantPxCount = nROIPxCount;
+	m_nFrameIndex = 0;
+	m_nFramesSinceLastReset = 0;
+	m_nModelResetCooldown = 0;
+	m_oLastFGMask.create(m_oImgSize,CV_8UC1);
+	m_oLastFGMask = cv::Scalar_<uchar>(0);
+	m_oLastColorFrame.create(m_oImgSize,CV_8UC((int)m_nImgChannels));
+	m_oLastColorFrame = cv::Scalar_<uchar>::all(0);
+	m_oLastDescFrame.create(m_oImgSize,CV_16UC((int)m_nImgChannels)); // one 16-bit LBSP descriptor per channel
+	m_oLastDescFrame = cv::Scalar_<ushort>::all(0);
+	m_voBGColorSamples.resize(m_nBGSamples);
+	m_voBGDescSamples.resize(m_nBGSamples);
+	for(size_t s=0; s<m_nBGSamples; ++s) {
+		m_voBGColorSamples[s].create(m_oImgSize,CV_8UC((int)m_nImgChannels));
+		m_voBGColorSamples[s] = cv::Scalar_<uchar>::all(0);
+		m_voBGDescSamples[s].create(m_oImgSize,CV_16UC((int)m_nImgChannels));
+		m_voBGDescSamples[s] = cv::Scalar_<ushort>::all(0);
+	}
+	if(m_aPxIdxLUT) // re-init safe: drop LUTs from any previous initialize() call
+		delete[] m_aPxIdxLUT;
+	if(m_aPxInfoLUT)
+	    delete[] m_aPxInfoLUT;
+	m_aPxIdxLUT = new size_t[m_nTotRelevantPxCount]; // model index -> pixel index (ROI pixels only)
+	m_aPxInfoLUT = new PxInfoBase[m_nTotPxCount]; // pixel index -> coords + model index
+	if(m_nImgChannels==1) {
+		CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width && m_oLastColorFrame.step.p[1]==1);
+		CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2);
+		for(size_t t=0; t<=UCHAR_MAX; ++t) // precompute per-intensity LBSP thresholds (halved for grayscale)
+			m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>((t*m_fRelLBSPThreshold+m_nLBSPThresholdOffset)/2);
+		for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) {
+			if(m_oROI.data[nPxIter]) {
+				m_aPxIdxLUT[nModelIter] = nPxIter;
+				m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter;
+				m_oLastColorFrame.data[nPxIter] = oInitImg.data[nPxIter];
+				const size_t nDescIter = nPxIter*2;
+				LBSP::computeGrayscaleDescriptor(oInitImg,oInitImg.data[nPxIter],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxIter]],*((ushort*)(m_oLastDescFrame.data+nDescIter)));
+				++nModelIter;
+			}
+		}
+	}
+	else { //m_nImgChannels==3
+		CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width*3 && m_oLastColorFrame.step.p[1]==3);
+		CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2);
+		for(size_t t=0; t<=UCHAR_MAX; ++t)
+			m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>(t*m_fRelLBSPThreshold+m_nLBSPThresholdOffset);
+		for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) {
+			if(m_oROI.data[nPxIter]) {
+				m_aPxIdxLUT[nModelIter] = nPxIter;
+				m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter;
+				const size_t nPxRGBIter = nPxIter*3;
+				const size_t nDescRGBIter = nPxRGBIter*2;
+				for(size_t c=0; c<3; ++c) {
+					m_oLastColorFrame.data[nPxRGBIter+c] = oInitImg.data[nPxRGBIter+c];
+					LBSP::computeSingleRGBDescriptor(oInitImg,oInitImg.data[nPxRGBIter+c],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,c,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxRGBIter+c]],((ushort*)(m_oLastDescFrame.data+nDescRGBIter))[c]);
+				}
+				++nModelIter;
+			}
+		}
+	}
+	m_bInitialized = true;
+	refreshModel(1.0f); // fill every sample slot from the init frame
+}
+
+void BackgroundSubtractorLOBSTER::refreshModel(float fSamplesRefreshFrac, bool bForceFGUpdate) { // rebuilds (a fraction of) the per-pixel sample set from the last analyzed frame
+	// == refresh
+	CV_Assert(m_bInitialized);
+	CV_Assert(fSamplesRefreshFrac>0.0f && fSamplesRefreshFrac<=1.0f);
+	const size_t nModelsToRefresh = fSamplesRefreshFrac<1.0f?(size_t)(fSamplesRefreshFrac*m_nBGSamples):m_nBGSamples; // how many sample slots to overwrite per pixel
+	const size_t nRefreshStartPos = fSamplesRefreshFrac<1.0f?rand()%m_nBGSamples:0; // random start so partial refreshes don't always hit the same slots
+	if(m_nImgChannels==1) {
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			if(bForceFGUpdate || !m_oLastFGMask.data[nPxIter]) { // skip pixels judged foreground last frame (unless forced)
+				for(size_t nCurrModelIdx=nRefreshStartPos; nCurrModelIdx<nRefreshStartPos+nModelsToRefresh; ++nCurrModelIdx) {
+					int nSampleImgCoord_Y, nSampleImgCoord_X;
+					getRandSamplePosition(nSampleImgCoord_X,nSampleImgCoord_Y,m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize); // random neighbor, kept inside the LBSP-safe border
+					const size_t nSamplePxIdx = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X;
+					if(bForceFGUpdate || !m_oLastFGMask.data[nSamplePxIdx]) {
+						const size_t nCurrRealModelIdx = nCurrModelIdx%m_nBGSamples; // wrap around the circular sample buffer
+						m_voBGColorSamples[nCurrRealModelIdx].data[nPxIter] = m_oLastColorFrame.data[nSamplePxIdx];
+						*((ushort*)(m_voBGDescSamples[nCurrRealModelIdx].data+nPxIter*2)) = *((ushort*)(m_oLastDescFrame.data+nSamplePxIdx*2));
+					}
+				}
+			}
+		}
+	}
+	else { //m_nImgChannels==3
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			if(bForceFGUpdate || !m_oLastFGMask.data[nPxIter]) {
+				for(size_t nCurrModelIdx=nRefreshStartPos; nCurrModelIdx<nRefreshStartPos+nModelsToRefresh; ++nCurrModelIdx) {
+					int nSampleImgCoord_Y, nSampleImgCoord_X;
+					getRandSamplePosition(nSampleImgCoord_X,nSampleImgCoord_Y,m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+					const size_t nSamplePxIdx = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X;
+					if(bForceFGUpdate || !m_oLastFGMask.data[nSamplePxIdx]) {
+						const size_t nCurrRealModelIdx = nCurrModelIdx%m_nBGSamples;
+						for(size_t c=0; c<3; ++c) { // copy all three channels (color byte + 16-bit descriptor each)
+							m_voBGColorSamples[nCurrRealModelIdx].data[nPxIter*3+c] = m_oLastColorFrame.data[nSamplePxIdx*3+c];
+							*((ushort*)(m_voBGDescSamples[nCurrRealModelIdx].data+(nPxIter*3+c)*2)) = *((ushort*)(m_oLastDescFrame.data+(nSamplePxIdx*3+c)*2));
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+void BackgroundSubtractorLOBSTER::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRate) {
+	CV_Assert(m_bInitialized);
+	CV_Assert(learningRate>0);
+	cv::Mat oInputImg = _image.getMat();
+	CV_Assert(oInputImg.type()==m_nImgType && oInputImg.size()==m_oImgSize);
+	CV_Assert(oInputImg.isContinuous());
+	_fgmask.create(m_oImgSize,CV_8UC1);
+	cv::Mat oCurrFGMask = _fgmask.getMat();
+	oCurrFGMask = cv::Scalar_<uchar>(0);
+	const size_t nLearningRate = (size_t)ceil(learningRate);
+	if(m_nImgChannels==1) {
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			const size_t nDescIter = nPxIter*2;
+			const int nCurrImgCoord_X = m_aPxInfoLUT[nPxIter].nImgCoord_X;
+			const int nCurrImgCoord_Y = m_aPxInfoLUT[nPxIter].nImgCoord_Y;
+			const uchar nCurrColor = oInputImg.data[nPxIter];
+			size_t nGoodSamplesCount=0, nModelIdx=0;
+			ushort nCurrInputDesc;
+			while(nGoodSamplesCount<m_nRequiredBGSamples && nModelIdx<m_nBGSamples) {
+				const uchar nBGColor = m_voBGColorSamples[nModelIdx].data[nPxIter];
+				{
+					const size_t nColorDist = L1dist(nCurrColor,nBGColor);
+					if(nColorDist>m_nColorDistThreshold/2)
+						goto failedcheck1ch;
+					LBSP::computeGrayscaleDescriptor(oInputImg,nBGColor,nCurrImgCoord_X,nCurrImgCoord_Y,m_anLBSPThreshold_8bitLUT[nBGColor],nCurrInputDesc);
+					const size_t nDescDist = hdist(nCurrInputDesc,*((ushort*)(m_voBGDescSamples[nModelIdx].data+nDescIter)));
+					if(nDescDist>m_nDescDistThreshold)
+						goto failedcheck1ch;
+					nGoodSamplesCount++;
+				}
+				failedcheck1ch:
+				nModelIdx++;
+			}
+			if(nGoodSamplesCount<m_nRequiredBGSamples)
+				oCurrFGMask.data[nPxIter] = UCHAR_MAX;
+			else {
+				if((rand()%nLearningRate)==0) {
+					const size_t nSampleModelIdx = rand()%m_nBGSamples;
+					ushort& nRandInputDesc = *((ushort*)(m_voBGDescSamples[nSampleModelIdx].data+nDescIter));
+					LBSP::computeGrayscaleDescriptor(oInputImg,nCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,m_anLBSPThreshold_8bitLUT[nCurrColor],nRandInputDesc);
+					m_voBGColorSamples[nSampleModelIdx].data[nPxIter] = nCurrColor;
+				}
+				if((rand()%nLearningRate)==0) {
+					int nSampleImgCoord_Y, nSampleImgCoord_X;
+					getRandNeighborPosition_3x3(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+					const size_t nSampleModelIdx = rand()%m_nBGSamples;
+					ushort& nRandInputDesc = m_voBGDescSamples[nSampleModelIdx].at<ushort>(nSampleImgCoord_Y,nSampleImgCoord_X);
+					LBSP::computeGrayscaleDescriptor(oInputImg,nCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,m_anLBSPThreshold_8bitLUT[nCurrColor],nRandInputDesc);
+					m_voBGColorSamples[nSampleModelIdx].at<uchar>(nSampleImgCoord_Y,nSampleImgCoord_X) = nCurrColor;
+				}
+			}
+		}
+	}
+	else { //m_nImgChannels==3
+		const size_t nCurrDescDistThreshold = m_nDescDistThreshold*3;
+		const size_t nCurrColorDistThreshold = m_nColorDistThreshold*3;
+		const size_t nCurrSCDescDistThreshold = nCurrDescDistThreshold/2;
+		const size_t nCurrSCColorDistThreshold = nCurrColorDistThreshold/2;
+		const size_t desc_row_step = m_voBGDescSamples[0].step.p[0];
+		const size_t img_row_step = m_voBGColorSamples[0].step.p[0];
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			const int nCurrImgCoord_X = m_aPxInfoLUT[nPxIter].nImgCoord_X;
+			const int nCurrImgCoord_Y = m_aPxInfoLUT[nPxIter].nImgCoord_Y;
+			const size_t nPxIterRGB = nPxIter*3;
+			const size_t nDescIterRGB = nPxIterRGB*2;
+			const uchar* const anCurrColor = oInputImg.data+nPxIterRGB;
+			size_t nGoodSamplesCount=0, nModelIdx=0;
+			ushort anCurrInputDesc[3];
+			while(nGoodSamplesCount<m_nRequiredBGSamples && nModelIdx<m_nBGSamples) {
+				const ushort* const anBGDesc = (ushort*)(m_voBGDescSamples[nModelIdx].data+nDescIterRGB);
+				const uchar* const anBGColor = m_voBGColorSamples[nModelIdx].data+nPxIterRGB;
+				size_t nTotColorDist = 0;
+				size_t nTotDescDist = 0;
+				for(size_t c=0;c<3; ++c) {
+					const size_t nColorDist = L1dist(anCurrColor[c],anBGColor[c]);
+					if(nColorDist>nCurrSCColorDistThreshold)
+						goto failedcheck3ch;
+					LBSP::computeSingleRGBDescriptor(oInputImg,anBGColor[c],nCurrImgCoord_X,nCurrImgCoord_Y,c,m_anLBSPThreshold_8bitLUT[anBGColor[c]],anCurrInputDesc[c]);
+					const size_t nDescDist = hdist(anCurrInputDesc[c],anBGDesc[c]);
+					if(nDescDist>nCurrSCDescDistThreshold)
+						goto failedcheck3ch;
+					nTotColorDist += nColorDist;
+					nTotDescDist += nDescDist;
+				}
+				if(nTotDescDist<=nCurrDescDistThreshold && nTotColorDist<=nCurrColorDistThreshold)
+					nGoodSamplesCount++;
+				failedcheck3ch:
+				nModelIdx++;
+			}
+			if(nGoodSamplesCount<m_nRequiredBGSamples)
+				oCurrFGMask.data[nPxIter] = UCHAR_MAX;
+			else {
+				if((rand()%nLearningRate)==0) {
+					const size_t nSampleModelIdx = rand()%m_nBGSamples;
+					ushort* anRandInputDesc = ((ushort*)(m_voBGDescSamples[nSampleModelIdx].data+nDescIterRGB));
+					const size_t anCurrIntraLBSPThresholds[3] = {m_anLBSPThreshold_8bitLUT[anCurrColor[0]],m_anLBSPThreshold_8bitLUT[anCurrColor[1]],m_anLBSPThreshold_8bitLUT[anCurrColor[2]]};
+					LBSP::computeRGBDescriptor(oInputImg,anCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,anCurrIntraLBSPThresholds,anRandInputDesc);
+					for(size_t c=0; c<3; ++c)
+						*(m_voBGColorSamples[nSampleModelIdx].data+nPxIterRGB+c) = anCurrColor[c];
+				}
+				if((rand()%nLearningRate)==0) {
+					int nSampleImgCoord_Y, nSampleImgCoord_X;
+					getRandNeighborPosition_3x3(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+					const size_t nSampleModelIdx = rand()%m_nBGSamples;
+					ushort* anRandInputDesc = ((ushort*)(m_voBGDescSamples[nSampleModelIdx].data + desc_row_step*nSampleImgCoord_Y + 6*nSampleImgCoord_X));
+					const size_t anCurrIntraLBSPThresholds[3] = {m_anLBSPThreshold_8bitLUT[anCurrColor[0]],m_anLBSPThreshold_8bitLUT[anCurrColor[1]],m_anLBSPThreshold_8bitLUT[anCurrColor[2]]};
+					LBSP::computeRGBDescriptor(oInputImg,anCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,anCurrIntraLBSPThresholds,anRandInputDesc);
+					for(size_t c=0; c<3; ++c)
+						*(m_voBGColorSamples[nSampleModelIdx].data + img_row_step*nSampleImgCoord_Y + 3*nSampleImgCoord_X + c) = anCurrColor[c];
+				}
+			}
+		}
+	}
+	cv::medianBlur(oCurrFGMask,m_oLastFGMask,m_nDefaultMedianBlurKernelSize);
+	m_oLastFGMask.copyTo(oCurrFGMask);
+}
+
+void BackgroundSubtractorLOBSTER::getBackgroundImage(cv::OutputArray backgroundImage) const {
+	CV_DbgAssert(m_bInitialized);
+	cv::Mat oAvgBGImg = cv::Mat::zeros(m_oImgSize,CV_32FC((int)m_nImgChannels));
+	for(size_t s=0; s<m_nBGSamples; ++s) {
+		for(int y=0; y<m_oImgSize.height; ++y) {
+			for(int x=0; x<m_oImgSize.width; ++x) {
+				const size_t idx_nimg = m_voBGColorSamples[s].step.p[0]*y + m_voBGColorSamples[s].step.p[1]*x;
+				const size_t idx_flt32 = idx_nimg*4;
+				float* oAvgBgImgPtr = (float*)(oAvgBGImg.data+idx_flt32);
+				const uchar* const oBGImgPtr = m_voBGColorSamples[s].data+idx_nimg;
+				for(size_t c=0; c<m_nImgChannels; ++c)
+					oAvgBgImgPtr[c] += ((float)oBGImgPtr[c])/m_nBGSamples;
+			}
+		}
+	}
+	oAvgBGImg.convertTo(backgroundImage,CV_8U);
+}
+
+void BackgroundSubtractorLOBSTER::getBackgroundDescriptorsImage(cv::OutputArray backgroundDescImage) const {
+	CV_Assert(LBSP::DESC_SIZE==2);
+	CV_Assert(m_bInitialized);
+	cv::Mat oAvgBGDesc = cv::Mat::zeros(m_oImgSize,CV_32FC((int)m_nImgChannels));
+	for(size_t n=0; n<m_voBGDescSamples.size(); ++n) {
+		for(int y=0; y<m_oImgSize.height; ++y) {
+			for(int x=0; x<m_oImgSize.width; ++x) {
+				const size_t idx_ndesc = m_voBGDescSamples[n].step.p[0]*y + m_voBGDescSamples[n].step.p[1]*x;
+				const size_t idx_flt32 = idx_ndesc*2;
+				float* oAvgBgDescPtr = (float*)(oAvgBGDesc.data+idx_flt32);
+				const ushort* const oBGDescPtr = (ushort*)(m_voBGDescSamples[n].data+idx_ndesc);
+				for(size_t c=0; c<m_nImgChannels; ++c)
+					oAvgBgDescPtr[c] += ((float)oBGDescPtr[c])/m_voBGDescSamples.size();
+			}
+		}
+	}
+	oAvgBGDesc.convertTo(backgroundDescImage,CV_16U);
+}
diff --git a/package_bgs/pl/BackgroundSubtractorLOBSTER.h b/package_bgs/pl/BackgroundSubtractorLOBSTER.h
new file mode 100644
index 0000000000000000000000000000000000000000..bedc55d1cedccba3a2993f40de48ac056b67a8d3
--- /dev/null
+++ b/package_bgs/pl/BackgroundSubtractorLOBSTER.h
@@ -0,0 +1,67 @@
+#pragma once
+
+#include "BackgroundSubtractorLBSP.h"
+
+//! defines the default value for BackgroundSubtractorLBSP::m_fRelLBSPThreshold
+#define BGSLOBSTER_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD (0.365f)
+//! defines the default value for BackgroundSubtractorLBSP::m_nLBSPThresholdOffset
+#define BGSLOBSTER_DEFAULT_LBSP_OFFSET_SIMILARITY_THRESHOLD (0)
+//! defines the default value for BackgroundSubtractorLOBSTER::m_nDescDistThreshold
+#define BGSLOBSTER_DEFAULT_DESC_DIST_THRESHOLD (4)
+//! defines the default value for BackgroundSubtractorLOBSTER::m_nColorDistThreshold
+#define BGSLOBSTER_DEFAULT_COLOR_DIST_THRESHOLD (30)
+//! defines the default value for BackgroundSubtractorLOBSTER::m_nBGSamples
+#define BGSLOBSTER_DEFAULT_NB_BG_SAMPLES (35)
+//! defines the default value for BackgroundSubtractorLOBSTER::m_nRequiredBGSamples
+#define BGSLOBSTER_DEFAULT_REQUIRED_NB_BG_SAMPLES (2)
+//! defines the default value for the learning rate passed to BackgroundSubtractorLOBSTER::operator()
+#define BGSLOBSTER_DEFAULT_LEARNING_RATE (16)
+
+/*!
+	LOcal Binary Similarity segmenTER (LOBSTER) change detection algorithm.
+
+	Note: both grayscale and RGB/BGR images may be used with this extractor (parameters are adjusted automatically).
+	For optimal grayscale results, use CV_8UC1 frames instead of CV_8UC3.
+
+	For more details on the different parameters or on the algorithm itself, see P.-L. St-Charles and
+	G.-A. Bilodeau, "Improving Background Subtraction using Local Binary Similarity Patterns", in WACV 2014.
+
+	This algorithm is currently NOT thread-safe.
+ */
+class BackgroundSubtractorLOBSTER : public BackgroundSubtractorLBSP {
+public:
+	//! full constructor
+	BackgroundSubtractorLOBSTER(float fRelLBSPThreshold=BGSLOBSTER_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD,
+								size_t nLBSPThresholdOffset=BGSLOBSTER_DEFAULT_LBSP_OFFSET_SIMILARITY_THRESHOLD,
+								size_t nDescDistThreshold=BGSLOBSTER_DEFAULT_DESC_DIST_THRESHOLD,
+								size_t nColorDistThreshold=BGSLOBSTER_DEFAULT_COLOR_DIST_THRESHOLD,
+								size_t nBGSamples=BGSLOBSTER_DEFAULT_NB_BG_SAMPLES,
+								size_t nRequiredBGSamples=BGSLOBSTER_DEFAULT_REQUIRED_NB_BG_SAMPLES);
+	//! default destructor
+	virtual ~BackgroundSubtractorLOBSTER();
+	//! (re)initiaization method; needs to be called before starting background subtraction
+	virtual void initialize(const cv::Mat& oInitImg, const cv::Mat& oROI);
+	//! refreshes all samples based on the last analyzed frame
+	virtual void refreshModel(float fSamplesRefreshFrac, bool bForceFGUpdate=false);
+	//! primary model update function; the learning param is reinterpreted as an integer and should be > 0 (smaller values == faster adaptation)
+	virtual void operator()(cv::InputArray image, cv::OutputArray fgmask, double learningRate=BGSLOBSTER_DEFAULT_LEARNING_RATE);
+	//! returns a copy of the latest reconstructed background image
+	void getBackgroundImage(cv::OutputArray backgroundImage) const;
+	//! returns a copy of the latest reconstructed background descriptors image
+	virtual void getBackgroundDescriptorsImage(cv::OutputArray backgroundDescImage) const;
+
+protected:
+	//! absolute color distance threshold
+	const size_t m_nColorDistThreshold;
+	//! absolute descriptor distance threshold
+	const size_t m_nDescDistThreshold;
+	//! number of different samples per pixel/block to be taken from input frames to build the background model
+	const size_t m_nBGSamples;
+	//! number of similar samples needed to consider the current pixel/block as 'background'
+	const size_t m_nRequiredBGSamples;
+	//! background model pixel intensity samples
+	std::vector<cv::Mat> m_voBGColorSamples;
+	//! background model descriptors samples
+	std::vector<cv::Mat> m_voBGDescSamples;
+};
+
diff --git a/package_bgs/pl/BackgroundSubtractorSuBSENSE.cpp b/package_bgs/pl/BackgroundSubtractorSuBSENSE.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c7d5daca785550f6290f59e19e3427892908520c
--- /dev/null
+++ b/package_bgs/pl/BackgroundSubtractorSuBSENSE.cpp
@@ -0,0 +1,737 @@
+#include "BackgroundSubtractorSuBSENSE.h"
+#include "DistanceUtils.h"
+#include "RandUtils.h"
+#include <iostream>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <iomanip>
+
+/*
+ *
+ * Intrinsic parameters for our method are defined here; tuning these for better
+ * performance should not be required in most cases -- although improvements in
+ * very specific scenarios are always possible.
+ *
+ */
+//! defines the threshold value(s) used to detect long-term ghosting and trigger the fast edge-based absorption heuristic
+#define GHOSTDET_D_MAX (0.010f) // defines 'negligible' change here
+#define GHOSTDET_S_MIN (0.995f) // defines the required minimum local foreground saturation value
+//! parameter used to scale dynamic distance threshold adjustments ('R(x)')
+#define FEEDBACK_R_VAR (0.01f)
+//! parameters used to adjust the variation step size of 'v(x)'
+#define FEEDBACK_V_INCR  (1.000f)
+#define FEEDBACK_V_DECR  (0.100f)
+//! parameters used to scale dynamic learning rate adjustments  ('T(x)')
+#define FEEDBACK_T_DECR  (0.2500f)
+#define FEEDBACK_T_INCR  (0.5000f)
+#define FEEDBACK_T_LOWER (2.0000f)
+#define FEEDBACK_T_UPPER (256.00f)
+//! parameters used to define 'unstable' regions, based on segm noise/bg dynamics and local dist threshold values
+#define UNSTABLE_REG_RATIO_MIN (0.100f)
+#define UNSTABLE_REG_RDIST_MIN (3.000f)
+//! parameters used to scale the relative LBSP intensity threshold used for internal comparisons
+#define LBSPDESC_NONZERO_RATIO_MIN (0.100f)
+#define LBSPDESC_NONZERO_RATIO_MAX (0.500f)
+//! parameters used to define model reset/learning rate boosts in our frame-level component
+#define FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD  (m_nMinColorDistThreshold/2)
+#define FRAMELEVEL_ANALYSIS_DOWNSAMPLE_RATIO (8)
+
+// local define used to display debug information
+#define DISPLAY_SUBSENSE_DEBUG_INFO 0
+// local define used to specify the default frame size (320x240 = QVGA)
+#define DEFAULT_FRAME_SIZE cv::Size(320,240)
+// local define used to specify the color dist threshold offset used for unstable regions
+#define STAB_COLOR_DIST_OFFSET (m_nMinColorDistThreshold/5)
+// local define used to specify the desc dist threshold offset used for unstable regions
+#define UNSTAB_DESC_DIST_OFFSET (m_nDescDistThresholdOffset)
+
+static const size_t s_nColorMaxDataRange_1ch = UCHAR_MAX;
+static const size_t s_nDescMaxDataRange_1ch = LBSP::DESC_SIZE*8;
+static const size_t s_nColorMaxDataRange_3ch = s_nColorMaxDataRange_1ch*3;
+static const size_t s_nDescMaxDataRange_3ch = s_nDescMaxDataRange_1ch*3;
+
+// Constructor for the SuBSENSE background subtractor; forwards the relative
+// LBSP threshold to the BackgroundSubtractorLBSP base class and caches the
+// segmentation/model parameters. The LUT pointers, ROI and per-pixel state
+// maps are allocated later, in initialize().
+// NOTE(review): m_aPxIdxLUT/m_aPxInfoLUT are not set here -- presumably they
+// are null-initialized in the header or base class; confirm before relying
+// on the destructor being safe for a never-initialized instance.
+BackgroundSubtractorSuBSENSE::BackgroundSubtractorSuBSENSE(	 float fRelLBSPThreshold
+															,size_t nDescDistThresholdOffset
+															,size_t nMinColorDistThreshold
+															,size_t nBGSamples
+															,size_t nRequiredBGSamples
+															,size_t nSamplesForMovingAvgs)
+	:	 BackgroundSubtractorLBSP(fRelLBSPThreshold)
+		,m_nMinColorDistThreshold(nMinColorDistThreshold)
+		,m_nDescDistThresholdOffset(nDescDistThresholdOffset)
+		,m_nBGSamples(nBGSamples)
+		,m_nRequiredBGSamples(nRequiredBGSamples)
+		,m_nSamplesForMovingAvgs(nSamplesForMovingAvgs)
+		,m_fLastNonZeroDescRatio(0.0f)
+		,m_bLearningRateScalingEnabled(true)
+		,m_fCurrLearningRateLowerCap(FEEDBACK_T_LOWER)
+		,m_fCurrLearningRateUpperCap(FEEDBACK_T_UPPER)
+		,m_nMedianBlurKernelSize(m_nDefaultMedianBlurKernelSize)
+		,m_bUse3x3Spread(true) {
+	// sanity checks: need a non-empty sample pool, and the required-match
+	// count cannot exceed the pool size
+	CV_Assert(m_nBGSamples>0 && m_nRequiredBGSamples<=m_nBGSamples);
+	// the color threshold must leave room for the stable-region offset that
+	// gets subtracted from it in the per-pixel threshold computation
+	CV_Assert(m_nMinColorDistThreshold>=STAB_COLOR_DIST_OFFSET);
+}
+
+// Destructor; releases the pixel-index and pixel-info lookup tables allocated
+// in initialize(). delete[] on a null pointer is a guaranteed no-op, so the
+// explicit null checks of the original were redundant and have been dropped.
+// (These arrays would ideally be std::unique_ptr<T[]> members, but their
+// declarations live in the header, outside this translation unit.)
+BackgroundSubtractorSuBSENSE::~BackgroundSubtractorSuBSENSE() {
+	delete[] m_aPxIdxLUT;
+	delete[] m_aPxInfoLUT;
+}
+
+// (Re)initializes the background model from a first frame and an optional ROI.
+// oInitImg must be a continuous CV_8UC1 or CV_8UC3 image; oROI, if non-empty,
+// must be a CV_8UC1 mask of the same size containing only 0/255 values.
+// Allocates all per-pixel state maps, the pixel LUTs and the sample vectors,
+// then seeds the model via refreshModel(1.0f).
+void BackgroundSubtractorSuBSENSE::initialize(const cv::Mat& oInitImg, const cv::Mat& oROI) {
+	// == init
+	CV_Assert(!oInitImg.empty() && oInitImg.cols>0 && oInitImg.rows>0);
+	CV_Assert(oInitImg.isContinuous());
+	CV_Assert(oInitImg.type()==CV_8UC3 || oInitImg.type()==CV_8UC1);
+	if(oInitImg.type()==CV_8UC3) {
+		std::vector<cv::Mat> voInitImgChannels;
+		cv::split(oInitImg,voInitImgChannels);
+		// all three channels identical ==> grayscale content packed as BGR
+		if(!cv::countNonZero((voInitImgChannels[0]!=voInitImgChannels[1])|(voInitImgChannels[2]!=voInitImgChannels[1])))
+			std::cout << std::endl << "\tBackgroundSubtractorSuBSENSE : Warning, grayscale images should always be passed in CV_8UC1 format for optimal performance." << std::endl;
+	}
+	cv::Mat oNewBGROI;
+	// no ROI given and no previous ROI that matches the new frame size ==>
+	// default to a full-frame ROI.
+	// BUGFIX: the original compared oROI.size() here, but oROI is known empty
+	// in this branch, so the condition always held and the reuse branch below
+	// was unreachable; the previous ROI's size (m_oROI) is what must match.
+	if(oROI.empty() && (m_oROI.empty() || m_oROI.size()!=oInitImg.size())) {
+		oNewBGROI.create(oInitImg.size(),CV_8UC1);
+		oNewBGROI = cv::Scalar_<uchar>(UCHAR_MAX);
+	}
+	else if(oROI.empty())
+		// reuse the ROI from a previous initialization on the same frame size
+		oNewBGROI = m_oROI;
+	else {
+		CV_Assert(oROI.size()==oInitImg.size() && oROI.type()==CV_8UC1);
+		// the mask must be strictly binary (only 0 or 255 values)
+		CV_Assert(cv::countNonZero((oROI<UCHAR_MAX)&(oROI>0))==0);
+		oNewBGROI = oROI.clone();
+		cv::Mat oTempROI;
+		cv::dilate(oNewBGROI,oTempROI,cv::Mat(),cv::Point(-1,-1),LBSP::PATCH_SIZE/2);
+		// mark the dilated border band with half-intensity (128) values so it
+		// can be told apart from the fully-relevant (255) region
+		cv::bitwise_or(oNewBGROI,oTempROI/2,oNewBGROI);
+	}
+	const size_t nOrigROIPxCount = (size_t)cv::countNonZero(oNewBGROI);
+	CV_Assert(nOrigROIPxCount>0);
+	LBSP::validateROI(oNewBGROI);
+	const size_t nFinalROIPxCount = (size_t)cv::countNonZero(oNewBGROI);
+	CV_Assert(nFinalROIPxCount>0);
+	m_oROI = oNewBGROI;
+	m_oImgSize = oInitImg.size();
+	m_nImgType = oInitImg.type();
+	m_nImgChannels = oInitImg.channels();
+	m_nTotPxCount = m_oImgSize.area();
+	m_nTotRelevantPxCount = nFinalROIPxCount;
+	m_nFrameIndex = 0;
+	m_nFramesSinceLastReset = 0;
+	m_nModelResetCooldown = 0;
+	m_fLastNonZeroDescRatio = 0.0f;
+	const int nTotImgPixels = m_oImgSize.height*m_oImgSize.width;
+	// full-scale heuristics (learning-rate scaling, auto model reset, larger
+	// median blur, 5x5 spread on big frames) only kick in for QVGA-or-larger
+	// frames with a mostly-relevant ROI; otherwise use conservative settings
+	if(nOrigROIPxCount>=m_nTotPxCount/2 && (int)m_nTotPxCount>=DEFAULT_FRAME_SIZE.area()) {
+		m_bLearningRateScalingEnabled = true;
+		m_bAutoModelResetEnabled = true;
+		m_bUse3x3Spread = !(nTotImgPixels>DEFAULT_FRAME_SIZE.area()*2);
+		// median blur kernel grows with frame area, capped at 14...
+		const int nRawMedianBlurKernelSize = std::min((int)floor((float)nTotImgPixels/DEFAULT_FRAME_SIZE.area()+0.5f)+m_nDefaultMedianBlurKernelSize,14);
+		// ...then rounded down to the nearest odd value (required by medianBlur)
+		m_nMedianBlurKernelSize = (nRawMedianBlurKernelSize%2)?nRawMedianBlurKernelSize:nRawMedianBlurKernelSize-1;
+		m_fCurrLearningRateLowerCap = FEEDBACK_T_LOWER;
+		m_fCurrLearningRateUpperCap = FEEDBACK_T_UPPER;
+	}
+	else {
+		m_bLearningRateScalingEnabled = false;
+		m_bAutoModelResetEnabled = false;
+		m_bUse3x3Spread = true;
+		m_nMedianBlurKernelSize = m_nDefaultMedianBlurKernelSize;
+		m_fCurrLearningRateLowerCap = FEEDBACK_T_LOWER*2;
+		m_fCurrLearningRateUpperCap = FEEDBACK_T_UPPER*2;
+	}
+	// per-pixel feedback/state maps ('T(x)', 'R(x)', 'v(x)', rolling averages)
+	m_oUpdateRateFrame.create(m_oImgSize,CV_32FC1);
+	m_oUpdateRateFrame = cv::Scalar(m_fCurrLearningRateLowerCap);
+	m_oDistThresholdFrame.create(m_oImgSize,CV_32FC1);
+	m_oDistThresholdFrame = cv::Scalar(1.0f);
+	m_oVariationModulatorFrame.create(m_oImgSize,CV_32FC1);
+	m_oVariationModulatorFrame = cv::Scalar(10.0f); // should always be >= FEEDBACK_V_DECR
+	m_oMeanLastDistFrame.create(m_oImgSize,CV_32FC1);
+	m_oMeanLastDistFrame = cv::Scalar(0.0f);
+	m_oMeanMinDistFrame_LT.create(m_oImgSize,CV_32FC1);
+	m_oMeanMinDistFrame_LT = cv::Scalar(0.0f);
+	m_oMeanMinDistFrame_ST.create(m_oImgSize,CV_32FC1);
+	m_oMeanMinDistFrame_ST = cv::Scalar(0.0f);
+	// downsampled buffers used by the frame-level motion/reset analysis
+	m_oDownSampledFrameSize = cv::Size(m_oImgSize.width/FRAMELEVEL_ANALYSIS_DOWNSAMPLE_RATIO,m_oImgSize.height/FRAMELEVEL_ANALYSIS_DOWNSAMPLE_RATIO);
+	m_oMeanDownSampledLastDistFrame_LT.create(m_oDownSampledFrameSize,CV_32FC((int)m_nImgChannels));
+	m_oMeanDownSampledLastDistFrame_LT = cv::Scalar(0.0f);
+	m_oMeanDownSampledLastDistFrame_ST.create(m_oDownSampledFrameSize,CV_32FC((int)m_nImgChannels));
+	m_oMeanDownSampledLastDistFrame_ST = cv::Scalar(0.0f);
+	m_oMeanRawSegmResFrame_LT.create(m_oImgSize,CV_32FC1);
+	m_oMeanRawSegmResFrame_LT = cv::Scalar(0.0f);
+	m_oMeanRawSegmResFrame_ST.create(m_oImgSize,CV_32FC1);
+	m_oMeanRawSegmResFrame_ST = cv::Scalar(0.0f);
+	m_oMeanFinalSegmResFrame_LT.create(m_oImgSize,CV_32FC1);
+	m_oMeanFinalSegmResFrame_LT = cv::Scalar(0.0f);
+	m_oMeanFinalSegmResFrame_ST.create(m_oImgSize,CV_32FC1);
+	m_oMeanFinalSegmResFrame_ST = cv::Scalar(0.0f);
+	m_oUnstableRegionMask.create(m_oImgSize,CV_8UC1);
+	m_oUnstableRegionMask = cv::Scalar_<uchar>(0);
+	m_oBlinksFrame.create(m_oImgSize,CV_8UC1);
+	m_oBlinksFrame = cv::Scalar_<uchar>(0);
+	m_oDownSampledFrame_MotionAnalysis.create(m_oDownSampledFrameSize,CV_8UC((int)m_nImgChannels));
+	m_oDownSampledFrame_MotionAnalysis = cv::Scalar_<uchar>::all(0);
+	// last-frame caches and intermediate foreground masks
+	m_oLastColorFrame.create(m_oImgSize,CV_8UC((int)m_nImgChannels));
+	m_oLastColorFrame = cv::Scalar_<uchar>::all(0);
+	m_oLastDescFrame.create(m_oImgSize,CV_16UC((int)m_nImgChannels));
+	m_oLastDescFrame = cv::Scalar_<ushort>::all(0);
+	m_oLastRawFGMask.create(m_oImgSize,CV_8UC1);
+	m_oLastRawFGMask = cv::Scalar_<uchar>(0);
+	m_oLastFGMask.create(m_oImgSize,CV_8UC1);
+	m_oLastFGMask = cv::Scalar_<uchar>(0);
+	m_oLastFGMask_dilated.create(m_oImgSize,CV_8UC1);
+	m_oLastFGMask_dilated = cv::Scalar_<uchar>(0);
+	m_oLastFGMask_dilated_inverted.create(m_oImgSize,CV_8UC1);
+	m_oLastFGMask_dilated_inverted = cv::Scalar_<uchar>(0);
+	m_oFGMask_FloodedHoles.create(m_oImgSize,CV_8UC1);
+	m_oFGMask_FloodedHoles = cv::Scalar_<uchar>(0);
+	m_oFGMask_PreFlood.create(m_oImgSize,CV_8UC1);
+	m_oFGMask_PreFlood = cv::Scalar_<uchar>(0);
+	m_oCurrRawFGBlinkMask.create(m_oImgSize,CV_8UC1);
+	m_oCurrRawFGBlinkMask = cv::Scalar_<uchar>(0);
+	m_oLastRawFGBlinkMask.create(m_oImgSize,CV_8UC1);
+	m_oLastRawFGBlinkMask = cv::Scalar_<uchar>(0);
+	// background sample pools: color + LBSP descriptor, m_nBGSamples each
+	m_voBGColorSamples.resize(m_nBGSamples);
+	m_voBGDescSamples.resize(m_nBGSamples);
+	for(size_t s=0; s<m_nBGSamples; ++s) {
+		m_voBGColorSamples[s].create(m_oImgSize,CV_8UC((int)m_nImgChannels));
+		m_voBGColorSamples[s] = cv::Scalar_<uchar>::all(0);
+		m_voBGDescSamples[s].create(m_oImgSize,CV_16UC((int)m_nImgChannels));
+		m_voBGDescSamples[s] = cv::Scalar_<ushort>::all(0);
+	}
+	// rebuild the pixel LUTs (safe on re-initialization: delete old buffers).
+	// note: m_aPxIdxLUT is indexed by relevant-model index, while m_aPxInfoLUT
+	// is indexed by raw pixel index -- their sizes intentionally differ.
+	delete[] m_aPxIdxLUT;
+	delete[] m_aPxInfoLUT;
+	m_aPxIdxLUT = new size_t[m_nTotRelevantPxCount];
+	m_aPxInfoLUT = new PxInfoBase[m_nTotPxCount];
+	if(m_nImgChannels==1) {
+		// the per-pixel loops below rely on tightly-packed, continuous data
+		CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width && m_oLastColorFrame.step.p[1]==1);
+		CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2);
+		// LBSP threshold LUT; divided by 3 in grayscale mode (one channel
+		// instead of three contributes to the distance sums)
+		for(size_t t=0; t<=UCHAR_MAX; ++t)
+			m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>((m_nLBSPThresholdOffset+t*m_fRelLBSPThreshold)/3);
+		for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) {
+			if(m_oROI.data[nPxIter]) {
+				m_aPxIdxLUT[nModelIter] = nPxIter;
+				m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter;
+				m_oLastColorFrame.data[nPxIter] = oInitImg.data[nPxIter];
+				const size_t nDescIter = nPxIter*2;
+				LBSP::computeGrayscaleDescriptor(oInitImg,oInitImg.data[nPxIter],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxIter]],*((ushort*)(m_oLastDescFrame.data+nDescIter)));
+				++nModelIter;
+			}
+		}
+	}
+	else { //m_nImgChannels==3
+		CV_Assert(m_oLastColorFrame.step.p[0]==(size_t)m_oImgSize.width*3 && m_oLastColorFrame.step.p[1]==3);
+		CV_Assert(m_oLastDescFrame.step.p[0]==m_oLastColorFrame.step.p[0]*2 && m_oLastDescFrame.step.p[1]==m_oLastColorFrame.step.p[1]*2);
+		for(size_t t=0; t<=UCHAR_MAX; ++t)
+			m_anLBSPThreshold_8bitLUT[t] = cv::saturate_cast<uchar>(m_nLBSPThresholdOffset+t*m_fRelLBSPThreshold);
+		for(size_t nPxIter=0, nModelIter=0; nPxIter<m_nTotPxCount; ++nPxIter) {
+			if(m_oROI.data[nPxIter]) {
+				m_aPxIdxLUT[nModelIter] = nPxIter;
+				m_aPxInfoLUT[nPxIter].nImgCoord_Y = (int)nPxIter/m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nImgCoord_X = (int)nPxIter%m_oImgSize.width;
+				m_aPxInfoLUT[nPxIter].nModelIdx = nModelIter;
+				const size_t nPxRGBIter = nPxIter*3;
+				const size_t nDescRGBIter = nPxRGBIter*2;
+				for(size_t c=0; c<3; ++c) {
+					m_oLastColorFrame.data[nPxRGBIter+c] = oInitImg.data[nPxRGBIter+c];
+					LBSP::computeSingleRGBDescriptor(oInitImg,oInitImg.data[nPxRGBIter+c],m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,c,m_anLBSPThreshold_8bitLUT[oInitImg.data[nPxRGBIter+c]],((ushort*)(m_oLastDescFrame.data+nDescRGBIter))[c]);
+				}
+				++nModelIter;
+			}
+		}
+	}
+	m_bInitialized = true;
+	// fully seed every sample slot from the (just-cached) first frame
+	refreshModel(1.0f);
+}
+
+// Re-seeds (part of) the background sample model from the last analyzed frame.
+// fSamplesRefreshFrac must be in (0,1]: the fraction of the m_nBGSamples
+// slots to overwrite, starting at a random slot and wrapping around modulo
+// the pool size. Unless bForceFGUpdate is set, pixels flagged as foreground
+// in the last mask are skipped -- both as refresh targets and as random
+// sample sources -- so the model is only fed presumed-background data.
+void BackgroundSubtractorSuBSENSE::refreshModel(float fSamplesRefreshFrac, bool bForceFGUpdate) {
+	// == refresh
+	CV_Assert(m_bInitialized);
+	CV_Assert(fSamplesRefreshFrac>0.0f && fSamplesRefreshFrac<=1.0f);
+	const size_t nModelsToRefresh = fSamplesRefreshFrac<1.0f?(size_t)(fSamplesRefreshFrac*m_nBGSamples):m_nBGSamples;
+	const size_t nRefreshStartPos = fSamplesRefreshFrac<1.0f?rand()%m_nBGSamples:0;
+	if(m_nImgChannels==1) {
+		// iterate over ROI-relevant pixels only, via the model-index LUT
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			if(bForceFGUpdate || !m_oLastFGMask.data[nPxIter]) {
+				for(size_t nCurrModelIdx=nRefreshStartPos; nCurrModelIdx<nRefreshStartPos+nModelsToRefresh; ++nCurrModelIdx) {
+					int nSampleImgCoord_Y, nSampleImgCoord_X;
+					// draw the sample from a random neighbor position inside
+					// the LBSP patch-safe image border
+					getRandSamplePosition(nSampleImgCoord_X,nSampleImgCoord_Y,m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+					const size_t nSamplePxIdx = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X;
+					if(bForceFGUpdate || !m_oLastFGMask.data[nSamplePxIdx]) {
+						// wrap the slot index around the sample pool
+						const size_t nCurrRealModelIdx = nCurrModelIdx%m_nBGSamples;
+						// copy the cached color and its 16-bit LBSP descriptor
+						m_voBGColorSamples[nCurrRealModelIdx].data[nPxIter] = m_oLastColorFrame.data[nSamplePxIdx];
+						*((ushort*)(m_voBGDescSamples[nCurrRealModelIdx].data+nPxIter*2)) = *((ushort*)(m_oLastDescFrame.data+nSamplePxIdx*2));
+					}
+				}
+			}
+		}
+	}
+	else { //m_nImgChannels==3
+		// same logic as above, but with 3-byte color and 3x16-bit descriptor
+		// strides per pixel
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			if(bForceFGUpdate || !m_oLastFGMask.data[nPxIter]) {
+				for(size_t nCurrModelIdx=nRefreshStartPos; nCurrModelIdx<nRefreshStartPos+nModelsToRefresh; ++nCurrModelIdx) {
+					int nSampleImgCoord_Y, nSampleImgCoord_X;
+					getRandSamplePosition(nSampleImgCoord_X,nSampleImgCoord_Y,m_aPxInfoLUT[nPxIter].nImgCoord_X,m_aPxInfoLUT[nPxIter].nImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+					const size_t nSamplePxIdx = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X;
+					if(bForceFGUpdate || !m_oLastFGMask.data[nSamplePxIdx]) {
+						const size_t nCurrRealModelIdx = nCurrModelIdx%m_nBGSamples;
+						for(size_t c=0; c<3; ++c) {
+							m_voBGColorSamples[nCurrRealModelIdx].data[nPxIter*3+c] = m_oLastColorFrame.data[nSamplePxIdx*3+c];
+							*((ushort*)(m_voBGDescSamples[nCurrRealModelIdx].data+(nPxIter*3+c)*2)) = *((ushort*)(m_oLastDescFrame.data+(nSamplePxIdx*3+c)*2));
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+void BackgroundSubtractorSuBSENSE::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRateOverride) {
+	// == process
+	CV_Assert(m_bInitialized);
+	cv::Mat oInputImg = _image.getMat();
+	CV_Assert(oInputImg.type()==m_nImgType && oInputImg.size()==m_oImgSize);
+	CV_Assert(oInputImg.isContinuous());
+	_fgmask.create(m_oImgSize,CV_8UC1);
+	cv::Mat oCurrFGMask = _fgmask.getMat();
+	memset(oCurrFGMask.data,0,oCurrFGMask.cols*oCurrFGMask.rows);
+	size_t nNonZeroDescCount = 0;
+	const float fRollAvgFactor_LT = 1.0f/std::min(++m_nFrameIndex,m_nSamplesForMovingAvgs);
+	const float fRollAvgFactor_ST = 1.0f/std::min(m_nFrameIndex,m_nSamplesForMovingAvgs/4);
+	if(m_nImgChannels==1) {
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			const size_t nDescIter = nPxIter*2;
+			const size_t nFloatIter = nPxIter*4;
+			const int nCurrImgCoord_X = m_aPxInfoLUT[nPxIter].nImgCoord_X;
+			const int nCurrImgCoord_Y = m_aPxInfoLUT[nPxIter].nImgCoord_Y;
+			const uchar nCurrColor = oInputImg.data[nPxIter];
+			size_t nMinDescDist = s_nDescMaxDataRange_1ch;
+			size_t nMinSumDist = s_nColorMaxDataRange_1ch;
+			float* pfCurrDistThresholdFactor = (float*)(m_oDistThresholdFrame.data+nFloatIter);
+			float* pfCurrVariationFactor = (float*)(m_oVariationModulatorFrame.data+nFloatIter);
+			float* pfCurrLearningRate = ((float*)(m_oUpdateRateFrame.data+nFloatIter));
+			float* pfCurrMeanLastDist = ((float*)(m_oMeanLastDistFrame.data+nFloatIter));
+			float* pfCurrMeanMinDist_LT = ((float*)(m_oMeanMinDistFrame_LT.data+nFloatIter));
+			float* pfCurrMeanMinDist_ST = ((float*)(m_oMeanMinDistFrame_ST.data+nFloatIter));
+			float* pfCurrMeanRawSegmRes_LT = ((float*)(m_oMeanRawSegmResFrame_LT.data+nFloatIter));
+			float* pfCurrMeanRawSegmRes_ST = ((float*)(m_oMeanRawSegmResFrame_ST.data+nFloatIter));
+			float* pfCurrMeanFinalSegmRes_LT = ((float*)(m_oMeanFinalSegmResFrame_LT.data+nFloatIter));
+			float* pfCurrMeanFinalSegmRes_ST = ((float*)(m_oMeanFinalSegmResFrame_ST.data+nFloatIter));
+			ushort& nLastIntraDesc = *((ushort*)(m_oLastDescFrame.data+nDescIter));
+			uchar& nLastColor = m_oLastColorFrame.data[nPxIter];
+			const size_t nCurrColorDistThreshold = (size_t)(((*pfCurrDistThresholdFactor)*m_nMinColorDistThreshold)-((!m_oUnstableRegionMask.data[nPxIter])*STAB_COLOR_DIST_OFFSET))/2;
+			const size_t nCurrDescDistThreshold = ((size_t)1<<((size_t)floor(*pfCurrDistThresholdFactor+0.5f)))+m_nDescDistThresholdOffset+(m_oUnstableRegionMask.data[nPxIter]*UNSTAB_DESC_DIST_OFFSET);
+			ushort nCurrInterDesc, nCurrIntraDesc;
+			LBSP::computeGrayscaleDescriptor(oInputImg,nCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,m_anLBSPThreshold_8bitLUT[nCurrColor],nCurrIntraDesc);
+			m_oUnstableRegionMask.data[nPxIter] = ((*pfCurrDistThresholdFactor)>UNSTABLE_REG_RDIST_MIN || (*pfCurrMeanRawSegmRes_LT-*pfCurrMeanFinalSegmRes_LT)>UNSTABLE_REG_RATIO_MIN || (*pfCurrMeanRawSegmRes_ST-*pfCurrMeanFinalSegmRes_ST)>UNSTABLE_REG_RATIO_MIN)?1:0;
+			size_t nGoodSamplesCount=0, nSampleIdx=0;
+			while(nGoodSamplesCount<m_nRequiredBGSamples && nSampleIdx<m_nBGSamples) {
+				const uchar& nBGColor = m_voBGColorSamples[nSampleIdx].data[nPxIter];
+				{
+					const size_t nColorDist = L1dist(nCurrColor,nBGColor);
+					if(nColorDist>nCurrColorDistThreshold)
+						goto failedcheck1ch;
+					const ushort& nBGIntraDesc = *((ushort*)(m_voBGDescSamples[nSampleIdx].data+nDescIter));
+					const size_t nIntraDescDist = hdist(nCurrIntraDesc,nBGIntraDesc);
+					LBSP::computeGrayscaleDescriptor(oInputImg,nBGColor,nCurrImgCoord_X,nCurrImgCoord_Y,m_anLBSPThreshold_8bitLUT[nBGColor],nCurrInterDesc);
+					const size_t nInterDescDist = hdist(nCurrInterDesc,nBGIntraDesc);
+					const size_t nDescDist = (nIntraDescDist+nInterDescDist)/2;
+					if(nDescDist>nCurrDescDistThreshold)
+						goto failedcheck1ch;
+					const size_t nSumDist = std::min((nDescDist/4)*(s_nColorMaxDataRange_1ch/s_nDescMaxDataRange_1ch)+nColorDist,s_nColorMaxDataRange_1ch);
+					if(nSumDist>nCurrColorDistThreshold)
+						goto failedcheck1ch;
+					if(nMinDescDist>nDescDist)
+						nMinDescDist = nDescDist;
+					if(nMinSumDist>nSumDist)
+						nMinSumDist = nSumDist;
+					nGoodSamplesCount++;
+				}
+				failedcheck1ch:
+				nSampleIdx++;
+			}
+			const float fNormalizedLastDist = ((float)L1dist(nLastColor,nCurrColor)/s_nColorMaxDataRange_1ch+(float)hdist(nLastIntraDesc,nCurrIntraDesc)/s_nDescMaxDataRange_1ch)/2;
+			*pfCurrMeanLastDist = (*pfCurrMeanLastDist)*(1.0f-fRollAvgFactor_ST) + fNormalizedLastDist*fRollAvgFactor_ST;
+			if(nGoodSamplesCount<m_nRequiredBGSamples) {
+				// == foreground
+				const float fNormalizedMinDist = std::min(1.0f,((float)nMinSumDist/s_nColorMaxDataRange_1ch+(float)nMinDescDist/s_nDescMaxDataRange_1ch)/2 + (float)(m_nRequiredBGSamples-nGoodSamplesCount)/m_nRequiredBGSamples);
+				*pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT;
+				*pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST;
+				*pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT) + fRollAvgFactor_LT;
+				*pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST) + fRollAvgFactor_ST;
+				oCurrFGMask.data[nPxIter] = UCHAR_MAX;
+				if(m_nModelResetCooldown && (rand()%(size_t)FEEDBACK_T_LOWER)==0) {
+					const size_t s_rand = rand()%m_nBGSamples;
+					*((ushort*)(m_voBGDescSamples[s_rand].data+nDescIter)) = nCurrIntraDesc;
+					m_voBGColorSamples[s_rand].data[nPxIter] = nCurrColor;
+				}
+			}
+			else {
+				// == background
+				const float fNormalizedMinDist = ((float)nMinSumDist/s_nColorMaxDataRange_1ch+(float)nMinDescDist/s_nDescMaxDataRange_1ch)/2;
+				*pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT;
+				*pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST;
+				*pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT);
+				*pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST);
+				const size_t nLearningRate = learningRateOverride>0?(size_t)ceil(learningRateOverride):(size_t)ceil(*pfCurrLearningRate);
+				if((rand()%nLearningRate)==0) {
+					const size_t s_rand = rand()%m_nBGSamples;
+					*((ushort*)(m_voBGDescSamples[s_rand].data+nDescIter)) = nCurrIntraDesc;
+					m_voBGColorSamples[s_rand].data[nPxIter] = nCurrColor;
+				}
+				int nSampleImgCoord_Y, nSampleImgCoord_X;
+				const bool bCurrUsing3x3Spread = m_bUse3x3Spread && !m_oUnstableRegionMask.data[nPxIter];
+				if(bCurrUsing3x3Spread)
+					getRandNeighborPosition_3x3(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+				else
+					getRandNeighborPosition_5x5(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+				const size_t n_rand = rand();
+				const size_t idx_rand_uchar = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X;
+				const size_t idx_rand_flt32 = idx_rand_uchar*4;
+				const float fRandMeanLastDist = *((float*)(m_oMeanLastDistFrame.data+idx_rand_flt32));
+				const float fRandMeanRawSegmRes = *((float*)(m_oMeanRawSegmResFrame_ST.data+idx_rand_flt32));
+				if((n_rand%(bCurrUsing3x3Spread?nLearningRate:(nLearningRate/2+1)))==0
+					|| (fRandMeanRawSegmRes>GHOSTDET_S_MIN && fRandMeanLastDist<GHOSTDET_D_MAX && (n_rand%((size_t)m_fCurrLearningRateLowerCap))==0)) {
+					const size_t idx_rand_ushrt = idx_rand_uchar*2;
+					const size_t s_rand = rand()%m_nBGSamples;
+					*((ushort*)(m_voBGDescSamples[s_rand].data+idx_rand_ushrt)) = nCurrIntraDesc;
+					m_voBGColorSamples[s_rand].data[idx_rand_uchar] = nCurrColor;
+				}
+			}
+			if(m_oLastFGMask.data[nPxIter] || (std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)<UNSTABLE_REG_RATIO_MIN && oCurrFGMask.data[nPxIter])) {
+				if((*pfCurrLearningRate)<m_fCurrLearningRateUpperCap)
+					*pfCurrLearningRate += FEEDBACK_T_INCR/(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*(*pfCurrVariationFactor));
+			}
+			else if((*pfCurrLearningRate)>m_fCurrLearningRateLowerCap)
+				*pfCurrLearningRate -= FEEDBACK_T_DECR*(*pfCurrVariationFactor)/std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST);
+			if((*pfCurrLearningRate)<m_fCurrLearningRateLowerCap)
+				*pfCurrLearningRate = m_fCurrLearningRateLowerCap;
+			else if((*pfCurrLearningRate)>m_fCurrLearningRateUpperCap)
+				*pfCurrLearningRate = m_fCurrLearningRateUpperCap;
+			if(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)>UNSTABLE_REG_RATIO_MIN && m_oBlinksFrame.data[nPxIter])
+				(*pfCurrVariationFactor) += FEEDBACK_V_INCR;
+			else if((*pfCurrVariationFactor)>FEEDBACK_V_DECR) {
+				(*pfCurrVariationFactor) -= m_oLastFGMask.data[nPxIter]?FEEDBACK_V_DECR/4:m_oUnstableRegionMask.data[nPxIter]?FEEDBACK_V_DECR/2:FEEDBACK_V_DECR;
+				if((*pfCurrVariationFactor)<FEEDBACK_V_DECR)
+					(*pfCurrVariationFactor) = FEEDBACK_V_DECR;
+			}
+			if((*pfCurrDistThresholdFactor)<std::pow(1.0f+std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*2,2))
+				(*pfCurrDistThresholdFactor) += FEEDBACK_R_VAR*(*pfCurrVariationFactor-FEEDBACK_V_DECR);
+			else {
+				(*pfCurrDistThresholdFactor) -= FEEDBACK_R_VAR/(*pfCurrVariationFactor);
+				if((*pfCurrDistThresholdFactor)<1.0f)
+					(*pfCurrDistThresholdFactor) = 1.0f;
+			}
+			if(popcount(nCurrIntraDesc)>=2)
+				++nNonZeroDescCount;
+			nLastIntraDesc = nCurrIntraDesc;
+			nLastColor = nCurrColor;
+		}
+	}
+	else { //m_nImgChannels==3
+		for(size_t nModelIter=0; nModelIter<m_nTotRelevantPxCount; ++nModelIter) {
+			const size_t nPxIter = m_aPxIdxLUT[nModelIter];
+			const int nCurrImgCoord_X = m_aPxInfoLUT[nPxIter].nImgCoord_X;
+			const int nCurrImgCoord_Y = m_aPxInfoLUT[nPxIter].nImgCoord_Y;
+			const size_t nPxIterRGB = nPxIter*3;
+			const size_t nDescIterRGB = nPxIterRGB*2;
+			const size_t nFloatIter = nPxIter*4;
+			const uchar* const anCurrColor = oInputImg.data+nPxIterRGB;
+			size_t nMinTotDescDist=s_nDescMaxDataRange_3ch;
+			size_t nMinTotSumDist=s_nColorMaxDataRange_3ch;
+			float* pfCurrDistThresholdFactor = (float*)(m_oDistThresholdFrame.data+nFloatIter);
+			float* pfCurrVariationFactor = (float*)(m_oVariationModulatorFrame.data+nFloatIter);
+			float* pfCurrLearningRate = ((float*)(m_oUpdateRateFrame.data+nFloatIter));
+			float* pfCurrMeanLastDist = ((float*)(m_oMeanLastDistFrame.data+nFloatIter));
+			float* pfCurrMeanMinDist_LT = ((float*)(m_oMeanMinDistFrame_LT.data+nFloatIter));
+			float* pfCurrMeanMinDist_ST = ((float*)(m_oMeanMinDistFrame_ST.data+nFloatIter));
+			float* pfCurrMeanRawSegmRes_LT = ((float*)(m_oMeanRawSegmResFrame_LT.data+nFloatIter));
+			float* pfCurrMeanRawSegmRes_ST = ((float*)(m_oMeanRawSegmResFrame_ST.data+nFloatIter));
+			float* pfCurrMeanFinalSegmRes_LT = ((float*)(m_oMeanFinalSegmResFrame_LT.data+nFloatIter));
+			float* pfCurrMeanFinalSegmRes_ST = ((float*)(m_oMeanFinalSegmResFrame_ST.data+nFloatIter));
+			ushort* anLastIntraDesc = ((ushort*)(m_oLastDescFrame.data+nDescIterRGB));
+			uchar* anLastColor = m_oLastColorFrame.data+nPxIterRGB;
+			const size_t nCurrColorDistThreshold = (size_t)(((*pfCurrDistThresholdFactor)*m_nMinColorDistThreshold)-((!m_oUnstableRegionMask.data[nPxIter])*STAB_COLOR_DIST_OFFSET));
+			const size_t nCurrDescDistThreshold = ((size_t)1<<((size_t)floor(*pfCurrDistThresholdFactor+0.5f)))+m_nDescDistThresholdOffset+(m_oUnstableRegionMask.data[nPxIter]*UNSTAB_DESC_DIST_OFFSET);
+			const size_t nCurrTotColorDistThreshold = nCurrColorDistThreshold*3;
+			const size_t nCurrTotDescDistThreshold = nCurrDescDistThreshold*3;
+			const size_t nCurrSCColorDistThreshold = nCurrTotColorDistThreshold/2;
+			ushort anCurrInterDesc[3], anCurrIntraDesc[3];
+			const size_t anCurrIntraLBSPThresholds[3] = {m_anLBSPThreshold_8bitLUT[anCurrColor[0]],m_anLBSPThreshold_8bitLUT[anCurrColor[1]],m_anLBSPThreshold_8bitLUT[anCurrColor[2]]};
+			LBSP::computeRGBDescriptor(oInputImg,anCurrColor,nCurrImgCoord_X,nCurrImgCoord_Y,anCurrIntraLBSPThresholds,anCurrIntraDesc);
+			m_oUnstableRegionMask.data[nPxIter] = ((*pfCurrDistThresholdFactor)>UNSTABLE_REG_RDIST_MIN || (*pfCurrMeanRawSegmRes_LT-*pfCurrMeanFinalSegmRes_LT)>UNSTABLE_REG_RATIO_MIN || (*pfCurrMeanRawSegmRes_ST-*pfCurrMeanFinalSegmRes_ST)>UNSTABLE_REG_RATIO_MIN)?1:0;
+			size_t nGoodSamplesCount=0, nSampleIdx=0;
+			while(nGoodSamplesCount<m_nRequiredBGSamples && nSampleIdx<m_nBGSamples) {
+				const ushort* const anBGIntraDesc = (ushort*)(m_voBGDescSamples[nSampleIdx].data+nDescIterRGB);
+				const uchar* const anBGColor = m_voBGColorSamples[nSampleIdx].data+nPxIterRGB;
+				size_t nTotDescDist = 0;
+				size_t nTotSumDist = 0;
+				for(size_t c=0;c<3; ++c) {
+					const size_t nColorDist = L1dist(anCurrColor[c],anBGColor[c]);
+					if(nColorDist>nCurrSCColorDistThreshold)
+						goto failedcheck3ch;
+					const size_t nIntraDescDist = hdist(anCurrIntraDesc[c],anBGIntraDesc[c]);
+					LBSP::computeSingleRGBDescriptor(oInputImg,anBGColor[c],nCurrImgCoord_X,nCurrImgCoord_Y,c,m_anLBSPThreshold_8bitLUT[anBGColor[c]],anCurrInterDesc[c]);
+					const size_t nInterDescDist = hdist(anCurrInterDesc[c],anBGIntraDesc[c]);
+					const size_t nDescDist = (nIntraDescDist+nInterDescDist)/2;
+					const size_t nSumDist = std::min((nDescDist/2)*(s_nColorMaxDataRange_1ch/s_nDescMaxDataRange_1ch)+nColorDist,s_nColorMaxDataRange_1ch);
+					if(nSumDist>nCurrSCColorDistThreshold)
+						goto failedcheck3ch;
+					nTotDescDist += nDescDist;
+					nTotSumDist += nSumDist;
+				}
+				if(nTotDescDist>nCurrTotDescDistThreshold || nTotSumDist>nCurrTotColorDistThreshold)
+					goto failedcheck3ch;
+				if(nMinTotDescDist>nTotDescDist)
+					nMinTotDescDist = nTotDescDist;
+				if(nMinTotSumDist>nTotSumDist)
+					nMinTotSumDist = nTotSumDist;
+				nGoodSamplesCount++;
+				failedcheck3ch:
+				nSampleIdx++;
+			}
+			const float fNormalizedLastDist = ((float)L1dist<3>(anLastColor,anCurrColor)/s_nColorMaxDataRange_3ch+(float)hdist<3>(anLastIntraDesc,anCurrIntraDesc)/s_nDescMaxDataRange_3ch)/2;
+			*pfCurrMeanLastDist = (*pfCurrMeanLastDist)*(1.0f-fRollAvgFactor_ST) + fNormalizedLastDist*fRollAvgFactor_ST;
+			if(nGoodSamplesCount<m_nRequiredBGSamples) {
+				// == foreground
+				const float fNormalizedMinDist = std::min(1.0f,((float)nMinTotSumDist/s_nColorMaxDataRange_3ch+(float)nMinTotDescDist/s_nDescMaxDataRange_3ch)/2 + (float)(m_nRequiredBGSamples-nGoodSamplesCount)/m_nRequiredBGSamples);
+				*pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT;
+				*pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST;
+				*pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT) + fRollAvgFactor_LT;
+				*pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST) + fRollAvgFactor_ST;
+				oCurrFGMask.data[nPxIter] = UCHAR_MAX;
+				if(m_nModelResetCooldown && (rand()%(size_t)FEEDBACK_T_LOWER)==0) {
+					const size_t s_rand = rand()%m_nBGSamples;
+					for(size_t c=0; c<3; ++c) {
+						*((ushort*)(m_voBGDescSamples[s_rand].data+nDescIterRGB+2*c)) = anCurrIntraDesc[c];
+						*(m_voBGColorSamples[s_rand].data+nPxIterRGB+c) = anCurrColor[c];
+					}
+				}
+			}
+			else {
+				// == background
+				const float fNormalizedMinDist = ((float)nMinTotSumDist/s_nColorMaxDataRange_3ch+(float)nMinTotDescDist/s_nDescMaxDataRange_3ch)/2;
+				*pfCurrMeanMinDist_LT = (*pfCurrMeanMinDist_LT)*(1.0f-fRollAvgFactor_LT) + fNormalizedMinDist*fRollAvgFactor_LT;
+				*pfCurrMeanMinDist_ST = (*pfCurrMeanMinDist_ST)*(1.0f-fRollAvgFactor_ST) + fNormalizedMinDist*fRollAvgFactor_ST;
+				*pfCurrMeanRawSegmRes_LT = (*pfCurrMeanRawSegmRes_LT)*(1.0f-fRollAvgFactor_LT);
+				*pfCurrMeanRawSegmRes_ST = (*pfCurrMeanRawSegmRes_ST)*(1.0f-fRollAvgFactor_ST);
+				const size_t nLearningRate = learningRateOverride>0?(size_t)ceil(learningRateOverride):(size_t)ceil(*pfCurrLearningRate);
+				if((rand()%nLearningRate)==0) {
+					const size_t s_rand = rand()%m_nBGSamples;
+					for(size_t c=0; c<3; ++c) {
+						*((ushort*)(m_voBGDescSamples[s_rand].data+nDescIterRGB+2*c)) = anCurrIntraDesc[c];
+						*(m_voBGColorSamples[s_rand].data+nPxIterRGB+c) = anCurrColor[c];
+					}
+				}
+				int nSampleImgCoord_Y, nSampleImgCoord_X;
+				const bool bCurrUsing3x3Spread = m_bUse3x3Spread && !m_oUnstableRegionMask.data[nPxIter];
+				if(bCurrUsing3x3Spread)
+					getRandNeighborPosition_3x3(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+				else
+					getRandNeighborPosition_5x5(nSampleImgCoord_X,nSampleImgCoord_Y,nCurrImgCoord_X,nCurrImgCoord_Y,LBSP::PATCH_SIZE/2,m_oImgSize);
+				const size_t n_rand = rand();
+				const size_t idx_rand_uchar = m_oImgSize.width*nSampleImgCoord_Y + nSampleImgCoord_X;
+				const size_t idx_rand_flt32 = idx_rand_uchar*4;
+				const float fRandMeanLastDist = *((float*)(m_oMeanLastDistFrame.data+idx_rand_flt32));
+				const float fRandMeanRawSegmRes = *((float*)(m_oMeanRawSegmResFrame_ST.data+idx_rand_flt32));
+				if((n_rand%(bCurrUsing3x3Spread?nLearningRate:(nLearningRate/2+1)))==0
+					|| (fRandMeanRawSegmRes>GHOSTDET_S_MIN && fRandMeanLastDist<GHOSTDET_D_MAX && (n_rand%((size_t)m_fCurrLearningRateLowerCap))==0)) {
+					const size_t idx_rand_uchar_rgb = idx_rand_uchar*3;
+					const size_t idx_rand_ushrt_rgb = idx_rand_uchar_rgb*2;
+					const size_t s_rand = rand()%m_nBGSamples;
+					for(size_t c=0; c<3; ++c) {
+						*((ushort*)(m_voBGDescSamples[s_rand].data+idx_rand_ushrt_rgb+2*c)) = anCurrIntraDesc[c];
+						*(m_voBGColorSamples[s_rand].data+idx_rand_uchar_rgb+c) = anCurrColor[c];
+					}
+				}
+			}
+			if(m_oLastFGMask.data[nPxIter] || (std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)<UNSTABLE_REG_RATIO_MIN && oCurrFGMask.data[nPxIter])) {
+				if((*pfCurrLearningRate)<m_fCurrLearningRateUpperCap)
+					*pfCurrLearningRate += FEEDBACK_T_INCR/(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*(*pfCurrVariationFactor));
+			}
+			else if((*pfCurrLearningRate)>m_fCurrLearningRateLowerCap)
+				*pfCurrLearningRate -= FEEDBACK_T_DECR*(*pfCurrVariationFactor)/std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST);
+			if((*pfCurrLearningRate)<m_fCurrLearningRateLowerCap)
+				*pfCurrLearningRate = m_fCurrLearningRateLowerCap;
+			else if((*pfCurrLearningRate)>m_fCurrLearningRateUpperCap)
+				*pfCurrLearningRate = m_fCurrLearningRateUpperCap;
+			if(std::max(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)>UNSTABLE_REG_RATIO_MIN && m_oBlinksFrame.data[nPxIter])
+				(*pfCurrVariationFactor) += FEEDBACK_V_INCR;
+			else if((*pfCurrVariationFactor)>FEEDBACK_V_DECR) {
+				(*pfCurrVariationFactor) -= m_oLastFGMask.data[nPxIter]?FEEDBACK_V_DECR/4:m_oUnstableRegionMask.data[nPxIter]?FEEDBACK_V_DECR/2:FEEDBACK_V_DECR;
+				if((*pfCurrVariationFactor)<FEEDBACK_V_DECR)
+					(*pfCurrVariationFactor) = FEEDBACK_V_DECR;
+			}
+			if((*pfCurrDistThresholdFactor)<std::pow(1.0f+std::min(*pfCurrMeanMinDist_LT,*pfCurrMeanMinDist_ST)*2,2))
+				(*pfCurrDistThresholdFactor) += FEEDBACK_R_VAR*(*pfCurrVariationFactor-FEEDBACK_V_DECR);
+			else {
+				(*pfCurrDistThresholdFactor) -= FEEDBACK_R_VAR/(*pfCurrVariationFactor);
+				if((*pfCurrDistThresholdFactor)<1.0f)
+					(*pfCurrDistThresholdFactor) = 1.0f;
+			}
+			if(popcount<3>(anCurrIntraDesc)>=4)
+				++nNonZeroDescCount;
+			for(size_t c=0; c<3; ++c) {
+				anLastIntraDesc[c] = anCurrIntraDesc[c];
+				anLastColor[c] = anCurrColor[c];
+			}
+		}
+	}
+#if DISPLAY_SUBSENSE_DEBUG_INFO
+	std::cout << std::endl;
+	cv::Point dbgpt(nDebugCoordX,nDebugCoordY);
+	cv::Mat oMeanMinDistFrameNormalized; m_oMeanMinDistFrame_ST.copyTo(oMeanMinDistFrameNormalized);
+	cv::circle(oMeanMinDistFrameNormalized,dbgpt,5,cv::Scalar(1.0f));
+	cv::resize(oMeanMinDistFrameNormalized,oMeanMinDistFrameNormalized,DEFAULT_FRAME_SIZE);
+	cv::imshow("d_min(x)",oMeanMinDistFrameNormalized);
+	std::cout << std::fixed << std::setprecision(5) << "  d_min(" << dbgpt << ") = " << m_oMeanMinDistFrame_ST.at<float>(dbgpt) << std::endl;
+	cv::Mat oMeanLastDistFrameNormalized; m_oMeanLastDistFrame.copyTo(oMeanLastDistFrameNormalized);
+	cv::circle(oMeanLastDistFrameNormalized,dbgpt,5,cv::Scalar(1.0f));
+	cv::resize(oMeanLastDistFrameNormalized,oMeanLastDistFrameNormalized,DEFAULT_FRAME_SIZE);
+	cv::imshow("d_last(x)",oMeanLastDistFrameNormalized);
+	std::cout << std::fixed << std::setprecision(5) << " d_last(" << dbgpt << ") = " << m_oMeanLastDistFrame.at<float>(dbgpt) << std::endl;
+	cv::Mat oMeanRawSegmResFrameNormalized; m_oMeanRawSegmResFrame_ST.copyTo(oMeanRawSegmResFrameNormalized);
+	cv::circle(oMeanRawSegmResFrameNormalized,dbgpt,5,cv::Scalar(1.0f));
+	cv::resize(oMeanRawSegmResFrameNormalized,oMeanRawSegmResFrameNormalized,DEFAULT_FRAME_SIZE);
+	cv::imshow("s_avg(x)",oMeanRawSegmResFrameNormalized);
+	std::cout << std::fixed << std::setprecision(5) << "  s_avg(" << dbgpt << ") = " << m_oMeanRawSegmResFrame_ST.at<float>(dbgpt) << std::endl;
+	cv::Mat oMeanFinalSegmResFrameNormalized; m_oMeanFinalSegmResFrame_ST.copyTo(oMeanFinalSegmResFrameNormalized);
+	cv::circle(oMeanFinalSegmResFrameNormalized,dbgpt,5,cv::Scalar(1.0f));
+	cv::resize(oMeanFinalSegmResFrameNormalized,oMeanFinalSegmResFrameNormalized,DEFAULT_FRAME_SIZE);
+	cv::imshow("z_avg(x)",oMeanFinalSegmResFrameNormalized);
+	std::cout << std::fixed << std::setprecision(5) << "  z_avg(" << dbgpt << ") = " << m_oMeanFinalSegmResFrame_ST.at<float>(dbgpt) << std::endl;
+	cv::Mat oDistThresholdFrameNormalized; m_oDistThresholdFrame.convertTo(oDistThresholdFrameNormalized,CV_32FC1,0.25f,-0.25f);
+	cv::circle(oDistThresholdFrameNormalized,dbgpt,5,cv::Scalar(1.0f));
+	cv::resize(oDistThresholdFrameNormalized,oDistThresholdFrameNormalized,DEFAULT_FRAME_SIZE);
+	cv::imshow("r(x)",oDistThresholdFrameNormalized);
+	std::cout << std::fixed << std::setprecision(5) << "      r(" << dbgpt << ") = " << m_oDistThresholdFrame.at<float>(dbgpt) << std::endl;
+	cv::Mat oVariationModulatorFrameNormalized; cv::normalize(m_oVariationModulatorFrame,oVariationModulatorFrameNormalized,0,255,cv::NORM_MINMAX,CV_8UC1);
+	cv::circle(oVariationModulatorFrameNormalized,dbgpt,5,cv::Scalar(255));
+	cv::resize(oVariationModulatorFrameNormalized,oVariationModulatorFrameNormalized,DEFAULT_FRAME_SIZE);
+	cv::imshow("v(x)",oVariationModulatorFrameNormalized);
+	std::cout << std::fixed << std::setprecision(5) << "      v(" << dbgpt << ") = " << m_oVariationModulatorFrame.at<float>(dbgpt) << std::endl;
+	cv::Mat oUpdateRateFrameNormalized; m_oUpdateRateFrame.convertTo(oUpdateRateFrameNormalized,CV_32FC1,1.0f/FEEDBACK_T_UPPER,-FEEDBACK_T_LOWER/FEEDBACK_T_UPPER);
+	cv::circle(oUpdateRateFrameNormalized,dbgpt,5,cv::Scalar(1.0f));
+	cv::resize(oUpdateRateFrameNormalized,oUpdateRateFrameNormalized,DEFAULT_FRAME_SIZE);
+	cv::imshow("t(x)",oUpdateRateFrameNormalized);
+	std::cout << std::fixed << std::setprecision(5) << "      t(" << dbgpt << ") = " << m_oUpdateRateFrame.at<float>(dbgpt) << std::endl;
+#endif //DISPLAY_SUBSENSE_DEBUG_INFO
+	cv::bitwise_xor(oCurrFGMask,m_oLastRawFGMask,m_oCurrRawFGBlinkMask);
+	cv::bitwise_or(m_oCurrRawFGBlinkMask,m_oLastRawFGBlinkMask,m_oBlinksFrame);
+	m_oCurrRawFGBlinkMask.copyTo(m_oLastRawFGBlinkMask);
+	oCurrFGMask.copyTo(m_oLastRawFGMask);
+	cv::morphologyEx(oCurrFGMask,m_oFGMask_PreFlood,cv::MORPH_CLOSE,cv::Mat());
+	m_oFGMask_PreFlood.copyTo(m_oFGMask_FloodedHoles);
+	cv::floodFill(m_oFGMask_FloodedHoles,cv::Point(0,0),UCHAR_MAX);
+	cv::bitwise_not(m_oFGMask_FloodedHoles,m_oFGMask_FloodedHoles);
+	cv::erode(m_oFGMask_PreFlood,m_oFGMask_PreFlood,cv::Mat(),cv::Point(-1,-1),3);
+	cv::bitwise_or(oCurrFGMask,m_oFGMask_FloodedHoles,oCurrFGMask);
+	cv::bitwise_or(oCurrFGMask,m_oFGMask_PreFlood,oCurrFGMask);
+	cv::medianBlur(oCurrFGMask,m_oLastFGMask,m_nMedianBlurKernelSize);
+	cv::dilate(m_oLastFGMask,m_oLastFGMask_dilated,cv::Mat(),cv::Point(-1,-1),3);
+	cv::bitwise_and(m_oBlinksFrame,m_oLastFGMask_dilated_inverted,m_oBlinksFrame);
+	cv::bitwise_not(m_oLastFGMask_dilated,m_oLastFGMask_dilated_inverted);
+	cv::bitwise_and(m_oBlinksFrame,m_oLastFGMask_dilated_inverted,m_oBlinksFrame);
+	m_oLastFGMask.copyTo(oCurrFGMask);
+	cv::addWeighted(m_oMeanFinalSegmResFrame_LT,(1.0f-fRollAvgFactor_LT),m_oLastFGMask,(1.0/UCHAR_MAX)*fRollAvgFactor_LT,0,m_oMeanFinalSegmResFrame_LT,CV_32F);
+	cv::addWeighted(m_oMeanFinalSegmResFrame_ST,(1.0f-fRollAvgFactor_ST),m_oLastFGMask,(1.0/UCHAR_MAX)*fRollAvgFactor_ST,0,m_oMeanFinalSegmResFrame_ST,CV_32F);
+	const float fCurrNonZeroDescRatio = (float)nNonZeroDescCount/m_nTotRelevantPxCount;
+	if(fCurrNonZeroDescRatio<LBSPDESC_NONZERO_RATIO_MIN && m_fLastNonZeroDescRatio<LBSPDESC_NONZERO_RATIO_MIN) {
+	    for(size_t t=0; t<=UCHAR_MAX; ++t)
+	        if(m_anLBSPThreshold_8bitLUT[t]>cv::saturate_cast<uchar>(m_nLBSPThresholdOffset+ceil(t*m_fRelLBSPThreshold/4)))
+	            --m_anLBSPThreshold_8bitLUT[t];
+	}
+	else if(fCurrNonZeroDescRatio>LBSPDESC_NONZERO_RATIO_MAX && m_fLastNonZeroDescRatio>LBSPDESC_NONZERO_RATIO_MAX) {
+	    for(size_t t=0; t<=UCHAR_MAX; ++t)
+	        if(m_anLBSPThreshold_8bitLUT[t]<cv::saturate_cast<uchar>(m_nLBSPThresholdOffset+UCHAR_MAX*m_fRelLBSPThreshold))
+	            ++m_anLBSPThreshold_8bitLUT[t];
+	}
+	m_fLastNonZeroDescRatio = fCurrNonZeroDescRatio;
+	if(m_bLearningRateScalingEnabled) {
+		cv::resize(oInputImg,m_oDownSampledFrame_MotionAnalysis,m_oDownSampledFrameSize,0,0,cv::INTER_AREA);
+		cv::accumulateWeighted(m_oDownSampledFrame_MotionAnalysis,m_oMeanDownSampledLastDistFrame_LT,fRollAvgFactor_LT);
+		cv::accumulateWeighted(m_oDownSampledFrame_MotionAnalysis,m_oMeanDownSampledLastDistFrame_ST,fRollAvgFactor_ST);
+		size_t nTotColorDiff = 0;
+		for(int i=0; i<m_oMeanDownSampledLastDistFrame_ST.rows; ++i) {
+			const size_t idx1 = m_oMeanDownSampledLastDistFrame_ST.step.p[0]*i;
+			for(int j=0; j<m_oMeanDownSampledLastDistFrame_ST.cols; ++j) {
+				const size_t idx2 = idx1+m_oMeanDownSampledLastDistFrame_ST.step.p[1]*j;
+				nTotColorDiff += (m_nImgChannels==1)?
+					(size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2)))/2
+							:  //(m_nImgChannels==3)
+						std::max((size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2))),
+							std::max((size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2+4))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2+4))),
+										(size_t)fabs((*(float*)(m_oMeanDownSampledLastDistFrame_ST.data+idx2+8))-(*(float*)(m_oMeanDownSampledLastDistFrame_LT.data+idx2+8)))));
+			}
+		}
+		const float fCurrColorDiffRatio = (float)nTotColorDiff/(m_oMeanDownSampledLastDistFrame_ST.rows*m_oMeanDownSampledLastDistFrame_ST.cols);
+		if(m_bAutoModelResetEnabled) {
+			if(m_nFramesSinceLastReset>1000)
+				m_bAutoModelResetEnabled = false;
+			else if(fCurrColorDiffRatio>=FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD && m_nModelResetCooldown==0) {
+				m_nFramesSinceLastReset = 0;
+				refreshModel(0.1f); // reset 10% of the bg model
+				m_nModelResetCooldown = m_nSamplesForMovingAvgs/4;
+				m_oUpdateRateFrame = cv::Scalar(1.0f);
+			}
+			else
+				++m_nFramesSinceLastReset;
+		}
+		else if(fCurrColorDiffRatio>=FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD*2) {
+			m_nFramesSinceLastReset = 0;
+			m_bAutoModelResetEnabled = true;
+		}
+		if(fCurrColorDiffRatio>=FRAMELEVEL_MIN_COLOR_DIFF_THRESHOLD/2) {
+			m_fCurrLearningRateLowerCap = (float)std::max((int)FEEDBACK_T_LOWER>>(int)(fCurrColorDiffRatio/2),1);
+			m_fCurrLearningRateUpperCap = (float)std::max((int)FEEDBACK_T_UPPER>>(int)(fCurrColorDiffRatio/2),1);
+		}
+		else {
+			m_fCurrLearningRateLowerCap = FEEDBACK_T_LOWER;
+			m_fCurrLearningRateUpperCap = FEEDBACK_T_UPPER;
+		}
+		if(m_nModelResetCooldown>0)
+			--m_nModelResetCooldown;
+	}
+}
+
+// Reconstructs an approximate background image by averaging, per pixel and per
+// channel, the color values of all m_nBGSamples background model samples; the
+// float accumulator is converted back to CV_8U for the caller.
+void BackgroundSubtractorSuBSENSE::getBackgroundImage(cv::OutputArray backgroundImage) const {
+	CV_Assert(m_bInitialized);
+	// CV_32F accumulator: summing m_nBGSamples uchar fractions in-place would lose precision in a uchar mat
+	cv::Mat oAvgBGImg = cv::Mat::zeros(m_oImgSize,CV_32FC((int)m_nImgChannels));
+	for(size_t s=0; s<m_nBGSamples; ++s) {
+		for(int y=0; y<m_oImgSize.height; ++y) {
+			for(int x=0; x<m_oImgSize.width; ++x) {
+				// byte offset of pixel (x,y) in the uchar sample mat (1 byte per channel)
+				const size_t idx_nimg = m_voBGColorSamples[s].step.p[0]*y + m_voBGColorSamples[s].step.p[1]*x;
+				// matching byte offset in the float accumulator (4 bytes per channel);
+				// NOTE(review): assumes both mats are continuous with identical row padding — TODO confirm
+				const size_t nFloatIter = idx_nimg*4;
+				float* oAvgBgImgPtr = (float*)(oAvgBGImg.data+nFloatIter);
+				const uchar* const oBGImgPtr = m_voBGColorSamples[s].data+idx_nimg;
+				for(size_t c=0; c<m_nImgChannels; ++c)
+					oAvgBgImgPtr[c] += ((float)oBGImgPtr[c])/m_nBGSamples;
+			}
+		}
+	}
+	oAvgBGImg.convertTo(backgroundImage,CV_8U);
+}
+
+// Reconstructs an averaged LBSP descriptor image from all background descriptor
+// samples; each 16-bit descriptor channel is averaged in float and returned as CV_16U.
+void BackgroundSubtractorSuBSENSE::getBackgroundDescriptorsImage(cv::OutputArray backgroundDescImage) const {
+	CV_Assert(LBSP::DESC_SIZE==2); // offset math below hard-codes 2-byte (ushort) descriptors
+	CV_Assert(m_bInitialized);
+	cv::Mat oAvgBGDesc = cv::Mat::zeros(m_oImgSize,CV_32FC((int)m_nImgChannels));
+	for(size_t n=0; n<m_voBGDescSamples.size(); ++n) {
+		for(int y=0; y<m_oImgSize.height; ++y) {
+			for(int x=0; x<m_oImgSize.width; ++x) {
+				// byte offset of pixel (x,y) in the ushort descriptor sample mat
+				const size_t idx_ndesc = m_voBGDescSamples[n].step.p[0]*y + m_voBGDescSamples[n].step.p[1]*x;
+				// float accumulator offset: 4-byte float per 2-byte ushort => scale by 2
+				const size_t nFloatIter = idx_ndesc*2;
+				float* oAvgBgDescPtr = (float*)(oAvgBGDesc.data+nFloatIter);
+				const ushort* const oBGDescPtr = (ushort*)(m_voBGDescSamples[n].data+idx_ndesc);
+				for(size_t c=0; c<m_nImgChannels; ++c)
+					oAvgBgDescPtr[c] += ((float)oBGDescPtr[c])/m_voBGDescSamples.size();
+			}
+		}
+	}
+	oAvgBGDesc.convertTo(backgroundDescImage,CV_16U);
+}
diff --git a/package_bgs/pl/BackgroundSubtractorSuBSENSE.h b/package_bgs/pl/BackgroundSubtractorSuBSENSE.h
new file mode 100644
index 0000000000000000000000000000000000000000..f8424ad1761219c8f833e598d6b2cf1e0051c57f
--- /dev/null
+++ b/package_bgs/pl/BackgroundSubtractorSuBSENSE.h
@@ -0,0 +1,113 @@
+#pragma once
+
+#include "BackgroundSubtractorLBSP.h"
+
+//! defines the default value for BackgroundSubtractorLBSP::m_fRelLBSPThreshold
+#define BGSSUBSENSE_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD (0.333f)
+//! defines the default value for BackgroundSubtractorSuBSENSE::m_nDescDistThresholdOffset
+#define BGSSUBSENSE_DEFAULT_DESC_DIST_THRESHOLD_OFFSET (3)
+//! defines the default value for BackgroundSubtractorSuBSENSE::m_nMinColorDistThreshold
+#define BGSSUBSENSE_DEFAULT_MIN_COLOR_DIST_THRESHOLD (30)
+//! defines the default value for BackgroundSubtractorSuBSENSE::m_nBGSamples
+#define BGSSUBSENSE_DEFAULT_NB_BG_SAMPLES (50)
+//! defines the default value for BackgroundSubtractorSuBSENSE::m_nRequiredBGSamples
+#define BGSSUBSENSE_DEFAULT_REQUIRED_NB_BG_SAMPLES (2)
+//! defines the default value for BackgroundSubtractorSuBSENSE::m_nSamplesForMovingAvgs
+#define BGSSUBSENSE_DEFAULT_N_SAMPLES_FOR_MV_AVGS (100)
+
+/*!
+	Self-Balanced Sensitivity segmenTER (SuBSENSE) change detection algorithm.
+
+	Note: both grayscale and RGB/BGR images may be used with this extractor (parameters are adjusted automatically).
+	For optimal grayscale results, use CV_8UC1 frames instead of CV_8UC3.
+
+	For more details on the different parameters or on the algorithm itself, see P.-L. St-Charles et al.,
+	"Flexible Background Subtraction With Self-Balanced Local Sensitivity", in CVPRW 2014.
+
+	This algorithm is currently NOT thread-safe.
+ */
+class BackgroundSubtractorSuBSENSE : public BackgroundSubtractorLBSP {
+public:
+	//! full constructor
+	BackgroundSubtractorSuBSENSE(	float fRelLBSPThreshold=BGSSUBSENSE_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD,
+									size_t nDescDistThresholdOffset=BGSSUBSENSE_DEFAULT_DESC_DIST_THRESHOLD_OFFSET,
+									size_t nMinColorDistThreshold=BGSSUBSENSE_DEFAULT_MIN_COLOR_DIST_THRESHOLD,
+									size_t nBGSamples=BGSSUBSENSE_DEFAULT_NB_BG_SAMPLES,
+									size_t nRequiredBGSamples=BGSSUBSENSE_DEFAULT_REQUIRED_NB_BG_SAMPLES,
+									size_t nSamplesForMovingAvgs=BGSSUBSENSE_DEFAULT_N_SAMPLES_FOR_MV_AVGS);
+	//! default destructor
+	virtual ~BackgroundSubtractorSuBSENSE();
+	//! (re)initialization method; needs to be called before starting background subtraction
+	virtual void initialize(const cv::Mat& oInitImg, const cv::Mat& oROI);
+	//! refreshes all samples based on the last analyzed frame
+	virtual void refreshModel(float fSamplesRefreshFrac, bool bForceFGUpdate=false);
+	//! primary model update function; the learning param is used to override the internal learning thresholds (ignored when <= 0)
+	virtual void operator()(cv::InputArray image, cv::OutputArray fgmask, double learningRateOverride=0);
+	//! returns a copy of the latest reconstructed background image
+	void getBackgroundImage(cv::OutputArray backgroundImage) const;
+	//! returns a copy of the latest reconstructed background descriptors image
+	void getBackgroundDescriptorsImage(cv::OutputArray backgroundDescImage) const;
+
+protected:
+	//! absolute minimal color distance threshold ('R' or 'radius' in the original ViBe paper, used as the default/initial 'R(x)' value here)
+	const size_t m_nMinColorDistThreshold;
+	//! absolute descriptor distance threshold offset
+	const size_t m_nDescDistThresholdOffset;
+	//! number of different samples per pixel/block to be taken from input frames to build the background model (same as 'N' in ViBe/PBAS)
+	const size_t m_nBGSamples;
+	//! number of similar samples needed to consider the current pixel/block as 'background' (same as '#_min' in ViBe/PBAS)
+	const size_t m_nRequiredBGSamples;
+	//! number of samples to use to compute the learning rate of moving averages
+	const size_t m_nSamplesForMovingAvgs;
+	//! last calculated non-zero desc ratio
+	float m_fLastNonZeroDescRatio;
+	//! specifies whether Tmin/Tmax scaling is enabled or not
+	bool m_bLearningRateScalingEnabled;
+	//! current learning rate caps
+	float m_fCurrLearningRateLowerCap, m_fCurrLearningRateUpperCap;
+	//! current kernel size for median blur post-proc filtering
+	int m_nMedianBlurKernelSize;
+	//! specifies the px update spread range
+	bool m_bUse3x3Spread;
+	//! specifies the downsampled frame size used for cam motion analysis
+	cv::Size m_oDownSampledFrameSize;
+
+	//! background model pixel color intensity samples (equivalent to 'B(x)' in PBAS)
+	std::vector<cv::Mat> m_voBGColorSamples;
+	//! background model descriptors samples
+	std::vector<cv::Mat> m_voBGDescSamples;
+
+	//! per-pixel update rates ('T(x)' in PBAS, which contains pixel-level 'sigmas', as referred to in ViBe)
+	cv::Mat m_oUpdateRateFrame;
+	//! per-pixel distance thresholds (equivalent to 'R(x)' in PBAS, but used as a relative value to determine both intensity and descriptor variation thresholds)
+	cv::Mat m_oDistThresholdFrame;
+	//! per-pixel distance variation modulators ('v(x)', relative value used to modulate 'R(x)' and 'T(x)' variations)
+	cv::Mat m_oVariationModulatorFrame;
+	//! per-pixel mean distances between consecutive frames ('D_last(x)', used to detect ghosts and high variation regions in the sequence)
+	cv::Mat m_oMeanLastDistFrame;
+	//! per-pixel mean minimal distances from the model ('D_min(x)' in PBAS, used to control variation magnitude and direction of 'T(x)' and 'R(x)')
+	cv::Mat m_oMeanMinDistFrame_LT, m_oMeanMinDistFrame_ST;
+	//! per-pixel mean downsampled distances between consecutive frames (used to analyze camera movement and control max learning rates globally)
+	cv::Mat m_oMeanDownSampledLastDistFrame_LT, m_oMeanDownSampledLastDistFrame_ST;
+	//! per-pixel mean raw segmentation results (used to detect unstable segmentation regions)
+	cv::Mat m_oMeanRawSegmResFrame_LT, m_oMeanRawSegmResFrame_ST;
+	//! per-pixel mean final (post-processed) segmentation results (used to detect unstable segmentation regions)
+	cv::Mat m_oMeanFinalSegmResFrame_LT, m_oMeanFinalSegmResFrame_ST;
+	//! a lookup map used to keep track of unstable regions (based on segm. noise & local dist. thresholds)
+	cv::Mat m_oUnstableRegionMask;
+	//! per-pixel blink detection map ('Z(x)')
+	cv::Mat m_oBlinksFrame;
+	//! pre-allocated matrix used to downsample the input frame when needed
+	cv::Mat m_oDownSampledFrame_MotionAnalysis;
+	//! the foreground mask generated by the method at [t-1] (without post-proc, used for blinking px detection)
+	cv::Mat m_oLastRawFGMask;
+
+	//! pre-allocated CV_8UC1 matrices used to speed up morph ops
+	cv::Mat m_oFGMask_PreFlood;
+	cv::Mat m_oFGMask_FloodedHoles;
+	cv::Mat m_oLastFGMask_dilated;
+	cv::Mat m_oLastFGMask_dilated_inverted;
+	cv::Mat m_oCurrRawFGBlinkMask;
+	cv::Mat m_oLastRawFGBlinkMask;
+};
+
diff --git a/package_bgs/pl/DistanceUtils.h b/package_bgs/pl/DistanceUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..54d1cecd933b45a036c8222bed594d0b2305ef89
--- /dev/null
+++ b/package_bgs/pl/DistanceUtils.h
@@ -0,0 +1,316 @@
+#pragma once
+
+#include <opencv2/core/types_c.h>
+
+//! computes the L1 distance between two integer values
+template<typename T> static inline typename std::enable_if<std::is_integral<T>::value,size_t>::type L1dist(T a, T b) {
+	// NOTE(review): the (int) cast truncates for types wider than int — fine for 8/16-bit pixel data
+	return (size_t)abs((int)a-b);
+}
+
+//! computes the L1 distance between two float values
+template<typename T> static inline typename std::enable_if<std::is_floating_point<T>::value,float>::type L1dist(T a, T b) {
+	return fabs((float)a-(float)b);
+}
+
+//! computes the channel-wise L1 distance between two nChannels-element pixels (sum over channels)
+template<size_t nChannels, typename T> static inline auto L1dist(const T* a, const T* b) -> decltype(L1dist(*a,*b)) {
+	decltype(L1dist(*a,*b)) oResult = 0;
+	for(size_t c=0; c<nChannels; ++c)
+		oResult += L1dist(a[c],b[c]);
+	return oResult;
+}
+
+//! computes the total L1 distance between two interleaved nElements-long buffers, with an optional per-element mask 'm' (element skipped when m[i]==0)
+template<size_t nChannels, typename T> static inline auto L1dist(const T* a, const T* b, size_t nElements, const uchar* m=NULL) -> decltype(L1dist<nChannels>(a,b)) {
+	decltype(L1dist<nChannels>(a,b)) oResult = 0;
+	size_t nTotElements = nElements*nChannels;
+	if(m) {
+		for(size_t n=0,i=0; n<nTotElements; n+=nChannels,++i)
+			if(m[i])
+				oResult += L1dist<nChannels>(a+n,b+n);
+	}
+	else {
+		for(size_t n=0; n<nTotElements; n+=nChannels)
+			oResult += L1dist<nChannels>(a+n,b+n);
+	}
+	return oResult;
+}
+
+//! computes the total L1 distance between two buffers, with the channel count resolved at runtime (1..4); dispatches to the compile-time versions above
+template<typename T> static inline auto L1dist(const T* a, const T* b, size_t nElements, size_t nChannels, const uchar* m=NULL) -> decltype(L1dist<3>(a,b,nElements,m)) {
+	CV_Assert(nChannels>0 && nChannels<=4);
+	switch(nChannels) {
+		case 1: return L1dist<1>(a,b,nElements,m);
+		case 2: return L1dist<2>(a,b,nElements,m);
+		case 3: return L1dist<3>(a,b,nElements,m);
+		case 4: return L1dist<4>(a,b,nElements,m);
+		default: return 0; // unreachable (guarded by the assert above)
+	}
+}
+
+//! computes the L1 distance between two opencv vectors (unpacked into plain arrays first)
+template<size_t nChannels, typename T> static inline auto L1dist_(const cv::Vec<T,nChannels>& a, const cv::Vec<T,nChannels>& b) -> decltype(L1dist<nChannels,T>((T*)(0),(T*)(0))) {
+	T a_array[nChannels], b_array[nChannels];
+	for(size_t c=0; c<nChannels; ++c) {
+		a_array[c] = a[(int)c];
+		b_array[c] = b[(int)c];
+	}
+	return L1dist<nChannels>(a_array,b_array);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+//! computes the squared L2 distance between two generic scalar values (i.e. the squared L1 distance)
+template<typename T> static inline auto L2sqrdist(T a, T b) -> decltype(L1dist(a,b)) {
+	auto oResult = L1dist(a,b);
+	return oResult*oResult;
+}
+
+//! computes the squared L2 distance between two nChannels-element pixels (sum of per-channel squared diffs)
+template<size_t nChannels, typename T> static inline auto L2sqrdist(const T* a, const T* b) -> decltype(L2sqrdist(*a,*b)) {
+	decltype(L2sqrdist(*a,*b)) oResult = 0;
+	for(size_t c=0; c<nChannels; ++c)
+		oResult += L2sqrdist(a[c],b[c]);
+	return oResult;
+}
+
+//! computes the total squared L2 distance between two interleaved nElements-long buffers, with an optional per-element mask 'm' (element skipped when m[i]==0)
+template<size_t nChannels, typename T> static inline auto L2sqrdist(const T* a, const T* b, size_t nElements, const uchar* m=NULL) -> decltype(L2sqrdist<nChannels>(a,b)) {
+	decltype(L2sqrdist<nChannels>(a,b)) oResult = 0;
+	size_t nTotElements = nElements*nChannels;
+	if(m) {
+		for(size_t n=0,i=0; n<nTotElements; n+=nChannels,++i)
+			if(m[i])
+				oResult += L2sqrdist<nChannels>(a+n,b+n);
+	}
+	else {
+		for(size_t n=0; n<nTotElements; n+=nChannels)
+			oResult += L2sqrdist<nChannels>(a+n,b+n);
+	}
+	return oResult;
+}
+
+//! computes the total squared L2 distance between two buffers, with the channel count resolved at runtime (1..4)
+template<typename T> static inline auto L2sqrdist(const T* a, const T* b, size_t nElements, size_t nChannels, const uchar* m=NULL) -> decltype(L2sqrdist<3>(a,b,nElements,m)) {
+	CV_Assert(nChannels>0 && nChannels<=4);
+	switch(nChannels) {
+		case 1: return L2sqrdist<1>(a,b,nElements,m);
+		case 2: return L2sqrdist<2>(a,b,nElements,m);
+		case 3: return L2sqrdist<3>(a,b,nElements,m);
+		case 4: return L2sqrdist<4>(a,b,nElements,m);
+		default: return 0; // unreachable (guarded by the assert above)
+	}
+}
+
+//! computes the squared L2 distance between two opencv vectors (unpacked into plain arrays first)
+template<size_t nChannels, typename T> static inline auto L2sqrdist_(const cv::Vec<T,nChannels>& a, const cv::Vec<T,nChannels>& b) -> decltype(L2sqrdist<nChannels,T>((T*)(0),(T*)(0))) {
+	T a_array[nChannels], b_array[nChannels];
+	for(size_t c=0; c<nChannels; ++c) {
+		a_array[c] = a[(int)c];
+		b_array[c] = b[(int)c];
+	}
+	return L2sqrdist<nChannels>(a_array,b_array);
+}
+
+//! computes the L2 distance between two nChannels-element pixels
+template<size_t nChannels, typename T> static inline float L2dist(const T* a, const T* b) {
+	decltype(L2sqrdist(*a,*b)) oResult = 0;
+	for(size_t c=0; c<nChannels; ++c)
+		oResult += L2sqrdist(a[c],b[c]);
+	return sqrt((float)oResult);
+}
+
+//! computes the L2 norm of the difference between two interleaved nElements-long buffers, with an optional per-element mask 'm';
+//! note: squared element distances are summed globally and a single sqrt is taken at the end (not a sum of per-element L2 distances)
+template<size_t nChannels, typename T> static inline float L2dist(const T* a, const T* b, size_t nElements, const uchar* m=NULL) {
+	decltype(L2sqrdist<nChannels>(a,b)) oResult = 0;
+	size_t nTotElements = nElements*nChannels;
+	if(m) {
+		for(size_t n=0,i=0; n<nTotElements; n+=nChannels,++i)
+			if(m[i])
+				oResult += L2sqrdist<nChannels>(a+n,b+n);
+	}
+	else {
+		for(size_t n=0; n<nTotElements; n+=nChannels)
+			oResult += L2sqrdist<nChannels>(a+n,b+n);
+	}
+	return sqrt((float)oResult);
+}
+
+//! computes the L2 distance between two buffers, with the channel count resolved at runtime (1..4)
+template<typename T> static inline float L2dist(const T* a, const T* b, size_t nElements, size_t nChannels, const uchar* m=NULL) {
+	CV_Assert(nChannels>0 && nChannels<=4);
+	switch(nChannels) {
+		case 1: return L2dist<1>(a,b,nElements,m);
+		case 2: return L2dist<2>(a,b,nElements,m);
+		case 3: return L2dist<3>(a,b,nElements,m);
+		case 4: return L2dist<4>(a,b,nElements,m);
+		default: return 0; // unreachable (guarded by the assert above)
+	}
+}
+
+//! computes the L2 distance between two opencv vectors (unpacked into plain arrays first)
+template<size_t nChannels, typename T> static inline float L2dist_(const cv::Vec<T,nChannels>& a, const cv::Vec<T,nChannels>& b) {
+	T a_array[nChannels], b_array[nChannels];
+	for(size_t c=0; c<nChannels; ++c) {
+		a_array[c] = a[(int)c];
+		b_array[c] = b[(int)c];
+	}
+	return L2dist<nChannels>(a_array,b_array);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+//! computes the color distortion between two integer arrays;
+//! geometrically, sqrt(|curr|^2 - (curr.bg)^2/|bg|^2) is the distance from 'curr' to the line through the origin and 'bg'
+template<size_t nChannels, typename T> static inline typename std::enable_if<std::is_integral<T>::value,size_t>::type cdist(const T* curr, const T* bg) {
+	static_assert(nChannels>1,"cdist: requires more than one channel");
+	size_t curr_sqr = 0;
+	bool bSkip = true;
+	for(size_t c=0; c<nChannels; ++c) {
+		curr_sqr += curr[c]*curr[c];
+		bSkip = bSkip&(bg[c]<=0); // bSkip stays true only if every bg channel is <=0
+	}
+	if(bSkip) // bg is the null vector: the chroma line is undefined, fall back to |curr| (also avoids div-by-zero below)
+		return (size_t)sqrt((float)curr_sqr);
+	size_t bg_sqr = 0;
+	size_t mix = 0;
+	for(size_t c=0; c<nChannels; ++c) {
+		bg_sqr += bg[c]*bg[c];
+		mix += curr[c]*bg[c];
+	}
+	return (size_t)sqrt(curr_sqr-((float)(mix*mix)/bg_sqr));
+}
+
+//! computes the color distortion between two float arrays (same geometry as the integer overload)
+template<size_t nChannels, typename T> static inline typename std::enable_if<std::is_floating_point<T>::value,float>::type cdist(const T* curr, const T* bg) {
+	static_assert(nChannels>1,"cdist: requires more than one channel");
+	float curr_sqr = 0;
+	bool bSkip = true;
+	for(size_t c=0; c<nChannels; ++c) {
+		curr_sqr += (float)curr[c]*curr[c];
+		bSkip = bSkip&(bg[c]<=0);
+	}
+	if(bSkip) // bg is the null vector: fall back to |curr|
+		return sqrt(curr_sqr);
+	float bg_sqr = 0;
+	float mix = 0;
+	for(size_t c=0; c<nChannels; ++c) {
+		bg_sqr += (float)bg[c]*bg[c];
+		mix += (float)curr[c]*bg[c];
+	}
+	return sqrt(curr_sqr-((mix*mix)/bg_sqr));
+}
+
+//! computes the total color distortion between two interleaved nElements-long buffers, with an optional per-element mask 'm' (element skipped when m[i]==0)
+template<size_t nChannels, typename T> static inline auto cdist(const T* a, const T* b, size_t nElements, const uchar* m=NULL) -> decltype(cdist<nChannels>(a,b)) {
+	decltype(cdist<nChannels>(a,b)) oResult = 0;
+	size_t nTotElements = nElements*nChannels;
+	if(m) {
+		for(size_t n=0,i=0; n<nTotElements; n+=nChannels,++i)
+			if(m[i])
+				oResult += cdist<nChannels>(a+n,b+n);
+	}
+	else {
+		for(size_t n=0; n<nTotElements; n+=nChannels)
+			oResult += cdist<nChannels>(a+n,b+n);
+	}
+	return oResult;
+}
+
+//! computes the total color distortion between two buffers, with the channel count resolved at runtime (2..4; cdist needs >1 channel)
+template<typename T> static inline auto cdist(const T* a, const T* b, size_t nElements, size_t nChannels, const uchar* m=NULL) -> decltype(cdist<3>(a,b,nElements,m)) {
+	CV_Assert(nChannels>1 && nChannels<=4);
+	switch(nChannels) {
+		case 2: return cdist<2>(a,b,nElements,m);
+		case 3: return cdist<3>(a,b,nElements,m);
+		case 4: return cdist<4>(a,b,nElements,m);
+		default: return 0; // unreachable (guarded by the assert above)
+	}
+}
+
+//! computes the color distortion between two opencv vectors (unpacked into plain arrays first)
+template<size_t nChannels, typename T> static inline auto cdist_(const cv::Vec<T,nChannels>& a, const cv::Vec<T,nChannels>& b) -> decltype(cdist<nChannels,T>((T*)(0),(T*)(0))) {
+	T a_array[nChannels], b_array[nChannels];
+	for(size_t c=0; c<nChannels; ++c) {
+		a_array[c] = a[(int)c];
+		b_array[c] = b[(int)c];
+	}
+	return cdist<nChannels>(a_array,b_array);
+}
+
+//! computes a color distortion-distance mix using two generic distances (fixed 1:8 weighting in favor of distortion)
+template<typename T> static inline T cmixdist(T oL1Distance, T oCDistortion) {
+	return (oL1Distance/2+oCDistortion*4);
+}
+
+//! computes a color distortion-distance mix using two generic integer arrays
+template<size_t nChannels, typename T> static inline typename std::enable_if<std::is_integral<T>::value,size_t>::type cmixdist(const T* curr, const T* bg) {
+	return cmixdist(L1dist<nChannels>(curr,bg),cdist<nChannels>(curr,bg));
+}
+
+//! computes a color distortion-distance mix using two generic float arrays
+template<size_t nChannels, typename T> static inline typename std::enable_if<std::is_floating_point<T>::value,float>::type cmixdist(const T* curr, const T* bg) {
+	return cmixdist(L1dist<nChannels>(curr,bg),cdist<nChannels>(curr,bg));
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
+//! popcount LUT for 8-bit vectors (entry i == number of set bits in i)
+static const uchar popcount_LUT8[256] = {
+	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
+//! computes the population count of an N-byte vector using an 8-bit popcount LUT (one LUT lookup per byte of T)
+template<typename T> static inline size_t popcount(T x) {
+	size_t nBytes = sizeof(T);
+	size_t nResult = 0;
+	for(size_t l=0; l<nBytes; ++l)
+		nResult += popcount_LUT8[(uchar)(x>>l*8)];
+	return nResult;
+}
+
+//! computes the hamming distance between two N-byte vectors (popcount of the xor) using an 8-bit popcount LUT
+template<typename T> static inline size_t hdist(T a, T b) {
+	return popcount(a^b);
+}
+
+//! computes the gradient magnitude distance between two N-byte vectors (|popcount(a)-popcount(b)|) using an 8-bit popcount LUT
+template<typename T> static inline size_t gdist(T a, T b) {
+	return L1dist(popcount(a),popcount(b));
+}
+
+//! computes the population count of a (nChannels*N)-byte vector using an 8-bit popcount LUT
+template<size_t nChannels, typename T> static inline size_t popcount(const T* x) {
+	size_t nBytes = sizeof(T);
+	size_t nResult = 0;
+	for(size_t c=0; c<nChannels; ++c)
+		for(size_t l=0; l<nBytes; ++l)
+			nResult += popcount_LUT8[(uchar)(*(x+c)>>l*8)];
+	return nResult;
+}
+
+//! computes the hamming distance between two (nChannels*N)-byte vectors using an 8-bit popcount LUT
+template<size_t nChannels, typename T> static inline size_t hdist(const T* a, const T* b) {
+	T xor_array[nChannels];
+	for(size_t c=0; c<nChannels; ++c)
+		xor_array[c] = a[c]^b[c];
+	return popcount<nChannels>(xor_array);
+}
+
+//! computes the gradient magnitude distance between two (nChannels*N)-byte vectors using an 8-bit popcount LUT
+template<size_t nChannels, typename T> static inline size_t gdist(const T* a, const T* b) {
+	return L1dist(popcount<nChannels>(a),popcount<nChannels>(b));
+}
diff --git a/package_bgs/pl/LBSP.cpp b/package_bgs/pl/LBSP.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..de35f1a09bd8b4d0b762b390475c111916e0be11
--- /dev/null
+++ b/package_bgs/pl/LBSP.cpp
@@ -0,0 +1,318 @@
+#include "LBSP.h"
+
+// absolute-threshold constructor: every LBSP comparison will use the same
+// fixed intensity threshold 'nThreshold'
+LBSP::LBSP(size_t nThreshold)
+	:	 m_bOnlyUsingAbsThreshold(true)
+		,m_fRelThreshold(0) // unused
+		,m_nThreshold(nThreshold)
+		,m_oRefImage() {}
+
+// relative-threshold constructor: the comparison threshold is derived from the
+// local reference intensity at compute time; m_nThreshold is reused here to
+// store the constant offset added on top of the relative threshold
+LBSP::LBSP(float fRelThreshold, size_t nThresholdOffset)
+	:	 m_bOnlyUsingAbsThreshold(false)
+		,m_fRelThreshold(fRelThreshold)
+		,m_nThreshold(nThresholdOffset)
+		,m_oRefImage() {
+	CV_Assert(m_fRelThreshold>=0);
+}
+
+LBSP::~LBSP() {}
+
+// parameter (de)serialization is intentionally not implemented for this extractor
+void LBSP::read(const cv::FileNode& /*fn*/) {
+    // ... = fn["..."];
+}
+
+void LBSP::write(cv::FileStorage& /*fs*/) const {
+    //fs << "..." << ...;
+}
+
+// sets the 'reference' image used for inter-frame comparisons; passing an
+// empty image switches the extractor back to intra-frame (self) comparisons
+void LBSP::setReference(const cv::Mat& img) {
+	CV_DbgAssert(img.empty() || img.type()==CV_8UC1 || img.type()==CV_8UC3);
+	m_oRefImage = img;
+}
+
+// descriptor size in bytes (see LBSP::DESC_SIZE)
+int LBSP::descriptorSize() const {
+	return DESC_SIZE;
+}
+
+// descriptors are stored as 16-bit unsigned values
+int LBSP::descriptorType() const {
+	return CV_16U;
+}
+
+bool LBSP::isUsingRelThreshold() const {
+	return !m_bOnlyUsingAbsThreshold;
+}
+
+float LBSP::getRelThreshold() const {
+	return m_fRelThreshold;
+}
+
+// in relative mode this value is the additive threshold offset (see ctor 2)
+size_t LBSP::getAbsThreshold() const {
+	return m_nThreshold;
+}
+
+// computes one LBSP descriptor per keypoint into a (nKeyPoints x 1) matrix,
+// using a single absolute threshold '_t' for all pixels/channels; the included
+// "LBSP_16bits_dbcross_*.i" fragments expand into the 16-bit pattern expression
+// and rely on the exact local names defined below (_data,_step_row,_x,_y,_ref,_t,_res)
+static inline void lbsp_computeImpl(	const cv::Mat& oInputImg,
+										const cv::Mat& oRefImg,
+										const std::vector<cv::KeyPoint>& voKeyPoints,
+										cv::Mat& oDesc,
+										size_t _t) {
+	CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
+	CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
+	CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+	const size_t nChannels = (size_t)oInputImg.channels();
+	const size_t _step_row = oInputImg.step.p[0];
+	const uchar* _data = oInputImg.data;
+	// with no reference image, the input is compared against itself (intra-frame LBSP)
+	const uchar* _refdata = oRefImg.empty()?oInputImg.data:oRefImg.data;
+	const size_t nKeyPoints = voKeyPoints.size();
+	if(nChannels==1) {
+		oDesc.create((int)nKeyPoints,1,CV_16UC1);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar _ref = _refdata[_step_row*(_y)+_x];
+			ushort& _res = oDesc.at<ushort>((int)k);
+			#include "LBSP_16bits_dbcross_1ch.i"
+		}
+	}
+	else { //nChannels==3
+		oDesc.create((int)nKeyPoints,1,CV_16UC3);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar* _ref = _refdata+_step_row*(_y)+3*(_x);
+			ushort* _res = ((ushort*)(oDesc.data + oDesc.step.p[0]*k));
+			// single-threshold variant: the same _t is applied to all three channels
+			#include "LBSP_16bits_dbcross_3ch1t.i"
+		}
+	}
+}
+
+// computes one LBSP descriptor per keypoint into a (nKeyPoints x 1) matrix;
+// relative-threshold variant: the comparison threshold is derived per pixel
+// (and per channel) as ref_intensity*fThreshold+nThresholdOffset
+static inline void lbsp_computeImpl(	const cv::Mat& oInputImg,
+										const cv::Mat& oRefImg,
+										const std::vector<cv::KeyPoint>& voKeyPoints,
+										cv::Mat& oDesc,
+										float fThreshold,
+										size_t nThresholdOffset) {
+	CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
+	CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
+	CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+	CV_DbgAssert(fThreshold>=0);
+	const size_t nChannels = (size_t)oInputImg.channels();
+	const size_t _step_row = oInputImg.step.p[0];
+	const uchar* _data = oInputImg.data;
+	// with no reference image, the input is compared against itself (intra-frame LBSP)
+	const uchar* _refdata = oRefImg.empty()?oInputImg.data:oRefImg.data;
+	const size_t nKeyPoints = voKeyPoints.size();
+	if(nChannels==1) {
+		oDesc.create((int)nKeyPoints,1,CV_16UC1);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar _ref = _refdata[_step_row*(_y)+_x];
+			ushort& _res = oDesc.at<ushort>((int)k);
+			const size_t _t = (size_t)(_ref*fThreshold)+nThresholdOffset;
+			#include "LBSP_16bits_dbcross_1ch.i"
+		}
+	}
+	else { //nChannels==3
+		oDesc.create((int)nKeyPoints,1,CV_16UC3);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar* _ref = _refdata+_step_row*(_y)+3*(_x);
+			ushort* _res = ((ushort*)(oDesc.data + oDesc.step.p[0]*k));
+			// one independent threshold per channel, each based on that channel's reference value
+			const size_t _t[3] = {(size_t)(_ref[0]*fThreshold)+nThresholdOffset,(size_t)(_ref[1]*fThreshold)+nThresholdOffset,(size_t)(_ref[2]*fThreshold)+nThresholdOffset};
+			#include "LBSP_16bits_dbcross_3ch3t.i"
+		}
+	}
+}
+
+// same as lbsp_computeImpl (absolute-threshold variant), except the output
+// descriptor matrix has the same 2D size as the input image: each descriptor
+// is written at its keypoint's pixel location instead of into an Nx1 list
+static inline void lbsp_computeImpl2(	const cv::Mat& oInputImg,
+										const cv::Mat& oRefImg,
+										const std::vector<cv::KeyPoint>& voKeyPoints,
+										cv::Mat& oDesc,
+										size_t _t) {
+	CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
+	CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
+	CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+	const size_t nChannels = (size_t)oInputImg.channels();
+	const size_t _step_row = oInputImg.step.p[0];
+	const uchar* _data = oInputImg.data;
+	// with no reference image, the input is compared against itself (intra-frame LBSP)
+	const uchar* _refdata = oRefImg.empty()?oInputImg.data:oRefImg.data;
+	const size_t nKeyPoints = voKeyPoints.size();
+	if(nChannels==1) {
+		oDesc.create(oInputImg.size(),CV_16UC1);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar _ref = _refdata[_step_row*(_y)+_x];
+			ushort& _res = oDesc.at<ushort>(_y,_x);
+			#include "LBSP_16bits_dbcross_1ch.i"
+		}
+	}
+	else { //nChannels==3
+		oDesc.create(oInputImg.size(),CV_16UC3);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar* _ref = _refdata+_step_row*(_y)+3*(_x);
+			ushort* _res = ((ushort*)(oDesc.data + oDesc.step.p[0]*_y + oDesc.step.p[1]*_x));
+			#include "LBSP_16bits_dbcross_3ch1t.i"
+		}
+	}
+}
+
+// same as lbsp_computeImpl (relative-threshold variant), except the output
+// descriptor matrix has the same 2D size as the input image: each descriptor
+// is written at its keypoint's pixel location instead of into an Nx1 list
+static inline void lbsp_computeImpl2(	const cv::Mat& oInputImg,
+										const cv::Mat& oRefImg,
+										const std::vector<cv::KeyPoint>& voKeyPoints,
+										cv::Mat& oDesc,
+										float fThreshold,
+										size_t nThresholdOffset) {
+	CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
+	CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
+	CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+	CV_DbgAssert(fThreshold>=0);
+	const size_t nChannels = (size_t)oInputImg.channels();
+	const size_t _step_row = oInputImg.step.p[0];
+	const uchar* _data = oInputImg.data;
+	// with no reference image, the input is compared against itself (intra-frame LBSP)
+	const uchar* _refdata = oRefImg.empty()?oInputImg.data:oRefImg.data;
+	const size_t nKeyPoints = voKeyPoints.size();
+	if(nChannels==1) {
+		oDesc.create(oInputImg.size(),CV_16UC1);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar _ref = _refdata[_step_row*(_y)+_x];
+			ushort& _res = oDesc.at<ushort>(_y,_x);
+			const size_t _t = (size_t)(_ref*fThreshold)+nThresholdOffset;
+			#include "LBSP_16bits_dbcross_1ch.i"
+		}
+	}
+	else { //nChannels==3
+		oDesc.create(oInputImg.size(),CV_16UC3);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			const int _x = (int)voKeyPoints[k].pt.x;
+			const int _y = (int)voKeyPoints[k].pt.y;
+			const uchar* _ref = _refdata+_step_row*(_y)+3*(_x);
+			ushort* _res = ((ushort*)(oDesc.data + oDesc.step.p[0]*_y + oDesc.step.p[1]*_x));
+			// one independent threshold per channel, each based on that channel's reference value
+			const size_t _t[3] = {(size_t)(_ref[0]*fThreshold)+nThresholdOffset,(size_t)(_ref[1]*fThreshold)+nThresholdOffset,(size_t)(_ref[2]*fThreshold)+nThresholdOffset};
+			#include "LBSP_16bits_dbcross_3ch3t.i"
+		}
+	}
+}
+
+// computes descriptors for the given keypoints into a matrix shaped like the
+// input image (see lbsp_computeImpl2); keypoints too close to the border or
+// with a null size are filtered out of 'voKeypoints' in-place first
+void LBSP::compute2(const cv::Mat& oImage, std::vector<cv::KeyPoint>& voKeypoints, cv::Mat& oDescriptors) const {
+	CV_Assert(!oImage.empty());
+    cv::KeyPointsFilter::runByImageBorder(voKeypoints,oImage.size(),PATCH_SIZE/2);
+    cv::KeyPointsFilter::runByKeypointSize(voKeypoints,std::numeric_limits<float>::epsilon());
+    if(voKeypoints.empty()) {
+        oDescriptors.release();
+        return;
+    }
+	// dispatch to the absolute- or relative-threshold implementation based on ctor choice
+	if(m_bOnlyUsingAbsThreshold)
+		lbsp_computeImpl2(oImage,m_oRefImage,voKeypoints,oDescriptors,m_nThreshold);
+	else
+		lbsp_computeImpl2(oImage,m_oRefImage,voKeypoints,oDescriptors,m_fRelThreshold,m_nThreshold);
+}
+
+// batch version of compute2: processes each image/keypoint pair of the
+// collections independently, filling one descriptor matrix per input image
+void LBSP::compute2(const std::vector<cv::Mat>& voImageCollection, std::vector<std::vector<cv::KeyPoint> >& vvoPointCollection, std::vector<cv::Mat>& voDescCollection) const {
+    CV_Assert(voImageCollection.size() == vvoPointCollection.size());
+    const size_t nImages = voImageCollection.size();
+    voDescCollection.resize(nImages);
+    for(size_t n=0; n<nImages; ++n)
+        compute2(voImageCollection[n], vvoPointCollection[n], voDescCollection[n]);
+}
+
+// classic DescriptorExtractor entry point: produces an Nx1 descriptor matrix
+// (one row per surviving keypoint) rather than an image-shaped one
+void LBSP::computeImpl(const cv::Mat& oImage, std::vector<cv::KeyPoint>& voKeypoints, cv::Mat& oDescriptors) const {
+	CV_Assert(!oImage.empty());
+	cv::KeyPointsFilter::runByImageBorder(voKeypoints,oImage.size(),PATCH_SIZE/2);
+	cv::KeyPointsFilter::runByKeypointSize(voKeypoints,std::numeric_limits<float>::epsilon());
+	if(voKeypoints.empty()) {
+		oDescriptors.release();
+		return;
+	}
+	// dispatch to the absolute- or relative-threshold implementation based on ctor choice
+	if(m_bOnlyUsingAbsThreshold)
+		lbsp_computeImpl(oImage,m_oRefImage,voKeypoints,oDescriptors,m_nThreshold);
+	else
+		lbsp_computeImpl(oImage,m_oRefImage,voKeypoints,oDescriptors,m_fRelThreshold,m_nThreshold);
+}
+
+// scatters an (N x 1) descriptor matrix back into an image-shaped matrix of
+// size 'oSize', writing each descriptor at its keypoint's pixel location;
+// pixels without a keypoint are left zeroed
+void LBSP::reshapeDesc(cv::Size oSize, const std::vector<cv::KeyPoint>& voKeypoints, const cv::Mat& oDescriptors, cv::Mat& oOutput) {
+	CV_DbgAssert(!voKeypoints.empty());
+	CV_DbgAssert(!oDescriptors.empty() && oDescriptors.cols==1);
+	CV_DbgAssert(oSize.width>0 && oSize.height>0);
+	CV_DbgAssert(DESC_SIZE==2); // @@@ also relies on a constant desc size
+	CV_DbgAssert(oDescriptors.type()==CV_16UC1 || oDescriptors.type()==CV_16UC3);
+	const size_t nChannels = (size_t)oDescriptors.channels();
+	const size_t nKeyPoints = voKeypoints.size();
+	if(nChannels==1) {
+		oOutput.create(oSize,CV_16UC1);
+		oOutput = cv::Scalar_<ushort>(0);
+		for(size_t k=0; k<nKeyPoints; ++k)
+			oOutput.at<ushort>(voKeypoints[k].pt) = oDescriptors.at<ushort>((int)k);
+	}
+	else { //nChannels==3
+		oOutput.create(oSize,CV_16UC3);
+		oOutput = cv::Scalar_<ushort>(0,0,0);
+		for(size_t k=0; k<nKeyPoints; ++k) {
+			// raw-pointer copy of the three 16-bit channel descriptors for keypoint k
+			ushort* output_ptr = (ushort*)(oOutput.data + oOutput.step.p[0]*(int)voKeypoints[k].pt.y);
+			const ushort* const desc_ptr = (ushort*)(oDescriptors.data + oDescriptors.step.p[0]*k);
+			const size_t idx = 3*(int)voKeypoints[k].pt.x;
+			for(size_t n=0; n<3; ++n)
+				output_ptr[idx+n] = desc_ptr[n];
+		}
+	}
+}
+
+// renders the per-pixel hamming distance between two descriptor images as an
+// 8-bit image, scaled so that the maximum possible distance (DESC_SIZE*8 bits)
+// maps to UCHAR_MAX; when bForceMergeChannels is set for 3-channel inputs, the
+// three per-channel distances are averaged into a single output channel
+void LBSP::calcDescImgDiff(const cv::Mat& oDesc1, const cv::Mat& oDesc2, cv::Mat& oOutput, bool bForceMergeChannels) {
+	CV_DbgAssert(oDesc1.size()==oDesc2.size() && oDesc1.type()==oDesc2.type());
+	CV_DbgAssert(DESC_SIZE==2); // @@@ also relies on a constant desc size
+	CV_DbgAssert(oDesc1.type()==CV_16UC1 || oDesc1.type()==CV_16UC3);
+	CV_DbgAssert(CV_MAT_DEPTH(oDesc1.type())==CV_16U);
+	CV_DbgAssert(DESC_SIZE*8<=UCHAR_MAX);
+	CV_DbgAssert(oDesc1.step.p[0]==oDesc2.step.p[0] && oDesc1.step.p[1]==oDesc2.step.p[1]);
+	const float fScaleFactor = (float)UCHAR_MAX/(DESC_SIZE*8);
+	const size_t nChannels = CV_MAT_CN(oDesc1.type());
+	const size_t _step_row = oDesc1.step.p[0];
+	if(nChannels==1) {
+		oOutput.create(oDesc1.size(),CV_8UC1);
+		oOutput = cv::Scalar(0);
+		for(int i=0; i<oDesc1.rows; ++i) {
+			const size_t idx = _step_row*i;
+			const ushort* const desc1_ptr = (ushort*)(oDesc1.data+idx);
+			const ushort* const desc2_ptr = (ushort*)(oDesc2.data+idx);
+			for(int j=0; j<oDesc1.cols; ++j)
+				oOutput.at<uchar>(i,j) = (uchar)(fScaleFactor*hdist(desc1_ptr[j],desc2_ptr[j]));
+		}
+	}
+	else { //nChannels==3
+		if(bForceMergeChannels)
+			oOutput.create(oDesc1.size(),CV_8UC1);
+		else
+			oOutput.create(oDesc1.size(),CV_8UC3);
+		oOutput = cv::Scalar::all(0);
+		for(int i=0; i<oDesc1.rows; ++i) {
+			const size_t idx =  _step_row*i;
+			const ushort* const desc1_ptr = (ushort*)(oDesc1.data+idx);
+			const ushort* const desc2_ptr = (ushort*)(oDesc2.data+idx);
+			uchar* output_ptr = oOutput.data + oOutput.step.p[0]*i;
+			for(int j=0; j<oDesc1.cols; ++j) {
+				for(size_t n=0;n<3; ++n) {
+					const size_t idx2 = 3*j+n;
+					// merged mode accumulates a third of each channel's scaled distance
+					if(bForceMergeChannels)
+						output_ptr[j] += (uchar)((fScaleFactor*hdist(desc1_ptr[idx2],desc2_ptr[idx2]))/3);
+					else
+						output_ptr[idx2] = (uchar)(fScaleFactor*hdist(desc1_ptr[idx2],desc2_ptr[idx2]));
+				}
+			}
+		}
+	}
+}
+
+// drops all keypoints closer than PATCH_SIZE/2 to the image border, since the
+// pattern would otherwise read out of bounds
+void LBSP::validateKeyPoints(std::vector<cv::KeyPoint>& voKeypoints, cv::Size oImgSize) {
+	cv::KeyPointsFilter::runByImageBorder(voKeypoints,oImgSize,PATCH_SIZE/2);
+}
+
+// zeroes out a PATCH_SIZE/2-wide band along the ROI borders (by copying only
+// the inner rectangle into a fresh zeroed mask), so no analyzed pixel can make
+// the pattern read out of bounds
+void LBSP::validateROI(cv::Mat& oROI) {
+	CV_Assert(!oROI.empty() && oROI.type()==CV_8UC1);
+	cv::Mat oROI_new(oROI.size(),CV_8UC1,cv::Scalar_<uchar>(0));
+	const size_t nBorderSize = PATCH_SIZE/2;
+	const cv::Rect nROI_inner(nBorderSize,nBorderSize,oROI.cols-nBorderSize*2,oROI.rows-nBorderSize*2);
+	cv::Mat(oROI,nROI_inner).copyTo(cv::Mat(oROI_new,nROI_inner));
+	oROI = oROI_new;
+}
diff --git a/package_bgs/pl/LBSP.h b/package_bgs/pl/LBSP.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ca17a0125baae1a18e00b0d31fa673c1c34a903
--- /dev/null
+++ b/package_bgs/pl/LBSP.h
@@ -0,0 +1,118 @@
+#pragma once
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+#include <opencv2/features2d/features2d.hpp>
+#include "DistanceUtils.h"
+
+/*!
+	Local Binary Similarity Pattern (LBSP) feature extractor
+
+	Note 1: both grayscale and RGB/BGR images may be used with this extractor.
+	Note 2: using LBSP::compute2(...) is logically equivalent to using LBSP::compute(...) followed by LBSP::reshapeDesc(...).
+
+	For more details on the different parameters, see G.-A. Bilodeau et al, "Change Detection in Feature Space Using Local
+	Binary Similarity Patterns", in CRV 2013.
+
+	This algorithm is currently NOT thread-safe.
+ */
+class LBSP : public cv::DescriptorExtractor {
+public:
+	//! constructor 1, threshold = absolute intensity 'similarity' threshold used when computing comparisons
+	LBSP(size_t nThreshold);
+	//! constructor 2, threshold = relative intensity 'similarity' threshold used when computing comparisons
+	LBSP(float fRelThreshold, size_t nThresholdOffset=0);
+	//! default destructor
+	virtual ~LBSP();
+	//! loads extractor params from the specified file node @@@@ not impl
+	virtual void read(const cv::FileNode&);
+	//! writes extractor params to the specified file storage @@@@ not impl
+	virtual void write(cv::FileStorage&) const;
+	//! sets the 'reference' image to be used for inter-frame comparisons (note: if no image is set or if the image is empty, the algorithm will default back to intra-frame comparisons)
+	virtual void setReference(const cv::Mat&);
+	//! returns the current descriptor size, in bytes
+	virtual int descriptorSize() const;
+	//! returns the current descriptor data type
+	virtual int descriptorType() const;
+	//! returns whether this extractor is using a relative threshold or not
+	virtual bool isUsingRelThreshold() const;
+	//! returns the current relative threshold used for comparisons (-1 = invalid/not used)
+	virtual float getRelThreshold() const;
+	//! returns the current absolute threshold used for comparisons (-1 = invalid/not used)
+	virtual size_t getAbsThreshold() const;
+
+	//! similar to DescriptorExtractor::compute(const cv::Mat& image, ...), but in this case, the descriptors matrix has the same shape as the input matrix (possibly slower, but the result can be displayed)
+	void compute2(const cv::Mat& oImage, std::vector<cv::KeyPoint>& voKeypoints, cv::Mat& oDescriptors) const;
+	//! batch version of LBSP::compute2(const cv::Mat& image, ...), also similar to DescriptorExtractor::compute(const std::vector<cv::Mat>& imageCollection, ...)
+	void compute2(const std::vector<cv::Mat>& voImageCollection, std::vector<std::vector<cv::KeyPoint> >& vvoPointCollection, std::vector<cv::Mat>& voDescCollection) const;
+
+	// note for the four inline helpers below: the included "LBSP_16bits_dbcross_*.i"
+	// fragments expand into the 16-bit pattern-comparison expression and depend on
+	// the exact local names (_data,_step_row) plus the parameter names (_ref,_x,_y,_t,_res)
+
+	//! utility function, shortcut/lightweight/direct single-point LBSP computation function for extra flexibility (1-channel version)
+	inline static void computeGrayscaleDescriptor(const cv::Mat& oInputImg, const uchar _ref, const int _x, const int _y, const size_t _t, ushort& _res) {
+		CV_DbgAssert(!oInputImg.empty());
+		CV_DbgAssert(oInputImg.type()==CV_8UC1);
+		CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+		CV_DbgAssert(_x>=(int)LBSP::PATCH_SIZE/2 && _y>=(int)LBSP::PATCH_SIZE/2);
+		CV_DbgAssert(_x<oInputImg.cols-(int)LBSP::PATCH_SIZE/2 && _y<oInputImg.rows-(int)LBSP::PATCH_SIZE/2);
+		const size_t _step_row = oInputImg.step.p[0];
+		const uchar* const _data = oInputImg.data;
+		#include "LBSP_16bits_dbcross_1ch.i"
+	}
+
+	//! utility function, shortcut/lightweight/direct single-point LBSP computation function for extra flexibility (3-channels version)
+	inline static void computeRGBDescriptor(const cv::Mat& oInputImg, const uchar* const _ref,  const int _x, const int _y, const size_t* const _t, ushort* _res) {
+		CV_DbgAssert(!oInputImg.empty());
+		CV_DbgAssert(oInputImg.type()==CV_8UC3);
+		CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+		CV_DbgAssert(_x>=(int)LBSP::PATCH_SIZE/2 && _y>=(int)LBSP::PATCH_SIZE/2);
+		CV_DbgAssert(_x<oInputImg.cols-(int)LBSP::PATCH_SIZE/2 && _y<oInputImg.rows-(int)LBSP::PATCH_SIZE/2);
+		const size_t _step_row = oInputImg.step.p[0];
+		const uchar* const _data = oInputImg.data;
+		#include "LBSP_16bits_dbcross_3ch3t.i"
+	}
+
+	//! utility function, shortcut/lightweight/direct single-point LBSP computation function for extra flexibility (3-channels version)
+	inline static void computeRGBDescriptor(const cv::Mat& oInputImg, const uchar* const _ref,  const int _x, const int _y, const size_t _t, ushort* _res) {
+		CV_DbgAssert(!oInputImg.empty());
+		CV_DbgAssert(oInputImg.type()==CV_8UC3);
+		CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+		CV_DbgAssert(_x>=(int)LBSP::PATCH_SIZE/2 && _y>=(int)LBSP::PATCH_SIZE/2);
+		CV_DbgAssert(_x<oInputImg.cols-(int)LBSP::PATCH_SIZE/2 && _y<oInputImg.rows-(int)LBSP::PATCH_SIZE/2);
+		const size_t _step_row = oInputImg.step.p[0];
+		const uchar* const _data = oInputImg.data;
+		#include "LBSP_16bits_dbcross_3ch1t.i"
+	}
+
+	//! utility function, shortcut/lightweight/direct single-point LBSP computation function for extra flexibility (1-channel-RGB version)
+	inline static void computeSingleRGBDescriptor(const cv::Mat& oInputImg, const uchar _ref, const int _x, const int _y, const size_t _c, const size_t _t, ushort& _res) {
+		CV_DbgAssert(!oInputImg.empty());
+		CV_DbgAssert(oInputImg.type()==CV_8UC3 && _c<3);
+		CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
+		CV_DbgAssert(_x>=(int)LBSP::PATCH_SIZE/2 && _y>=(int)LBSP::PATCH_SIZE/2);
+		CV_DbgAssert(_x<oInputImg.cols-(int)LBSP::PATCH_SIZE/2 && _y<oInputImg.rows-(int)LBSP::PATCH_SIZE/2);
+		const size_t _step_row = oInputImg.step.p[0];
+		const uchar* const _data = oInputImg.data;
+		#include "LBSP_16bits_dbcross_s3ch.i"
+	}
+
+	//! utility function, used to reshape a descriptors matrix to its input image size via their keypoint locations
+	static void reshapeDesc(cv::Size oSize, const std::vector<cv::KeyPoint>& voKeypoints, const cv::Mat& oDescriptors, cv::Mat& oOutput);
+	//! utility function, used to illustrate the difference between two descriptor images
+	static void calcDescImgDiff(const cv::Mat& oDesc1, const cv::Mat& oDesc2, cv::Mat& oOutput, bool bForceMergeChannels=false);
+	//! utility function, used to filter out bad keypoints that would trigger out of bounds error because they're too close to the image border
+	static void validateKeyPoints(std::vector<cv::KeyPoint>& voKeypoints, cv::Size oImgSize);
+	//! utility function, used to filter out bad pixels in a ROI that would trigger out of bounds error because they're too close to the image border
+	static void validateROI(cv::Mat& oROI);
+	//! utility, specifies the pixel size of the pattern used (width and height)
+	static const size_t PATCH_SIZE = 5;
+	//! utility, specifies the number of bytes per descriptor (should be the same as calling 'descriptorSize()')
+	static const size_t DESC_SIZE = 2;
+
+protected:
+	//! classic 'compute' implementation, based on the regular DescriptorExtractor::computeImpl arguments & expected output
+	virtual void computeImpl(const cv::Mat& oImage, std::vector<cv::KeyPoint>& voKeypoints, cv::Mat& oDescriptors) const;
+
+	//! true = constructor 1 was used (fixed absolute threshold), false = relative-threshold mode
+	const bool m_bOnlyUsingAbsThreshold;
+	//! relative intensity threshold (only meaningful when m_bOnlyUsingAbsThreshold is false)
+	const float m_fRelThreshold;
+	//! absolute threshold (ctor 1) or additive threshold offset (ctor 2)
+	const size_t m_nThreshold;
+	//! optional reference image for inter-frame comparisons (empty = intra-frame)
+	cv::Mat m_oRefImage;
+};
diff --git a/package_bgs/pl/LBSP_16bits_dbcross_1ch.i b/package_bgs/pl/LBSP_16bits_dbcross_1ch.i
new file mode 100644
index 0000000000000000000000000000000000000000..ba5ffd0b1c61412cb55d3a4f5e312ec765a2c0fe
--- /dev/null
+++ b/package_bgs/pl/LBSP_16bits_dbcross_1ch.i
@@ -0,0 +1,44 @@
+// note: this is the LBSP 16 bit double-cross single channel pattern as used in
+// the original article by G.-A. Bilodeau et al.
+// 
+//  O   O   O          4 ..  3 ..  6
+//    O O O           .. 15  8 13 ..
+//  O O X O O    =>    0  9  X 11  1
+//    O O O           .. 12 10 14 ..
+//  O   O   O          7 ..  2 ..  5
+//
+// (the right-hand grid shows the _res bit index assigned to each neighbor)
+//
+// must be defined externally:
+//      _t              (size_t, absolute threshold used for comparisons)
+//      _ref            (uchar, 'central' value used for comparisons)
+//      _data           (uchar*, single-channel data to be covered by the pattern)
+//      _y              (int, pattern rows location in the image data)
+//      _x              (int, pattern cols location in the image data)
+//      _step_row       (size_t, step size between rows, including padding)
+//      _res            (ushort, 16 bit result vector)
+//       L1dist         (function, returns the absolute difference between two uchars)
+
+#ifdef _val
+#error "definitions clash detected"
+#else
+#define _val(x,y) _data[_step_row*(_y+y)+_x+x]
+#endif
+
+// each bit of _res is set when the corresponding neighbor differs from the
+// central reference value by more than the threshold
+_res = ((L1dist(_val(-1, 1),_ref) > _t) << 15)
+     + ((L1dist(_val( 1,-1),_ref) > _t) << 14)
+     + ((L1dist(_val( 1, 1),_ref) > _t) << 13)
+     + ((L1dist(_val(-1,-1),_ref) > _t) << 12)
+     + ((L1dist(_val( 1, 0),_ref) > _t) << 11)
+     + ((L1dist(_val( 0,-1),_ref) > _t) << 10)
+     + ((L1dist(_val(-1, 0),_ref) > _t) << 9)
+     + ((L1dist(_val( 0, 1),_ref) > _t) << 8)
+     + ((L1dist(_val(-2,-2),_ref) > _t) << 7)
+     + ((L1dist(_val( 2, 2),_ref) > _t) << 6)
+     + ((L1dist(_val( 2,-2),_ref) > _t) << 5)
+     + ((L1dist(_val(-2, 2),_ref) > _t) << 4)
+     + ((L1dist(_val( 0, 2),_ref) > _t) << 3)
+     + ((L1dist(_val( 0,-2),_ref) > _t) << 2)
+     + ((L1dist(_val( 2, 0),_ref) > _t) << 1)
+     + ((L1dist(_val(-2, 0),_ref) > _t));
+
+#undef _val
diff --git a/package_bgs/pl/LBSP_16bits_dbcross_3ch1t.i b/package_bgs/pl/LBSP_16bits_dbcross_3ch1t.i
new file mode 100644
index 0000000000000000000000000000000000000000..da0ebf9a2be82ae6af0f18f63f9891bd87d97273
--- /dev/null
+++ b/package_bgs/pl/LBSP_16bits_dbcross_3ch1t.i
@@ -0,0 +1,46 @@
+// note: this is the LBSP 16 bit double-cross indiv RGB pattern as used in
+// the original article by G.-A. Bilodeau et al.
+//
+//  O   O   O          4 ..  3 ..  6
+//    O O O           .. 15  8 13 ..
+//  O O X O O    =>    0  9  X 11  1
+//    O O O           .. 12 10 14 ..
+//  O   O   O          7 ..  2 ..  5
+//           3x                     3x
+//
+// (the right-hand grid shows the _res bit index assigned to each neighbor;
+//  the same single threshold _t is applied to all three channels)
+//
+// must be defined externally:
+//      _t              (size_t, absolute threshold used for comparisons)
+//      _ref            (uchar[3], 'central' values used for comparisons)
+//      _data           (uchar*, triple-channel data to be covered by the pattern)
+//      _y              (int, pattern rows location in the image data)
+//      _x              (int, pattern cols location in the image data)
+//      _step_row       (size_t, step size between rows, including padding)
+//      _res            (ushort[3], 16 bit result vectors vector)
+//       L1dist         (function, returns the absolute difference between two uchars)
+
+#ifdef _val
+#error "definitions clash detected"
+#else
+#define _val(x,y,n) _data[_step_row*(_y+y)+3*(_x+x)+n]
+#endif
+
+for(int n=0; n<3; ++n) {
+    _res[n] = ((L1dist(_val(-1, 1, n),_ref[n]) > _t) << 15)
+            + ((L1dist(_val( 1,-1, n),_ref[n]) > _t) << 14)
+            + ((L1dist(_val( 1, 1, n),_ref[n]) > _t) << 13)
+            + ((L1dist(_val(-1,-1, n),_ref[n]) > _t) << 12)
+            + ((L1dist(_val( 1, 0, n),_ref[n]) > _t) << 11)
+            + ((L1dist(_val( 0,-1, n),_ref[n]) > _t) << 10)
+            + ((L1dist(_val(-1, 0, n),_ref[n]) > _t) << 9)
+            + ((L1dist(_val( 0, 1, n),_ref[n]) > _t) << 8)
+            + ((L1dist(_val(-2,-2, n),_ref[n]) > _t) << 7)
+            + ((L1dist(_val( 2, 2, n),_ref[n]) > _t) << 6)
+            + ((L1dist(_val( 2,-2, n),_ref[n]) > _t) << 5)
+            + ((L1dist(_val(-2, 2, n),_ref[n]) > _t) << 4)
+            + ((L1dist(_val( 0, 2, n),_ref[n]) > _t) << 3)
+            + ((L1dist(_val( 0,-2, n),_ref[n]) > _t) << 2)
+            + ((L1dist(_val( 2, 0, n),_ref[n]) > _t) << 1)
+            + ((L1dist(_val(-2, 0, n),_ref[n]) > _t));
+}
+
+#undef _val
diff --git a/package_bgs/pl/LBSP_16bits_dbcross_3ch3t.i b/package_bgs/pl/LBSP_16bits_dbcross_3ch3t.i
new file mode 100644
index 0000000000000000000000000000000000000000..4662367b863771d91efe7c21c8a32bb10f8c766e
--- /dev/null
+++ b/package_bgs/pl/LBSP_16bits_dbcross_3ch3t.i
@@ -0,0 +1,46 @@
+// note: this is the LBSP 16 bit double-cross indiv RGB pattern as used in
+// the original article by G.-A. Bilodeau et al.
+//
+//  O   O   O          4 ..  3 ..  6
+//    O O O           .. 15  8 13 ..
+//  O O X O O    =>    0  9  X 11  1
+//    O O O           .. 12 10 14 ..
+//  O   O   O          7 ..  2 ..  5
+//           3x                     3x
+//
+// (the right-hand grid shows the _res bit index assigned to each neighbor;
+//  each channel n is compared against its own threshold _t[n])
+//
+// must be defined externally:
+//      _t              (size_t[3], absolute thresholds used for comparisons)
+//      _ref            (uchar[3], 'central' values used for comparisons)
+//      _data           (uchar*, triple-channel data to be covered by the pattern)
+//      _y              (int, pattern rows location in the image data)
+//      _x              (int, pattern cols location in the image data)
+//      _step_row       (size_t, step size between rows, including padding)
+//      _res            (ushort[3], 16 bit result vectors vector)
+//       L1dist         (function, returns the absolute difference between two uchars)
+
+#ifdef _val
+#error "definitions clash detected"
+#else
+#define _val(x,y,n) _data[_step_row*(_y+y)+3*(_x+x)+n]
+#endif
+
+for(int n=0; n<3; ++n) {
+    _res[n] = ((L1dist(_val(-1, 1, n),_ref[n]) > _t[n]) << 15)
+            + ((L1dist(_val( 1,-1, n),_ref[n]) > _t[n]) << 14)
+            + ((L1dist(_val( 1, 1, n),_ref[n]) > _t[n]) << 13)
+            + ((L1dist(_val(-1,-1, n),_ref[n]) > _t[n]) << 12)
+            + ((L1dist(_val( 1, 0, n),_ref[n]) > _t[n]) << 11)
+            + ((L1dist(_val( 0,-1, n),_ref[n]) > _t[n]) << 10)
+            + ((L1dist(_val(-1, 0, n),_ref[n]) > _t[n]) << 9)
+            + ((L1dist(_val( 0, 1, n),_ref[n]) > _t[n]) << 8)
+            + ((L1dist(_val(-2,-2, n),_ref[n]) > _t[n]) << 7)
+            + ((L1dist(_val( 2, 2, n),_ref[n]) > _t[n]) << 6)
+            + ((L1dist(_val( 2,-2, n),_ref[n]) > _t[n]) << 5)
+            + ((L1dist(_val(-2, 2, n),_ref[n]) > _t[n]) << 4)
+            + ((L1dist(_val( 0, 2, n),_ref[n]) > _t[n]) << 3)
+            + ((L1dist(_val( 0,-2, n),_ref[n]) > _t[n]) << 2)
+            + ((L1dist(_val( 2, 0, n),_ref[n]) > _t[n]) << 1)
+            + ((L1dist(_val(-2, 0, n),_ref[n]) > _t[n]));
+}
+
+#undef _val
diff --git a/package_bgs/pl/LBSP_16bits_dbcross_s3ch.i b/package_bgs/pl/LBSP_16bits_dbcross_s3ch.i
new file mode 100644
index 0000000000000000000000000000000000000000..6fdc67606a248478b524b4e37cf62262bf511c3d
--- /dev/null
+++ b/package_bgs/pl/LBSP_16bits_dbcross_s3ch.i
@@ -0,0 +1,45 @@
+// note: this is the LBSP 16 bit double-cross indiv RGB pattern as used in
+// the original article by G.-A. Bilodeau et al.
+// 
+//  O   O   O          4 ..  3 ..  6
+//    O O O           .. 15  8 13 ..
+//  O O X O O    =>    0  9  X 11  1
+//    O O O           .. 12 10 14 ..
+//  O   O   O          7 ..  2 ..  5
+//          (single/3x)            (single/3x)
+//
+// (the right-hand grid shows the _res bit index assigned to each neighbor;
+//  only the channel selected by _c is sampled from the 3-channel data)
+//
+// must be defined externally:
+//      _t              (size_t, absolute threshold used for comparisons)
+//      _ref            (uchar, 'central' value used for comparisons)
+//      _data           (uchar*, triple-channel data to be covered by the pattern)
+//      _y              (int, pattern rows location in the image data)
+//      _x              (int, pattern cols location in the image data)
+//      _c              (size_t, pattern channel location in the image data)
+//      _step_row       (size_t, step size between rows, including padding)
+//      _res            (ushort, 16 bit result vector)
+//       L1dist         (function, returns the absolute difference between two uchars)
+
+#ifdef _val
+#error "definitions clash detected"
+#else
+#define _val(x,y,n) _data[_step_row*(_y+y)+3*(_x+x)+n]
+#endif
+
+_res = ((L1dist(_val(-1, 1, _c),_ref) > _t) << 15)
+     + ((L1dist(_val( 1,-1, _c),_ref) > _t) << 14)
+     + ((L1dist(_val( 1, 1, _c),_ref) > _t) << 13)
+     + ((L1dist(_val(-1,-1, _c),_ref) > _t) << 12)
+     + ((L1dist(_val( 1, 0, _c),_ref) > _t) << 11)
+     + ((L1dist(_val( 0,-1, _c),_ref) > _t) << 10)
+     + ((L1dist(_val(-1, 0, _c),_ref) > _t) << 9)
+     + ((L1dist(_val( 0, 1, _c),_ref) > _t) << 8)
+     + ((L1dist(_val(-2,-2, _c),_ref) > _t) << 7)
+     + ((L1dist(_val( 2, 2, _c),_ref) > _t) << 6)
+     + ((L1dist(_val( 2,-2, _c),_ref) > _t) << 5)
+     + ((L1dist(_val(-2, 2, _c),_ref) > _t) << 4)
+     + ((L1dist(_val( 0, 2, _c),_ref) > _t) << 3)
+     + ((L1dist(_val( 0,-2, _c),_ref) > _t) << 2)
+     + ((L1dist(_val( 2, 0, _c),_ref) > _t) << 1)
+     + ((L1dist(_val(-2, 0, _c),_ref) > _t));
+
+#undef _val
diff --git a/package_bgs/pl/LOBSTER.cpp b/package_bgs/pl/LOBSTER.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..879ceef850c3f74a644e5b7fdf89632b1a487339
--- /dev/null
+++ b/package_bgs/pl/LOBSTER.cpp
@@ -0,0 +1,73 @@
+#include "LOBSTER.h"
+#include "BackgroundSubtractorLOBSTER.h"
+
+
+// initializes every tunable parameter to its BGSLOBSTER_DEFAULT_* value; the
+// actual subtractor is only allocated once the first frame is processed
+LOBSTERBGS::LOBSTERBGS() :
+pLOBSTER(0), firstTime(true), showOutput(true),
+fRelLBSPThreshold 			(BGSLOBSTER_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD),
+nLBSPThresholdOffset 		(BGSLOBSTER_DEFAULT_LBSP_OFFSET_SIMILARITY_THRESHOLD),
+nDescDistThreshold 			(BGSLOBSTER_DEFAULT_DESC_DIST_THRESHOLD),
+nColorDistThreshold 		(BGSLOBSTER_DEFAULT_COLOR_DIST_THRESHOLD),
+nBGSamples 					(BGSLOBSTER_DEFAULT_NB_BG_SAMPLES),
+nRequiredBGSamples 			(BGSLOBSTER_DEFAULT_REQUIRED_NB_BG_SAMPLES)
+{
+}
+
+LOBSTERBGS::~LOBSTERBGS() {
+	// 'delete' on a null pointer is a well-defined no-op, so no guard is needed
+	delete pLOBSTER;
+}
+
+// processes a single frame: on the first call, the LOBSTER model is created
+// with the current (config-loaded) parameters and initialized over the full
+// frame; every call then segments the frame into img_output and refreshes the
+// background model image into img_bgmodel
+void LOBSTERBGS::process(const cv::Mat &img_input, cv::Mat &img_output, cv::Mat &img_bgmodel)
+{
+  if(img_input.empty())
+    return;
+
+  loadConfig();
+
+  if (firstTime) {
+    saveConfig();
+    pLOBSTER = new BackgroundSubtractorLOBSTER(
+    		fRelLBSPThreshold, nLBSPThresholdOffset, nDescDistThreshold,
+    		nColorDistThreshold, nBGSamples, nRequiredBGSamples);
+
+    // full-white mask = every pixel belongs to the analyzed region (ROI)
+    pLOBSTER->initialize(img_input, cv::Mat (img_input.size(), CV_8UC1, cv::Scalar_<uchar>(255)));
+    firstTime = false;
+  }
+
+  (*pLOBSTER)(img_input, img_output);
+  pLOBSTER->getBackgroundImage(img_bgmodel);
+
+  if(showOutput) {
+	  imshow("LOBSTER FG", img_output);
+	  imshow("LOBSTER BG", img_bgmodel);
+  }
+}
+
+// persists the current parameter set to ./config/LOBSTERBGS.xml;
+// cvOpenFileStorage returns NULL when the ./config directory is missing or not
+// writable, so we bail out instead of passing a null handle to the cvWrite* calls
+void LOBSTERBGS::saveConfig()
+{
+	CvFileStorage* fs = cvOpenFileStorage("./config/LOBSTERBGS.xml", 0, CV_STORAGE_WRITE);
+	if (fs == 0)
+		return;
+
+	cvWriteReal(fs, "fRelLBSPThreshold", fRelLBSPThreshold);
+	// the size_t members are narrowed explicitly since the C API only stores ints
+	cvWriteInt(fs, "nLBSPThresholdOffset", (int)nLBSPThresholdOffset);
+	cvWriteInt(fs, "nDescDistThreshold", (int)nDescDistThreshold);
+	cvWriteInt(fs, "nColorDistThreshold", (int)nColorDistThreshold);
+	cvWriteInt(fs, "nBGSamples", (int)nBGSamples);
+	cvWriteInt(fs, "nRequiredBGSamples", (int)nRequiredBGSamples);
+
+	cvReleaseFileStorage(&fs);
+}
+
+// reloads the parameter set from ./config/LOBSTERBGS.xml; on the very first
+// run the file does not exist yet (process() calls loadConfig() before
+// saveConfig()), so when the storage cannot be opened we explicitly fall back
+// to the compile-time defaults instead of handing a null handle to cvRead*ByName
+void LOBSTERBGS::loadConfig()
+{
+	CvFileStorage* fs = cvOpenFileStorage("./config/LOBSTERBGS.xml", 0, CV_STORAGE_READ);
+	if (fs == 0) {
+		fRelLBSPThreshold = BGSLOBSTER_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD;
+		nLBSPThresholdOffset = BGSLOBSTER_DEFAULT_LBSP_OFFSET_SIMILARITY_THRESHOLD;
+		nDescDistThreshold = BGSLOBSTER_DEFAULT_DESC_DIST_THRESHOLD;
+		nColorDistThreshold = BGSLOBSTER_DEFAULT_COLOR_DIST_THRESHOLD;
+		nBGSamples = BGSLOBSTER_DEFAULT_NB_BG_SAMPLES;
+		nRequiredBGSamples = BGSLOBSTER_DEFAULT_REQUIRED_NB_BG_SAMPLES;
+		return;
+	}
+
+	fRelLBSPThreshold = cvReadRealByName(fs, 0, "fRelLBSPThreshold", BGSLOBSTER_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD);
+	nLBSPThresholdOffset = cvReadIntByName(fs, 0, "nLBSPThresholdOffset", BGSLOBSTER_DEFAULT_LBSP_OFFSET_SIMILARITY_THRESHOLD);
+	nDescDistThreshold = cvReadIntByName(fs, 0, "nDescDistThreshold", BGSLOBSTER_DEFAULT_DESC_DIST_THRESHOLD);
+	nColorDistThreshold = cvReadIntByName(fs, 0, "nColorDistThreshold", BGSLOBSTER_DEFAULT_COLOR_DIST_THRESHOLD);
+	nBGSamples = cvReadIntByName(fs, 0, "nBGSamples", BGSLOBSTER_DEFAULT_NB_BG_SAMPLES);
+	nRequiredBGSamples = cvReadIntByName(fs, 0, "nRequiredBGSamples", BGSLOBSTER_DEFAULT_REQUIRED_NB_BG_SAMPLES);
+
+	cvReleaseFileStorage(&fs);
+}
diff --git a/package_bgs/pl/LOBSTER.h b/package_bgs/pl/LOBSTER.h
new file mode 100644
index 0000000000000000000000000000000000000000..f5954ca2ebe814d176f47936e790668959e6089c
--- /dev/null
+++ b/package_bgs/pl/LOBSTER.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include <opencv2/opencv.hpp>
+
+
+#include "../IBGS.h"
+
+class BackgroundSubtractorLOBSTER;
+
+// Adapter exposing BackgroundSubtractorLOBSTER through the bgslibrary IBGS
+// interface (see LOBSTER.cpp for the implementation).
+// NOTE(review): the destructor is non-virtual; deleting through an IBGS*
+// is only safe if IBGS declares a virtual destructor — confirm.
+class LOBSTERBGS : public IBGS
+{
+private:
+	BackgroundSubtractorLOBSTER* pLOBSTER;  // created lazily on the first processed frame
+	bool firstTime;                         // true until the subtractor is initialized
+	bool showOutput;                        // when true, process() shows FG/BG debug windows
+
+	// Tunables mirroring the BackgroundSubtractorLOBSTER constructor arguments;
+	// persisted in / loaded from ./config/LOBSTERBGS.xml.
+	float fRelLBSPThreshold;
+	size_t nLBSPThresholdOffset;
+	size_t nDescDistThreshold;
+	size_t nColorDistThreshold;
+	size_t nBGSamples;
+	size_t nRequiredBGSamples;
+  
+public:
+  LOBSTERBGS();
+  ~LOBSTERBGS();
+
+  // Runs one frame through the subtractor: img_output receives the foreground
+  // mask, img_bgmodel the current background model image.
+  void process(const cv::Mat &img_input, cv::Mat &img_output, cv::Mat &img_bgmodel);
+
+private:
+  void saveConfig();
+  void loadConfig();
+};
diff --git a/package_bgs/pl/RandUtils.h b/package_bgs/pl/RandUtils.h
new file mode 100644
index 0000000000000000000000000000000000000000..24ca5f683d86970c89d4822efdcae1ffce1ca4ae
--- /dev/null
+++ b/package_bgs/pl/RandUtils.h
@@ -0,0 +1,96 @@
+#pragma once
+
+// NOTE: this header defines file-static data and inline helpers only; it uses
+// cv::Size and rand() but includes nothing itself, so the includer must pull
+// in the OpenCV core headers and <cstdlib> first.
+
+/*// gaussian 3x3 pattern, based on 'floor(fspecial('gaussian', 3, 1)*256)'
+static const int s_nSamplesInitPatternWidth = 3;
+static const int s_nSamplesInitPatternHeight = 3;
+static const int s_nSamplesInitPatternTot = 256;
+static const int s_anSamplesInitPattern[s_nSamplesInitPatternHeight][s_nSamplesInitPatternWidth] = {
+    {19,    32,    19,},
+    {32,    52,    32,},
+    {19,    32,    19,},
+};*/
+
+// gaussian 7x7 pattern, based on 'floor(fspecial('gaussian',7,2)*512)'
+// Used by getRandSamplePosition() as a discrete sampling distribution; the
+// weights sum exactly to s_nSamplesInitPatternTot (512), so the weighted
+// pick below always terminates inside the table.
+static const int s_nSamplesInitPatternWidth = 7;
+static const int s_nSamplesInitPatternHeight = 7;
+static const int s_nSamplesInitPatternTot = 512;
+static const int s_anSamplesInitPattern[s_nSamplesInitPatternHeight][s_nSamplesInitPatternWidth] = {
+    {2,     4,     6,     7,     6,     4,     2,},
+    {4,     8,    12,    14,    12,     8,     4,},
+    {6,    12,    21,    25,    21,    12,     6,},
+    {7,    14,    25,    28,    25,    14,     7,},
+    {6,    12,    21,    25,    21,    12,     6,},
+    {4,     8,    12,    14,    12,     8,     4,},
+    {2,     4,     6,     7,     6,     4,     2,},
+};
+
+//! returns a random init/sampling position for the specified pixel position; also guards against out-of-bounds values via image/border size check.
+static inline void getRandSamplePosition(int& x_sample, int& y_sample, const int x_orig, const int y_orig, const int border, const cv::Size& imgsize) {
+    // Weighted cell selection: draw nRemaining uniformly in
+    // [1, s_nSamplesInitPatternTot] and walk the weight table until the
+    // cumulative weight reaches it. The table weights sum to the total, so
+    // the pick always lands inside the pattern.
+    int nRemaining = 1+rand()%s_nSamplesInitPatternTot;
+    bool bPicked = false;
+    for(x_sample=0; x_sample<s_nSamplesInitPatternWidth; ++x_sample) {
+        for(y_sample=0; y_sample<s_nSamplesInitPatternHeight; ++y_sample) {
+            nRemaining -= s_anSamplesInitPattern[y_sample][x_sample];
+            if(nRemaining<=0) {
+                bPicked = true;
+                break;
+            }
+        }
+        if(bPicked)
+            break;
+    }
+    // Recentre the picked offset on the origin, then clamp both coordinates
+    // into the border-aware image area.
+    x_sample += x_orig-s_nSamplesInitPatternWidth/2;
+    y_sample += y_orig-s_nSamplesInitPatternHeight/2;
+    if(y_sample<border)
+        y_sample = border;
+    else if(y_sample>=imgsize.height-border)
+        y_sample = imgsize.height-border-1;
+    if(x_sample<border)
+        x_sample = border;
+    else if(x_sample>=imgsize.width-border)
+        x_sample = imgsize.width-border-1;
+}
+
+// simple 8-connected (3x3) neighbors pattern
+// Each row is a {dx, dy} offset added to the origin by
+// getRandNeighborPosition_3x3(); the center cell is deliberately absent.
+static const int s_anNeighborPatternSize_3x3 = 8;
+static const int s_anNeighborPattern_3x3[8][2] = {
+    {-1, 1},  { 0, 1},  { 1, 1},
+    {-1, 0},            { 1, 0},
+    {-1,-1},  { 0,-1},  { 1,-1},
+};
+
+//! returns a random neighbor position for the specified pixel position; also guards against out-of-bounds values via image/border size check.
+static inline void getRandNeighborPosition_3x3(int& x_neighbor, int& y_neighbor, const int x_orig, const int y_orig, const int border, const cv::Size& imgsize) {
+    // pick one of the eight {dx, dy} neighbor offsets uniformly at random
+    const int* anOffset = s_anNeighborPattern_3x3[rand()%s_anNeighborPatternSize_3x3];
+    x_neighbor = x_orig+anOffset[0];
+    y_neighbor = y_orig+anOffset[1];
+    // clamp each coordinate into the border-aware image area
+    x_neighbor = (x_neighbor<border) ? border : (x_neighbor>=imgsize.width-border) ? imgsize.width-border-1 : x_neighbor;
+    y_neighbor = (y_neighbor<border) ? border : (y_neighbor>=imgsize.height-border) ? imgsize.height-border-1 : y_neighbor;
+}
+
+// 5x5 neighbors pattern
+// Each row is a {dx, dy} offset added to the origin by
+// getRandNeighborPosition_5x5(); the center cell is deliberately absent.
+static const int s_anNeighborPatternSize_5x5 = 24;
+static const int s_anNeighborPattern_5x5[24][2] = {
+    {-2, 2},  {-1, 2},  { 0, 2},  { 1, 2},  { 2, 2},
+    {-2, 1},  {-1, 1},  { 0, 1},  { 1, 1},  { 2, 1},
+    {-2, 0},  {-1, 0},            { 1, 0},  { 2, 0},
+    {-2,-1},  {-1,-1},  { 0,-1},  { 1,-1},  { 2,-1},
+    {-2,-2},  {-1,-2},  { 0,-2},  { 1,-2},  { 2,-2},
+};
+
+//! returns a random neighbor position for the specified pixel position; also guards against out-of-bounds values via image/border size check.
+static inline void getRandNeighborPosition_5x5(int& x_neighbor, int& y_neighbor, const int x_orig, const int y_orig, const int border, const cv::Size& imgsize) {
+    // pick one of the 24 {dx, dy} neighbor offsets uniformly at random
+    const int* anOffset = s_anNeighborPattern_5x5[rand()%s_anNeighborPatternSize_5x5];
+    x_neighbor = x_orig+anOffset[0];
+    y_neighbor = y_orig+anOffset[1];
+    // clamp each coordinate into the border-aware image area
+    x_neighbor = (x_neighbor<border) ? border : (x_neighbor>=imgsize.width-border) ? imgsize.width-border-1 : x_neighbor;
+    y_neighbor = (y_neighbor<border) ? border : (y_neighbor>=imgsize.height-border) ? imgsize.height-border-1 : y_neighbor;
+}
diff --git a/package_bgs/pl/SuBSENSE.cpp b/package_bgs/pl/SuBSENSE.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4f0f984bea7326db5f5ad29daa5972c1c3b76445
--- /dev/null
+++ b/package_bgs/pl/SuBSENSE.cpp
@@ -0,0 +1,73 @@
+#include "SuBSENSE.h"
+#include "BackgroundSubtractorSuBSENSE.h"
+
+
+// Defers creation of the actual subtractor: pSubsense stays null until the
+// first frame reaches process() (the subtractor is initialized from that
+// frame). All tunables start at their BGSSUBSENSE_* library defaults, which
+// loadConfig() may later override from ./config/SuBSENSEBGS.xml.
+SuBSENSEBGS::SuBSENSEBGS() :
+pSubsense(0), firstTime(true), showOutput(true),
+fRelLBSPThreshold 			(BGSSUBSENSE_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD),
+nDescDistThresholdOffset 	(BGSSUBSENSE_DEFAULT_DESC_DIST_THRESHOLD_OFFSET),
+nMinColorDistThreshold 		(BGSSUBSENSE_DEFAULT_MIN_COLOR_DIST_THRESHOLD),
+nBGSamples 					(BGSSUBSENSE_DEFAULT_NB_BG_SAMPLES),
+nRequiredBGSamples 			(BGSSUBSENSE_DEFAULT_REQUIRED_NB_BG_SAMPLES),
+nSamplesForMovingAvgs 		(BGSSUBSENSE_DEFAULT_N_SAMPLES_FOR_MV_AVGS)
+{
+}
+
+SuBSENSEBGS::~SuBSENSEBGS() {
+	// `delete` on a null pointer is a no-op, so no guard is needed.
+	delete pSubsense;
+}
+
+// Runs one frame through SuBSENSE: img_output receives the foreground mask,
+// img_bgmodel the current background image. Empty frames are ignored.
+void SuBSENSEBGS::process(const cv::Mat &img_input, cv::Mat &img_output, cv::Mat &img_bgmodel)
+{
+  if(img_input.empty())
+    return;
+
+  if (firstTime) {
+    // The loaded parameters are only consumed by the constructor below, so
+    // read (and write back) the config file once here rather than re-parsing
+    // the XML on every frame.
+    loadConfig();
+    saveConfig();
+
+    pSubsense = new BackgroundSubtractorSuBSENSE(
+    		fRelLBSPThreshold, nDescDistThresholdOffset, nMinColorDistThreshold,
+    		nBGSamples, nRequiredBGSamples, nSamplesForMovingAvgs);
+
+    // All-255 ROI: every pixel of the first frame is treated as relevant.
+    pSubsense->initialize(img_input, cv::Mat (img_input.size(), CV_8UC1, cv::Scalar_<uchar>(255)));
+    firstTime = false;
+  }
+
+  (*pSubsense)(img_input, img_output);
+  pSubsense->getBackgroundImage(img_bgmodel);
+
+  if(showOutput) {
+	  imshow("SuBSENSE FG", img_output);
+	  imshow("SuBSENSE BG", img_bgmodel);
+  }
+}
+
+// Persists the current SuBSENSE parameter set to ./config/SuBSENSEBGS.xml
+// (the same file loadConfig() reads back).
+void SuBSENSEBGS::saveConfig()
+{
+	CvFileStorage* fs = cvOpenFileStorage("./config/SuBSENSEBGS.xml", 0, CV_STORAGE_WRITE);
+
+	// cvOpenFileStorage returns NULL when ./config is missing or not
+	// writable; writing through a NULL storage crashes, so bail out.
+	if (fs == NULL)
+		return;
+
+	cvWriteReal(fs, "fRelLBSPThreshold", fRelLBSPThreshold);
+	// explicit casts: the members are size_t, cvWriteInt takes int
+	cvWriteInt(fs, "nDescDistThresholdOffset", static_cast<int>(nDescDistThresholdOffset));
+	cvWriteInt(fs, "nMinColorDistThreshold", static_cast<int>(nMinColorDistThreshold));
+	cvWriteInt(fs, "nBGSamples", static_cast<int>(nBGSamples));
+	cvWriteInt(fs, "nRequiredBGSamples", static_cast<int>(nRequiredBGSamples));
+	cvWriteInt(fs, "nSamplesForMovingAvgs", static_cast<int>(nSamplesForMovingAvgs));
+
+	cvReleaseFileStorage(&fs);
+}
+
+// Loads the SuBSENSE parameters from ./config/SuBSENSEBGS.xml, falling back
+// to the BGSSUBSENSE_* compile-time defaults for missing entries or a
+// missing file.
+void SuBSENSEBGS::loadConfig()
+{
+	CvFileStorage* fs = cvOpenFileStorage("./config/SuBSENSEBGS.xml", 0, CV_STORAGE_READ);
+
+	// If the file does not exist yet (e.g. first run, or ./config missing),
+	// keep the defaults instead of querying a NULL storage, which crashes in
+	// some OpenCV builds.
+	if (fs == NULL) {
+		fRelLBSPThreshold = BGSSUBSENSE_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD;
+		nDescDistThresholdOffset = BGSSUBSENSE_DEFAULT_DESC_DIST_THRESHOLD_OFFSET;
+		nMinColorDistThreshold = BGSSUBSENSE_DEFAULT_MIN_COLOR_DIST_THRESHOLD;
+		nBGSamples = BGSSUBSENSE_DEFAULT_NB_BG_SAMPLES;
+		nRequiredBGSamples = BGSSUBSENSE_DEFAULT_REQUIRED_NB_BG_SAMPLES;
+		nSamplesForMovingAvgs = BGSSUBSENSE_DEFAULT_N_SAMPLES_FOR_MV_AVGS;
+		return;
+	}
+
+	// cast: cvReadRealByName returns double, the member is float
+	fRelLBSPThreshold = static_cast<float>(cvReadRealByName(fs, 0, "fRelLBSPThreshold", BGSSUBSENSE_DEFAULT_LBSP_REL_SIMILARITY_THRESHOLD));
+	nDescDistThresholdOffset = cvReadIntByName(fs, 0, "nDescDistThresholdOffset", BGSSUBSENSE_DEFAULT_DESC_DIST_THRESHOLD_OFFSET);
+	nMinColorDistThreshold = cvReadIntByName(fs, 0, "nMinColorDistThreshold", BGSSUBSENSE_DEFAULT_MIN_COLOR_DIST_THRESHOLD);
+	nBGSamples = cvReadIntByName(fs, 0, "nBGSamples", BGSSUBSENSE_DEFAULT_NB_BG_SAMPLES);
+	nRequiredBGSamples = cvReadIntByName(fs, 0, "nRequiredBGSamples", BGSSUBSENSE_DEFAULT_REQUIRED_NB_BG_SAMPLES);
+	nSamplesForMovingAvgs = cvReadIntByName(fs, 0, "nSamplesForMovingAvgs", BGSSUBSENSE_DEFAULT_N_SAMPLES_FOR_MV_AVGS);
+
+	cvReleaseFileStorage(&fs);
+}
diff --git a/package_bgs/pl/SuBSENSE.h b/package_bgs/pl/SuBSENSE.h
new file mode 100644
index 0000000000000000000000000000000000000000..b527c2711682814d0cac39681e6b841008c1195f
--- /dev/null
+++ b/package_bgs/pl/SuBSENSE.h
@@ -0,0 +1,32 @@
+#pragma once
+
+#include <opencv2/opencv.hpp>
+
+#include "../IBGS.h"
+
+class BackgroundSubtractorSuBSENSE;
+
+// Adapter exposing BackgroundSubtractorSuBSENSE through the bgslibrary IBGS
+// interface (see SuBSENSE.cpp for the implementation).
+// NOTE(review): the destructor is non-virtual; deleting through an IBGS*
+// is only safe if IBGS declares a virtual destructor — confirm.
+class SuBSENSEBGS: public IBGS {
+private:
+	BackgroundSubtractorSuBSENSE* pSubsense;  // created lazily on the first processed frame
+	bool firstTime;                           // true until the subtractor is initialized
+	bool showOutput;                          // when true, process() shows FG/BG debug windows
+
+	// Tunables mirroring the BackgroundSubtractorSuBSENSE constructor
+	// arguments; persisted in / loaded from ./config/SuBSENSEBGS.xml.
+	float fRelLBSPThreshold;
+	size_t nDescDistThresholdOffset;
+	size_t nMinColorDistThreshold;
+	size_t nBGSamples;
+	size_t nRequiredBGSamples;
+	size_t nSamplesForMovingAvgs;
+
+public:
+	SuBSENSEBGS();
+	~SuBSENSEBGS();
+
+	// Runs one frame through the subtractor: img_output receives the
+	// foreground mask, img_bgmodel the current background model image.
+	void process(const cv::Mat &img_input, cv::Mat &img_output,
+			cv::Mat &img_bgmodel);
+
+private:
+	void saveConfig();
+	void loadConfig();
+};