diff --git a/bla b/bla
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/dox/00-overview-camtron.odt b/dox/00-overview-camtron.odt
new file mode 100644
index 0000000000000000000000000000000000000000..f957c39e2c408c158e5dc0faeb9aef97f75017d0
Binary files /dev/null and b/dox/00-overview-camtron.odt differ
diff --git a/dox/01-camera-tracking-method-englisch.odt b/dox/01-camera-tracking-method-englisch.odt
new file mode 100644
index 0000000000000000000000000000000000000000..18939b4ab11d2f37eabe8f2f0d25455475eaf01c
Binary files /dev/null and b/dox/01-camera-tracking-method-englisch.odt differ
diff --git a/dox/general_questions b/dox/general_questions
new file mode 100644
index 0000000000000000000000000000000000000000..d7029365b90120eb6a696c33ea203dfafe4719f5
--- /dev/null
+++ b/dox/general_questions
@@ -0,0 +1,5 @@
+> !! if the beetle runs in circles it is in "escape/panic mode", which doesn't represent "natural" behaviour (explore, forage, quick move from a to b)
+	-> think about a different setup. Bigger arena?
+--> ! the beetle needs to be small enough not to be limited by the constrained size of the arena
+> 10 minutes of recording is enough to analyze movement
+> 8 fps could be enough to reduce the mean error
diff --git a/dox/notes_A b/dox/notes_A
new file mode 100644
index 0000000000000000000000000000000000000000..9af617632d33567ab61a98f5856698b5e284bcd3
--- /dev/null
+++ b/dox/notes_A
@@ -0,0 +1,58 @@
+###############################
+## PART A
+###############################
+DCT
+git
+
+Does
+	Setup / Preparation
+	Camera Settings
+	Camera Calibration
+###############################
+
+###############################
+## Setup / Preparation
+###############################
+Hardware setup:
+	> Thermostat/Plantgrow chamber
+	> round tube with camouflage
+	> sizes in (height/width)
+	> light?
+	> thermometer + humidity sensor
+	> cams
+
+Software Setup:
+	> deploy to VM
+	-> ssh-connection
+	> configure
+	-> folder, length, cams..
+	>
+
+
+
+
+###############################
+## Camera Settings
+###############################
+	* lens aperture and focus should be adjusted optimally to get pin-sharp pictures with no overexposure -> fix the screw on the lens
+		gain??
+		use pictures of written text in different sizes to check focus
+	* export/import settings as XML.
+	* framerate
+		how low can it be while keeping the speed error acceptable?
+		> fps can be low. Try 3!
+		depends on movement speed and estimated error
+		calc the error by comparing the travelled distance at 40 fps vs 3 fps
+
+		?? Q: how to set fps to 3? (see sketch below)
+		?? if dark, set ExposureTimeAbs
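+
+		A minimal sketch of how this might look with the Vimba C++ API (the camera ID is the one from notes_B; the feature names AcquisitionFrameRateAbs / ExposureTimeAbs are assumptions, verify them in VimbaViewer's feature list):
+
+		// sketch: open the cam, set a low frame rate and a matching exposure time
+		#include <VimbaCPP/Include/VimbaCPP.h>
+		using namespace AVT::VmbAPI;
+
+		int main() {
+			VimbaSystem &sys = VimbaSystem::GetInstance();
+			if (VmbErrorSuccess != sys.Startup()) return 1;
+			CameraPtr cam;
+			FeaturePtr f;
+			if (VmbErrorSuccess == sys.OpenCameraByID("DEV_000F310338D4", VmbAccessModeFull, cam)) {
+				if (VmbErrorSuccess == cam->GetFeatureByName("AcquisitionFrameRateAbs", f))
+					f->SetValue(3.0);      // 3 fps
+				if (VmbErrorSuccess == cam->GetFeatureByName("ExposureTimeAbs", f))
+					f->SetValue(20000.0);  // 20000 us = 20 ms, must stay below the frame period
+				cam->Close();
+			}
+			sys.Shutdown();
+			return 0;
+		}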
+
+###############################
+## Calibration:
+###############################
+	* every time the cam moves!
+	* use a pattern (eg chessboard), see the sketch below
+	opencv:
+		https://github.com/opencv/opencv/blob/master/samples/cpp/calibration.cpp
+	* stores values in xml
+	-> mean re-projection error is a useful quality metric for the calibration precision. A smaller mean error means a more precise calibration
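+
+	A simplified sketch of the chessboard flow, using standard OpenCV calls; board size, square size and file names are made-up placeholders:
+
+	// sketch: chessboard calibration; calibrateCamera() returns the RMS re-projection error
+	#include <opencv2/opencv.hpp>
+	#include <string>
+	#include <vector>
+
+	int main() {
+		const cv::Size boardSize(9, 6);   // inner corners of the printed chessboard (assumed)
+		const float squareSize = 25.0f;   // square edge in mm (assumed)
+		std::vector<cv::Point3f> obj;
+		for (int y = 0; y < boardSize.height; ++y)
+			for (int x = 0; x < boardSize.width; ++x)
+				obj.emplace_back(x * squareSize, y * squareSize, 0.0f);
+
+		std::vector<std::vector<cv::Point3f>> objectPoints;
+		std::vector<std::vector<cv::Point2f>> imagePoints;
+		cv::Size imageSize;
+		for (int i = 0; i < 20; ++i) {    // e.g. 20 calibration shots
+			cv::Mat img = cv::imread("calib_" + std::to_string(i) + ".jpg", cv::IMREAD_GRAYSCALE);
+			if (img.empty()) continue;
+			imageSize = img.size();
+			std::vector<cv::Point2f> corners;
+			if (cv::findChessboardCorners(img, boardSize, corners)) {
+				imagePoints.push_back(corners);
+				objectPoints.push_back(obj);
+			}
+		}
+
+		cv::Mat cameraMatrix, distCoeffs;
+		std::vector<cv::Mat> rvecs, tvecs;
+		double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
+		                                 cameraMatrix, distCoeffs, rvecs, tvecs);
+		// store the values in xml, as noted above
+		cv::FileStorage fs("calibration.xml", cv::FileStorage::WRITE);
+		fs << "camera_matrix" << cameraMatrix << "distortion_coefficients" << distCoeffs
+		   << "rms_reprojection_error" << rms;
+		return 0;
+	}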
diff --git a/dox/notes_B b/dox/notes_B
new file mode 100644
index 0000000000000000000000000000000000000000..cb18aeec4b464721ebc553b335ee67cf5cf0cbdd
--- /dev/null
+++ b/dox/notes_B
@@ -0,0 +1,87 @@
+###############################
+## PART B
+###############################
+V
+git@gitlab.idiv.de:sugu/vimba.git
+
+Does
+	> camera produces frames and timestamps
+###############################
+
+* the camera sends the raw pictures to a storage server via a network (IP/UDP) connection
+* it has an internal real-time clock and delivers high-precision timestamps, which are recorded for every frame (use milliseconds)
+* compress frames as jpeg
+	uses opencv version 3.2 imwrite() with parameters (see sketch below):
+		IMWRITE_JPEG_QUALITY 100; IMWRITE_JPEG_OPTIMIZE 1; IMWRITE_JPEG_RST_INTERVAL 4;
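+
+	A minimal sketch of the compression step with exactly these imwrite() parameters (frame and file name are placeholders):
+
+	// sketch: write one frame as jpeg with the parameters listed above
+	#include <opencv2/opencv.hpp>
+	#include <string>
+	#include <vector>
+
+	void writeFrame(const cv::Mat &frame, const std::string &path) {
+		const std::vector<int> params = {
+			cv::IMWRITE_JPEG_QUALITY,      100,
+			cv::IMWRITE_JPEG_OPTIMIZE,     1,
+			cv::IMWRITE_JPEG_RST_INTERVAL, 4
+		};
+		cv::imwrite(path, frame, params);  // e.g. path = "frames/<frame number>_<timestamp ms>.jpg"
+	}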
+
+## deploy
+runs on the storage server
+VM, reachable via ssh thomasboy@172.18.115.7 -> ask Dirk!
+
+# build/setup Vimba
+	> uses Vimba SDK 6.0 / C++ 1.9
+	> download from https://www.alliedvision.com/en/products/vimba-sdk/#c1497
+	> see Vimba_installation_under_Linux.pdf
+	> unpack Vimba SDK
+	> tar -xzf ./Vimba.tgz
+	> sudo ./VimbaGigETL/Install.sh #add api to path
+
+## SETUP GT1920C:
+> connect laptop and cam directly via ethernet
+> set MTU to 8228 (jumbo frames/packets)
+> set a fixed IP for the eth-adapter first and then for the cam (use VimbaViewer -> force IP)
+	eth 169.254.100.1
+	cam ip 	169.254.x.x # on restart will pick random IP...
+			mac
+				000f310338D3
+				000f310338D4
+			ID: DEV_000F310338D4
+			Name: GT1920C
+
+	sub 255.255.0.0
+  (gateway 0.0.0.0 local / none)
+	-> the ip address of each adapter needs to be on a unique subnet
+	-> for multiple cams, calc the bandwidth and use a switch
+
+
+
+
+#####################
+
+What bandwidth do I have? Do I need multiple ports?
+bandwidth = fps * pixel format (bytes per pixel) * resolution (* ncams)
+StreamBytesPerSecond = 1 * 1456*1936 * 3 * 1 = 8 456 448 ~= 8.4 MB/s < 125 MB/s
+	14×3×1936×1456 ~= 118.4 MB/s ## max res, 3 bytes/px -> max FPS: 14!
+	44×1×1936×1456 ~= 124.1 MB/s ## max res, 1 byte/px -> max FPS: 44!
+		per 10 min ->  74 GB?
+		per 60 min -> 446 GB?
+	6×1×1936×1456 ~= 16.9 MB/s   ## find optimal FPS to reduce size!
+	8×1×1936×1456 ~= 22.6 MB/s   ## find optimal FPS to reduce size!
+	10×1×1936×1456 ~= 28.2 MB/s  ## find optimal FPS to reduce size!
+		per 10 min ->  17 GB?
+		per 60 min -> 102 GB?
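+
+	The same estimate as a tiny helper, so other fps / bytes-per-pixel combinations can be checked quickly (pure arithmetic, no camera API involved):
+
+	// sketch: bytes per second = fps * bytesPerPixel * width * height * ncams
+	#include <cstdio>
+
+	double streamBytesPerSecond(double fps, int bytesPerPixel, int width, int height, int ncams = 1) {
+		return fps * bytesPerPixel * width * height * ncams;
+	}
+
+	int main() {
+		const double gigE = 125e6;  // ~125 MB/s on gigabit ethernet
+		for (double fps : {3.0, 8.0, 10.0, 44.0}) {
+			double bps = streamBytesPerSecond(fps, 1, 1936, 1456);
+			std::printf("%4.0f fps -> %6.1f MB/s (%s), %5.1f GB per 10 min\n",
+			            fps, bps / 1e6, bps < gigE ? "fits" : "too much", bps * 600 / 1e9);
+		}
+		return 0;
+	}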
+
+	1. Determine max_speed with the highest fps!
+	2. Take max_speed and reduce the fps so it still captures 2*max_speed
+		-> subsample the same video ?
+	3. calc mean_err by comparing 1. and 2. -> add it to 2., since the low-fps path length will be lower (see sketch below)
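+
+	A minimal sketch of step 3, assuming one centroid per frame is already available: subsample the high-fps trajectory and compare path lengths (point type, values and subsampling factor are placeholders):
+
+	// sketch: path length of the full trajectory vs. a subsampled one
+	#include <cmath>
+	#include <cstdio>
+	#include <vector>
+
+	struct Pt { double x, y; };
+
+	double pathLength(const std::vector<Pt> &pts, std::size_t step = 1) {
+		double len = 0.0;
+		for (std::size_t i = step; i < pts.size(); i += step)
+			len += std::hypot(pts[i].x - pts[i - step].x, pts[i].y - pts[i - step].y);
+		return len;
+	}
+
+	int main() {
+		// placeholder trajectory; in practice these come from the tracking output at 40 fps
+		std::vector<Pt> track;
+		for (int i = 0; i < 400; ++i)
+			track.push_back({i * 1.0, std::sin(i * 0.1) * 50.0});
+
+		double full = pathLength(track);      // 40 fps reference
+		double sub  = pathLength(track, 13);  // ~3 fps, subsampled from the same video
+		std::printf("relative error: %.1f %% (subsampled path is shorter)\n",
+		            100.0 * (full - sub) / full);
+		return 0;
+	}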
+
+	!! exposure and fps are coupled: the exposure time can't exceed the frame period (1/fps, e.g. 100 ms at 10 fps)!
+	Best practice: set the gain to the lowest possible value and increase exposure as needed
+
+	!! if you use more than one cam on one interface, the available bandwidth has to be shared between the cams.
+
+
+	MAX BANDWIDTH = gigabit ethernet ~= 125 MB/s
+	MAC1 00:0F:31:03:38:D4
+	YUV422 = 2 bytes/px, YUV444 = 3 bytes/px
+	TL - Transport Layer -> GigE as camera interface
+	GenICam - open TL standard -> GigE
+
+Prosilica GX:
+	2 ethernet ports...
+	configure LAG (Link Aggregation Group)
+	-> doubles the available bandwidth to ~240 MB/s!
+#####################
+
+
diff --git a/dox/notes_C b/dox/notes_C
new file mode 100644
index 0000000000000000000000000000000000000000..0e83043c110a123903a2356e5611c3560ba0fc44
--- /dev/null
+++ b/dox/notes_C
@@ -0,0 +1,233 @@
+###############################
+## PART C
+###############################
+CT
+git@gitlab.idiv.de:sugu/camtron.git
+
+Does
+	> background subtraction
+	> calculates centroid points of all frames in a record
+###############################
+
+###############################
+#define bgs_register(x) static BGS_Register<x> register_##x(quote(x))
+## is the preprocessor token-pasting operator (glues the tokens together)
+quote() stringizes its argument (adds the "")
+
+bgs_register(Tapter)
+>> static BGS_Register<Tapter> register_Tapter("Tapter")
+
+PCA?
+
+> difference
+	virtual void f();               // declared, must be defined somewhere
+	virtual void f() { /*empty*/ }  // empty default implementation
+	virtual void f() = 0;           // pure virtual
+
+> also difference
+	> virtual dtor(): needed if an object is deleted through a pointer to the base class
+	> pure virtual dtor(): still needs a function body, because dtors are not overridden - derived dtors always call the base dtor
+	> interface class (only pure virtual functions, no data)
+	> abc - abstract base class: has at least one pure virtual function, can't be instantiated anymore
+	> abc <> interface? an interface is an abc with nothing but pure virtuals (see sketch below)
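+
+	A minimal sketch of the difference (not tied to the camtron classes):
+
+	// sketch: interface vs. abstract base class, and why a pure virtual dtor still needs a body
+	#include <cstdio>
+	#include <memory>
+
+	struct IFrameSource {             // interface: nothing but pure virtuals
+		virtual ~IFrameSource() = 0;  // pure virtual dtor...
+		virtual bool nextFrame() = 0;
+	};
+	IFrameSource::~IFrameSource() {}  // ...still needs a body, the base dtor is always called
+
+	struct FileSource : IFrameSource {  // concrete class, can be instantiated again
+		bool nextFrame() override { std::puts("frame"); return false; }
+	};
+
+	int main() {
+		std::unique_ptr<IFrameSource> src = std::make_unique<FileSource>();
+		src->nextFrame();  // deleting through the base pointer is safe because the dtor is virtual
+		return 0;
+	}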
+
+
+	// IplImage is the old-school Mat and not supported anymore..
+
+	> use smart pointers like so:
+		auto videoAnalysis = std::make_unique<VideoAnalysis>();
+		videoCapture       = std::make_unique<VideoCapture>();
+		frameProcessor     = std::make_shared<FrameProcessor>();
+
+
+###########
+libarchive stuff
+	archive_read_xxx()
+	archive_write_xxx()
+	struct archive_entry
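+
+	A minimal sketch of the libarchive read path (the archive name is a placeholder; the actual usage in the repo may differ):
+
+	// sketch: list the entries of an archive with the archive_read_xxx() API
+	#include <archive.h>
+	#include <archive_entry.h>
+	#include <cstdio>
+
+	int main() {
+		struct archive *a = archive_read_new();
+		archive_read_support_filter_all(a);
+		archive_read_support_format_all(a);
+		if (archive_read_open_filename(a, "record.tar.gz", 10240) != ARCHIVE_OK) {
+			std::fprintf(stderr, "%s\n", archive_error_string(a));
+			return 1;
+		}
+		struct archive_entry *entry;
+		while (archive_read_next_header(a, &entry) == ARCHIVE_OK) {
+			std::printf("%s\n", archive_entry_pathname(entry));
+			archive_read_data_skip(a);  // only list names here, skip the payload
+		}
+		archive_read_free(a);
+		return 0;
+	}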
+
+huge workaround for corrupted files
+clock
+random
+command arg parser
+
+
+
+???
+#if CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION == 3 && CV_SUBMINOR_VERSION >= 9)
+  IplImage _frame = cvIplImage(img_input);  // OpenCV 4.x / 3.x.9+: convert the cv::Mat with cvIplImage(), stack object
+  frame = &_frame;
+#else
+  frame = new IplImage(img_input);           // older OpenCV: implicit cv::Mat -> IplImage conversion still exists
+#endif
+
+#smartpointer?? -> cv::Ptr is OpenCV's reference-counted smart pointer
+#elif CV_MAJOR_VERSION >= 3
+      cv::Ptr<cv::BackgroundSubtractorMOG2> mog;
+#endif
+
+
+
+> What did Tboy do... and why did he do it?
+	> forked branch bgslib_qtgui_2.0.0
+	> Tapter
+			> adapter for model
+			> was probably copied from LBAdaptiveSOM.cpp
+		--> which is disabled in openCV4
+
+
+Which Background Subtraction Algo to use??
+	median, mean, frame difference
+		+ simple, fast
+		- not robust if the light/background changes
+		- slow changes ?
+	> adaptive bg?
+	fuzzy?
+	mixture
+
+
+NAMES
+	Kernel?
+	LBSP?
+	Multilayer?
+	Sigma-Delta?
+	ViBe, T2F, dp, lb, vumeter?
+	choquet, sugeno, zivkovic, pratimediod, LOBSTER
+
+Test/Use most common >> Ground Truth
+	Frame Difference
+	WeightedMovingMean / Variance
+	LBAdaptiveSOM
+	MOG2 (Mixture Of Gaussian) MixtureOfGaussianV2.h
+	KNN (K Nearest Neighbour)
+		> fast for small fg objects
+		> TRY! (see sketch after this list)
+	FuzzySugenoIntegral.h
+
+	LSBP - Local Binary Similarity Patterns - (2013)
+	LSBP-based GSoC ?
+	SuBSENSE: improved spatiotemporal LBSP + color features (2014)
+
+	Combinable with
+		ROI
+		Canny Edge Detection
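+
+	A minimal sketch of trying MOG2/KNN with plain OpenCV (not the bgslibrary wrappers used in the repo); video path and parameters are placeholders:
+
+	// sketch: background subtraction with MOG2 (or KNN) plus one centroid per frame
+	#include <opencv2/opencv.hpp>
+	#include <cstdio>
+
+	int main() {
+		cv::VideoCapture cap("record.avi");  // placeholder input
+		auto bgs = cv::createBackgroundSubtractorMOG2(500, 16.0, true);
+		// auto bgs = cv::createBackgroundSubtractorKNN();  // the KNN variant to TRY
+
+		cv::Mat frame, fgMask;
+		while (cap.read(frame)) {
+			bgs->apply(frame, fgMask);
+			cv::threshold(fgMask, fgMask, 200, 255, cv::THRESH_BINARY);  // drop shadow pixels (value 127)
+			cv::Moments m = cv::moments(fgMask, true);
+			if (m.m00 > 0)  // centroid of all foreground pixels
+				std::printf("%.1f %.1f\n", m.m10 / m.m00, m.m01 / m.m00);
+		}
+		return 0;
+	}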
+
+
+> bg modeling to keep the BG model updated (eg moving trees) > per-pixel threshold
+> optic flow (when the camera itself is also moving) > vectorial estimation of its own movement
+
+
+features
+	edge
+		canny edge detector + calc contours (see sketch after this list)
+		> https://en.wikipedia.org/wiki/Canny_edge_detector
+	roi
+		crop
+	color
+		RGB - not so robust by itself (sensitive to illumination, shadows, oscillations ...)
+		YUV
+		YCrCb - brightness, chroma, color
+	texture
+		robust to illumination and shadow
+		eg Local Binary Pattern (LBP)
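+
+	A minimal sketch of the canny + contour idea on a single frame (thresholds are placeholders to tune):
+
+	// sketch: canny edge detection followed by contour extraction
+	#include <opencv2/opencv.hpp>
+	#include <cstdio>
+	#include <vector>
+
+	int main() {
+		cv::Mat gray = cv::imread("frame.jpg", cv::IMREAD_GRAYSCALE);  // placeholder input
+		cv::Mat edges;
+		cv::GaussianBlur(gray, gray, cv::Size(5, 5), 1.5);  // smooth before canny
+		cv::Canny(gray, edges, 50, 150);                    // low/high hysteresis thresholds
+		std::vector<std::vector<cv::Point>> contours;
+		cv::findContours(edges, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
+		std::printf("found %zu contours\n", contours.size());
+		return 0;
+	}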
+
+https://github.com/murari023/awesome-background-subtraction (2021 new stuff!)
+
+https://learnopencv.com/background-subtraction-with-opencv-and-bgs-libraries/
+http://docs.opencv.org/2.4/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.html
+https://hackthedeveloper.com/background-subtraction-opencv-python/ #mog2 + knn in python
+https://docs.opencv.org/4.3.0/d4/dd5/classcv_1_1bgsegm_1_1BackgroundSubtractorGSOC.html#details #GSOC LSBP ALGO from openCV bgsegm.hpp
+
+https://openaccess.thecvf.com/content_cvpr_workshops_2014/W12/papers/St-Charles_Flexible_Background_Subtraction_2014_CVPR_paper.pdf
+https://www.scitepress.org/Papers/2018/66296/66296.pdf #vehicle tracking latvia. 2018, BackgroundSubtractorMOG, BackgroundSubtractorMOG2 (zivkovic)
+https://www-sop.inria.fr/members/Francois.Bremond/Postscript/AnhTuanAVSS14.pdf  2014
+https://arxiv.org/pdf/1803.07985.pdf # visual animal tracking (2018)
+https://arxiv.org/pdf/1507.06821.pdf # Multimodal Deep Learning for Robust RGB-D Object Recognition (2015)
+https://towardsdatascience.com/background-removal-with-deep-learning-c4f2104b3157?gi=2ef3a5272e5d (2017 Background removal with deep learning)
+
+https://opencv.org/courses/ #xpensive ai course
+https://www.fast.ai/ #free ai course
+
+
+##################
+# computervision #
+##################
+background subtraction (bgs)
+image segmentation
+	Semantic Segmentation (ai)
+detection (feature, object)
+classification (category recognition)
+
+Challenges: occlusion, (sensor) noise, changing external conditions (lighting, shadows, fog, reflection)
+
+> pre-training if there is a lack of data
+> corrupt (augment) the data to make learning more robust
+
+######
+# AI #
+######
+Convolutional Neural Networks (CNNs)
+	https://www.researchgate.net/publication/337401161_Fundamental_Concepts_of_Convolutional_Neural_Network (2020 37p)
+		> Kernel
+		> filter?
+		> preprocessing > Training > Parameter init > regularization > optimize
+
+
+RGB-D - RGB + depth >> eg Kinect sensors
+KNN - k-nearest neighbors algorithm
+Monte Carlo tree search algorithm (alphaGo...)
+
+RGB-D descriptor that relies on a K-Means
+HMP - hierarchical matching pursuit ( hierarchical sparse-coding method; learn features from multiple channel input)
+
+encoding depth information into three channels (HHA encoding) > feature hierarchy
+cascade of Random Forest classifiers that are fused in a hierarchical manner ?? #Random Forest
+
+recall - how many relevant items are retrieved?
+precision - how many retrieved items are relevant?
+	> https://en.wikipedia.org/wiki/Recall_(information_retrieval)
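+	the textbook formulas, in terms of true/false positives and negatives:
+		precision = TP / (TP + FP)   # of everything retrieved, how much is relevant
+		recall    = TP / (TP + FN)   # of everything relevant, how much was retrieved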
+
+? automatically learn a fusion strategy for the recognition task – in contrast to simply training a linear classifier on top of features extracted from both modalities
+
+MISRE - Multiple Input Structures with Robust Estimator
+gradient-based learning > MNIST supervised image classification
+MNIST - large db of hand-written digits (used for ML and image processing)
+> also see classifiers: https://en.wikipedia.org/wiki/MNIST_database#Classifiers
+multimodal learning
+
+the parameters of all layers were adapted using a fixed learning rate schedule
+
+At test time the task of the CNN is to assign the correct class label to a previously unseen object instance.
+
+Tasks:
+Classification, clustering, regression, summarization, categorization, natural language processing
+
+##################################
+# STAT
+##################################
+Regression
+Hadamard Product
+Bernoulli Distribution
+Discrete Uniform Distribution
+
+##################################
+# INSTALL
+##################################
+sublime: install package CMAKE (syntax highlighting)
+apt install libarchive-dev python3-pybind11
+
+##################################
+
+
+
+ init(img_input, img_output, img_bgmodel);  // the IplImage version switch shown further up follows right after this call
+
diff --git a/dox/notes_D b/dox/notes_D
new file mode 100644
index 0000000000000000000000000000000000000000..c4f193113fcba6870f68e74326a034d0ad0f7c11
--- /dev/null
+++ b/dox/notes_D
@@ -0,0 +1,20 @@
+###########################
+## PART D
+###########################
+CTD
+git@gitlab.idiv.de:tb55xemi/itrackl-process-scripts.git
+
+Does:
+	> reduce size - cutROI
+	> analyse trajectory data
+	> calculate mean speed in pixels / millisecond (see sketch below)
+##########################
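+
+A minimal sketch of the mean-speed calculation, assuming one centroid (x, y in pixels) plus a millisecond timestamp per frame; the struct and sample values are placeholders, not the actual script in the repo:
+
+// sketch: mean speed in pixels per millisecond from (centroid, timestamp) pairs
+#include <cmath>
+#include <cstdio>
+#include <vector>
+
+struct Sample { double x, y; long long t_ms; };  // centroid in pixels, timestamp in ms
+
+double meanSpeed(const std::vector<Sample> &track) {
+	if (track.size() < 2) return 0.0;
+	double dist = 0.0;
+	for (std::size_t i = 1; i < track.size(); ++i)
+		dist += std::hypot(track[i].x - track[i - 1].x, track[i].y - track[i - 1].y);
+	double dt = static_cast<double>(track.back().t_ms - track.front().t_ms);
+	return dt > 0.0 ? dist / dt : 0.0;  // total path length / total recording time
+}
+
+int main() {
+	std::vector<Sample> track = {{0, 0, 0}, {3, 4, 125}, {6, 8, 250}};
+	std::printf("mean speed: %.3f px/ms\n", meanSpeed(track));  // 10 px over 250 ms = 0.04
+	return 0;
+}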
+
+
+
+## R + stats
+###############################
+> read r4ds
+	> ex: http://adv-r.had.co.nz/Subsetting.html#applications
+> ex: anagram
+> course (not official, just vids+scripting homework/exercises)
diff --git a/dox/notes_X b/dox/notes_X
new file mode 100644
index 0000000000000000000000000000000000000000..9f6f6edd1c0df7eed5804429e6fc22cf535d0b66
--- /dev/null
+++ b/dox/notes_X
@@ -0,0 +1,9 @@
+###############################
+## PART X
+###############################
+	HPC - High Performance Computing cluster
+
+does
+	data is copied to the HPC
+	C) and D) will run on the HPC
+###############################
diff --git a/dox/todo_tron b/dox/todo_tron
new file mode 120000
index 0000000000000000000000000000000000000000..ecb262895599140e7f5cb1e211ded47265fe1381
--- /dev/null
+++ b/dox/todo_tron
@@ -0,0 +1 @@
+/home/m/dox/todo/todo_tron
\ No newline at end of file