diff --git a/README b/README new file mode 100644 index 0000000000000000000000000000000000000000..60845db0b8d78583961e0923185642d1ebf387fc --- /dev/null +++ b/README @@ -0,0 +1,24 @@ +To get started recording, read dox/notes_b + > GETTING STARTED + > OVERVIEW VIMBA / B + + + +Overview git repos camtron, schrebatron and ecotron +############################### + +schrebatron - git@gitlab.idiv.de:tb55xemi/rftrack3k.git +schrebatrondata - git@gitlab.idiv.de:sugu/schrebatrondata.git + +ecotrondata - git@gitlab.idiv.de:sugu/ecotrondata.git - https://git.idiv.de/sugu/ecotrondata +ecotron - git@gitlab.idiv.de:tb55xemi/rft2node.git +ecotronscripts - git@gitlab.idiv.de:tb55xemi/ecotron-rfid-controller-script-set.git + +camtron_all - git@gitlab.idiv.de:sugu/camtron_all.git + camtron a - just docs inside camtron_all - notes about hardware (banfs,docs) and setup + camtron b = VIMBA - git@gitlab.idiv.de:sugu/vimba.git - records video from cameras + camtron c = camtron / bgslibrary - git@gitlab.idiv.de:sugu/camtron.git - background subtraction + camtron d = itrackl-process-scriptws - git@gitlab.idiv.de:tb55xemi/itrackl-process-scripts.git - scripts for processing + +############################### + diff --git a/b b/b index d161e4bae3857a41575afde56eba8c3ef5cac194..ec79b322e9227012fb47749336c9d0f38ad81bb7 160000 --- a/b +++ b/b @@ -1 +1 @@ -Subproject commit d161e4bae3857a41575afde56eba8c3ef5cac194 +Subproject commit ec79b322e9227012fb47749336c9d0f38ad81bb7 diff --git a/dox/.~lock.timeplan_milestones.ods# b/dox/.~lock.timeplan_milestones.ods# new file mode 100644 index 0000000000000000000000000000000000000000..d1de94b72adda50bf0f4a2b310c3c6abd52800c9 --- /dev/null +++ b/dox/.~lock.timeplan_milestones.ods# @@ -0,0 +1 @@ +,kr69sugu,idivnb303.usr.idiv.de,29.09.2024 17:16,file:///home/kr69sugu/.config/libreoffice/4; \ No newline at end of file diff --git a/dox/c++_scrap b/dox/c++_scrap deleted file mode 100644 index 
9702b42c80bf28472f6c59a2218b7cfde42dac01..0000000000000000000000000000000000000000 --- a/dox/c++_scrap +++ /dev/null @@ -1,117 +0,0 @@ -//############################### -// parse arguments -//############################### - // QCommandLineParser parser; - // parser.setApplicationDescription("a commandline app that is meant to be standalone and communicates with the camera"); - // parser.addHelpOption(); - // parser.addVersionOption(); - // parser.addOptions({ - // {{"o", "out-dir"}, "store frames to <out-dir>", "out_dir"}, - // {{"l", "list-cams"}, "list available cams"}, - // {{"e", "export-cam-config"}, "export camera config" "settings.xml"}, - // {{"i", "import-cam-config"}, "import camera config" "settings.xml"}, - // {{"c", "calibrate"}, "calibrate cam"}, - // {{"r", "start record"}, "record"}, - // {{"s", "stop record"}, "stop recording"}, - // // {{"f", "force"}, "Overwrite existing files."}, - // // {{"n", "dry-run"}, "Do nothing. For testing purposes."}, - // // {{"p", "progress"}, "Show progress during copy."}, - // // {{"v", "verbose"}, "verbose"}, - // }); - - // parser.process(a); - - // const QStringList args = parser.positionalArguments(); - // // source is args.at(0), destination is args.at(1) - // qDebug() << "args: " << args.join(", ") << "\n"; - - // if( parser.isSet("o") ) - // { - // maybeCreateDir(parser.value("o")); - // } - - - -//############################### -// curCam->StartContinuousImageAcquisition(int bufferCount, const IFrameObserverPtr &pObserver) -//############################### -//open cam - // if ( VmbErrorSuccess == (* iter)->Open( VmbAccessModeFull ) ) - // { - // qDebug() << " Camera opened "; - // // emit() - // } - // else - // { - // // cameras. 
- // qDebug() << "Coudn't open camera"; - // } - -// (*iter)->AcquireSingleImage() -// (*iter)->GetFeatureByName -// (*iter)->GetFeatures -// (*iter)->AcquireMultipleImages() -// (*iter)->StartContinuousImageAcquisition() -// (*iter)->StartCapture() -// // (*iter)->GetPayloadSize(VmbUint32_t &nPayloadSize) - // (*iter)->GetInterface(InterfacePtr &pInterface) - // (*iter)->LoadSettings() - // (*iter)->SaveSettings(const VmbFilePathChar_t *filePath) - - - - // std::string str; - // (*iter)->GetID( str ); - // camInfo << QString::fromStdString(str); - - -// auto mf = std::bind(&Core::getCamInfo, this, std::placeholders::_1); //weird syntax for calling non-static memberfunction in for_each... - // std::for_each( cameras.begin(), cameras.end(), mf ); - -############################### -CONSOLE - -if it takes userinput interactively, create threads to not-block the main thread eventloop --> connect via signal slots -------------------------------- -Style / Naming convetions: -> use camelCase! - -############################### -on Qthread -https://mayaposch.wordpress.com/2011/11/01/how-to-really-truly-use-qthreads-the-full-explanation/ -############################### -https://stackoverflow.com/questions/7543313/how-to-handle-keypress-events-in-a-qt-console-application#18889631 -void initTermios(int echo) -{ - tcgetattr(0, &oldSettings); /* grab old terminal i/o settings */ - newSettings = oldSettings; /* make new settings same as old settings */ - newSettings.c_lflag &= ~ICANON; /* disable buffered i/o */ - newSettings.c_lflag &= echo ? ECHO : ~ECHO; /* set echo mode */ - tcsetattr(0, TCSANOW, &newSettings); /* use these new terminal i/o settings now */ -} -############################### - -// usleep(500000); // sleep 0.5s - -# use Qt wrapper for stdout/stdin/stderr, for better QString handling. -# > not working with userinput, cause not flushed properly... 
-QTextStream qout(stdout); //2d make global to access from anywhere - QTextStream qin(stdin); - qin.skipWhiteSpace(); // Remove white characters that interrupt - QTextStream qerr(stderr); - - qout << "Please enter your name: " << endl; - qout.flush(); // clears the keyboard buffer - - QString inpu = qin.readLine(); - qout << "u typed " << inpu.toLower() << endl; - - return 3; - -############################### -eventfilter -evt->key() -evt->modifiers() - -pushButton->installEventFilter(keyPressEater); diff --git a/dox/cmds.cpp b/dox/cmds.cpp deleted file mode 120000 index c0fe56fb7dc00433b3b9bbb4da5b5cb6a79ada19..0000000000000000000000000000000000000000 --- a/dox/cmds.cpp +++ /dev/null @@ -1 +0,0 @@ -/home/kr69sugu/dox/cmds/cmds.cpp \ No newline at end of file diff --git a/dox/install b/dox/install deleted file mode 100644 index 8587d85be0af38d2aa8b4c1dc8b226de87b7ebb7..0000000000000000000000000000000000000000 --- a/dox/install +++ /dev/null @@ -1,23 +0,0 @@ -## software B - calc centroids -################################ -g++ -ggdb -o `pkg-config --cflags --libs opencv4` - - -camera_matrix 5678 -distortion_coefficients 76745 - -where is the data? -PATHIN="/data/ibtrack/move/input/rec$IDSTRING/" - -files: -randlist.yml -fileErrorList.yml - - -## software A - grab pictures from camera -######################################### -Vimba Viewer: - see png to understand components architecture. 
- test: $INSTALL_DIR/Tools/Viewer/Bin/x86_64bit/VimbaViewer - - diff --git a/dox/milestones.ods b/dox/milestones.ods new file mode 100644 index 0000000000000000000000000000000000000000..1dd67c91a8dd1e7862eacec56caa4a6f158b4a31 Binary files /dev/null and b/dox/milestones.ods differ diff --git a/dox/misc b/dox/misc deleted file mode 100644 index b45acf9c750c1e4df09170b1e92a21fc981d41fc..0000000000000000000000000000000000000000 --- a/dox/misc +++ /dev/null @@ -1,168 +0,0 @@ - -## industry cameras -############################### -5gige and 10gige more expensive + might get bandwidthproblems with 6cams? + 10gige need externalframegrabber -usb2 - too old - -> usb3 - - 4.5m cable - + usb3 interface card with 4 cams connected -> gige - + 100m cable - hi res (20MP) and low fps, or - low res (2MP) and high fps - bandwith average. between usb2 and 3 - -> can i reduce the fps manually? - -multiple cams: - star-network using an ethernet switch (bandwidth will be divided), or - a point-to-point network using an interface card with multiple GigE inputs. - -what interface gige card to use? - > needs to support PoE - > ETH from PC provides PoE? if no, then interface card - -## Heating: -> cable: - cat6a is better than cat6 or 5e - dont bundle multiple cables together - -## lens -420nm and 720nm and 800nm-1300nm IR -IR corrected -> can be used for N IR - -problem: spheric abberation = rays farther from center are refracted to different focal point - > most @ max aperture (f1.8) -problem: chromatic aberration = different colors (wavelentgths) have different focal lengths (sa. ~prisma) - > color finging - > longitudinal and lateral CA - -solutions: - > achromate = multi-lenses (doublet, triplet). - color correction (blue+red) - > aspheric - different form of lense (not spheric :) - > aspheric achromate - susceptible to scratch. 
- good quality --> ir-corrected doublet/triplet - -"fast aperture" = max aperture -> eg f1.8 needs to be quick to close all the way -DOF --> is it possible to use a non-IR gigE cam with a IR-corrected lense, for nightvision? --> angle + distance - fixed or zoom lens - --> cant get color img _and_ IR img at the same time - -> opt for IR, because it works with light and without - -thermal >= nightvision -see through fog/grass? --- - - -450 2mp cam -600 5mp cam - -Lense: -calculate fixed focal length (fest brennweite) -8mm >> -6mm >> 225oi -5mm >> 60cm - -<900nm QE ringlichter >> NIR. - -calculation: -> 50 cm diameter -> 500mm -> 2.4 MP (1936x1216) -> 1216 pix per line -1216 / 500 = 2.4 px/mm -small insect = 4mm -> 8px -> 8*8 = 64px... - - --> can i use SWIR for VS as well? yes, but they are unaffordably expensive --> SWIR for RaspPi? or usb3? cheaper? - -## DOF - Schärfentiefe - Tiefenschärfe -############################################################# -https://photographylife.com/what-is-depth-of-field -app: simple DOF calculator -https://www.edmundoptics.com/c/achromatic-lenses/652/# - -> more DOF on - * smaller focal length - * lower aperture eg f16 > landscape - -> less DOF - * high aperture eg f1.8 - * larger focal length -> zoom 150mm - > BOKEH. nice creamy background - > macro, landscape with FG-POI - -> f5.6 sharpest + best middle ground - -## crop factor // sensorsize - > equivalent focal length - eg. olympus 2x - 8mm > 16mm - narrower - 100mm > 200mm -more zoom (fake crop zoom) - - > calc equiv. focal length: - https://www.translatorscafe.com/unit-converter/en-US/calculator/lens-efl/ - - > smaller sensor - = more crop and more "zoom" (narrower field of view, less DOF) - = less light, dynamic range, pixelsize - = less pixels (normally) - - > bigger sensor - = less crop, wider field of view -> can crop manually (but more DOF) - -##build myself... IoT / embedded camera - raspberryPi + CAM - > 15€ possible? 
- > https://www.raspberrypi.com/products/raspberry-pi-zero-2-w/ - MIPI CSI2 - interface -> all Pis have interface - >> datarates? - >> IR / lowlight compatible? - >> howto make rugged? - - nvidia jetson nano? - https://www.nvidia.com/de-de/autonomous-machines/embedded-systems/jetson-nano/ - -csi(2) - pi: camera serial interface -dsi(2) - pi: display serial interface -csi/dsi -> max len 25cm! - --> via FPD-Link III up to 15m - --> GSML2 ? -############################### -hub/repeater > just cp data to all connected -switch > -router > -codemeter -GenICamBrowser -############################### - - - -## How-To Stream? - - SS = Streaming Server - - GT 1920: - read stream specs. - search VIMBA ... IP-based. - - ndi-python - wrapper for NDI-SDK - ffmpeg as RTMP-SS - nginx - nginx-rtmp-module - linux SS - openCV - VideoCapture( protocol://host:port/script_name?script_params|auth ) - Gstreamer - QT - Vimba plugin? - - obs-studio/jammy 27.2.3+dfsg1-1 amd64 - recorder and streamer for live video content - can use NDI? diff --git a/dox/misc2 b/dox/misc2 deleted file mode 100644 index 0aa41073fce1a7b663bd30eb18be4e470ea465e4..0000000000000000000000000000000000000000 --- a/dox/misc2 +++ /dev/null @@ -1,52 +0,0 @@ -SW viewer [optional] -############################### - how? - Wie ruft man die index.html auf der VM auf? - Wie kommunizieren SW REC und Viewer? - eg. OnStopButtonClicked( cmd( ../camTrack3000 --stop ) ) ?? - REST-Server/ HTTP-Json / .. -############################### -vimba: GVSPAdjustPacketSize - find max size automatically - if size < 1500 - > enable jumboframes - -check: - vimbaviewer > stream > statistics - StatFramesDelivered / StatPacketsReceived != 0 - StatFramesDropped == 0 - - run as root. 
(or workaround p159 to start app as root and drop rigths immediatly afterwards) -############################### - -## SW from tboy old workstation - https://ctrax.sourceforge.net/ - walking fly tracker - HOBOWare Lite - temperature logger - StreamPix5 - realtime digital video recording to PC memory or hdd - batch processor - https://www.norpix.com/products/streampix/streampix.php - - Matlab (.m files) - any2ufmf - fviewflytrax - avidemux - yEd - graph editor - .graphml files - batchprocessor - process sequence files to avi - ssdcleanup - -## Emilio general critique -############################### -> !! if beetle runs in circle it is in "escape/panic mode" which doesn't represent "natural" behaviour (explore, forage, quick move from a to b) - -> think about different setup. Bigger Arena? ---> ! beetle needs to be small enough to not be limited by constrained size of the arena - -> how much time for beetle to calm down / aclimatize? 10mins? -> 10 minutes is enough to analyze movement -> 8 fps could be enough to reduce mean error - Depends on movement speed and estimated error - calc error by comparing distance of 40fps with 3fps - - diff --git a/dox/notes b/dox/notes deleted file mode 100644 index d8b610e6decd12872a63c6c0f205ab6cc575bac1..0000000000000000000000000000000000000000 --- a/dox/notes +++ /dev/null @@ -1,42 +0,0 @@ -## NOTES -############################### -"i conduct experiments in a lab, that i build up in a prestigious scientific institute" -"as a craftsman, i hone my skill to build/create things" -"i use my skills to earn €" -"i live and work with integrity" -------------------------------- -4-6h/d @ 4-5/w -> finetune DW/SW -get feedback (occassionaly) -------------------------------- - - - -############################################################## -## Software A - Frame grab - recorder - ctb -############################################################# - - - - - - - -# CMAKE -################################## -sublime: install package CMAKE 
(syntax hilight) -apt install libarchive-dev python3-pybind11 - -################################## - - - - init(img_input, img_output, img_bgmodel); - -#if CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION == 3 && CV_SUBMINOR_VERSION >= 9) - IplImage _frame = cvIplImage(img_input); - frame = &_frame; -#else - frame = new IplImage(img_input); -#endif - - diff --git a/dox/notes_a b/dox/notes_a index 82b7827743ca073514068bff45765dd198076852..87df6bbe830e7cc6e7f546848a93685c3d4d13a2 100644 --- a/dox/notes_a +++ b/dox/notes_a @@ -8,7 +8,6 @@ git Does Hardware Setup Camera Settings - Camera Calibration ############################### ## Hardware Setup @@ -85,16 +84,3 @@ Prosilica GX: 2 ethernet ports... configure LAG (Link aggregate group) -> double available bandwith to 240 MB/s! - -## hardware tower old -############################### - (opt/later) setup tower1 (defunct again!) as cheap testing machine! - > get NIC working - > give name connect inet - --> as terminal to connect to server - --> later: use for DMX lighting - - -# WS / VM - > 8 cores, 16GB RAM, 2 TB for 1cam - > 48 cores, 128GB RAM, 6 TB for 6cam diff --git a/dox/notes_b b/dox/notes_b index 2cde7a482a1d497600b5053c4e82e8f7b57dd39f..9dba9fae1d332de7423b87745c18d16d25ff2fab 100644 --- a/dox/notes_b +++ b/dox/notes_b @@ -4,75 +4,173 @@ git@gitlab.idiv.de:sugu/vimba.git Does - > camera produces frames and stores them with timestamps + a commandline app that is meant to be standalone and communicates with the camera. to produce frames and stores them along with timestamps + use opencv to convert raw cam images into JPEG pixel format ############################### -use opencv to convert raw cam images into JPEG pixel format +Based of VimbaX SDK from: + https://www.alliedvision.com/en/products/software/vimba-x-sdk/ +-> expanded on VimbaCPP/Example/AsyncOpenCVRecorder ! 
############################### -convert timestamp in ns -date +%F_%T -d @$(echo 2246782775624200 / 1000000000 | bc) - - - -VmbSystem::RegisterCameraListObserver - For being notified whenever a camera is detected, disconnected, or changes its open state, use +GETTING STARTED: +############################### + - setup VimbaX + > download from https://www.alliedvision.com/en/products/software/vimba-x-sdk/ + > see Vimba_installation_under_Linux.pdf + > unpack Vimba SDK + > tar -xzf ./Vimba.tgz + . cti/SetGenTLPath.sh + > install TL + -> sa VimbaX/doc/VimbaX_ReleaseNotes/GigETL.html + sudo VimbaGigETL_Install.sh #add api to path + + - install OpenCV and Qt + - setup project in IDE + - ive used Sublime editor -> see recorder.sublime-project for build commands! + test setup 1: + - connect cam directly to laptop via ethernet using white POE adapter, so cam gets power + test setup 2: + - connect cam to ethernet in lap and cam to power (POE works) + + - setup localnet (IP, subnet). eg: + adress: 169.254.100.3 + netmask 255.255.0.0 ( <- ! ) + gateway 0.0.0.0 + - find camera IP + - use cmd (replace enXXXX with local interface) + - sudo arp-scan --interface=enXXXX --localnet + - mac adress fixed. IP,name is not. 
+ camtron1 000A4708ED3F 172.18.227.210 allied-alviumg1-240c-04ytr.idiv.de + camtron2 000A471C0C9D 172.18.227.211 allied-alviumg1-240c-04ytp.idiv.de + camtron3 000A47372777 172.18.225.164 allied-alviumg1-240c-04ytn.idiv.de + camtron4 000A471D2A66 172.18.227.213 allied-alviumg1-240c-04ytm.idiv.de + camtron5 000A471D208D 172.18.225.198 allied-alviumg1-240c-04ytt.idiv.de + camtron6 000A47139EA6 172.18.227.215 allied-alviumg1-240c-04ytv.idiv.de + + direct (find with ping scan in 169.254.x.x): + ct1 169.254.75.147 + ct4 169.254.224.36 + + - configure cam IP in config/cams.json + - build and run recorder (in b/bin/cmd) + - press "h" for help, to list available commands +############################### -map cam -> MAC -> IP -> name (contains ID) - camtron1 000A4708ED3F 172.18.227.210 allied-alviumg1-240c-04ytr.idiv.de - camtron2 000A471C0C9D 172.18.227.211 allied-alviumg1-240c-04ytp.idiv.de - camtron3 000A47372777 172.18.225.164 allied-alviumg1-240c-04ytn.idiv.de - camtron4 000A471D2A66 172.18.227.213 allied-alviumg1-240c-04ytm.idiv.de - camtron5 000A471D208D 172.18.225.198 allied-alviumg1-240c-04ytt.idiv.de - camtron6 000A47139EA6 172.18.227.215 allied-alviumg1-240c-04ytv.idiv.de - direct (find with ping scan in 169.254.x.x): - ct1 169.254.75.147 - ct4 169.254.224.36 +OVERVIEW VIMBA / B: +############################### + - start in main.cpp + - threadpool for cams + - thread for core + - thread for console -> so userinput isnt blocked + - constructs instances for VimbaSystem, Core and Controller and connects their signals + + - controller.h/cpp (Note: also contains Console class) + - emits signals which are tied to Core functions (eg openCam) + - prints messages to console + - handles userinput on commandline + + - core.h/cpp + - main class for handling the cam + - parses config to get list of cams with IPs + - its like a intermediary between the user (Controller) and the camera (Cam) + - it keeps a list of cameras and selects the current cam and calls the appropriate 
functions + - detects cameras and constructs Cam Objects + - starts the cam and handles the image acquisition + - signals for Controller + + - cam.h/cpp + - handles the camera + - wrapper class for a Vimba Camera -> has a member CameraPtr to the Vimba Camera Object + -> uses this pointer to access the Vimba SDK + - keeps track of the camera state (which is also used by Core and Rec) + - has a Record Object which is used to store the images + + - record.h/cpp + - abstraction of a recording + - start / stop recording + - register frameobserver + - has a duration, checks disk space, checks/creates outDir + - gets camera stats and writes them to disk + - prints progress + + - FrameObserver.h/cpp + - callback function for Vimba SDK (implements interface IFrameObserver) + - gets called when a frame is received + - creates a FrameProcessor with the frame and starts it in a new thread! + - requeues frame, when its returned by FrameProcessor + + - FrameProcessor + - transfer timestamp to millisec + - writes Frame to disk with name being the timestamp + + Those are the main classes. There are also some helper classes like + Utils - globals and misc, parse Config, get Versions, errorCodeToMessage + IPrinter - interface which allows for classes to print messages,warnings and errors to Console + - see f() and g() for vimbahandling without cluttering the code with too much error handling + - helper functions (console size, progressbar, ...) 
+ typeDefinitions - common includes to declutter include part of files + + + + + Notes: + - to add a new function which requires userinput (eg pressing a key), search for ALL "// XXX add new func" + -> and add code analogue to lines above/below + - dont confuse CameraPtr with CamPtr and Cam with Camera + - CameraPtr is a shared pointer to the Vimba Camera Object + - CamPtr is a shared pointer to our Cam Object (which contains a CameraPtr to communicate with the Vimba Camera Object) + + + +change cam settings for all cameras +############################### +- run b/vimbax/bin/vimbaviewer +- change specific settings +- Save/export settings to file. +- run recorder b/bin/cmd +- load cam settings ('y') for each cam -camtron1/// - 169.254.158.10 - fixed ip: 172.18.205.201 + 255.255.255.0 - gateway should: 172.18.205.254 - gateway is: 0.0.0.0 +change record duration +############################### +- to change default record duration for all cams: + add "recordDurationSeconds" to conf/cams.json +- to change redord duration for current cam: + press "d" and enter new duration in seconds +- to change record duration for all cams: + press "D" and enter new duration in seconds + +conf/cams.json +############################### +- stores a list of cams with their names and IP addresses +- can include globals: + "recordDurationsSeconds" + "outDir" - base path to store recordings + "frameSizeBytes","fps" - used for calculating estimated diskspace + -> currently "fps" does NOT set the framerate +- software will take a file ending with json/xml as arguments for config/settings file +- if no file is given, it will look for cams.json in current folder -wired settings -169.254.100.3 255.255.0.0 +############################### +misc +############################### +convert timestamp in ns +date +%F_%T -d @$(echo 2246782775624200 / 1000000000 | bc) +VmbSystem::RegisterCameraListObserver + For being notified whenever a camera is detected, disconnected, or changes its open state, use 
## connect VM + setup ssh ssh kr69sugu@idivtibcam01.usr.idiv.de - - see `cmds "login ssh without password"` for config - -#?? WS ??? - #idivlabo2KARL.idiv.de (172.18.226.44) ? ------------------------------- - - -* camera sends the raw pictures to a storage server via a network (IP/UDP) connection -* internal real time clock and delivers high precision TIME STAMPS, which were simultaneous recorded for every frame (use milliseconds) -* compress frames as jpg - - used opencv version 3.2 imwrite() with parameter: - IMWRITE_JPEG_QUALITY 100; IMWRITE_JPEG_OPTIMIZE 1; IMWRITE_JPEG_RST_INTERVAL 4; - - -## setup VimbaX - > download from https://www.alliedvision.com/en/products/software/vimba-x-sdk/ - > see Vimba_installation_under_Linux.pdf - > unpack Vimba SDK - > tar -xzf ./Vimba.tgz - . cti/SetGenTLPath.sh - > install TL - -> sa VimbaX/doc/VimbaX_ReleaseNotes/GigETL.html - sudo VimbaGigETL_Install.sh #add api to path - ## All APIs cover the following functions: Listing currently connected cameras Controlling camera features @@ -88,38 +186,6 @@ ssh kr69sugu@idivtibcam01.usr.idiv.de >Vimba, C++, GigE-TL,(ImgTransform,),Cam Technical Docu pdf "${HOME}/vimba_5_1/Documentation/Vimba Manual.pdf" "${HOME}/vimba_5_1/VimbaCPP/Documentation/Vimba CPP Manual.pdf" "${HOME}/vimba_5_1/VimbaGigETL/Documentation/VimbaGigETLFeaturesManual.pdf" "${HOME}/vimba_5_1/VimbaImageTransform/Documentation/Vimba ImageTransform Manual.pdf" "${HOME}/vimba_5_1/Documentation/Vimba Viewer Guide.pdf" - From Thomas: - * recording single frames in data structure - * used Vimba.2.1 (1.6 more likely) - * compress each frame with loosely jpg compression - (used framework opencv version 3.2. 
imwrite function with parameter: IMWRITE_JPEG_QUALITY 100 ; IMWRITE_JPEG_OPTIMIZE 1; IMWRITE_JPEG_RST_INTERVAL 4;) - * stores timestamps for each frame in milliseconds - -## SETUP GT1920C: -> connect lap and cam directly via ethernet -> set MTU to 8228 (jumbo frames/packets) - > GVSPAdjustPacketSize: 8228 -> set fixed IP for eth-adapter first and then cam (use Vimbaviewer -> force IP) - eth 169.254.100.1 - cam ip 169.254.x.x # on restart will pick random IP... - mac - 000f310338D3 - 000f310338D4 - ID: DEV_000F310338D4 - Name: GT1920C - - sub 255.255.0.0 - (gat 0.0.0.0 local / none) - -> ip address of each adapter needs to be on a unique subnet - -> for multiple cams calc bandwith and use switch - - -> list currently connected cameras -> control cam features -> receive images -> notifications about connections/disconnections - -pixel format VmbSystem = api > entry point @@ -132,39 +198,9 @@ api > cam > feature (settings like exposuretime or pixelformat) > ancillary data (cam settings at time of acquisition -> can be queried via feature access) api > interface > settings/feature -Features: - > frame: - > ChunkAcquisionFrameCount #nFrames from cur acquisition - > ChunkExposureTime, ChunkGain - > system: - > - > cam: - > Width, Height - -frames = img data + ancillaryData - > create by api and queue in cam - > when image is rcv, frame will be filled -> notification - > process and re-enqueue at cam - GenICam - camera standard TL - Transport Layer - transports data from cam to sw -# DeviceTemperatureSelector get temp of cam! -TimestampLatch -TimestampReset -TimestampLatchValue - -Statistics (sub cat) - StatFrameRate - StatFramesDelivered - StatFramesDropped - ... - -[UserSetSelector] -UserSetLoad -UserSetSave - -CurrentIPAddress Buffer management @@ -205,61 +241,6 @@ Vimba API *captures* images <> camera *acquires* images restrained fps | unrestrained fps | while working with img, the nxt img is acquired! 
-#VimbaSystem #api - Startup() - RegisterCameraListObserver() #for gigE cams -> getcams return immediately - GetCameras() - GetCameraByID() - OpenCameraByID ( " 192.168.0.42 ", VmbAccessModeFull , camera ) - #or serial number or MAC - UnregisterCameraListObserver() - Shutdown() #blox until all callback are done - -#api more - GetInterfaces - -# CameraPtr - # static - GetID( string ) - GetName( string ) - GetModel( string ) - GetSerialNumber( string ) - GetPermittedAccess( VmbAccessModeType& ) - GetInterfaceID( string ) #name/id of connected IF - # dyn - Open() - VmbAccessModeFull - read and write -> features, acquire images - VmbAccessModeConfig - configure IP - VmbAccessModeRead - read-only - Close() - - AcquireSingleImage() - AcquireMultipleImages() - StartContinuousImageAcquisition() - StopContinuousImageAcquisition() - StartCapture() - StopCapture() - SaveCameraSettings() - LoadCameraSettings() - - GetFeatures() - GetFeaturesByName() - - AnnounceFrame() - RevokeFrame() - RevokeAllFrames() - CueFrame() - FlushFrame() - - - -#FeaturePtr - GetValue() - SetValue() - RunCommand() - RegisterObserver() - UnregisterObserver() - Notifications ############################### > register event handler @@ -267,72 +248,7 @@ Notifications > ! Not all func from API can be called in event handler > during event handler Vimba API might be blocked -> exit AFA -## notifications of changed camera states -sys.RegisterCameraListObserver() - # callback/observer func of type ICameraListObserver* - # gets called detect, disconnect, connect, changes to open state - # !! dont call from within observer: - Startup, Shutdown GetCameras GetCameraByID RegisterCameraListObserver UnregisterCameraListObserver Feature::SetValue Feature::RunCommand - -#ICameraListObserver -> for GigE: register a CameraListObserver with the VimbaSystem > -//Discovery? - // plug&play or add/rm + press button to call getcams and list their info - - -difference? 
- AcquisitionFrameRateAbs <> AcquisitionFrameRate --> cam feature? (== cam setting) - - - -## FramePtr: -GetImage() - img data -getBuffer - img data + ancillary data -GetAncillaryData - ancillary data [~ genicam chunk data] - chunkmodeactive - true -> transfer ancillaryData - frame count - exposure time [ms] - gain [db] -RegisterObserver() ?? - - - -# Sharedpointer -############################### -SP_DECL() -... -CameraPtr sp1; -so.reset( new Camera() ); #ref count 1 -CameraPtr sp2 ; -sp2 = sp1; #ref count 2 - - - - - - -MISC ############################### -getTimestamp -getFrameID - -what buffer size? - -is firmware still up-to-date? - https://www.alliedvision.com/en/support/firmware.html - -camera feature reference -https://www.alliedvision.com/en/support/technical-documentation.html - - -VmbC PersistentIp example to change camera’s IP address permanently. - -GetStreamBufferAlignment? what is it? - -EventCameraDiscovery -> listen to find plugged cams - - DONT do a diskbench to get writing speed (calculate needed speed with 3 cameras) to see if frames will be dropped or not... writing speed of one of the workstations is 150MB/sec prosilica gtx1920 needs 3x 107.53 MB/sec for BGR. 1x for MONO @@ -391,22 +307,3 @@ Save "reasonable" settings in XML. If dark, set ExposureTimeAbs Q: how to set fps to 3? ############################### - -The approach you've taken to save images asynchronously in a separate thread is generally a good practice, as it helps prevent blocking the main thread and allows for better resource management. However, it's crucial to ensure that you properly handle file operations and resource cleanup within your asynchronous processing logic to avoid issues such as file leaks. - -In your code snippet, it seems that you're moving the FrameProcessor object to a separate thread (QThread) and connecting signals and slots to manage the processing of frames and their subsequent handling. 
While this setup is appropriate for asynchronous processing, it's essential to verify that you're closing files after writing images. - -Here are some steps you can take to ensure proper file handling and cleanup: - - Verify File Closure: Double-check your FrameProcessor class implementation (specifically, the processFrame() method) to ensure that after calling cv::imwrite(), you properly close the file handle. The cv::imwrite() function should handle file writing and closing internally, but it's still essentia to ensure there are no leaks in your code. - - Error Handling: Implement error handling mechanisms to catch any exceptions or errors that might occur during file writing. If an error occurs, make sure to close the file handle and handle the error gracefully. - - Resource Monitoring: Monitor your application's resource usage, including file descriptors, to identify any potential leaks or issues with file handling. You can use system monitoring tools or utilities to track file descriptor usage over time and detect any abnormal behavior. - - Testing: Perform thorough testing of your application, including stress testing and long-running tests, to verify that file handling is robust and doesn't lead to resource leaks or errors over time. - -Additionally, ensure that your application doesn't hit system-imposed limits on the number of open files. If necessary, you can adjust the file descriptor limit for your process using system configuration or programmatically within your application (although this should be done cautiously). - -By implementing these steps and carefully reviewing your asynchronous file writing logic, you can ensure that your application handles file operations correctly and efficiently, minimizing the risk of resource leaks or errors related to file handling. -ChatGPT can make mistakes. Consider checking important information. 
diff --git a/dox/notes_c b/dox/notes_c index 1ebb5f34cbee9990ac31cb82cec0404d6bfd89da..89b3b737be67f12367d351595458eb58acacdd69 100644 --- a/dox/notes_c +++ b/dox/notes_c @@ -8,230 +8,3 @@ Does > calculates centroid points of all frames in a record ############################### -############################### -#define bgs_register(x) static BGS_Register<x> register_##x(quote(x)) -## >> glue together macro? -quote() adds "" - -bgs_register(Tapter) ->> static BGS_Register<Tapter> register_Tapter("Tapter") - -PCA? - -> difference - virtual(); - virtual(){ /*empty*/ } - virtual() = 0; #pure virtual - -> also difference - > virtual dtor(): if pointer to base-class deletes object - > pure virtual dtor(): need to also define function body, cuz dtor is special function whis is not overriden - > interface class (needs pure virtual??) - > abc - abstract base class. can't be instantiated anymore - > abc <> interface? - - - // IplImage is oldskool mat and not supported anymore.. - - > use smartpointer like so: - auto videoAnalysis = std::make_unique<VideoAnalysis>(); - videoCapture = std::make_unique<VideoCapture>(); - frameProcessor = std::make_shared<FrameProcessor>(); - - -## libarchive stuff -########### - archive_read_xxx() - archive_write_xxx() - struct archive_entry - -huge workaround for corrupted files -clock -random -command arg parser - - - -??? -#if CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION == 3 && CV_SUBMINOR_VERSION >= 9) - IplImage _frame = cvIplImage(img_input); - frame = &_frame; -#else - frame = new IplImage(img_input); -#endif - -#smartpointer?? -#elif CV_MAJOR_VERSION >= 3 - cv::Ptr<cv::BackgroundSubtractorMOG2> mog; -#endif - - - -> What Tboy do... why did he do it? - > forked branch bgslib_qtgui_2.0.0 - > Tapter - > adapter for model - > was probably copied from LBAdaptiveSOM.cpp - --> which is disabled in openCV4 - - -Which Background Subtraction Algo to use?? 
- median, mean, framedifference - + simple, fast, - - not robust if light/bg changes - - slow changes ? - > adaptive bg? - fuzzy? - mixture - - -NAMES - Kernel? - LBSP? - Multilayer? - Sigma-Delta? - Vibe, T2F ,dp ,lb ,vumeter? - choquet, sugeno, zivkovic, pratimediod, LOBSTER - -Test/Use most common >> Ground Truth - Frame Difference - WeightedMovingMean / Variance - LBAdaptiveSOM - MOG2 (Mixture Of Gaussian) MixtureOfGaussianV2.h - KNN (K Nearest Neighbour) - > fast for small fg obj - > TRY! - FuzzySugenoIntegral.h - - LSBP - Local Binary Similarity Patterns - (2013) - LSBP-based GSoC ? - SuBSENSE: improved spatiotemporal LBSP + color features (2014) - - Combineable with - ROI - Canny Edge Detection - - -> bg modeling to update BG (eg moving trees) > pixel with threshold -> optic flow (camera is also moving. ) > vectoral estimation of own movement - - -features - edge - canny edge detector + calc contour - > https://en.wikipedia.org/wiki/Canny_edge_detector - roi - crop - color - RGB - not so robuse by itself (sensitive to illumination, shadows, oscillations ...) - YUV - YCrCb - brightness, chroma, color - texture - robust to illumination and shadow - eg Local Binary Pattern (LBP) - -https://github.com/murari023/awesome-background-subtraction (2021 new stuff!) - -https://learnopencv.com/background-subtraction-with-opencv-and-bgs-libraries/ -http://docs.opencv.org/2.4/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.html -https://hackthedeveloper.com/background-subtraction-opencv-python/ #mog2 + knn in python -https://docs.opencv.org/4.3.0/d4/dd5/classcv_1_1bgsegm_1_1BackgroundSubtractorGSOC.html#details #GSOC LSBP ALGO from openCV bgsegm.hpp - -https://openaccess.thecvf.com/content_cvpr_workshops_2014/W12/papers/St-Charles_Flexible_Background_Subtraction_2014_CVPR_paper.pdf -https://www.scitepress.org/Papers/2018/66296/66296.pdf #vehicle tracking latvia. 
2018, BackgroundSubtractorMOG, BackgroundSubtractorMOG2 (zivkovic) -https://www-sop.inria.fr/members/Francois.Bremond/Postscript/AnhTuanAVSS14.pdf 2014 -https://arxiv.org/pdf/1803.07985.pdf # visual animal tracking (2018) -https://arxiv.org/pdf/1507.06821.pdf # Multimodal Deep Learning for Robust RGB-D Object Recognition (2015) -https://towardsdatascience.com/background-removal-with-deep-learning-c4f2104b3157?gi=2ef3a5272e5d (2017 Background removal with deep learning) - -https://opencv.org/courses/ #xpensive ai course -https://www.fast.ai/ #free ai course - - -################## -# computervision # -################## -background subtraction (bgs) -image segmentation - Semantic Segmentation (ai) -detection (feature, object) -classification (category recognition) - -Challenges: Occlusion, (Sensor-)Noise, changing external conditions( lighting, Shadows, fog, reflection ) - -> pre-training if lack of data -> corrupt data to guarantee robust learning - -###### -# AI # -###### -Convolutional Neural Networks (CNNs) - https://www.researchgate.net/publication/337401161_Fundamental_Concepts_of_Convolutional_Neural_Network (2020 37p) - > Kernel - > filter? - > preprocessing > Training > Parameter init > regularization > optimize - - -RGB-D - RGB + depth >> eg Kinect-Sensors -KNN - k-nearest neighbors algorithm -Monte Carlo tree search algorithm (alphaGo...) - -RGB-D descriptor that relies on a K-Means -HMP - hierarchical matching pursuit ( hierarchical sparse-coding method; learn features from multiple channel input) - -encoding depth information into three channels (HHA encoding) feature hierachy -cascade of Random Forest classifiers that are fused in a hierarchical manner ?? #Random Forest - -recall - how many relevant items are retrieved? -precision - how many retrieved items are relevant? - > https://en.wikipedia.org/wiki/Recall_(information_retrieval) - -? 
-automatically learn a fusion strategy for the recognition -task – in contrast to simply training a linear classifier on -top of features extracted from both modalities - -MISRE - Multiple Input Structures with Robust Estimator -gradient-based learning > MNIST supervised image classification -MNISt - large db of hand-written digits (used for ML and ImagePRocessing) -> also see classifiers: https://en.wikipedia.org/wiki/MNIST_database#Classifiers -multimodal learning - -the parameters of all layers were adapted using a fixed learning rate schedule - -At test time the task of the CNN is to assign the correct class label to a previously unseen object instance. - -Tasks: -Classification, clustering, regression, Summarization, Categorization, Natural language processing, - -################################## -# STAT -################################## -Regression -Hadamard Product -Bernoulli Distribution -Discrete Uniform Distribution - -################################## -# INSTALL -################################## -sublime: install package CMAKE (syntax hilight) -apt install libarchive-dev python3-pybind11 - -################################## - - - - init(img_input, img_output, img_bgmodel); - -#if CV_MAJOR_VERSION > 3 || (CV_MAJOR_VERSION == 3 && CV_SUBMINOR_VERSION >= 9) - IplImage _frame = cvIplImage(img_input); - frame = &_frame; -#else - frame = new IplImage(img_input); -#endif - - -############################### -Q: can i use OPENCV for BGS? - hint: -lopencv_bgsegm ??? can i use it for SW_C ? 
diff --git a/dox/notes_d b/dox/notes_d index 594e6095e312ac8092b941a4652a07573ac6decb..009d9157f5241cb502b408bff1a1813a30010980 100644 --- a/dox/notes_d +++ b/dox/notes_d @@ -9,11 +9,3 @@ Does: > calculate mean speed in pixel / millisecond ########################## - - -## R + stats -############################### -> read r4ds - > ex: http://adv-r.had.co.nz/Subsetting.html#applications -> ex: anagram -> course (not official, just vids+scripting homework/exercises) diff --git a/dox/notes_x b/dox/notes_x index 0d5cdd79cb005812d807de754da32f01f457a338..1c2af08b81b16b5ce349ae1ccdbebf76a665e78c 100644 --- a/dox/notes_x +++ b/dox/notes_x @@ -8,14 +8,11 @@ does C & D will run on HPC ############################### - get login for: - https://wiki.ufz.de/eve/ + login https://wiki.ufz.de/eve/ qsub - module load -> opencv4? Qt5.15? - - -libarchive + module load -> check versions (opencv4, Qt5.15) + libarchive 4mb blockgroesse einstellen entries lesen (=jpgs) diff --git a/dox/opencv b/dox/opencv deleted file mode 100644 index cb548bcad92dc45d48f2d42355b044a063c55f0f..0000000000000000000000000000000000000000 --- a/dox/opencv +++ /dev/null @@ -1,127 +0,0 @@ - -point2f(y,x) # inverted! -BGR #not rgb ... -Scalar = 4 element vector -CV_RGB(r, g, b) #from bgs - -Types: - #depth - CV_8U unsigned char - CV_8S char - CV_16U unsigned short - CV_16S short - CV_32S int - CV_32F float - CV_64F double - # + channels C1..4 eg RGBA: - # eg Mat M(2,2, CV_8UC3, Scalar(0,0,255)); - 0 to 255 for CV_8U images - 0 to 65535 for CV_16U images - 0 to 1 for CV_32F images - - - -# Camera Calibration: -######################## - distortion_coefficients - 5 values - camera_matrix - 3x3 matrix - eg focal length etc. 
- - different methods: - https://docs.opencv.org/4.x/d7/d21/tutorial_interactive_calibration.html - https://docs.opencv.org/4.x/d6/d55/tutorial_table_of_content_calib3d.html - - # using chess board, to get values - https://github.com/abidrahmank/OpenCV2-Python-Tutorials/blob/master/source/py_tutorials/py_calib3d/py_calibration/py_calibration.rs - - - -# Matrices -######################## - Fill matrix with random values - Mat R = Mat(3, 2, CV_8UC3); - randu(R, Scalar::all(0), Scalar::all(255)); - - Formatted output - cout << "R (csv) = " << endl << format(R, Formatter::FMT_CSV ) << endl << endl; - #sa FMT_C/PYTHON/NUMPY/CSV - - InputArray = vector or Mat - - type() - create(r,c,t) - copyTo() - clone() for deep copy - cv::Mat F = cv::Mat::ones(3, 4, CV_32FC1) * 3; //all 3's - - traverse: https://programmersought.com/article/72762264465/ - -##roi - region of interest - x, y, w, h - - // roi as subset of full data image - Mat D (A, Rect(10, 10, 100, 100) ); // using a rectangle - Mat E = A(Range::all(), Range(1,3)); // using row and column boundaries - - locateROI, adjustROI, - - -# CommandLineParser - @ -> positional arg - no default value -> has_XX - else: get<T>("XX") - <none> dflt value to ensure strings arent "" - -# MISC -######################## - glob( dir, file ) - - randu() # https://docs.opencv.org/4.x/d2/de8/group__core__array.html#ga1ba1026dca0807b27057ba6a49d258c0 - cvtColor(img, img, COLOR_BGR2Luv); #convert color - - sounds interesting: - selectROI, selectROIs, reduce, qt, dilate, erode, - convexhull, findcontours, checkChessboard, calibrateCamera - hough ? 
- hu-invariants, centroid - moments (weigthed avg of pixel intensities) - spatial <> central <> central normalized - - randPattern - - -BS: -https://docs.opencv.org/4.x/da/d5c/tutorial_canny_detector.html -https://docs.opencv.org/4.x/d1/dc5/tutorial_background_subtraction.html -https://web.archive.org/web/20140418093037/http://bmc.univ-bpclermont.fr/ -https://web.archive.org/web/20140221195750/http://docs.opencv.org/trunk/doc/tutorials/video/background_subtraction/background_subtraction.html -https://www.pyimagesearch.com/2020/07/27/opencv-grabcut-foreground-segmentation-and-extraction/ -https://learnopencv.com/applications-of-foreground-background-separation-with-semantic-segmentation/ -https://docs.opencv.org/4.x/d2/d55/group__bgsegm.html -https://docs.opencv.org/4.x/d5/de8/samples_2cpp_2segment_objects_8cpp-example.html#_a15 - -tuts:)) -https://docs.opencv.org/4.x/d6/d00/tutorial_py_root.html -https://datahacker.rs/opencv-thresholding/ - -# median frame as bg + frame difference -https://learnopencv.com/simple-background-estimation-in-videos-using-opencv-c-python/ - -https://learnopencv.com/contour-detection-using-opencv-python-c/ -https://learnopencv.com/deep-learning-with-opencvs-dnn-module-a-definitive-guide/ - -moments -https://docs.opencv.org/4.x/d0/d49/tutorial_moments.html - -#BGS -https://docs.opencv.org/4.x/d8/d38/tutorial_bgsegm_bg_subtraction.html - - - -# alliedvision-vimba-sdk -####################### - AsynchronousGrab example > to use with opencv - - /tmp/mozilla_sugu0/Vimba_installation_under_Linux-1.pdf - - C-api - https://www.ansatt.hig.no/ares/Support/Camera%20drivers/AVT/Vimba/1.1/VimbaC/Documentation/Vimba%20C%20Manual.pdf diff --git a/dox/scrap b/dox/scrap deleted file mode 100644 index 655c459a733682d5b7d87dacae61835cfd84f844..0000000000000000000000000000000000000000 --- a/dox/scrap +++ /dev/null @@ -1,35 +0,0 @@ - -############################### -## unplanned termination (ctrl-c, kill) -// std::signal(SIGINT, signalHandler); -// 
std::signal(SIGTERM, signalHandler); - -############################### -## limit THREADS -//use tools to analyze -// gdb -p <pid> -// or -// info threads -// strace -f -p <pid> -// int idealThreadCount = QThread::idealThreadCount(); -// int threadPoolMaxCount = QThreadPool::globalInstance()->maxThreadCount(); -// QThreadPool::globalInstance()->setMaxThreadCount(idealThreadCount); - -// qDebug() << "Ideal Thread Count:" << idealThreadCount; -// qDebug() << "Thread Pool Max Count:" << threadPoolMaxCount; - -############################### -## IsCommandDone -err = feature->RunCommand(); - if (err == VmbErrorSuccess) - { - bool commandDone = false; - do - { - if (feature->IsCommandDone(commandDone) != VmbErrorSuccess) - { - break; - } - } while (commandDone == false); - } -############################### diff --git a/dox/timeplan_milestones.ods b/dox/timeplan_milestones.ods deleted file mode 100644 index fae7ba5666e679f7088fd37d0d5656108a12690b..0000000000000000000000000000000000000000 Binary files a/dox/timeplan_milestones.ods and /dev/null differ diff --git a/dox/todo_ct b/dox/todo_ct index d1b771ea2c07c39ead6f49cde9cffa5736c642ab..11d9eaca3143201b9445e10be2fc61e4a8c57a1f 100644 --- a/dox/todo_ct +++ b/dox/todo_ct @@ -1,62 +1,38 @@ -[########################### +########################### A) setup / preparation B) recorder C) background-subtraction D) process scripts X) HPC ########################### -"tackle long-term complex projects from beginning to end" A) ########################### - eval vm performance - light - - alban: get ecolux hw from badlauchstaedt (Boxes, DMX per unit, DMX splitter/controller) - > ‼️ write mail to angry alban to get lighthw in september! 
- - get 1 PI for ecolux - - day + night + dirt + - get ecolux hw from badlauchstaedt (Boxes, DMX per unit, DMX splitter/controller) + - get 1 Raspberry PI for ecolux + - install https://git.idiv.de/sugu/skyglow + - LEDs for day + night + dirt light - IR leds (later) - - tape camo to arenas - - floor: find big white paperrolle in BUG/Storage + - the lenses have no IR filter, so we can use normal IR leds for night vision B) recorder - camera produces frames and timestamps ########################### - - ‼️ _rec->showProgress(); //xxx string to add to camInfo - ‼️ - checkDiskSpace() -> when invoked? check how long it takes to get disk - size test: checkDiskspace(), stats(), pb(), ... if long: init once if - short: before startrecord - ‼️ stats() / progressBar -> invoked: by - user,at end of rec,listcameras, ... every 1s timer (toggle by user) - - - settime() - - test with 1 cam + - calibrate + - see opencv tutorial: https://docs.opencv.org/4.10.0/dc/dbb/tutorial_py_calibration.html + - see misc examples + - e.g. cameracalibrationtool (by boy) - test with x cams - - occasionaly frames are incomplete/dropped - - sometimes PacketSize gets changed and then no frames are rcv -> calling VV once fixes issues >> adjust PACKETSIZE -============================ - CORE - - ‼️ calibrate - - see opencv tut + code/bash cali example - - ada-HPC-CALISW - -=============================== - - get pix, using VV and recorder - - laptop - direct connection # DONE - - laptop - local network - - virtual machine - - get video for 3 cams simultaneously - - do tests and see if frames are dropped - - cam > acquisition framrate + framerate enable - - getFramerate - - stream > statistics - - stats frame dropped - - state frame rate - - - central config - - threads for started recordings [PID] - - storage folder - - - where to store frames? estimate size. + - Note: use 1 settings file for all! + - fix aperture/lense -> how to manually focus? 
+ - get video for 3 cams simultaneously + - rm pbar updatetimer. instead use "l" listcams to show progress + - set-fps function + - sometimes PacketSize gets changed and then no frames are rcv -> calling VV once fixes issues >> call adjust PACKETSIZE in the code + - set auto gain/exposure @@ -102,6 +78,12 @@ X) HPC - High Performance Cluster BACK-BURNER 2D ############################### ctb + - settime() + - occasionally frames are incomplete/dropped + - test checkDiskSpace() with longer recs (how long) + - verify estimated calc + - save general info in outdir (dur, versions, ...) + - GUI APP: - build GUI - ... parse output for -LISTCAMERAS- and put into tablewidget @@ -118,18 +100,3 @@ ctb - copy last setup plan from thomas - - -Done -####################### - - set duration for 1 or all cams - - info() - dbg - - files, threads, settings, dirs, ... - - print global vars (dur,,fps,framesize,dir,threads,ncam,...) - - record class (extract cam stuff into own class) - - BUG files are not closed, causing OS limit crash (ulimit -n, too many open files...). why? - open files (-n) 1024 - - eval arena 3D-printed + attach camo - - config: record duration, out_dir, threads_per_cam, ncam - - camo pattern print+create - diff --git a/dox/workflow b/dox/workflow deleted file mode 100644 index 04d545cc33f3df18676828f546b3bae9d1f9ccac..0000000000000000000000000000000000000000 --- a/dox/workflow +++ /dev/null @@ -1,11 +0,0 @@ -Option A - 1. record and store on server - > timecritical - 2. process - -Option B - 1. record, process (cut ROI to reduce size), store - 2. more processing (R..) 
- --> define process --> draw picture as doc of workflow diff --git a/misc/board.jpg b/misc/board.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e449d6d85370045fe56c557df64683bdb1da22fa Binary files /dev/null and b/misc/board.jpg differ diff --git a/misc/camTrack3000-master.tar.gz b/misc/camTrack3000-master.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..aec255005a575bd29d408434ea1623a2b5363a01 Binary files /dev/null and b/misc/camTrack3000-master.tar.gz differ diff --git a/misc/cameraCalibrationTool-master.tar b/misc/cameraCalibrationTool-master.tar new file mode 100644 index 0000000000000000000000000000000000000000..2f2dedaab3f0675397e393bdd5bed085f49c5fc5 Binary files /dev/null and b/misc/cameraCalibrationTool-master.tar differ diff --git a/misc/camera_calib.cpp b/misc/camera_calib.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e7a584f695429c290544356b65f2664eef1a49c9 --- /dev/null +++ b/misc/camera_calib.cpp @@ -0,0 +1,109 @@ +/* +1. generate pattern + -> checkerboard, circle or random +2. take at least 10 photos of pattern + -> different angles and positions +3. calculate parameters + extract corners of pattern image + -> imgpoints + corresponding 3d points + -> objpoints + imgSize +4. calibration + -> calculate camera_matrix and distortion_coefficients + -> store in xml + +OPT +x. test accuracy with projectPoints (should be as close to 0 as possible) + +https://opencv24-python-tutorials.readthedocs.io/_/downloads/en/stable/pdf/#subsection.1.7.1 +*/ + + +#include <iostream> +#include <opencv2/opencv.hpp> +#include <opencv2/core.hpp> +#include <opencv2/ccalib.hpp> +#include <opencv2/calib3d.hpp> +#include <opencv2/ccalib/randpattern.hpp> + +// using namespace cv; +using namespace std; + +void gen_rand_pattern(const int width, const int height) + +const std::string outFileName = "pattern_for_camera_calibration.png"; +const cv::string keys = +"{help h usage ? 
| | camera calibration help msg }" +"{w width | 300 | pattern width }" +"{h height | 300 | pattern height }" +"{g generate | | generate, show and save a calibration pattern }" +"{c calibrate | | calibrate camera }" +; + +int main(int argc, char const *argv[]) +{ + cv::CommandLineParser parser( argc, argv, keys ); + + if (!parser.check()) + { + parser.printErrors(); + return 0; + } + + if (parser.has("g")) + { + cout << "GENERATE" << endl; + generate_pattern(); + } + else if (parser.has("d")) + { + cout << "DETECT" << endl; + calibrate(); + } + + + return 0; +} + +void generate_pattern() +{ + int width = parser.get<int>("width"); + int height = parser.get<int>("height"); + + std::cout << "width: " << width << std::endl; + std::cout << "height: " << height << std::endl; + + cv::randpattern::RandomPatternGenerator generator(width, height); + generator.generatePattern(); + + cv::Mat pattern = generator.getPattern(); + + cv::imshow("random Pattern", pattern); + int k = waitKey(0); // Wait for a keystroke in the window + cv::imwrite(outFileName, pattern); + std::cout << "Saved pattern to " << outFileName << + "Now print pattern, take photos of it. 
Then run 'calibrate' on those photos" << std::endl; +} + +void calibrate() +{ + //calc parameters + //patternWidth+patternHeight are physical pattern width and height with some user defined unit + patternWidth = width; + patternHeight = height; + std::vector vecImg; //vector of calibration images + + int nminiMatch = 20; // # of minimal matches to NOT abort image + cv::randpattern::RandomPatternCornerFinder finder(patternWidth, patternHeight, nMiniMatches); + finder.loadPattern(pattern); + + finder.computeObjectImagePoints(vecImg); + std::vector<cv::Mat> imagePoints = finder.getImagePoints(); //2d Vec2f + std::vector<cv::Mat> objectPoints = finder.getObjectPoints(); //3d Vec3f + //size + + + //calibrate + cv::calibrateCamera(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics, OutputArray perViewErrors) +} diff --git a/misc/camera_calibration.py b/misc/camera_calibration.py new file mode 100755 index 0000000000000000000000000000000000000000..2a31587f1dad5d05b31f0e5916e5c0ebbdcd5dc1 --- /dev/null +++ b/misc/camera_calibration.py @@ -0,0 +1,48 @@ +#!/usr/bin/python3 + +import numpy as np +import cv2 +import glob + +width = 9 +height = 6 + +# termination criteria +criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) + +# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) +objp = np.zeros((width*height,3), np.float32) +objp[:,:2] = np.mgrid[0:width,0:height].T.reshape(-1,2) + +# Arrays to store object points and image points from all the images. +objpoints = [] # 3d point in real world space +imgpoints = [] # 2d points in image plane. + +# images = glob.glob('*.jpg') +images = glob.glob('pattern*') + +for fname in images: + print(f"Search in {fname} ... 
", end="") + + img = cv2.imread(fname) + gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) + + # Find the chess board corners + ret, corners = cv2.findChessboardCorners(gray, (width,height), None) + + # If found, add object points, image points (after refining them) + + if ret == True: + objpoints.append(objp) + corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria) + imgpoints.append(corners2) + + # Draw and display the corners + img = cv2.drawChessboardCorners(img, (width,height), corners2,ret) + cv2.imshow('img',img) + cv2.waitKey(0) + + else: + print("no corners found :(") + +cv2.destroyAllWindows() diff --git a/misc/cannycontours.cpp b/misc/cannycontours.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8ef071362cd4c2e3e6c05134e16a266642d09120 --- /dev/null +++ b/misc/cannycontours.cpp @@ -0,0 +1,99 @@ + +#include "opencv2/imgcodecs.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/imgproc.hpp" +#include <iostream> +#include <iomanip> + +using namespace cv; +using namespace std; + +Mat src_gray; +int thresh = 100; +RNG rng(12345); + +/// Function header +void thresh_callback(int, void* ); + +/** + * @function main + */ +int main( int argc, char** argv ) +{ + /// Load source image + CommandLineParser parser( argc, argv, "{@input | stuff.jpg | input image}" ); + Mat src = imread( samples::findFile( parser.get<String>( "@input" ) ) ); + + if( src.empty() ) + { + cout << "Could not open or find the image!\n" << endl; + cout << "usage: " << argv[0] << " <Input image>" << endl; + return -1; + } + + /// Convert image to gray and blur it + cvtColor( src, src_gray, COLOR_BGR2GRAY ); + blur( src_gray, src_gray, Size(3,3) ); + + /// Create Window + const char* source_window = "Source"; + namedWindow( source_window ); + imshow( source_window, src ); + + const int max_thresh = 255; + createTrackbar( "Canny thresh:", source_window, &thresh, max_thresh, thresh_callback ); + thresh_callback( 0, 0 ); + + waitKey(); + return 0; +} + +/** + * 
@function thresh_callback + */ +void thresh_callback(int, void* ) +{ + /// Detect edges using canny + Mat canny_output; + Canny( src_gray, canny_output, thresh, thresh*2, 3 ); + /// Find contours + vector<vector<Point> > contours; + findContours( canny_output, contours, RETR_TREE, CHAIN_APPROX_SIMPLE ); + + /// Get the moments + vector<Moments> mu(contours.size() ); + for( size_t i = 0; i < contours.size(); i++ ) + { + mu[i] = moments( contours[i] ); + } + + /// Get the mass centers + vector<Point2f> mc( contours.size() ); + for( size_t i = 0; i < contours.size(); i++ ) + { + //add 1e-5 to avoid division by zero + mc[i] = Point2f( static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)), + static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)) ); + cout << "mc[" << i << "]=" << mc[i] << endl; + } + + /// Draw contours + Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 ); + for( size_t i = 0; i< contours.size(); i++ ) + { + Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) ); + drawContours( drawing, contours, (int)i, color, 2 ); + circle( drawing, mc[i], 4, color, -1 ); + } + + /// Show in a window + imshow( "Contours", drawing ); + + /// Calculate the area with the moments 00 and compare with the result of the OpenCV function + cout << "\t Info: Area and Contour Length \n"; + for( size_t i = 0; i < contours.size(); i++ ) + { + cout << " * Contour[" << i << "] - Area (M_00) = " << std::fixed << std::setprecision(2) << mu[i].m00 + << " - Area OpenCV: " << contourArea(contours[i]) << " - Length: " << arcLength( contours[i], true ) << endl; + } +} diff --git a/misc/chess_9x6.png b/misc/chess_9x6.png new file mode 100644 index 0000000000000000000000000000000000000000..1f9112c3a45c8191c969c20649aafb528803a6f6 Binary files /dev/null and b/misc/chess_9x6.png differ