diff --git a/docs/api.rst b/docs/api.rst
index 053df44..d47c072 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -97,7 +97,6 @@ Class and properties
    Video.end_frame
    Video.start_frame
    Video.corners
-   Video.plot_rigid_pts
 
 Getting frames from video objects
 ---------------------------------
@@ -220,8 +219,9 @@ Data infilling
 
 .. autosummary::
     :toctree: _generated
+    :template: accessor_method.rst
 
-    Velocimetry.replace_outliers
+    Velocimetry.mask.window_replace
 
 .. _transects:
 
diff --git a/docs/user-guide/camera_config/api_gcps.rst b/docs/user-guide/camera_config/api_gcps.rst
index e616a20..5d94911 100644
--- a/docs/user-guide/camera_config/api_gcps.rst
+++ b/docs/user-guide/camera_config/api_gcps.rst
@@ -69,3 +69,10 @@ A full example that supplies GCPs to the existing camera configuration in variab
 
     cam_config.set_gcps(src=src, dst=dst, z_0=z_0, crs=crs)
 
+If you have not supplied ``camera_matrix`` and ``dist_coeffs`` to the camera configuration, then these can be optimized
+from the provided GCPs. Once the GCPs have been set, simply call the following method without any arguments.
+
+.. code-block:: python
+
+    cam_config.set_intrinsic()
+
diff --git a/docs/user-guide/camera_config/cli.rst b/docs/user-guide/camera_config/cli.rst
index 2339dc0..2610a8a 100644
--- a/docs/user-guide/camera_config/cli.rst
+++ b/docs/user-guide/camera_config/cli.rst
@@ -21,5 +21,10 @@ To setup a camera configuration, you will at minimum need to provide the followi
 * 4 corner points that approximately indicate the bounding box of your area of interest. These must be provided in
   the order *upstream-left*, *downstream-left*, *downstream-right*, *upstream-right*, where left is the left-bank as
   seen while looking in downstream direction.
+* If you wish to use the video only from a selected frame onwards (for instance if the first frames or seconds are of
+  bad quality, or moving), then you must also provide the frame number from which you would like to start the analysis
+  and on which the camera configuration information will be based. This is done with the ``-f`` or ``--frame-sample``
+  option. In the interactive views that will support you, this frame will be displayed. If you do not provide ``-f``,
+  then the first frame (index 0) will be displayed.
 
 There are several ways to assign this information, further explained below.
 
diff --git a/docs/user-guide/camera_config/cli_bbox.rst b/docs/user-guide/camera_config/cli_bbox.rst
index ec012ad..c0fa7ed 100644
--- a/docs/user-guide/camera_config/cli_bbox.rst
+++ b/docs/user-guide/camera_config/cli_bbox.rst
@@ -21,3 +21,7 @@ clicked 4 points, and then you will see the allocated bounding box around the 4
 you will see the same bounding box in geographical space. If you wish to improve your corner point selection, then
 simply use right-click to remove points and select new locations for them to change the bounding box. Once you are
 satisfied, click on *Done* and the bounding box will be stored in the camera configuration.
+
+Below, an example of the bounding box as it appears in the interactive interface is shown.
+
+.. 
figure:: ../../_images/bbox_interactive.jpg
\ No newline at end of file
diff --git a/docs/user-guide/camera_config/cli_gcps.rst b/docs/user-guide/camera_config/cli_gcps.rst
index 0e3ea96..5f4add8 100644
--- a/docs/user-guide/camera_config/cli_gcps.rst
+++ b/docs/user-guide/camera_config/cli_gcps.rst
@@ -25,14 +25,14 @@ For our example video, supplying the gcps in a full command-line option manner w
 
     $ pyorc camera-config --crs_gcps 32735 --src "[[1421, 1001], [1251, 460], [421, 432], [470, 607]]" --dst "[[642735.8076, 8304292.1190], [642737.5823, 8304295.593], [642732.7864, 8304298.4250], [642732.6705, 8304296.8580]]" --z_0 1182.2 --h_ref 0.0 ......
 
-After the ..... more inputs such as the video with control points, the CRS of the camera configuration, and the output
-filename must be presented. With this input, no further interaction with the user to complete the control points is
-required.
+At the end of this command, after the ``.....``, more inputs must be presented, such as the video with control points,
+the CRS of the camera configuration, and the output filename. With this input, no further interaction with the user to
+complete the control points is required.
 
 In many cases though, the information under ``-dst`` may be cumbersome to insert. Hence a more convenient option is to
 leave out ``-dst`` and replace it for ``--shapefile`` and provide a path to a shapefile or other vector formatted
-GIS file containing your control points. *pyorc* assumes that the shapefile contains exactly those points you wish to
-use, no more and no less, that all information is in the geometries (i.e. x, y and if 6-points are used, also z) and
+GIS file containing your real-world control points. *pyorc* assumes that the shapefile contains exactly those points
+you wish to use (no more and no less), that all information is in the geometries (i.e. x, y and, if 6 points are used, also z) and
 that the file contains a CRS that allows for automated reprojection to the CRS of the camera configuration (i.e.
 supplied with ``--crs``). The geometries MUST be of the type POINT only! *pyorc* will attempt to indicate any
 problems with shapefiles that contain wrong or incomplete information so that you can resolve that if needed.
 
@@ -45,16 +45,28 @@ problems with shapefiles that contain wrong or incomplete information so that yo
 
 Similarly, the information contained in ``--src`` may also be very cumbersome to collect. In fact, you need to open up
 a frame in a photo editor and note down rows and columns to do so. Therefore a much more and highly recommended
-approach is to simply leave out ``-src`` at all. You will then be asked if you want to interactively select these
+approach is to simply leave out ``--src`` entirely. You will then be asked if you want to interactively select these
 points. Simply select `Y` and use our convenient point and click approach to select the control points. To make sure
 you click them in the right order, you can click on the *Map* button to display a map overview of the situation. Here,
 your ``--dst`` points (or points collected from the shapefile) will be displayed in a map view, with numbers indicating
 which point should be selected first, which second and so on. You can go back to the camera view with the *camera*
 button. Then you simply click on the first, second, third, ... and so on control point to collect all control points.
 Did you make an accidental click on a random point? No problem: just right-click and the last point you clicked
-will be removed. 
Right-click again and the point before that is removed so that you can click again. Once all point are
-collected, the *Done* button will become clickable. Click *Done* to close the view and store the points in the camera
-configuration.
+will be removed. Right-click again and the point before that is removed so that you can click again.
+
+Once all points are clicked, *pyorc* will optimize both the perspective and the camera's lens characteristics
+simultaneously, and display the points you clicked, now projected using the camera matrix, distortion coefficients
+and the estimated perspective pose of the camera. You will see the result as red "+" signs on the screen and an average
+error in meters displayed at the top of the frame. If the average error is larger than 0.1 meter, you will get a warning.
+Large errors are likely due to poorly measured control points, or because you clicked the points in the wrong order.
+
+Once the optimization is performed, the *Done* button will become clickable. Click *Done* to close the view and store
+the points in the camera configuration. If you made a mistake and want to rectify poorly clicked points, simply
+right-click as many times as needed to remove points, and add them again with left clicks. The optimization will be
+repeated once you have again reached the total number of control points. The view after the optimization is shown in
+the example below.
+
+.. figure:: ../../_images/GCPs_interactive.jpg
 
 .. note::
 
diff --git a/docs/user-guide/camera_config/index.rst b/docs/user-guide/camera_config/index.rst
index f77b34a..f725da8 100644
--- a/docs/user-guide/camera_config/index.rst
+++ b/docs/user-guide/camera_config/index.rst
@@ -3,10 +3,6 @@
 Camera configurations
 =====================
 
-.. note::
-
-    This manual is a work in progress.
-
 An essential element in doing optical velocity estimates is understanding how the Field Of View (FOV) of a camera
 relates to the real-world coordinates. This is needed so that a camera's FOV can be "orthorectified", meaning it can be
 transposed to real-world coordinates with equal pixel distances in meters. For this we need understanding of the
@@ -89,45 +85,50 @@ then your geographical referencing can easily be very wrong (even in the order o
 if you do not properly account for these. If for example your real-world coordinates are measured somewhere in the
 middle of the FOV, then velocities at the edges are likely to be overestimated.
 
-The default parameters (i.e. no distortion and an ideal world camera intrinsic matrix) may therefore be insufficient
-and can lead to unnecessary error in the interpretation of the real world distances in the FOV. To
-establish a more sound camera intrinsic matrix and distortion coefficients, we recommend to take a video of
-a checkerboard pattern using the exact settings you will use in the field and perform camera calibration with this.
-Below you can see an animated .gif of such a video. Basically, you print a checkerboard pattern, hold it in front of
-your camera, ensure that you run video at the exact settings at which you intend to record in the field,
-and capture the printed checkerboard pattern from as many angles as possible. Include rotation and movements in all
-directions.
+.. note::
+
+    In *pyorc* the focal distance is automatically optimized based on your real-world coordinates, provided as ground
+    control points. This is done already if you provide 4 control points in one vertical plane (e.g. at the water level).
+    In case you provide 6 or more ground control points with varying vertical levels, then *pyorc* will also attempt
+    to optimize the radial distortion. Therefore we strongly recommend that you measure 6 or more control points in
+    case you use a lens with significant radial distortion.
+
+You can also provide a camera intrinsic matrix and distortion coefficients in the API if you have these, or optimize
+the intrinsic matrix and distortion coefficients using a checkerboard pattern. More on this is described below.
 
 Preparing a video for camera calibration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-We have a method available to establish an intrinsic matrix and distortion coefficients. It reads in a video in which
-a user shows a chessboard pattern and holds it in front of the camera in many different poses and at as many different
-locations in the field of view as possible. It then strips frames in a staggered manner starting with the first and
-last frame, and then the middle frame, and then the two frames in between the first, last and middle, and so on, until
-a satisfactroy number of frames have been found in which the chessboard pattern was found. The intrinsic matrix and
-distortion coefficients are then calculated based on the results, and added to the camera configuration.
+We have a method available to manually establish an intrinsic matrix and distortion coefficients. It reads in a video in
+which a user shows a chessboard pattern and holds it in front of the camera in many different poses and at as many
+different locations in the field of view as possible. It then strips frames in a staggered manner, starting with the
+first and last frame, then the middle frame, then the two frames in between the first, last and middle, and so
+on, until a satisfactory number of frames has been found in which the chessboard pattern was detected. The intrinsic
+matrix and distortion coefficients are then calculated based on the results, and added to the camera configuration.
 
 .. note::
 
-    Making a video of a chessboard pattern and calibrating on it is only uyseful if you do it the right way. Take care
+    Making a video of a chessboard pattern and calibrating on it is only useful if you do it the right way. Take care
     of the following guidelines:
 
     * ensure that the printed chessboard is carefully fixed or glued to a hard object, like a strong straight piece of
      cardboard or a piece of wood. Otherwise, the pattern may look wobbly and cause incorrect calibration
    * a larger chessboard pattern (e.g. A0 printed) shown at a larger distance may give better results because the
-      focal length is more similar to field conditions.
+      focal length is more similar to field conditions. An A4 printed pattern is too small; in that case, using
+      *pyorc*'s built-in calibration is more trustworthy.
    * make sure that while navigating you cover all degrees of freedom. This means you should move the checkerboard
      from top to bottom and left to right; in all positions, rotate the board around its horizontal and vertical
     middle line; and rotate it clockwise.
-    * make sure you record the video in exactly the same resolution as you intend to use during the taking of the videos
-      in the field.
+    * make sure you record the video in exactly the same resolution and zoom level as you intend to use when taking
+      videos in the field.
 
-    If the calibration process is not carefully followed it may do more harm than good!!!
+    If the calibration process is not carefully followed it may do more harm than good!!! Therefore, if you are unsure
Therefore, if you are unsure + then we strongly recommend simply relying on the built-in automated calibration. -An example of extracts from a calibration video with found corner points is shown below. It gives an impression of how -you can move the chessboard pattern around. As said above, it is better to print a larger chessboard and show that to -the camera at a larger distance. +An example of extracts from a calibration video with found corner points is shown below (with A4 printed chessboard so +not reliable for a field deployment, this is only an example). It gives an impression of how you can move the chessboard +pattern around. As said above, it is better to print a (much!) larger chessboard and show that to the camera at a larger +distance. .. image:: ../../_images/camera_calib.gif @@ -140,7 +141,7 @@ Lens calibration method .. note:: - At the moment, lens calibration is only available at API level. If you require a command-line option + At the moment, manual lens calibration is only available at API level. If you require a command-line option for lens calibration, then please contact us at info@rainbowsensing.com. .. tab-item:: API @@ -249,6 +250,11 @@ Entering control points in the camera configuration Setting the lens position ------------------------- +If you also provide a lens position, then this position will be used along-side the ground control points to better +optimize the pose estimation and to better estimate the focal length. Similar to the control points, we advise to +measure the location as accurately as possible, and naturally in exactly the same horizontal and vertical datum as the +control points. + .. tab-set:: .. tab-item:: Command-line @@ -265,8 +271,7 @@ Setting the lens position .. tab-item:: API - For treatment of multiple videos, the water surface must also be reprojected to a new water level. This requires the - position of the x, y, z of the lens position. This can be provided using a simple method ``set_lens_position``. You only + The lens position can be provided using a simple method ``set_lens_position``. You only need to provide x, y, z and the CRS (if this is different from the CRS of the camera configuration itself. A full example supplying the lens position to the existing ``cam_config`` is shown below: @@ -310,6 +315,36 @@ unexpected direction more easily, and without tuning of masking parameters. .. include:: ./api_bbox.rst +Stabilization +------------- +You can decide whether videos must be stabilized. *pyorc* needs to be able to find so-called "rigid points" to do this. +Rigid points are points that do not move during the video. *pyorc* can automatically detect easy stable points to track +and then follow how these move from frame to frame. As the points should not move, *pyorc* will then transform each +frame so that the resulting movements are minimized. To ensure the transformation are really rigid, such regid points +must be found on all edges of the video. Hence it is important that when you take an unstable video, that there is +enough visibility of surrounding banks, or infrastructure or other stable elements around the video to perform the +stabilization. If such objects are only found in e.g. one half or (worse) one quadrant of the video, then the +stabilization may give very strange results in the areas where no rigid points are found. Therefore, only use this +if you know quite certainly that stable points will be found in many regions around the water. 
+
+.. tab-set::
+
+    .. tab-item:: Command-line
+
+        On the command line, simply provide ``--stabilize`` or ``-s`` as additional argument and you will be provided
+        with an interactive point-and-click view on your selected frame. You may click as many points as you wish to
+        create a polygon that encompasses all moving elements. To ensure that you include all edges, you can also pan
+        the frame so that areas outside of the frame become visible. Select the 4th button from the left (two crossed
+        double-arrows) to select panning. Click on the left-most (Home) button to return to the original view.
+
+    .. tab-item:: API
+
+        For stabilization, provide ``stabilize`` as additional argument to ``CameraConfig`` and provide as value
+        a list of lists of coordinates in [column, row] format, similar to ``gcps["src"]``, as shown in the sketch
+        below.
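+
+        A minimal sketch could look as follows (the frame size, ``gcps`` contents and polygon coordinates are
+        placeholders for your own configuration):
+
+        .. code-block:: python
+
+            import pyorc
+
+            cam_config = pyorc.CameraConfig(
+                height=1080,   # frame height of your video
+                width=1920,    # frame width of your video
+                gcps=gcps,     # your ground control points dictionary
+                stabilize=[[150, 0], [500, 1080], [1750, 1080], [900, 0]],  # polygon enclosing moving water
+            )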
 
 Result of a camera configuration
 --------------------------------
@@ -322,7 +357,7 @@ used for processing videos into velocimetry.
 
     .. tab-item:: Command-line
 
         When all required parameters are provided, the resulting camera configuration will be stored in a file
-        set as on the command line. If you have our code base and the ``examples`` folder, then you can for
+        set as ```` on the command line. If you have our code base and the ``examples`` folder, then you can for
         instance try the following to get a camera configuration without any interactive user inputs required:
 
         .. code-block:: console
 
@@ -338,15 +373,15 @@ used for processing videos into velocimetry.
         are provided as a list with [column, row] coordinates in the frame object. Finally, corner points to set
         the bounding box are provided as a list of [column, row] coordinates as well. The configuration is stored in
         ``ngwerere_cam_config.json``. If you leave out the ``--src`` and ``--corners`` components, you will be able to
-        select these interactively as shown before. Also the ``--h_ref`` and ``--z_0`` values can be supplied
-        interactively.
+        select these interactively as shown before. You can also add ``--stabilize`` to provide a region for
+        stabilization as described before. Also the ``--h_ref`` and ``--z_0`` values can be supplied
+        interactively on the command line.
 
         The command-line interface will also automatically store visualizations of the resulting camera configuration
         in both planar view (with a satellite background if a CRS has been used) and in the camera perspective. The
         file names for this have the same name as but with the suffixes ``_geo.jpg`` for the planar view and
         ``_cam.jpg`` for the camera FOV perspective.
 
-
     .. tab-item:: API
 
         Storing a camera configuration within the API is as simple as calling ``to_file``. Camera configurations can
diff --git a/docs/user-guide/video/index.rst b/docs/user-guide/video/index.rst
index cfb5413..e40f768 100644
--- a/docs/user-guide/video/index.rst
+++ b/docs/user-guide/video/index.rst
@@ -38,8 +38,7 @@ from different platforms, with suggestions given in the table below.
     recipe you can define several inputs to define further what should be done with the video. The example
     below demonstrates all inputs that currently can be supplied. The inputs define the start and end frame
     (``start_frame`` and ``end_frame``), the actual water level (``h_a``) in meters during the video, which will
-    be compared against the water level taken with the video used for camera calibration. Finally you may supply
-    ``stabilize`` without any further arguments to stabilize the video in case it has some movements.
+    be compared against the water level taken with the video used for camera calibration.
 
     .. code-block:: yaml
 
        video:
          start_frame: 150
          end_frame: 250
          h_a: 92.23
-         stabilize:
 
    .. tab-item:: API
 
@@ -71,7 +69,9 @@ image or frame, with control point information in view.
 
     .. tab-item:: Command-line
 
         When processing for velocimetry, a camera configuration must be supplied through the
-        option ``-c``, followed by the path to the file containing the camera configuration.
+        option ``-c``, followed by the path to the file containing the camera configuration. If you do not yet have
+        a camera configuration, then first go to the section on :ref:`camera configuration ` and
+        construct a camera configuration with these guidelines.
 
     .. tab-item:: API
 
@@ -96,9 +96,12 @@ and will be related to the water level, as read during the survey, used to const
 
     .. tab-item:: Command-line
 
-        You can supply the "current" water level by providing a water level in the recipe as follows. If you do not
-        supply it, ``pyorc`` will assume the water level is the same as in the control video. The example below will
-        process a video with a water level reading of 92.23 meters.
+        You can supply the "current" water level with the ``-h`` (short) or ``--h_a`` (long) option. You can also
+        insert the water level in the recipe yaml (although this is usually less practical). The example below will
+        process a video with a water level reading of 92.23 meters. If you do not supply it, ``pyorc`` will assume the
+        water level is the same as in the control video. With incidental observations (e.g. with a drone survey), the
+        video used to set up the camera configuration and the video processed here will be the same, and in this case
+        you can simply leave out the current water level.
 
         .. code-block:: yaml
 
@@ -120,7 +123,7 @@ and will be related to the water level, as read during the survey, used to const
 
         To guarantee that the perspective does not change, the following conditions MUST be met:
 
-        * The same lens must be used as used for the control image of the camera configuration. Note that smnartphone often
+        * The same lens must be used as used for the control image of the camera configuration. Note that smartphones often
           have multiple lenses e.g. for wide angle versus close-ups. Ensure you have zoom level at the same level as used
           for the control image and do not use digital zoom! It generally only reduces image quality.
         * The camera must be placed at exactly the same location and oriented to exactly the same objective
@@ -163,14 +166,10 @@ Videos may be taken in unsteady conditions. This may happen e.g. with slight mov
 drone that has varying air pressure conditions or wind gusts to deal with, or even fixed cameras in strong winds. But
 also, someone may have taken an incidental video, that was not originally intended to be used for river flow and
 velocity observations, but may render important information about a flood. For this the ``stabilize`` option can be passed with
-a stabilization strategy as input. Currently only the input "fixed" is supported, which is meant for a video with
-unintentional movements, e.g. 
due to wind or pressure changes (drone) or because the video was taken by hand. -With this option, each frame will be stabilized with respect to the start frame chosen by the user -(through the option ``start_frame``). The method works by first finding well traceable points in the first frame, -then tracing where these points move to in the next frames. Of course some of these points may actually be moving water. -Therefore we highly recommend to provide a polygon with the video that identifies the area that contains the entire water -surface. The polygon must be measured in column, row coordinates in the image frame and can be supplied with the -keyword ``mask_exterior``. +a stabilization strategy as input. The stabilization may already be set during the camera configuration and this is +in most cases the recommended approach. You may also arrange stabilization per video, and then supply a list of +[column, row] coordinates in the recipe. With this option, each frame will be stabilized with respect to the start frame chosen by the user +(through the option ``start_frame``). .. note:: @@ -202,9 +201,7 @@ keyword ``mask_exterior``. start_frame: 150 end_frame: 250 h_a: 92.23 - stabilize: fixed - mask_exterior: [[816, 2160], [744, 0], [3287, 0], [3374, 2160]] - + stabilize: [[816, 2160], [744, 0], [3287, 0], [3374, 2160]] .. tab-item:: API @@ -224,25 +221,10 @@ keyword ``mask_exterior``. camera_config=cam_config, start_frame=0, end_frame=125, - stabilize="fixed", - mask_exterior=[[150, 0], [500, 1080], [1750, 1080], [900, 0]] + stabilize=[[150, 0], [500, 1080], [1750, 1080], [900, 0]] ) video - If you wish to plot which points were found in the stabilization process, then you can use the method ``video.plot_rigid_pts``. - This makes a scatter plot of the found assumed rigid points on the image frame. Of course it is adviced to also plot - the first image frame on the axes, so that you understand where these points are in the objective. This can be done for example - as follows: - - .. code:: - - import matplotlib.pyplot as plt - img = video.get_frame(0, method="rgb") - ax = plt.axes() - ax.imshow(img) - video.plot_rigid_pts(ax=ax) - - .. note:: If you choose to only treat a very short part of a video such as only one second, then it may be difficult for the diff --git a/pyorc/__init__.py b/pyorc/__init__.py index 1a436ef..5f3f8b5 100644 --- a/pyorc/__init__.py +++ b/pyorc/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.4.3" +__version__ = "0.5.0" from .api.cameraconfig import CameraConfig, load_camera_config, get_camera_config from .api.video import Video from .api.frames import Frames diff --git a/pyorc/api/cameraconfig.py b/pyorc/api/cameraconfig.py index 3e8f68e..d6d7915 100644 --- a/pyorc/api/cameraconfig.py +++ b/pyorc/api/cameraconfig.py @@ -4,6 +4,7 @@ import matplotlib.pyplot as plt import numpy as np import os +import shapely.geometry from shapely import ops, wkt from shapely.geometry import Polygon, LineString, Point @@ -11,6 +12,8 @@ from pyproj import CRS, Transformer from pyproj.exceptions import CRSError +from typing import Any, Dict, List, Optional, Tuple, Union + from .. 
import cv, helpers @@ -27,20 +30,21 @@ def __repr__(self): def __init__( self, - height, - width, - crs=None, - window_size=10, - resolution=0.05, - bbox=None, - camera_matrix=None, - dist_coeffs=None, - lens_position=None, - corners=None, - gcps=None, - lens_pars=None, - calibration_video=None, - is_nadir=False + height: int, + width: int, + crs: Optional[Any] = None, + window_size: int = 10, + resolution: float = 0.05, + bbox: Optional[shapely.geometry.Polygon] = None, + camera_matrix: Optional[List[List[float]]] = None, + dist_coeffs: Optional[List[List[float]]] = None, + lens_position: Optional[List[float]] = None, + corners: Optional[List[List[float]]] = None, + gcps: Optional[Dict[str, Union[List, float]]] = None, + lens_pars: Optional[Dict[str, float]] = None, + calibration_video: Optional[str] = None, + is_nadir: Optional[bool] = False, + stabilize: Optional[List[List]] = None ): """ @@ -66,7 +70,7 @@ def __init__( gcps : dict Can contain "src": list of lists, with column, row locations in objective of control points, "dst": list of lists, with x, y or x, y, z locations (local or global coordinate reference system) of - control points, + control points, "h_ref": float, measured water level [m] in local reference system (e.g. from staff gauge or pressure gauge) during gcp survey, "z_0": float, water level [m] in global reference system (e.g. from used GPS system CRS). This must be in @@ -127,6 +131,8 @@ def __init__( # override the transform and bbox with the set corners if corners is not None: self.set_bbox_from_corners(corners) + if stabilize is not None: + self.stabilize = stabilize @property def bbox(self): @@ -250,7 +256,10 @@ def is_nadir(self): return self._is_nadir @is_nadir.setter - def is_nadir(self, nadir_prop): + def is_nadir( + self, + nadir_prop: bool + ): self._is_nadir = nadir_prop @property @@ -282,6 +291,25 @@ def shape(self): ) return rows, cols + @property + def stabilize(self): + """ + Return stabilization polygon (anything outside is used for stabilization + + Returns + ------- + coords : list of lists + coordinates in original image frame comprising the polygon for use in stabilization + """ + return self._stabilize + + @stabilize.setter + def stabilize( + self, + coords: List[List[float]] + ): + self._stabilize = coords + @property def transform(self): """ @@ -296,11 +324,11 @@ def transform(self): def set_lens_calibration( self, - fn, - chessboard_size=(9, 6), - max_imgs=30, - plot=True, - progress_bar=True, + fn: str, + chessboard_size: Optional[Tuple] = (9, 6), + max_imgs: Optional[int] = 30, + plot: Optional[bool] = True, + progress_bar: Optional[bool] = True, **kwargs ): """ @@ -341,7 +369,12 @@ def set_lens_calibration( self.camera_matrix = camera_matrix self.dist_coeffs = dist_coeffs - def get_bbox(self, camera=False, h_a=None, redistort=False): + def get_bbox( + self, + camera: Optional[bool] = False, + h_a: Optional[float] = None, + redistort: Optional[bool] = False + ) -> Polygon: """ Parameters @@ -383,7 +416,11 @@ def get_bbox(self, camera=False, h_a=None, redistort=False): bbox = Polygon(corners) return bbox - def get_depth(self, z, h_a=None): + def get_depth( + self, + z: List[float], + h_a: Optional[float] = None + ) -> List[float]: """ Retrieve depth for measured bathymetry points using the camera configuration and an actual water level, measured in local reference (e.g. staff gauge). 
@@ -407,7 +444,13 @@ def get_depth(self, z, h_a=None): return z_pressure - z - def get_dist_shore(self, x, y, z, h_a=None): + def get_dist_shore( + self, + x: List[float], + y: List[float], + z: List[float], + h_a: Optional[float] = None + ) -> List[float]: """ Retrieve depth for measured bathymetry points using the camera configuration and an actual water level, measured in local reference (e.g. staff gauge). @@ -444,13 +487,43 @@ def get_dist_shore(self, x, y, z, h_a=None): dist_shore = np.array([(((x[z_dry] - _x) ** 2 + (y[z_dry] - _y) ** 2) ** 0.5).min() for _x, _y, in zip(x, y)]) return dist_shore - def get_dist_wall(self, x, y, z, h_a=None): + def get_dist_wall( + self, + x: List[float], + y: List[float], + z: List[float], + h_a: Optional[float] = None + ) -> List[float]: + """ + Retrieve distance to wall for measured bathymetry points using the camera configuration and an actual water + level, measured in local reference (e.g. staff gauge). + + Parameters + ---------- + x : list of floats + measured bathymetry point x-coordinates + y : list of floats + measured bathymetry point y-coordinates + z : list of floats + measured bathymetry point depths + h_a : float, optional + actual water level measured [m], if not set, assumption is that a single video + is processed and thus changes in water level are not relevant. (default: None) + + Returns + ------- + distance : list of floats + + """ depth = self.get_depth(z, h_a=h_a) dist_shore = self.get_dist_shore(x, y, z, h_a=h_a) dist_wall = (dist_shore**2 + depth**2)**0.5 return dist_wall - def z_to_h(self, z): + def z_to_h( + self, + z: float + ) -> float: """Convert z coordinates of bathymetry to height coordinates in local reference (e.g. staff gauge) Parameters @@ -466,7 +539,12 @@ def z_to_h(self, z): h = z + h_ref - self.gcps["z_0"] return h - def get_M(self, h_a=None, to_bbox_grid=False, reverse=False): + def get_M( + self, + h_a: Optional[float] = None, + to_bbox_grid: Optional[bool] = False, + reverse: Optional[bool] = False + ) -> np.ndarray: """Establish a transformation matrix for a certain actual water level `h_a`. This is done by mapping where the ground control points, measured at `h_ref` will end up with new water level `h_a`, given the lens position. 
@@ -506,7 +584,10 @@ def get_M(self, h_a=None, to_bbox_grid=False, reverse=False): reverse=reverse ) - def get_z_a(self, h_a=None): + def get_z_a( + self, + h_a: Optional[float] = None + ) -> float: """ h_a : float, optional actual water level measured [m], if not set, assumption is that a single video @@ -522,7 +603,10 @@ def get_z_a(self, h_a=None): else: return self.gcps["z_0"] + (h_a - self.gcps["h_ref"]) - def set_bbox_from_corners(self, corners): + def set_bbox_from_corners( + self, + corners: List[List[float]] + ): """ Establish bbox based on a set of camera perspective corner points Assign corner coordinates to camera configuration @@ -546,7 +630,12 @@ def set_bbox_from_corners(self, corners): self.bbox = bbox - def set_intrinsic(self, camera_matrix=None, dist_coeffs=None, lens_pars=None): + def set_intrinsic( + self, + camera_matrix: Optional[List[List]] = None, + dist_coeffs: Optional[List[List]] = None, + lens_pars: Optional[Dict[str, float]] = None + ): # first set a default estimate from pose if 3D gcps are available self.set_lens_pars() # default parameters use width of frame if hasattr(self, "gcps"): @@ -569,7 +658,12 @@ def set_intrinsic(self, camera_matrix=None, dist_coeffs=None, lens_pars=None): self.dist_coeffs = dist_coeffs - def set_lens_pars(self, k1=0., c=2., focal_length=None): + def set_lens_pars( + self, + k1: Optional[float] = 0., + c: Optional[float] = 2., + focal_length: Optional[float] = None + ): """Set the lens parameters of the given CameraConfig Parameters @@ -591,7 +685,14 @@ def set_lens_pars(self, k1=0., c=2., focal_length=None): self.dist_coeffs = cv._get_dist_coefs(k1) self.camera_matrix = cv._get_cam_mtx(self.height, self.width, c=c, focal_length=focal_length) - def set_gcps(self, src, dst, z_0, h_ref=None, crs=None): + def set_gcps( + self, + src: List[List], + dst: List[List], + z_0: float, + h_ref: Optional[float] = None, + crs: Optional[Any] = None + ): """ Set ground control points for the given CameraConfig @@ -654,7 +755,13 @@ def set_gcps(self, src, dst, z_0, h_ref=None, crs=None): "z_0": z_0, } - def set_lens_position(self, x, y, z, crs=None): + def set_lens_position( + self, + x: float, + y: float, + z: float, + crs: Optional[Any] = None + ): """Set the geographical position of the lens of current CameraConfig. 
Parameters @@ -677,13 +784,16 @@ def set_lens_position(self, x, y, z, crs=None): x, y = helpers.xyz_transform([[x, y]], crs, self.crs)[0] self.lens_position = [x, y, z] - def project_points(self, points): + def project_points( + self, + points: List[List] + ) -> np.ndarray: """ Project real world x, y, z coordinates into col, row coordinates on image Parameters ---------- - points : list or array-like + points : list of lists or array-like list of points [x, y, z] in real world coordinates Returns @@ -705,17 +815,23 @@ def project_points(self, points): return points_proj - def unproject_points(self, points, zs): + def unproject_points( + self, + points: List[List], + zs: List[float] + ) -> np.ndarray: """ Reverse projects points in [column, row] space to [x, y, z] real world Parameters ---------- - points - zs + points : List of lists or array-like + Points in [col, row] to unproject + zs : float or list of floats : z-coordinate on which to unproject points Returns ------- - + points_unproject : List of lists or array-like + unprojected points as list of [x, y, z] coordinates """ _, rvec, tvec = self.pnp # reduce zs by the mean of the gcps @@ -734,14 +850,14 @@ def unproject_points(self, points, zs): def plot( self, - figsize=(13, 8), - ax=None, - tiles=None, - buffer=0.0005, - zoom_level=19, - camera=False, - tiles_kwargs={} - ): + figsize: Optional[Tuple] = (13, 8), + ax: Optional[plt.Axes] = None, + tiles: Optional[Any] = None, + buffer: Optional[float] = 0.0005, + zoom_level: Optional[int] = 19, + camera: Optional[bool] = False, + tiles_kwargs: Optional[Dict] = {} + ) -> plt.Axes: """ Plot the geographical situation of the CameraConfig. This is very useful to check if the CameraConfig seems to be in the right location. Requires cartopy to be installed. @@ -851,7 +967,15 @@ def plot( ax.legend() return ax - def plot_bbox(self, ax=None, camera=False, transformer=None, h_a=None, redistort=True, **kwargs): + def plot_bbox( + self, + ax: Optional[plt.Axes] = None, + camera: Optional[bool] = False, + transformer: Optional[Any] = None, + h_a: Optional[float] = None, + redistort: Optional[bool] = True, + **kwargs + ): """ Plot bounding box for orthorectification in a geographical projection (``camera=False``) or the camera Field Of View (``camera=True``). @@ -889,7 +1013,7 @@ def plot_bbox(self, ax=None, camera=False, transformer=None, h_a=None, redistort return p - def to_dict(self): + def to_dict(self) -> Dict: """Return the CameraConfig object as dictionary Returns @@ -907,13 +1031,16 @@ def to_dict(self): return d - def to_dict_str(self): + def to_dict_str(self) -> Dict: d = self.to_dict() # convert anything that is not string in string dict_str = {k: v if not(isinstance(v, Polygon)) else v.__str__() for k, v in d.items()} return dict_str - def to_file(self, fn): + def to_file( + self, + fn: str + ): """Write the CameraConfig object to json structure Parameters @@ -926,7 +1053,7 @@ def to_file(self, fn): with open(fn, "w") as f: f.write(self.to_json()) - def to_json(self): + def to_json(self) -> str: """Convert CameraConfig object to string Returns @@ -950,7 +1077,9 @@ def to_json(self): """ -def get_camera_config(s): +def get_camera_config( + s: str +) -> CameraConfig: """Read camera config from string Parameters @@ -973,7 +1102,9 @@ def get_camera_config(s): return CameraConfig(**d) -def load_camera_config(fn): +def load_camera_config( + fn: str +) -> CameraConfig: """Load a CameraConfig from a geojson file. 
Parameters diff --git a/pyorc/api/transect.py b/pyorc/api/transect.py index b012d72..43c5bb0 100644 --- a/pyorc/api/transect.py +++ b/pyorc/api/transect.py @@ -163,11 +163,12 @@ def get_q(self, v_corr=0.9, fill_method="zeros"): ---------- v_corr : float, optional correction factor (default: 0.9) - fill_method : method to fill missing values. "zeros" fills NaNS with zeros, "interpolate" interpolates values + fill_method : str, optional + method to fill missing values. "zeros" fills NaNS with zeros, "interpolate" interpolates values from nearest neighbour, "log_interp" interpolates values linearly with velocities scaled by the log of - depth over a roughness length, "log_fit" fits a 4-parameter logarithmic profile with depth and with - changing velocities towards banks on known velocities, and fills missing with the fitted relationship - (experimental) (Default value = "zeros"). + depth over a roughness length, "log_fit" fits a 4-parameter logarithmic profile with depth and with + changing velocities towards banks on known velocities, and fills missing with the fitted relationship + (experimental) (Default value = "zeros"). Returns ------- diff --git a/pyorc/api/video.py b/pyorc/api/video.py index d8a3086..107e765 100644 --- a/pyorc/api/video.py +++ b/pyorc/api/video.py @@ -10,11 +10,13 @@ import warnings import xarray as xr +from typing import List, Optional, Union + from .. import cv, const from .cameraconfig import load_camera_config, get_camera_config, CameraConfig -class Video: #(cv2.VideoCapture) +class Video: # (cv2.VideoCapture) def __repr__(self): template = """ Filename: {:s} @@ -34,14 +36,13 @@ def __repr__(self): def __init__( self, - fn, - camera_config=None, - h_a=None, - start_frame=None, - end_frame=None, - freq=1, - stabilize=None, - mask_exterior=None, + fn: str, + camera_config: Optional[Union[str, CameraConfig]] = None, + h_a: Optional[float] = None, + start_frame: Optional[int] = None, + end_frame: Optional[int] = None, + freq: Optional[int] = 1, + stabilize: Optional[List[List]] = None, ): """ Video class, inheriting parts from cv2.VideoCapture. Contains a camera configuration to it, and a start and end @@ -63,17 +64,14 @@ def __init__( first frame to use in analysis (default: 0) end_frame : int, optional last frame to use in analysis (if not set, last frame available in video will be used) - stabilize : optional - If set to a recipe name, the video will be stabilized by attempting to find rigid points and track these with - Lukas Kanade optical flow. Currently supported is "fixed" for FOV that is meant to be in one place. - mask_exterior : list of lists, + stabilize : list of lists, optional set of coordinates, that together encapsulate the polygon that defines the mask, separating land from water. - The mask is used to select region (on land) for rigid point search for stabilization. + The mask is used to select region (on land) for rigid point search for stabilization. 
If not set, then no + stabilization will be performed """ - assert(isinstance(start_frame, (int, type(None)))), 'start_frame must be of type "int"' - assert(isinstance(end_frame, (int, type(None)))), 'end_frame must be of type "int"' - assert(stabilize in ["fixed", None]), f'stabilize is only implemented for method "fixed", "{stabilize}" given' - self.feats_pos = None + assert (isinstance(start_frame, (int, type(None)))), 'start_frame must be of type "int"' + assert (isinstance(end_frame, (int, type(None)))), 'end_frame must be of type "int"' + # assert (isinstance(stabilize, (list, type(None)))), f'stabilize must contain a list of points, but is {stabilize}' self.feats_stats = None self.feats_errs = None self.ms = None @@ -81,14 +79,14 @@ def __init__( self.stabilize = stabilize if camera_config is not None: self.camera_config = camera_config - # if camera_config is not None: + # if camera_config is not None: # check if h_a is supplied, if so, then also z_0 and h_ref must be available if h_a is not None: - assert(isinstance(self.camera_config.gcps["z_0"], float)),\ + assert (isinstance(self.camera_config.gcps["z_0"], float)), \ "h_a was supplied, but camera config's gcps do not contain z_0, this is needed for dynamic " \ "reprojection. You can supplying z_0 and h_ref in the camera_config's gcps upon making a camera " \ "configuration. " - assert (isinstance(self.camera_config.gcps["h_ref"], float)),\ + assert (isinstance(self.camera_config.gcps["h_ref"], float)), \ "h_a was supplied, but camera config's gcps do not contain h_ref, this is needed for dynamic " \ "reprojection. You must supply z_0 and h_ref in the camera_config's gcps upon making a camera " \ "configuration. " @@ -96,11 +94,10 @@ def __init__( cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 180.0) self.height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - self.mask_exterior = mask_exterior # explicitly open file for reading - if mask_exterior is not None: - # set a mask based on the roi points - self.set_mask_from_exterior(mask_exterior) + if self.stabilize is not None: + # set a gridded mask based on the roi points + self.set_mask_from_exterior(self.stabilize) # set end and start frame self.frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) if start_frame is not None: @@ -145,25 +142,6 @@ def __init__( cap.release() del cap - - @property - def mask_exterior(self): - """ - - Returns - ------- - np.ndarray - Mask of region of interest - """ - return self._mask_exterior - - @mask_exterior.setter - def mask_exterior(self, mask_exterior): - if mask_exterior is None: - self._mask_exterior = None - else: - self._mask_exterior = mask_exterior - @property def mask(self): """ @@ -211,7 +189,8 @@ def camera_config(self, camera_config_input): # Create CameraConfig from dict self._camera_config = CameraConfig(**camera_config_input) except: - raise IOError("Could not recognise input as a CameraConfig file, string, dictionary or CameraConfig object.") + raise IOError( + "Could not recognise input as a CameraConfig file, string, dictionary or CameraConfig object.") @property def end_frame(self): @@ -246,17 +225,18 @@ def freq(self, freq=1): @property def stabilize(self): - """ - - :return: int, last frame considered in analysis - """ - return self._stabilize + if self._stabilize is not None: + return self._stabilize + elif hasattr(self, "camera_config"): + if hasattr(self.camera_config, "stabilize"): + return self.camera_config.stabilize @stabilize.setter - def stabilize(self, stabilize=None): - # 
sometimes last frames are not read by OpenCV, hence we skip the last frame always - self._stabilize = stabilize - + def stabilize( + self, + coords: Optional[List[List]] = None + ): + self._stabilize = coords @property def h_a(self): @@ -267,11 +247,15 @@ def h_a(self): return self._h_a @h_a.setter - def h_a(self, h_a): + def h_a( + self, + h_a: float + ): if h_a is not None: - assert(isinstance(h_a, float)), f"The actual water level must be a float, you supplied a {type(h_a)}" + assert (isinstance(h_a, float)), f"The actual water level must be a float, you supplied a {type(h_a)}" if h_a < 0: - warnings.warn("Water level is negative. This can be correct, but may be unlikely, especially if you use a staff gauge.") + warnings.warn( + "Water level is negative. This can be correct, but may be unlikely, especially if you use a staff gauge.") self._h_a = h_a @property @@ -283,7 +267,10 @@ def start_frame(self): return self._start_frame @start_frame.setter - def start_frame(self, start_frame=None): + def start_frame( + self, + start_frame: Optional[int] = None + ): if start_frame is None: self._start_frame = 0 else: @@ -298,7 +285,10 @@ def fps(self): return self._fps @fps.setter - def fps(self, fps): + def fps( + self, + fps: float + ): if (np.isinf(fps)) or (fps <= 0): raise ValueError(f"FPS in video is {fps} which is not a valid value. Repair the video file before use") self._fps = fps @@ -312,16 +302,21 @@ def corners(self): return self._corners @corners.setter - def corners(self, corners): + def corners( + self, + corners: List[List] + ): self._corners = corners - @property def rotation(self): return self._rotation @rotation.setter - def rotation(self, rotation_code): + def rotation( + self, + rotation_code: int + ): """ Solves a likely bug in OpenCV (4.6.0) that straight up videos rotate in the wrong direction. Tested for both 90 degree and 270 degrees rotation videos on several smartphone (iPhone and Android) @@ -331,20 +326,34 @@ def rotation(self, rotation_code): else: self._rotation = None - - - def get_frame(self, n, method="grayscale", lens_corr=False): + def get_frame( + self, + n: int, + method: Optional[str] = "grayscale", + lens_corr: Optional[bool] = False + ) -> np.ndarray: """ Retrieve one frame. Frame will be corrected for lens distortion if lens parameters are given. 
-        :param n: int, frame number to retrieve
-        :param method: str, can be "rgb", "grayscale", or "hsv", default: "grayscale"
-        :param lens_corr: bool, optional, if set to True, lens parameters will be used to undistort image
-        :return: np.ndarray containing frame
-        """
-        assert(n >= 0), "frame number cannot be negative"
-        assert(n - self.start_frame <= self.end_frame - self.start_frame), "frame number is larger than the different between the start and end frame"
-        assert(method in ["grayscale", "rgb", "hsv"]), f'method must be "grayscale", "rgb" or "hsv", method is "{method}"'
+        Parameters
+        ----------
+        n : int
+            frame number to retrieve
+        method : str
+            can be "rgb", "grayscale", or "hsv", default: "grayscale"
+        lens_corr : bool, optional
+            if set to True, lens parameters will be used to undistort image
+
+        Returns
+        -------
+        frame : np.ndarray
+            2d array (grayscale) or 3d (rgb/hsv) with frame
+        """
+        assert (n >= 0), "frame number cannot be negative"
+        assert (
+            n - self.start_frame <= self.end_frame - self.start_frame), "frame number is larger than the difference between the start and end frame"
+        assert (method in ["grayscale", "rgb",
+                           "hsv"]), f'method must be "grayscale", "rgb" or "hsv", method is "{method}"'
         cap = cv2.VideoCapture(self.fn)
         cap.set(cv2.CAP_PROP_POS_FRAMES, n + self.start_frame)
         try:
@@ -362,7 +371,7 @@ def get_frame(self, n, method="grayscale", lens_corr=False):
         if method == "grayscale":
             # apply gray scaling, contrast- and gamma correction
             # img = _corr_color(img, alpha=None, beta=None, gamma=0.4)
-            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # mean(axis=2)
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # mean(axis=2)
         elif method == "rgb":
             # turn bgr to rgb for plotting purposes
             img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
@@ -373,16 +382,27 @@
         cap.release()
         return img
 
-    def get_frames(self, **kwargs):
+    def get_frames(
+        self,
+        **kwargs
+    ) -> xr.DataArray:
         """
         Get a xr.DataArray, containing a dask array of frames, from `start_frame` until `end_frame`, expected to be read
         lazily. The xr.DataArray will contain all coordinate variables and attributes, needed for further processing
         steps.
 
-        :param kwargs: dict, keyword arguments to pass to `get_frame`. Currently only `grayscale` is supported.
-        :return: xr.DataArray, containing all requested frames
+        Parameters
+        ----------
+        **kwargs: dict, optional
+            keyword arguments to pass to `get_frame`. Currently only `grayscale` is supported. 
+ + Returns + ------- + frames : xr.DataArray + containing all requested frames """ - assert(hasattr(self, "_camera_config")), "No camera configuration is set, add it to the video using the .camera_config method" + assert (hasattr(self, + "_camera_config")), "No camera configuration is set, add it to the video using the .camera_config method" # camera_config may be altered for the frames object, so copy below camera_config = copy.deepcopy(self.camera_config) get_frame = dask.delayed(self.get_frame, pure=True) # Lazy version of get_frame @@ -394,22 +414,15 @@ def get_frames(self, **kwargs): dtype=sample.dtype, shape=sample.shape ) for frame in frames] - # if "lens_corr" in kwargs: - # if kwargs["lens_corr"]: - # also correct the control point src + # undistort source control points if hasattr(camera_config, "gcps"): camera_config.gcps["src"] = cv.undistort_points( camera_config.gcps["src"], camera_config.camera_matrix, camera_config.dist_coeffs, ) - # camera_config.corners = cv.undistort_points( - # camera_config.corners, - # sample.shape[0], - # sample.shape[1], - # **self.camera_config.lens_pars - # ) - time = np.array(self.time) * 0.001 # measure in seconds to comply with CF conventions # np.arange(len(data_array))*1/self.fps + time = np.array( + self.time) * 0.001 # measure in seconds to comply with CF conventions # np.arange(len(data_array))*1/self.fps # y needs to be flipped up down to match the order of rows followed by coordinate systems (bottom to top) y = np.flipud(np.arange(data_array[0].shape[0])) x = np.arange(data_array[0].shape[1]) @@ -443,18 +456,37 @@ def get_frames(self, **kwargs): frames.name = "frames" return frames + def set_mask_from_exterior( + self, + exterior + ): + """ + Prepare a mask grid with 255 outside of the stabilization polygon and 0 inside - def set_mask_from_exterior(self, exterior): + Parameters + ---------- + exterior : list of lists + coordinates defining the polygon for masking + + Returns + ------- + self.mask : np.ndarray + mask for stabilization region + + """ mask_coords = np.array([exterior], dtype=np.int32) mask = np.zeros((self.height, self.width), np.uint8) mask = cv2.fillPoly(mask, [mask_coords], 255) - mask[mask==0] = 1 - mask[mask==255] = 0 - mask[mask==1] = 255 + mask[mask == 0] = 1 + mask[mask == 255] = 0 + mask[mask == 1] = 255 self.mask = mask - - def get_ms(self, cap, split=2): + def get_ms( + self, + cap: cv2.VideoCapture, + split: Optional[int] = 2 + ): self.ms = cv._get_ms_gftt( cap, start_frame=self.start_frame, @@ -462,31 +494,4 @@ def get_ms(self, cap, split=2): split=split, mask=self.mask, ) - # def _get_ms(self): - # # retrieve the transformation matrices for stabilization - # self.ms = cv._ms_from_displacements(self.feats_pos, self.feats_stats) - - - def plot_rigid_pts(self, ax=None, **kwargs): - """ - Plots found rigid points (column, row) for stabilization and their path throughout the frames in time on an - axes object. - - Parameters - ---------- - ax : plt.axes object, optional - If None (default), use the current axes. - **kwargs : additional keyword arguments to `matplotlib.pyplot.scatter` wrapped Matplotlib function. 
- - - Returns - ------- - - """ - assert self.feats_pos is not None, "No stabilization applied hence no rigid points available to plot" - if ax is None: - ax = plt.axes() - for t_p in np.swapaxes(self.feats_pos, 0, 1): - p = ax.scatter(t_p[:, 0], t_p[:, 1], c=np.linspace(0, 1, len(t_p)), **kwargs) - return p \ No newline at end of file diff --git a/pyorc/cli/cli_elements.py b/pyorc/cli/cli_elements.py index 0dec4da..418e3fc 100644 --- a/pyorc/cli/cli_elements.py +++ b/pyorc/cli/cli_elements.py @@ -25,29 +25,31 @@ "upstream-right" ] class BaseSelect: - def __init__(self, img, dst, crs=None, buffer=0.0002, zoom_level=19, logger=logging): + def __init__(self, img, dst=None, crs=None, buffer=0.0002, zoom_level=19, logger=logging): self.logger = logger self.height, self.width = img.shape[0:2] self.crs = crs fig = plt.figure(figsize=(16, 9), frameon=False, facecolor="black") fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None) - # fig = plt.figure(figsize=(12, 7)) - xmin = np.array(dst)[:, 0].min() - xmax = np.array(dst)[:, 0].max() - ymin = np.array(dst)[:, 1].min() - ymax = np.array(dst)[:, 1].max() - extent = [xmin - buffer, xmax + buffer, ymin-buffer, ymax + buffer] + ax_geo = None # extent = [4.5, 4.51, 51.2, 51.21] - if crs is not None: - tiler = getattr(cimgt, "GoogleTiles")(style="satellite") - ax_geo = fig.add_axes([0., 0., 1, 1], projection=tiler.crs) - ax_geo.set_extent(extent, crs=ccrs.PlateCarree()) - ax_geo.add_image(tiler, zoom_level, zorder=1) - else: - ax_geo = fig.add_axes([0., 0., 1, 1]) - ax_geo.set_aspect("equal") - plt.tick_params(left=False, right=False, labelleft=False, labelbottom=False, bottom=False) - ax_geo.set_visible(False) + if dst is not None: + # fig = plt.figure(figsize=(12, 7)) + xmin = np.array(dst)[:, 0].min() + xmax = np.array(dst)[:, 0].max() + ymin = np.array(dst)[:, 1].min() + ymax = np.array(dst)[:, 1].max() + extent = [xmin - buffer, xmax + buffer, ymin - buffer, ymax + buffer] + if crs is not None: + tiler = getattr(cimgt, "GoogleTiles")(style="satellite") + ax_geo = fig.add_axes([0., 0., 1, 1], projection=tiler.crs) + ax_geo.set_extent(extent, crs=ccrs.PlateCarree()) + ax_geo.add_image(tiler, zoom_level, zorder=1) + else: + ax_geo = fig.add_axes([0., 0., 1, 1]) + ax_geo.set_aspect("equal") + plt.tick_params(left=False, right=False, labelleft=False, labelbottom=False, bottom=False) + ax_geo.set_visible(False) ax = fig.add_axes([0.2, 0.1, 0.7, 0.8]) ax.set_facecolor("k") ax.set_position([0, 0, 1, 1]) @@ -72,20 +74,21 @@ def __init__(self, img, dst, crs=None, buffer=0.0002, zoom_level=19, logger=logg patheffects.Normal(), ], ) - if crs is not None: - kwargs["transform"] = ccrs.PlateCarree() - transform = ccrs.PlateCarree()._as_mpl_transform(ax_geo) - kwargs_text["xycoords"] = transform - self.p_geo = ax_geo.plot( - *list(zip(*dst))[0:2], "o", - **kwargs - ) - for n, _pt in enumerate(dst): - pt = ax_geo.annotate( - n + 1, - xy = _pt[0:2], - **kwargs_text + if dst is not None: + if crs is not None: + kwargs["transform"] = ccrs.PlateCarree() + transform = ccrs.PlateCarree()._as_mpl_transform(ax_geo) + kwargs_text["xycoords"] = transform + self.p_geo = ax_geo.plot( + *list(zip(*dst))[0:2], "o", + **kwargs ) + for n, _pt in enumerate(dst): + pt = ax_geo.annotate( + n + 1, + xy = _pt[0:2], + **kwargs_text + ) self.fig = fig self.ax_geo = ax_geo self.ax = ax # add axes @@ -107,20 +110,27 @@ def add_buttons(self): v2 = [Size.Fixed(1.45), Size.Fixed(0.3)] v3 = [Size.Fixed(1.95), Size.Fixed(0.3)] - divider1 = Divider(self.fig, (0, 0, 1, 
1), h, v, aspect=False) + divider3 = Divider(self.fig, (0, 0, 1, 1), h, v, aspect=False) divider2 = Divider(self.fig, (0, 0, 1, 1), h, v2, aspect=False) - divider3 = Divider(self.fig, (0, 0, 1, 1), h, v3, aspect=False) - self.ax_button1 = self.fig.add_axes(divider1.get_position(), - axes_locator=divider1.new_locator(nx=1, ny=1)) - self.ax_button2 = self.fig.add_axes(divider2.get_position(), - axes_locator=divider2.new_locator(nx=1, ny=1)) - self.ax_button3 = self.fig.add_axes(divider3.get_position(), - axes_locator=divider3.new_locator(nx=1, ny=1)) - self.button1 = Button(self.ax_button1, 'Camera') - self.button2 = Button(self.ax_button2, 'Map') + divider1 = Divider(self.fig, (0, 0, 1, 1), h, v3, aspect=False) + if self.ax_geo is not None: + self.ax_button1 = self.fig.add_axes( + divider1.get_position(), + axes_locator=divider1.new_locator(nx=1, ny=1) + ) + self.button1 = Button(self.ax_button1, 'Camera') + self.button1.on_clicked(self.switch_to_ax) + self.ax_button2 = self.fig.add_axes( + divider2.get_position(), + axes_locator=divider2.new_locator(nx=1, ny=1) + ) + self.button2 = Button(self.ax_button2, 'Map') + self.button2.on_clicked(self.switch_to_ax_geo) + self.ax_button3 = self.fig.add_axes( + divider3.get_position(), + axes_locator=divider3.new_locator(nx=1, ny=1) + ) self.button3 = Button(self.ax_button3, 'Done') - self.button1.on_clicked(self.switch_to_ax) - self.button2.on_clicked(self.switch_to_ax_geo) self.button3.on_clicked(self.close_window) self.button3.set_active(False) self.button3.label.set_color("gray") @@ -437,3 +447,87 @@ def on_click(self, event): self.p_geo_selected.set_data(*list(zip(*dst_sel))[0:2]) else: self.p_geo_selected.set_data([], []) + + +class StabilizeSelect(BaseSelect): + def __init__(self, img, logger=logging): + super(StabilizeSelect, self).__init__(img, logger=logger) + # make empty plot + pol = Polygon(np.zeros((0, 2)), edgecolor="w", alpha=0.5, linewidth=2) + self.p = self.ax.add_patch(pol) + kwargs = dict( + color="c", + markeredgecolor="w", + zorder=4, + markersize=10, + label="Selected control points" + ) + xloc = self.ax.get_xlim()[0] + 50 + yloc = self.ax.get_ylim()[-1] + 50 + self.title = self.ax.text( + xloc, + yloc, + "Select a polygon of at least 4 points that encompasses at least the water surface. 
diff --git a/pyorc/cli/cli_utils.py b/pyorc/cli/cli_utils.py
index eb60136..886ae1d 100644
--- a/pyorc/cli/cli_utils.py
+++ b/pyorc/cli/cli_utils.py
@@ -11,7 +11,7 @@
 import yaml
 
 from pyorc import Video, helpers, CameraConfig, cv
-from pyorc.cli.cli_elements import GcpSelect, AoiSelect
+from pyorc.cli.cli_elements import GcpSelect, AoiSelect, StabilizeSelect
 from shapely.geometry import Point
 
 
@@ -49,18 +49,30 @@ def get_corners_interactive(
     # setup a cam_config without
 
 
-def get_gcps_interactive(fn, dst, crs=None, crs_gcps=None, frame_sample=0., lens_position=None, logger=logging):
+
+def get_gcps_interactive(fn, dst, crs=None, crs_gcps=None, frame_sample=0, lens_position=None, logger=logging):
     vid = Video(fn, start_frame=frame_sample, end_frame=frame_sample + 1)
     # get first frame
     frame = vid.get_frame(0, method="rgb")
     if crs_gcps is not None:
         dst = helpers.xyz_transform(dst, crs_from=crs_gcps, crs_to=4326)
     selector = GcpSelect(frame, dst, crs=crs, lens_position=lens_position, logger=logger)
-    # uncomment below to test the interaction, not suitable for automated unit test
     plt.show(block=True)
     return selector.src, selector.camera_matrix, selector.dist_coeffs
 
 
+def get_stabilize_pol(
+        fn,
+        frame_sample=0,
+        logger=logging
+):
+    vid = Video(fn, start_frame=frame_sample, end_frame=frame_sample + 1)
+    frame = vid.get_frame(0, method="rgb")
+    selector = StabilizeSelect(frame, logger=logger)
+    plt.show(block=True)
+    return selector.src
+
+
 def get_file_hash(fn):
     hash256 = hashlib.sha256()
     with open(fn, "rb") as f:
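`get_stabilize_pol` follows the same pattern as `get_gcps_interactive`: open the sampled frame, block until the interactive window is closed, and return the clicked points. A hypothetical call (the video path is a placeholder):

    import logging

    from pyorc.cli import cli_utils

    # opens the polygon selector on frame 0 of the video; once the window is
    # closed, stabilize holds a list of [column, row] vertices
    stabilize = cli_utils.get_stabilize_pol("movie.mp4", frame_sample=0, logger=logging)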
diff --git a/pyorc/cli/main.py b/pyorc/cli/main.py
index 77826af..b9231bd 100644
--- a/pyorc/cli/main.py
+++ b/pyorc/cli/main.py
@@ -152,6 +152,13 @@ def cli(ctx, info, license, debug):  # , quiet, verbose):
     callback=cli_utils.parse_corners,
     help="Video objective corner points as list of 4 [column, row] points"
 )
+@click.option(
+    "--stabilize",
+    "-s",
+    is_flag=True,
+    default=False,
+    help="Stabilize videos using this camera configuration (you can provide a stable area in an interactive view)."
+)
 @verbose_opt
 @click.pass_context
 @typechecked
@@ -171,9 +179,10 @@ def camera_config(
     lens_position: Optional[List[float]],
     shapefile: Optional[str],
     corners: Optional[List[List]],
+    stabilize: bool,
     verbose: int
 ):
-    log_level = max(10, 30 - 10 * verbose)
+    log_level = max(10, 20 - 10 * verbose)
     logger = log.setuplog("cameraconfig", os.path.abspath("pyorc.log"), append=False, log_level=log_level)
     logger.info(f"Preparing your cameraconfig file in {output}")
     logger.info(f"Found video file {videofile}")
@@ -237,6 +246,14 @@ def camera_config(
             dist_coeffs=dist_coeffs,
             logger=logger
         )
+    if stabilize:
+        stabilize = cli_utils.get_stabilize_pol(
+            videofile,
+            frame_sample=frame_sample,
+            logger=logger
+        )
+    else:
+        stabilize = None
     pyorc.service.camera_config(
         video_file=videofile,
         cam_config_file=output,
@@ -248,12 +265,13 @@ def camera_config(
         lens_position=lens_position,
         corners=corners,
         camera_matrix=camera_matrix,
-        dist_coeffs=dist_coeffs
+        dist_coeffs=dist_coeffs,
+        stabilize=stabilize
     )
     logger.info(f"Camera configuration created and stored in {output}")
 
-## VELOCIMETRY
+# VELOCIMETRY
 @cli.command(short_help="Estimate velocimetry")
 @click.argument(
     'OUTPUT',
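The log-level change is small but user-visible: with `max(10, 30 - 10 * verbose)` the default was WARNING and one `-v` was needed to reach INFO, whereas `max(10, 20 - 10 * verbose)` makes INFO the default and a single `-v` already yields DEBUG (the same formula is applied to the velocimetry command below). A quick check of the arithmetic:

    # logging levels: 30 = WARNING, 20 = INFO, 10 = DEBUG
    for verbose in (0, 1, 2):
        print(verbose, max(10, 30 - 10 * verbose), max(10, 20 - 10 * verbose))
    # verbose=0: 30 -> 20, verbose=1: 20 -> 10, verbose=2: 10 -> 10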
@@ -285,6 +303,13 @@ def camera_config(
     default="",
     help="Prefix for produced output files"
 )
+@click.option(
+    "-h",
+    "--h_a",
+    type=float,
+    required=False,
+    help="Water level in local vertical datum (e.g. staff or pressure gauge) belonging to the video."
+)
 @click.option(
     "-u",
     "--update",
@@ -295,10 +320,12 @@ def camera_config(
 @verbose_opt
 @click.pass_context
-def velocimetry(ctx, output, videofile, recipe, cameraconfig, prefix, update, verbose):
-    log_level = max(10, 30 - 10 * verbose)
+def velocimetry(ctx, output, videofile, recipe, cameraconfig, prefix, h_a, update, verbose):
+    log_level = max(10, 20 - 10 * verbose)
     logger = log.setuplog("velocimetry", os.path.abspath("pyorc.log"), append=False, log_level=log_level)
     logger.info(f"Preparing your velocimetry result in {output}")
+    if h_a is not None:
+        recipe["video"]["h_a"] = h_a
     processor = pyorc.service.VelocityFlowProcessor(
         recipe,
         videofile,
@@ -310,7 +337,6 @@ def velocimetry(ctx, output, videofile, recipe, cameraconfig, prefix, update, verbose):
     )
     # process video following the settings
     processor.process()
-    # read yaml
     pass
 
 
 if __name__ == "__main__":
diff --git a/setup.py b/setup.py
index edd9398..841c4e2 100644
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
 setup(
     name="pyopenrivercam",
     description="pyopenrivercam (pyorc) is a front and backend to control river camera observation locations",
-    version="0.4.3",
+    version="0.5.0",
     long_description=readme + "\n\n",
     long_description_content_type="text/markdown",
     url="https://github.com/localdevices/pyorc",
diff --git a/tests/conftest.py b/tests/conftest.py
index b73da00..e26c392 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -286,7 +286,12 @@ def vid_cam_config_stabilize(cam_config):
         end_frame=20,
         camera_config=cam_config,
         h_a=0.,
-        stabilize="fixed"
+        stabilize=[
+            [400, 1080],
+            [170, 0],
+            [1000, 0],
+            [1750, 1080]
+        ]  # the area outside this polygon is used for stabilization
     )
     yield vid
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 0ca9fe9..83def03 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -2,7 +2,7 @@
 from click.testing import CliRunner
 
 from pyorc.cli.main import cli
-from pyorc.cli.cli_elements import GcpSelect, AoiSelect
+from pyorc.cli.cli_elements import GcpSelect, AoiSelect, StabilizeSelect
 from pyorc.cli import cli_utils
 from pyorc.helpers import xyz_transform
 import json
@@ -63,12 +63,12 @@ def test_cli_velocimetry(cli_obj, vid_file, cam_config_fn, cli_recipe_fn, cli_output_dir):
             cam_config_fn,
             '-r',
             cli_recipe_fn,
-            '-vvv',
+            '-v',
             cli_output_dir,
             '-u'
         ],
         echo=True
     )
     assert result.exit_code == 0
@@ -108,6 +108,12 @@ def test_aoi_interact(frame_rgb, cam_config_without_aoi):
     # uncomment below to test the interaction, not suitable for automated unit test
     # plt.show(block=True)
 
+def test_stabilize_interact(frame_rgb):
+    import matplotlib.pyplot as plt
+    selector = StabilizeSelect(frame_rgb)
+    # uncomment below to test the interaction, not suitable for automated unit test
+    # plt.show(block=True)
+
 # cli utils
 def test_read_shape(gcps_fn):
     coords, wkt = cli_utils.read_shape(gcps_fn)
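The fixture change mirrors the API change: `Video` no longer accepts `stabilize="fixed"` but a polygon of `[column, row]` vertices whose outside area serves as the stable region. A sketch of the new call, assuming a placeholder video path and leaving out the camera configuration that the fixture supplies:

    from pyorc import Video

    vid = Video(
        "movie.mp4",     # placeholder path
        start_frame=0,
        end_frame=20,
        h_a=0.,
        stabilize=[[400, 1080], [170, 0], [1000, 0], [1750, 1080]],
    )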
diff --git a/tests/test_video.py b/tests/test_video.py
index 2af5d7a..f2c18e3 100644
--- a/tests/test_video.py
+++ b/tests/test_video.py
@@ -31,7 +31,7 @@ def test_fps(vid):
     "video, method, result",
     [
         (pytest.lazy_fixture("vid_cam_config"), "grayscale", [85, 71, 65, 80]),
-        (pytest.lazy_fixture("vid_cam_config_stabilize"), "grayscale", [8, 87, 76, 72]),
+        (pytest.lazy_fixture("vid_cam_config_stabilize"), "grayscale", [5, 88, 78, 73]),
         (pytest.lazy_fixture("vid_cam_config"), "rgb", [84, 91, 57, 70]),
         (pytest.lazy_fixture("vid_cam_config"), "hsv", [36, 95, 91, 36])
     ]
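The updated expected values for the stabilized fixture follow from the polygon-based stabilization changing which pixels end up in each frame. Should they need regenerating after a future change, a helper along these lines can produce candidate numbers; the reduction of a frame to four comparison values is an assumption here, not the test suite's actual code:

    import numpy as np

    def summary_values(frame, n=4):
        # hypothetical reduction of a grayscale frame to n comparison values:
        # split the flattened frame into n parts and take rounded means
        parts = np.array_split(np.asarray(frame).ravel(), n)
        return [int(np.round(p.mean())) for p in parts]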