{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE TemplateHaskell #-}

module OpenCV.Calib3d
    ( FundamentalMatMethod(..)
    , FindHomographyMethod(..)
    , FindHomographyParams(..)
    , WhichImage(..)
    -- , calibrateCamera
    , findFundamentalMat
    , findHomography
    , computeCorrespondEpilines

    , SolvePnPMethod(..)
    , solvePnP
    ) where

import "base" Data.Int
import "base" Data.Word
import "base" Foreign.C.Types
import "base" Foreign.Marshal.Utils ( fromBool )
import qualified "inline-c" Language.C.Inline as C
import qualified "inline-c-cpp" Language.C.Inline.Cpp as C
import "data-default" Data.Default
import "this" OpenCV.Internal.C.Inline ( openCvCtx )
import "this" OpenCV.Internal.C.Types
import "this" OpenCV.Internal.Calib3d.Constants
import "this" OpenCV.Core.Types
import "this" OpenCV.Internal.Core.Types
import "this" OpenCV.Internal.Core.Types.Mat
import "this" OpenCV.Internal.Exception
import "this" OpenCV.TypeLevel
import "transformers" Control.Monad.Trans.Except
import qualified "vector" Data.Vector as V

--------------------------------------------------------------------------------

C.context openCvCtx

C.include "opencv2/core.hpp"
C.include "opencv2/calib3d.hpp"
C.using "namespace cv"

--------------------------------------------------------------------------------
-- Types

data FundamentalMatMethod
   = FM_7Point
   | FM_8Point
   | FM_Ransac !(Maybe Double) !(Maybe Double)
   | FM_Lmeds !(Maybe Double)
     deriving (Show, Eq)

marshalFundamentalMatMethod :: FundamentalMatMethod -> (Int32, CDouble, CDouble)
marshalFundamentalMatMethod = \case
    FM_7Point       -> (c'CV_FM_7POINT, 0, 0)
    FM_8Point       -> (c'CV_FM_8POINT, 0, 0)
    FM_Ransac p1 p2 -> (c'CV_FM_RANSAC, maybe 3 realToFrac p1, maybe 0.99 realToFrac p2)
    FM_Lmeds  p2    -> (c'CV_FM_LMEDS, 0, maybe 0.99 realToFrac p2)

data WhichImage = Image1 | Image2 deriving (Show, Eq)

marshalWhichImage :: WhichImage -> Int32
marshalWhichImage = \case
    Image1 -> 1
    Image2 -> 2

data FindHomographyMethod
   = FindHomographyMethod_0
     -- ^ A regular method using all the points.
   | FindHomographyMethod_RANSAC
     -- ^ RANSAC-based robust method.
   | FindHomographyMethod_LMEDS
     -- ^ Least-Median robust method.
   | FindHomographyMethod_RHO
     -- ^ PROSAC-based robust method.
     deriving (Show)

marshalFindHomographyMethod :: FindHomographyMethod -> Int32
marshalFindHomographyMethod = \case
    FindHomographyMethod_0      -> 0
    FindHomographyMethod_RANSAC -> c'RANSAC
    FindHomographyMethod_LMEDS  -> c'LMEDS
    FindHomographyMethod_RHO    -> c'RHO

--------------------------------------------------------------------------------

-- {- |
-- <http://docs.opencv.org/3.0-last-rst/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#calibratecamera OpenCV Sphinx doc>
-- -}
-- calibrateCamera
--     :: ( ToSize2i imageSize
--        , camMat ~ Mat (ShapeT [3, 3]) ('S 1) ('S Double)
--        )
--     . V.Vector () -- combine objectPoints and imagePoints
--     -> imageSize
--     -> camMat
--     -> flags
--     -> criteria
--     -> (camMat, distCoeffs, rvecs, tvecs)
-- calibrateCamera = _todo

{- | Calculates a fundamental matrix from the corresponding points in two images.

The minimum number of points required depends on the 'FundamentalMatMethod':

* 'FM_7Point': @N == 7@
* 'FM_8Point': @N >= 8@
* 'FM_Ransac': @N >= 15@
* 'FM_Lmeds': @N >= 8@

With exactly 7 points the 'FM_7Point' method is used, regardless of the given
method.

With more than 7 points the 'FM_7Point' method will be replaced by the
'FM_8Point' method.

With between 7 and 15 points the 'FM_Ransac' method will be replaced by the
'FM_Lmeds' method.

With the 'FM_7Point' method and 7 points the result can contain up to 3
matrices, resulting in either 3, 6 or 9 rows. This is why the number of
resulting rows is tagged as 'D'ynamic. For all other methods the result always
contains 3 rows.

<http://docs.opencv.org/3.0-last-rst/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#findfundamentalmat OpenCV Sphinx doc>
-}
findFundamentalMat
    :: (IsPoint2 point2 CDouble)
    => V.Vector (point2 CDouble) -- ^ Points from the first image.
    -> V.Vector (point2 CDouble) -- ^ Points from the second image.
    -> FundamentalMatMethod
    -> CvExcept ( Maybe ( Mat ('S '[ 'D, 'S 3 ]) ('S 1) ('S Double)
                        , Mat ('S '[ 'D, 'D ]) ('S 1) ('S Word8 )
                        )
                )
findFundamentalMat pts1 pts2 method = do
    (fm, pointMask) <- c'findFundamentalMat
    -- If the c++ function can't find a fundamental matrix it will
    -- return an empty matrix. We check for this case by trying to
    -- coerce the result to the desired type.
    catchE (Just . (, unsafeCoerceMat pointMask) <$> coerceMat fm)
           (\case
              CoerceMatError _msgs -> pure Nothing
              otherError           -> throwE otherError
           )
  where
    c'findFundamentalMat = unsafeWrapException $ do
        fm        <- newEmptyMat
        pointMask <- newEmptyMat
        handleCvException (pure (fm, pointMask)) $
          withPtr fm $ \fmPtr ->
          withPtr pointMask $ \pointMaskPtr ->
          withArrayPtr (V.map toPoint pts1) $ \pts1Ptr ->
          withArrayPtr (V.map toPoint pts2) $ \pts2Ptr ->
            [cvExcept|
              cv::_InputArray pts1 = cv::_InputArray($(Point2d * pts1Ptr), $(int32_t c'numPts1));
              cv::_InputArray pts2 = cv::_InputArray($(Point2d * pts2Ptr), $(int32_t c'numPts2));
              *$(Mat * fmPtr) =
                cv::findFundamentalMat
                ( pts1
                , pts2
                , $(int32_t c'method)
                , $(double c'p1)
                , $(double c'p2)
                , *$(Mat * pointMaskPtr)
                );
            |]

    c'numPts1 = fromIntegral $ V.length pts1
    c'numPts2 = fromIntegral $ V.length pts2
    (c'method, c'p1, c'p2) = marshalFundamentalMatMethod method

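{- Usage sketch for 'findFundamentalMat' (illustrative only, not part of the
library API). It assumes that the 'V2' type from the @linear@ package is an
'IsPoint2' instance and uses arbitrary made-up coordinates; in real code the
point pairs would come from a feature matcher. With 8 correspondences the
'FM_8Point' method applies.

    fundamentalMatSketch = findFundamentalMat pts1 pts2 FM_8Point
      where
        pts1, pts2 :: V.Vector (V2 CDouble)
        pts1 = V.fromList [ V2 x y | (x, y) <- [ (0, 0), (10, 3), (25, 7), (43, 19)
                                               , (51, 20), (67, 31), (82, 40), (96, 50) ] ]
        pts2 = V.fromList [ V2 (x + 4) (y + 1) | V2 x y <- V.toList pts1 ]
-}
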
data FindHomographyParams
   = FindHomographyParams
     { fhpMethod                :: !FindHomographyMethod
     , fhpRansacReprojThreshold :: !Double
     , fhpMaxIters              :: !Int
     , fhpConfidence            :: !Double
     } deriving (Show)

instance Default FindHomographyParams where
    def = FindHomographyParams
          { fhpMethod                = FindHomographyMethod_0
          , fhpRansacReprojThreshold = 3
          , fhpMaxIters              = 2000
          , fhpConfidence            = 0.995
          }

findHomography
    :: (IsPoint2 point2 CDouble)
    => V.Vector (point2 CDouble) -- ^ Points from the first image.
    -> V.Vector (point2 CDouble) -- ^ Points from the second image.
    -> FindHomographyParams
    -> CvExcept ( Maybe ( Mat ('S '[ 'S 3, 'S 3 ]) ('S 1) ('S Double)
                        , Mat ('S '[ 'D, 'D ]) ('S 1) ('S Word8 )
                        )
                )
findHomography srcPoints dstPoints fhp = do
    (fm, pointMask) <- c'findHomography
    -- If the c++ function can't find a homography it will return an
    -- empty matrix. We check for this case by trying to coerce the
    -- result to the desired type.
    catchE (Just . (, unsafeCoerceMat pointMask) <$> coerceMat fm)
           (\case
              CoerceMatError _msgs -> pure Nothing
              otherError           -> throwE otherError
           )
  where
    c'findHomography = unsafeWrapException $ do
        fm        <- newEmptyMat
        pointMask <- newEmptyMat
        handleCvException (pure (fm, pointMask)) $
          withPtr fm $ \fmPtr ->
          withPtr pointMask $ \pointMaskPtr ->
          withArrayPtr (V.map toPoint srcPoints) $ \srcPtr ->
          withArrayPtr (V.map toPoint dstPoints) $ \dstPtr ->
            [cvExcept|
              cv::_InputArray srcPts = cv::_InputArray($(Point2d * srcPtr), $(int32_t c'numSrcPts));
              cv::_InputArray dstPts = cv::_InputArray($(Point2d * dstPtr), $(int32_t c'numDstPts));
              *$(Mat * fmPtr) =
                cv::findHomography
                ( srcPts
                , dstPts
                , $(int32_t c'method)
                , $(double c'ransacReprojThreshold)
                , *$(Mat * pointMaskPtr)
                , $(int32_t c'maxIters)
                , $(double c'confidence)
                );
            |]
    c'numSrcPts = fromIntegral $ V.length srcPoints
    c'numDstPts = fromIntegral $ V.length dstPoints
    c'method = marshalFindHomographyMethod $ fhpMethod fhp
    c'ransacReprojThreshold = realToFrac $ fhpRansacReprojThreshold fhp
    c'maxIters = fromIntegral $ fhpMaxIters fhp
    c'confidence = realToFrac $ fhpConfidence fhp

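{- Usage sketch for 'findHomography' (illustrative only, not part of the
library API). It shows how to start from the 'Default' instance of
'FindHomographyParams' and override a single field; the point vectors are
assumed to be built as in the 'findFundamentalMat' sketch above.

    homographySketch srcPts dstPts =
        findHomography srcPts dstPts
                       (def { fhpMethod = FindHomographyMethod_RANSAC })
-}
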
{- | For points in an image of a stereo pair, computes the corresponding epilines in the other image.

<http://docs.opencv.org/3.0-last-rst/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#computecorrespondepilines OpenCV Sphinx doc>
-}
computeCorrespondEpilines
    :: (IsPoint2 point2 CDouble)
    => V.Vector (point2 CDouble) -- ^ Points.
    -> WhichImage -- ^ Image which contains the points.
    -> Mat (ShapeT [3, 3]) ('S 1) ('S Double) -- ^ Fundamental matrix.
    -> CvExcept (Mat ('S ['D, 'S 1]) ('S 3) ('S Double))
computeCorrespondEpilines points whichImage fm = unsafeWrapException $ do
    epilines <- newEmptyMat
    handleCvException (pure $ unsafeCoerceMat epilines) $
      withArrayPtr (V.map toPoint points) $ \pointsPtr ->
      withPtr fm $ \fmPtr ->
      withPtr epilines $ \epilinesPtr -> do
        -- Destroy type information about the pointsPtr. We want to generate
        -- C++ code that works for any type of point, specifically Point2f
        -- and Point2d.
        [cvExcept|
          cv::_InputArray points =
            cv::_InputArray( $(Point2d * pointsPtr)
                           , $(int32_t c'numPoints)
                           );
          cv::computeCorrespondEpilines
            ( points
            , $(int32_t c'whichImage)
            , *$(Mat * fmPtr)
            , *$(Mat * epilinesPtr)
            );
        |]
  where
    c'numPoints = fromIntegral $ V.length points
    c'whichImage = marshalWhichImage whichImage

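{- Usage sketch combining 'findFundamentalMat' and 'computeCorrespondEpilines'
(illustrative only). The fundamental matrix relating the two views is estimated
first and then used to compute, for each point of the first image, the
corresponding epiline in the second image. Because 'findFundamentalMat' returns
a matrix with a dynamic number of rows, the sketch coerces it to the 3x3 shape
expected by 'computeCorrespondEpilines' with 'coerceMat'; that glue is an
assumption of the sketch, not something this module prescribes.

    epilinesSketch pts1 pts2 = do
        mbFm <- findFundamentalMat pts1 pts2 FM_8Point
        case mbFm of
          Nothing -> pure Nothing
          Just (fm, _pointMask) -> do
            fm33 <- coerceMat fm  -- 'D rows to 'S 3 rows, checked at runtime
            fmap Just (computeCorrespondEpilines pts1 Image1 fm33)
-}
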
data SolvePnPMethod
   = SolvePnP_Iterative !Bool
   | SolvePnP_P3P
   | SolvePnP_AP3P
   | SolvePnP_EPNP
   | SolvePnP_DLS
   | SolvePnP_UPNP

marshalSolvePnPMethod :: SolvePnPMethod -> (Int32, Int32)
marshalSolvePnPMethod = \case
    SolvePnP_Iterative useExtrinsicGuess
                  -> (c'SOLVEPNP_ITERATIVE, fromBool useExtrinsicGuess)
    SolvePnP_P3P  -> (c'SOLVEPNP_P3P , fromBool False)
    SolvePnP_AP3P -> (c'SOLVEPNP_AP3P, fromBool False)
    SolvePnP_EPNP -> (c'SOLVEPNP_EPNP, fromBool False)
    SolvePnP_DLS  -> (c'SOLVEPNP_DLS , fromBool False)
    SolvePnP_UPNP -> (c'SOLVEPNP_UPNP, fromBool False)

{- | Finds an object pose from 3D-2D point correspondences.

Parameters:

[@objectImageMatches@]: Correspondences between object coordinate space (3D)
and image points (2D).

[@cameraMatrix@]: Input camera matrix
\[
A =
\begin{bmatrix}
f_x & 0   & c_x \\
0   & f_y & c_y \\
0   & 0   & 1
\end{bmatrix}
\]

[@distCoeffs@]: Input distortion coefficients
\( \left ( k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y ] ] ] ] \right ) \)
of 4, 5, 8, 12 or 14 elements. If not given, the zero distortion
coefficients are assumed.

In case of success the algorithm outputs 3 values:

[@rvec@]: Output rotation vector that, together with __tvec__, brings points
from the model coordinate system to the camera coordinate system.

[@tvec@]: Output translation vector.

[@cameraMatrix@]: Output camera matrix. In most cases a copy of the input
camera matrix. With the 'SolvePnP_UPNP' method the \(f_x\) and \(f_y\)
parameters will be estimated.

The function estimates the object pose given a set of object points, their
corresponding image projections, the camera matrix and the distortion
coefficients; see the figure below. The X-axis of the camera frame points to
the right, the Y-axis downward and the Z-axis forward.

<<data/solvepnp.jpg solvepnp explanatory figure>>

Points expressed in the world frame \(\bf{X_w}\) are projected into the image
plane \([u,v]\) using the perspective projection model \(\bf{\Pi}\) and the
camera intrinsic parameters matrix \(\bf{A}\):

\[
\begin{align*}
\begin{bmatrix}
u \\
v \\
1
\end{bmatrix} &=
\bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
u \\
v \\
1
\end{bmatrix} &=
\begin{bmatrix}
f_x & 0   & c_x \\
0   & f_y & c_y \\
0   & 0   & 1
\end{bmatrix}
\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0
\end{bmatrix}
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0      & 0      & 0      & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\]

The estimated pose thus consists of the rotation (__rvec__) and translation
(__tvec__) vectors that transform a 3D point expressed in the world frame into
the camera frame:

\[
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0      & 0      & 0      & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\]

-}
solvePnP
    :: forall point3 point2 distCoeffs
     . ( IsPoint3 point3 CDouble
       , IsPoint2 point2 CDouble
       , ToMat distCoeffs
       , MatShape distCoeffs `In` '[ 'S '[ 'S 4, 'S 1 ]
                                   , 'S '[ 'S 5, 'S 1 ]
                                   , 'S '[ 'S 8, 'S 1 ]
                                   , 'S '[ 'S 12, 'S 1 ]
                                   , 'S '[ 'S 14, 'S 1 ]
                                   ]
       )
    => V.Vector (point3 CDouble, point2 CDouble) -- ^ 3D-2D point correspondences.
    -> Mat (ShapeT '[3, 3]) ('S 1) ('S Double) -- ^ Camera matrix.
    -> Maybe distCoeffs -- ^ Distortion coefficients.
    -> SolvePnPMethod
    -> CvExcept
       ( Mat (ShapeT '[3, 1]) ('S 1) ('S Double) -- rotation vector
       , Mat (ShapeT '[3, 1]) ('S 1) ('S Double) -- translation vector
       , Mat (ShapeT '[3, 3]) ('S 1) ('S Double) -- output camera matrix
       )
solvePnP objectImageMatches cameraMatrix mbDistCoeffs method = unsafeWrapException $ do
    rvec <- newEmptyMat
    tvec <- newEmptyMat
    let cameraMatrixOut = cloneMat cameraMatrix
    handleCvException (pure ( unsafeCoerceMat rvec
                            , unsafeCoerceMat tvec
                            , cameraMatrixOut
                            )) $
      withArrayPtr objectPoints $ \objectPointsPtr ->
      withArrayPtr imagePoints $ \imagePointsPtr ->
      withPtr cameraMatrixOut $ \cameraMatrixOutPtr ->
      withPtr (toMat <$> mbDistCoeffs) $ \distCoeffsPtr ->
      withPtr rvec $ \rvecPtr ->
      withPtr tvec $ \tvecPtr ->
        [cvExcept|
          cv::_InputArray objectPoints =
            cv::_InputArray( $(Point3d * objectPointsPtr)
                           , $(int32_t c'numPoints)
                           );
          cv::_InputArray imagePoints =
            cv::_InputArray( $(Point2d * imagePointsPtr)
                           , $(int32_t c'numPoints)
                           );
          cv::Mat * distCoeffsPtr = $(Mat * distCoeffsPtr);
          bool retval =
            cv::solvePnP
            ( objectPoints
            , imagePoints
            , *$(Mat * cameraMatrixOutPtr)
            , distCoeffsPtr
              ? cv::_InputArray(*distCoeffsPtr)
              : cv::_InputArray(cv::noArray())
            , *$(Mat * rvecPtr)
            , *$(Mat * tvecPtr)
            , $(int32_t useExtrinsicGuess)
            , $(int32_t methodFlag)
            );
        |]
  where
    (methodFlag, useExtrinsicGuess) = marshalSolvePnPMethod method

    c'numPoints :: Int32
    c'numPoints = fromIntegral $ V.length objectImageMatches

    objectPoints :: V.Vector Point3d
    objectPoints = V.map (toPoint . fst) objectImageMatches

    imagePoints :: V.Vector Point2d
    imagePoints = V.map (toPoint . snd) objectImageMatches
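
{- Usage sketch for 'solvePnP' (illustrative only, not part of the library
API). It assumes the 'V3' and 'V2' types from the @linear@ package as
'IsPoint3' / 'IsPoint2' instances, a camera matrix constructed elsewhere, and
no distortion coefficients. Because the @distCoeffs@ type variable is ambiguous
when passing 'Nothing', the sketch pins it to a 5x1 double matrix; any of the
allowed shapes would do.

    pnpSketch
        :: V.Vector (V3 CDouble, V2 CDouble)
        -> Mat (ShapeT '[3, 3]) ('S 1) ('S Double)
        -> CvExcept ( Mat (ShapeT '[3, 1]) ('S 1) ('S Double)
                    , Mat (ShapeT '[3, 1]) ('S 1) ('S Double)
                    )
    pnpSketch matches cameraMatrix = do
        (rvec, tvec, _cameraMatrixOut) <-
          solvePnP matches cameraMatrix
                   (Nothing :: Maybe (Mat (ShapeT '[5, 1]) ('S 1) ('S Double)))
                   (SolvePnP_Iterative False)
        pure (rvec, tvec)
-}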