MRPT  1.9.9
CObservation3DRangeScan_project3D_impl.h
/* +------------------------------------------------------------------------+
   |                 Mobile Robot Programming Toolkit (MRPT)                 |
   |                          https://www.mrpt.org/                          |
   |                                                                          |
   | Copyright (c) 2005-2019, Individual contributors, see AUTHORS file      |
   | See: https://www.mrpt.org/Authors - All rights reserved.                |
   | Released under BSD License. See: https://www.mrpt.org/License           |
   +------------------------------------------------------------------------+ */
#pragma once

#include <mrpt/core/cpu.h>
#include <mrpt/core/round.h>  // round()
#include <mrpt/math/CVectorFixed.h>
#include <Eigen/Dense>  // block<>()

namespace mrpt::obs::detail
{
// Auxiliary functions which implement SSE-optimized projection of a 3D point
// cloud:
template <class POINTMAP>
void do_project_3d_pointcloud(
    const int H, const int W, const float* kys, const float* kzs,
    mrpt::math::CMatrixF& rangeImage,
    mrpt::opengl::PointCloudAdapter<POINTMAP>& pca,
    std::vector<uint16_t>& idxs_x, std::vector<uint16_t>& idxs_y,
    const mrpt::obs::TRangeImageFilterParams& fp, bool MAKE_ORGANIZED,
    const int DECIM);
template <class POINTMAP>
void do_project_3d_pointcloud_SSE2(
    const int H, const int W, const float* kys, const float* kzs,
    mrpt::math::CMatrixF& rangeImage,
    mrpt::opengl::PointCloudAdapter<POINTMAP>& pca,
    std::vector<uint16_t>& idxs_x, std::vector<uint16_t>& idxs_y,
    const mrpt::obs::TRangeImageFilterParams& fp, bool MAKE_ORGANIZED);

template <typename POINTMAP, bool isDepth>
inline void range2XYZ(
    mrpt::opengl::PointCloudAdapter<POINTMAP>& pca,
    mrpt::obs::CObservation3DRangeScan& src_obs,
    const mrpt::obs::TRangeImageFilterParams& fp, const int H, const int W)
{
    /* range_is_depth = false :
     *   Ky = (r_cx - c)/r_fx
     *   Kz = (r_cy - r)/r_fy
     *
     *   x(i) = rangeImage(r,c) / sqrt( 1 + Ky^2 + Kz^2 )
     *   y(i) = Ky * x(i)
     *   z(i) = Kz * x(i)
     */
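    // Illustrative numeric example (assumed VGA intrinsics, fx=fy=525,
    // cx=319.5, cy=239.5; not taken from this file):
    //   pixel (r,c) = (100,400), measured D = 2.0 m  =>
    //     Ky = (319.5-400)/525 ~= -0.153,  Kz = (239.5-100)/525 ~= 0.266
    //   range_is_depth = true : (x,y,z) ~= (2.000, -0.307, 0.531)
    //   range_is_depth = false: x = 2.0/sqrt(1+Ky^2+Kz^2) ~= 1.912,
    //                           y = Ky*x, z = Kz*x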
    const float r_cx = src_obs.cameraParams.cx();
    const float r_cy = src_obs.cameraParams.cy();
    const float r_fx_inv = 1.0f / src_obs.cameraParams.fx();
    const float r_fy_inv = 1.0f / src_obs.cameraParams.fy();
    TRangeImageFilter rif(fp);
    size_t idx = 0;
    for (int r = 0; r < H; r++)
        for (int c = 0; c < W; c++)
        {
            const float D = src_obs.rangeImage.coeff(r, c);
            if (rif.do_range_filter(r, c, D))
            {
                const float Ky = (r_cx - c) * r_fx_inv;
                const float Kz = (r_cy - r) * r_fy_inv;
                pca.setPointXYZ(
                    idx,
                    isDepth ? D : D / std::sqrt(1 + Ky * Ky + Kz * Kz),  // x
                    Ky * D,  // y
                    Kz * D  // z
                );
                src_obs.points3D_idxs_x[idx] = c;
                src_obs.points3D_idxs_y[idx] = r;
                ++idx;
            }
            else
            {
                if (fp.mark_invalid_ranges)
                    src_obs.rangeImage.coeffRef(r, c) = 0;
            }
        }
    pca.resize(idx);  // Actual number of valid pts
}

template <typename POINTMAP, bool isDepth>
inline void range2XYZ_LUT(
    mrpt::opengl::PointCloudAdapter<POINTMAP>& pca,
    mrpt::obs::CObservation3DRangeScan& src_obs,
    const mrpt::obs::T3DPointsProjectionParams& pp,
    const mrpt::obs::TRangeImageFilterParams& fp, const int H, const int W,
    const int DECIM = 1)
{
    const size_t WH = W * H;
    if (src_obs.get_3dproj_lut().prev_camParams != src_obs.cameraParams ||
        WH != size_t(src_obs.get_3dproj_lut().Kys.size()))
    {
        src_obs.get_3dproj_lut().prev_camParams = src_obs.cameraParams;
        src_obs.get_3dproj_lut().Kys.resize(WH);
        src_obs.get_3dproj_lut().Kzs.resize(WH);

        const float r_cx = src_obs.cameraParams.cx();
        const float r_cy = src_obs.cameraParams.cy();
        const float r_fx_inv = 1.0f / src_obs.cameraParams.fx();
        const float r_fy_inv = 1.0f / src_obs.cameraParams.fy();

        float* kys = &src_obs.get_3dproj_lut().Kys[0];
        float* kzs = &src_obs.get_3dproj_lut().Kzs[0];
        for (int r = 0; r < H; r++)
            for (int c = 0; c < W; c++)
            {
                *kys++ = (r_cx - c) * r_fx_inv;
                *kzs++ = (r_cy - r) * r_fy_inv;
            }
    }  // end update LUT.

    ASSERT_EQUAL_(WH, size_t(src_obs.get_3dproj_lut().Kys.size()));
    ASSERT_EQUAL_(WH, size_t(src_obs.get_3dproj_lut().Kzs.size()));
    float* kys = &src_obs.get_3dproj_lut().Kys[0];
    float* kzs = &src_obs.get_3dproj_lut().Kzs[0];

    if (fp.rangeMask_min)
    {  // sanity check:
        ASSERT_EQUAL_(fp.rangeMask_min->cols(), src_obs.rangeImage.cols());
        ASSERT_EQUAL_(fp.rangeMask_min->rows(), src_obs.rangeImage.rows());
    }
    if (fp.rangeMask_max)
    {  // sanity check:
        ASSERT_EQUAL_(fp.rangeMask_max->cols(), src_obs.rangeImage.cols());
        ASSERT_EQUAL_(fp.rangeMask_max->rows(), src_obs.rangeImage.rows());
    }
#if MRPT_HAS_SSE2
    // if image width is not 8*N, use standard method
    if ((W & 0x07) == 0 && pp.USE_SSE2 && DECIM == 1 &&
        mrpt::cpu::supports(mrpt::cpu::feature::SSE2))
        do_project_3d_pointcloud_SSE2(
            H, W, kys, kzs, src_obs.rangeImage, pca, src_obs.points3D_idxs_x,
            src_obs.points3D_idxs_y, fp, pp.MAKE_ORGANIZED);
    else
#endif
        do_project_3d_pointcloud(
            H, W, kys, kzs, src_obs.rangeImage, pca, src_obs.points3D_idxs_x,
            src_obs.points3D_idxs_y, fp, pp.MAKE_ORGANIZED, DECIM);
}
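
// Sizing example (assuming a 640x480 range image; values not taken from this
// file): the LUT above caches WH = 307200 floats in each of Kys and Kzs,
// i.e. 2 * 307200 * 4 bytes ~= 2.3 MiB, and is rebuilt only when the camera
// parameters or the image size change. The SSE2 branch additionally requires
// the width to be a multiple of 8 ((W & 0x07) == 0) and DECIM == 1.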

template <class POINTMAP>
void project3DPointsFromDepthImageInto(
    mrpt::obs::CObservation3DRangeScan& src_obs, POINTMAP& dest_pointcloud,
    const mrpt::obs::T3DPointsProjectionParams& pp,
    const mrpt::obs::TRangeImageFilterParams& fp)
{
    using namespace mrpt::math;

    if (!src_obs.hasRangeImage) return;

    mrpt::opengl::PointCloudAdapter<POINTMAP> pca(dest_pointcloud);

    // ------------------------------------------------------------
    // Stage 1/3: Create 3D point cloud local coordinates
    // ------------------------------------------------------------
    const int W = src_obs.rangeImage.cols();
    const int H = src_obs.rangeImage.rows();
    ASSERT_(W != 0 && H != 0);
    const size_t WH = W * H;

    if (pp.decimation == 1)
    {
        // No decimation: one point per range image pixel

        // This is to make sure points3D_idxs_{x,y} have the expected sizes
        src_obs.resizePoints3DVectors(WH);
        // Reserve memory for 3D points. It will be later resized again to the
        // actual number of valid points
        pca.resize(WH);
        if (pp.MAKE_ORGANIZED) pca.setDimensions(H, W);
        if (src_obs.range_is_depth)
        {
            // range_is_depth = true
            // Use cached tables?
            if (pp.PROJ3D_USE_LUT)
                range2XYZ_LUT<POINTMAP, true>(pca, src_obs, pp, fp, H, W);
            else
                range2XYZ<POINTMAP, true>(pca, src_obs, fp, H, W);
        }
        else
            range2XYZ<POINTMAP, false>(pca, src_obs, fp, H, W);
    }
    else
    {
        // Decimate range image:
        const auto DECIM = pp.decimation;
        ASSERTMSG_(
            (W % DECIM) == 0 && (H % DECIM == 0),
            "Width/Height are not an exact multiple of decimation");
        const int Wd = W / DECIM;
        const int Hd = H / DECIM;
        ASSERT_(Wd != 0 && Hd != 0);
        const size_t WHd = Wd * Hd;

        src_obs.resizePoints3DVectors(WHd);
        pca.resize(WHd);
        if (pp.MAKE_ORGANIZED) pca.setDimensions(Hd, Wd);
        ASSERTMSG_(
            src_obs.range_is_depth && pp.PROJ3D_USE_LUT,
            "Decimation only available if range_is_depth && PROJ3D_USE_LUT");
        range2XYZ_LUT<POINTMAP, true>(pca, src_obs, pp, fp, H, W, DECIM);
    }

    // -------------------------------------------------------------
    // Stage 2/3: Project local points into RGB image to get colors
    // -------------------------------------------------------------
    if constexpr (pca.HAS_RGB)
    {
        if (src_obs.hasIntensityImage)
        {
            const int imgW = src_obs.intensityImage.getWidth();
            const int imgH = src_obs.intensityImage.getHeight();
            const bool hasColorIntensityImg = src_obs.intensityImage.isColor();

            const float cx = src_obs.cameraParamsIntensity.cx();
            const float cy = src_obs.cameraParamsIntensity.cy();
            const float fx = src_obs.cameraParamsIntensity.fx();
            const float fy = src_obs.cameraParamsIntensity.fy();

            // Unless we are in a special case (both depth & RGB images
            // coincide)...
            const bool isDirectCorresp =
                src_obs.doDepthAndIntensityCamerasCoincide();

            // ...precompute the inverse of the pose transformation out of the
            // loop, stored as a 4x4 homogeneous matrix to exploit SSE
            // optimizations below:
            mrpt::math::CMatrixFixedFloat<4, 4> T_inv;
            if (!isDirectCorresp)
            {
                mrpt::math::CMatrixFixedDouble<3, 3> R_inv;
                mrpt::math::CMatrixFixedDouble<3, 1> t_inv;
                mrpt::math::homogeneousMatrixInverse(
                    src_obs.relativePoseIntensityWRTDepth.getRotationMatrix(),
                    src_obs.relativePoseIntensityWRTDepth.m_coords, R_inv,
                    t_inv);

                T_inv(3, 3) = 1;
                T_inv.insertMatrix(0, 0, R_inv.cast_float());
                T_inv.insertMatrix(0, 3, t_inv.cast_float());
            }
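
            // Worked form of the inverse used above (a standard identity, not
            // specific to this file): for a homogeneous transform
            //   T = [ R  t ]        T^-1 = [ R^T  -R^T*t ]
            //       [ 0  1 ]               [  0      1   ]
            // homogeneousMatrixInverse() exploits this (a transpose instead of
            // a full 4x4 inversion), so T_inv maps points expressed in the
            // depth-camera frame into the intensity-camera frame.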

            CVectorFixedFloat<4> pt_wrt_color, pt_wrt_depth;
            pt_wrt_depth[3] = 1;
            mrpt::img::TColor pCol;

            // For each local point:
            const size_t nPts = pca.size();
            const auto& iimg = src_obs.intensityImage;
            const uint8_t* img_data = iimg.ptrLine<uint8_t>(0);
            const auto img_stride = iimg.getRowStride();
            for (size_t i = 0; i < nPts; i++)
            {
                int img_idx_x, img_idx_y;  // projected pixel coordinates,
                                           // in the RGB image plane
                bool pointWithinImage = false;
                if (isDirectCorresp)
                {
                    pointWithinImage = true;
                    img_idx_x = src_obs.points3D_idxs_x[i];
                    img_idx_y = src_obs.points3D_idxs_y[i];
                }
                else
                {
                    // Project point, which is now in "pca" in local
                    // coordinates wrt the depth camera, into the intensity
                    // camera:
                    pca.getPointXYZ(
                        i, pt_wrt_depth[0], pt_wrt_depth[1], pt_wrt_depth[2]);
                    pt_wrt_color = T_inv * pt_wrt_depth;

                    // Project to image plane:
                    if (pt_wrt_color[2])
                    {
                        img_idx_x = mrpt::round(
                            cx + fx * pt_wrt_color[0] / pt_wrt_color[2]);
                        img_idx_y = mrpt::round(
                            cy + fy * pt_wrt_color[1] / pt_wrt_color[2]);
                        pointWithinImage =
                            img_idx_x >= 0 && img_idx_x < imgW &&
                            img_idx_y >= 0 && img_idx_y < imgH;
                    }
                }

                if (pointWithinImage)
                {
                    if (hasColorIntensityImg)
                    {
                        const auto px_idx =
                            img_stride * img_idx_y + 3 * img_idx_x;
                        pCol.R = img_data[px_idx + 2];
                        pCol.G = img_data[px_idx + 1];
                        pCol.B = img_data[px_idx + 0];
                    }
                    else
                    {
                        const auto px_idx = img_stride * img_idx_y + img_idx_x;
                        pCol.R = pCol.G = pCol.B = img_data[px_idx];
                    }
                }
                else
                {
                    pCol.R = pCol.G = pCol.B = 255;
                }
                // Set color:
                pca.setPointRGBu8(i, pCol.R, pCol.G, pCol.B);
            }  // end for each point
        }  // end if src_obs has intensity image
    }
    // ...

    // ------------------------------------------------------------
    // Stage 3/3: Apply 6D transformations
    // ------------------------------------------------------------
    if (pp.takeIntoAccountSensorPoseOnRobot || pp.robotPoseInTheWorld)
    {
        mrpt::poses::CPose3D transf_to_apply;  // Either ROBOTPOSE or
                                               // ROBOTPOSE(+)SENSORPOSE or
                                               // SENSORPOSE
        if (pp.takeIntoAccountSensorPoseOnRobot)
            transf_to_apply = src_obs.sensorPose;
        if (pp.robotPoseInTheWorld)
            transf_to_apply.composeFrom(
                *pp.robotPoseInTheWorld, mrpt::poses::CPose3D(transf_to_apply));

        const auto HM =
            transf_to_apply
                .getHomogeneousMatrixVal<mrpt::math::CMatrixDouble44>()
                .cast_float();
        mrpt::math::CVectorFixedFloat<4> pt, pt_transf;
        pt[3] = 1;

        const size_t nPts = pca.size();
        for (size_t i = 0; i < nPts; i++)
        {
            pca.getPointXYZ(i, pt[0], pt[1], pt[2]);
            pt_transf = HM * pt;
            pca.setPointXYZ(i, pt_transf[0], pt_transf[1], pt_transf[2]);
        }
    }
}  // end of project3DPointsFromDepthImageInto

// Auxiliary functions which implement (un)projection of 3D point clouds:
template <class POINTMAP>
void do_project_3d_pointcloud(
    const int H, const int W, const float* kys, const float* kzs,
    mrpt::math::CMatrixF& rangeImage,
    mrpt::opengl::PointCloudAdapter<POINTMAP>& pca,
    std::vector<uint16_t>& idxs_x, std::vector<uint16_t>& idxs_y,
    const mrpt::obs::TRangeImageFilterParams& fp, bool MAKE_ORGANIZED,
    const int DECIM)
{
    TRangeImageFilter rif(fp);
    // Preconditions: minRangeMask() has the right size
    size_t idx = 0;
    if (DECIM == 1)
    {
        for (int r = 0; r < H; r++)
            for (int c = 0; c < W; c++)
            {
                const float D = rangeImage.coeff(r, c);
                // LUT projection coefs:
                const auto ky = *kys++, kz = *kzs++;
                if (!rif.do_range_filter(r, c, D))
                {
                    if (MAKE_ORGANIZED) pca.setInvalidPoint(idx++);
                    if (fp.mark_invalid_ranges) rangeImage.coeffRef(r, c) = 0;
                    continue;
                }
                pca.setPointXYZ(idx, D /*x*/, ky * D /*y*/, kz * D /*z*/);
                idxs_x[idx] = c;
                idxs_y[idx] = r;
                ++idx;
            }
    }
    else
    {
        const int Hd = H / DECIM, Wd = W / DECIM;

        for (int rd = 0; rd < Hd; rd++)
            for (int cd = 0; cd < Wd; cd++)
            {
                bool valid_pt = false;
                float min_d = std::numeric_limits<float>::max();
                for (int rb = 0; rb < DECIM; rb++)
                    for (int cb = 0; cb < DECIM; cb++)
                    {
                        const auto r = rd * DECIM + rb, c = cd * DECIM + cb;
                        const float D = rangeImage.coeff(r, c);
                        if (rif.do_range_filter(r, c, D))
                        {
                            valid_pt = true;
                            if (D < min_d) min_d = D;
                        }
                        else
                        {
                            if (fp.mark_invalid_ranges)
                                rangeImage.coeffRef(r, c) = 0;
                        }
                    }
                if (!valid_pt)
                {
                    if (MAKE_ORGANIZED) pca.setInvalidPoint(idx++);
                    continue;
                }
                const auto eq_r = rd * DECIM + DECIM / 2,
                           eq_c = cd * DECIM + DECIM / 2;
                const auto ky = kys[eq_c + eq_r * W], kz = kzs[eq_c + eq_r * W];
                pca.setPointXYZ(
                    idx, min_d /*x*/, ky * min_d /*y*/, kz * min_d /*z*/);
                idxs_x[idx] = eq_c;
                idxs_y[idx] = eq_r;
                ++idx;
            }
    }
    pca.resize(idx);
    // Make sure indices are also resized down to the actual number of points,
    // even if they are not part of the object PCA refers to:
    idxs_x.resize(idx);
    idxs_y.resize(idx);
}
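
// Decimation example (assumed values, for illustration only): with a 640x480
// depth image and DECIM = 8, the loop above emits at most Wd x Hd =
// 80 x 60 = 4800 points. Each output point takes the minimum valid depth
// found in its 8x8 block and the LUT coefficients of the block-center pixel,
// (eq_r, eq_c) = (rd*8 + 4, cd*8 + 4).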

// Auxiliary functions which implement (un)projection of 3D point clouds:
template <class POINTMAP>
void do_project_3d_pointcloud_SSE2(
    const int H, const int W, const float* kys, const float* kzs,
    mrpt::math::CMatrixF& rangeImage,
    mrpt::opengl::PointCloudAdapter<POINTMAP>& pca,
    std::vector<uint16_t>& idxs_x, std::vector<uint16_t>& idxs_y,
    const mrpt::obs::TRangeImageFilterParams& fp, bool MAKE_ORGANIZED)
{
#if MRPT_HAS_SSE2
    // Preconditions: minRangeMask() has the right size
    // Use optimized version:
    const int W_4 = W >> 2;  // /= 4, since we process 4 values at a time.
    size_t idx = 0;
    alignas(MRPT_MAX_STATIC_ALIGN_BYTES) float xs[4], ys[4], zs[4];
    const __m128 D_zeros = _mm_set_ps(.0f, .0f, .0f, .0f);
    const __m128 xormask =
        (fp.rangeCheckBetween)
            ? _mm_cmpneq_ps(D_zeros, D_zeros)  // want points BETWEEN min and
                                               // max to be valid
            : _mm_cmpeq_ps(D_zeros, D_zeros);  // want points OUTSIDE of min
                                               // and max to be valid
    for (int r = 0; r < H; r++)
    {
        const float* D_ptr = &rangeImage(r, 0);  // Matrices are 16-aligned
        const float* Dgt_ptr =
            !fp.rangeMask_min ? nullptr : &(*fp.rangeMask_min)(r, 0);
        const float* Dlt_ptr =
            !fp.rangeMask_max ? nullptr : &(*fp.rangeMask_max)(r, 0);

        for (int c = 0; c < W_4; c++)
        {
            const __m128 D = _mm_load_ps(D_ptr);
            const __m128 nz_mask = _mm_cmpgt_ps(D, D_zeros);
            __m128 valid_range_mask;
            if (!fp.rangeMask_min && !fp.rangeMask_max)
            {  // No filter: just skip D=0 points
                valid_range_mask = nz_mask;
            }
            else
            {
                if (!fp.rangeMask_min || !fp.rangeMask_max)
                {  // Only one filter
                    if (fp.rangeMask_min)
                    {
                        const __m128 Dmin = _mm_load_ps(Dgt_ptr);
                        valid_range_mask = _mm_or_ps(
                            _mm_cmpgt_ps(D, Dmin), _mm_cmpeq_ps(Dmin, D_zeros));
                    }
                    else
                    {
                        const __m128 Dmax = _mm_load_ps(Dlt_ptr);
                        valid_range_mask = _mm_or_ps(
                            _mm_cmplt_ps(D, Dmax), _mm_cmpeq_ps(Dmax, D_zeros));
                    }
                    valid_range_mask = _mm_and_ps(
                        valid_range_mask, nz_mask);  // Filter out D=0 points
                }
                else
                {
                    // We have both: D>Dmin and D<Dmax conditions, with XOR to
                    // optionally invert the selection:
                    const __m128 Dmin = _mm_load_ps(Dgt_ptr);
                    const __m128 Dmax = _mm_load_ps(Dlt_ptr);

                    const __m128 gt_mask = _mm_or_ps(
                        _mm_cmpgt_ps(D, Dmin), _mm_cmpeq_ps(Dmin, D_zeros));
                    const __m128 lt_mask = _mm_or_ps(
                        _mm_cmplt_ps(D, Dmax), _mm_cmpeq_ps(Dmax, D_zeros));
                    // (D>Dmin && D<Dmax) & skip points at zero
                    valid_range_mask =
                        _mm_and_ps(nz_mask, _mm_and_ps(gt_mask, lt_mask));
                    valid_range_mask = _mm_xor_ps(valid_range_mask, xormask);
                    // Add the case of D_min & D_max = 0 (no filtering)
                    valid_range_mask = _mm_or_ps(
                        valid_range_mask, _mm_and_ps(
                                              _mm_cmpeq_ps(Dmin, D_zeros),
                                              _mm_cmpeq_ps(Dmax, D_zeros)));
                    // Finally, ensure no invalid ranges get thru:
                    valid_range_mask = _mm_and_ps(valid_range_mask, nz_mask);
                }
            }
            const int valid_range_maski = _mm_movemask_epi8(
                _mm_castps_si128(valid_range_mask));  // 0x{f|0}{f|0}{f|0}{f|0}
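            // Note: _mm_movemask_epi8 gathers the MSB of each of the 16 bytes,
            // so each 32-bit float lane contributes 4 identical bits; lane q
            // maps to bits [4q..4q+3], which is why the per-point test below
            // checks bit (1 << (q * 4)).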
            if (valid_range_maski != 0)  // Any of the 4 values is valid?
            {
                const __m128 KY = _mm_load_ps(kys);
                const __m128 KZ = _mm_load_ps(kzs);

                _mm_storeu_ps(xs, D);
                _mm_storeu_ps(ys, _mm_mul_ps(KY, D));
                _mm_storeu_ps(zs, _mm_mul_ps(KZ, D));

                for (int q = 0; q < 4; q++)
                {
                    const int actual_c = (c << 2) + q;
                    if ((valid_range_maski & (1 << (q * 4))) != 0)
                    {
                        pca.setPointXYZ(idx, xs[q], ys[q], zs[q]);
                        idxs_x[idx] = actual_c;
                        idxs_y[idx] = r;
                        ++idx;
                    }
                    else
                    {
                        if (MAKE_ORGANIZED)
                        {
                            pca.setInvalidPoint(idx);
                            ++idx;
                        }
                        if (fp.mark_invalid_ranges)
                            rangeImage.coeffRef(r, actual_c) = 0;
                    }
                }
            }
            else if (MAKE_ORGANIZED)
            {
                for (int q = 0; q < 4; q++)
                {
                    pca.setInvalidPoint(idx);
                    ++idx;
                    const int actual_c = (c << 2) + q;
                    if (fp.mark_invalid_ranges)
                        rangeImage.coeffRef(r, actual_c) = 0;
                }
            }
            D_ptr += 4;
            if (Dgt_ptr) Dgt_ptr += 4;
            if (Dlt_ptr) Dlt_ptr += 4;
            kys += 4;
            kzs += 4;
        }
    }
    pca.resize(idx);
    // Make sure indices are also resized down to the actual number of points,
    // even if they are not part of the object PCA refers to:
    idxs_x.resize(idx);
    idxs_y.resize(idx);
#endif
}
}  // namespace mrpt::obs::detail
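
In normal use these helpers are not called directly: CObservation3DRangeScan::project3DPointsFromDepthImageInto() forwards to mrpt::obs::detail::project3DPointsFromDepthImageInto() defined above. A minimal usage sketch follows (illustrative only; the include paths, the example() helper and all parameter values are assumptions, not taken from this file):

#include <mrpt/maps/CColouredPointsMap.h>
#include <mrpt/obs/CObservation3DRangeScan.h>
#include <mrpt/obs/TRangeImageFilter.h>

// Hypothetical helper: build a colored, sensor-pose-corrected point cloud
// from one RGB-D observation.
void example(mrpt::obs::CObservation3DRangeScan& obs)
{
    mrpt::maps::CColouredPointsMap pts;  // any type with a PointCloudAdapter<> works

    mrpt::obs::T3DPointsProjectionParams pp;
    pp.takeIntoAccountSensorPoseOnRobot = true;  // stage 3/3: apply obs.sensorPose
    pp.PROJ3D_USE_LUT = true;                    // use the cached Kys/Kzs tables
    pp.USE_SSE2 = true;                          // allow the SSE2 code path
    pp.decimation = 1;                           // one point per valid pixel

    mrpt::obs::TRangeImageFilterParams fp;       // defaults: no min/max range masks
    fp.mark_invalid_ranges = false;

    // Stage 1: unproject obs.rangeImage; stage 2: color from obs.intensityImage:
    mrpt::obs::detail::project3DPointsFromDepthImageInto(obs, pts, pp, fp);
}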