CObservation3DRangeScan_project3D_impl.h
1 /* +---------------------------------------------------------------------------+
2  | Mobile Robot Programming Toolkit (MRPT) |
3  | http://www.mrpt.org/ |
4  | |
5  | Copyright (c) 2005-2017, Individual contributors, see AUTHORS file |
6  | See: http://www.mrpt.org/Authors - All rights reserved. |
7  | Released under BSD License. See details in http://www.mrpt.org/License |
8  +---------------------------------------------------------------------------+ */
9 #ifndef CObservation3DRangeScan_project3D_impl_H
10 #define CObservation3DRangeScan_project3D_impl_H
11 
12 #include <mrpt/utils/round.h> // round()
13 
14 namespace mrpt {
15 namespace obs {
16 namespace detail {
17  // Auxiliary functions which implement SSE-optimized projection of 3D point clouds:
18  template <class POINTMAP> void do_project_3d_pointcloud(const int H,const int W,const float *kys,const float *kzs,mrpt::math::CMatrix &rangeImage, mrpt::utils::PointCloudAdapter<POINTMAP> &pca, std::vector<uint16_t> &idxs_x, std::vector<uint16_t> &idxs_y,const mrpt::obs::TRangeImageFilterParams &fp, bool MAKE_DENSE, const int DECIM);
19  template <class POINTMAP> void do_project_3d_pointcloud_SSE2(const int H,const int W,const float *kys,const float *kzs,mrpt::math::CMatrix &rangeImage, mrpt::utils::PointCloudAdapter<POINTMAP> &pca, std::vector<uint16_t> &idxs_x, std::vector<uint16_t> &idxs_y,const mrpt::obs::TRangeImageFilterParams &fp, bool MAKE_DENSE);
20 
21  template <typename POINTMAP, bool isDepth>
22  inline void range2XYZ(
23  mrpt::utils::PointCloudAdapter<POINTMAP>& pca,
24  mrpt::obs::CObservation3DRangeScan& src_obs,
25  const mrpt::obs::TRangeImageFilterParams& fp, const int H, const int W)
26  {
27  /* range_is_depth = false :
28  * Ky = (r_cx - c)/r_fx
29  * Kz = (r_cy - r)/r_fy
30  *
31  * x(i) = rangeImage(r,c) / sqrt( 1 + Ky^2 + Kz^2 )
32  * y(i) = Ky * x(i)
33  * z(i) = Kz * x(i)
34  */
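 // For range_is_depth = true (Kinect-like depth images) the stored value is
 // already the distance along the sensor +X axis, so simply:
 //  x(i) = rangeImage(r,c)
 //  y(i) = Ky * x(i)
 //  z(i) = Kz * x(i)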
35  const float r_cx = src_obs.cameraParams.cx();
36  const float r_cy = src_obs.cameraParams.cy();
37  const float r_fx_inv = 1.0f / src_obs.cameraParams.fx();
38  const float r_fy_inv = 1.0f / src_obs.cameraParams.fy();
39  TRangeImageFilter rif(fp);
40  size_t idx = 0;
41  for (int r = 0; r < H; r++)
42  for (int c = 0; c < W; c++)
43  {
44  const float D = src_obs.rangeImage.coeff(r, c);
45  if (rif.do_range_filter(r, c, D))
46  {
47  const float Ky = (r_cx - c) * r_fx_inv;
48  const float Kz = (r_cy - r) * r_fy_inv;
49  pca.setPointXYZ(
50  idx,
51  isDepth ? D : D / std::sqrt(1 + Ky * Ky + Kz * Kz), // x
52  Ky * D, // y
53  Kz * D // z
54  );
55  src_obs.points3D_idxs_x[idx] = c;
56  src_obs.points3D_idxs_y[idx] = r;
57  ++idx;
58  }
59  else
60  {
61  if (fp.mark_invalid_ranges)
62  src_obs.rangeImage.coeffRef(r, c) = 0;
63  }
64  }
65  pca.resize(idx); // Actual number of valid pts
66  }
67 
68  template <typename POINTMAP, bool isDepth>
69  inline void range2XYZ_LUT(
70  mrpt::utils::PointCloudAdapter<POINTMAP>& pca,
71  mrpt::obs::CObservation3DRangeScan& src_obs,
72  const mrpt::obs::T3DPointsProjectionParams& pp,
73  const mrpt::obs::TRangeImageFilterParams& fp, const int H, const int W,
74  const int DECIM = 1)
75  {
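 // The per-pixel Ky/Kz projection coefficients are cached in m_3dproj_lut and
 // are rebuilt only when the camera parameters change or when the image size
 // no longer matches the cached tables: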
76  const size_t WH = W * H;
77  if (src_obs.m_3dproj_lut.prev_camParams != src_obs.cameraParams ||
78  WH != size_t(src_obs.m_3dproj_lut.Kys.size()))
79  {
80  src_obs.m_3dproj_lut.prev_camParams = src_obs.cameraParams;
81  src_obs.m_3dproj_lut.Kys.resize(WH);
82  src_obs.m_3dproj_lut.Kzs.resize(WH);
83 
84  const float r_cx = src_obs.cameraParams.cx();
85  const float r_cy = src_obs.cameraParams.cy();
86  const float r_fx_inv = 1.0f / src_obs.cameraParams.fx();
87  const float r_fy_inv = 1.0f / src_obs.cameraParams.fy();
88 
89  float* kys = &src_obs.m_3dproj_lut.Kys[0];
90  float* kzs = &src_obs.m_3dproj_lut.Kzs[0];
91  for (int r = 0; r < H; r++)
92  for (int c = 0; c < W; c++)
93  {
94  *kys++ = (r_cx - c) * r_fx_inv;
95  *kzs++ = (r_cy - r) * r_fy_inv;
96  }
97  } // end update LUT.
98 
99  ASSERT_EQUAL_(WH, size_t(src_obs.m_3dproj_lut.Kys.size()));
100  ASSERT_EQUAL_(WH, size_t(src_obs.m_3dproj_lut.Kzs.size()));
101  float* kys = &src_obs.m_3dproj_lut.Kys[0];
102  float* kzs = &src_obs.m_3dproj_lut.Kzs[0];
103 
104  if (fp.rangeMask_min)
105  { // sanity check:
106  ASSERT_EQUAL_(fp.rangeMask_min->cols(), src_obs.rangeImage.cols());
107  ASSERT_EQUAL_(fp.rangeMask_min->rows(), src_obs.rangeImage.rows());
108  }
109  if (fp.rangeMask_max)
110  { // sanity check:
111  ASSERT_EQUAL_(fp.rangeMask_max->cols(), src_obs.rangeImage.cols());
112  ASSERT_EQUAL_(fp.rangeMask_max->rows(), src_obs.rangeImage.rows());
113  }
114  #if MRPT_HAS_SSE2
115  // if image width is not 8*N, use standard method
116  if ((W & 0x07) == 0 && pp.USE_SSE2 && DECIM == 1)
117  do_project_3d_pointcloud_SSE2(
118  H, W, kys, kzs, src_obs.rangeImage, pca, src_obs.points3D_idxs_x,
119  src_obs.points3D_idxs_y, fp, pp.MAKE_DENSE);
120  else
121  #endif
122  do_project_3d_pointcloud(
123  H, W, kys, kzs, src_obs.rangeImage, pca, src_obs.points3D_idxs_x,
124  src_obs.points3D_idxs_y, fp, pp.MAKE_DENSE, DECIM);
125  }
126 
127  template <class POINTMAP>
128  void project3DPointsFromDepthImageInto(
129  mrpt::obs::CObservation3DRangeScan& src_obs, POINTMAP& dest_pointcloud,
130  const mrpt::obs::T3DPointsProjectionParams& pp,
131  const mrpt::obs::TRangeImageFilterParams& fp)
132  {
133  using namespace mrpt::math;
134 
135  if (!src_obs.hasRangeImage) return;
136 
137  mrpt::utils::PointCloudAdapter<POINTMAP> pca(dest_pointcloud);
138 
139  // ------------------------------------------------------------
140  // Stage 1/3: Create 3D point cloud local coordinates
141  // ------------------------------------------------------------
142  const int W = src_obs.rangeImage.cols();
143  const int H = src_obs.rangeImage.rows();
144  ASSERT_(W != 0 && H != 0);
145  const size_t WH = W * H;
146 
147  if (pp.decimation == 1)
148  {
149  // No decimation: one point per range image pixel
150 
151  // This is to make sure points3D_idxs_{x,y} have the expected sizes
152  src_obs.resizePoints3DVectors(WH);
153  // Reserve memory for 3D points. It will be later resized again to the
154  // actual number of valid points
155  pca.resize(WH);
156  //if (pp.MAKE_DENSE) pca.setDimensions(H, W);
157  if (src_obs.range_is_depth)
158  {
159  // range_is_depth = true
160  // Use cached tables?
161  if (pp.PROJ3D_USE_LUT)
162  range2XYZ_LUT<POINTMAP, true>(pca, src_obs, pp, fp, H, W);
163  else
164  range2XYZ<POINTMAP, true>(pca, src_obs, fp, H, W);
165  }
166  else
167  range2XYZ<POINTMAP, false>(pca, src_obs, fp, H, W);
168  }
169  else
170  {
171  // Decimate range image:
172  const auto DECIM = pp.decimation;
173  ASSERTMSG_(
174  (W % DECIM) == 0 && (H % DECIM == 0),
175  "Width/Height are not an exact multiple of decimation");
176  const int Wd = W / DECIM;
177  const int Hd = H / DECIM;
178  ASSERT_(Wd != 0 && Hd != 0);
179  const size_t WHd = Wd * Hd;
180 
181  src_obs.resizePoints3DVectors(WHd);
182  pca.resize(WHd);
183  //if (pp.MAKE_DENSE) pca.setDimensions(Hd, Wd);
184  ASSERTMSG_(
185  src_obs.range_is_depth && pp.PROJ3D_USE_LUT,
186  "Decimation only available if range_is_depth && PROJ3D_USE_LUT");
187  range2XYZ_LUT<POINTMAP, true>(pca, src_obs, pp, fp, H, W, DECIM);
188  }
189 
190  // -------------------------------------------------------------
191  // Stage 2/3: Project local points into RGB image to get colors
192  // -------------------------------------------------------------
193  if (src_obs.hasIntensityImage)
194  {
195  const int imgW = src_obs.intensityImage.getWidth();
196  const int imgH = src_obs.intensityImage.getHeight();
197  const bool hasColorIntensityImg = src_obs.intensityImage.isColor();
198 
199  const float cx = src_obs.cameraParamsIntensity.cx();
200  const float cy = src_obs.cameraParamsIntensity.cy();
201  const float fx = src_obs.cameraParamsIntensity.fx();
202  const float fy = src_obs.cameraParamsIntensity.fy();
203 
204  // Unless we are in a special case (both depth & RGB images coincide)...
205  const bool isDirectCorresp = src_obs.doDepthAndIntensityCamerasCoincide();
206 
207  // ...precompute the inverse of the pose transformation out of the loop,
208  // store as a 4x4 homogeneous matrix to exploit SSE optimizations below:
209  Eigen::Matrix<float,4,4> T_inv;
210  if (!isDirectCorresp)
211  {
212  mrpt::math::CMatrixFixedNumeric<double,3,3> R_inv;
213  mrpt::math::CMatrixFixedNumeric<double,3,1> t_inv;
214  mrpt::math::homogeneousMatrixInverse(
215  src_obs.relativePoseIntensityWRTDepth.getRotationMatrix(), src_obs.relativePoseIntensityWRTDepth.m_coords, // R, t to invert
216  R_inv,t_inv);
217 
218  T_inv(3,3)=1;
219  T_inv.block<3,3>(0,0)=R_inv.cast<float>();
220  T_inv.block<3,1>(0,3)=t_inv.cast<float>();
221  }
222 
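 // T_inv maps points from the depth-camera frame into the intensity-camera
 // frame; points are kept as 4x1 homogeneous vectors so that one 4x4 product
 // applies both the rotation and the translation.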
223  Eigen::Matrix<float,4,1> pt_wrt_color, pt_wrt_depth;
224  pt_wrt_depth[3]=1;
225 
226  mrpt::utils::TColor pCol;
227 
228  // For each local point:
229  const size_t nPts = pca.size();
230  for (size_t i=0;i<nPts;i++)
231  {
232  int img_idx_x, img_idx_y; // projected pixel coordinates, in the RGB image plane
233  bool pointWithinImage = false;
234  if (isDirectCorresp)
235  {
236  pointWithinImage=true;
237  img_idx_x = src_obs.points3D_idxs_x[i];
238  img_idx_y = src_obs.points3D_idxs_y[i];
239  }
240  else
241  {
242  // Project point, which is now in "pca" in local coordinates wrt the depth camera, into the intensity camera:
243  pca.getPointXYZ(i,pt_wrt_depth[0],pt_wrt_depth[1],pt_wrt_depth[2]);
244  pt_wrt_color.noalias() = T_inv*pt_wrt_depth;
245 
246  // Project to image plane:
247  if (pt_wrt_color[2]) {
248  img_idx_x = mrpt::utils::round( cx + fx * pt_wrt_color[0]/pt_wrt_color[2] );
249  img_idx_y = mrpt::utils::round( cy + fy * pt_wrt_color[1]/pt_wrt_color[2] );
250  pointWithinImage=
251  img_idx_x>=0 && img_idx_x<imgW &&
252  img_idx_y>=0 && img_idx_y<imgH;
253  }
254  }
255 
256  if (pointWithinImage)
257  {
258  if (hasColorIntensityImg) {
259  const uint8_t *c= src_obs.intensityImage.get_unsafe(img_idx_x, img_idx_y, 0);
260  pCol.R = c[2];
261  pCol.G = c[1];
262  pCol.B = c[0];
263  }
264  else{
265  uint8_t c= *src_obs.intensityImage.get_unsafe(img_idx_x, img_idx_y, 0);
266  pCol.R = pCol.G = pCol.B = c;
267  }
268  }
269  else
270  {
271  pCol.R = pCol.G = pCol.B = 255;
272  }
273  // Set color:
274  pca.setPointRGBu8(i,pCol.R,pCol.G,pCol.B);
275  } // end for each point
276  } // end if src_obs has intensity image
277 
278  // ...
279 
280  // ------------------------------------------------------------
281  // Stage 3/3: Apply 6D transformations
282  // ------------------------------------------------------------
283  if (pp.takeIntoAccountSensorPoseOnRobot || pp.robotPoseInTheWorld)
284  {
285  mrpt::poses::CPose3D transf_to_apply; // Either ROBOTPOSE or ROBOTPOSE(+)SENSORPOSE or SENSORPOSE
286  if (pp.takeIntoAccountSensorPoseOnRobot)
287  transf_to_apply = src_obs.sensorPose;
288  if (pp.robotPoseInTheWorld)
289  transf_to_apply.composeFrom(*pp.robotPoseInTheWorld, mrpt::poses::CPose3D(transf_to_apply));
290 
291  mrpt::math::CMatrixFixedNumeric<float,4,4> HM = transf_to_apply.getHomogeneousMatrixVal().cast<float>();
292  Eigen::Matrix<float,4,1> pt, pt_transf;
293  pt[3]=1;
294 
295  const size_t nPts = pca.size();
296  for (size_t i=0;i<nPts;i++)
297  {
298  pca.getPointXYZ(i,pt[0],pt[1],pt[2]);
299  pt_transf.noalias() = HM*pt;
300  pca.setPointXYZ(i,pt_transf[0],pt_transf[1],pt_transf[2]);
301  }
302  }
303  } // end of project3DPointsFromDepthImageInto
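 // Illustrative usage sketch (hypothetical variable names; it assumes the
 // destination map type has a mrpt::utils::PointCloudAdapter specialization,
 // as e.g. mrpt::maps::CColouredPointsMap does):
 //
 //   mrpt::obs::CObservation3DRangeScan   obs;        // filled in by a sensor driver
 //   mrpt::maps::CColouredPointsMap       pointcloud;
 //   mrpt::obs::T3DPointsProjectionParams pp;         // takeIntoAccountSensorPoseOnRobot, decimation, ...
 //   mrpt::obs::TRangeImageFilterParams   fp;         // optional rangeMask_min / rangeMask_max
 //   mrpt::obs::detail::project3DPointsFromDepthImageInto(obs, pointcloud, pp, fp);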
304 
305  // Auxiliary functions which implement (un)projection of 3D point clouds:
306  template <class POINTMAP>
307  void do_project_3d_pointcloud(
308  const int H, const int W, const float* kys, const float* kzs,
309  mrpt::math::CMatrix& rangeImage,
310  mrpt::utils::PointCloudAdapter<POINTMAP>& pca,
311  std::vector<uint16_t>& idxs_x, std::vector<uint16_t>& idxs_y,
312  const mrpt::obs::TRangeImageFilterParams& fp, bool MAKE_DENSE,
313  const int DECIM)
314  {
315  TRangeImageFilter rif(fp);
316  // Preconditions: minRangeMask() has the right size
317  size_t idx = 0;
318  if (DECIM == 1)
319  {
320  for (int r = 0; r < H; r++)
321  for (int c = 0; c < W; c++)
322  {
323  const float D = rangeImage.coeff(r, c);
324  // LUT projection coefs:
325  const auto ky = *kys++, kz = *kzs++;
326  if (!rif.do_range_filter(r, c, D))
327  {
328  if (!MAKE_DENSE) pca.setInvalidPoint(idx++);
329  if (fp.mark_invalid_ranges) rangeImage.coeffRef(r, c) = 0;
330  continue;
331  }
332  pca.setPointXYZ(idx, D /*x*/, ky * D /*y*/, kz * D /*z*/);
333  idxs_x[idx] = c;
334  idxs_y[idx] = r;
335  ++idx;
336  }
337  }
338  else
339  {
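 // Decimated version: each DECIM x DECIM block of the range image yields at
 // most one point; the smallest valid range in the block is kept and projected
 // with the LUT coefficients of the block's central pixel.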
340  const int Hd = H / DECIM, Wd = W / DECIM;
341 
342  for (int rd = 0; rd < Hd; rd++)
343  for (int cd = 0; cd < Wd; cd++)
344  {
345  bool valid_pt = false;
346  float min_d = std::numeric_limits<float>::max();
347  for (int rb = 0; rb < DECIM; rb++)
348  for (int cb = 0; cb < DECIM; cb++)
349  {
350  const auto r = rd * DECIM + rb, c = cd * DECIM + cb;
351  const float D = rangeImage.coeff(r, c);
352  if (rif.do_range_filter(r, c, D))
353  {
354  valid_pt = true;
355  if (D < min_d) min_d = D;
356  }
357  else
358  {
359  if (fp.mark_invalid_ranges)
360  rangeImage.coeffRef(r, c) = 0;
361  }
362  }
363  if (!valid_pt)
364  {
365  if (!MAKE_DENSE) pca.setInvalidPoint(idx++);
366  continue;
367  }
368  const auto eq_r = rd * DECIM + DECIM / 2,
369  eq_c = cd * DECIM + DECIM / 2;
370  const auto ky = kys[eq_c + eq_r * W], kz = kzs[eq_c + eq_r * W];
371  pca.setPointXYZ(
372  idx, min_d /*x*/, ky * min_d /*y*/, kz * min_d /*z*/);
373  idxs_x[idx] = eq_c;
374  idxs_y[idx] = eq_r;
375  ++idx;
376  }
377  }
378  pca.resize(idx);
379  // Make sure indices are also resized down to the actual number of points,
380  // even if they are not part of the object PCA refers to:
381  idxs_x.resize(idx);
382  idxs_y.resize(idx);
383  }
384 
385  // Auxiliary functions which implement (un)projection of 3D point clouds:
386  template <class POINTMAP>
387  inline void do_project_3d_pointcloud_SSE2(const int H,const int W,const float *kys,const float *kzs,mrpt::math::CMatrix &rangeImage, mrpt::utils::PointCloudAdapter<POINTMAP> &pca, std::vector<uint16_t> &idxs_x, std::vector<uint16_t> &idxs_y,const mrpt::obs::TRangeImageFilterParams &fp, bool MAKE_DENSE)
388  {
389  #if MRPT_HAS_SSE2
390  // Preconditions: minRangeMask() has the right size
391  // Use optimized version:
392  const int W_4 = W >> 2; // /=4 , since we process 4 values at a time.
393  size_t idx=0;
394  MRPT_ALIGN16 float xs[4],ys[4],zs[4];
395  const __m128 D_zeros = _mm_set_ps(.0f,.0f,.0f,.0f);
396  const __m128 xormask = (fp.rangeCheckBetween) ?
397  _mm_cmpneq_ps(D_zeros,D_zeros) : // want points BETWEEN min and max to be valid
398  _mm_cmpeq_ps(D_zeros,D_zeros); // want points OUTSIDE of min and max to be valid
399  for (int r=0;r<H;r++)
400  {
401  const float *D_ptr = &rangeImage.coeffRef(r,0); // Matrices are 16-aligned
402  const float *Dgt_ptr = !fp.rangeMask_min ? NULL : &fp.rangeMask_min->coeffRef(r,0);
403  const float *Dlt_ptr = !fp.rangeMask_max ? NULL : &fp.rangeMask_max->coeffRef(r,0);
404 
405  for (int c=0;c<W_4;c++)
406  {
407  const __m128 D = _mm_load_ps(D_ptr);
408  const __m128 nz_mask = _mm_cmpgt_ps(D, D_zeros);
409  __m128 valid_range_mask;
410  if (!fp.rangeMask_min && !fp.rangeMask_max)
411  { // No filter: just skip D=0 points
412  valid_range_mask = nz_mask;
413  }
414  else
415  {
416  if (!fp.rangeMask_min || !fp.rangeMask_max)
417  { // Only one filter
418  if (fp.rangeMask_min)
419  {
420  const __m128 Dmin = _mm_load_ps(Dgt_ptr);
421  valid_range_mask = _mm_or_ps(
422  _mm_cmpgt_ps(D, Dmin), _mm_cmpeq_ps(Dmin, D_zeros));
423  }
424  else
425  {
426  const __m128 Dmax = _mm_load_ps(Dlt_ptr);
427  valid_range_mask = _mm_or_ps(
428  _mm_cmplt_ps(D, Dmax), _mm_cmpeq_ps(Dmax, D_zeros));
429  }
430  valid_range_mask = _mm_and_ps(
431  valid_range_mask, nz_mask); // Filter out D=0 points
432  }
433  else
434  {
435  // We have both: D>Dmin and D<Dmax conditions, with XOR to
436  // optionally invert the selection:
437  const __m128 Dmin = _mm_load_ps(Dgt_ptr);
438  const __m128 Dmax = _mm_load_ps(Dlt_ptr);
439 
440  const __m128 gt_mask = _mm_or_ps(
441  _mm_cmpgt_ps(D, Dmin), _mm_cmpeq_ps(Dmin, D_zeros));
442  const __m128 lt_mask = _mm_or_ps(
443  _mm_cmplt_ps(D, Dmax), _mm_cmpeq_ps(Dmax, D_zeros));
444  // (D>Dmin && D<Dmax) & skip points at zero
445  valid_range_mask =
446  _mm_and_ps(nz_mask, _mm_and_ps(gt_mask, lt_mask));
447  valid_range_mask = _mm_xor_ps(valid_range_mask, xormask);
448  // Add the case of D_min & D_max = 0 (no filtering)
449  valid_range_mask = _mm_or_ps(
450  valid_range_mask, _mm_and_ps(
451  _mm_cmpeq_ps(Dmin, D_zeros),
452  _mm_cmpeq_ps(Dmax, D_zeros)));
453  // Finally, ensure no invalid ranges get thru:
454  valid_range_mask = _mm_and_ps(valid_range_mask, nz_mask);
455  }
456  }
457  const int valid_range_maski = _mm_movemask_epi8(_mm_castps_si128(valid_range_mask)); // 0x{f|0}{f|0}{f|0}{f|0}
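 // _mm_movemask_epi8() returns one bit per byte, so each of the 4 float lanes
 // contributes 4 identical bits; lane q is tested below with (1 << (q*4)).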
458  if (valid_range_maski!=0) // Any of the 4 values is valid?
459  {
460  const __m128 KY = _mm_load_ps(kys);
461  const __m128 KZ = _mm_load_ps(kzs);
462 
463  _mm_storeu_ps(xs , D);
464  _mm_storeu_ps(ys , _mm_mul_ps(KY,D));
465  _mm_storeu_ps(zs , _mm_mul_ps(KZ,D));
466 
467  for (int q=0;q<4;q++)
468  {
469  const int actual_c = (c << 2) + q;
470  if ((valid_range_maski & (1<<(q*4))) !=0) {
471  pca.setPointXYZ(idx,xs[q],ys[q],zs[q]);
472  idxs_x[idx]=actual_c;
473  idxs_y[idx]=r;
474  ++idx;
475  }
476  else
477  {
478  if (!MAKE_DENSE)
479  {
480  pca.setInvalidPoint(idx);
481  ++idx;
482  }
483  if (fp.mark_invalid_ranges)
484  rangeImage.coeffRef(r, actual_c) = 0;
485  }
486  }
487  }
488  else if (!MAKE_DENSE)
489  {
490  for( int q=0; q<4; q++)
491  {
492  pca.setInvalidPoint(idx);
493  ++idx;
494  const int actual_c = (c << 2) + q;
495  if (fp.mark_invalid_ranges)
496  rangeImage.coeffRef(r, actual_c) = 0;
497  }
498  }
499  D_ptr+=4;
500  if (Dgt_ptr) Dgt_ptr+=4;
501  if (Dlt_ptr) Dlt_ptr+=4;
502  kys+=4;
503  kzs+=4;
504  }
505  }
506  pca.resize(idx);
507  // Make sure indices are also resized down to the actual number of points,
508  // even if they are not part of the object PCA refers to:
509  idxs_x.resize(idx);
510  idxs_y.resize(idx);
511  #endif
512  }
513 
514 } // End of namespace
515 } // End of namespace
516 } // End of namespace
517 #endif