[C.C++] Acquiring Depth Maps, Intensity Maps, and Point Clouds, and Converting a Depth Map into a Point Cloud


Preface

        The image acquisition hardware used in this article is a 3D profile sensor, also known as a 3D line laser or 3D camera. Its acquisition principle (laser triangulation) is not explained in detail here; interested readers can look up the relevant material online. Instead, we focus on using HALCON operators to obtain the depth image and the intensity image, convert the depth image into a point cloud, and display the result.

1) After connecting the external 3D camera, we usually need to configure the frame/line trigger mode and the image line count to make sure the scan of the product is complete. During acquisition, whether we poll for images or receive them through an image callback, we obtain the raw image data in the image-return event (a pointer, array, or instance object, plus image information such as pixel format and line count). The HALCON operator GenImage1 then converts this raw data into an image represented by a HALCON HObject. The code below demonstrates this step by step.

Image polling thread

/// <summary>
/// Polling-based image acquisition loop
/// </summary>
private void ReceiveThreadProcess()
{
    int nRet = (int)Mv3dLpSDK.MV3D_LP_OK;
    UInt32 nTimeOut = 50;
    while (m_bGrabbing)
    {
        Thread.Sleep(1);
        MV3D_LP_IMAGE_DATA pstImage = new MV3D_LP_IMAGE_DATA();
        nRet = Mv3dLpSDK.MV3D_LP_GetImage(m_DevHandle, pstImage, nTimeOut);
        if (0 == nRet)
        {
            try
            {
                // Cache the raw data so that the other images can be generated from it later
                nRet = DisplayImage(pstImage);
                // Notify listeners that the raw data has been stored
                GrabFlagEvent?.Invoke((int)Mv3dLpSDK.MV3D_LP_OK == nRet);
                if ((int)Mv3dLpSDK.MV3D_LP_OK != nRet)
                {
                    throw new ArgumentException(nRet.ToString());
                }
            }
            catch
            {
                Console.WriteLine("ERROR !\r\n");
            }
        }
        else
        {
            continue;
        }
    }
}
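
For completeness, here is a minimal sketch of how such a polling thread is typically started and stopped. The fields m_bGrabbing and m_DevHandle are the ones used above; the thread field name m_hReceiveThread is a hypothetical addition.

// Start grabbing: set the flag first, then start the polling thread.
m_bGrabbing = true;
m_hReceiveThread = new Thread(ReceiveThreadProcess);  // hypothetical field of type Thread
m_hReceiveThread.IsBackground = true;
m_hReceiveThread.Start();

// ... later, stop grabbing: clear the flag and wait for the loop to exit.
m_bGrabbing = false;
m_hReceiveThread.Join();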

Image data caching:

/// <summary>
/// Image display (here: cache the raw image data)
/// </summary>
/// <param name="pstImage"></param>
/// <returns></returns>
private int DisplayImage(MV3D_LP_IMAGE_DATA pstImage)
{
    int nRet = (int)Mv3dLpSDK.MV3D_LP_OK;
    Monitor.Enter(Lock);
    m_stImageInfo.nWidth = pstImage.nWidth;
    m_stImageInfo.nHeight = pstImage.nHeight;
    m_stImageInfo.nDataLen = pstImage.nDataLen;
    m_stImageInfo.nIntensityDataLen = pstImage.nIntensityDataLen;
    m_stImageInfo.enImageType = pstImage.enImageType;
    m_stImageInfo.nFrameNum = pstImage.nFrameNum;
    m_stImageInfo.fXScale = pstImage.fXScale;
    m_stImageInfo.fYScale = pstImage.fYScale;
    m_stImageInfo.fZScale = pstImage.fZScale;
    m_stImageInfo.nXOffset = pstImage.nXOffset;
    m_stImageInfo.nYOffset = pstImage.nYOffset;
    m_stImageInfo.nZOffset = pstImage.nZOffset;
    // Re-allocate the buffers only when the incoming data is larger than the current buffers
    if (m_nMaxImageSize < pstImage.nDataLen)
    {
        m_pcDataBuf = new byte[pstImage.nDataLen];
        m_nMaxImageSize = pstImage.nDataLen;
    }
    if (m_nMaxImageSize2 < pstImage.nIntensityDataLen)
    {
        m_pcDataBuf2 = new byte[pstImage.nIntensityDataLen];
        m_nMaxImageSize2 = pstImage.nIntensityDataLen;
    }
    m_stImageInfo.pData = Marshal.UnsafeAddrOfPinnedArrayElement(m_pcDataBuf, 0);
    m_stImageInfo.pIntensityData = Marshal.UnsafeAddrOfPinnedArrayElement(m_pcDataBuf2, 0);
    // Copy the depth data and the intensity data out of the SDK buffers
    Marshal.Copy(pstImage.pData, m_pcDataBuf, 0, (int)pstImage.nDataLen);
    Marshal.Copy(pstImage.pIntensityData, m_pcDataBuf2, 0, (int)pstImage.nIntensityDataLen);
    Monitor.Exit(Lock);
    // Display is not done here
    //nRet = Mv3dLpSDK.MV3D_LP_DisplayImage(pstImage, m_hWnd, Mv3dLpSDK.DisplayType_Auto, 0, 0);
    return nRet;
}

Generating the depth image HObject from the raw data

/// <summary>
/// Get the depth image (.tiff)
/// </summary>
/// <returns></returns>
public HObject GetDepthImage()
{
    HObject ho_Image = null;
    Monitor.Enter(Lock);
    // Pin the cached depth buffer so HALCON can read it through a raw pointer
    GCHandle hBuf = GCHandle.Alloc(m_pcDataBuf, GCHandleType.Pinned);
    IntPtr ptr = hBuf.AddrOfPinnedObject();
    // The depth data is 16-bit signed, hence the image type "int2"
    HOperatorSet.GenImage1(out ho_Image, "int2", m_stImageInfo.nWidth, m_stImageInfo.nHeight, ptr.ToInt64());
    Monitor.Exit(Lock);
    if (hBuf.IsAllocated)
    {
        hBuf.Free();
    }
    //if (null != ho_Image)
    //{
    //    // Save the depth image | Save Depth
    //    string strtiffName = "./Halcon_Image_";
    //    strtiffName += m_stImageInfo.nFrameNum;
    //    strtiffName += ".tiff";
    //    HOperatorSet.WriteImage(ho_Image, "tiff", 0, strtiffName);
    //}
    return ho_Image;
}
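
To check the result, the returned HObject can be shown in a HALCON window. A minimal sketch (hv_Window is assumed to be a valid HALCON window handle, for example the HalconWindow property of an HWindowControl):

HObject ho_Depth = GetDepthImage();
HTuple hv_W, hv_H;
HOperatorSet.GetImageSize(ho_Depth, out hv_W, out hv_H);
// Fit the window's displayed part to the image, then show it
HOperatorSet.SetPart(hv_Window, 0, 0, hv_H - 1, hv_W - 1);
HOperatorSet.DispObj(ho_Depth, hv_Window);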

The displayed depth image is shown below:

2) Using a similar approach, we can obtain the intensity image.

/// <summary>
/// Get the intensity image (.bmp)
/// </summary>
/// <returns></returns>
public HObject GetIntensityImage()
{
    HObject ho_Image = null;
    Monitor.Enter(Lock);
    // Pin the cached intensity buffer so HALCON can read it through a raw pointer
    GCHandle hBuf = GCHandle.Alloc(m_pcDataBuf2, GCHandleType.Pinned);
    IntPtr ptr = hBuf.AddrOfPinnedObject();
    // The intensity data is 8-bit, hence the image type "byte"
    HOperatorSet.GenImage1(out ho_Image, "byte", m_stImageInfo.nWidth, m_stImageInfo.nHeight, ptr.ToInt64());
    Monitor.Exit(Lock);
    if (hBuf.IsAllocated)
    {
        hBuf.Free();
    }
    //if (null != ho_Image)
    //{
    //    // Save the intensity image | Save Intensity
    //    string strBmpName = "./Halcon_Image_";
    //    strBmpName += m_stImageInfo.nFrameNum;
    //    strBmpName += ".bmp";
    //    HOperatorSet.WriteImage(ho_Image, "bmp", 0, strBmpName);
    //}
    return ho_Image;
}

3) The point cloud can also be obtained directly through the corresponding interface of the vendor's SDK, which is not covered here. Instead, we focus on how to use HALCON operators to generate a point cloud from the raw data and obtain a point cloud object handle (HTuple).

/// <summary>
/// Get the point cloud object (object model handle)
/// </summary>
/// <returns></returns>
public HTuple GetObjectModel3D()
{
    MV3D_LP_IMAGE_DATA stImageInfoTmp = new MV3D_LP_IMAGE_DATA();
    Monitor.Enter(Lock);
    GCHandle hBuf = GCHandle.Alloc(m_pcDataBuf, GCHandleType.Pinned);
    IntPtr ptr = hBuf.AddrOfPinnedObject();
    // Rebuild an image descriptor that points at the cached depth buffer
    stImageInfoTmp.enImageType = m_stImageInfo.enImageType;
    stImageInfoTmp.nFrameNum = m_stImageInfo.nFrameNum;
    stImageInfoTmp.nWidth = m_stImageInfo.nWidth;
    stImageInfoTmp.nHeight = m_stImageInfo.nHeight;
    stImageInfoTmp.nDataLen = m_stImageInfo.nDataLen;
    stImageInfoTmp.pData = ptr;
    stImageInfoTmp.fXScale = m_stImageInfo.fXScale;
    stImageInfoTmp.fYScale = m_stImageInfo.fYScale;
    stImageInfoTmp.fZScale = m_stImageInfo.fZScale;
    stImageInfoTmp.nXOffset = m_stImageInfo.nXOffset;
    stImageInfoTmp.nYOffset = m_stImageInfo.nYOffset;
    stImageInfoTmp.nZOffset = m_stImageInfo.nZOffset;
    MV3D_LP_IMAGE_DATA plyData = new MV3D_LP_IMAGE_DATA();
    // First convert the depth image into a point cloud through the SDK interface
    Mv3dLpSDK.MV3D_LP_MapDepthToPointCloud(stImageInfoTmp, plyData);
    // Then convert the point cloud data into a 3D object model usable by HALCON
    HTuple hObjectModel3D = null;
    HTuple hv_X = new HTuple();
    HTuple hv_Y = new HTuple();
    HTuple hv_Z = new HTuple();
    // The point cloud is stored as interleaved float triplets (x, y, z)
    float[] ConvertData = new float[plyData.nDataLen / 4];
    Marshal.Copy(plyData.pData, ConvertData, 0, (int)plyData.nDataLen / 4);
    for (int j = 0; j < plyData.nWidth * plyData.nHeight; j++)
    {
        hv_X[j] = ConvertData[j * 3];
        hv_Y[j] = ConvertData[j * 3 + 1];
        hv_Z[j] = ConvertData[j * 3 + 2];
    }
    HOperatorSet.GenObjectModel3dFromPoints(hv_X, hv_Y, hv_Z, out hObjectModel3D);
    Monitor.Exit(Lock);
    if (hBuf.IsAllocated)
    {
        hBuf.Free();
    }
    //if (null != hObjectModel3D)
    //{
    //    // Save the point cloud | Save PointCloud
    //    string strPlyName = "./Halcon_Image_";
    //    strPlyName += plyData.nFrameNum;
    //    strPlyName += ".ply";
    //    HOperatorSet.WriteObjectModel3d(hObjectModel3D, "ply", strPlyName, "invert_normals", "false");
    //}
    return hObjectModel3D;
}

The X/Y/Z scale and offset values used in the code can be found in the hardware's technical documentation. The exact formula for converting pixel coordinates to physical coordinates differs slightly between vendors, so don't worry if yours looks a little different. The key HALCON operator in this conversion is gen_object_model_3d_from_points.
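
Concretely, the linear mapping applied by the conversion code in section 4 below is the following (a sketch; the units of the scale and offset values are whatever the sensor's documentation specifies):

    x = column * XScale + XOffset
    y = row * YScale + YOffset
    z = depth_gray_value * ZScale + ZOffset

where column and row are the pixel coordinates in the depth image and depth_gray_value is its gray value.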

gen_object_model_3d_from_points( : : X, Y, Z : ObjectModel3D)

  Description: creates a 3D object model that represents a point cloud. The points are described by their x, y, and z coordinates.

  Parameters:
    X: input, x coordinates of the points of the 3D point cloud;
    Y: input, y coordinates of the points of the 3D point cloud;
    Z: input, z coordinates of the points of the 3D point cloud;
    ObjectModel3D: output, handle of the 3D object model.
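
As a quick standalone illustration (a minimal sketch, independent of the SDK code above), the operator can be called from HALCON/.NET like this:

// Three example points; units are up to the application (e.g. mm)
HTuple hv_X = new HTuple(new double[] { 0.0, 1.0, 2.0 });
HTuple hv_Y = new HTuple(new double[] { 0.0, 0.5, 1.0 });
HTuple hv_Z = new HTuple(new double[] { 10.0, 10.2, 9.8 });
HTuple hv_Model;
HOperatorSet.GenObjectModel3dFromPoints(hv_X, hv_Y, hv_Z, out hv_Model);
// ... use hv_Model, then free it when it is no longer needed
HOperatorSet.ClearObjectModel3d(hv_Model);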

4) The code above shows how to convert the camera's raw data into a point cloud object model handle (HTuple). Alternatively, we can acquire only the depth image from the camera and then convert that depth image into a point cloud for further processing and display. The conversion method is shown below.

/// <summary>
/// Convert a depth image into a point cloud
/// </summary>
/// <param name="ho_ImageH">Input depth image</param>
/// <param name="hv_XScale">X scale for the point cloud coordinates</param>
/// <param name="hv_XOffset">X offset for the point cloud coordinates</param>
/// <param name="hv_YScale">Y scale for the point cloud coordinates</param>
/// <param name="hv_YOffset">Y offset for the point cloud coordinates</param>
/// <param name="hv_ZScale">Z scale for the point cloud coordinates</param>
/// <param name="hv_ZOffset">Z offset for the point cloud coordinates</param>
/// <returns></returns>
public static HTuple DepthImg2PointCloud(HObject ho_ImageH,
    double hv_XScale, double hv_XOffset,
    double hv_YScale, double hv_YOffset,
    double hv_ZScale, double hv_ZOffset,
    out HObject ho_ImageX, out HObject ho_ImageY, out HObject ho_ImageZ)
{
    // Iconic variables
    HObject ho_Domain, ho_ImageSurface;
    HObject ho_ImageSurface1, ho_Region1;
    HObject ho_Region2, ho_RegionDifference, ho_ImageReduced;
    HObject ho_ImageHReal;
    // Control variables
    HTuple hv_Width = new HTuple();
    HTuple hv_Height = new HTuple(), hv_Min = new HTuple();
    HTuple hv_Max = new HTuple(), hv_Range = new HTuple();
    HTuple hv_ObjectModel3D = new HTuple();
    // Initialization
    HOperatorSet.GenEmptyObj(out ho_Domain);
    HOperatorSet.GenEmptyObj(out ho_ImageSurface);
    HOperatorSet.GenEmptyObj(out ho_ImageSurface1);
    HOperatorSet.GenEmptyObj(out ho_ImageX);
    HOperatorSet.GenEmptyObj(out ho_ImageY);
    HOperatorSet.GenEmptyObj(out ho_Region1);
    HOperatorSet.GenEmptyObj(out ho_Region2);
    HOperatorSet.GenEmptyObj(out ho_RegionDifference);
    HOperatorSet.GenEmptyObj(out ho_ImageReduced);
    HOperatorSet.GenEmptyObj(out ho_ImageHReal);
    HOperatorSet.GenEmptyObj(out ho_ImageZ);
    //* Example values, unit: mm
    //hv_ZScale = 0.001;
    //using (HDevDisposeHelper dh = new HDevDisposeHelper())
    //{
    //    hv_ZOffset = -10.0 / 1000;
    //}
    //hv_XScale.Dispose();
    //hv_XScale = 0.020;
    //hv_XOffset.Dispose();
    //using (HDevDisposeHelper dh = new HDevDisposeHelper())
    //{
    //    hv_XOffset = -7051.0 / 1000;
    //}
    //hv_YScale.Dispose();
    //hv_YScale = -0.020;
    //hv_YOffset.Dispose();
    //hv_YOffset = 0.0;
    // Build the x/y/z coordinate images from the depth map
    hv_Width.Dispose(); hv_Height.Dispose();
    HOperatorSet.GetImageSize(ho_ImageH, out hv_Width, out hv_Height);
    ho_Domain.Dispose();
    HOperatorSet.GetDomain(ho_ImageH, out ho_Domain);
    // Generate coordinate ramp images for x and y; scaling them by the resolution gives the x/y values
    //// yImage (gray value = row index)
    ho_ImageSurface.Dispose();
    HOperatorSet.GenImageSurfaceFirstOrder(out ho_ImageSurface, "real", 1.0, 0.0,
        0.0, 0.0, 0.0, hv_Width, hv_Height);
    //// xImage (gray value = column index)
    ho_ImageSurface1.Dispose();
    HOperatorSet.GenImageSurfaceFirstOrder(out ho_ImageSurface1, "real", 0.0, 1.0,
        0.0, 0.0, 0.0, hv_Width, hv_Height);
    ho_ImageX.Dispose();
    HOperatorSet.ScaleImage(ho_ImageSurface1, out ho_ImageX, hv_XScale, hv_XOffset);
    ho_ImageY.Dispose();
    HOperatorSet.ScaleImage(ho_ImageSurface, out ho_ImageY, hv_YScale, hv_YOffset);
    // Use scale_image to convert gray values to real heights; first get the min/max gray values
    ho_Domain.Dispose();
    HOperatorSet.GetDomain(ho_ImageH, out ho_Domain);
    hv_Min.Dispose(); hv_Max.Dispose(); hv_Range.Dispose();
    HOperatorSet.MinMaxGray(ho_Domain, ho_ImageH, 0, out hv_Min, out hv_Max, out hv_Range);
    // Exclude the lowest gray values (invalid/background pixels) from the domain
    ho_Region1.Dispose();
    HOperatorSet.Threshold(ho_ImageH, out ho_Region1, hv_Min, hv_Max);
    using (HDevDisposeHelper dh = new HDevDisposeHelper())
    {
        ho_Region2.Dispose();
        HOperatorSet.Threshold(ho_ImageH, out ho_Region2, hv_Min, hv_Min + 2);
    }
    ho_RegionDifference.Dispose();
    HOperatorSet.Difference(ho_Region1, ho_Region2, out ho_RegionDifference);
    ho_ImageReduced.Dispose();
    HOperatorSet.ReduceDomain(ho_ImageH, ho_RegionDifference, out ho_ImageReduced);
    ho_ImageHReal.Dispose();
    HOperatorSet.ConvertImageType(ho_ImageReduced, out ho_ImageHReal, "real");
    ho_ImageZ.Dispose();
    HOperatorSet.ScaleImage(ho_ImageHReal, out ho_ImageZ, hv_ZScale, hv_ZOffset);
    //ho_MultiChannelImage.Dispose();
    //HOperatorSet.Compose3(ho_ImageX, ho_ImageY, ho_ImageZ, out ho_MultiChannelImage);
    hv_ObjectModel3D.Dispose();
    HOperatorSet.XyzToObjectModel3d(ho_ImageX, ho_ImageY, ho_ImageZ, out hv_ObjectModel3D);
    // Save the point cloud
    //HOperatorSet.WriteObjectModel3d(hv_ObjectModel3D, "ply", "transply.ply",
    //    new HTuple(), new HTuple());
    ho_Domain.Dispose();
    ho_ImageSurface.Dispose();
    ho_ImageSurface1.Dispose();
    //ho_ImageX.Dispose();
    //ho_ImageY.Dispose();
    ho_Region1.Dispose();
    ho_Region2.Dispose();
    ho_RegionDifference.Dispose();
    ho_ImageReduced.Dispose();
    ho_ImageHReal.Dispose();
    //ho_ImageZ.Dispose();
    return hv_ObjectModel3D;
}
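
For reference, a minimal sketch of a call site that feeds the cached acquisition data from section 1 into this helper. The field names are the ones used above; whether the scale/offset fields need a unit conversion (such as the /1000 seen in the commented example values inside the function) depends on the sensor and must be checked against its documentation.

HObject ho_Depth = GetDepthImage();
HObject ho_X, ho_Y, ho_Z;
HTuple hv_Model = DepthImg2PointCloud(ho_Depth,
    m_stImageInfo.fXScale, m_stImageInfo.nXOffset,
    m_stImageInfo.fYScale, m_stImageInfo.nYOffset,
    m_stImageInfo.fZScale, m_stImageInfo.nZOffset,
    out ho_X, out ho_Y, out ho_Z);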

The DepthImg2PointCloud code is commented in detail, so it is not explained line by line here. Note, however, that the final conversion relies on one key operator, xyz_to_object_model_3d.

xyz_to_object_model_3d(X, Y, Z : : : ObjectModel3D)

Transforms 3D points from images into a 3D object model.

Input:

X (input_object), single-channel image (real)
  Image with the x coordinates of the 3D points; its domain defines the region of valid points.
Y (input_object), single-channel image (real)
  Image with the y coordinates of the 3D points.
Z (input_object), single-channel image (real)
  Image with the z coordinates of the 3D points.

Output:

ObjectModel3D (output_control)
  Handle of the 3D object model.

5) After converting with the methods above, let's display the point cloud and take a look.

Viewing the point cloud makes use of an important HALCON routine, visualize_object_model_3d.

visualize_object_model_3d( : : WindowHandle, ObjectModel3D, CamParam, PoseIn, GenParamName, GenParamValue, Title, Label, Information : PoseOut)

WindowHandle: handle of the display window

ObjectModel3D: the 3D object model(s) to display

CamParam: internal parameters of a hypothetical area-scan camera observing the model

PoseIn: the initial 3D pose of the model

GenParamName: names of generic visualization parameters

GenParamValue: values of generic visualization parameters

Title: text shown in the upper-left corner of the window

Label: text shown at the position of each 3D model

Information: information shown in the lower-left corner of the window

PoseOut: the 3D pose after the user has interactively adjusted the model
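
Note that visualize_object_model_3d is an HDevelop procedure rather than a built-in operator, so it is not exposed through HOperatorSet; from C# it can be exported as a .NET procedure, executed through HDevEngine, or replaced by the disp_object_model_3d operator. A minimal sketch of the latter, assuming hv_ObjectModel3D comes from one of the conversion methods above and hv_Window is a valid HALCON window handle:

// Display the 3D object model in an existing HALCON window.
// Empty tuples for CamParam and Pose let HALCON choose reasonable defaults.
HOperatorSet.DispObjectModel3d(hv_Window, hv_ObjectModel3D,
    new HTuple(), new HTuple(),   // CamParam, Pose
    new HTuple(), new HTuple());  // no extra generic display parameters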

6) Attachment: let's look at how the converted point cloud can be viewed and manipulated.

Point cloud viewer

This article has covered where the images come from, how to convert between them, and how to produce objects that HALCON can operate on. In the articles that follow, we will walk through the image display control and the point cloud processing algorithms one by one. Stay tuned!
