How to convert RawDepthImage to meters

Give us as much detail as possible regarding the issue you're experiencing:

**Unity Editor version**: 2022.3.17f
**ML2 OS version**: 1.6.0
**Unity SDK version**: 1.9.0

I want to get the RawDepthImage on Magic Leap 2, but I don't know how to convert the raw depth image values to meters.

Also, is it correct to get the raw depth image using the code below?

    if (lastData.RawDepthImage != null)
    {
        depth_Image_Texture = CheckAndCreateTexture(depth_Image_Texture, (int)lastData.RawDepthImage.Value.Width, (int)lastData.RawDepthImage.Value.Height);
        /* depthImgMinDist = depthImgMin.GetComponentInChildren<Slider>().value;
           depthImgMaxDist = depthImgMax.GetComponentInChildren<Slider>().value; */

        AdjustRendererFloats(imgRenderer, depthImgMinDist, depthImgMaxDist);
        depth_Image_Texture.LoadRawTextureData(lastData.RawDepthImage.Value.Data);
        depth_Image_Texture.Apply();

        recent_depthData = depth_Image_Texture;
        takeDepthData_Time = DateTime.Now;
    }

I recommend updating to the 1.12.0 Unity SDK if possible. Here is a snippet I have for rendering the depth data on a texture:

    if (lastData.RawDepthImage != null)
    {
        if (rawDepthTexture == null || rawDepthTexture.width != lastData.RawDepthImage.Value.Width || rawDepthTexture.height != lastData.RawDepthImage.Value.Height)
        {
            rawDepthTexture = new Texture2D((int)lastData.RawDepthImage.Value.Width, (int)lastData.RawDepthImage.Value.Height, TextureFormat.RFloat, false)
            {
                filterMode = FilterMode.Bilinear
            };
            rawDepthRenderer.material.mainTexture = rawDepthTexture;
            rawDepthRenderer.material.mainTextureScale = new Vector2(1.0f, -1.0f);
        }
        rawDepthTexture.LoadRawTextureData(lastData.RawDepthImage.Value.Data);
        rawDepthTexture.Apply();
    }
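If you also want to inspect the per-pixel values on the CPU, here is a minimal sketch that reads the RFloat texture back as a flat float array (it assumes the rawDepthTexture set up above and needs `using Unity.Collections;` for NativeArray):

    // Sketch only: read the RFloat texture back as floats after Apply().
    NativeArray<float> values = rawDepthTexture.GetRawTextureData<float>();
    int width = rawDepthTexture.width;
    int height = rawDepthTexture.height;
    // Log the value of the center pixel as an example.
    Debug.Log($"Center pixel value: {values[(height / 2) * width + (width / 2)]}");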

Thank you for your reply. I appreciate the guidance, and I will try to implement this. Additionally, I have a question about converting raw depth values to meters. The data in lastData.RawDepthImage.Value.Data isn't in meters, correct? I need to know the depth represented by each pixel. Could you please explain how to convert the raw depth values to meters?

You will need to do two things:

  1. Undistort the image so that the depth points are projected correctly.
  2. The image is a range image, so you will need to multiply each pixel's ray direction by the range value from the image.

The example below uses a cached projection table (which handles the distortion) and the depth data obtained from the depth sensor:

        // Iterate through each pixel in the depth data.
        for (int y = 0; y < resolution.y; ++y)
        {
            for (int x = 0; x < resolution.x; ++x)
            {
                // Calculate the linear index based on x, y coordinates.
                int index = x + (resolution.y - y - 1) * resolution.x;
                float depth = depthData[index];

                // Skip processing if depth is out of range or confidence is too low (if filter is enabled).
                // Confidence comes directly from the sensor pipeline and is represented as a float ranging from
                // [-1.0, 0.0] for long range and [-0.1, 0.0] for short range, where 0 is highest confidence. 
                if (depth < minDepth || depth > maxDepth || (useConfidenceFilter && confidenceData[index] < -0.1f))
                {
                    //Set the invalid points to be positioned at 0,0,0
                    depthPoints[index] = Vector3.zero;
                    continue;
                }

                // Use the cached projection table to find the UV coordinates for the current point.
                Vector2 uv = cachedProjectionTable[y, x];
                // Transform the UV coordinates into a camera space point.
                Vector3 cameraPoint = new Vector3(uv.x, uv.y, 1).normalized * depth;
                // Convert the camera space point into a world space point.
                Vector3 worldPoint = cameraToWorldMatrix.MultiplyPoint3x4(cameraPoint);

                // Store the world space point in the depthPoints array.
                depthPoints[index] = worldPoint;
            }
        }
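For reference, the loop above assumes a few inputs that are not shown. Here is a minimal sketch of how they could be set up from an MLDepthCamera.Data frame (variable names are illustrative; the depth and confidence buffers are read back as 32-bit floats, matching the RFloat textures above):

    // Sketch only: inputs for the projection loop above.
    // depthTexture / confidenceTexture are RFloat Texture2Ds filled with
    // LoadRawTextureData(...) from lastData.RawDepthImage / lastData.ConfidenceBuffer.
    NativeArray<float> depthData = depthTexture.GetRawTextureData<float>();
    NativeArray<float> confidenceData = confidenceTexture.GetRawTextureData<float>();
    Vector2Int resolution = new Vector2Int((int)lastData.Intrinsics.Width, (int)lastData.Intrinsics.Height);
    Vector3[] depthPoints = new Vector3[resolution.x * resolution.y];
    // Depth range and confidence filter settings (tune for your use case).
    float minDepth = 0.2f, maxDepth = 10f;
    bool useConfidenceFilter = true;
    // Camera pose for this frame, used to move camera-space points into world space.
    Matrix4x4 cameraToWorldMatrix = Matrix4x4.TRS(lastData.Position, lastData.Rotation, Vector3.one);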

Here is a more detailed example:

Is RawDepth already in meters?
So I don't need to convert the raw depth to meters?

Yes, but please note that this is a range image and not a depth image, so please pay attention to the pixel depth calculation.
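To make that concrete: the raw value at a pixel is the distance along the ray through that pixel, not the perpendicular (Z) distance from the image plane. A minimal sketch of the conversion, assuming u and v are the pixel's normalized image-plane coordinates (that is, (x - cx) / fx and (y - cy) / fy):

    // Sketch only: convert a range value (distance along the pixel's ray)
    // to a planar depth (Z). Assumes u, v are normalized image-plane coordinates.
    float RangeToPlanarDepth(float range, float u, float v)
    {
        // The ray direction is (u, v, 1); a point at distance `range` along it
        // has Z = range / |(u, v, 1)| = range / sqrt(u^2 + v^2 + 1).
        return range / Mathf.Sqrt(u * u + v * v + 1f);
    }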

I checked the values of RawDepthImage using the code below.

    case MLDepthCamera.CaptureFlags.RawDepthImage:
        if (lastData.RawDepthImage != null)
        {
            CheckAndCreateTexture((int)lastData.RawDepthImage.Value.Width, (int)lastData.RawDepthImage.Value.Height);

            depthImgMinDist = depthImgMin.GetComponentInChildren<Slider>().value;
            depthImgMaxDist = depthImgMax.GetComponentInChildren<Slider>().value;

            AdjustRendererFloats(imgRenderer, depthImgMinDist, depthImgMaxDist);
            ImageTexture.LoadRawTextureData(lastData.RawDepthImage.Value.Data);
            ImageTexture.Apply();
            ShowImageTextureData(ImageTexture);
        }
        break;

    private void ShowImageTextureData(Texture2D texture)
    {
        if (ImageTexture != null)
        {
            // Get the pixel data from the texture as floats.
            NativeArray<float> pixelData = texture.GetPixelData<float>(0);

            // Loop over the pixel data.
            for (int i = 0; i < pixelData.Length; i++)
            {
                // Log the distance value of each pixel.
                Debug.Log("RawDepth_Pixel " + i + ": " + pixelData[i]);
            }
        }
        else
        {
            Debug.Log("ImageTexture is null");
        }
    }

Then the data looks like this:

RawDepth_Pixel 159623: 1886
RawDepth_Pixel 159638: 30.95313
RawDepth_Pixel 159650: 29.5625

1886, 30.95313, and 29.5625 are the raw depth values, so I don't think they are in meters.
How can I convert these values to meters?

Using the script in the other post you can generate a point cloud; the point cloud will overlay the environment using the snippet I posted above. The value that you obtained could be due to an invalid pixel or one with low confidence.

No, I don't want to generate a point cloud. I'm interested in using "RAW" depth data, specifically RawDepthImage, not DepthImage.

min_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_DepthImage)] = 0.0f;
max_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_DepthImage)] = 5.0f;
distance_limit_[GetIndexFromCameraFlag(MLDepthCameraFlags_DepthImage)] = 7.5f;
legend_unit_[GetIndexFromCameraFlag(MLDepthCameraFlags_DepthImage)] = 'm';

min_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_Confidence)] = 0.0f;
max_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_Confidence)] = 100.0f;
distance_limit_[GetIndexFromCameraFlag(MLDepthCameraFlags_Confidence)] = 100.0f;
legend_unit_[GetIndexFromCameraFlag(MLDepthCameraFlags_Confidence)] = '%';

min_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_AmbientRawDepthImage)] = 5.0f;
max_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_AmbientRawDepthImage)] = 2000.0f;
distance_limit_[GetIndexFromCameraFlag(MLDepthCameraFlags_AmbientRawDepthImage)] = 2000.0f;
legend_unit_[GetIndexFromCameraFlag(MLDepthCameraFlags_AmbientRawDepthImage)] = ' ';

min_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_RawDepthImage)] = 5.0f;
max_distance_[GetIndexFromCameraFlag(MLDepthCameraFlags_RawDepthImage)] = 3000.0f;
distance_limit_[GetIndexFromCameraFlag(MLDepthCameraFlags_RawDepthImage)] = 3000.0f;
legend_unit_[GetIndexFromCameraFlag(MLDepthCameraFlags_RawDepthImage)] = ' ';

The values for RawDepthImage range from 5 to 3000, which doesn't seem to be in meters. It appears more like a 64-bit integer.

Since I am specifically looking to use RawDepthImage data, could you provide a formula to convert these "RAWDEPTHIMAGE" values into meters? It's important to note that I am not interested in using DepthImage data.

Hi, I'm not from Magic Leap, but I might be able to help since we use the RawDepthImage for our application. The values in the RawDepthImage are raw sensor measurements from the time-of-flight depth camera on the device, and should not be directly interpreted as distances from the camera.

I think you would need internal details about the TOF algorithm running on the ML2 in order to convert this raw image into a depth image. But, even if you had these details, the end result would likely be the same as the DepthImage itself.

In short, my personal advice is that you should use the DepthImage if you want meter distances and only use the RawDepthImage if you have another use for the raw frames besides depth estimation.
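If it helps, here is a minimal sketch of reading meter values from the processed DepthImage instead. I am assuming MLDepthCamera.CaptureFlags.DepthImage and MLDepthCamera.Data.DepthImage exist analogously to the RawDepthImage members used earlier in this thread, and that the buffer holds 32-bit floats (consistent with the 0–5 m legend values for the DepthImage that you quoted):

    // Sketch only: request the processed DepthImage (meters) rather than the raw frame.
    // Assumes CaptureFlags.DepthImage / Data.DepthImage exist analogously to RawDepthImage.
    MLDepthCamera.CaptureFlags captureFlag =
        MLDepthCamera.CaptureFlags.DepthImage | MLDepthCamera.CaptureFlags.Confidence;

    void ReadDepthInMeters(Texture2D depthTexture) // depthTexture: RFloat, sized to the frame
    {
        var result = MLDepthCamera.GetLatestDepthData(0, out MLDepthCamera.Data data);
        if (!result.IsOk || data.DepthImage == null)
        {
            return;
        }

        depthTexture.LoadRawTextureData(data.DepthImage.Value.Data);
        depthTexture.Apply();

        // Each pixel is a float distance in meters (roughly 0-5 m for long range).
        NativeArray<float> meters = depthTexture.GetRawTextureData<float>();
        Debug.Log($"Center pixel depth: {meters[meters.Length / 2]} m");
    }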


I really appreciate your response.

I previously used a depth image for my project, but the image was automatically flattened, as shown in the picture below. I don’t want to use a flattened image for my application. Could you please teach me how to use a raw depth image in my application?

And this is my current code. I tried to implement it while referring to this paper:

https://www.researchgate.net/figure/Range-image-and-depth-image-corresponding-to-the-long-throw-intensity-image-shown-in_fig3_339376481

    private Vector3? Convert_depth_Point_to_WorldPoint(int x, int y)
    {
        // Calculate the linear index based on x, y coordinates.
        int index = x + (resolution.y - y - 1) * resolution.x;
        float rawDepth = depthData[index];

        // Skip processing if confidence is too low (if the filter is enabled).
        // Confidence comes directly from the sensor pipeline and is represented as a float ranging from
        // [-1.0, 0.0] for long range and [-0.1, 0.0] for short range, where 0 is highest confidence.
        if (useConfidenceFilter && confidenceData[index] < -0.1f)
        {
            Debug.Log("Low confidence: " + confidenceData[index]);
            return null;
        }

        // Use the cached projection table to find the normalized image-plane coordinates for the current point.
        Vector2 uv = cachedProjectionTable[y, x];
        // Convert the range value to a planar depth (Z) for this pixel.
        float depth = Convert_RangeValueToDepth(rawDepth, uv.x, uv.y);

        Debug.Log("RawDepth: " + rawDepth);
        Debug.Log("Depth: " + depth);

        // Vector3 cameraPoint = new Vector3(uv.x, uv.y, 1).normalized * depth;
        Vector3 cameraPoint = new Vector3(uv.x, uv.y, 1) * depth;
        // Convert the camera space point into a world space point.
        Vector3 worldPoint = cameraToWorldMatrix.MultiplyPoint3x4(cameraPoint);
        Debug.Log(worldPoint.ToString());
        return worldPoint;
    }

    private float Convert_RangeValueToDepth(float range, float u, float v)
    {
        // Range is the distance along the ray (u, v, 1); its Z component is range / |(u, v, 1)|.
        float depth = range / Mathf.Sqrt(u * u + v * v + 1);
        return depth;
    }

And here is the full code:

using Cysharp.Threading.Tasks;
using LabAssistVision;
using System.Collections;
using System.Threading.Tasks;
using Unity.Collections;
using UnityEngine;
using UnityEngine.XR.MagicLeap;

/// <summary>
/// Converts the depth images from the Magic Leap 2 to an array of 3d points and sends the points to be rendered by a PointCloudRender script.
/// Points will only be submitted to the PointCloudRenderer when the bumper button is pressed.
/// </summary>
public class DepthCameraPoint : SingletonMonoBehaviour<DepthCameraPoint>
{

    private bool permissionGranted = false;
    private Texture2D depthTexture;
    private Texture2D confidenceTexture;
    DepthInfo? recent_depth_data;
    private readonly MLPermissions.Callbacks permissionCallbacks = new();
    [SerializeField, Tooltip("Timeout in milliseconds for data retrieval.")]
    private ulong timeout = 0;
    private bool isPerceptionSystemStarted;
    private MLDepthCamera.Stream stream = MLDepthCamera.Stream.LongRange;
    private MLDepthCamera.CaptureFlags captureFlag = MLDepthCamera.CaptureFlags.RawDepthImage | MLDepthCamera.CaptureFlags.Confidence;



//#if !UNITY_EDITOR
    protected override void Awake()
    {
        base.Awake();
        permissionCallbacks.OnPermissionGranted += OnPermissionGranted;
        permissionCallbacks.OnPermissionDenied += OnPermissionDenied;
        permissionCallbacks.OnPermissionDeniedAndDontAskAgain += OnPermissionDenied;

    }

    private void OnDestroy()
    {
        permissionCallbacks.OnPermissionGranted -= OnPermissionGranted;
        permissionCallbacks.OnPermissionDenied -= OnPermissionDenied;
        permissionCallbacks.OnPermissionDeniedAndDontAskAgain -= OnPermissionDenied;
        if (MLDepthCamera.IsConnected)
        {
            DisconnectCamera();
        }

    }

    void Start()
    {
        MLPermissions.RequestPermission(MLPermission.DepthCamera, permissionCallbacks);
    }


    /// <summary>
    /// Starts the depth camera if it's not already running by setting its configuration and connecting to it.
    /// </summary>
    private void OnPermissionGranted(string permission)
    {
        MLPluginLog.Debug($"Granted {permission}.");
        permissionGranted = true;

        MLDepthCamera.StreamConfig[] config = new MLDepthCamera.StreamConfig[2];

        int i = (int)MLDepthCamera.FrameType.LongRange;
        config[i].Flags = (uint)captureFlag;
        config[i].Exposure = 1600;
        config[i].FrameRateConfig = MLDepthCamera.FrameRate.FPS_5;

        i = (int)MLDepthCamera.FrameType.ShortRange;
        config[i].Flags = (uint)captureFlag;
        config[i].Exposure = 375;
        config[i].FrameRateConfig = MLDepthCamera.FrameRate.FPS_5;

        var settings = new MLDepthCamera.Settings()
        {
            Streams = stream,
            StreamConfig = config
        };

        MLDepthCamera.SetSettings(settings);

        ConnectCamera();
        UpdateSettings();
    }
    private void ConnectCamera()
    {
        var result = MLDepthCamera.Connect();
        if (result.IsOk && MLDepthCamera.IsConnected)
        {
            isPerceptionSystemStarted = true;
            Debug.Log($"Connected to new depth camera with stream = {MLDepthCamera.CurrentSettings.Streams}");
        }
        else
        {
            Debug.LogError($"Failed to connect to camera: {result.Result}");
        }
    }
    private void UpdateSettings()
    {
        MLDepthCamera.StreamConfig[] config = new MLDepthCamera.StreamConfig[2];

        int i = (int)MLDepthCamera.FrameType.LongRange;
        config[i].Flags = (uint)captureFlag;
        config[i].Exposure = 1600;
        config[i].FrameRateConfig = MLDepthCamera.FrameRate.FPS_5;

        i = (int)MLDepthCamera.FrameType.ShortRange;
        config[i].Flags = (uint)captureFlag;
        config[i].Exposure = 375;
        config[i].FrameRateConfig = MLDepthCamera.FrameRate.FPS_5;

        var settings = new MLDepthCamera.Settings()
        {
            Streams = stream,
            StreamConfig = config
        };

        MLDepthCamera.UpdateSettings(settings);
    }


    private void DisconnectCamera()
    {
        var result = MLDepthCamera.Disconnect();
        if (result.IsOk && !MLDepthCamera.IsConnected)
        {
            Debug.Log($"Disconnected depth camera with stream = {MLDepthCamera.CurrentSettings.Streams}");
        }
        else
        {
            Debug.LogError($"Failed to disconnect to camera: {result.Result}");
        }
    }



    private void OnPermissionDenied(string permission)
    {
        if (permission == MLPermission.Camera)
        {
            MLPluginLog.Error($"{permission} denied, example won't function.");
        }
        else if (permission == MLPermission.DepthCamera)
        {
            MLPluginLog.Error($"{permission} denied, example won't function.");
        }
    }


  
    void Update()
    {
        if (!permissionGranted || !MLDepthCamera.IsConnected)
        {
            return;
        }

        var result = MLDepthCamera.GetLatestDepthData(timeout, out MLDepthCamera.Data data);
        if (result.IsOk)
        {
            if (data.RawDepthImage.HasValue && data.ConfidenceBuffer.HasValue)
            {
              
                recent_depth_data = new DepthInfo(data.Position, data.Rotation, data.RawDepthImage.Value, data.ConfidenceBuffer.Value, data.Intrinsics);
               
            }
            
        }


    }


    public  Converter_RGB_to_World CreateConverter()
    {
     
        Debug.Log("recent_depth_data:" + recent_depth_data);
        if(recent_depth_data.HasValue){
            depthTexture = CreateOrUpdateTexture(depthTexture, recent_depth_data.Value.rawdepthBuffer);
            confidenceTexture = CreateOrUpdateTexture(confidenceTexture, recent_depth_data.Value.confidenceBuffer);
            Matrix4x4 cameraToWorldMatrix = new Matrix4x4();
            cameraToWorldMatrix.SetTRS(recent_depth_data.Value.Position, recent_depth_data.Value.Rotation, Vector3.one);
            Converter_RGB_to_World converter_RGB_To_World = new Converter_RGB_to_World(depthTexture, confidenceTexture, recent_depth_data.Value.Intrinsics, cameraToWorldMatrix);
          
            return converter_RGB_To_World;
        }
        else
        {
            return null;
        }
      
    }


    public Texture2D Copy_to_NewTexture(Texture2D input)
    {
        Texture2D newTexture = new Texture2D(input.width, input.height);
        newTexture.SetPixels(input.GetPixels());
        newTexture.Apply();
        return newTexture;
    }

 
    private Texture2D CreateOrUpdateTexture(Texture2D texture, MLDepthCamera.FrameBuffer frameBuffer)
    {
        if (texture == null)
        {
            texture = new Texture2D((int)frameBuffer.Width, (int)frameBuffer.Height, TextureFormat.RFloat, false);
        }

        texture.LoadRawTextureData(frameBuffer.Data);
        texture.Apply();
        return texture;
    }
}



public class Converter_RGB_to_World
{
    public NativeArray<float> depthData;
    public NativeArray<float> confidenceData;
    public MLDepthCamera.Intrinsics intrinsics;
    public Matrix4x4 cameraToWorldMatrix;
    public Vector2Int resolution;
    private bool useConfidenceFilter = true;
    [SerializeField]
    private float minDepth = 0.2f;
    [SerializeField] private float maxDepth = 10f;
    private Vector2[,] cachedProjectionTable;
    private static readonly Vector2 Half2 = Vector2.one * 0.5f;
    public Converter_RGB_to_World(Texture2D depthTexture, Texture2D confidenceTexture, MLDepthCamera.Intrinsics intrinsics, Matrix4x4 cameraToWorldMatrix)
    {
        depthData = depthTexture.GetRawTextureData<float>();
        confidenceData = confidenceTexture.GetRawTextureData<float>();
        this.cameraToWorldMatrix = cameraToWorldMatrix;
        this.resolution = new Vector2Int((int)intrinsics.Width, (int)intrinsics.Height);
        this.intrinsics = intrinsics;
    }

    public async UniTask<Vector3?> Convert_2D_To_WorldPointAsync(int x, int y)
    {

        return await UniTask.RunOnThreadPool<Vector3?>(() =>
        {
            // Ensure the projection table is calculated and cached to avoid recomputation.
            if (cachedProjectionTable == null)
            {
                cachedProjectionTable = CreateProjectionTable();
                Debug.Log("DepthCloud: Projection Table Created");
            }

            // Process depth points to populate the cachedDepthPoints array with world positions.
            return Convert_depth_Point_to_WorldPoint(x, y);

        });
    }

    private static Vector2 ProjectToNormalizedViewport(Vector3 direction, Quaternion cameraRotation)
    {
        Vector3 localDirection = Quaternion.Inverse(cameraRotation) * direction;

        // Project onto the imaging plane
        float x = localDirection.x / localDirection.z;
        float y = localDirection.y / localDirection.z;
       
        return new Vector2(x, y);
    }

    public async UniTask<float> WorldPointToDepthValueAsync(CameraFrame frame, Vector3 worldPoint)
    {
        Vector2 depthPoint = WorldPointToDepthPixel(worldPoint);
        Vector3? newWorldPoint = await Convert_2D_To_WorldPointAsync((int)depthPoint.x, (int)depthPoint.y);

        if (newWorldPoint == null)
        {
            return -1;
        }

        float distance = Vector3.Distance(frame.Extrinsic.Position,newWorldPoint.Value);

        return distance;
    }




    public Vector2 WorldPointToDepthPixel(Vector3 worldPoint)
    {
        // Step 1: World to Camera Ray

        Vector3 rayDirection = (worldPoint - cameraToWorldMatrix.GetPosition()).normalized;

        // Step 2: Ray to Normalized Viewport
        Vector2 normalizedPoint = ProjectToNormalizedViewport(rayDirection, cameraToWorldMatrix.rotation);

        // Step 3: Apply Distortion (if necessary)
        //Vector2 distortedNormalizedPoint = ApplyDistortion(normalizedPoint);

        Vector2 focalLength = intrinsics.FocalLength;
        Vector2 principalPoint = intrinsics.PrincipalPoint;

        // Step 4: Normalized Viewport to Pixel Coordinates
        return NormalizedToPixel(focalLength, principalPoint, normalizedPoint, intrinsics.Height);
    }

    private static Vector2 NormalizedToPixel(Vector2 focalLength, Vector2 principalPoint, Vector2 normalizedPoint, float height)
    {
        float u = normalizedPoint.x * focalLength.x + principalPoint.x;
        float v = height - (normalizedPoint.y * focalLength.y + principalPoint.y);
        return new Vector2(u, v);
    }



    private Vector2[,] CreateProjectionTable()
    {
        // Convert the camera's resolution from intrinsics to a Vector2Int for easier manipulation.
        Vector2Int resolution = new Vector2Int((int)intrinsics.Width, (int)intrinsics.Height);
        // Initialize the projection table with the same dimensions as the camera's resolution.
        Vector2[,] projectionTable = new Vector2[resolution.y, resolution.x];

        // Iterate over each pixel in the resolution.
        for (int y = 0; y < resolution.y; ++y)
        {
            for (int x = 0; x < resolution.x; ++x)
            {
                // Normalize the current pixel coordinates to a range of [0, 1] by dividing
                // by the resolution. This converts pixel coordinates to UV coordinates.
                Vector2 uv = new Vector2(x, y) / new Vector2(resolution.x, resolution.y);

                // Apply distortion correction to the UV coordinates. This step compensates
                // for the lens distortion inherent in the depth camera's optics.
                Vector2 correctedUV = Undistort(uv, intrinsics.Distortion);

                // Convert the corrected UV coordinates back to pixel space, then shift
                // them by the principal point and divide by the focal length to obtain
                // normalized image-plane coordinates. These are what the range-to-depth
                // conversion and the (uv.x, uv.y, 1) ray construction above expect.
                projectionTable[y, x] = ((correctedUV * new Vector2(resolution.x, resolution.y)) - intrinsics.PrincipalPoint) / intrinsics.FocalLength;
            }
        }
        //Return the created projection Table
        return projectionTable;
    }
    private Vector2 Undistort(Vector2 uv, MLDepthCamera.DistortionCoefficients distortionParameters)
    {
        // Calculate the offset from the center of the image.
        Vector2 offsetFromCenter = uv - Half2;

        // Compute radial distance squared (r^2), its fourth power (r^4), and its sixth power (r^6) for radial distortion correction.
        float rSquared = Vector2.Dot(offsetFromCenter, offsetFromCenter);
        float rSquaredSquared = rSquared * rSquared;
        float rSquaredCubed = rSquaredSquared * rSquared;

        // Apply radial distortion correction based on the distortion coefficients.
        Vector2 radialDistortionCorrection = offsetFromCenter * (float)(1 + distortionParameters.K1 * rSquared + distortionParameters.K2 * rSquaredSquared + distortionParameters.K3 * rSquaredCubed);

        // Compute tangential distortion correction.
        float tangentialDistortionCorrectionX = (float)((2 * distortionParameters.P1 * offsetFromCenter.x * offsetFromCenter.y) + (distortionParameters.P2 * (rSquared + 2 * offsetFromCenter.x * offsetFromCenter.x)));
        float tangentialDistortionCorrectionY = (float)((2 * distortionParameters.P2 * offsetFromCenter.x * offsetFromCenter.y) + (distortionParameters.P1 * (rSquared + 2 * offsetFromCenter.y * offsetFromCenter.y)));
        Vector2 tangentialDistortionCorrection = new Vector2(tangentialDistortionCorrectionX, tangentialDistortionCorrectionY);

        // Combine the radial and tangential distortion corrections and adjust back to original image coordinates.
        return (radialDistortionCorrection + tangentialDistortionCorrection) + Half2;
    }
    private Vector3? Convert_depth_Point_to_WorldPoint(int x, int y)
    {
        // Calculate the linear index based on x, y coordinates.
        int index = x + (resolution.y - y - 1) * resolution.x;
        float rawDepth = depthData[index];

        // Skip processing if confidence is too low (if the filter is enabled).
        // Confidence comes directly from the sensor pipeline and is represented as a float ranging from
        // [-1.0, 0.0] for long range and [-0.1, 0.0] for short range, where 0 is highest confidence.
        if (useConfidenceFilter && confidenceData[index] < -0.1f)
        {
            Debug.Log("Low confidence: " + confidenceData[index]);
            return null;
        }

        // Use the cached projection table to find the normalized image-plane coordinates for the current point.
        Vector2 uv = cachedProjectionTable[y, x];
        // Convert the range value to a planar depth (Z) for this pixel.
        float depth = Convert_RangeValueToDepth(rawDepth, uv.x, uv.y);

        Debug.Log("RawDepth: " + rawDepth);
        Debug.Log("Depth: " + depth);

        // Vector3 cameraPoint = new Vector3(uv.x, uv.y, 1).normalized * depth;
        Vector3 cameraPoint = new Vector3(uv.x, uv.y, 1) * depth;
        // Convert the camera space point into a world space point.
        Vector3 worldPoint = cameraToWorldMatrix.MultiplyPoint3x4(cameraPoint);
        Debug.Log(worldPoint.ToString());
        return worldPoint;
    }

    private float Convert_RangeValueToDepth(float range, float u, float v)
    {
        // Range is the distance along the ray (u, v, 1); its Z component is range / |(u, v, 1)|.
        float depth = range / Mathf.Sqrt(u * u + v * v + 1);
        return depth;
    }
}

public struct DepthInfo
{
    public Vector3 Position;
    public Quaternion Rotation;
    public MLDepthCamera.Intrinsics Intrinsics;
    public MLDepthCamera.FrameBuffer rawdepthBuffer;
    public MLDepthCamera.FrameBuffer confidenceBuffer;

    public DepthInfo(Vector3 position, Quaternion rotation, MLDepthCamera.FrameBuffer rawdepthBuffer, MLDepthCamera.FrameBuffer confidenceBuffer, MLDepthCamera.Intrinsics Intrinsics)
    {
        Position = position;
        Rotation = rotation;
        this.rawdepthBuffer = rawdepthBuffer;
        this.confidenceBuffer = confidenceBuffer;
        this.Intrinsics = Intrinsics;
    }
}
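For reference, here is a minimal, illustrative sketch of how the converter above could be called from another script (DepthCameraPoint.Instance assumes the SingletonMonoBehaviour base class exposes an Instance property; the pixel coordinates are placeholders):

    using Cysharp.Threading.Tasks;
    using UnityEngine;

    // Illustrative caller only: projects a single depth pixel into world space.
    public class DepthPixelProbe : MonoBehaviour
    {
        [SerializeField] private Vector2Int pixel = new Vector2Int(100, 100);

        public async UniTaskVoid ProbeAsync()
        {
            Converter_RGB_to_World converter = DepthCameraPoint.Instance.CreateConverter();
            if (converter == null)
            {
                Debug.Log("No depth frame captured yet.");
                return;
            }

            Vector3? worldPoint = await converter.Convert_2D_To_WorldPointAsync(pixel.x, pixel.y);
            Debug.Log(worldPoint.HasValue
                ? $"Pixel {pixel} -> world {worldPoint.Value}"
                : $"Pixel {pixel} rejected (low confidence).");
        }
    }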

Can you describe what you mean by this? The other image is rendered with a general shader, so the shading would not correspond to the actual depth. In the C++ header it is described as:

The Raw Depth Image: This is the raw depth camera sensor data captured with the depth camera illumination and corresponds to the amount of total light incident on the sensor.