getting depth value using vision sensor

Typically: "How do I... ", "How can I... " questions
Post Reply
NandanUmrikar
Posts: 12
Joined: 11 Mar 2024, 09:45

getting depth value using vision sensor

Post by NandanUmrikar »

I'm using a vision sensor for an object-follower application. I took the UR5 model from the ObjectTracking example and used it directly in my scene, which works perfectly. I now need to get the depth information for an object in the scene. I changed the code accordingly and the simulation runs fine, but I'm not getting any output for the depth values. Adding the code below. Please check and give me some suggestions on how to fix this.

Code: Select all

# python

import math

import numpy as np
import cv2 as cv

def readFrame():
    # handle the sensor explicitly, then read its RGB image into a numpy array
    sim.handleVisionSensor(self.handle)
    buf, res = sim.getVisionSensorImg(self.handle)
    return np.frombuffer(buf, dtype=np.uint8).reshape(*res, 3)

def showImage(k, img):
    # push the image to the vision sensor named k (used as a debug display)
    sim.setVisionSensorImg(sim.getObject(f':/{k}'), img.tobytes())

def sysCall_init():
    sim = require('sim')
    self.handle = sim.getObject('.')
    robot = sim.getObject(':')
    script = sim.getScript(sim.scripttype_childscript, robot)
    self.robotScriptFuncs = sim.getScriptFunctions(script)
    # store the sensor resolution so sysCall_sensing can use it
    self.resX = sim.getObjectInt32Param(self.handle, sim.visionintparam_resolution_x)
    self.resY = sim.getObjectInt32Param(self.handle, sim.visionintparam_resolution_y)

def sysCall_sensing():
    m = sim.getObjectMatrix(self.handle)
    res, packet1, packet2 = sim.handleVisionSensor(self.handle)
    if res >= 0:
        # packet2 is filled by the blob detection filter:
        # [blob count, values per blob, then per blob: size, orientation, pos x, pos y, width, height]
        blobCnt = int(packet2[0])
        valCnt = int(packet2[1])
        for i in range(blobCnt):
            blobPositionX = packet2[2 + valCnt * i + 2]
            blobPositionY = packet2[2 + valCnt * i + 3]
            # read a 1x1 depth patch at the blob position; options=1 returns meters
            px = math.floor(blobPositionX * (self.resX - 0.99))
            py = math.floor(blobPositionY * (self.resY - 0.99))
            depthBuf, _ = sim.getVisionSensorDepth(self.handle, 1, [px, py], [1, 1])
            depth = sim.unpackFloatTable(depthBuf)
            print("depth =", depth)

def sysCall_thread():
    while True:
        frame = readFrame()
        imgh, imgw, imgc = frame.shape  # numpy image shape is (height, width, channels)
        blurred = cv.GaussianBlur(frame, (13, 13), 0)
        hsv = cv.cvtColor(blurred, cv.COLOR_RGB2HSV)
        # construct a mask for the color "red", then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv.inRange(hsv, (0, 200, 64), (80, 255, 200))
        n = 5
        mask = cv.dilate(mask, None, iterations=n)
        mask = cv.erode(mask, None, iterations=n)
        maskdbg = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)
        
        contours, hierarchy = cv.findContours(mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        if contours:
            c = max(contours, key=cv.contourArea)
            # Get the minimum enclosing circle for the contour
            ((x, y), radius) = cv.minEnclosingCircle(c)
            center = (int(x), int(y))
            radius = int(radius)
            cv.circle(maskdbg, center, radius, (0, 255, 0), 2)
            cv.arrowedLine(maskdbg, center, (imgw//2, imgh//2), (255, 0, 0), 2)

            #print ("radius=",radius)
            # Calculate the normalized center coordinates
            normalize = lambda x: np.interp(x, (0, imgw), (-1, 1))
            x_normalized, y_normalized = map(normalize, (x, y))

            # Set the box center using the provided function from the robot script
            self.robotScriptFuncs.setBoxCenter(x_normalized, y_normalized)

        #if contours:
            #c = max(contours, key = cv.contourArea)
            #x, y, w, h = cv.boundingRect(c)
            #cv.rectangle(maskdbg, (x, y), (x+w, y+h), (0, 255, 0), 2)
            #cv.arrowedLine(maskdbg, (x+w//2, y+h//2), (imgw//2, imgh//2), (255, 0, 0), 2)
            
            # normalize rect in -1 ... +1:
            #normalize = lambda x: np.interp(x[0], (0, x[1]), (-1, 1))
            #x, y, w, h = map(normalize, ((x, imgw), (y, imgh), (w, imgw), (h, imgh)))
            
            # box center:
            #self.robotScriptFuncs.setBoxCenter(x , y )

        showImage('blurred', blurred)
        showImage('hsv', hsv)
        showImage('mask', maskdbg)
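
In essence, the depth lookup I'm trying to do, stripped of everything else, is something like the following (a minimal sketch, not my full script; '/visionSensor' is a placeholder alias, and I'm assuming a recent CoppeliaSim where sim.getVisionSensorDepth returns the buffer together with the resolution):

Code: Select all

# python
import math

def sysCall_init():
    sim = require('sim')
    # placeholder alias; in my scene the script is attached to the sensor itself
    self.sensor = sim.getObject('/visionSensor')
    self.resX = sim.getObjectInt32Param(self.sensor, sim.visionintparam_resolution_x)
    self.resY = sim.getObjectInt32Param(self.sensor, sim.visionintparam_resolution_y)

def sysCall_sensing():
    sim.handleVisionSensor(self.sensor)
    # normalized image coordinates (0..1), e.g. the blob position from the filter
    u, v = 0.5, 0.5
    px = min(self.resX - 1, math.floor(u * self.resX))
    py = min(self.resY - 1, math.floor(v * self.resY))
    # options=1: depth in meters instead of normalized 0..1 values
    buf, _res = sim.getVisionSensorDepth(self.sensor, 1, [px, py], [1, 1])
    depth = sim.unpackFloatTable(buf)
    print('depth at ({}, {}) ='.format(px, py), depth)
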
Thank you in advance!!!

coppelia
Site Admin
Posts: 10375
Joined: 14 Dec 2012, 00:25

Re: getting depth value using vision sensor

Post by coppelia »

Hello,

Make sure the vision sensor has the checkbox Ignore depth info (faster) unchecked...
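
As a quick sanity check, you can also read back the whole depth buffer and print its range, to verify that depth values are actually being produced. A minimal sketch (it assumes the child script is attached to the vision sensor itself, and that in your CoppeliaSim version sim.getVisionSensorDepth returns the buffer together with the resolution):

Code: Select all

# python
def sysCall_init():
    sim = require('sim')
    self.handle = sim.getObject('.')  # the vision sensor this script is attached to

def sysCall_sensing():
    # make sure the sensor has been handled before reading its buffers
    sim.handleVisionSensor(self.handle)
    # default pos/size: the whole image; options=1: depth in meters
    buf, res = sim.getVisionSensorDepth(self.handle, 1)
    depth = sim.unpackFloatTable(buf)
    print('depth buffer {}x{}: min={:.3f}, max={:.3f}'.format(res[0], res[1], min(depth), max(depth)))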

Cheers

NandanUmrikar
Posts: 12
Joined: 11 Mar 2024, 09:45

Re: getting depth value using vision sensor

Post by NandanUmrikar »

coppelia wrote: 21 Mar 2024, 15:25
Hello,

Make sure the vision sensor has the checkbox Ignore depth info (faster) unchecked...

Cheers
Already verified that; the option is unchecked. Everything else works fine, but the depth value won't show on the console. I tried printing other values (you can see the commented-out print(radius) command), which worked perfectly, but not the depth. I don't know whether it's the code or something else causing the issue.

coppelia
Site Admin
Posts: 10375
Joined: 14 Dec 2012, 00:25

Re: getting depth value using vision sensor

Post by coppelia »

Please share the scene file.
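
In the meantime, one way to narrow it down on your side is to wrap the depth read in a try/except, so that any error raised inside sysCall_sensing is printed right next to your other output. A rough sketch (reusing self.handle from your sysCall_init):

Code: Select all

# python
import traceback

def sysCall_sensing():
    try:
        sim.handleVisionSensor(self.handle)
        # 1x1 depth patch at pixel (0, 0), just to exercise the call
        buf, _res = sim.getVisionSensorDepth(self.handle, 1, [0, 0], [1, 1])
        print('depth =', sim.unpackFloatTable(buf))
    except Exception:
        print(traceback.format_exc())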

Cheers
