getting depth value using vision sensor
Posted: 21 Mar 2024, 06:20
I'm using a vision sensor for an object follower application. I used the UR5 model from the ObjectTracking example directly into my scene, which works perfectly. I need to get the depth information foe an object in the scene. I changed the code accordingly and the simulation works fine but I'm not getting any output for the depth values. Adding the code below. Please check and give me some suggestions on how to fix this.
Thank you in advance!!!
Code: Select all
# python
import numpy as np
import cv2 as cv
def readFrame():
sim.handleVisionSensor(self.handle)
buf, res = sim.getVisionSensorImg(self.handle)
return np.frombuffer(buf, dtype=np.uint8).reshape(*res, 3)
def showImage(k, img):
sim.setVisionSensorImg(sim.getObject(f':/{k}'), img.tobytes())
def sysCall_init():
sim = require('sim')
self.handle = sim.getObject('.')
robot = sim.getObject(':')
script = sim.getScript(sim.scripttype_childscript, robot)
self.robotScriptFuncs = sim.getScriptFunctions(script)
resX=sim.getObjectInt32Param(self.handle,sim.visionintparam_resolution_x)
resY=sim.getObjectInt32Param(self.handle,sim.visionintparam_resolution_y)
def sysCall_sensing():
m=sim.getObjectMatrix(self.handle)
res,packet1,packet2=sim.handleVisionSensor(self.handle)
if res>=0:
blobCnt=packet2[1]
valCnt=packet2[2]
for i in range(blobCnt):
blobPositionX=packet2[2+valCnt*i+3]
blobPositionY=packet2[2+valCnt*i+4]
depth=sim.getVisionSensorDepth(self.handle,1,{1+math.floor(blobPositionX*(resX-0.99)),1+math.floor(blobPositionY*(resY-0.99))},{1,1})
depth=sim.unpackFloatTable(depth,1)
print ("depth=",depth)
def sysCall_thread():
while True:
frame = readFrame()
imgw, imgh, imgc = frame.shape
blurred = cv.GaussianBlur(frame, (13, 13), 0)
hsv = cv.cvtColor(blurred, cv.COLOR_RGB2HSV)
# construct a mask for the color "red", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv.inRange(hsv, (0, 200, 64), (80, 255, 200))
n = 5
mask = cv.dilate(mask, None, iterations=n)
mask = cv.erode(mask, None, iterations=n)
maskdbg = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)
contours, hierarchy = cv.findContours(mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
if contours:
c = max(contours, key=cv.contourArea)
# Get the minimum enclosing circle for the contour
((x, y), radius) = cv.minEnclosingCircle(c)
center = (int(x), int(y))
radius = int(radius)
cv.circle(maskdbg, center, radius, (0, 255, 0), 2)
cv.arrowedLine(maskdbg, center, (imgw//2, imgh//2), (255, 0, 0), 2)
#print ("radius=",radius)
# Calculate the normalized center coordinates
normalize = lambda x: np.interp(x, (0, imgw), (-1, 1))
x_normalized, y_normalized = map(normalize, (x, y))
# Set the box center using the provided function from the robot script
self.robotScriptFuncs.setBoxCenter(x_normalized, y_normalized)
#if contours:
#c = max(contours, key = cv.contourArea)
#x, y, w, h = cv.boundingRect(c)
#cv.rectangle(maskdbg, (x, y), (x+w, y+h), (0, 255, 0), 2)
#cv.arrowedLine(maskdbg, (x+w//2, y+h//2), (imgw//2, imgh//2), (255, 0, 0), 2)
# normalize rect in -1 ... +1:
#normalize = lambda x: np.interp(x[0], (0, x[1]), (-1, 1))
#x, y, w, h = map(normalize, ((x, imgw), (y, imgh), (w, imgw), (h, imgh)))
# box center:
#self.robotScriptFuncs.setBoxCenter(x , y )
showImage('blurred', blurred)
showImage('hsv', hsv)
showImage('mask', maskdbg)