From 719a194a50fe79aeb4fd51a9cd5daeba2a83c752 Mon Sep 17 00:00:00 2001
From: Omer Erdinc Yagmurlu
Date: Fri, 19 Jan 2024 18:20:31 +0100
Subject: [PATCH] add depthai camera support

---
 robohive/envs/env_base.py          |  4 +++
 robohive/robot/hardware_depthai.py | 39 +++++++++++++++++++++++++++---
 robohive/robot/robot.py            | 10 +++++---
 3 files changed, 46 insertions(+), 7 deletions(-)

diff --git a/robohive/envs/env_base.py b/robohive/envs/env_base.py
index 9bff0158..bead4a43 100644
--- a/robohive/envs/env_base.py
+++ b/robohive/envs/env_base.py
@@ -311,7 +311,11 @@ def get_obs(self, update_proprioception=True, update_exteroception=False):
         # Don't update extero keys by default for efficiency
         # User should make an explicit call to get_visual_dict when needed
         if update_exteroception:
+            # import time
+            # aa = time.time()
             self.visual_dict = self.get_visuals(sim=self.sim_obsd)
+            # bb = time.time()
+            # print("Time taken for visual dict: ", bb-aa)
 
         # recoved observation vector from the obs_dict
         t, obs = self.obsdict2obsvec(self.obs_dict, self.obs_keys)
diff --git a/robohive/robot/hardware_depthai.py b/robohive/robot/hardware_depthai.py
index 70e832ba..7e3dee64 100644
--- a/robohive/robot/hardware_depthai.py
+++ b/robohive/robot/hardware_depthai.py
@@ -21,10 +21,34 @@ def __init__(self, device_MxId, **kwargs):
         pipeline = dai.Pipeline()
 
+        # # Closer-in minimum depth, disparity range is doubled (from 95 to 190):
+        # extended_disparity = False
+        # # Better accuracy for longer distance, fractional disparity 32-levels:
+        # subpixel = False
+        # # Better handling for occlusions:
+        # lr_check = True
+        # monoLeft = pipeline.create(dai.node.MonoCamera)
+        # monoRight = pipeline.create(dai.node.MonoCamera)
+        # monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+        # monoLeft.setCamera("left")
+        # monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+        # monoRight.setCamera("right")
+        # depth = pipeline.create(dai.node.StereoDepth)
+        # depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+        # # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
+        # depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
+        # depth.setLeftRightCheck(lr_check)
+        # depth.setExtendedDisparity(extended_disparity)
+        # depth.setSubpixel(subpixel)
+        # xoutDepth = pipeline.create(dai.node.XLinkOut)
+        # xoutDepth.setStreamName("depth")
+        # monoLeft.out.link(depth.left)
+        # monoRight.out.link(depth.right)
+        # depth.depth.link(xoutDepth.input)
+
         camRgb = pipeline.create(dai.node.ColorCamera)
         camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
         camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
-
         xoutRgb = pipeline.create(dai.node.XLinkOut)
         xoutRgb.setStreamName("rgb")
         camRgb.video.link(xoutRgb.input)
 
@@ -34,17 +58,22 @@ def __init__(self, device_MxId, **kwargs):
 
         self.frame = None
         self.timestamp = None
+        self.depthFrame = np.zeros((1080, 1920))
         self.should_stop = threading.Event()
 
     def run(self):
         with dai.Device(self.pipeline, self.device_info) as device:
             # Output queue will be used to get the rgb frames from the output defined above
             qRgb = device.getOutputQueue(name="rgb", maxSize=1, blocking=False)
+            # qDepth = device.getOutputQueue(name="depth", maxSize=1, blocking=False)
             while not self.should_stop.is_set():
                 inRgb = qRgb.get()
+                # inDepth = qDepth.get()  # blocking call, will wait until a new data has arrived
                 if inRgb is not None:
-                    self.frame = inRgb.getFrame()
+                    self.frame = inRgb.getCvFrame()
                     self.timestamp = time.time()
+                # if inDepth is not None:
+                #     self.depthFrame = inDepth.getFrame()
 
 class DepthAI(hardwareBase):
     def __init__(self, name, device_MxId, **kwargs):
@@ -59,10 +88,12 @@ def connect(self):
     def get_sensors(self):
         # get all data from all topics
         last_img = copy.deepcopy(self.thread.frame)
-        while last_img is None:
+        last_depth = copy.deepcopy(self.thread.depthFrame)
+        while last_img is None or last_depth is None:
             last_img = copy.deepcopy(self.thread.frame)
+            last_depth = copy.deepcopy(self.thread.depthFrame)
             time.sleep(0.1)
-        return {'time':self.thread.timestamp, 'rgb': last_img}
+        return {'time':self.thread.timestamp, 'rgb': last_img, 'd': last_depth}
 
     def apply_commands(self):
         return 0
diff --git a/robohive/robot/robot.py b/robohive/robot/robot.py
index 60dc1482..fe873782 100644
--- a/robohive/robot/robot.py
+++ b/robohive/robot/robot.py
@@ -146,6 +146,10 @@ def hardware_init(self, robot_config):
                 from .hardware_realsense_single import RealsenseAPI
                 device['robot'] = RealsenseAPI(**device['interface'])
 
+            elif device['interface']['type'] == 'depthai':
+                from .hardware_depthai import DepthAI
+                device['robot'] = DepthAI(name=name, **device['interface'])
+
             elif device['interface']['type'] == 'robotiq':
                 from .hardware_robotiq import Robotiq
                 device['robot'] = Robotiq(name=name, **device['interface'])
@@ -173,7 +177,7 @@ def hardware_init(self, robot_config):
                 device['robot'].engage_motor(motor_id=device['actuator_ids'], enable=True)
 
             # Other devices
-            elif device['interface']['type'] in ['optitrack', 'franka', 'realsense', 'robotiq', 'frankahand']:
+            elif device['interface']['type'] in ['optitrack', 'franka', 'realsense', 'depthai', 'robotiq', 'frankahand']:
                 device['robot'].connect()
 
             else:
@@ -304,7 +308,7 @@ def hardware_close(self):
                 status = device['robot'].close(ids)
                 if status is True:
                     device['robot']= None
-            elif device['interface']['type'] in ['optitrack', 'franka', 'realsense', 'robotiq', 'frankahand']:
+            elif device['interface']['type'] in ['optitrack', 'franka', 'realsense', 'depthai', 'robotiq', 'frankahand']:
                 if device['robot']:
                     print("Closing {} connection".format(device['interface']['type']))
                     status = device['robot'].close()
@@ -446,7 +450,7 @@ def get_visual_sensors(self, height:int, width:int, cameras:list, device_id:int,
         for ind, cam_name in enumerate(cameras):
             assert cam_name in self.robot_config.keys(), "{} camera not found".format(cam_name)
             device = self.robot_config[cam_name]
-            assert device['interface']['type'] == 'realsense', "Check interface type for {}".format(cam)
+            assert device['interface']['type'] in ['realsense', 'depthai'], "Check interface type for {}".format(cam_name)
             data = device['robot'].get_sensors()
             data_height = data['rgb'].shape[0]
             assert data_height == height, "Incorrect image height: required:{}, found:{}".format(height, data_height)
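
Usage sketch (illustrative, not part of the diff): a minimal robot_config
entry that routes through the new 'depthai' branch in hardware_init, plus
direct use of the DepthAI wrapper. The camera name 'right_cam' and the MxId
string are placeholders; extra keys in 'interface' are absorbed by the
wrapper's **kwargs. Note that while the stereo branch above stays commented
out, the 'd' field returned by get_sensors() is the zero-filled placeholder.

    # hypothetical config entry; keys mirror the existing realsense devices
    robot_config = {
        'right_cam': {
            'interface': {
                'type': 'depthai',                      # selects the DepthAI branch in hardware_init
                'device_MxId': '14442C10D13EABCE00',    # placeholder; use your OAK device's MxId
            },
        },
    }

    # direct use of the wrapper, assuming a device with that MxId is attached
    from robohive.robot.hardware_depthai import DepthAI

    cam = DepthAI(name='right_cam', device_MxId='14442C10D13EABCE00')
    cam.connect()
    data = cam.get_sensors()   # {'time': ..., 'rgb': 1080x1920x3 frame, 'd': 1080x1920 placeholder}
    cam.close()                # stops the grab thread, as done in hardware_close()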