transic-robot / transic


Add JointPositionControlFollower() Example Code #4

Closed PeideHuang closed 4 months ago

PeideHuang commented 4 months ago

Great work! Would it be possible to upload example code for the implementation of JointPositionControlFollower?

yunfanjiang commented 4 months ago

Thanks for your interest. Please find a code snippet for the implementation below. Our controller is based on deoxys, so you will likely need to install it as well. Other Franka control libraries, such as polymetis, may also work.

import time

import numpy as np
from deoxys.franka_interface import FrankaInterface

class JointPositionControlFollower:
    """Tracks a joint-position goal via deoxys and commands the gripper once the goal is reached."""

    def __init__(
        self,
        robot_interface: FrankaInterface,
        controller_cfg,
        max_steps: int = 100,
    ):
        self.robot_interface = robot_interface
        self.controller_cfg = controller_cfg

        # joint position goal
        self._q_goal = None

        self._goal_arrived = False

        self._gripper_action = None

        self._max_steps = max_steps
        self._elapsed_steps = 0

        self._prev_gripper_action = None

    @property
    def is_goal_arrived(self):
        return self._goal_arrived

    def set_new_goal(self, new_q_goal, gripper_action):
        assert len(new_q_goal) == 7
        self._q_goal = np.array(new_q_goal)
        self._goal_arrived = False
        self._gripper_action = gripper_action
        self._elapsed_steps = 0

    def control(self):
        assert self._q_goal is not None and self._gripper_action is not None

        # While moving, keep sending the previous gripper action (defaulting to
        # -1, i.e. open, before the first goal) so the gripper only changes
        # state once the arm has reached the joint-position goal.
        action = np.concatenate(
            [
                self._q_goal,
                [
                    self._prev_gripper_action
                    if self._prev_gripper_action is not None
                    else -1
                ],
            ]
        )
        self.robot_interface.control(
            controller_type="JOINT_POSITION",
            action=action,
            controller_cfg=self.controller_cfg,
        )
        self._elapsed_steps += 1

        # The goal counts as reached when every joint is within tolerance, or
        # when the step budget is exhausted (timeout).
        curr_q = self.robot_interface.last_q
        q_error = np.abs(curr_q - self._q_goal)
        if np.max(q_error) > 1.5e-3 and self._elapsed_steps < self._max_steps:
            self._goal_arrived = False
        else:
            self._goal_arrived = True
            # Finally, command the gripper if the requested action differs from
            # the previous one.
            if (
                self._prev_gripper_action is not None
                and self._prev_gripper_action != self._gripper_action
            ):
                self.robot_interface.gripper_control(self._gripper_action)
            self._prev_gripper_action = self._gripper_action
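
For completeness, here is a minimal, untested usage sketch of driving the follower in a loop. The charmander.yml path, config_root, and get_default_controller_config are assumptions about a typical deoxys setup, and the joint target is just an example home configuration; adapt these to your installation.

import time

from deoxys import config_root
from deoxys.franka_interface import FrankaInterface
from deoxys.utils.config_utils import get_default_controller_config

# Connect to the arm and load a joint-position controller config. The config
# path and helper below are assumptions about a typical deoxys install.
robot_interface = FrankaInterface(config_root + "/charmander.yml")
controller_cfg = get_default_controller_config(controller_type="JOINT_POSITION")
follower = JointPositionControlFollower(robot_interface, controller_cfg)

# Give deoxys a moment to receive the first robot state so last_q is populated.
time.sleep(1.0)

# Example target: a common Franka home configuration, with the gripper kept
# open (-1 opens, 1 closes in deoxys's convention).
q_goal = [0.0, -0.785, 0.0, -2.356, 0.0, 1.571, 0.785]
follower.set_new_goal(q_goal, gripper_action=-1)

# Step the follower until the goal is reached or max_steps runs out.
while not follower.is_goal_arrived:
    follower.control()

robot_interface.close()
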
PeideHuang commented 4 months ago

Thank you! This is very helpful!