isaac-sim / IsaacLab

Unified framework for robot learning built on NVIDIA Isaac Sim
https://isaac-sim.github.io/IsaacLab
Other
2.16k stars 888 forks source link

Designing robotic arm motion environment #278

Closed Nimingez closed 4 months ago

Nimingez commented 8 months ago

Hi, I am wondering where I can find out how the get_observation function is fulfilled; it is a declaration of an abstract method in vec_env.py. I found your answer here: https://github.com/leggedrobotics/rsl_rl/issues/11. What does it mean in Orbit? By the way, I want my robotic arm to move from point A to point B and then to point C using rsl_rl. Can you give me a hint? Thank you.

Mayankm96 commented 8 months ago

Here: https://github.com/NVIDIA-Omniverse/orbit/blob/main/source/extensions/omni.isaac.orbit_tasks/omni/isaac/orbit_tasks/utils/wrappers/rsl_rl/vecenv_wrapper.py#L127-L130

What specific hints do you need for that? You can build on top of the reach environment.

Nimingez commented 8 months ago

I want to modify the "reach" task so that the robotic arm moves successively to four designated points: first reaching point A, then point B, followed by point C, and finally point D. Here is my change in reach_env_cfg.py. Is there anything else that needs to be modified?

@configclass
class CommandsCfg:
    """Command terms for the MDP.

    Two fixed-pose commands are defined so the policy can observe both the
    current waypoint (``ee_pose``) and the next waypoint (``target_pose``).
    Every range below is degenerate (min == max), so the "sampled" pose is a
    constant point rather than a uniform random pose.
    """

    # First waypoint: fixed end-effector pose at (0.35, 0.2, 0.15).
    ee_pose = mdp.UniformPoseCommandCfg(
        asset_name="robot",
        body_name=MISSING,  # filled in by the robot-specific config
        resampling_time_range=(4.0, 4.0),  # resample the command every 4 s
        debug_vis=True,
        ranges=mdp.UniformPoseCommandCfg.Ranges(
            pos_x=(0.35, 0.35),
            pos_y=(0.2, 0.2),
            pos_z=(0.15, 0.15),
            roll=(0.0, 0.0),
            pitch=MISSING,  # depends on end-effector axis
            yaw=(0.0, 0.0),
        ),
    )

    # Second waypoint: fixed end-effector pose at (0.65, 0.5, 0.15).
    target_pose = mdp.UniformPoseCommandCfg(
        asset_name="robot",
        body_name=MISSING,  # filled in by the robot-specific config
        resampling_time_range=(4.0, 4.0),  # resample the command every 4 s
        debug_vis=True,
        ranges=mdp.UniformPoseCommandCfg.Ranges(
            pos_x=(0.65, 0.65),
            pos_y=(0.5, 0.5),
            pos_z=(0.15, 0.15),
            roll=(0.0, 0.0),
            pitch=MISSING,  # depends on end-effector axis
            yaw=(0.0, 0.0),
        ),
    )

@configclass
class ObservationsCfg:
    """Observation specifications for the MDP."""

    @configclass
    class PolicyCfg(ObsGroup):
        """Observations for policy group."""

        # observation terms (order preserved)
        joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
        joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-0.01, n_max=0.01))
        # expose both generated commands so the policy sees the current and
        # the next waypoint at every step
        pose_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "ee_pose"})
        target_command = ObsTerm(func=mdp.generated_commands, params={"command_name": "target_pose"})
        actions = ObsTerm(func=mdp.last_action)

        def __post_init__(self):
            # add observation noise and flatten all terms into one vector
            self.enable_corruption = True
            self.concatenate_terms = True

    # observation groups
    policy: PolicyCfg = PolicyCfg()

@configclass
class RandomizationCfg:
    """Configuration for randomization."""

    # On every reset, scale the default joint positions by a random factor
    # in [0.5, 1.5] and zero the joint velocities.
    reset_robot_joints = RandTerm(
        func=mdp.reset_joints_by_scale,
        mode="reset",
        params={
            "position_range": (0.5, 1.5),
            "velocity_range": (0.0, 0.0),
        },
    )

@configclass
class RewardsCfg:
    """Reward terms for the MDP.

    All task terms are penalties (negative weights) on the tracking error of
    the end-effector against the two pose commands, plus small action/velocity
    regularizers.
    """

    # task terms: track the first waypoint ("ee_pose")
    end_effector_position_tracking = RewTerm(
        func=mdp.position_command_error,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"},
    )
    end_effector_orientation_tracking = RewTerm(
        func=mdp.orientation_command_error,
        weight=-0.05,
        params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "ee_pose"},
    )
    # task terms: track the second waypoint ("target_pose")
    # NOTE(review): both waypoints are penalized simultaneously every step,
    # so the optimum is a compromise between the two poses rather than
    # sequential visiting — confirm this is the intended behavior.
    target_position_tracking = RewTerm(
        func=mdp.position_command_error,
        weight=-0.2,
        params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "target_pose"},
    )
    target_orientation_tracking = RewTerm(
        func=mdp.orientation_command_error,
        weight=-0.05,
        params={"asset_cfg": SceneEntityCfg("robot", body_names=MISSING), "command_name": "target_pose"},
    )

    # action penalty: discourage jerky actions and high joint velocities
    action_rate = RewTerm(func=mdp.action_rate_l2, weight=-0.0001)
    joint_vel = RewTerm(
        func=mdp.joint_vel_l2,
        weight=-0.0001,
        params={"asset_cfg": SceneEntityCfg("robot")},
    )

@configclass
class TerminationsCfg:
    """Termination terms for the MDP."""

    # episodes only end on timeout (no early-termination conditions)
    time_out = DoneTerm(func=mdp.time_out, time_out=True)

@configclass
class CurriculumCfg:
    """Curriculum terms for the MDP."""

    # after 4500 steps, strengthen the action-rate penalty from -0.0001 to -0.005
    action_rate = CurrTerm(
        func=mdp.modify_reward_weight,
        params={"term_name": "action_rate", "weight": -0.005, "num_steps": 4500},
    )

Environment configuration

@configclass
class ReachEnvCfg(RLTaskEnvCfg):
    """Configuration for the reach end-effector pose tracking environment."""

    # Scene settings
    scene: ReachSceneCfg = ReachSceneCfg(num_envs=4096, env_spacing=2.5, replicate_physics=False)
    # Basic settings
    observations: ObservationsCfg = ObservationsCfg()
    actions: ActionsCfg = ActionsCfg()
    commands: CommandsCfg = CommandsCfg()
    # MDP settings
    rewards: RewardsCfg = RewardsCfg()
    terminations: TerminationsCfg = TerminationsCfg()
    randomization: RandomizationCfg = RandomizationCfg()
    curriculum: CurriculumCfg = CurriculumCfg()

    def __post_init__(self):
        """Post initialization."""
        # general settings
        self.decimation = 2  # control runs at 1/2 the physics rate
        self.episode_length_s = 12.0
        self.viewer.eye = (3.5, 3.5, 3.5)
        # simulation settings
        self.sim.dt = 1.0 / 60.0  # 60 Hz physics step
Mayankm96 commented 4 months ago

Unfortunately, this issue seems to require support when designing your own environment. If there are more specific environment-level documentation/clarifications you want us to provide, please make the issues for them.