Shuijing725 / CrowdNav_DSRNN

[ICRA 2021] Decentralized Structural-RNN for Robot Crowd Navigation with Deep Reinforcement Learning
https://sites.google.com/illinois.edu/crowdnav-dsrnn/home
MIT License

fov environment #20

Open ksy1251 opened 1 year ago

ksy1251 commented 1 year ago

I want to create an FOV environment like yours with this code.

```python
def render(self, mode='human', output_file=None):
    from matplotlib import animation
    from matplotlib import patches       # used for ArrowStyle/FancyArrowPatch below
    import matplotlib.lines as mlines    # used for the goal marker and FOV lines below
    import matplotlib.pyplot as plt
    import numpy as np                   # these may be module-level imports in the original file
    plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'

    x_offset = 0.11
    y_offset = 0.11
    cmap = plt.cm.get_cmap('hsv', 10)
    robot_color = 'yellow'
    goal_color = 'red'
    arrow_color = 'red'
    arrow_style = patches.ArrowStyle("->", head_length=4, head_width=2)

    if mode == 'human':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.set_xlim(-4, 4)
        ax.set_ylim(-4, 4)
        for human in self.humans:
            human_circle = plt.Circle(human.get_position(), human.radius, fill=False, color='b')
            ax.add_artist(human_circle)
        ax.add_artist(plt.Circle(self.robot.get_position(), self.robot.radius, fill=True, color='r'))
        plt.show()
    elif mode == 'traj':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.tick_params(labelsize=16)
        ax.set_xlim(-5, 5)
        ax.set_ylim(-5, 5)
        ax.set_xlabel('x(m)', fontsize=16)
        ax.set_ylabel('y(m)', fontsize=16)

        robot_positions = [self.states[i][0].position for i in range(len(self.states))]
        human_positions = [[self.states[i][1][j].position for j in range(len(self.humans))]
                           for i in range(len(self.states))]
        for k in range(len(self.states)):
            if k % 4 == 0 or k == len(self.states) - 1:
                robot = plt.Circle(robot_positions[k], self.robot.radius, fill=True, color=robot_color)
                humans = [plt.Circle(human_positions[k][i], self.humans[i].radius, fill=False, color=cmap(i))
                          for i in range(len(self.humans))]
                ax.add_artist(robot)
                for human in humans:
                    ax.add_artist(human)
            # add time annotation
            global_time = k * self.time_step
            if global_time % 4 == 0 or k == len(self.states) - 1:
                agents = humans + [robot]
                times = [plt.text(agents[i].center[0] - x_offset, agents[i].center[1] - y_offset,
                                  '{:.1f}'.format(global_time),
                                  color='black', fontsize=14) for i in range(self.human_num + 1)]
                for time in times:
                    ax.add_artist(time)
            if k != 0:
                nav_direction = plt.Line2D((self.states[k - 1][0].px, self.states[k][0].px),
                                           (self.states[k - 1][0].py, self.states[k][0].py),
                                           color=robot_color, ls='solid')
                human_directions = [plt.Line2D((self.states[k - 1][1][i].px, self.states[k][1][i].px),
                                               (self.states[k - 1][1][i].py, self.states[k][1][i].py),
                                               color=cmap(i), ls='solid')
                                    for i in range(self.human_num)]
                ax.add_artist(nav_direction)
                for human_direction in human_directions:
                    ax.add_artist(human_direction)
        plt.legend([robot], ['Robot'], fontsize=16)
        plt.show()
    elif mode == 'video':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.tick_params(labelsize=16)
        ax.set_xlim(-6, 6)
        ax.set_ylim(-6, 6)
        ax.set_xlabel('x(m)', fontsize=16)
        ax.set_ylabel('y(m)', fontsize=16)
        def calcFOVLineEndPoint(ang, point, extendFactor):
            # choose the extendFactor big enough
            # so that the endPoints of the FOVLine is out of xlim and ylim of the figure
            FOVLineRot = np.array([[np.cos(ang), -np.sin(ang), 0],
                                [np.sin(ang), np.cos(ang), 0],
                                [0, 0, 1]])
            point.extend([1])
            # apply rotation matrix
            newPoint = np.matmul(FOVLineRot, np.reshape(point, [3, 1]))
            # increase the distance between the line start point and the end point
            newPoint = [extendFactor * newPoint[0, 0], extendFactor * newPoint[1, 0], 1]
            return newPoint

        artists=[]

        # add goal
        goal=mlines.Line2D([self.robot.gx], [self.robot.gy], color=goal_color, marker='*', linestyle='None', markersize=15, label='Goal')
        ax.add_artist(goal)
        artists.append(goal)

        # add robot
        robotX,robotY=self.robot.get_position()

        robot=plt.Circle((robotX,robotY), self.robot.radius, fill=True, color=robot_color)
        ax.add_artist(robot)
        artists.append(robot)

        plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16)

        # compute orientation in each step and add arrow to show the direction
        radius = self.robot.radius
        arrowStartEnd=[]

        robot_theta = self.robot.theta if self.robot.kinematics == 'unicycle' else np.arctan2(self.robot.vy, self.robot.vx)

        arrowStartEnd.append(((robotX, robotY), (robotX + radius * np.cos(robot_theta), robotY + radius * np.sin(robot_theta))))

        for i, human in enumerate(self.humans):
            theta = np.arctan2(human.vy, human.vx)
            arrowStartEnd.append(((human.px, human.py), (human.px + radius * np.cos(theta), human.py + radius * np.sin(theta))))

        arrows = [patches.FancyArrowPatch(*arrow, color=arrow_color, arrowstyle=arrow_style)
                for arrow in arrowStartEnd]
        for arrow in arrows:
            ax.add_artist(arrow)
            artists.append(arrow)

        # draw FOV for the robot
        # add robot FOV
        if self.robot_fov < np.pi * 2:
            FOVAng = self.robot_fov / 2
            FOVLine1 = mlines.Line2D([0, 0], [0, 0], linestyle='--')
            FOVLine2 = mlines.Line2D([0, 0], [0, 0], linestyle='--')

            startPointX = robotX
            startPointY = robotY
            endPointX = robotX + radius * np.cos(robot_theta)
            endPointY = robotY + radius * np.sin(robot_theta)

            # transform the vector back to world frame origin, apply rotation matrix, and get end point of FOVLine
            # the start point of the FOVLine is the center of the robot
            FOVEndPoint1 = calcFOVLineEndPoint(FOVAng, [endPointX - startPointX, endPointY - startPointY], 20. / self.robot.radius)
            FOVLine1.set_xdata(np.array([startPointX, startPointX + FOVEndPoint1[0]]))
            FOVLine1.set_ydata(np.array([startPointY, startPointY + FOVEndPoint1[1]]))
            FOVEndPoint2 = calcFOVLineEndPoint(-FOVAng, [endPointX - startPointX, endPointY - startPointY], 20. / self.robot.radius)
            FOVLine2.set_xdata(np.array([startPointX, startPointX + FOVEndPoint2[0]]))
            FOVLine2.set_ydata(np.array([startPointY, startPointY + FOVEndPoint2[1]]))

            ax.add_artist(FOVLine1)
            ax.add_artist(FOVLine2)
            artists.append(FOVLine1)
            artists.append(FOVLine2)

        # add humans and change the color of them based on visibility
        human_circles = [plt.Circle(human.get_position(), human.radius, fill=False) for human in self.humans]

        for i in range(len(self.humans)):
            ax.add_artist(human_circles[i])
            artists.append(human_circles[i])

            # green: visible; red: invisible
            if self.detect_visible(self.robot, self.humans[i], robot1=True):
                human_circles[i].set_color(c='g')
            else:
                human_circles[i].set_color(c='r')
            plt.text(self.humans[i].px - 0.1, self.humans[i].py - 0.1, str(i), color='black', fontsize=12)

        plt.pause(0.1)
        for item in artists:
            item.remove() # there should be a better way to do this. For example,
            # initially use add_artist and draw_artist later on
        for t in ax.texts:
            t.set_visible(False)

        # add robot and its goal
        robot_positions = [state[0].position for state in self.states]
        # draw a star at the goal position (0,4)
        goal = mlines.Line2D([0], [4], color=goal_color, marker='*', linestyle='None', markersize=15, label='Goal')
        robot = plt.Circle(robot_positions[0], self.robot.radius, fill=True, color=robot_color)
        ax.add_artist(robot)
        ax.add_artist(goal)
        plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16, numpoints=1)   # numpoints=1: only 1 star in the legend

        # add humans and their numbers
        human_positions = [[state[1][j].position for j in range(len(self.humans))] for state in self.states]
        humans = [plt.Circle(human_positions[0][i], self.humans[i].radius, fill=False)
                  for i in range(len(self.humans))]
        human_numbers = [plt.text(humans[i].center[0] - x_offset, humans[i].center[1] - y_offset, str(i),
                                  color='black', fontsize=12) for i in range(len(self.humans))]
        for i, human in enumerate(humans):
            ax.add_artist(human)
            ax.add_artist(human_numbers[i])

        # add time annotation
        time = plt.text(-1, 5, 'Time: {}'.format(0), fontsize=16)
        ax.add_artist(time)

        # compute attention scores
        if self.attention_weights is not None:
            attention_scores = [
                plt.text(-6, 6 - 1.0 * i, 'Human {}: {:.2f}'.format(i + 1, self.attention_weights[0][i]),
                         fontsize=16) for i in range(len(self.humans))]

        # compute orientation in each step and use arrow to show the direction
        radius = self.robot.radius
        if self.robot.kinematics == 'unicycle':
            orientation = [((state[0].px, state[0].py), (state[0].px + radius * np.cos(state[0].theta),
                                                         state[0].py + radius * np.sin(state[0].theta))) for state
                           in self.states]
            orientations = [orientation]
        else:
            orientations = []
            for i in range(self.human_num + 1):
                orientation = []
                for state in self.states:
                    if i == 0:
                        agent_state = state[0]
                    else:
                        agent_state = state[1][i - 1]
                    theta = np.arctan2(agent_state.vy, agent_state.vx)
                    orientation.append(((agent_state.px, agent_state.py), (agent_state.px + radius * np.cos(theta),
                                         agent_state.py + radius * np.sin(theta))))
                orientations.append(orientation)
        arrows = {}
        arrows[0] = [patches.FancyArrowPatch(*orientation[0], color=arrow_color, arrowstyle=arrow_style)
                  for orientation in orientations]
        for arrow in arrows[0]:
            ax.add_artist(arrow)
        global_step = {}
        global_step[0] = 0

        def update(frame_num):
            # nonlocal global_step
            # nonlocal arrows
            global_step[0] = frame_num
            robot.center = robot_positions[frame_num]
            for i, human in enumerate(humans):
                human.center = human_positions[frame_num][i]
                human_numbers[i].set_position((human.center[0] - x_offset, human.center[1] - y_offset))
                for arrow in arrows[0]:
                    arrow.remove()
                arrows[0] = [patches.FancyArrowPatch(*orientation[frame_num], color=arrow_color,
                                                  arrowstyle=arrow_style) for orientation in orientations]
                for arrow in arrows[0]:
                    ax.add_artist(arrow)
                if self.attention_weights is not None:
                    human.set_color(str(self.attention_weights[frame_num][i]))
                    attention_scores[i].set_text('human {}: {:.2f}'.format(i, self.attention_weights[frame_num][i]))

            time.set_text('Time: {:.2f}'.format(frame_num * self.time_step))

        def plot_value_heatmap():
            assert self.robot.kinematics == 'holonomic'
            # when any key is pressed draw the action value plot
            fig, axis = plt.subplots()
            speeds = self.robot.policy.speeds
            rotations = self.robot.policy.rotations + [np.pi * 2]
            r, th = np.meshgrid(speeds, rotations)
            z = np.array(self.action_values[global_step[0] % len(self.states)][1:])
            z = (z - np.min(z)) / (np.max(z) - np.min(z))  # z: normalized action values
            z = np.append(z, z[:6])
            z = z.reshape(16, 6)  # rotations: 16   speeds:6
            polar = plt.subplot(projection="polar")
            polar.tick_params(labelsize=16)
            mesh = plt.pcolormesh(th, r, z, cmap=plt.cm.viridis)
            plt.plot(rotations, r, color='k', ls='none')
            plt.grid()
            cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8])
            cbar = plt.colorbar(mesh, cax=cbaxes)
            cbar.ax.tick_params(labelsize=16)
            plt.show()

        def on_click(event):
            anim.running ^= True
            if anim.running:
                anim.event_source.stop()
                if hasattr(self.robot.policy, 'action_values'):
                    plot_value_heatmap()
            else:
                anim.event_source.start()

        fig.canvas.mpl_connect('key_press_event', on_click)
        anim = animation.FuncAnimation(fig, update, frames=len(self.states), interval=self.time_step * 1000)
        anim.running = True
        anim.save('testcase_animation.gif', writer='imagemagick')

        if output_file is not None:
            ffmpeg_writer = animation.writers['ffmpeg']
            writer = ffmpeg_writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
            anim.save(output_file, writer=writer)
        else:
            plt.show()
    else:
        raise NotImplementedError
```

I'm trying to make an FOV animation with your code. Can you help me modify the code for the FOV? How can I make the animation?

Shuijing725 commented 1 year ago

We replaced plt.show() with plt.savefig(...) and gave each frame a different file name. Then we used Adobe Photoshop to create a .gif from the sequence of frames.

If you want to generate the gifs in code, you may refer to lines 167-177 of this file from another of our repos.
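For reference, here is a minimal sketch of that frame-saving approach (this is not the code from the linked file; the directory/file names and the use of Pillow are assumptions):

```python
# Sketch only: save one uniquely named PNG per timestep instead of calling
# plt.show(), then stitch the PNGs into a gif with Pillow.
import glob
import os

import matplotlib.pyplot as plt
from PIL import Image


def save_frame(fig, frame_idx, frame_dir='render_frames'):
    os.makedirs(frame_dir, exist_ok=True)
    fig.savefig(os.path.join(frame_dir, 'frame_{:04d}.png'.format(frame_idx)))
    plt.close(fig)  # avoid accumulating open figures across timesteps


def frames_to_gif(frame_dir='render_frames', out_file='episode.gif', ms_per_frame=125):
    paths = sorted(glob.glob(os.path.join(frame_dir, '*.png')))
    frames = [Image.open(p) for p in paths]
    frames[0].save(out_file, save_all=True, append_images=frames[1:],
                   duration=ms_per_frame, loop=0)
```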

ksy1251 commented 1 year ago

In my code, the robot is shown moving in each frame, but the FOV lines only show the direction from the last frame. How can I make the robot's FOV recognize and avoid humans in every frame?

Shuijing725 commented 1 year ago

Sorry about the late reply. You forgot to copy the code that renders the FOV lines into the `if mode == 'human':` branch. Here is my fixed version:

```python
def render(self, mode='human', output_file=None):
    from matplotlib import animation
    from matplotlib import patches
    import matplotlib.lines as mlines    # also needed in the 'video' branch; may be a module-level import in the original file
    import matplotlib.pyplot as plt
    import numpy as np
    plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'

    x_offset = 0.11
    y_offset = 0.11
    cmap = plt.cm.get_cmap('hsv', 10)
    robot_color = 'yellow'
    goal_color = 'red'
    arrow_color = 'red'
    arrow_style = patches.ArrowStyle("->", head_length=4, head_width=2)

    if mode == 'human':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.set_xlim(-10, 10)
        ax.set_ylim(-10, 10)
        for human in self.humans:
            human_circle = plt.Circle(human.get_position(), human.radius, fill=False, color='b')
            ax.add_artist(human_circle)
        ax.add_artist(plt.Circle(self.robot.get_position(), self.robot.radius, fill=True, color='r'))

        import matplotlib.lines as mlines
        robotX, robotY = self.robot.get_position()
        radius = self.robot.radius
        robot_theta = self.robot.theta if self.robot.kinematics == 'unicycle' else np.arctan2(self.robot.vy,
                                                                                    self.robot.vx)

        def calcFOVLineEndPoint(ang, point, extendFactor):
            # choose the extendFactor big enough
            # so that the endPoints of the FOVLine is out of xlim and ylim of the figure
            FOVLineRot = np.array([[np.cos(ang), -np.sin(ang), 0],
                                   [np.sin(ang), np.cos(ang), 0],
                                   [0, 0, 1]])
            point.extend([1])
            # apply rotation matrix
            newPoint = np.matmul(FOVLineRot, np.reshape(point, [3, 1]))
            # increase the distance between the line start point and the end point
            newPoint = [extendFactor * newPoint[0, 0], extendFactor * newPoint[1, 0], 1]
            return newPoint

        FOVAng = self.robot_fov / 2
        FOVLine1 = mlines.Line2D([0, 0], [0, 0], linestyle='--')
        FOVLine2 = mlines.Line2D([0, 0], [0, 0], linestyle='--')

        startPointX = robotX
        startPointY = robotY
        endPointX = robotX + radius * np.cos(robot_theta)
        endPointY = robotY + radius * np.sin(robot_theta)

        # transform the vector back to world frame origin, apply rotation matrix, and get end point of FOVLine
        # the start point of the FOVLine is the center of the robot
        FOVEndPoint1 = calcFOVLineEndPoint(FOVAng, [endPointX - startPointX, endPointY - startPointY],
                                           20. / self.robot.radius)
        FOVLine1.set_xdata(np.array([startPointX, startPointX + FOVEndPoint1[0]]))
        FOVLine1.set_ydata(np.array([startPointY, startPointY + FOVEndPoint1[1]]))
        FOVEndPoint2 = calcFOVLineEndPoint(-FOVAng, [endPointX - startPointX, endPointY - startPointY],
                                           20. / self.robot.radius)
        FOVLine2.set_xdata(np.array([startPointX, startPointX + FOVEndPoint2[0]]))
        FOVLine2.set_ydata(np.array([startPointY, startPointY + FOVEndPoint2[1]]))

        ax.add_artist(FOVLine1)
        ax.add_artist(FOVLine2)

        plt.show()
    elif mode == 'traj':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.tick_params(labelsize=16)
        ax.set_xlim(-5, 5)
        ax.set_ylim(-5, 5)
        ax.set_xlabel('x(m)', fontsize=16)
        ax.set_ylabel('y(m)', fontsize=16)

        robot_positions = [self.states[i][0].position for i in range(len(self.states))]
        human_positions = [[self.states[i][1][j].position for j in range(len(self.humans))]
                           for i in range(len(self.states))]
        for k in range(len(self.states)):
            if k % 4 == 0 or k == len(self.states) - 1:
                robot = plt.Circle(robot_positions[k], self.robot.radius, fill=True, color=robot_color)
                humans = [plt.Circle(human_positions[k][i], self.humans[i].radius, fill=False, color=cmap(i))
                          for i in range(len(self.humans))]
                ax.add_artist(robot)
                for human in humans:
                    ax.add_artist(human)
            # add time annotation
            global_time = k * self.time_step
            if global_time % 4 == 0 or k == len(self.states) - 1:
                agents = humans + [robot]
                times = [plt.text(agents[i].center[0] - x_offset, agents[i].center[1] - y_offset,
                                  '{:.1f}'.format(global_time),
                                  color='black', fontsize=14) for i in range(self.human_num + 1)]
                for time in times:
                    ax.add_artist(time)
            if k != 0:
                nav_direction = plt.Line2D((self.states[k - 1][0].px, self.states[k][0].px),
                                           (self.states[k - 1][0].py, self.states[k][0].py),
                                           color=robot_color, ls='solid')
                human_directions = [plt.Line2D((self.states[k - 1][1][i].px, self.states[k][1][i].px),
                                               (self.states[k - 1][1][i].py, self.states[k][1][i].py),
                                               color=cmap(i), ls='solid')
                                    for i in range(self.human_num)]
                ax.add_artist(nav_direction)
                for human_direction in human_directions:
                    ax.add_artist(human_direction)
        plt.legend([robot], ['Robot'], fontsize=16)
        plt.show()
    elif mode == 'video':
        fig, ax = plt.subplots(figsize=(7, 7))
        ax.tick_params(labelsize=16)
        ax.set_xlim(-6, 6)
        ax.set_ylim(-6, 6)
        ax.set_xlabel('x(m)', fontsize=16)
        ax.set_ylabel('y(m)', fontsize=16)

        def calcFOVLineEndPoint(ang, point, extendFactor):
            # choose the extendFactor big enough
            # so that the endPoints of the FOVLine is out of xlim and ylim of the figure
            FOVLineRot = np.array([[np.cos(ang), -np.sin(ang), 0],
                                   [np.sin(ang), np.cos(ang), 0],
                                   [0, 0, 1]])
            point.extend([1])
            # apply rotation matrix
            newPoint = np.matmul(FOVLineRot, np.reshape(point, [3, 1]))
            # increase the distance between the line start point and the end point
            newPoint = [extendFactor * newPoint[0, 0], extendFactor * newPoint[1, 0], 1]
            return newPoint

        artists = []

        # add goal
        goal = mlines.Line2D([self.robot.gx], [self.robot.gy], color=goal_color, marker='*', linestyle='None',
                             markersize=15, label='Goal')
        ax.add_artist(goal)
        artists.append(goal)

        # add robot
        robotX, robotY = self.robot.get_position()

        robot = plt.Circle((robotX, robotY), self.robot.radius, fill=True, color=robot_color)
        ax.add_artist(robot)
        artists.append(robot)

        plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16)

        # compute orientation in each step and add arrow to show the direction
        radius = self.robot.radius
        arrowStartEnd = []

        robot_theta = self.robot.theta if self.robot.kinematics == 'unicycle' else np.arctan2(self.robot.vy,
                                                                                              self.robot.vx)

        arrowStartEnd.append(
            ((robotX, robotY), (robotX + radius * np.cos(robot_theta), robotY + radius * np.sin(robot_theta))))

        for i, human in enumerate(self.humans):
            theta = np.arctan2(human.vy, human.vx)
            arrowStartEnd.append(
                ((human.px, human.py), (human.px + radius * np.cos(theta), human.py + radius * np.sin(theta))))

        arrows = [patches.FancyArrowPatch(*arrow, color=arrow_color, arrowstyle=arrow_style)
                  for arrow in arrowStartEnd]
        for arrow in arrows:
            ax.add_artist(arrow)
            artists.append(arrow)

        # draw FOV for the robot
        # add robot FOV
        if self.robot_fov < np.pi * 2:
            FOVAng = self.robot_fov / 2
            FOVLine1 = mlines.Line2D([0, 0], [0, 0], linestyle='--')
            FOVLine2 = mlines.Line2D([0, 0], [0, 0], linestyle='--')

            startPointX = robotX
            startPointY = robotY
            endPointX = robotX + radius * np.cos(robot_theta)
            endPointY = robotY + radius * np.sin(robot_theta)

            # transform the vector back to world frame origin, apply rotation matrix, and get end point of FOVLine
            # the start point of the FOVLine is the center of the robot
            FOVEndPoint1 = calcFOVLineEndPoint(FOVAng, [endPointX - startPointX, endPointY - startPointY],
                                               20. / self.robot.radius)
            FOVLine1.set_xdata(np.array([startPointX, startPointX + FOVEndPoint1[0]]))
            FOVLine1.set_ydata(np.array([startPointY, startPointY + FOVEndPoint1[1]]))
            FOVEndPoint2 = calcFOVLineEndPoint(-FOVAng, [endPointX - startPointX, endPointY - startPointY],
                                               20. / self.robot.radius)
            FOVLine2.set_xdata(np.array([startPointX, startPointX + FOVEndPoint2[0]]))
            FOVLine2.set_ydata(np.array([startPointY, startPointY + FOVEndPoint2[1]]))

            ax.add_artist(FOVLine1)
            ax.add_artist(FOVLine2)
            artists.append(FOVLine1)
            artists.append(FOVLine2)

        # add humans and change the color of them based on visibility
        human_circles = [plt.Circle(human.get_position(), human.radius, fill=False) for human in self.humans]

        for i in range(len(self.humans)):
            ax.add_artist(human_circles[i])
            artists.append(human_circles[i])

            # green: visible; red: invisible
            if self.detect_visible(self.robot, self.humans[i], robot1=True):
                human_circles[i].set_color(c='g')
            else:
                human_circles[i].set_color(c='r')
            plt.text(self.humans[i].px - 0.1, self.humans[i].py - 0.1, str(i), color='black', fontsize=12)

        plt.pause(0.1)
        for item in artists:
            item.remove()  # there should be a better way to do this. For example,
            # initially use add_artist and draw_artist later on
        for t in ax.texts:
            t.set_visible(False)

        # add robot and its goal
        robot_positions = [state[0].position for state in self.states]
        # draw a star at the goal position (0,4)
        goal = mlines.Line2D([0], [4], color=goal_color, marker='*', linestyle='None', markersize=15, label='Goal')
        robot = plt.Circle(robot_positions[0], self.robot.radius, fill=True, color=robot_color)
        ax.add_artist(robot)
        ax.add_artist(goal)
        plt.legend([robot, goal], ['Robot', 'Goal'], fontsize=16, numpoints=1)  # numpoints=1: only 1 star in the legend

        # add humans and their numbers
        human_positions = [[state[1][j].position for j in range(len(self.humans))] for state in self.states]
        humans = [plt.Circle(human_positions[0][i], self.humans[i].radius, fill=False)
                  for i in range(len(self.humans))]
        human_numbers = [plt.text(humans[i].center[0] - x_offset, humans[i].center[1] - y_offset, str(i),
                                  color='black', fontsize=12) for i in range(len(self.humans))]
        for i, human in enumerate(humans):
            ax.add_artist(human)
            ax.add_artist(human_numbers[i])

        # add time annotation
        time = plt.text(-1, 5, 'Time: {}'.format(0), fontsize=16)
        ax.add_artist(time)

        # compute attention scores
        if self.attention_weights is not None:
            attention_scores = [
                plt.text(-6, 6 - 1.0 * i, 'Human {}: {:.2f}'.format(i + 1, self.attention_weights[0][i]),
                         fontsize=16) for i in range(len(self.humans))]

        # compute orientation in each step and use arrow to show the direction
        radius = self.robot.radius
        if self.robot.kinematics == 'unicycle':
            orientation = [((state[0].px, state[0].py), (state[0].px + radius * np.cos(state[0].theta),
                                                         state[0].py + radius * np.sin(state[0].theta))) for state
                           in self.states]
            orientations = [orientation]
        else:
            orientations = []
            for i in range(self.human_num + 1):
                orientation = []
                for state in self.states:
                    if i == 0:
                        agent_state = state[0]
                    else:
                        agent_state = state[1][i - 1]
                    theta = np.arctan2(agent_state.vy, agent_state.vx)
                    orientation.append(((agent_state.px, agent_state.py), (agent_state.px + radius * np.cos(theta),
                                                                           agent_state.py + radius * np.sin(theta))))
                orientations.append(orientation)
        arrows = {}
        arrows[0] = [patches.FancyArrowPatch(*orientation[0], color=arrow_color, arrowstyle=arrow_style)
                     for orientation in orientations]
        for arrow in arrows[0]:
            ax.add_artist(arrow)
        global_step = {}
        global_step[0] = 0

        def update(frame_num):
            # nonlocal global_step
            # nonlocal arrows
            global_step[0] = frame_num
            robot.center = robot_positions[frame_num]
            for i, human in enumerate(humans):
                human.center = human_positions[frame_num][i]
                human_numbers[i].set_position((human.center[0] - x_offset, human.center[1] - y_offset))
                for arrow in arrows[0]:
                    arrow.remove()
                arrows[0] = [patches.FancyArrowPatch(*orientation[frame_num], color=arrow_color,
                                                     arrowstyle=arrow_style) for orientation in orientations]
                for arrow in arrows[0]:
                    ax.add_artist(arrow)
                if self.attention_weights is not None:
                    human.set_color(str(self.attention_weights[frame_num][i]))
                    attention_scores[i].set_text('human {}: {:.2f}'.format(i, self.attention_weights[frame_num][i]))

            time.set_text('Time: {:.2f}'.format(frame_num * self.time_step))

        def plot_value_heatmap():
            assert self.robot.kinematics == 'holonomic'
            # when any key is pressed draw the action value plot
            fig, axis = plt.subplots()
            speeds = self.robot.policy.speeds
            rotations = self.robot.policy.rotations + [np.pi * 2]
            r, th = np.meshgrid(speeds, rotations)
            z = np.array(self.action_values[global_step[0] % len(self.states)][1:])
            z = (z - np.min(z)) / (np.max(z) - np.min(z))  # z: normalized action values
            z = np.append(z, z[:6])
            z = z.reshape(16, 6)  # rotations: 16   speeds:6
            polar = plt.subplot(projection="polar")
            polar.tick_params(labelsize=16)
            mesh = plt.pcolormesh(th, r, z, cmap=plt.cm.viridis)
            plt.plot(rotations, r, color='k', ls='none')
            plt.grid()
            cbaxes = fig.add_axes([0.85, 0.1, 0.03, 0.8])
            cbar = plt.colorbar(mesh, cax=cbaxes)
            cbar.ax.tick_params(labelsize=16)
            plt.show()

        def on_click(event):
            anim.running ^= True
            if anim.running:
                anim.event_source.stop()
                if hasattr(self.robot.policy, 'action_values'):
                    plot_value_heatmap()
            else:
                anim.event_source.start()

        fig.canvas.mpl_connect('key_press_event', on_click)
        anim = animation.FuncAnimation(fig, update, frames=len(self.states), interval=self.time_step * 1000)
        anim.running = True
        anim.save('testcase_animation.gif', writer='imagemagick')

        if output_file is not None:
            ffmpeg_writer = animation.writers['ffmpeg']
            writer = ffmpeg_writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
            anim.save(output_file, writer=writer)
        else:
            plt.show()
    else:
        raise NotImplementedError

```
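If you also want the FOV lines to follow the robot during the 'video' animation (they are currently drawn once and then removed together with the other artists after plt.pause(0.1)), one option is to recompute their endpoints every frame. The helper below is only a sketch, not part of the fix above: it is meant to be defined next to update() inside render(), so that FOVLine1, FOVLine2, radius, calcFOVLineEndPoint, np, and self are all in scope, and it assumes self.robot_fov < 2 * np.pi (so the FOV lines exist) and that they have been re-added to ax before the animation starts.

```python
def update_fov_lines(frame_num):
    # robot state recorded at this timestep
    robot_state = self.states[frame_num][0]
    if self.robot.kinematics == 'unicycle':
        theta = robot_state.theta
    else:
        theta = np.arctan2(robot_state.vy, robot_state.vx)
    # heading vector of length radius, to be rotated by +/- half the FOV angle
    heading = [radius * np.cos(theta), radius * np.sin(theta)]
    end1 = calcFOVLineEndPoint(self.robot_fov / 2, list(heading), 20. / self.robot.radius)
    end2 = calcFOVLineEndPoint(-self.robot_fov / 2, list(heading), 20. / self.robot.radius)
    FOVLine1.set_data([robot_state.px, robot_state.px + end1[0]],
                      [robot_state.py, robot_state.py + end1[1]])
    FOVLine2.set_data([robot_state.px, robot_state.px + end2[0]],
                      [robot_state.py, robot_state.py + end2[1]])
```

Calling update_fov_lines(frame_num) at the end of update() keeps the FOV cone attached to the robot; list(heading) is passed because calcFOVLineEndPoint mutates its point argument in place.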

Shuijing725 commented 1 year ago

FYI, your code creates a new figure and subplot (plt.subplots) at every timestep, which quickly fills up memory and can crash the computer.
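A minimal sketch of one way to avoid that, assuming render() is called once per timestep (the cached attribute names are illustrative, not the repo's actual implementation): create the figure once, reuse it, and clear it on every call.

```python
import matplotlib.pyplot as plt


def render(self, mode='human', output_file=None):
    # create the figure/axes only once and cache them on the environment
    if not hasattr(self, 'render_fig'):
        self.render_fig, self.render_ax = plt.subplots(figsize=(7, 7))
        plt.ion()  # interactive mode so drawing does not block the simulation
    ax = self.render_ax
    ax.clear()  # drop the previous frame's artists instead of leaking them
    ax.set_xlim(-10, 10)
    ax.set_ylim(-10, 10)
    for human in self.humans:
        ax.add_artist(plt.Circle(human.get_position(), human.radius, fill=False, color='b'))
    ax.add_artist(plt.Circle(self.robot.get_position(), self.robot.radius, fill=True, color='r'))
    plt.pause(0.001)  # let the GUI event loop redraw the single reused window
```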