danijar / director

Deep Hierarchical Planning from Pixels
https://danijar.com/director/

How to do inference with a trained model? #2

Open YsYusaito opened 1 year ago

YsYusaito commented 1 year ago

@danijar Would you please tell me how to do inference with a model trained by "train_with_viz"?

I would appreciate it if you could answer my questions.

danijar commented 1 year ago

What do you mean by inference in this context?

YsYusaito commented 1 year ago

@danijar
Sorry for my poor wording.
In this context, I think "rollout" is the correct word rather than "inference".

After some consideration, I was able to do what I wanted with the following code.

import pathlib
import sys
import pickle
from PIL import Image
import numpy as np

directory = pathlib.Path(__file__)
print(directory)

directory = directory.parent
sys.path.append(str(directory.parent))
sys.path.append(str(directory.parent.parent.parent))
__package__ = directory.name

import embodied

def main(argv=None):
    from . import agent as agnt
    argv = None

    # Load the config
    parsed, other = embodied.Flags(
        configs=['defaults'], actor_id=0, actors=0,
    ).parse_known(argv)

    config = embodied.Config(agnt.Agent.configs['defaults'])
    for name in parsed.configs:
        config = config.update(agnt.Agent.configs[name])
    config = embodied.Flags(config).parse(other)

    config = config.update(logdir=str(embodied.Path(config.logdir)))
    args = embodied.Config(logdir=config.logdir, **config.train)
    args = args.update(expl_until=args.expl_until // config.env.repeat)
    print(config)
    logdir = embodied.Path(config.logdir)

    step = embodied.Counter()
    config = config.update({'env.seed': hash((config.seed, parsed.actor_id))})
    cleanup = []
    chunk = config.replay_chunk

    env = embodied.envs.load_env(
        config.task, mode='train', logdir=logdir, **config.env)

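    # Helper that builds a replay buffer whose store lives under logdir / name.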
    def make_replay(name, capacity):
        directory = logdir / name
        store = embodied.replay.CkptRAMStore(directory, capacity, parallel=True)
        cleanup.append(store)
        return embodied.replay.FixedLength(store, chunk, **config.replay_fixed)

    # Initialize the agent with placeholder weights; to do that, train for just one step.
    agent = agnt.Agent(env.obs_space, env.act_space, step, config)
    replay = make_replay('episodes', config.replay_size)
    eval_replay = make_replay(config.eval_dir, config.replay_size // 10)

    driver_for_init = embodied.Driver(env)
    driver_for_init.on_step(replay.add)

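    # Collect a single step/episode with a random policy so the replay buffer has data for the pretraining below.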
    random_agent = embodied.RandomAgent(env.act_space)
    driver_for_init(random_agent.policy, steps=1, episodes=1)

    dataset_train = iter(agent.dataset(replay.dataset))
    state = [None]  # To be writable from train step function below.
    assert args.pretrain > 0  # At least one step to initialize variables.
    for _ in range(args.pretrain):
        _, state[0], _ = agent.train(next(dataset_train), state[0])

    # Load the pkl file that contains the trained model
    agent_cp = embodied.Checkpoint('/home/yusaito/work/DirectorPJ/Director/director/embodied/agents/director/checkpoint.pkl')
    agent_cp.step = step
    agent_cp.agent = agent
    agent_cp.train_replay = replay
    agent_cp.eval_replay = eval_replay
    agent_cp.load()

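    # Wrap the restored agent's policy: use exploration mode until expl_until steps have passed, otherwise the regular train-mode policy.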
    should_expl = embodied.when.Until(args.expl_until)
    policy = lambda *args: agent_cp._values['agent'].policy(
        *args, mode='explore' if should_expl(step) else 'train')

    driver_rollout = embodied.Driver(env)
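    # Note: _activate_obs_imgs_mode / _get_obs_imgs are custom helpers added to
    # embodied.Driver to record observation images; they are not part of the stock Driver.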
    driver_rollout._activate_obs_imgs_mode()

    # Run the rollout
    driver_rollout(policy, steps=300)

    # Save the rollout observations as a GIF
    obs_imgs = driver_rollout._get_obs_imgs()
    obs_imgs_squeezed = [
        Image.fromarray(np.squeeze(img, 0)) for img in obs_imgs]

    obs_imgs_squeezed[0].save('rollout_dmc_walk_walker.gif', save_all=True, append_images=obs_imgs_squeezed[1:], optimize=False)

if __name__ == '__main__':
  main()
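
For reference, I run the script above (saved as rollout.py next to the agent's train script; that name and the config/task values below are just examples from my setup, and the hard-coded checkpoint path in the script has to point at your own logdir) with the usual embodied.Flags command-line syntax:

python rollout.py --configs dmc_vision --task dmc_walker_walk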

I have one additional question. What is the difference between the script values that can be set in config.yaml: "train_with_viz", "acting", "learning", "train_eval", and "train_fixed_eval"?

[Screenshot of the script options in config.yaml]

panmt commented 1 year ago

Hi @danijar, I am also confused about the settings "train_with_viz", "acting", "learning", "train_eval", and "train_fixed_eval". Could you explain them? Thank you!