nnaisense / evotorch

Advanced evolutionary computation library built directly on top of PyTorch, created at NNAISENSE.
https://evotorch.ai
Apache License 2.0

When I run an MPC optimization code, it shows: "AttributeError: 'NoneType' object has no attribute 'evals'" #88

Closed: lk1983823 closed this issue 1 year ago

lk1983823 commented 1 year ago

I run multiple rounds without any errors, but in the last round it shows "AttributeError: 'NoneType' object has no attribute 'evals'". The whole error is:

---------------------------------------------------------------------------
AttributeError                            Traceback (most recent call last)
Cell In[28], line 22
     19 k2 = 0.02 # 
     20 k3 = 1.1 # 
---> 22 temp_new, temp_old = run_episode(data, state_dim, action_dim, horizon, k0, k1, k2, k3, rounds, time_steps)

Cell In[27], line 44, in run_episode(offline_states, state_dim, action_dim, horizon, k0, k1, k2, k3, rounds, time_steps)
     41     n_state = n_state.reshape(1, -1)
     42     action_input = n_action.reshape(1,-1)
---> 44 action = do_planning(n_state, 
     45                     action_input,
     46                     bias, 
     47                     input_length=input_length, 
     48                     horizon=horizon, 
     49                     action_dim=action_dim,
     50                     k0 = k0,
     51                     k1 = k1,
     52                     k2 = k2,
     53                     k3 = k3)
     55 action = np.clip(action, 0.0, 100.0) 
     56 n_action = action.reshape(1, -1) # (1, action_dim)

Cell In[10], line 18, in do_planning(state, hist_action, bias, input_length, horizon, action_dim, k0, k1, k2, k3)
      4 searcher = CEM(
      5     problem,
      6     stdev_init=10.0,
   (...)
      9     stdev_max_change=5.0,
     10 )
     11 # searcher = SNES(
     12 #     problem,
     13 #     radius_init=10,
   (...)
     16 #     # stdev_max_change=0.2,
     17 # )
---> 18 searcher.run(20)  # run for this many generations
     20 # new_actions = searcher.status["best"].values.reshape(horizon, action_dim).clone()
     22 action_candidate = searcher.status["best"].values.clone().numpy()

File ~/anaconda3/envs/dpc/lib/python3.10/site-packages/evotorch/algorithms/searchalgorithm.py:425, in SearchAlgorithm.run(self, num_generations, reset_first_step_datetime)
    422     self.reset_first_step_datetime()
    424 for _ in range(int(num_generations)):
--> 425     self.step()
    427 if len(self._end_of_run_hook) >= 1:
    428     self._end_of_run_hook(dict(self.status))

File ~/anaconda3/envs/dpc/lib/python3.10/site-packages/evotorch/algorithms/searchalgorithm.py:390, in SearchAlgorithm.step(self)
    387 if self._first_step_datetime is None:
    388     self._first_step_datetime = datetime.now()
--> 390 self._step()
    391 self._steps_count += 1
    392 self.update_status({"iter": self._steps_count})

File ~/anaconda3/envs/dpc/lib/python3.10/site-packages/evotorch/algorithms/distributed/gaussian.py:354, in GaussianSearchAlgorithm._step_non_distributed(self)
    349         self._population = SolutionBatch.cat(populations)
    351 if self._first_iter:
    352     # If we are computing the first generation, we just sample from our distribution and evaluate
    353     # the solutions.
--> 354     fill_and_eval_pop()
    355     self._first_iter = False
    356 else:
    357     # If we are computing next generations, then we need to compute the gradients of the last
    358     # generation, sample a new population, and evaluate the new population's solutions.

File ~/anaconda3/envs/dpc/lib/python3.10/site-packages/evotorch/algorithms/distributed/gaussian.py:295, in GaussianSearchAlgorithm._step_non_distributed.<locals>.fill_and_eval_pop()
    292     self._distribution.sample(out=self._population.access_values(), generator=self.problem)
    294     # Finally, here, the solutions are evaluated.
--> 295     self.problem.evaluate(self._population)
    296 else:
    297     # If num_interactions is not None, then this means that we have a threshold for the number
    298     # of simulator interactions to reach before declaring the phase of sampling complete.
   (...)
    303     # Therefore, to properly count the simulator interactions we made during this generation, we need
    304     # to get the interaction count before starting our sampling and evaluation operations.
    305     first_num_interactions = self.problem.status.get("total_interaction_count", 0)

File ~/anaconda3/envs/dpc/lib/python3.10/site-packages/evotorch/core.py:2403, in Problem.evaluate(self, x)
   2400 if self.is_main:
   2401     self._after_eval_status = {}
-> 2403     best_and_worst = self._get_best_and_worst(batch)
   2404     if best_and_worst is not None:
   2405         self._after_eval_status.update(best_and_worst)

File ~/anaconda3/envs/dpc/lib/python3.10/site-packages/torch/utils/_contextlib.py:115, in context_decorator.<locals>.decorate_context(*args, **kwargs)
    112 @functools.wraps(func)
    113 def decorate_context(*args, **kwargs):
    114     with ctx_factory():
--> 115         return func(*args, **kwargs)

File ~/anaconda3/envs/dpc/lib/python3.10/site-packages/evotorch/core.py:2232, in Problem._get_best_and_worst(self, batch)
   2226         self._worst[i_obj] = batch[worst_sln_index].clone()
   2228 if len(senses) == 1:
   2229     return dict(
   2230         best=self._best[0],
...
   2234     )
   2235 else:
   2236     return {"best": self._best, "worst": self._worst}

AttributeError: 'NoneType' object has no attribute 'evals'

I use CEM to solve my problem. The parameters are set as:

```python
searcher = CEM(
    problem,
    stdev_init=10.0,
    popsize=50,  # population size
    parenthood_ratio=0.2,
    stdev_max_change=5.0,
)

searcher.run(20)
```

The problem is defined with:

```python
super().__init__(
    objective_sense="min",
    initial_bounds=(50.0, 100.0),
    solution_length=(horizon * action_dim),  # the action dim = 4
    device="cuda:0",
)
```

The actions should be in [0, 100], so I set initial_bounds to (50.0, 100.0).
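
For reference, below is a minimal, self-contained sketch of the setup described above. The cost function is a hypothetical placeholder (the real MPC cost computed inside do_planning is not shown in this issue), and the class name PlanningProblem is illustrative:

```python
# A minimal sketch, assuming a placeholder cost function; the real MPC
# objective from do_planning() is not reproduced here.
import torch
from evotorch import Problem
from evotorch.algorithms import CEM


class PlanningProblem(Problem):
    def __init__(self, horizon: int, action_dim: int):
        super().__init__(
            objective_sense="min",
            initial_bounds=(50.0, 100.0),
            solution_length=horizon * action_dim,
            # Falls back to CPU so the sketch also runs without a GPU.
            device="cuda:0" if torch.cuda.is_available() else "cpu",
        )

    def _evaluate_batch(self, solutions):
        x = solutions.values  # shape: (popsize, horizon * action_dim)
        # Hypothetical placeholder cost: distance from the middle of the range.
        fitness = torch.sum((x - 75.0) ** 2, dim=-1)
        solutions.set_evals(fitness)


problem = PlanningProblem(horizon=5, action_dim=4)
searcher = CEM(
    problem,
    stdev_init=10.0,
    popsize=50,
    parenthood_ratio=0.2,
    stdev_max_change=5.0,
)
searcher.run(20)
action_candidate = searcher.status["best"].values.clone().cpu().numpy()
```

Note that in EvoTorch, initial_bounds only constrains where the initial population is sampled; later generations may drift outside that range, which is presumably why the resulting actions are clipped with np.clip(action, 0.0, 100.0) in run_episode.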