Closed · kkckk1110 closed this 7 months ago
What happened + What you expected to happen
I came across a bug: calling `nf.fit` on Windows fails inside tensorboardX. It splits the log path on `:` and treats the drive letter `C` as a URL scheme, raising `KeyError: 'C'`, which then surfaces as a `FileNotFoundError`.
Stacktrace
```python
KeyError                                  Traceback (most recent call last)
File ~\AppData\Roaming\Python\Python39\site-packages\tensorboardX\record_writer.py:58, in open_file(path)
     57 prefix = path.split(':')[0]
---> 58 factory = REGISTERED_FACTORIES[prefix]
     59 return factory.open(path)

KeyError: 'C'

During handling of the above exception, another exception occurred:

FileNotFoundError                         Traceback (most recent call last)
Cell In[13], line 1
----> 1 nf.fit(df=Y_df)

File ~\AppData\Roaming\Python\Python39\site-packages\neuralforecast\core.py:274, in NeuralForecast.fit(self, df, static_df, val_size, sort_df, use_init_models, verbose)
    271     print("WARNING: Deleting previously fitted models.")
    273 for model in self.models:
--> 274     model.fit(self.dataset, val_size=val_size)
    276 self._fitted = True

File ~\AppData\Roaming\Python\Python39\site-packages\neuralforecast\common\_base_auto.py:361, in BaseAuto.fit(self, dataset, val_size, test_size, random_seed)
    359 val_size = val_size if val_size > 0 else self.h
    360 if self.backend == "ray":
--> 361     results = self._tune_model(
    362         cls_model=self.cls_model,
    363         dataset=dataset,
    364         val_size=val_size,
    365         test_size=test_size,
    366         cpus=self.cpus,
    367         gpus=self.gpus,
    368         verbose=self.verbose,
    369         num_samples=self.num_samples,
    370         search_alg=search_alg,
    371         config=self.config,
    372     )
    373     best_config = results.get_best_result().config
    374 else:

File ~\AppData\Roaming\Python\Python39\site-packages\neuralforecast\common\_base_auto.py:259, in BaseAuto._tune_model(self, cls_model, dataset, val_size, test_size, cpus, gpus, verbose, num_samples, search_alg, config)
    240 device_dict = {"cpu": cpus}
    242 tuner = tune.Tuner(
    243     tune.with_resources(train_fn_with_parameters, device_dict),
    244     run_config=air.RunConfig(
    (...)
    257     param_space=config,
    258 )
--> 259 results = tuner.fit()
    260 return results

File D:\Anaconda\envs\neural\lib\site-packages\ray\tune\tuner.py:381, in Tuner.fit(self)
    379 if not self._is_ray_client:
    380     try:
--> 381         return self._local_tuner.fit()
    382     except TuneError as e:
    383         raise TuneError(
    384             _TUNER_FAILED_MSG.format(
    385                 path=self._local_tuner.get_experiment_checkpoint_dir()
    386             )
    387         ) from e

File D:\Anaconda\envs\neural\lib\site-packages\ray\tune\impl\tuner_internal.py:509, in TunerInternal.fit(self)
    507 param_space = copy.deepcopy(self.param_space)
    508 if not self._is_restored:
--> 509     analysis = self._fit_internal(trainable, param_space)
    510 else:
    511     analysis = self._fit_resume(trainable, param_space)

File D:\Anaconda\envs\neural\lib\site-packages\ray\tune\impl\tuner_internal.py:628, in TunerInternal._fit_internal(self, trainable, param_space)
    615 """Fitting for a fresh Tuner."""
    616 args = {
    617     **self._get_tune_run_arguments(trainable),
    618     **dict(
    (...)
    626     **self._tuner_kwargs,
    627 }
--> 628 analysis = run(
    629     **args,
    630 )
    631 self.clear_remote_string_queue()
    632 return analysis

File D:\Anaconda\envs\neural\lib\site-packages\ray\tune\tune.py:1002, in run(run_or_experiment, name, metric, mode, stop, time_budget_s, config, resources_per_trial, num_samples, storage_path, storage_filesystem, search_alg, scheduler, checkpoint_config, verbose, progress_reporter, log_to_file, trial_name_creator, trial_dirname_creator, sync_config, export_formats, max_failures, fail_fast, restore, resume, reuse_actors, raise_on_failed_trial, callbacks, max_concurrent_trials, keep_checkpoints_num, checkpoint_score_attr, checkpoint_freq, checkpoint_at_end, chdir_to_trial_dir, local_dir, _remote, _remote_string_queue, _entrypoint)
   1000 try:
   1001     while not runner.is_finished() and not experiment_interrupted_event.is_set():
-> 1002         runner.step()
   1003     if has_verbosity(Verbosity.V1_EXPERIMENT):
   1004         _report_progress(runner, progress_reporter)

File D:\Anaconda\envs\neural\lib\site-packages\ray\tune\execution\tune_controller.py:728, in TuneController.step(self)
    725 self._maybe_add_actors()
    727 # Handle one event
--> 728 if not self._actor_manager.next(timeout=0.1):
    729     # If there are no actors running, warn about potentially
    730     # insufficient resources
    731     if not self._actor_manager.num_live_actors:
    732         self._insufficient_resources_manager.on_no_available_trials(
    733             self.get_trials()
    734         )

File D:\Anaconda\envs\neural\lib\site-packages\ray\air\execution\_internal\actor_manager.py:222, in RayActorManager.next(self, timeout)
    219 [future] = ready
    221 if future in actor_state_futures:
--> 222     self._actor_state_events.resolve_future(future)
    223 elif future in actor_task_futures:
    224     self._actor_task_events.resolve_future(future)

File D:\Anaconda\envs\neural\lib\site-packages\ray\air\execution\_internal\event_manager.py:118, in RayEventManager.resolve_future(self, future)
    116 else:
    117     if on_result:
--> 118         on_result(result)

File D:\Anaconda\envs\neural\lib\site-packages\ray\air\execution\_internal\actor_manager.py:381, in RayActorManager._try_start_actors.
(...)
```
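For illustration, here is a minimal sketch of the failing lookup from the first frame of the stacktrace (`record_writer.open_file`); the registry contents and the path below are stand-ins I made up, not tensorboardX's actual values:

```python
# Sketch of tensorboardX's prefix lookup in record_writer.open_file.
# REGISTERED_FACTORIES maps URL-style schemes (e.g. "s3") to writer
# factories; the keys below are stand-ins, not the real registry.
REGISTERED_FACTORIES = {"s3": object(), "gs": object()}

path = r"C:\Users\me\lightning_logs\events.out.tfevents"  # hypothetical Windows path
prefix = path.split(":")[0]  # -> "C": the drive letter, not a scheme
try:
    factory = REGISTERED_FACTORIES[prefix]
except KeyError as err:
    print(f"KeyError: {err}")  # KeyError: 'C', matching the stacktrace
```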
Versions / Dependencies

ray==2.9.3, neuralforecast==1.6.4
Reproduction script
```python
nf.fit(df=Y_df)
```
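The snippet above is only the failing call, so here is a fuller reproduction sketch; the model choice (`AutoNHITS`), its settings, and the `AirPassengersDF` data are assumptions on my part, with only the ray tuning backend implied by the stacktrace (`BaseAuto`, `backend == "ray"`):

```python
from neuralforecast import NeuralForecast
from neuralforecast.auto import AutoNHITS
from neuralforecast.utils import AirPassengersDF

Y_df = AirPassengersDF  # stand-in for the reporter's Y_df (columns: unique_id, ds, y)

# AutoNHITS and its settings are hypothetical; the stacktrace only shows
# that some Auto model was fitted with the ray tuning backend.
models = [AutoNHITS(h=12, num_samples=10, backend="ray")]
nf = NeuralForecast(models=models, freq="M")

nf.fit(df=Y_df)  # on Windows this crashes in tensorboardX as shown above
```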
Issue Severity
None
Hey. This seems to be a duplicate of #526; I'm closing this in favor of that one.