axinc-ai / ailia-models

The collection of pre-trained, state-of-the-art AI models for ailia SDK

ADD timesfm #1521

Open kyakuno opened 3 months ago

kyakuno commented 3 months ago

https://github.com/google-research/timesfm

ooe1123 commented 2 days ago

○ timesfm/pytorch_patched_decoder.py

class PatchedTimeSeriesDecoder(nn.Module):
  ...
  def _preprocess_input(
      self,
      input_ts: torch.Tensor,
      input_padding: torch.Tensor,
  ) -> tuple[
      torch.Tensor,
      torch.Tensor,
      tuple[torch.Tensor, torch.Tensor] | None,
      torch.Tensor,
  ]:
    ...
    if self.config.use_positional_embedding:
      pos_emb = self.position_emb(model_input.shape[1]).to(model_input.device)
      pos_emb = torch.concat([pos_emb] * model_input.shape[0], dim=0)
  ...
  def decode(
      self,
      ...
  ) -> tuple[torch.Tensor, torch.Tensor]:
    ...
    for step_index in range(num_decode_patches):
      current_padding = paddings[:, 0:final_out.shape[1]]
      input_ts = final_out[:, -max_len:]
      input_padding = current_padding[:, -max_len:]

      fprop_outputs = self(input_ts, input_padding, freq)

↓

class PatchedTimeSeriesDecoder(nn.Module):
  ...
  def _preprocess_input(
      self,
      input_ts: torch.Tensor,
      input_padding: torch.Tensor,
  ) -> tuple[
      torch.Tensor,
      torch.Tensor,
      tuple[torch.Tensor, torch.Tensor] | None,
      torch.Tensor,
  ]:
    ...
    if self.config.use_positional_embedding:
      pos_emb = self.position_emb(model_input.shape[1]).to(model_input.device)
      # repeat instead of concat of a Python list keeps the batch dim dynamic in the ONNX graph
      pos_emb = pos_emb.repeat(model_input.shape[0], 1, 1)
  ...
  def decode(
      self,
      ...
  ) -> tuple[torch.Tensor, torch.Tensor]:
    ...
    for step_index in range(num_decode_patches):
      current_padding = paddings[:, 0:final_out.shape[1]]
      input_ts = final_out[:, -max_len:]
      input_padding = current_padding[:, -max_len:]
      if 1:  # export toggle: run once to dump the ONNX graph, then stop
        with torch.no_grad():
          print("------>")
          # trace one decode step, using the current inputs as example tensors
          x = (input_ts, input_padding, freq)
          torch.onnx.export(
            self, x, 'timesfm-1.0-200m.onnx',
            input_names=["input_ts", "input_padding", "freq"],
            output_names=["fprop_outputs"],
            dynamic_axes={"input_ts": {0: "batch"}, "input_padding": {0: "batch"}, "freq": {0: "batch"}, "fprop_outputs": {0: "batch"}},
            verbose=False, opset_version=17
          )
          print("<------")
          exit()

      fprop_outputs = self(input_ts, input_padding, freq)
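
For reference, a minimal sketch (not part of the patch above) for checking the exported graph with onnxruntime. The shapes and dtypes below are assumptions inferred from the export call; adjust them to whatever the traced decode step actually used.

import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession("timesfm-1.0-200m.onnx")
batch, context = 7, 512  # assumed: 7 series with a 512-point context, as in the script below
inputs = {
    "input_ts": np.random.randn(batch, context).astype(np.float32),
    "input_padding": np.zeros((batch, context), dtype=np.float32),
    "freq": np.zeros((batch, 1), dtype=np.int64),  # assumed dtype/shape for the freq input
}
(fprop_outputs,) = sess.run(["fprop_outputs"], inputs)
print(fprop_outputs.shape)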
ooe1123 commented 2 days ago

Running the original model

import timesfm
import torch
import pandas as pd

tfm = timesfm.TimesFm(
    hparams=timesfm.TimesFmHparams(
        backend="cpu",
        per_core_batch_size=32,
        horizon_len=128,
        # horizon_len=300,
    ),
    checkpoint=timesfm.TimesFmCheckpoint(
        huggingface_repo_id="google/timesfm-1.0-200m-pytorch"),
)

context_length = 512
forecast_horizon = 128

df = pd.read_csv("datasets/ETTh1.csv")

df_train = df.iloc[-(context_length+forecast_horizon):-forecast_horizon]
df_test = df.iloc[-forecast_horizon:]

train_tensor = torch.tensor(df_train[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL", "OT"]].values, dtype=torch.float)
train_tensor = train_tensor.t()  # (time, channels) -> (channels, time): one series per row
test_tensor = torch.tensor(df_test[["HUFL", "HULL", "MUFL", "MULL", "LUFL", "LULL", "OT"]].values, dtype=torch.float)
test_tensor = test_tensor.t()
channel_idx = 6  # index of the "OT" column

frequency_input = [0] * train_tensor.size(0)  # one frequency code per series (0 = high-frequency)
point_forecast, experimental_quantile_forecast = tfm.forecast(
    train_tensor,
    freq=frequency_input,
)
forecast_tensor = torch.tensor(point_forecast)
quantile_tensor = torch.tensor(experimental_quantile_forecast)

history = train_tensor[channel_idx, :].detach().numpy()
true = test_tensor[channel_idx, :].detach().numpy()
pred = forecast_tensor[channel_idx, :].detach().numpy()
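
The script prepares history, true, and pred but does not score or plot them; a small follow-up sketch (assuming matplotlib is installed) to evaluate the forecast on the OT channel:

import numpy as np
import matplotlib.pyplot as plt

# mean absolute error of the point forecast against ground truth
mae = np.mean(np.abs(pred - true))
print(f"MAE on channel {channel_idx} (OT): {mae:.4f}")

# plot the context window followed by ground truth and forecast
t_hist = np.arange(len(history))
t_future = np.arange(len(history), len(history) + len(true))
plt.plot(t_hist, history, label="history")
plt.plot(t_future, true, label="ground truth")
plt.plot(t_future, pred, label="forecast")
plt.legend()
plt.show()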