TimesFM (Time Series Foundation Model): A Study of the Forecasting Data (3)

The previous post covered getting TimesFM installed and running:

TimesFM (Time Series Foundation Model) Installation (2): https://blog.csdn.net/chenchihwen/article/details/144386472

This post analyzes the data used in that run.

Per the code, the data comes from `exp`, and `exp` is built by the imported `utils.py`.

Here is the code of `utils.py`.

It is forked from Nixtla, the time series forecasting company:

"""Forked from https://github.com/Nixtla/nixtla/blob/main/experiments/amazon-chronos/src/utils.py."""

# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Forked from https://github.com/Nixtla/nixtla/blob/main/experiments/amazon-chronos/src/utils.py."""

from functools import partial
from itertools import repeat
import multiprocessing
import os
from pathlib import Path
from typing import List

from gluonts.dataset import Dataset
from gluonts.dataset.repository.datasets import (
    dataset_names as gluonts_datasets,
    get_dataset,
)
from gluonts.time_feature.seasonality import get_seasonality
import numpy as np
import pandas as pd
from utilsforecast.evaluation import evaluate
from utilsforecast.losses import mae, mase, smape


def parallel_transform(inp):
  ts, last_n = inp[0], inp[1]
  return ExperimentHandler._transform_gluonts_instance_to_df(ts, last_n=last_n)


def quantile_loss(
    df: pd.DataFrame,
    models: list,
    q: float = 0.5,
    id_col: str = "unique_id",
    target_col: str = "y",
) -> pd.DataFrame:
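  """Average pinball (quantile) loss per unique_id for each model column.

  Note: delta_y below is forecast minus actual, so q and (1 - q) are swapped
  relative to the textbook pinball loss; summed over the symmetric quantile
  grid used in this file, the resulting scaled CRPS is unchanged.
  """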
  delta_y = df[models].sub(df[target_col], axis=0)
  res = (
      np.maximum(q * delta_y, (q - 1) * delta_y)
      .groupby(df[id_col], observed=True)
      .mean()
  )
  res.index.name = id_col
  res = res.reset_index()
  return res


class ExperimentHandler:

  def __init__(
      self,
      dataset: str,
      quantiles: List[float] = list(np.arange(1, 10) / 10.0),
      results_dir: str = "./results",
      models_dir: str = "./models",
  ):
    if dataset not in gluonts_datasets:
      raise Exception(
          f"dataset {dataset} not found in gluonts "
          f"available datasets: {', '.join(gluonts_datasets)}"
      )
    self.dataset = dataset
    self.quantiles = quantiles
    self.level = self._transform_quantiles_to_levels(quantiles)
    self.results_dir = results_dir
    self.models_dir = models_dir
    # defining datasets
    self._maybe_download_m3_or_m5_file(self.dataset)
    gluonts_dataset = get_dataset(self.dataset)
    self.horizon = gluonts_dataset.metadata.prediction_length
    if self.horizon is None:
      raise Exception(
          f"horizon not found for dataset {self.dataset} "
          "experiment cannot be run"
      )
    self.freq = gluonts_dataset.metadata.freq
    # get_seasonality() returns 1 for freq='D', override this to 7. This significantly improves the accuracy of
    # statistical models on datasets like m5/nn5_daily. The models like AutoARIMA/AutoETS can still set
    # seasonality=1 internally on datasets like weather by choosing non-seasonal models during model selection.
    if self.freq == "D":
      self.seasonality = 7
    else:
      self.seasonality = get_seasonality(self.freq)
    self.gluonts_train_dataset = gluonts_dataset.train
    self.gluonts_test_dataset = gluonts_dataset.test
    self._create_dir_if_not_exists(self.results_dir)
    try:
      multiprocessing.set_start_method("spawn")
    except RuntimeError:
      print("Multiprocessing context has already been set.")

  @staticmethod
  def _maybe_download_m3_or_m5_file(dataset: str):
    if dataset[:2] == "m3":
      m3_file = Path.home() / ".gluonts" / "datasets" / "M3C.xls"
      if not m3_file.exists():
        from datasetsforecast.m3 import M3
        from datasetsforecast.utils import download_file

        download_file(m3_file.parent, M3.source_url)
    elif dataset == "m5":
      m5_raw_dir = Path.home() / ".gluonts" / "m5"
      if not m5_raw_dir.exists():
        import zipfile
        from datasetsforecast.m5 import M5
        from datasetsforecast.utils import download_file

        download_file(m5_raw_dir, M5.source_url)
        with zipfile.ZipFile(m5_raw_dir / "m5.zip", "r") as zip_ref:
          zip_ref.extractall(m5_raw_dir)

  @staticmethod
  def _transform_quantiles_to_levels(quantiles: List[float]) -> List[int]:
    level = [
        int(100 - 200 * q) for q in quantiles if q < 0.5
    ]  # in this case mean=median
    level = sorted(list(set(level)))
    return level

  @staticmethod
  def _create_dir_if_not_exists(directory: str):
    Path(directory).mkdir(parents=True, exist_ok=True)

  @staticmethod
  def _transform_gluonts_instance_to_df(
      ts: dict,
      last_n: int | None = None,
  ) -> pd.DataFrame:
    start_period = ts["start"]
    start_ds, freq = start_period.to_timestamp(), start_period.freq
    target = ts["target"]
    ds = pd.date_range(start=start_ds, freq=freq, periods=len(target))
    if last_n is not None:
      target = target[-last_n:]
      ds = ds[-last_n:]
    ts_df = pd.DataFrame({"unique_id": ts["item_id"], "ds": ds, "y": target})
    return ts_df

  @staticmethod
  def _transform_gluonts_dataset_to_df(
      gluonts_dataset: Dataset,
      last_n: int | None = None,
  ) -> pd.DataFrame:
    with multiprocessing.Pool(os.cpu_count()) as pool:  # Create a process pool
      results = pool.map(
          parallel_transform, zip(gluonts_dataset, repeat(last_n))
      )
    df = pd.concat(results)
    df = df.reset_index(drop=True)
    return df

  @property
  def train_df(self) -> pd.DataFrame:
    train_df = self._transform_gluonts_dataset_to_df(self.gluonts_train_dataset)
    return train_df

  @property
  def test_df(self) -> pd.DataFrame:
    test_df = self._transform_gluonts_dataset_to_df(
        self.gluonts_test_dataset,
        last_n=self.horizon,
    )
    # Make sure that only the first backtest window is used for evaluation on `traffic` / `exchange_rate` datasets
    return test_df.groupby("unique_id", sort=False).head(self.horizon)

  def save_dataframe(self, df: pd.DataFrame, file_name: str):
    df.to_csv(f"{self.results_dir}/{file_name}", index=False)

  def save_results(
      self, fcst_df: pd.DataFrame, total_time: float, model_name: str
  ):
    self.save_dataframe(
        fcst_df,
        f"{model_name}-{self.dataset}-fcst.csv",
    )
    time_df = pd.DataFrame({"time": [total_time], "model": model_name})
    self.save_dataframe(
        time_df,
        f"{model_name}-{self.dataset}-time.csv",
    )

  def fcst_from_level_to_quantiles(
      self,
      fcst_df: pd.DataFrame,
      model_name: str,
  ) -> pd.DataFrame:
    fcst_df = fcst_df.copy()
    cols = ["unique_id", "ds", model_name]
    for q in self.quantiles:
      if q == 0.5:
        col = f"{model_name}"
      else:
        lv = int(100 - 200 * q)
        hi_or_lo = "lo" if lv > 0 else "hi"
        lv = abs(lv)
        col = f"{model_name}-{hi_or_lo}-{lv}"
      q_col = f"{model_name}-q-{q}"
      fcst_df[q_col] = fcst_df[col].values
      cols.append(q_col)
    return fcst_df[cols]

  def evaluate_models(self, models: List[str]) -> pd.DataFrame:
    fcsts_df = []
    times_df = []
    for model in models:
      fcst_method_df = pd.read_csv(
          f"{self.results_dir}/{model}-{self.dataset}-fcst.csv"
      ).set_index(["unique_id", "ds"])
      fcsts_df.append(fcst_method_df)
      time_method_df = pd.read_csv(
          f"{self.results_dir}/{model}-{self.dataset}-time.csv"
      )
      times_df.append(time_method_df)
    fcsts_df = pd.concat(fcsts_df, axis=1).reset_index()
    fcsts_df["ds"] = pd.to_datetime(fcsts_df["ds"])
    times_df = pd.concat(times_df)
    return self.evaluate_from_predictions(
        models=models, fcsts_df=fcsts_df, times_df=times_df
    )

  def evaluate_from_predictions(
      self, models: List[str], fcsts_df: pd.DataFrame, times_df: pd.DataFrame
  ) -> pd.DataFrame:
    test_df = self.test_df
    train_df = self.train_df
    test_df = test_df.merge(fcsts_df, how="left")
    assert test_df.isna().sum().sum() == 0, "merge contains nas"
    # point evaluation
    point_fcsts_cols = ["unique_id", "ds", "y"] + models
    test_df["unique_id"] = test_df["unique_id"].astype(str)
    train_df["unique_id"] = train_df["unique_id"].astype(str)
    mase_seas = partial(mase, seasonality=self.seasonality)
    eval_df = evaluate(
        test_df[point_fcsts_cols],
        train_df=train_df,
        metrics=[smape, mase_seas, mae],
    )
    # probabilistic evaluation
    eval_prob_df = []
    for q in self.quantiles:
      prob_cols = [f"{model}-q-{q}" for model in models]
      eval_q_df = quantile_loss(test_df, models=prob_cols, q=q)
      eval_q_df[prob_cols] = eval_q_df[prob_cols] * self.horizon
      eval_q_df = eval_q_df.rename(columns=dict(zip(prob_cols, models)))
      eval_q_df["metric"] = f"quantile-loss-{q}"
      eval_prob_df.append(eval_q_df)
    eval_prob_df = pd.concat(eval_prob_df)
    eval_prob_df = eval_prob_df.groupby("metric").sum().reset_index()
    total_y = test_df["y"].sum()
    eval_prob_df[models] = eval_prob_df[models] / total_y
    eval_prob_df["metric"] = "scaled_crps"
    eval_df = pd.concat([eval_df, eval_prob_df]).reset_index(drop=True)
    eval_df = eval_df.groupby("metric").mean(numeric_only=True).reset_index()
    eval_df = eval_df.melt(
        id_vars="metric", value_name="value", var_name="model"
    )
    times_df.insert(0, "metric", "time")
    times_df = times_df.rename(columns={"time": "value"})
    eval_df = pd.concat([eval_df, times_df])
    eval_df.insert(0, "dataset", self.dataset)
    eval_df = eval_df.sort_values(["dataset", "metric", "model"])
    eval_df = eval_df.reset_index(drop=True)
    return eval_df


if __name__ == "__main__":
  multiprocessing.set_start_method("spawn")
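
That is the whole handler. A minimal sketch of how the experiment object `exp` is presumably built and used (my assumption, not the original driver script):

```python
# Sketch: construct the handler and pull the train/test frames.
from utils import ExperimentHandler

if __name__ == "__main__":  # required: utils.py selects the "spawn" start method
  exp = ExperimentHandler(dataset="tourism_monthly")
  print(exp.horizon, exp.freq, exp.seasonality)  # 24, monthly freq, 12
  train_df = exp.train_df  # long format: unique_id, ds, y
  test_df = exp.test_df    # the last `horizon` points of every series
```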

Let's analyze the tourism monthly forecast (tourism_monthly).

The related data is available in the resource I uploaded:

TimesFM forecast data source: https://download.csdn.net/download/chenchihwen/90124776 (TimesFM is a pretrained time series foundation model developed by Google Research). In that resource the original raw JSON has already been converted to Excel format.

Let's look at the raw training data in [data.json.gz].
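
A small sketch for peeking at that file (the cache path and JSON-lines layout are assumptions based on gluonts defaults, not taken from the original post):

```python
# Sketch: inspect the raw training file; one JSON object (one series) per line.
import gzip
import json
from pathlib import Path

path = (Path.home() / ".gluonts" / "datasets" / "tourism_monthly"
        / "train" / "data.json.gz")
with gzip.open(path, "rt", encoding="utf-8") as f:
  first = json.loads(f.readline())
print(list(first))  # expect keys like "start", "target", "item_id"
print(first["start"], len(first["target"]))
```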

From tourism_monthly's metadata.json:

{"freq": "M", "target": null, "feat_static_cat": [{"name": "feat_static_cat_0", "cardinality": "366"}], "feat_static_real": [], "feat_dynamic_real": [], "feat_dynamic_cat": [], "prediction_length": 24}

That is: monthly frequency ("M"), 366 series (the cardinality of feat_static_cat_0), and a prediction length of 24 steps. After the code converts the data, the Excel looks like this:

The last training data point for series T1 is 1992/7/31.
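
A quick way to reproduce both the Excel export and this cutoff (a sketch assuming the `utils.py` above is importable and openpyxl is installed for `to_excel`):

```python
# Sketch: export the training frame to Excel and check T1's last timestamp.
from utils import ExperimentHandler

if __name__ == "__main__":  # required: utils.py selects the "spawn" start method
  exp = ExperimentHandler(dataset="tourism_monthly")
  train_df = exp.train_df
  train_df.to_excel("tourism_monthly_train.xlsx", index=False)
  print(train_df.loc[train_df["unique_id"] == "T1", "ds"].max())  # 1992-07-31
```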

Now compare the forecast (the timesfm column) against the actuals (y); the fit looks reasonable:

| unique_id | ds | y | timesfm |
|-----------|------------|--------|---------|
| T1 | 1992/8/31 | 6611.1 | 5975.3 |
| T1 | 1992/9/30 | 4150.2 | 4250.7 |
| T1 | 1992/10/31 | 2841.0 | 2843.6 |
| T1 | 1992/11/30 | 1813.4 | 2144.1 |
| T1 | 1992/12/31 | 2261.1 | 2206.6 |
| T1 | 1993/1/31 | 1873.6 | 1862.6 |
| T1 | 1993/2/28 | 1772.8 | 1961.5 |
| T1 | 1993/3/31 | 2049.6 | 2007.9 |
| T1 | 1993/4/30 | 2932.3 | 2531.2 |
| T1 | 1993/5/31 | 3113.3 | 2908.7 |
| T1 | 1993/6/30 | 3461.5 | 3474.2 |
| T1 | 1993/7/31 | 6265.7 | 5924.7 |
| T1 | 1993/8/31 | 6857.8 | 6098.7 |
| T1 | 1993/9/30 | 4346.1 | 4134.4 |
| T1 | 1993/10/31 | 3154.7 | 2737.1 |
| T1 | 1993/11/30 | 2142.2 | 1909.0 |
| T1 | 1993/12/31 | 2375.7 | 2115.6 |
| T1 | 1994/1/31 | 1981.1 | 1823.7 |
| T1 | 1994/2/28 | 1959.9 | 1850.2 |
| T1 | 1994/3/31 | 2466.3 | 1935.1 |
| T1 | 1994/4/30 | 2851.7 | 2440.0 |
| T1 | 1994/5/31 | 3671.8 | 2753.3 |
| T1 | 1994/6/30 | 3806.8 | 3497.1 |
| T1 | 1994/7/31 | 6995.0 | 5773.2 |
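
As a sanity check, the point errors over exactly these 24 rows (values copied from the table; a quick hand calculation, not the benchmark's `evaluate` output):

```python
# Sketch: point-error check on the T1 window from the table above.
import numpy as np

y = np.array([6611.1, 4150.2, 2841.0, 1813.4, 2261.1, 1873.6, 1772.8, 2049.6,
              2932.3, 3113.3, 3461.5, 6265.7, 6857.8, 4346.1, 3154.7, 2142.2,
              2375.7, 1981.1, 1959.9, 2466.3, 2851.7, 3671.8, 3806.8, 6995.0])
yhat = np.array([5975.3, 4250.7, 2843.6, 2144.1, 2206.6, 1862.6, 1961.5, 2007.9,
                 2531.2, 2908.7, 3474.2, 5924.7, 6098.7, 4134.4, 2737.1, 1909.0,
                 2115.6, 1823.7, 1850.2, 1935.1, 2440.0, 2753.3, 3497.1, 5773.2])
print(f"MAE  = {np.abs(y - yhat).mean():.1f}")
print(f"MAPE = {(np.abs(y - yhat) / np.abs(y)).mean():.3%}")
```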

The raw forecast output actually looks a bit richer: besides the point forecast it carries columns timesfm-q-0.1 through timesfm-q-0.9.

That is because the code sets the default in `ExperimentHandler.__init__`: `quantiles: List[float] = list(np.arange(1, 10) / 10.0)`.
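
As a quick check of what that default expands to (mirroring `_transform_quantiles_to_levels` from the code above):

```python
# Sketch: the default quantile grid and the confidence levels it maps to.
import numpy as np

quantiles = list(np.arange(1, 10) / 10.0)  # the __init__ default: 0.1 ... 0.9
levels = sorted({int(100 - 200 * q) for q in quantiles if q < 0.5})
print(quantiles)  # [0.1, 0.2, ..., 0.9]
print(levels)     # [20, 40, 60, 80]; level 80 pairs the 0.1 and 0.9 quantiles
```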

In Python code for time series analysis and forecasting, `quantiles` typically serves the following purposes:

Probabilistic forecasting and uncertainty quantification:

  1. **Expressing prediction intervals**: In many forecasting tasks a model outputs not just a single point value but an interval that conveys uncertainty. `quantiles` define the boundaries of those intervals: the values at the 0.05, 0.5, and 0.95 quantiles, for example, give a lower bound, the median, and an upper bound. If you forecast a product's future sales, the quantile values tell you the range the sales will most likely fall in (say a 90% interval between the 0.05 and 0.95 quantiles) and the most likely central value (the 0.5 quantile, i.e. the median).

  2. **Evaluating forecast uncertainty**: When assessing a forecasting model, besides point accuracy (e.g. mean squared error), you should check whether its uncertainty estimates are well calibrated. Comparing the intervals implied by the model's `quantiles` with the fraction of actual observations that fall inside them shows how well the model captures uncertainty. For example, if the model claims an interval holds the true value 90% of the time but repeated validation finds only 50% of actuals inside it, the uncertainty estimate is poorly calibrated; see the coverage sketch after this list.
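
A minimal sketch of such a coverage check (my own helper, assuming a merged frame like the `test_df` built in `evaluate_from_predictions`, with actuals in `y` and the timesfm quantile columns alongside):

```python
# Sketch: empirical coverage of the interval spanned by two quantile columns.
import pandas as pd


def empirical_coverage(
    df: pd.DataFrame, lo: str, hi: str, target: str = "y"
) -> float:
  """Fraction of actuals inside [lo, hi]; near 0.8 if q-0.1/q-0.9 is calibrated."""
  inside = (df[target] >= df[lo]) & (df[target] <= df[hi])
  return float(inside.mean())


# e.g. empirical_coverage(test_df, "timesfm-q-0.1", "timesfm-q-0.9")
```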

The timesfm point forecast is the median, i.e. the timesfm-q-0.5 column.
