---
title: NumPy Evaluation
keywords: fastai
sidebar: home_sidebar
summary: "NumPy evaluation metrics for forecast errors, tested for numerical equivalence against their PyTorch counterparts."
---

The most important evaluation signal is the forecast error, which is the difference between the observed value $y_{\tau}$ and the prediction $\hat{y}_{\tau}$ at time $\tau$:

$$e_{\tau} = y_{\tau}-\hat{y}_{\tau} \qquad \qquad \tau \in \{t+1,\dots,t+H\}.$$

The forecast accuracy summarizes the forecast errors with different metrics:

1. Scale-dependent errors - These metrics are on the same scale as the data (e.g., MAE, MSE, RMSE).
2. Percentage errors - These metrics are unit-free and express errors as a proportion of the observed values (e.g., MAPE, SMAPE).
3. Scaled errors - These metrics measure errors relative to a baseline, such as a seasonal naive forecast or another model (e.g., MASE, RMAE).
4. Probabilistic errors - These metrics evaluate quantile forecasts (e.g., quantile loss, MQ loss).
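To make the definitions concrete, here is a minimal NumPy sketch (with made-up toy values) that computes the forecast errors $e_{\tau}$ over a horizon of $H=3$ and summarizes them by hand as MAE (scale-dependent) and MAPE (percentage). The library functions imported below compute the same quantities, typically with extra options such as weights or masks.

```python
import numpy as np

# Toy example: observed values and forecasts over a horizon H = 3
y     = np.array([10.0, 12.0, 14.0])   # y_tau
y_hat = np.array([11.0, 11.0, 15.0])   # y_hat_tau

e = y - y_hat                                 # forecast errors e_tau = [-1, 1, -1]

mae_value  = np.mean(np.abs(e))               # scale-dependent error: 1.0
mape_value = np.mean(np.abs(e) / np.abs(y))   # percentage error: ~0.085
print(mae_value, mape_value)
```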
```python
import unittest

import numpy as np
import torch as t

from neuralforecast.losses.pytorch import (
    MAELoss, MSELoss, RMSELoss,   # unscaled errors
    MAPELoss, SMAPELoss,          # percentage errors
    MASELoss, RMAELoss,           # scaled errors
    QuantileLoss, MQLoss          # probabilistic errors
)
from neuralforecast.losses.numpy import (
    mae, mse, rmse,               # unscaled errors
    mape, smape,                  # percentage errors
    mase, rmae,                   # scaled errors
    quantile_loss, mqloss         # probabilistic errors
)
class TestLoss(unittest.TestCase):

    def setUp(self):
        self.num_quantiles = np.random.randint(3, 10)
        self.first_num = np.random.randint(1, 300)
        self.second_num = np.random.randint(1, 300)
        self.y = t.rand(self.first_num, self.second_num)
        self.y_hat = t.rand(self.first_num, self.second_num)
        self.y_hat2 = t.rand(self.first_num, self.second_num)
        self.y_hat_quantile = t.rand(self.first_num, self.second_num, self.num_quantiles)
        self.quantiles = t.rand(self.num_quantiles)
        self.q_float = np.random.random_sample()

    def test_mae(self):
        mae_numpy = mae(self.y, self.y_hat)
        mae_pytorch = MAELoss(self.y, self.y_hat).numpy()
        self.assertAlmostEqual(mae_numpy, mae_pytorch, places=6)

    def test_mse(self):
        mse_numpy = mse(self.y, self.y_hat)
        mse_pytorch = MSELoss(self.y, self.y_hat).numpy()
        self.assertAlmostEqual(mse_numpy, mse_pytorch, places=6)

    def test_rmse(self):
        rmse_numpy = rmse(self.y, self.y_hat)
        rmse_pytorch = RMSELoss(self.y, self.y_hat).numpy()
        self.assertAlmostEqual(rmse_numpy, rmse_pytorch, places=6)
    def test_mape(self):
        mape_numpy = mape(self.y, self.y_hat)
        mape_pytorch = MAPELoss(self.y, self.y_hat).numpy()
        self.assertAlmostEqual(mape_numpy, mape_pytorch, places=6)
    def test_smape(self):
        smape_numpy = smape(self.y, self.y_hat)
        smape_pytorch = SMAPELoss(self.y, self.y_hat).numpy()
        self.assertAlmostEqual(smape_numpy, smape_pytorch, places=4)

    def test_mase(self):
        y_insample = t.rand(self.first_num, self.second_num)
        # Seasonality by frequency: Hourly 24, Daily 7, Weekly 52,
        # Monthly 12, Quarterly 4, Yearly 1
        seasonality = 24
        mase_numpy = mase(self.y, self.y_hat, y_insample, seasonality)
        mase_pytorch = MASELoss(self.y, self.y_hat, y_insample, seasonality).numpy()
        self.assertAlmostEqual(mase_numpy, mase_pytorch, places=2)

    def test_rmae(self):
        rmae_numpy = rmae(self.y, self.y_hat, self.y_hat2)
        rmae_pytorch = RMAELoss(self.y, self.y_hat, self.y_hat2).numpy()
        self.assertAlmostEqual(rmae_numpy, rmae_pytorch, places=4)

    def test_quantile(self):
        quantile_numpy = quantile_loss(self.y, self.y_hat, q=self.q_float)
        quantile_pytorch = QuantileLoss(self.y, self.y_hat, q=self.q_float).numpy()
        self.assertAlmostEqual(quantile_numpy, quantile_pytorch, places=6)

    def test_mqloss(self):
        weights = np.ones_like(self.y)
        mql_np_w = mqloss(self.y, self.y_hat_quantile, self.quantiles, weights=weights)
        mql_np_default_w = mqloss(self.y, self.y_hat_quantile, self.quantiles)
        mql_py_w = MQLoss(y=self.y,
                          y_hat=self.y_hat_quantile,
                          quantiles=self.quantiles,
                          mask=t.Tensor(weights)).numpy()
        mql_py_default_w = MQLoss(y=self.y,
                                  y_hat=self.y_hat_quantile,
                                  quantiles=self.quantiles).numpy()
        weights[0, :] = 0
        mql_np_new_w = mqloss(self.y, self.y_hat_quantile, self.quantiles, weights=weights)
        mql_py_new_w = MQLoss(y=self.y,
                              y_hat=self.y_hat_quantile,
                              quantiles=self.quantiles,
                              mask=t.Tensor(weights)).numpy()
        self.assertAlmostEqual(mql_np_w, mql_np_default_w)
        self.assertAlmostEqual(mql_py_w, mql_py_default_w)
        self.assertAlmostEqual(mql_np_new_w, mql_py_new_w)


unittest.main(argv=[''], verbosity=2, exit=False)
```
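For a quick sanity check outside the test harness, the same NumPy/PyTorch comparison can be run directly on a small example. This is a minimal sketch reusing the functions imported above with the same call signatures as in the tests; the toy values are illustrative.

```python
# Spot check of NumPy vs. PyTorch parity for a single metric
y     = t.tensor([[1.0, 2.0, 3.0]])
y_hat = t.tensor([[1.5, 1.5, 2.0]])

print(mae(y, y_hat))               # NumPy implementation, ~0.6667
print(MAELoss(y, y_hat).numpy())   # PyTorch implementation, ~0.6667
```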