Skip to content

Commit

Permalink
Working GARCH model.
Browse files Browse the repository at this point in the history
  • Loading branch information
Sinbad-The-Sailor committed Aug 4, 2023
1 parent b429ded commit b822941
Show file tree
Hide file tree
Showing 9 changed files with 409 additions and 145 deletions.
39 changes: 38 additions & 1 deletion src2/autograd.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
Expand All @@ -98,6 +98,43 @@
" method=\"BFGS\",\n",
" jac=True) # NB: we will compute the jacobian"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
" fun: -72.71104166630812\n",
" hess_inv: array([[3.43587575e-06, 4.82986207e-12],\n",
" [4.82986207e-12, 9.07303824e-05]])\n",
" jac: array([2.63101469e-08, 3.41229494e-06])\n",
" message: 'Optimization terminated successfully.'\n",
" nfev: 21\n",
" nit: 9\n",
" njev: 21\n",
" status: 0\n",
" success: True\n",
" x: array([-0.02342842, 0.93132647])"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"res"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
Expand Down
8 changes: 5 additions & 3 deletions src2/models/ar.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

from models.model import Model
from utils.config import MAXIMUM_AR_ORDER
from utils.exceptions import ParameterError, StationarityError
from utils.exceptions import StationarityError


class AR(Model):
Expand All @@ -32,7 +32,9 @@ class AR(Model):
* Model significance can be ascertained through regression like F-test.
REFERENCES:
*
* Time Series Analysis by Hamilton.
* PACF Statistical Cutoff:
http://sfb649.wiwi.hu-berlin.de/fedc_homepage/xplore/tutorials/xegbohtmlnode39.html
"""

def __init__(self, time_series: pd.Series):
Expand Down Expand Up @@ -69,7 +71,7 @@ def _log_likelihood(self) -> torch.Tensor:
variance = torch.square(self._sigma)
pi = torch.tensor(torch.pi)

return - (self._number_of_observations - self._order) * (torch.log(pi) + torch.log(variance)) - 1 / (2 * variance) * torch.sum(squared_difference)
return - (self._number_of_observations - self._order) / 2 * (torch.log(pi) + torch.log(variance)) - 1 / (2 * variance) * torch.sum(squared_difference)


def calibrate(self):
Expand Down
139 changes: 104 additions & 35 deletions src2/models/garch.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,47 +4,69 @@
import pandas as pd

from scipy.optimize import minimize
from torch.distributions import Normal

from models.model import Model
from utils.config import INITIAL_VARIANCE_GARCH_OBSERVATIONS, INITIAL_GARCH_PARAMETERS
from utils.exceptions import ParameterError

class GARCH(Model):

def __init__(self, time_series: pd.Series):
    """Initialize the GARCH model on a time series (delegates to Model)."""
    super().__init__(time_series)

@property
def parameters(self) -> tuple[float, float, float]:
    """Calibrated GARCH parameters.

    Returns:
        tuple[float, float, float]: (alpha, beta, long_run_variance).

    Propagates whatever ``_check_calibration`` raises when the model
    has not been calibrated yet.
    """
    # Fixed annotation: `tuple[float, 3]` is not a valid typing form —
    # a fixed-length tuple must list each member type.
    self._check_calibration()

    optimal_parameters = self._optimal_parameters
    alpha = optimal_parameters[0]
    beta = optimal_parameters[1]
    long_run_variance = self._long_run_variance

    return alpha, beta, long_run_variance

@property
def _number_of_parameters(self) -> int:
    # Count of reported parameters — the length of the `parameters`
    # tuple (alpha, beta, long-run variance), i.e. 3.
    return len(self.parameters)

@property
def _optimal_parameters(self) -> np.ndarray:
    """Optimizer solution mapped back to the original parameter space.

    Un-conditions ``self._solution.x`` via ``_uncondition_parameters``
    and returns the result as a NumPy array.
    """
    # Fixed annotation: `np.array` is a factory function; the returned
    # type is `np.ndarray`.
    return self._uncondition_parameters(parameters=torch.tensor(self._solution.x)).numpy()

@property
def _inital_solution(self) -> np.array:
    """Optimizer starting point: the preconditioned initial GARCH parameters."""
    # NOTE(review): name is misspelled ("inital") but kept — other code
    # in this file references it by this exact name.
    initial_parameters = torch.tensor(INITIAL_GARCH_PARAMETERS)
    conditioned = self._precondition_parameters(parameters=initial_parameters)
    return np.array(conditioned)

@property
def _log_likelihood(self) -> torch.Tensor:
    """Gaussian log-likelihood recovered from the minimized cost.

    The optimizer minimizes sum(log(var) + r^2/var); the full
    log-likelihood adds back the -(n/2)*log(2*pi) normalization term.
    """
    # Fixed annotation: `torch.tensor` is a factory function; the type
    # is `torch.Tensor`.
    optimal_cost = torch.tensor(self._solution.fun)
    pi = torch.tensor(torch.pi)
    number_of_observations = torch.tensor(self._number_of_observations)
    return -(1 / 2) * (number_of_observations * torch.log(2 * pi) + optimal_cost)

def calibrate(self):
self._initiate_parameters()
self._inital_solution = np.array(self._precondition_parameters(parameters=torch.tensor(INITIAL_GARCH_PARAMETERS)))
solution = minimize(self._cost_function,
self._inital_solution,
method="BFGS",
jac=True)
self._solution = solution
self._solve_maximum_likelihood()
self._sanity_check()
self._calibrated = True

def transform_to_true(self, uniform_sample: torch.Tensor) -> torch.Tensor:
...
self._check_calibration()

def transform_to_uniform(self):
...
self._check_calibration()

parameters = torch.Tensor(self._solution.x)
variance = self._compute_variance(parameters=parameters)
volatility = torch.sqrt(variance)
returns = self._data
residuals = returns / volatility

return Normal(0, 1).cdf(residuals)

def _compute_variance(self, parameters: torch.Tensor) -> torch.Tensor:
    """Build the conditional variance path of the GARCH recursion.

    Args:
        parameters: conditioned parameter vector of length 2; mapped to
            (mu_corr, mu_ewma) via the double-exponential transform.

    Returns:
        torch.Tensor: variance at each observation index.
    """
    initial_variance = self._initial_variance
    # NOTE(review): the path is accumulated in a NumPy buffer, so no
    # autograd graph is built through this recursion — confirm intended.
    variance = np.zeros(self._number_of_observations)
    # exp(-exp(-z)) maps unconstrained z into (0, 1).
    mu_corr = torch.exp(-torch.exp(-parameters[0]))
    mu_ewma = torch.exp(-torch.exp(-parameters[1]))

    for i in range(self._number_of_observations):
        if i == 0:
            # Seed the recursion with the precomputed initial variance.
            variance[i] = initial_variance
        else:
            # Mean-reverting EWMA update around the long-run variance.
            variance[i] = self._long_run_variance + mu_corr * (mu_ewma * variance[i - 1]
                                                               + (1 - mu_ewma) * self._squared_returns[i - 1]
                                                               - self._long_run_variance
                                                               )

    return torch.tensor(variance)

def _cost_function(self, parameters: np.array) -> tuple[float, 2]:
"""
Expand All @@ -57,11 +79,22 @@ def _cost_function(self, parameters: np.array) -> tuple[float, 2]:
tuple(float, float): log loss value and the corresponding gradient.
"""
parameters = torch.tensor(parameters, requires_grad=True)
variance = self._compute_variance(parameters=parameters)
log_loss = torch.sum(torch.log(variance) + self._squared_returns / variance)
log_loss.backward()
print(log_loss, parameters.grad)
mu = torch.exp(-torch.exp(-parameters))
mu_corr = mu[0]
mu_ewma = mu[1]

log_loss = torch.tensor(0.0)
for i in range(self._number_of_observations):
if i == 0:
variance = self._initial_variance
else:
variance = self._long_run_variance + mu_corr * (mu_ewma * variance
+ (1 - mu_ewma) * self._squared_returns[i - 1].detach()
- self._long_run_variance
)
log_loss = log_loss + torch.log(variance) + self._squared_returns[i].detach() / variance

log_loss.backward()
return log_loss.data.cpu().numpy(), parameters.grad.data.cpu().numpy()

def _initiate_parameters(self):
Expand All @@ -70,18 +103,54 @@ def _initiate_parameters(self):
self._initial_variance = self._compute_inital_variance()
self._long_run_variance = torch.square(torch.std(self._data))

def _solve_maximum_likelihood(self):
    """Minimize the negative log-likelihood cost and store the scipy result."""
    self._solution = minimize(
        self._cost_function,
        self._inital_solution,
        method="L-BFGS-B",
        jac=True,
    )

def _compute_inital_variance(self) -> torch.Tensor:
    """Initial variance seed for the GARCH recursion.

    Uses the sample variance of the first
    INITIAL_VARIANCE_GARCH_OBSERVATIONS points when enough data exists;
    otherwise falls back to the first squared return.
    """
    window = INITIAL_VARIANCE_GARCH_OBSERVATIONS
    if self._number_of_observations <= window:
        return self._initial_squared_returns
    return torch.square(torch.std(self._data[:window]))

@property
def _number_of_parameters(self):
return super()._number_of_parameters
def _compute_variance(self, parameters: torch.Tensor) -> torch.Tensor:
initial_variance = self._initial_variance
variance = torch.zeros(self._number_of_observations)
mu_corr = torch.exp(-torch.exp(-parameters[0]))
mu_ewma = torch.exp(-torch.exp(-parameters[1]))

@property
def _log_likelihood(self):
return super()._log_likelihood
for i in range(self._number_of_observations):
if i == 0:
variance[i] = initial_variance
else:
variance[i] = self._long_run_variance + mu_corr * (mu_ewma * variance[i - 1]
+ (1 - mu_ewma) * self._squared_returns[i - 1]
- self._long_run_variance
)
return variance

def _sanity_check(self):
    """Validate the calibration result.

    Raises:
        ParameterError: when the optimizer did not converge or the
            recovered parameters fail the solution check.
    """
    parameter_check = self._check_parameters()
    solution_check = self._check_solution()

    if not parameter_check:
        # TODO: log optimizer convergence failure.
        ...

    if not solution_check:
        # TODO: log solution-check failure (parameter sum >= 1).
        ...

    if not parameter_check or not solution_check:
        # Fixed typos in the error message ("asceratined succesfully").
        raise ParameterError("Parameters could not be ascertained successfully.")

def _check_parameters(self) -> bool:
    # True when the scipy optimizer reported successful convergence.
    return self._solution.success

def _check_solution(self) -> bool:
    # Stationarity-style check on the unconditioned parameters: their
    # sum (presumably alpha + beta — see _uncondition_parameters) must
    # be strictly below 1.
    return np.sum(self._optimal_parameters) < 1

@staticmethod
def _precondition_parameters(parameters: torch.Tensor) -> torch.Tensor:
Expand All @@ -103,7 +172,7 @@ def _precondition_parameters(parameters: torch.Tensor) -> torch.Tensor:
return torch.tensor([z_corr, z_ewma])

@staticmethod
def _uncondition_parameters(params: torch.Tensor) -> torch.Tensor:
def _uncondition_parameters(parameters: torch.Tensor) -> torch.Tensor:
"""
Unconditioning to recover the original parameters from the transformed parameters.
Expand All @@ -113,8 +182,8 @@ def _uncondition_parameters(params: torch.Tensor) -> torch.Tensor:
Returns:
torch.Tensor: GARCH parameters.
"""
mu_corr = torch.exp(-torch.exp(-params[0]))
mu_ewma = torch.exp(-torch.exp(-params[1]))
mu_corr = torch.exp(-torch.exp(-parameters[0]))
mu_ewma = torch.exp(-torch.exp(-parameters[1]))

alpha = mu_corr * (1 - mu_ewma)
beta = mu_corr * mu_ewma
Expand Down
2 changes: 1 addition & 1 deletion src2/models/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,10 +80,10 @@ def bic(self) -> torch.Tensor:
log_likelihood = self._log_likelihood
number_of_parameters = torch.tensor(self._number_of_parameters)
number_of_observations = torch.tensor(self._number_of_observations)
pi = torch.tensor(torch.pi)

return number_of_parameters * torch.log(number_of_observations) - 2 * log_likelihood


def _check_calibration(self):
"""
Checks if successful calibration has been made.
Expand Down
11 changes: 0 additions & 11 deletions src2/models/model_factory.py

This file was deleted.

35 changes: 12 additions & 23 deletions src2/run.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -2,22 +2,9 @@
"cells": [
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 1,
"metadata": {},
"outputs": [
{
"ename": "ImportError",
"evalue": "cannot import name 'INITIAL_VARIANCE_GARCH_OBSERVATIONS' from 'utils.config' (/Users/axelnilsson/Desktop/Abacus/src2/utils/config.py)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m/var/folders/d7/rpl_88f12ln2z1s2h4m9ybjm0000gn/T/ipykernel_43843/1386310877.py\u001b[0m in \u001b[0;36m<cell line: 11>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata_handler\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mYahooDataHandler\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mmodels\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mar\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mAR\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mmodels\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgarch\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mGARCH\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Desktop/Abacus/src2/models/garch.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mmodels\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconfig\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mINITIAL_VARIANCE_GARCH_OBSERVATIONS\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mINITIAL_GARCH_PARAMETERS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;32mclass\u001b[0m \u001b[0mGARCH\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mModel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mImportError\u001b[0m: cannot import name 'INITIAL_VARIANCE_GARCH_OBSERVATIONS' from 'utils.config' (/Users/axelnilsson/Desktop/Abacus/src2/utils/config.py)"
]
}
],
"outputs": [],
"source": [
"import torch\n",
"import pandas as pd\n",
Expand All @@ -36,14 +23,9 @@
"plt.style.use(['science', 'notebook', 'grid'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": []
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
Expand All @@ -52,7 +34,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"metadata": {},
"outputs": [
{
Expand Down Expand Up @@ -91,7 +73,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
Expand All @@ -100,6 +82,13 @@
" data = risk_factor.price_history.log_returns"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 8,
Expand Down
Loading

0 comments on commit b822941

Please sign in to comment.