 
 from arch.compat.python import add_metaclass, range
 
-from abc import abstractmethod
 import itertools
+from abc import abstractmethod
 from warnings import warn
 
 import numpy as np
@@ -948,8 +948,7 @@ def starting_values(self, resids):
             if q > 0:
                 sv[1 + p + o:1 + p + o + q] = agb / q
             svs.append(sv)
-            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast,
-                                                   var_bounds)
+            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
         loc = np.argmax(llfs)
 
         return svs[int(loc)]
@@ -2702,3 +2701,173 @@ def _simulation_forecast(self, parameters, resids, backcast, var_bounds, start,
 
         forecasts[:start] = np.nan
         return VarianceForecast(forecasts, paths, shocks)
+
+
+class VarianceTargetingGARCH(GARCH):
+    r"""
+    GARCH and related model estimation with variance targeting
+
+    The following models can be specified using VarianceTargetingGARCH:
+        * ARCH(p)
+        * GARCH(p,q)
+        * GJR-GARCH(p,o,q)
+
+    Parameters
+    ----------
+    p : int
+        Order of the symmetric innovation
+    o : int
+        Order of the asymmetric innovation
+    q : int
+        Order of the lagged (transformed) conditional variance
+
+    Attributes
+    ----------
+    num_params : int
+        The number of parameters in the model
+
+    Examples
+    --------
+    >>> from arch.univariate import VarianceTargetingGARCH
+
+    Standard GARCH(1,1) with targeting
+
+    >>> vt = VarianceTargetingGARCH(p=1, q=1)
+
+    Asymmetric GJR-GARCH process with targeting
+
+    >>> vt = VarianceTargetingGARCH(p=1, o=1, q=1)
+
+    Notes
+    -----
+    In this class of processes, the variance dynamics are
+
+    .. math::
+
+        \sigma_{t}^{\lambda}=
+        \bar{\omega}\left(1-\sum_{i=1}^{p}\alpha_{i}
+        -\frac{1}{2}\sum_{j=1}^{o}\gamma_{j}
+        -\sum_{k=1}^{q}\beta_{k}\right)
+        +\sum_{i=1}^{p}\alpha_{i}\left|\epsilon_{t-i}\right|^{\lambda}
+        +\sum_{j=1}^{o}\gamma_{j}\left|\epsilon_{t-j}\right|^{\lambda}
+        I\left[\epsilon_{t-j}<0\right]+\sum_{k=1}^{q}\beta_{k}\sigma_{t-k}^{\lambda}
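+
+    where :math:`\bar{\omega}` is the variance target.  In estimation the
+    target is set to the sample mean of the squared residuals, so the
+    intercept is implied by the target rather than estimated.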
+    """
+
+    def __init__(self, p=1, o=0, q=1):
+        super(VarianceTargetingGARCH, self).__init__()
+        self.p = int(p)
+        self.o = int(o)
+        self.q = int(q)
+        self.num_params = p + o + q
+        if p < 0 or o < 0 or q < 0:
+            raise ValueError('All lag lengths must be non-negative')
+        if p == 0 and o == 0:
+            raise ValueError('One of p or o must be strictly positive')
+        self.name = 'Variance Targeting ' + self._name()
+
+    def bounds(self, resids):
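+        # omega is implied by the variance target rather than estimated,
+        # so drop the parent model's bound on the intercept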
+        bounds = super(VarianceTargetingGARCH, self).bounds(resids)
+        return bounds[1:]
+
+    def constraints(self):
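+        # Drop the non-negativity constraint on omega (first row) along
+        # with its column, leaving constraints on the dynamics only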
+        a, b = super(VarianceTargetingGARCH, self).constraints()
+        a = a[1:, 1:]
+        b = b[1:]
+        return a, b
+
+    def compute_variance(self, parameters, resids, sigma2, backcast,
+                         var_bounds):
+
+        # Add target
+        target = (resids ** 2).mean()
+        abar = parameters[:self.p].sum()
+        gbar = parameters[self.p:self.p + self.o].sum()
+        bbar = parameters[self.p + self.o:].sum()
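+        # Implied intercept: omega = target * (1 - alpha - gamma / 2 - beta),
+        # floored at machine epsilon to keep the variance recursion positive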
+        omega = target * (1 - abar - 0.5 * gbar - bbar)
+        omega = max(omega, np.finfo(np.double).eps)
+        parameters = np.r_[omega, parameters]
+
+        fresids = np.abs(resids) ** 2.0
+        sresids = np.sign(resids)
+
+        p, o, q = self.p, self.o, self.q
+        nobs = resids.shape[0]
+
+        garch_recursion(parameters, fresids, sresids, sigma2, p, o, q, nobs,
+                        backcast, var_bounds)
+        return sigma2
+
+    def simulate(self, parameters, nobs, rng, burn=500, initial_value=None):
+        if initial_value is None:
+            initial_value = parameters[0]
+
+        parameters = self._targeting_to_standard_garch(parameters)
+        return super(VarianceTargetingGARCH, self).simulate(parameters, nobs, rng, burn=burn,
+                                                            initial_value=initial_value)
+
+    def _targeting_to_standard_garch(self, parameters):
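+        # Convert targeting parameters to the standard GARCH
+        # parameterization by prepending the implied constant term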
+        p, o = self.p, self.o
+        abar = parameters[:p].sum()
+        gbar = parameters[p:p + o].sum()
+        bbar = parameters[p + o:].sum()
+        const = parameters[0] * (1 - abar - 0.5 * gbar - bbar)
+        return np.r_[const, parameters]
+
+    def parameter_names(self):
+        return _common_names(self.p, self.o, self.q)[1:]
+
+    def _analytic_forecast(self, parameters, resids, backcast, var_bounds, start, horizon):
+        parameters = self._targeting_to_standard_garch(parameters)
+        return super(VarianceTargetingGARCH, self)._analytic_forecast(parameters, resids,
+                                                                      backcast, var_bounds,
+                                                                      start, horizon)
+
+    def _simulation_forecast(self, parameters, resids, backcast, var_bounds, start, horizon,
+                             simulations, rng):
+        parameters = self._targeting_to_standard_garch(parameters)
+        return super(VarianceTargetingGARCH, self)._simulation_forecast(parameters, resids,
+                                                                        backcast, var_bounds,
+                                                                        start, horizon,
+                                                                        simulations, rng)
+
+    def starting_values(self, resids):
+        p, o, q = self.p, self.o, self.q
+        alphas = [.01, .05, .1, .2]
+        gammas = alphas
+        abg = [.5, .7, .9, .98]
+        abgs = list(itertools.product(*[alphas, gammas, abg]))
+
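+        # Grid search: evaluate the Gaussian log-likelihood at each candidate
+        # and return the starting values with the highest likelihood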
+        svs = []
+        var_bounds = self.variance_bounds(resids)
+        backcast = self.backcast(resids)
+        llfs = np.zeros(len(abgs))
+        for i, values in enumerate(abgs):
+            alpha, gamma, agb = values
+            sv = np.ones(p + o + q)
+            if p > 0:
+                sv[:p] = alpha / p
+                agb -= alpha
+            if o > 0:
+                sv[p: p + o] = gamma / o
+                agb -= gamma / 2.0
+            if q > 0:
+                sv[p + o:] = agb / q
+            svs.append(sv)
+            llfs[i] = self._gaussian_loglikelihood(sv, resids, backcast, var_bounds)
+        loc = np.argmax(llfs)
+
+        return svs[int(loc)]