@@ -1390,7 +1390,7 @@ class LKJCholeskyCov:

     Examples
     --------
-    .. code:: python
+    .. code-block:: python

         with pm.Model() as model:
             # Note that we access the distribution for the standard
@@ -1682,28 +1682,25 @@ class LKJCorr:

     Examples
     --------
-    .. code:: python
+    .. code-block:: python

         with pm.Model() as model:
-
             # Define the vector of fixed standard deviations
-            sds = 3*np.ones(10)
+            sds = 3 * np.ones(10)

-            corr = pm.LKJCorr(
-                'corr', eta=4, n=10, return_matrix=True
-            )
+            corr = pm.LKJCorr("corr", eta=4, n=10, return_matrix=True)

             # Define a new MvNormal with the given correlation matrix
-            vals = sds*pm.MvNormal('vals', mu=np.zeros(10), cov=corr, shape=10)
+            vals = sds * pm.MvNormal("vals", mu=np.zeros(10), cov=corr, shape=10)

             # Or transform an uncorrelated normal distribution:
-            vals_raw = pm.Normal('vals_raw', shape=10)
+            vals_raw = pm.Normal("vals_raw", shape=10)
             chol = pt.linalg.cholesky(corr)
-            vals = sds*pt.dot(chol,vals_raw)
+            vals = sds * pt.dot(chol, vals_raw)

             # The matrix is internally still sampled as a upper triangular vector
             # If you want access to it in matrix form in the trace, add
-            pm.Deterministic('corr_mat', corr)
+            pm.Deterministic("corr_mat", corr)


     References
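
A side note on why the example's scaling works, since the docstring states it without justification: if `z` is drawn with correlation matrix `R`, then `sds * z` has covariance `diag(sds) @ R @ diag(sds)`, i.e. `np.outer(sds, sds) * R`. A minimal numpy sketch of that fact; the names `R`, `sds` and `z` are illustrative and not part of the diff:

.. code-block:: python

    import numpy as np

    # Illustrative check (not part of the docstring under review):
    # scaling a correlated standard-normal draw by fixed standard
    # deviations rescales the covariance by the outer product of sds.
    rng = np.random.default_rng(0)
    R = np.array([[1.0, 0.3], [0.3, 1.0]])
    sds = np.array([2.0, 5.0])
    z = rng.multivariate_normal(np.zeros(2), R, size=200_000)
    emp_cov = np.cov((sds * z).T)
    assert np.allclose(emp_cov, np.outer(sds, sds) * R, rtol=0.05)
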
@@ -1797,7 +1794,7 @@ class MatrixNormal(Continuous):
     Define a matrixvariate normal variable for given row and column covariance
     matrices.

-    .. code:: python
+    .. code-block:: python

         import pymc as pm
         import numpy as np
@@ -1820,16 +1817,20 @@ class MatrixNormal(Continuous):
     constant, both the covariance and scaling could be learned as follows
     (see the docstring of `LKJCholeskyCov` for more information about this)

-    .. code:: python
+    .. code-block:: python

         # Setup data
-        true_colcov = np.array([[1.0, 0.5, 0.1],
-                                [0.5, 1.0, 0.2],
-                                [0.1, 0.2, 1.0]])
+        true_colcov = np.array(
+            [
+                [1.0, 0.5, 0.1],
+                [0.5, 1.0, 0.2],
+                [0.1, 0.2, 1.0],
+            ]
+        )
         m = 3
         n = true_colcov.shape[0]
         true_scale = 3
-        true_rowcov = np.diag([true_scale**(2*i) for i in range(m)])
+        true_rowcov = np.diag([true_scale ** (2 * i) for i in range(m)])
         mu = np.zeros((m, n))
         true_kron = np.kron(true_rowcov, true_colcov)
         data = np.random.multivariate_normal(mu.flatten(), true_kron)
@@ -1838,13 +1839,12 @@ class MatrixNormal(Continuous):
         with pm.Model() as model:
             # Setup right cholesky matrix
             sd_dist = pm.HalfCauchy.dist(beta=2.5, shape=3)
-            colchol,_, _ = pm.LKJCholeskyCov('colchol', n=3, eta=2,sd_dist=sd_dist)
+            colchol, _, _ = pm.LKJCholeskyCov("colchol", n=3, eta=2, sd_dist=sd_dist)
             # Setup left covariance matrix
-            scale = pm.LogNormal('scale', mu=np.log(true_scale), sigma=0.5)
-            rowcov = pt.diag([scale**(2*i) for i in range(m)])
+            scale = pm.LogNormal("scale", mu=np.log(true_scale), sigma=0.5)
+            rowcov = pt.diag([scale ** (2 * i) for i in range(m)])

-            vals = pm.MatrixNormal('vals', mu=mu, colchol=colchol, rowcov=rowcov,
-                                   observed=data)
+            vals = pm.MatrixNormal("vals", mu=mu, colchol=colchol, rowcov=rowcov, observed=data)
     """

     rv_op = matrixnormal
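
The example's `true_kron = np.kron(true_rowcov, true_colcov)` line encodes the identity that makes MatrixNormal work: the row-major flattening of a matrix-normal draw has covariance `kron(rowcov, colcov)`. A minimal numpy sketch of that identity; the names `A`, `B`, `Z` and `X` are my own, not part of the diff:

.. code-block:: python

    import numpy as np

    # Illustrative check: X = A @ Z @ B.T with i.i.d. standard-normal Z
    # is matrix-normal with rowcov = A @ A.T and colcov = B @ B.T, and
    # the row-major vec of X has covariance kron(rowcov, colcov).
    rng = np.random.default_rng(0)
    rowcov = np.diag([1.0, 9.0])
    colcov = np.array([[1.0, 0.5], [0.5, 1.0]])
    A, B = np.linalg.cholesky(rowcov), np.linalg.cholesky(colcov)
    Z = rng.standard_normal((100_000, 2, 2))
    X = A @ Z @ B.T                    # batch of matrix-normal draws
    emp = np.cov(X.reshape(-1, 4).T)   # covariance of row-major vec(X)
    assert np.allclose(emp, np.kron(rowcov, colcov), atol=0.2)
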
@@ -2010,30 +2010,30 @@ class KroneckerNormal(Continuous):
     Define a multivariate normal variable with a covariance
     :math:`K = K_1 \otimes K_2`

-    .. code:: python
+    .. code-block:: python

-        K1 = np.array([[1., 0.5], [0.5, 2]])
-        K2 = np.array([[1., 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
+        K1 = np.array([[1.0, 0.5], [0.5, 2]])
+        K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
         covs = [K1, K2]
         N = 6
         mu = np.zeros(N)
         with pm.Model() as model:
-            vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, shape=N)
+            vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, shape=N)

     Efficiency gains are made by cholesky decomposing :math:`K_1` and
     :math:`K_2` individually rather than the larger :math:`K` matrix. Although
     only two matrices :math:`K_1` and :math:`K_2` are shown here, an arbitrary
     number of submatrices can be combined in this way. Choleskys and
     eigendecompositions can be provided instead

-    .. code:: python
+    .. code-block:: python

         chols = [np.linalg.cholesky(Ki) for Ki in covs]
         evds = [np.linalg.eigh(Ki) for Ki in covs]
         with pm.Model() as model:
-            vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, shape=N)
+            vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, shape=N)
             # or
-            vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, shape=N)
+            vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, shape=N)

     neither of which will be converted. Diagonal noise can also be added to
     the covariance matrix, :math:`K = K_1 \otimes K_2 + \sigma^2 I_N`.
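
The efficiency gain described above rests on the identity `chol(K1 ⊗ K2) = chol(K1) ⊗ chol(K2)`, so the full `K` never has to be factored directly. A minimal numpy check, reusing the `K1` and `K2` from the example; the check itself is illustrative and not part of the PR:

.. code-block:: python

    import numpy as np

    # Illustrative check: the Cholesky factor of a Kronecker product is
    # the Kronecker product of the per-factor Cholesky factors.
    K1 = np.array([[1.0, 0.5], [0.5, 2]])
    K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
    L1, L2 = np.linalg.cholesky(K1), np.linalg.cholesky(K2)
    assert np.allclose(np.kron(L1, L2), np.linalg.cholesky(np.kron(K1, K2)))
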
@@ -2042,13 +2042,13 @@ class KroneckerNormal(Continuous):
     utilizing eigendecompositons of the submatrices behind the scenes [1].
     Thus,

-    .. code:: python
+    .. code-block:: python

         sigma = 0.1
         with pm.Model() as noise_model:
-            vals = pm.KroneckerNormal('vals', mu=mu, covs=covs, sigma=sigma, shape=N)
-            vals2 = pm.KroneckerNormal('vals2', mu=mu, chols=chols, sigma=sigma, shape=N)
-            vals3 = pm.KroneckerNormal('vals3', mu=mu, evds=evds, sigma=sigma, shape=N)
+            vals = pm.KroneckerNormal("vals", mu=mu, covs=covs, sigma=sigma, shape=N)
+            vals2 = pm.KroneckerNormal("vals2", mu=mu, chols=chols, sigma=sigma, shape=N)
+            vals3 = pm.KroneckerNormal("vals3", mu=mu, evds=evds, sigma=sigma, shape=N)

     are identical, with `covs` and `chols` each converted to
     eigendecompositions.
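
The "behind the scenes" eigendecomposition trick can be made concrete in a few lines of numpy: eigendecomposing `K1` and `K2` separately yields an exact eigendecomposition of `K = K1 ⊗ K2 + sigma**2 * I`, which is why adding diagonal noise costs essentially nothing. A minimal sketch, illustrative only and not part of the PR:

.. code-block:: python

    import numpy as np

    # Illustrative check: if Ki = Qi @ diag(wi) @ Qi.T, then
    # K1 (x) K2 + sigma^2 * I
    #     = (Q1 (x) Q2) @ diag(kron(w1, w2) + sigma^2) @ (Q1 (x) Q2).T
    K1 = np.array([[1.0, 0.5], [0.5, 2]])
    K2 = np.array([[1.0, 0.4, 0.2], [0.4, 2, 0.3], [0.2, 0.3, 1]])
    sigma = 0.1

    w1, Q1 = np.linalg.eigh(K1)
    w2, Q2 = np.linalg.eigh(K2)
    Q = np.kron(Q1, Q2)
    w = np.kron(w1, w2) + sigma**2

    K = np.kron(K1, K2) + sigma**2 * np.eye(6)
    assert np.allclose(Q @ np.diag(w) @ Q.T, K)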