by N. Desassis and D. Renard
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
    return false;
}
The aim of this tutorial is to show how to use gstlearn to simulate the solution of
$$(\kappa^2-\Delta_{\mathcal{S}_R})^{\alpha/2}Z = \sigma\mathcal{W}$$
on the sphere $\mathcal{S}_R$ of radius $R$, where:
$\Delta_{\mathcal{S}_R}$ is the Laplace-Beltrami operator, i.e., it acts on each point of the sphere as the usual Laplacian on the tangent plane at this point;
$\kappa$ is the inverse of the scale parameter;
$\alpha \geq 2$ is an integer describing the smoothness of the solution;
$\mathcal{W}$ is a Gaussian white noise, suitably normalized such that $\sigma^2$ is the variance of the solution.
In this notebook, we define the Matérn covariance on the sphere as the covariance of the solution of this SPDE (other extensions of the Matérn function to the sphere are possible). By analogy with the Euclidean case, its smoothness parameter is defined by $\nu = \alpha - 1$. To compute the covariance function as a function of the geodesic distance, one has to use a decomposition on the Legendre polynomials (see below).
We also treat the more general case $$P^{1/2}(-\Delta_{\mathcal{S}_R})Z = \sigma\mathcal{W}$$
where $P$ is a polynomial which is positive on $\mathbb{R}^+$.
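For instance, with the Markov coefficients used below, and assuming (as an illustration) that the coefficient list gives $P$ in increasing degree, the polynomial is $$P(\lambda) = 1 - \lambda + \frac{\lambda^2}{2}$$ which is positive on $\mathbb{R}^+$ since its leading coefficient is positive and its discriminant is negative.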
import gstlearn as gl
import numpy as np
# Plot packages
import gstlearn.plot as gp
import gstlearn.plot3D as gop
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import IPython
# Markov (if False, the Matérn covariance will be used)
Markov = True
# Sphere radius
R = gl.EARTH_RADIUS
gl.defineDefaultSpace(gl.ESpaceType.SN, param=R)
# Scale parameter (for convenience, it is defined as a proportion of the radius)
ratioRange = 0.2
scale = R * ratioRange
# Sill
sill = 2.
# Smoothness parameter (for the Matérn case)
nu = 2
# Markov coefficients (for the Markov case)
coeffs = [1, -1, .5]
mesh = gl.MeshSphericalExt()
err = mesh.resetFromDb(None,None,triswitch = "-r5",verbose=False)
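As a quick sanity check (added here; getNApices is used below, and getNMeshes is assumed to be the matching gstlearn accessor for the number of triangles):
print("Number of apices:", mesh.getNApices())
print("Number of meshes:", mesh.getNMeshes())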
Sampling Db creation
nsample = 4000
# Sub-sampling to reduce the computational burden
np.random.seed(123)
ind = np.random.choice(mesh.getNApices(),size=nsample,replace=False)
# Creation of the Db
X = mesh.getCoordinates(0)
Y = mesh.getCoordinates(1)
dbdat = gl.Db.create()
dbdat["x"] = np.array(X)[ind]
dbdat["y"] = np.array(Y)[ind]
dbdat.setLocators(["x","y"],gl.ELoc.X)
The covariance model is either Markov or Matérn.
if Markov:
    model = gl.Model.createFromParam(type=gl.ECov.MARKOV,
                                     range=scale,
                                     sill=sill,
                                     flagRange=False)
    model.getCova(0).setMarkovCoeffs(coeffs)
else:
    model = gl.Model.createFromParam(type=gl.ECov.BESSEL_K,
                                     range=scale,
                                     sill=sill,
                                     param=nu,
                                     flagRange=False)
Q = gl.PrecisionOp(mesh, model)     # precision operator of the field on the mesh
result = np.array(Q.simulate()[0])  # one non-conditional simulation at the mesh nodes
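As a quick check (added here), the empirical variance of the simulated field should be close to the target sill:
print("Empirical variance:", np.var(result), "(target sill:", sill, ")")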
surface = gop.SurfaceOnMesh(mesh, result,opacity=1)
fig = go.Figure(data=[ surface ])
fig.update_scenes(xaxis_visible=False, yaxis_visible=False,zaxis_visible=False )
fig.show()
To obtain the covariances between one mesh node and all the others, we use the fact that $\Sigma = Q^{-1}$ and solve $Qx = e_j$, where $e_j$ is the canonical basis vector associated with an arbitrary index $j$.
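As a small self-contained illustration of this identity on a toy dense SPD matrix (pure numpy, independent of gstlearn):
rng = np.random.default_rng(0)
B = rng.normal(size=(5, 5))
Qtoy = B @ B.T + 5 * np.eye(5)  # small symmetric positive-definite "precision" matrix
j = 2
e_j = np.zeros(5)
e_j[j] = 1.0
x = np.linalg.solve(Qtoy, e_j)  # column j of Sigma = Qtoy^{-1}, without full inversion
print(np.allclose(x, np.linalg.inv(Qtoy)[:, j]))  # True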
Get the distances
ind0 = 12
distances = np.array(mesh.getDistances(ind0))
Compute the covariances
covDiscr = np.array(Q.evalCov(ind0))
Sort for the plot
covDiscrClose = covDiscr[np.argsort(distances)]
deltaLong = np.sort(distances)
Display the result
plt.plot(deltaLong,covDiscrClose,"--",label = "Discretized covariance")
ax = plt.legend()
The empirical variogram is computed by using the great-circle distance.
npas = 50 # number of discretization points
dpas = 0.04 # lag with respect to the unit sphere (it will be multiplied
            # by R in the creation of the VarioParam)
dbdat["simu"] = np.array(result)[ind]
dbdat.setLocators(["simu"],gl.ELoc.Z)
# Variogram
vp = gl.VarioParam.createOmniDirection(npas=npas, dpas=dpas * R)
vario = gl.Vario.create(vp, dbdat)
err = vario.compute(gl.ECalcVario.VARIOGRAM)
#vario.display()
ax = vario.plot(label = "Empirical Variogram")
ax = plt.legend()
The covariance between two points separated by the great-circle distance $d$ on the sphere of radius $R$ is given by $$C(d) = \frac{\sigma^2}{\sum_{i=0}^\infty f(i)}\sum_{i=0}^\infty f(i) P_i\left(\cos \frac{d}{R}\right)$$
where the $P_i$'s are the Legendre polynomials, computed with the following recurrence formula:
$$P_0(x) = 1, \qquad P_1(x) = x,$$
$$P_{n+1}(x)=\frac{(2n+1)\,x\,P_n(x) - n\,P_{n-1}(x)}{n+1}.$$
For $n\geq 0$,
$$f(n) = \frac{2n+1}{\left(R^2\kappa^2 + n(n+1)\right)^\alpha}$$
For numerical computations, the sums are truncated at a finite rank $N$.
For more details on covariances on the sphere, see Lantuejoul, Freulon and Renard (2019).
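As a cross-check of this formula, here is a minimal numpy sketch of the truncated expansion for the Matérn case; cov_sphere is an illustrative helper (not a gstlearn function), assuming $\kappa = 1/\mathrm{scale}$ and $\alpha = \nu + 1$:
def cov_sphere(d, R, scale, sigma2, alpha, N=20):
    # Truncated Legendre expansion of C(d), d being the great-circle distance
    x = np.cos(np.asarray(d, dtype=float) / R)
    n = np.arange(N + 1)
    f = (2 * n + 1) / (R**2 / scale**2 + n * (n + 1))**alpha  # weights f(n), with kappa = 1/scale
    P_prev, P_cur = np.ones_like(x), x                        # P_0(x) = 1, P_1(x) = x
    s = f[0] * P_prev + f[1] * P_cur
    for k in range(1, N):
        P_next = ((2 * k + 1) * x * P_cur - k * P_prev) / (k + 1)  # Legendre recurrence
        s += f[k + 1] * P_next
        P_prev, P_cur = P_cur, P_next
    return sigma2 * s / f.sum()  # normalization ensures cov_sphere(0) = sigma2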
ndisc = 100 # number of discretization steps for the covariance
N = 20 # truncation rank of the Legendre expansion
h = np.linspace(0,np.max(deltaLong),ndisc)
ax = vario.plot(label = "Empirical Variogram")
a = model.getCova(0)
uu = np.array([a.evalCovOnSphere(i, N) for i in h])  # theoretical covariance at distances h
ax = plt.plot(h, sill - uu,label = "Theoretical Variogram")
plt.plot(deltaLong,covDiscrClose[0] - covDiscrClose,"--",label = "Discretized model")
ax = plt.legend()
There is a slight difference between the theoretical variogram and the one obtained from the SPDE discretization, due to a numerical error on the variance introduced by the discretization. The comparison of the covariances shows that this numerical error is rather small:
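To quantify it (a quick check added here), compare the discretized variance covDiscrClose[0] with the target sill:
print("Relative error on the variance:", abs(covDiscrClose[0] - sill) / sill)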
h = np.linspace(0,np.max(deltaLong),ndisc)
vario = gl.Vario.create(vp,dbdat)
err = vario.compute(gl.ECalcVario.COVARIANCE)
#ax = gp.variogram(vario,label = "Empirical Covariance")
ax = plt.plot(h, uu,label = "Theoretical Covariance")
plt.plot(deltaLong,covDiscrClose,"--",label = "Discretized model")
ax = plt.legend()
plt.show()
Kriging
# TODO: optimize
# proj = gl.ProjMatrix(dbdat, mesh)
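This part is left as a placeholder in the notebook. As a sketch of the underlying computation (standard SPDE kriging, not a specific gstlearn call): with $A$ the projection matrix from the mesh nodes to the data locations (the role of the ProjMatrix above), data vector $y$ and a small nugget variance $\tau^2$, the kriging of the field at the mesh nodes reads $$\hat{z} = \left(Q + \frac{A^\top A}{\tau^2}\right)^{-1}\frac{A^\top y}{\tau^2}$$ and values at arbitrary target locations are then obtained by projecting $\hat{z}$ with the corresponding projection matrix.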