SPDE simulation on a sphere¶
import gstlearn as gl
import gstlearn.plot as gp
import gstlearn.document as gdoc
import gstlearn.plot3D as gop
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import IPython
from IPython.display import Markdown
# Disable notebook auto-scrolling and render the introductory markdown document
gdoc.setNoScroll()
Markdown(gdoc.loadDoc("SPDE_Sphere.md"))
SPDE on the Sphere
The aim of this tutorial is to show how to use gstlearn to simulate the solution of
$$(\kappa^2-\Delta_{\mathcal{S}_R})^{\alpha/2}Z = \sigma\mathcal{W}$$
on the sphere $\mathcal{S}_R$ of radius $R$.
$\Delta_{\mathcal{S}_R}$ is the Laplace–Beltrami operator, i.e., it acts on each point of the sphere as the usual Laplacian on the tangent plane at this point.
$\kappa$ is the inverse of the scale parameter
$\alpha \geq 2$ is an integer describing the smoothness of the solution.
$\mathcal{W}$ is a Gaussian white-noise suitably normalized such as $\sigma^2$ is the variance of the solution.
In this notebook, we will define the Matérn covariance on the sphere as the covariance of the solution of this SPDE (other extensions of the Matérn function are possible). By analogy with the Euclidean case, its smoothness parameter will be defined by $\nu = \alpha - 1$. To compute the covariance function with respect to the geodesic distance, one has to use a decomposition on the Legendre polynomials (see below).
We also treat the more general case $$P^{1/2}(-\Delta_{\mathcal{S}_R})Z = \sigma\mathcal{W}$$
where $P$ is a polynomial that is positive on $\mathbb{R}^+$.
# Covariance choice: True -> Markov model (polynomial of the Laplacian);
# False -> Matérn covariance
Markov = True
Parametrization¶
# Sphere radius (Earth radius constant provided by gstlearn)
R = gl.EARTH_RADIUS
# Declare the default space as the 2-sphere S_R of radius R
gl.defineDefaultSpace(gl.ESpaceType.SN,param=R)
# Scale parameter (for convenience, defined as a proportion of the radius)
ratioRange = 0.2
scale = R * ratioRange
# Sill (variance sigma^2 of the solution)
sill = 2.
# Smoothness parameter (used only in the Matérn case)
nu = 2
# Polynomial coefficients (used only in the Markov case)
coeffs = [1,-1,.5]
Meshing¶
# Triangulated mesh of the whole sphere (no Db constraints, hence None, None).
# triswitch "-r5" presumably sets the refinement level of the triangulation —
# TODO confirm against the gstlearn meshing documentation
mesh = gl.MeshSphericalExt()
err = mesh.resetFromDb(None,None,triswitch = "-r5",verbose=False)
Sampling Db creation
nsample = 4000
# Sub-sample the mesh apices to reduce the computational burden.
# Fixed seed so the selection is reproducible.
np.random.seed(123)
ind = np.random.choice(mesh.getNApices(),size=nsample,replace=False)
# Build the data Db from the coordinates of the selected apices
X = mesh.getCoordinates(0)
Y = mesh.getCoordinates(1)
dbdat = gl.Db.create()
for name, values in (("x", X), ("y", Y)):
    dbdat[name] = np.array(values)[ind]
dbdat.setLocators(["x","y"],gl.ELoc.X)
Covariance model¶
The covariance model is Markov or Matérn.
# Build the covariance model, either Markov or Matérn.
# flagRange=False: the `range` argument carries the scale parameter
# (not the practical range), consistent with the `scale` variable above.
shared = dict(range=scale, sill=sill, flagRange=False)
if Markov:
    model = gl.Model.createFromParam(type=gl.ECov.MARKOV, **shared)
    model.setMarkovCoeffs(0, coeffs)
else:
    model = gl.Model.createFromParam(type=gl.ECov.MATERN, param=nu, **shared)
Precision matrix¶
Q = gl.PrecisionOp(mesh,model.getCova(0))
Simulation¶
result = np.array(Q.simulateOne())
Display the realization¶
# Render the simulated field on the spherical mesh with plotly
surface = gop.SurfaceOnMesh(mesh, result, opacity=1)
fig = go.Figure(data=[surface])
# Hide the Cartesian axes: only the textured sphere is of interest
fig.update_scenes(xaxis_visible=False, yaxis_visible=False, zaxis_visible=False)
# fig.show() returns None: do not bind its result (the original kept a useless `f`)
fig.show()
Compute covariance (of discretized solution)¶
We use the fact that $\Sigma = Q^{-1}$ and solve $Qx = e_j$ for an arbitrary index $j$.
Get the distances
# Reference apex for the covariance evaluation (arbitrary index j)
ind0 = 12
# Great-circle distances from apex ind0 to every apex of the mesh
distances = np.array(mesh.getDistances(ind0))
Compute the covariances
covDiscr = np.array(Q.evalCov(ind0))
Sort for the plot
# Order covariances by increasing distance for plotting. A single argsort
# replaces the original argsort + sort pair: `deltaLong` is the sorted
# distances and `covDiscrClose` the covariances in the matching order.
order = np.argsort(distances)
covDiscrClose = covDiscr[order]
deltaLong = distances[order]
# First entry is the distance-0 value, i.e. the discretized variance
print(f"Discretized Covariance = {round(covDiscrClose[0],4)}")
Discretized Covariance = 2.0393
Display the result
# Discretized covariance as a function of the great-circle distance
plt.plot(deltaLong,covDiscrClose,"--",label = "Discretized covariance")
ax = plt.legend()
# Value at distance 0 = variance of the discretized solution
print(f"Discretized variance = {round(covDiscrClose[0],4)}")
Discretized variance = 2.0393
Variogram of the realization¶
The empirical variogram is computed by using the great-circle distance.
npas = 50 # number of lags of the experimental variogram
dpas = 0.04 # lag expressed on the unit sphere (it will be multiplied
# by R in the creation of the VarioParam)
# Attach the simulated values (restricted to the sampled apices) to the Db
dbdat["simu"] = np.array(result)[ind]
dbdat.setLocators(["simu"],gl.ELoc.Z)
# Omni-directional experimental variogram (great-circle distance in SN space)
vp = gl.VarioParam.createOmniDirection(npas=npas,dpas=dpas * R)
vario = gl.Vario.create(vp)
ax = vario.compute(dbdat,gl.ECalcVario.VARIOGRAM)
#vario.display()
ax = vario.plot(label = "Empirical Variogram")
ax = plt.legend()
Theoretical covariance function¶
Markdown(gdoc.loadDoc("Covariance_Sphere.md"))
Covariance on the Sphere
The covariance between two points with great-circle distance $d$ on the sphere of radius $R$ is given by $$C(d) = \frac{\sigma^2}{\sum_{i=0}^\infty f(i)}\sum_{i=0}^\infty f(i) P_i\left(\cos \frac{d}{R}\right)$$
where the $P_i$'s are the Legendre polynomials computed with the following recurrence formula
$$P_0(x) = 1.$$
$$P_1(x) = x$$
$$P_{n+1}(x)=\frac{(2n+1)xP_n(x) - n P_{n-1}(x)}{n+1}$$
For $n\geq 0$, $$f(n) = \frac{2n+1}{ (R^2\kappa^2 + n ( n + 1))^\alpha}$$
For numerical computations, the sums are truncated at N.
For more details on the covariances on sphere, see Lantuejoul, Freulon and Renard (2019)
Evaluation¶
ndisc = 100 # number of discretization steps for the covariance
N = 20 # truncation order of the Legendre series expansion
# Distances at which the theoretical covariance is evaluated
h = np.linspace(0,np.max(deltaLong),ndisc)
ax = vario.plot(label = "Empirical Variogram")
a = model.getCova(0)
# Theoretical covariance on the sphere, Legendre sum truncated at order N
uu = np.array([a.evalCovOnSphere(i,N) for i in h])
# Theoretical variogram: gamma(h) = sill - C(h)
ax = plt.plot(h, sill - uu,label = "Theoretical Variogram")
plt.plot(deltaLong,covDiscrClose[0] - covDiscrClose,"--",label = "Discretized model")
ax = plt.legend()
There is a slight difference between the theoretical variogram and the one obtained from the SPDE discretization, due to a numerical error on the variance introduced by the discretization. The comparison of the covariances shows that this numerical error is rather small:
h = np.linspace(0,np.max(deltaLong),ndisc)
# Experimental covariance (computed here but its plot is left commented out)
vario = gl.Vario.create(vp)
ax = vario.compute(dbdat,gl.ECalcVario.COVARIANCE)
#ax = gp.variogram(vario,label = "Empirical Covariance")
# Overlay theoretical and discretized covariances
ax = plt.plot(h, uu,label = "Theoretical Covariance")
plt.plot(deltaLong,covDiscrClose,"--",label = "Discretized model")
ax = plt.legend()
plt.show()
# Theoretical covariance alone
h = np.linspace(0, np.max(deltaLong), ndisc)
ax = plt.plot(h, uu, label="Theoretical Covariance")
ax = plt.legend()
plt.show()
# f-string for consistency with the other prints of this notebook (the
# original used a pointless raw-string prefix with comma-separated arguments;
# the printed text is unchanged)
print(f"Theoretical variance {uu[0]}")
Theoretical variance 1.994317114145671
# Final plot: discretized covariance only (the commented lines keep the code
# needed to overlay the empirical and theoretical covariances)
h = np.linspace(0,np.max(deltaLong),ndisc)
#vario = gl.Vario.create(vp,dbdat)
#ax = vario.compute(gl.ECalcVario.COVARIANCE)
#ax = gp.variogram(vario,label = "Empirical Covariance")
#ax = plt.plot(h, uu,label = "Theoretical Covariance")
plt.plot(deltaLong,covDiscrClose,"--",label = "Discretized model")
ax = plt.legend()
plt.show()
Kriging