In [1]:
# gdapy07_00 clears all variables and imports various modules
# you must run this cell first, before running any of the others

# note that these examples use inverse theory techniques that
# are only covered in later chapters of the book

# History:
# 2023/04/19 - Created by W. Menke
# 2023/07/12 - Fixed cmap issue
# 2024/03/04 - fixed bug in 07_05 and 07_06 involving forming f
# 2024/03/04 - fixed bug in 07_06 involving boundary conditions
# 2024/03/04 - changed scale for error plot in 07_05 and 07_06
# 2024/05/23 - Patches to accommodate new versions of numpy, scipy, matplotlib
#              patched dot product to return scalar value
#              las.bicg keyword tol changed to rtol

# A reset deletes any previously-created variables from memory
%reset -f

# import various packages

import numpy as np                    # matrices & etc
from matplotlib import pyplot as plt  # general plotting 
from matplotlib import patches        # plotting shapes
from datetime import date             # date and time arithmetic
from math import exp, pi, sin, cos, tan, sqrt, floor, log10, nan   # math functions
import scipy.linalg as la             # linear algebra functions
import os                             # operating system
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
import scipy.signal as sg
import scipy.stats as st
import scipy.sparse.linalg as las
from scipy import sparse
import matplotlib

# function to make a numpy N-by-1 column vector
# c=gda_cvec(v1, v2, ...) from a list of several
# array-like entities v1, v2, ..., including a number,
# a list of numbers, a tuple of numbers, a 1D np array,
# and an N-by-1 np array. The function
# also ensures that, if v is an np array, c is
# a copy of it, as contrasted to a view.
# It promotes integers to floats, and integers
# and floats to complex, by context.
# This version concatenates many arguments,
# whereas c=gda_cvec1(v1) takes just one argument.
# I recommend always using gda_cvec(v1, v2, ...)
def gda_cvec(*argv):
    t = int;
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        Nt = Nt + N;
        if( N==0 ):
            continue; # skip vector of zero length
        if (t==int) and isinstance(v[0,0],float):
            t=float;
        elif isinstance(v[0,0],complex):
            t=complex;
    w = np.zeros((Nt,1),dtype=t);
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        w[Nt:Nt+N,0:1] = v;  # patch 20230418 was #w[Nt:Nt+N,0] = v[0:N,0];
        Nt = Nt + N;
    return w;

# function to make a numpy N-by-1 column vector
# c=gda_cvec1(v) from an entity v that is array-like,
# including a number, a list of numbers, a tuple
# of numbers, a 1D np array, and an N-by-1 np array.
# It promotes integers to floats, and integers
# and floats to complex, by context. The function
# also ensures that, if v is an np array, c is
# a copy of it, as contrasted to a view.
# This version takes just one input argument,
# whereas c=gda_cvec(v1,v2,...) concatenates
# many arguments.
def gda_cvec1(v):
    if isinstance(v, int) or isinstance(v, np.int32):
        w = np.zeros((1,1),dtype=int);
        w[0,0] = v;
        return w;
    elif isinstance(v, float):
        w = np.zeros((1,1),dtype=float);
        w[0,0] = v;
        return w;
    elif isinstance(v, complex):
        w = np.zeros((1,1),dtype=complex);
        w[0,0] = v;
        return w;
    elif isinstance(v, np.ndarray):
        s = np.shape(v);
        if len(s) == 1:
            return np.copy(np.reshape(v,(s[0],1)));
        else:
            [r,c]=s;
            if( c==1 ):
                return(np.copy(v));
            elif(r==1):
                return(np.copy(v.T));
            else:
                raise TypeError("gda_cvec: %d by %d ndarray not allowed" % (r, c));
    elif isinstance(v, list):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: list contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(v); # patch v -> np.array(v)
        return w;
    elif isinstance(v, tuple):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: tuple contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(list(v)); # patch v -> np.array(list(v));
        return w;
    else:
        raise TypeError("gda_cvec: %s not supported" % type(v));

        
# gda_draw function makes a "pictorial matrix equation"
# arguments are vectors, matrices and strings
# which are plotted in the order that they appear,
# except that strings starting with 'title ' are plotted
# under the subsequent matrix or vector
# always returns a status of 1
def gda_draw(*argv):
    DOCOLOR=True;
    if( DOCOLOR ):
        bwcmap = matplotlib.colormaps['jet'];
    else:
        bw = np.zeros((256,4));
        v = 0.9*(256 - np.linspace( 0, 255, 256 ))/255;
        bw[:,0] = v;
        bw[:,1] = v;
        bw[:,2] = v;
        bw[:,3] = np.ones(256);
        bwcmap = ListedColormap(bw);
    # size of plot
    W = 16;
    H = 4;
    fig1 = plt.figure(1);
    # figsize width and height in inches
    fig1.set_size_inches(W,H);
    ax1 = plt.subplot(1,1,1);
    plt.axis([0, W, -H/2, H/2]);
    plt.axis('off');
    LM = W/6;    # matrix width and height
    LV = W/40;   # vector width
    FS = 0.12;    # character width
    TO = 0.4;    # title vertical offset
    SP = 0.2;    # space between objects
    LS = 0.2;    # leading space
    p = LS; # starting x-position
    istitle=0; # flags presence of a title
    for a in argv:
        if isinstance(a,np.ndarray):
            sh = np.shape(a);
            if len(sh) == 1:  # conversion to nx1 array
                n = sh[0];
                m = 1;
                ap = a;
                a = np.zeros((n,1));
                a[:,0] = ap;
            else:
                n = sh[0];
                m = sh[1];
            if m==1:
                pold=p;
                left=p;
                right=p+LV;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LV;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
            else:
                pold=p;
                left=p;
                right=p+LM;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LM;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
        elif isinstance(a,str):
            ns = len(a);
            istitle=0;
            if( ns>=6 ):
                if 'title ' in a[0:6]:
                    istitle=1;
                    titlestr=a[6:];
            if( istitle != 1):
                plt.text(p,0,a);
                p = p + ns*FS + SP;
    plt.show();
    return 1;

# bandpass filter, used in the seismological example, but handy
# in a variety of settings involving time series
def gda_chebyshevfilt(d, Dt, flow, fhigh):
    # (dout,u,v)=gda_chebyshevfilt(d, Dt, flow, fhigh);
    # chebyshev IIR bandpass filter
    # d - input array of data
    # Dt - sampling interval
    # flow - low pass frequency, Hz
    # fhigh - high pass frequency, Hz
    # dout - output array of data
    # u - the numerator filter
    # v - the denominator filter
    # these filters can be reused via dout=sg.lfilter(u.ravel(),v.ravel(),din);

    # make sure input timeseries is a column vector
    s = np.shape(d);
    N = s[0];
    if(N==1):
        dd = np.zeros((N,1));
        dd[:,0] = d;
    else:
        dd=d;
        
    # sampling rate
    rate=1/Dt;

    # ripple parameter, set to ten percent
    ripple=0.1;  

    # normalise frequency
    fl=2.0*flow/rate;
    fh=2.0*fhigh/rate;

    # center frequency 
    cf = 4 * tan( (fl*pi/2) ) * tan( (fh*pi/2) );

    # bandwidth
    bw = 2 * ( tan( (fh*pi/2) ) - tan( (fl*pi/2) ) );

    # ripple parameter factor
    rpf = sqrt((sqrt((1.0+1.0/(ripple*ripple))) + 1.0/ripple));
    a = 0.5*(rpf-1.0/rpf);
    b = 0.5*(rpf+1.0/rpf);

    u=np.zeros((5,1));
    v=np.zeros((5,1));
    theta = 3*pi/4;
    sr = a * cos(theta);
    si = b * sin(theta);
    es = sqrt(sr*sr+si*si);
    tmp= 16.0 - 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw - 4.0*bw*cf*sr + cf*cf;
    v[0,0] = 1.0;
    v[1,0] = 4.0*(-16.0 + 8.0*bw*sr - 2.0*bw*cf*sr + cf*cf)/tmp;
    v[2,0] = (96.0 - 16.0*cf - 8.0*es*es*bw*bw + 6.0*cf*cf)/tmp;
    v[3,0] = (-64.0 - 32.0*bw*sr + 8.0*bw*cf*sr + 4.0*cf*cf)/tmp;
    v[4,0] = (16.0 + 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw + 4.0*bw*cf*sr + cf*cf)/tmp;
    tmp = 4.0*es*es*bw*bw/tmp;
    u[0,0] = tmp;
    u[1,0] = 0.0;
    u[2,0] = -2.0*tmp;
    u[3,0] = 0.0;
    u[4,0] = tmp;

    dout = sg.lfilter(u.ravel(),v.ravel(),dd.ravel());
    return (gda_cvec(dout),gda_cvec(u),gda_cvec(v));
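
# A minimal sketch (illustration only; a 1 Hz sampling rate and a unit
# impulse are assumed here) showing that the returned (u,v) can be
# reused with sg.lfilter, as the header comment above notes
d_demo = np.zeros((100,1)); d_demo[50,0]=1.0;  # impulse at sample 50
dout_demo,u_demo,v_demo = gda_chebyshevfilt(d_demo,1.0,0.1,0.3);
dout2_demo = sg.lfilter(u_demo.ravel(),v_demo.ravel(),d_demo.ravel());
assert np.allclose(dout_demo.ravel(),dout2_demo);  # same filtered series
del d_demo, dout_demo, u_demo, v_demo, dout2_demo;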

# function to perform the multiplication FT (F v)
def gdaFTFmul(v):
    # this function is used by the biconjugate gradient solver
    # to solve the generalized least squares problem Fm=f.
    # Note that "F" must be called "gdaFsparse".
    global gdaFsparse;
    s = np.shape(v);
    if(len(s)==1):
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    # weirdly, "*" multiplies sparse matrices just fine
    temp = gdaFsparse*vv;
    return gdaFsparse.transpose()*temp;

# function to perform the multiplication FT (F v)
def gda_FTFmul(v):
    # this function is used by the biconjugate gradient solver
    # to solve the generalized least squares problem Fm=f.
    # Note that "F" must be called "gdaFsparse".
    global gdaFsparse;
    s = np.shape(v);
    if(len(s)==1):
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    # weirdly, "*" multiplies sparse matrices just fine
    temp = gdaFsparse*vv;
    return gdaFsparse.transpose()*temp;


# function to perform the multiplication FT f
def gda_FTFrhs(f):
    # this function is used by the biconjugate gradient solver
    # to solve the generalized least squares problem Fm=f.
    # Note that "F" must be called "gdaFsparse".
    global gdaFsparse;
    return gdaFsparse.transpose()*f;
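
# A minimal sketch (hypothetical 2-by-2 system, illustration only) of
# how the helpers above plug into the biconjugate gradient solver:
# assign the global gdaFsparse, wrap the normal equations FTF m = FT f
# in a LinearOperator, and call las.bicg (the real use is in gdapy07_06)
gdaFsparse = sparse.coo_matrix(np.array([[2.0,1.0],[1.0,3.0]]));
f_demo = gda_cvec(1.0,2.0);
LO_demo = las.LinearOperator(shape=(2,2),matvec=gda_FTFmul,rmatvec=gda_FTFmul);
q_demo = las.bicg(LO_demo,gda_FTFrhs(f_demo),rtol=1e-12,maxiter=100);
m_demo = gda_cvec(q_demo[0]);  # least squares solution of F m = f
del f_demo, LO_demo, q_demo, m_demo;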
In [2]:
# gdapy07_01
# makes random images with exponential and Gaussian autocorrelations

print("gdapy07_01");

# note: in order to prevent overwriting file '../Data/random_fields.txt'
# the filename was changed to '../Data/my_random_fields.txt'
 
# field should have variance sigmam2
sigmam2=1;
sigmam = sqrt(sigmam2);
 
# field should have autocorrelation with scale factor, s
s = 5; # scale factor
s2 = s**2; # scale factor
 
# standard set-up of position x and wavenumber kx-axis
Nx=40;
Nkx=int(Nx/2+1);
Dx=1.0;
x=gda_cvec(Dx*np.linspace(0,Nx-1,Nx));
xmax = Dx*(Nx-1);
kxmax=2.0*pi/(2.0*Dx);
Dkx=kxmax/(Nx/2);
kxp = gda_cvec( Dkx*np.linspace(0,Nkx-1,Nkx));
kxn = -np.flipud(kxp[1:Nkx-1,0:1]);
kx = gda_cvec(kxp,kxn);
ixmid = int(floor(Nx/2));
 
# standard set-up of position y and wavenumber ky-axis
Ny=40;
Nky=int(Ny/2+1);
Dy=1.0;
y=gda_cvec(Dy*np.linspace(0,Ny-1,Ny));
ymax = Dy*(Ny-1);
kymax=2.0*pi/(2.0*Dy);
Dky=kymax/(Ny/2);
kyp = gda_cvec( Dky*np.linspace(0,Nky-1,Nky));
kyn = -np.flipud(kyp[1:Nky-1,0:1]);
ky = gda_cvec(kyp,kyn);
iymid = int(floor(Ny/2));
 
# PART 1: exponential autocorrelation, Cxy (unnormalized)
CxyE = np.zeros((Nx,Ny)); # target amplitude spectral density
for ix in range(Nx):
    for iy in range(Ny):
        r = abs(x[ix,0]-x[ixmid,0])+abs(y[iy,0]-y[iymid,0]);
        CxyE[ix,iy] = exp(-r/s);
        
# target covariance
c = np.copy(CxyE);
 
# make m(x,y) with desired autocovariance, c using fact that
# amplitude spectrum of autocorrelation is power spectrum of data
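# (Wiener-Khinchin: the Fourier transform of the autocovariance is the
# power spectral density, so multiplying the FFT of white noise by
# sqrt(|FFT(c)|) imposes the target autocovariance, up to scaling)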
sqrtabsctt = np.sqrt(np.abs(np.fft.fft2(c)));  # target asd of m
n = np.random.normal(loc=0.0,scale=1.0,size=(Nx,Ny));  # random noise
m = np.fft.ifft2(np.multiply(sqrtabsctt,np.fft.fft2(n))); # shape spectrum of n into m
m = np.real(m);
sigmamest = np.std(m);  # sqrt variance
m = (sigmam/sigmamest)*m; # normalize to correct variance
 
# save result to variable with a better name
SEtrue = np.copy(m);
 
CxyEest = np.fft.ifft2(np.power(np.abs(np.fft.fft2(SEtrue)),2)); # unnormalized autocorrelation
CxyEest = np.real(CxyEest);

# put into more reasonable order, with origin in the center
CxyEest2=np.zeros((Nx,Ny));
CxyEest2[Nx-Nkx:Nx,Ny-Nky:Ny] = CxyEest[0:Nkx,0:Nky];
CxyEest2[0:Nkx-2,Ny-Nky:Ny] = CxyEest[Nkx:Nx,0:Nky];
CxyEest2[0:Nkx-2,0:Nky-2] = CxyEest[Nkx:Nx,Nky:Ny];
CxyEest2[Nkx-2:Nx,0:Nky-2] = CxyEest[0:Nkx,Nky+0:Ny];
 
# PART 2: Gaussian autocorrelation, Cxy (unnormalized)
CxyG = np.zeros((Nx,Ny)); # target amplitude spectral density
for ix in range(Nx):
    for iy in range(Ny):
        r2 = abs(x[ix,0]-x[ixmid,0])**2+abs(y[iy,0]-y[iymid,0])**2;
        CxyG[ix,iy] = exp(-0.5*r2/s2);
        
# target auto-covariance
c = np.copy(CxyG);
 
# make m(x,y) with desired autocovariance, c using fact that
# amplitude spectrum of autocorrelation is power spectrum of data
sqrtabsctt = np.sqrt(np.abs(np.fft.fft2(c)));  # target asd of m
n = np.random.normal(loc=0.0,scale=1.0,size=(Nx,Ny));  # random noise
m = np.fft.ifft2(np.multiply(sqrtabsctt,np.fft.fft2(n))); # shape spectrum of n into m
sigmamest = np.std(m);  # sqrt variance
m = np.real(m);
m = (sigmam/sigmamest)*m; # normalize to correct variance
 
# save result to variable with a better name
SGtrue = np.copy(m);
 
CxyGest = np.fft.ifft2(np.power(np.abs(np.fft.fft2(SGtrue)),2)); # unnormalized autocorrelation
CxyGest = np.real(CxyGest);
 
# put into more reasonable order, with origin in the center
CxyGest2=np.zeros((Nx,Ny));
CxyGest2[Nx-Nkx:Nx,Ny-Nky:Ny] = CxyGest[0:Nkx,0:Nky];
CxyGest2[0:Nkx-2,Ny-Nky:Ny] = CxyGest[Nkx:Nx,0:Nky];
CxyGest2[0:Nkx-2,0:Nky-2] = CxyGest[Nkx:Nx,Nky:Ny];
CxyGest2[Nkx-2:Nx,0:Nky-2] = CxyGest[0:Nkx,Nky+0:Ny];
 
# randomly sample images
N = 256;
ix = np.random.randint(low=0,high=Nx,size=(N,1));
iy = np.random.randint(low=0,high=Ny,size=(N,1));
xd = x[ix.ravel(),0:1];
yd = y[iy.ravel(),0:1];
dE = np.zeros((N,1));
dG = np.zeros((N,1));
for i in range(N):
    dE[i,0] = SEtrue[ix[i,0],iy[i,0]];
    dG[i,0] = SGtrue[ix[i,0],iy[i,0]];
    
gda_draw("title true",CxyE,"title est",CxyEest2,);
print("Caption: 2D exponential autocovariance, (left) true, (right) realized.");

gda_draw("title true",CxyG,"title est",CxyGest2);
print("Caption: 2D Gaussian autocovariance, (left) true, (right) realized.");

fig1 = plt.figure(1,figsize=(12,5));   # smallish figure

# It's important to check that the image is being rendered in the right orientation
#ix=2; iy=2; SEtrue=np.zeros((Nx,Ny)); SEtrue[ix,iy]=1.0; # to check orientation
#ix=38; iy=2; SEtrue=np.zeros((Nx,Ny)); SEtrue[ix,iy]=1.0; # to check orientation
#ix=2; iy=38; SEtrue=np.zeros((Nx,Ny)); SEtrue[ix,iy]=1.0; # to check orientation

ax1 = plt.subplot(1, 2, 1);
plt.axis([0,xmax,0,ymax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(SEtrue));
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(SEtrue.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar();
plt.plot(xd,yd,"k.",lw=2);
plt.xlabel("x");
plt.ylabel("y");

ax1 = plt.subplot(1, 2, 2);
plt.axis([0,xmax,0,ymax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(SGtrue));
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(SGtrue.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar();
plt.plot(xd,yd,"k.",lw=2);
plt.xlabel("x");
plt.ylabel("y");
plt.show();

print("Caption:(left) 2D field with exponential autocovariance, (right) 2D field");
print("with Gaussian autocovariance. Both fields are randomly sampled (dots).");


# concatenate positions and sampled field values into a matrix DF
DF = np.concatenate( (xd,yd,dE,dG),axis=1); 
# save matrix to file of tab-separated values
np.savetxt("../Data/my_random_fields.txt",DF,delimiter="\t");
gdapy07_01
Caption: 2D exponential autocovariance, (left) true, (right) realized.
Caption: 2D Gaussian autocovariance, (left) true, (right) realized.
Caption: (left) 2D field with exponential autocovariance, (right) 2D field
with Gaussian autocovariance. Both fields are randomly sampled (dots).
In [3]:
# gdapy07_02
# comparison of three versions of [cov m]
# and their operator versions D=[cov m]^(-1/2)
 
print("gdapy07_02");
 
# x-axis
N=100;
Dx=1.0;
x=gda_cvec(Dx*np.linspace(0,N-1,N));
xmax = Dx*(N-1);
          
# first derivative
r = gda_cvec( 1.0, -1.0, np.zeros((N-2,1)) );
c = gda_cvec( 1.0, np.zeros((N-1,1)) );

D1=la.toeplitz( r.ravel(), c.ravel() );
D1[0,N-1]=1.0; # modify to make boundary condition symmetric
D1 = (1.0/Dx)*D1;
 
# second and fourth derivative
D2 = np.matmul(D1,D1);
D4 = np.matmul(D2,D2);
 
# Exponential Covariance Matrix
s=10.0/xmax;
C = np.zeros((N,N));
for i in range(N):
    for j in range(N):
        y = s*Dx*abs(i-j);
        C[i,j]=exp(-y);
        
# corresponding D
D = (1.0/s)*D1+np.identity(N);
 
# corresponding covariance
DTD = np.matmul(D.T,D);
C2 = la.inv(DTD);
 
# save
CE=np.copy(C);
CE2=np.copy(C2);
 
# Long-Tailed Bell Covariance Matrix
s=20.0/xmax;
C = np.zeros((N,N));
for i in range(N):
    for j in range(N):
        y = s*Dx*abs(i-j);
        C[i,j]=(1.0+y)*exp(-y);
        
# corresponding D
D = (1/(s**2))*D2-np.identity(N);
 
# corresponding covariance
DTD = np.matmul(D.T,D);
C2 = la.inv(DTD);
 
# save
CL=np.copy(C);
CL2=np.copy(C2);
 
# Short-Tailed Bell (Gaussian) Covariance Matrix
s=20.0/xmax;
C = np.zeros((N,N));
for i in range(N):
    for j in range(N):
        y = s*Dx*abs(i-j);
        C[i,j]=exp(-y**2);

# corresponding D
D = np.identity(N) - (1.0/4.0)*(1.0/(s**2))*D2 + (1.0/32.0)*(1.0/(s**4))*D4;
                               
# corresponding covariance
DTD = np.matmul(D.T,D);
C2 = la.inv(DTD);
 
# save
CG=np.copy(C);
CG2=np.copy(C2);

fig1 = plt.figure(1,figsize=(12,8));
ax1 = plt.subplot(2, 3, 1);
ax1.invert_yaxis()
plt.axis([0,N,0,N]);
mycmap=matplotlib.colormaps['jet'];
plt.imshow(CE/np.max(CE), cmap=mycmap, vmin=-1.0, vmax=1.0 );
plt.colorbar(shrink=0.5);
plt.xlabel("i");
plt.ylabel("j");

ax1 = plt.subplot(2, 3, 2);
ax1.invert_yaxis()
plt.axis([0,N,0,N]);
mycmap=matplotlib.colormaps['jet'];
plt.imshow(CL/np.max(CL), cmap=mycmap, vmin=-1.0, vmax=1.0 );
plt.colorbar(shrink=0.5);
plt.xlabel("i");
plt.ylabel("j");

ax1 = plt.subplot(2, 3, 3);
ax1.invert_yaxis()
plt.axis([0,N,0,N]);
mycmap=matplotlib.colormaps['jet'];
plt.imshow(CG/np.max(CG), cmap=mycmap, vmin=-1.0, vmax=1.0 );
plt.colorbar(shrink=0.5);
plt.xlabel("i");
plt.ylabel("j");

ax1 = plt.subplot(2, 3, 4);
ax1.invert_yaxis()
plt.axis([0,N,0,N]);
mycmap=matplotlib.colormaps['jet'];
plt.imshow(CE2/np.max(CE2), cmap=mycmap, vmin=-1.0, vmax=1.0 );
plt.colorbar(shrink=0.5);
plt.xlabel("i");
plt.ylabel("j");

ax1 = plt.subplot(2, 3, 5);
ax1.invert_yaxis()
plt.axis([0,N,0,N]);
mycmap=matplotlib.colormaps['jet'];
plt.imshow(CL2/np.max(CL2), cmap=mycmap, vmin=-1.0, vmax=1.0 );
plt.colorbar(shrink=0.5);
plt.xlabel("i");
plt.ylabel("j");

ax1 = plt.subplot(2, 3, 6);
ax1.invert_yaxis()
plt.axis([0,N,0,N]);
mycmap=matplotlib.colormaps['jet'];
plt.imshow(CG2/np.max(CG2), cmap=mycmap, vmin=-1.0, vmax=1.0 );
plt.colorbar(shrink=0.5);
plt.xlabel("i");
plt.ylabel("j");

plt.show();

print("Caption: (Top) true autocovariance (left) exponential (middle) long-tailed bell (right) Gaussian.");
print("(Bottom) approximate autocovariance (left) exponential (middle) long-tailed bell (right) Gaussian.");

# plot middle column

fig2 = plt.figure(2,figsize=(12,8));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0,xmax,0,1])
ic = int(floor(N/2));
plt.plot(x,CE[0:N,ic:ic+1]/np.max(CE[0:N,ic:ic+1]),"r-",lw=2);
plt.plot(x,CE2[0:N,ic:ic+1]/np.max(CE2[0:N,ic:ic+1]),"g--",lw=3);
plt.xlabel("row i");
plt.ylabel("Cij");
plt.show();

fig3 = plt.figure(3,figsize=(12,8));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0,xmax,0,1])
ic = int(floor(N/2));
plt.plot(x,CL[0:N,ic:ic+1]/np.max(CL[0:N,ic:ic+1]),"r-",lw=2);
plt.plot(x,CL2[0:N,ic:ic+1]/np.max(CL2[0:N,ic:ic+1]),"g--",lw=3);
plt.xlabel("row i");
plt.ylabel("Cij");
plt.show();

fig4 = plt.figure(4,figsize=(12,8));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0,xmax,0,1])
ic = int(floor(N/2));
plt.plot(x,CG[0:N,ic:ic+1]/np.max(CG[0:N,ic:ic+1]),"r-",lw=2);
plt.plot(x,CG2[0:N,ic:ic+1]/np.max(CG2[0:N,ic:ic+1]),"g--",lw=3);
plt.xlabel("row i");
plt.ylabel("Cij");
plt.show();
print("Caption: Central row of autocovariance matrix. (Top) exponential, with exact (red) and approximate (green).");
print("(Middle) Long-tailed bell, with exact (red) and approximate (green).");
print("(Bottom) Gaussian, with exact (red) and approximate (green).");


 
gdapy07_02
Caption: (Top) true autocovariance (left) exponential (middle) long-tailed bell (right) Gaussian.
(Bottom) approximate autocovariance (left) exponential (middle) long-tailed bell (right) Gaussian.
Caption: Central row of autocovariance matrix. (Top) exponential, with exact (red) and approximate (green).
(Middle) Long-tailed bell, with exact (red) and approximate (green).
(Bottom) Gaussian, with exact (red) and approximate (green).
In [4]:
# gdapy07_03
# Exploration of variance of Gaussian Process Regression
# The solution of a generalized least squares problem is
# mest = GMG dobs + (I- GMG G) m0
# and covariance
# cov_mest = sigma2d GMG GMG' + (I- GMG G) cov_m (I- GMG G)' = c1 + c2
# and variance
# vari_mest = diag(c1) + diag(c2) = v1 + v2
# this code explores the relative contributions of v1 and v2
 
print("gdapy07_03");

# the true random field has variance sigmam2
sigmam=1.0;
sigmam2 = sigmam**2;
 
# field should have a Gaussian autocorrelation with scale factor, s
s = 5; # scale factor
s2 = s**2;
 
# standard set-up of position x and wavenumber kx-axis
Nx=256;
Nkx=int(Nx/2+1);
Dx=1.0;
x=gda_cvec(Dx*np.linspace(0,Nx-1,Nx));
xmax = Dx*(Nx-1);
kxmax=2*pi/(2*Dx);
Dkx=kxmax/(Nx/2);
kxp=gda_cvec(Dkx*np.linspace(0,Nkx-1,Nkx));
kxn=-np.flipud(kxp[1:Nkx-1,0:1]);
kx = gda_cvec(kxp,kxn);
ixmid = int(floor(Nkx));
 
# evaluate the Gaussian autocovariance, centered in the interval
c = np.zeros((Nx,1)); # target amplitude spectral density
for ix in range(Nx):
    r=Dx*abs(ix-Nkx);
    c[ix,0] = exp(-0.5*(r**2)/s2);
    
# make m(x) with desired autocovariance, c using fact that
# amplitude spectrum of autocorrelation is power spectrum of data
sqrtabsctt = np.sqrt(np.abs(np.fft.fft(c,axis=0)));  # target asd of m
n = np.random.normal(loc=0.0,scale=1.0,size=(Nx,1)); # random noise
mtrue = np.real(np.fft.ifft(np.multiply(sqrtabsctt,np.fft.fft(n,axis=0)),axis=0)); # shape spectrum of n into mtrue
sigmamest = np.std(mtrue); # sqrt variance
mtrue = (sigmam/sigmamest)*mtrue; # normalize to correct variance
 
# sample data, pattern of expanding spacing between samples
Ns=16;
idata=np.zeros((0,1),dtype=int);
for i in range(Ns):
    idatai = gda_cvec( i*Ns+np.arange(0,Ns-1,i+1) );
    idata = np.concatenate((idata, idatai.astype(int)),axis=0);
N,i=np.shape(idata);
xd = x[idata.ravel(),0:1];
dtrue = mtrue[idata.ravel(),0:1];

# data kernel
M=Nx;
G = np.zeros((N,M));
for i in range(N):
    G[i, idata[i,0] ] = 1.0;
    
# variance of data
sigmad = 0.1;
sigmad2 = sigmad**2;
 
# plot the true model and the true data
fig1 = plt.figure(1,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0.0,xmax,-4.0*sigmam,4.0*sigmam]);
plt.plot(x,mtrue,"k-",lw=2);
plt.plot(xd,dtrue,"ko",lw=1);
plt.xlabel("x");
plt.ylabel("m(x) and d(x)");
 
# control-control covariance matrix
Ccc = np.zeros((N,N));
for i in range(N):
    for j in range(N):
        r = abs(xd[i,0]-xd[j,0]);
        Ccc[i,j] = sigmam2*exp(-0.5*(r**2)/s2);
        
# target-control covariance matrix
Ctc = np.zeros((M,N));
for i in range(M):
    for j in range(N):
        r = abs(x[i,0]-xd[j,0]);
        Ctc[i,j] = sigmam2*exp(-0.5*(r**2)/s2);

# target-target covariance matrix
Ctt = np.zeros((M,M));
for i in range(M):
    for j in range(M):
        r = abs(x[i,0]-x[j,0]);
        Ctt[i,j] = sigmam2*exp(-0.5*(r**2)/s2);
        
# generalized inverse & resolution matrix
GMG = np.matmul(Ctc,la.inv(Ccc + sigmad2*np.identity(N)));
R = np.matmul(GMG,G);
IMR = np.identity(M)-R;
 
# variance of estimates
v1 = np.diag(sigmad2*np.matmul(GMG,GMG.T));          # part associated with data
v2 = np.diag(np.matmul(IMR,np.matmul(Ctt,IMR.T)));   # part associated with prior information
 
# noisy data
dobs = dtrue + np.random.normal(loc=0.0,scale=sigmad,size=(N,1));
 
# solution
mest = np.matmul(GMG,dobs);
               
plt.plot(x,mest,"r:",lw=2);
plt.show();
print("Caption: True data (circles) and true model (black curve), estimated model (red curve).");

fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0,xmax,0,sigmam]);
plt.xlabel("x");
plt.ylabel("sigma");
 
# many realizations of the solution, each time with new data
# noise, and holding m0 at zero
NR = 10000;
MEST = np.zeros((M,NR));
for i in range(NR):
    dobsir = dtrue + np.random.normal(loc=0,scale=sigmad,size=(N,1));
    MEST[0:M,i:i+1] = np.matmul(GMG,dobsir);

# standard deviation of these solutions
S1 = np.zeros((M,1));
for i in range(M):
    S1[i,0] = np.std(MEST[i:i+1,0:NR].ravel());
    
plt.plot(x,S1,"k-",lw=3);
plt.plot(x,np.sqrt(v1),"y:",lw=2);
plt.show();
print("Caption: Part of standard error of solution arising from measurement error");
print("(solid) from ensemble of realizations (dotted) from formula.");

fig3 = plt.figure(3,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0,xmax,0,sigmam]);
plt.xlabel("x");
plt.ylabel("sigma");
 
# many realizations of the solution, each time with new data
# noise, and creating a new m0 with correct covariance
NR = 10000;
MEST = np.zeros((M,NR));
for i in range(NR):
    dobsir = dtrue + np.random.normal(loc=0.0,scale=sigmad,size=(N,1));
    n = np.random.normal(loc=0.0,scale=1.0,size=(Nx,1));  # random noise
    m0 = np.real(np.fft.ifft(np.multiply(sqrtabsctt,np.fft.fft(n,axis=0)),axis=0)); # shape spectrum of n into m0
    sigmamest = np.std(m0); # sqrt variance
    m0 = (sigmam/sigmamest)*m0; # normalize to correct variance
    MEST[0:M,i:i+1] = np.matmul(GMG,dobsir) + np.matmul(IMR,m0);
S2 = np.zeros((M,1));
for i in range(M):
    S2[i,0] = np.std(MEST[i:i+1,0:NR].ravel());

plt.plot(x,S2,"k-",lw=3);
plt.plot(x,np.sqrt(v1+v2),"y:",lw=2);
plt.show();
print("Caption: Complete standard error of solution");
print("(solid) from ensemble of realizations (dotted) from formula");

fig4 = plt.figure(4,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0,xmax,0,sigmam]);
plt.xlabel("x");
plt.ylabel("sigma mest");
plt.plot(x,S1,"k-",lw=2);
plt.plot(x,S2,"r-",lw=2);
print("Caption: Comparison of the part of standard error of solution arising");
print("from measurement error (solid) and the complete standard error (red).");
 
gdapy07_03
Caption: True data (circles) and true model (black curve), estimated model (red curve).
Caption: Part of standard error of solution arising from measurement error
(solid) from ensemble of realizations (dotted) from formula.
Caption: Complete standard error of solution
(solid) from ensemble of realizations (dotted) from formula
Caption: Comparison of the part of standard error of solution arising
from measurement error (solid) and the complete standard error (red).
In [5]:
# gdapy07_04
# Gaussian Process Regression to reconstruct field from data
# in file '../Data/random_fields.txt'.  Two versions, with
# exponential and Gaussian auto-covariance, are made.

print("gdapy07_04");

# Case 1: exponential covariance

# field should have variance gamma2
gamma2=1.0;
gamma = sqrt(gamma2);

# field should have autocorrelation with scale factor, s
s = 5.0; # scale factor
s2 = s**2;
 
# load data (the fields sampled at random locations)
D = np.genfromtxt("../Data/random_fields.txt", delimiter="\t");
N, M = np.shape(D);
xd=np.copy(D[0:N,0:1]);
yd=np.copy(D[0:N,1:2]);
dE=np.copy(D[0:N,2:3]);
dG=np.copy(D[0:N,3:4]);
del D;

# add noise to data
sd = 0.01;
sd2 = sd**2;
n = np.random.normal(loc=0.0,scale=sd,size=(N,1));
dE = dE+n;
dG = dG+n;
 
# x-axis
Nx=41;
Dx=1.0;
x=gda_cvec(Dx*np.linspace(0,Nx-1,Nx));
xmax = Dx*(Nx-1);
 
# y-axis
Ny=41;
Dy=1.0;
y=gda_cvec(Dy*np.linspace(0,Ny-1,Ny));
ymax = Dy*(Ny-1);
 
M = Nx*Ny; # total number of model parameters
 
# lookup tables
iofk = np.zeros((M,1),dtype=int);
jofk = np.zeros((M,1),dtype=int);
kofij = np.zeros((Nx,Ny),dtype=int);
k=0;
for i in range(Nx):
    for j in range(Ny):
        iofk[k,0]=i;
        jofk[k,0]=j;
        kofij[i,j]=k;
        k=k+1;
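# (with this loop ordering the mapping is k = i*Ny + j, so iofk and
# jofk simply invert kofij; e.g. kofij[1,0] == Ny)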
        
# positions of model parameters
xm = np.zeros((M,1));
ym = np.zeros((M,1));
for k in range(M):
    xm[k,0]=x[iofk[k,0],0];
    ym[k,0]=y[jofk[k,0],0];
    
# covariance matrices, Exponential case
Ccc = np.zeros((N,N));
for i in range(N):
    for j in range(N):
        r = abs(xd[i,0]-xd[j,0]) + abs(yd[i,0]-yd[j,0]);
        Ccc[i,j] = gamma2*exp(-r/s);
        
Ctc = np.zeros((M,N));
for i in range(M):
    for j in range(N):
        r = abs(xm[i,0]-xd[j,0]) + abs(ym[i,0]-yd[j,0]);
        Ctc[i,j] = gamma2*exp(-r/s);
        
# GPR solution
# mest = (Ctc / (Ccc + sd2*eye(N) )) * dE;
#      = Ctc * Qinv * dE     with Q = Ccc + epsi*eye(N)
#      = Ctc * u             with u = Qinv * dE or Qu=dE
Q = Ccc + sd2*np.identity(N);
u = la.solve(Q,dE);
mest = np.matmul(Ctc,u);
 
# posterior covariance
Ctt = np.zeros((M,M));
for i in range(M):
    for j in range(M):
        r = abs(xm[i,0]-xm[j,0]) + abs(ym[i,0]-ym[j,0]);
        Ctt[i,j] = gamma2*exp(-r/s);
# Cttest = Ctt  - (Ctc/(Ccc + sd2*eye(N)))*(Ctc');
#        = Ctt - Ctc * Qinv * Ctc'     with Q = Ccc + epsi*eye(N)
#        = Ctt - Ctc * U               with U = Qinv * Ctc' or Q U = Ctc'
# Q = Ccc + epsi*np.identity(N); already calculated, above
U = la.solve(Q,Ctc.T);
Cttest = Ctt  - np.matmul(Ctc,U);
sigmatcpos = gda_cvec(np.sqrt(np.diag(Cttest)));
 
# fold into image
Sest = np.zeros((Nx,Ny));
sigmaest = np.zeros((Nx,Ny));
for k in range(M):
    i=iofk[k,0];
    j=jofk[k,0];
    Sest[i,j]=mest[k,0];
    sigmaest[i,j]=sigmatcpos[k,0];
                
fig1 = plt.figure(1,figsize=(12,5));
ax1 = plt.subplot(1, 3, 1);
plt.axis([0,xmax,0,xmax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sest));
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(Sest.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.plot(xd,yd,"k.",lw=2);
plt.xlabel("x");
plt.ylabel("y");
 
ax1 = plt.subplot(1, 3, 2);
plt.axis([0,xmax,0,xmax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sest));
# test that image is in correct orientation
#Sest=np.zeros((Nx,Ny)); ix=2; jy=2; Sest[ix,jy]=dmax;
#Sest=np.zeros((Nx,Ny)); ix=Nx-2; jy=2; Sest[ix,jy]=dmax;
#Sest=np.zeros((Nx,Ny)); ix=2; jy=Ny-2; Sest[ix,jy]=dmax;
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(Sest.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel("x");
plt.ylabel("y");
                
ax1 = plt.subplot(1, 3, 3);
plt.axis([0,xmax,0,xmax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sest));
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(sigmaest.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.plot(xd,yd,"k.",lw=2);
plt.xlabel("x");
plt.ylabel("y");
plt.show();
print("Caption: Reconstruction of 2D field, presuming exponetial autocovariance");
print("(left) Estimated field (colors) with data (dots) superimposed");
print("(middle) Estimated field (colors). (Right) Standard error of the estimate");

# estimated data
dEest = np.zeros((N,1));
for k in range(N):
    i = int(floor(Nx*xd[k,0]/xmax));
    if(i<0):
        i=0;
    elif(i>(Nx-1)):
        i=Nx-1;
    j = int(floor(Ny*yd[k,0]/ymax));
    if(j<0):
        j=0;
    elif(j>(Ny-1)):
        j=Ny-1;
    dEest[k,0] = Sest[i,j];

fig2 = plt.figure(2,figsize=(6,6));
ax1 = plt.subplot(1, 1, 1);
plt.plot(dE,dEest,"k.",lw=2)
plt.xlabel("dobs");
plt.ylabel("dpre");
plt.show();
print("Caption: Scatter plot of obsered and predicted data");


# Case 2: Gaussian covariance

# field should have variance gamma2
gamma2=1.0;
gamma = sqrt(gamma2);

# field should have autocorrelation with scale factor, s
s = 5.0; # scale factor
s2 = s**2;
 
# load data (the fields sampled at random locations)
D = np.genfromtxt("../Data/random_fields.txt", delimiter="\t");
N, M = np.shape(D);
xd=np.copy(D[0:N,0:1]);
yd=np.copy(D[0:N,1:2]);
dE=np.copy(D[0:N,2:3]);
dG=np.copy(D[0:N,3:4]);
del D;

# add noise to data
sd = 0.01;
sd2 = sd**2;
n = np.random.normal(loc=0.0,scale=sd,size=(N,1));
dE = dE+n;
dG = dG+n;
 
# x-axis
Nx=41;
Dx=1.0;
x=gda_cvec(Dx*np.linspace(0,Nx-1,Nx));
xmax = Dx*(Nx-1);
 
# y-axis
Ny=41;
Dy=1.0;
y=gda_cvec(Dy*np.linspace(0,Ny-1,Ny));
ymax = Dy*(Ny-1);
 
M = Nx*Ny; # total number of model parameters
 
# lookup tables
iofk = np.zeros((M,1),dtype=int);
jofk = np.zeros((M,1),dtype=int);
kofij = np.zeros((Nx,Ny),dtype=int);
k=0;
for i in range(Nx):
    for j in range(Ny):
        iofk[k,0]=i;
        jofk[k,0]=j;
        kofij[i,j]=k;
        k=k+1;
        
# positions of model parameters
xm = np.zeros((M,1));
ym = np.zeros((M,1));
for k in range(M):
    xm[k,0]=x[iofk[k,0],0];
    ym[k,0]=y[jofk[k,0],0];
    
# covariance matrices, Gaussian case
Ccc = np.zeros((N,N));
for i in range(N):
    for j in range(N):
        r2 = abs(xd[i,0]-xd[j,0])**2 + abs(yd[i,0]-yd[j,0])**2;
        Ccc[i,j] = gamma2*exp(-r2/s2);
         
Ctc = np.zeros((M,N));
for i in range(M):
    for j in range(N):
        r2 = abs(xm[i,0]-xd[j,0])**2 + abs(ym[i,0]-yd[j,0])**2;
        Ctc[i,j] = gamma2*exp(-r2/s2);
        
# GPR solution
# mest = (Ctc / (Ccc + sd2*eye(N) )) * dG;
#      = Ctc * Qinv * dG     with Q = Ccc + epsi*eye(N)
#      = Ctc * u             with u = Qinv * dG or Qu=dG
Q = Ccc + sd2*np.identity(N);
u = la.solve(Q,dG);
mest = np.matmul(Ctc,u);
 
# posterior covariance
Ctt = np.zeros((M,M));
for i in range(M):
    for j in range(M):
        r2 = abs(xm[i,0]-xm[j,0])**2 + abs(ym[i,0]-ym[j,0])**2;
        Ctt[i,j] = gamma2*exp(-r2/s2);
# Cttest = Ctt  - (Ctc/(Ccc + sd2*eye(N)))*(Ctc');
#        = Ctt - Ctc * Qinv * Ctc'     with Q = Ccc + epsi*eye(N)
#        = Ctt - Ctc * U               with U = Qinv * Ctc' or Q U = Ctc'
# Q = Ccc + epsi*np.identity(N); already calculated, above
U = la.solve(Q,Ctc.T);
Cttest = Ctt  - np.matmul(Ctc,U);
sigmatcpos = gda_cvec(np.sqrt(np.diag(Cttest)));
 
# fold into image
Sest = np.zeros((Nx,Ny));
sigmaest = np.zeros((Nx,Ny));
for k in range(M):
    i=iofk[k,0];
    j=jofk[k,0];
    Sest[i,j]=mest[k,0];
    sigmaest[i,j]=sigmatcpos[k,0];
                
fig3 = plt.figure(3,figsize=(12,5));
ax1 = plt.subplot(1, 3, 1);
plt.axis([0,xmax,0,xmax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sest));
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(Sest.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.plot(xd,yd,"k.",lw=2);
plt.xlabel("x");
plt.ylabel("y");
 
ax1 = plt.subplot(1, 3, 2);
plt.axis([0,xmax,0,xmax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sest));
# test that image is in correct orientation
#Sest=np.zeros((Nx,Ny)); ix=2; jy=2; Sest[ix,jy]=dmax;
#Sest=np.zeros((Nx,Ny)); ix=Nx-2; jy=2; Sest[ix,jy]=dmax;
#Sest=np.zeros((Nx,Ny)); ix=2; jy=Ny-2; Sest[ix,jy]=dmax;
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(Sest.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel("x");
plt.ylabel("y");
                
ax1 = plt.subplot(1, 3, 3);
plt.axis([0,xmax,0,xmax]);
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sest));
left=0; right=xmax;
top=ymax; bottom=0;
plt.imshow(np.flipud(sigmaest.T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.plot(xd,yd,"k.",lw=2);
plt.xlabel("x");
plt.ylabel("y");
plt.show();
print("Caption: Reconstruction of 2D field, presuming Gaussian autocovariance");
print("(left) Estimated field (colors) with data (dots) superimposed");
print("(middle) Estimated field (colors). (Right) Standard error of the estimate");

# estimated data
dGest = np.zeros((N,1));
for k in range(N):
    i = int(floor(Nx*xd[k,0]/xmax));
    if(i<0):
        i=0;
    elif(i>(Nx-1)):
        i=Nx-1;
    j = int(floor(Ny*yd[k,0]/ymax));
    if(j<0):
        j=0;
    elif(j>(Ny-1)):
        j=Ny-1;
    dGest[k,0] = Sest[i,j];

fig4 = plt.figure(4,figsize=(6,6));
ax1 = plt.subplot(1, 1, 1);
plt.plot(dG,dGest,"k.",lw=2)
plt.xlabel("dobs");
plt.ylabel("dpre");
plt.show();
print("Caption: Scatter plot of obsered and predicted data");
gdapy07_04
Caption: Reconstruction of 2D field, presuming exponential autocovariance
(left) Estimated field (colors) with data (dots) superimposed
(middle) Estimated field (colors). (Right) Standard error of the estimate
Caption: Scatter plot of observed and predicted data
Caption: Reconstruction of 2D field, presuming Gaussian autocovariance
(left) Estimated field (colors) with data (dots) superimposed
(middle) Estimated field (colors). (Right) Standard error of the estimate
Caption: Scatter plot of observed and predicted data
In [6]:
# gdapy07_05
# data assimilation by Generalized Least Squares
# for the thermal diffusion equation
# This version uses non-sparse matrices

print("gdapy07_05");

# prior error in data (moderately accurate)
sigmad = 0.1;
sigmad2 = sigmad**2;
sigmadi = 1.0/sigmad;
 
# prior error in diff eqn (highly accurate)
sigmaeqn = 1e-4;
sigmaeqn2 = sigmaeqn**2;
sigmaeqni = 1.0/sigmaeqn;
 
# prior error in bc (highly accurate)
sigmabc = 1e-4;
sigmabc2 = sigmabc**2;
sigmabci = 1.0/sigmabc;
 
# prior error in ic (poorly known)
sigmaic = 1.0;
sigmaic2 = sigmaic**2;
sigmaici = 1.0/sigmaic;
 
# x-axis
Lx = 41;
Dx = 1.0;
x = gda_cvec(Dx*np.linspace(0,Lx-1,Lx));
xmax = Dx*(Lx-1);
Dxr2 = 1/(Dx**2);
ixm = floor(Lx/2)+1;
xm = x[ixm,0];
 
# t-axis
Lt = 41;
Dt = 1.0;
t = gda_cvec(Dt*np.linspace(0,Lt-1,Lt));
tmax = Dt*(Lt-1);
Dtr = 1.0/Dt;
 
M = Lx*Lt;
 
# lookup tables
iofk = np.zeros((M,1),dtype=int);
jofk = np.zeros((M,1),dtype=int);
kofij = np.zeros((Lx,Lt),dtype=int);
k=0;
for i in range(Lx):
    for j in range(Lt):
        iofk[k,0]=i;
        jofk[k,0]=j;
        kofij[i,j]=k;
        k=k+1;
        
D = np.zeros((M,M));
s = np.zeros((M,1));
 
# thermal constant
kappa = 0.1;
 
# row counter
nr = 0;
 
# interior
for n in range(1,Lx-1):
    for m in range(1,Lt):
        nm = kofij[n,m];
        nmommo = kofij[n-1,m-1];
        npommo = kofij[n+1,m-1];
        nmmo = kofij[n,m-1];
        D[nr,nmmo] = (-Dtr+2.0*kappa*Dxr2)*sigmaeqni;
        D[nr,nmommo] = -kappa*Dxr2*sigmaeqni;
        D[nr,npommo] = -kappa*Dxr2*sigmaeqni;
        D[nr,nm] = Dtr*sigmaeqni;
        s[nr,0] = 0.0;
        nr = nr+1;
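# (the rows above encode the explicit FTCS update for each interior
# node, scaled by 1/sigmaeqn:
#   (m[n,m]-m[n,m-1])/Dt - kappa*(m[n+1,m-1]-2*m[n,m-1]+m[n-1,m-1])/Dx^2 = 0)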

# boundary conditions
for m in range(Lt):
    # left
    n=0;
    nm = kofij[n,m];
    D[nr,nm] = sigmabci;
    s[nr,0] = 0.0;
    nr = nr+1;
    # right
    n=Lx-1;
    nm = kofij[n,m];
    D[nr,nm] = sigmabci;
    s[nr,0] = 0.0;
    nr = nr+1;

nrnoic = nr;
 
# initial conditions
m0 = np.zeros((Lx,1));
sx = 2;
sx2 = sx**2;
m0 = np.exp( -0.5 * np.power(x-xm,2) / sx2 );
m0[0,0]=0.0;
m0[Lx-1,0]=0.0;
 
for n in range(1,Lx-1):
    m = 0;
    nm = kofij[n,m];
    D[nr,nm] = 1.0*sigmaici;
    s[nr,0] = m0[n,0]*sigmaici;
    nr = nr+1;
      
print("D: expected %d rows, got %d" % (M,nr) );
 
# true solution
mtrue = la.solve(D,s);
 
Strue = np.zeros((Lx,Lt));
for k in range(M):
    i = iofk[k,0];
    j = jofk[k,0];
    Strue[i,j]=mtrue[k,0];

# randomly sample images
Nd = 15;
N = Nd*Lx;
ix = np.random.randint(low=0,high=Lx,size=(N,1));
it = np.random.randint(low=0,high=Lt-1,size=(N,1))+1; # no data at t=0
xd = x[ix.ravel(),0];
td = t[it.ravel(),0];
dtrue = np.zeros((N,1));
for i in range(N):
    dtrue[i,0] = Strue[ix[i,0],it[i,0]];

# add noise to data
nse = np.random.normal(loc=0.0,scale=sigmad,size=(N,1));
dobs = dtrue + nse;
 
# data kernel
G = np.zeros((N,M));
for i in range(N):
    k = kofij[ ix[i,0], it[i,0] ];
    G[i,k] = 1.0;

# generalized least squares
# note: the rhs uses s (not zeros), so the prior information
# on the ic's and bc's enters the solution
F = np.concatenate((D,G*sigmadi),axis=0);
f = np.concatenate((s,dobs*sigmadi),axis=0); # fixed bug here; s in place of zeros
FTF = np.matmul(F.T,F);
FTf = np.matmul(F.T,f);
mest = la.solve(FTF,FTf);
dpre = np.matmul(G,mest);
 
fig1 = plt.figure(1,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.plot(dobs,dpre,"k.");
plt.xlabel("dobs");
plt.ylabel("dpre");
plt.show();
print("Caption: Scatter plot of observed and predicted data");

Sest = np.zeros((Lx,Lt));
for k in range(M):
    i = iofk[k,0];
    j = jofk[k,0];
    Sest[i,j]=mest[k,0];

m0est = np.zeros((Lx,1));
for i in range(Lx):
    m0est[i,0]=Sest[i,0];

fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.plot(x,m0,"k-");
plt.plot(x,m0est,"r--");
plt.xlabel("x");
plt.ylabel("m0");
plt.show();
print("Caption: True (back) and estimated (red) solution m0=m(t=0)");

Sdata = np.zeros((Lx,Lt));
for i in range(N):
    Sdata[ix[i,0],it[i,0]]=dobs[i,0];

fig1 = plt.figure(1,figsize=(12,8));
 
ax1 = plt.subplot(2, 2, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Strue));
left=0; right=xmax;
top=tmax; bottom=0;
# for testing orientation of image
#ix=2; it=2; Strue[ix,it]=dmax;
#ix=2; it=Lt-2; Strue[ix,it]=dmax;
#ix=Lx-2; it=2; Strue[ix,it]=dmax;
plt.imshow(np.flipud(Strue.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('true');

ax2 = plt.subplot(2, 2, 2);
plt.axis([0.0,xmax,0.0,tmax])
ax2.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sdata.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('data');

ax3 = plt.subplot(2, 2, 3);
plt.axis([0.0,xmax,0.0,tmax])
ax3.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sest.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('est');

ax4 = plt.subplot(2, 2, 4);
plt.axis([0.0,xmax,0.0,tmax])
ax4.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Strue));
left=0; right=xmax;
top=tmax; bottom=0;
# changed plotting scale here
plt.imshow(np.flipud((Strue-Sest).T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('error');
plt.show()
 
print("Caption: (top left) True solution, (top right) observed data");
print("(bottom left) Estimated solution (bottom right) error");
 
gdapy07_05
D: expected 1681 rows, got 1681
Caption: Scatter plot of observed and predicted data
Caption: True (black) and estimated (red) solution m0=m(t=0)
Caption: (top left) True solution, (top right) observed data
(bottom left) Estimated solution (bottom right) error
In [9]:
# gdapy07_06
# data assimilation by Generalized Least Squares
# for the thermal diffusion equation
# This version uses sparse matrices

print("gdapy07_06");

# prior error in data (moderately accurate)
sigmad = 0.1;
sigmad2 = sigmad**2;
sigmadi = 1.0/sigmad;
 
# prior error in diff eqn (highly accurate)
sigmaeqn = 1e-2;
sigmaeqn2 = sigmaeqn**2;
sigmaeqni = 1.0/sigmaeqn;
 
# prior error in bc (highly accurate)
sigmabc = 1e-2;
sigmabc2 = sigmabc**2;
sigmabci = 1.0/sigmabc;
 
# prior error in ic (poorly known)
sigmaic = 1.0;
sigmaic2 = sigmaic**2;
sigmaici = 1.0/sigmaic;
 
# x-axis
Lx = 41;
Dx = 1.0;
x = gda_cvec(Dx*np.linspace(0,Lx-1,Lx));
xmax = Dx*(Lx-1);
Dxr2 = 1/(Dx**2);
ixm = floor(Lx/2)+1;
xm = x[ixm,0];
 
# t-axis
Lt = 41;
Dt = 1.0;
t = gda_cvec(Dt*np.linspace(0,Lt-1,Lt));
tmax = Dt*(Lt-1);
Dtr = 1.0/Dt;
 
M = Lx*Lt;
 
# lookup tables
iofk = np.zeros((M,1),dtype=int);
jofk = np.zeros((M,1),dtype=int);
kofij = np.zeros((Lx,Lt),dtype=int);
k=0;
for i in range(Lx):
    for j in range(Lt):
        iofk[k,0]=i;
        jofk[k,0]=j;
        kofij[i,j]=k;
        k=k+1;
        
 
# thermal constant
kappa = 0.1;
 
# row counter
nr = 0;

# build sparse matrix using row-column-value method
P=0;
Pmax = 100000;
Fr = np.zeros((Pmax,1),dtype=int);
Fc = np.zeros((Pmax,1),dtype=int);
Fv = np.zeros((Pmax,1));
s = np.zeros((M,1));
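# (each nonzero of the eventual matrix F is staged as a (row, column,
# value) triple in Fr, Fc, Fv; sparse.coo_matrix assembles all P
# triples at once further below)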

# interior
for n in range(1,Lx-1):
    for m in range(1,Lt):
        nm = kofij[n,m];
        nmommo = kofij[n-1,m-1];
        npommo = kofij[n+1,m-1];
        nmmo = kofij[n,m-1];
 
        # D(nr,nmmo) = (-Dtr+2.0*kappa*Dxr2)*sigmaeqni;
        Fr[P,0]=nr;   # row index
        Fc[P,0]=nmmo;   # column index
        Fv[P,0]=(-Dtr+2.0*kappa*Dxr2)*sigmaeqni; # value
        P=P+1;       # increment element number
    
        # D(nr,nmommo) = -kappa*Dxr2*sigmaeqni;
        Fr[P,0]=nr;   # row index
        Fc[P,0]=nmommo;   # column index
        Fv[P,0]=-kappa*Dxr2*sigmaeqni; # value
        P=P+1;       # increment element number
 
        # D(nr,npommo) = -kappa*Dxr2*sigmaeqni;
        Fr[P,0]=nr;   # row index
        Fc[P,0]=npommo;   # column index
        Fv[P,0]=-kappa*Dxr2*sigmaeqni; # value
        P=P+1;       # increment element number
 
        # D(nr,nm) = Dtr*sigmaeqni;
        Fr[P,0]=nr;   # row index
        Fc[P,0]=nm;   # column index
        Fv[P,0]=Dtr*sigmaeqni; # value
        P=P+1;       # increment element number
 
        s[nr,0] = 0.0; # right hand side
        nr = nr+1;   # increment row counter

# boundary conditions
for m in range(Lt):
    # left
    n=0;
    nm = kofij[n,m];
 
    # D(nr,nm) = sigmabci;
    Fr[P,0]=nr;   # row index
    Fc[P,0]=nm;   # column index
    Fv[P,0]=sigmabci; # value
    P=P+1;        # increment element number
    s[nr,0] = 0.0;
    nr = nr+1; # increment row counter
 
    # right
    n=Lx-1;
    nm = kofij[n,m];
                       
    # D(nr,nm) = sigmabci;
    Fr[P,0]=nr;   # row index
    Fc[P,0]=nm;   # column index
    Fv[P,0]=sigmabci; # value (fixed error here)
    P=P+1;        # increment element number
                       
    s[nr,0] = 0.0;
    nr = nr+1; # increment row counter

nrnoic = nr;
 
# initial conditions
m0 = np.zeros((Lx,1));
sx = 2.0;
sx2 = sx**2;
m0 = np.exp( - 0.5 * np.power(x-xm,2) / sx2 );
m0[0,0]=0.0;
m0[Lx-1,0]=0.0;
 
for n in range(1,Lx-1):
    m = 0;
    nm = kofij[n,m];
 
    # D(nr,nm) = 1.0*sigmaici;
    Fr[P,0]=nr;   # row index
    Fc[P,0]=nm;   # column index
    Fv[P,0]=1.0*sigmaici; # value
    P=P+1;        # increment element number
 
    s[nr,0] = m0[n,0]*sigmaici;
    nr = nr+1; # increment row counter

print("D: expected %d rows, got %d" % (M,nr) );
nel = 4*(Lx-2)*(Lt-1) + 2*Lt + (Lx-2);
print("D: expected %d elements, got %d" % (nel,P) );

L=M;
gdaFsparse=sparse.coo_matrix((Fv[0:P,0:1].ravel(),
                             (Fr[0:P,0:1].ravel(),Fc[0:P,0:1].ravel())),shape=(L,M));

# define linear operator needed for biconjugate gradient solver
LO=las.LinearOperator(shape=(M,M),matvec=gda_FTFmul,rmatvec=gda_FTFmul);
# solve using the biconjugate gradient algorithm
tol = 1e-12;     # tolerance
maxit = 3*(L+M); # maximum number of iterations allowed
FTf=gda_FTFrhs(s);
q=las.bicg(LO,FTf,rtol=tol,maxiter=maxit);
mtrue = gda_cvec(q[0]);

Strue = np.zeros((Lx,Lt));
for k in range(M):
    i = iofk[k,0];
    j = jofk[k,0];
    Strue[i,j]=mtrue[k,0];
gda_draw(Strue);
print("Caption: True solution");

# randomly sample images
Nd = 15;
N = Nd*Lx;
ix = np.random.randint(low=0,high=Lx,size=(N,1));
it = np.random.randint(low=0,high=Lt-1,size=(N,1))+1; # no data at t=0
xd = x[ix.ravel(),0];
td = t[it.ravel(),0];
dtrue = np.zeros((N,1));
for i in range(N):
    dtrue[i,0] = Strue[ix[i,0],it[i,0]];

# add noise to data
nse = np.random.normal(loc=0.0,scale=sigmad,size=(N,1));
dobs = dtrue + nse;
 
# data kernel
for i in range(N):
    k = kofij[ ix[i,0], it[i,0] ];
    Fr[P,0]=i+M;   # row index
    Fc[P,0]=k;     # column index
    Fv[P,0]=1.0*sigmadi; # value
    P=P+1;         # increment element number
    
nel = 4*(Lx-2)*(Lt-1) + 2*Lt + (Lx-2) + N;
print("D+G: expected %d elements, got %d" % (nel,P));
    
L=M+N;
gdaFsparse=sparse.coo_matrix((Fv[0:P,0:1].ravel(),
                             (Fr[0:P,0:1].ravel(),Fc[0:P,0:1].ravel())),shape=(L,M));
del Fr; del Fc; del Fv;

# define linear operator needed for biconjugate gradient solver
LO=las.LinearOperator(shape=(M,M),matvec=gda_FTFmul,rmatvec=gda_FTFmul);
# solve using the biconjugate gradient algorithm
tol = 1e-12;     # tolerance
maxit = 3*(L+M); # maximum number of iterations allowed
f = np.concatenate( (s, dobs*sigmadi),axis=0); # fixed bug here; s in place of zeros
FTf=gda_FTFrhs(f);
q=las.bicg(LO,FTf,rtol=tol,maxiter=maxit);
mest = gda_cvec(q[0]);

Sest = np.zeros((Lx,Lt));
for k in range(M):
    i = iofk[k,0];
    j = jofk[k,0];
    Sest[i,j]=mest[k,0];

# obs data on (x,t) plane, predicted data
Sdata = np.zeros((Lx,Lt));
dpre = np.zeros((N,1));
for i in range(N):
    Sdata[ix[i,0],it[i,0]]=dobs[i,0];
    k = kofij[ix[i,0],it[i,0]];
    dpre[i,0] = mest[k,0];
    
fig1 = plt.figure(1,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.plot(dobs,dpre,"k.");
plt.xlabel("dobs");
plt.ylabel("dpre");
plt.show();
print("Caption: Scatter plot of observed and predicted data");

Sest = np.zeros((Lx,Lt));
for k in range(M):
    i = iofk[k,0];
    j = jofk[k,0];
    Sest[i,j]=mest[k,0];

m0est = np.zeros((Lx,1));
for i in range(Lx):
    m0est[i,0]=Sest[i,0];

fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.plot(x,m0,"k-");
plt.plot(x,m0est,"r--");
plt.xlabel("x");
plt.ylabel("m0");
plt.show();
print("Caption: True (back) and estimated (red) solution m0=m(t=0)");

Sdata = np.zeros((Lx,Lt));
for i in range(N):
    Sdata[ix[i,0],it[i,0]]=dobs[i,0];

fig1 = plt.figure(1,figsize=(12,8));
 
ax1 = plt.subplot(2, 2, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Strue));
left=0; right=xmax;
top=tmax; bottom=0;
# for testing orientation of image
#ix=2; it=2; Strue[ix,it]=dmax;
#ix=2; it=Lt-2; Strue[ix,it]=dmax;
#ix=Lx-2; it=2; Strue[ix,it]=dmax;
plt.imshow(np.flipud(Strue.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('true');

ax2 = plt.subplot(2, 2, 2);
plt.axis([0.0,xmax,0.0,tmax])
ax2.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sdata.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('data');

ax3 = plt.subplot(2, 2, 3);
plt.axis([0.0,xmax,0.0,tmax])
ax3.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sest.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('est');

ax4 = plt.subplot(2, 2, 4);
plt.axis([0.0,xmax,0.0,tmax])
ax4.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Strue));
left=0; right=xmax;
top=tmax; bottom=0;
# changed plotting scale here
plt.imshow(np.flipud((Strue-Sest).T), cmap=mycmap, vmin=-dmax, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('error');
plt.show()
 
print("Caption: (top left) True solution, (top right) observed data");
print("(bottom left) Estimated solution (bottom right) error");
gdapy07_06
D: expected 1681 rows, got 1681
D: expected 6361 elements, got 6361
Caption: True solution
D+G: expected 6976 elements, got 6976
Caption: Scatter plot of observed and predicted data
Caption: True (black) and estimated (red) solution m0=m(t=0)
Caption: (top left) True solution, (top right) observed data
(bottom left) Estimated solution (bottom right) error
In [12]:
# gdapy07_07
# Thomas recursion used to solve an MxM block tri-diagonal system
# system has Lt variable diagonal blocks, Ai, each Lx by Lx
# and constant off-diagonal blocks, Lx by Lx, all randomly generated
 
print("gdapy07_07");

# size of blocks and matrices
Lx = 5;
Lt = 30;
M = Lx*Lt;
 
# build random symmetric block tridiagonal matrix, A
# variable diagonal blocks
A = np.zeros((M,M));
Ai = np.zeros((Lx,Lx,Lt));
for i in range(Lt):
    At = np.random.normal(loc=0.0,scale=1.0,size=(Lx,Lx));
    Ai[0:Lx,0:Lx,i] = 0.5*(At+At.T);
    j = Lx*i;  # upper left element in A
    A[j:j+Lx,j:j+Lx] = Ai[0:Lx,0:Lx,i];

# constant off-diagonal blocks
B = np.random.normal(loc=0.0,scale=1.0,size=(Lx,Lx));
for i in range(1,Lt):
    j = Lx*i;  # upper left element in A
    k = Lx*(i-1);
    A[j:j+Lx,k:k+Lx] = B[0:Lx,0:Lx];
    A[k:k+Lx,j:j+Lx] = B[0:Lx,0:Lx].T;

# true solution
mtrue = np.random.normal(loc=0.0,scale=1.0,size=(M,1));
 
#  right hand side
a = np.matmul(A,mtrue);
ai = np.zeros((Lx,Lt));
for i in range(Lt):
    j = Lx*i;  # first element in a
    ai[0:Lx,i:i+1] = a[j:j+Lx,0:1];

# forward recursion
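# (a sketch of the standard block-Thomas sweeps, matching the code below:
#   Ahat_i = A_i - B*inv(Ahat_{i-1})*B.T,  ahat_i = a_i - B*inv(Ahat_{i-1})*ahat_{i-1},
# followed by the backward sweep m_i = inv(Ahat_i)*(ahat_i - B.T*m_{i+1}))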
Ahinvi = np.zeros((Lx,Lx,Lt));
ahi = np.zeros((Lx,Lt));
Ahinvi[0:Lx,0:Lx,0] = la.inv(Ai[0:Lx,0:Lx,0]);
ahi[0:Lx,0] = ai[0:Lx,0];
for i in range(1,Lt):
    Ahinvi[0:Lx,0:Lx,i] = la.inv(Ai[0:Lx,0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],B.T)));
    ahi[0:Lx,i] = ai[0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],ahi[0:Lx,i-1]));
                        
# backward recursion
mi = np.zeros((Lx,Lt));
mi[0:Lx,Lt-1] = np.matmul(Ahinvi[0:Lx,0:Lx,Lt-1],ahi[0:Lx,Lt-1]);
for i in reversed(range(Lt-1)):
    mi[0:Lx,i] = np.matmul(Ahinvi[0:Lx,0:Lx,i],ahi[0:Lx,i]-np.matmul(B.T,mi[0:Lx,i+1]));
    
# assemble solution
m = np.zeros((M,1));
for i in range(Lt):
    j = Lx*i;  # first element in m
    m[j:j+Lx,0:1]=mi[0:Lx,i:i+1];

#  error
e = mtrue-m;
E = np.matmul(e.T,e)/M; E=E[0,0];
sqrtE = sqrt(E);
print("rms error %e" % (sqrtE) );
gdapy07_07
rms error 3.918099e-13
In [7]:
# gdapy07_08
# Thomas recursion used in data assimilation problem
# with thermal diffusion equation.
# This version has extensive error checking.
# I've deleted all the error checking in the version in the next section
 
print("gdapy07_08");
 
# thermal constant
kappa = 0.1;
 
# size of blocks and matrices
Lx = 41;
Lt = 41;
M = Lx*Lt;
 
# prior error in data (moderately accurate)
sigmad = 0.05;
sigmad2 = sigmad**2;
sigmadi = 1.0/sigmad;
sigmadi2 = 1.0/sigmad2;
 
# prior error in diff eqn and bc (highly accurate)
sigmas = 1e-2;
sigmas2 = sigmas**2;
sigmasi = 1.0/sigmas;
sigmasi2 = 1.0/sigmas2;
 
# prior error in ic (poorly known)
sigmam0 = 1.0;
sigmam02 = sigmam0**2;
sigmam0i = 1.0/sigmam0;
sigmam0i2 = 1.0/sigmam02;
 
# x-axis
Dx = 1.0;
x = gda_cvec(Dx*np.linspace(0,Lx-1,Lx));
xmax = Dx*(Lx-1);
Dxr = 1.0/Dx;
Dxr2 = 1/(Dx**2);
ixm = int(floor(Lx/2));
xm = x[ixm,0];
 
# t-axis
Dt = 1.0;
t = gda_cvec(Dt*np.linspace(0,Lt-1,Lt));
tmax = Dt*(Lt-1);
Dtr = 1.0/Dt;
 
# lookup tables; must ensure times are contiguous
iofk = np.zeros((M,1),dtype=int);
jofk = np.zeros((M,1),dtype=int);
kofij = np.zeros((Lx,Lt));
k=0;
for j in range(Lt):
    for i in range(Lx):
        iofk[k,0]=i;
        jofk[k,0]=j;
        kofij[i,j]=k;
        k=k+1;
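# (equivalently, k = j*Lx + i, with the space index i varying fastest)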

# 2nd derivative with zero value bc's
r = gda_cvec(-2.0,1.0,np.zeros((Lx-2,1)));
c = gda_cvec(-2.0,1.0,np.zeros((Lx-2,1)));
D2 = Dxr2*la.toeplitz(r.ravel(),c.ravel());
D2[0,0]=1.0;
D2[0,1]=0.0;
D2[Lx-1,Lx-2]=0.0;
D2[Lx-1,Lx-1]=1.0;
 
# differential operator, D
D = Dt*kappa*D2+np.identity(Lx);
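# D implements one explicit (forward Euler) time step of the heat
# equation dm/dt = kappa d2m/dx2, since
#   m(t+Dt) = m(t) + Dt*kappa*D2 m(t) = D m(t)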
 
# initial conditions
m0 = np.zeros((Lx,1));
sx = 2.0;
sx2 = sx**2;
m0 = np.exp( -0.5 * np.power(x-xm,2) / sx2 );
m0[0,0]=0.0;
m0[Lx-1,0]=0.0;
 
# execute the recursion
# to get the true solution
mtrue = np.zeros((Lx,Lt));
mtrue[0:Lx,0:1]=m0;
for i in range(1,Lt):
    mtrue[0:Lx,i:i+1] = np.matmul(D,mtrue[0:Lx,i-1:i]);

gda_draw(mtrue);
print("Caption: True solution mtrue, via recursion");

H = np.zeros((M,M));
h = np.zeros((M,1));
for i in range(Lt):
    j=Lx*i;
    H[j:j+Lx,j:j+Lx]= np.identity(Lx);
h[0:Lx,0:1] = m0;
 
for i in range(1,Lt):
    j=Lx*i;
    k=Lx*(i-1)
    H[j:j+Lx,k:k+Lx] = -D;
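# H is block bidiagonal, with identity blocks on the diagonal and -D
# blocks on the subdiagonal, so Hm=h encodes m_1 = m0 and
# m_i - D m_(i-1) = 0 for i>1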
# alternate version of true solution
# checks H and h
malt = la.solve(H,h);
 
S=np.zeros((Lx,Lt));
for k in range(M):
    i=iofk[k,0];
    j=jofk[k,0];
    S[i,j]=malt[k,0];

# error
e = S-mtrue;
print("Differece between recursion and m=Hinv*h: %e" % (np.max(np.abs(e))));

# sample data
Nd = 5; # number of data per time slice
d = np.zeros((Nd,Lt));
dobs = np.zeros((Nd,Lt));
ido = np.zeros((Nd,Lt),dtype=int);
Sdata = np.zeros((Lx,Lt));
for j in range(1,Lt): # no data at t=0
    i = np.random.permutation(np.arange(2,Lx-1)); # permute [2, ..., Lx-2]
    i = i[0:Nd];
    ido[0:Nd,j]=i;
    d[0:Nd,j:j+1]=S[i,j:j+1]; # true data
    # observed data = true data + noise
    dobs[0:Nd,j:j+1]= d[0:Nd,j:j+1] + np.random.normal(loc=0.0,scale=sigmad,size=(Nd,1));
    Sdata[i,j:j+1]=dobs[0:Nd,j:j+1];
    
gda_draw(Sdata);
print("Caption: The data");

# individual data kernels
Gi = np.zeros((Nd,Lx,Lt));
for i in range(1,Lt):  # no data at t=0
    for j in range(Nd):
        k=ido[j,i];
        Gi[j,k,i]=1.0;

# full data kernel
G = np.zeros((Nd*(Lt-1),M));
dv = np.zeros((Nd*(Lt-1),1));
for i in range(1,Lt):
    j = (i-1)*Nd;
    k = i*Lx;
    G[j:j+Nd,k:k+Lx] = Gi[0:Nd,0:Lx,i];
    dv[j:j+Nd,0] = dobs[0:Nd,i];
    
sigmahi2 = gda_cvec(sigmam0i2*np.ones((Lx,1)), sigmasi2*np.ones((Lx*(Lt-1),1)));
HTH = np.matmul(H.T,np.matmul(np.diag(sigmahi2.ravel()),H));
HTH = 0.5*(HTH+HTH.T); # ensure symmetric
HTh = np.matmul(H.T,np.matmul(np.diag(sigmahi2.ravel()),h));
GTG = sigmadi2*np.matmul(G.T,G);
GTG = 0.5*(GTG+GTG.T); # ensure symmetric
GTd = sigmadi2*np.matmul(G.T,dv);
FTF = GTG+HTH;
FTf = HTh+GTd;
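# FTF m = FTf are the normal equations of generalized least squares,
# minimizing the weighted error
#   E = sigmadi2*||G m - dv||^2 + (H m - h).T diag(sigmahi2) (H m - h)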

# check solution of data eqn alone as a way of checking GTG and GTd
mest = la.solve(GTG+1e-8*np.identity(M),GTd);
Sest = np.zeros((Lx,Lt));
for k in range(M): # fold solution
    i=iofk[k,0];
    j=jofk[k,0];
    Sest[i,j]=mest[k,0];
gda_draw(Sest);
print("Caption: Solution of just the data equation");
print("(Should look just like data, above.)");

# Prior solution, as a way of checking HTH and HTh
mPRI = la.solve(HTH,HTh);
SPRI = np.zeros((Lx,Lt));
for k in range(M): # fold solution
    i=iofk[k,0];
    j=jofk[k,0];
    SPRI[i,j]=mPRI[k,0];
gda_draw(SPRI);
e = mtrue - SPRI;
print("Caption: Prior Solution");
print("(Should look just like true soln, above.)");
print("error with respect to true soln: %e" % (np.max(np.abs(e))));
print(" ");

# Generalized least squares solution
mGLS = la.solve(FTF,FTf);
SGLS = np.zeros((Lx,Lt));
for k in range(M): # fold solution
    i=iofk[k,0];
    j=jofk[k,0];
    SGLS[i,j]=mGLS[k,0];
gda_draw(SGLS)
print("Caption: Generalized Least Squares Solution");
print(" ");

# setup for Thomas Recursion
Ai=np.zeros((Lx,Lx,Lt));
ai=np.zeros((Lx,Lt));
 
# A1 and a1
si = np.zeros((Lx,1));
sim1 = np.zeros((Lx,1));
Ai[0:Lx,0:Lx,0] = sigmasi2*np.matmul(D.T,D) + sigmam0i2*np.identity(Lx);
ai[0:Lx,0:1]  = -sigmasi2*np.matmul(D.T,si) + sigmam0i2*m0;
e = Ai[0:Lx,0:Lx,0] - FTF[0:Lx,0:Lx];
print("Thomas soln, error in A1: %e" % (np.max(np.abs(e))) );
e = ai[0:Lx,0:1] - FTf[0:Lx,0:1];
print("error in a1: %e" % (np.max(np.abs(e))) );

# interior A's and a's
EA = 0.0;
Ea = 0.0;
for i in range(1,Lt-1):
    Ai[0:Lx,0:Lx,i]=sigmasi2*np.matmul(D.T,D)+sigmasi2*np.identity(Lx)+sigmadi2*np.matmul(Gi[0:Nd,0:Lx,i].T,Gi[0:Nd,0:Lx,i]);
    ai[0:Lx,i:i+1] =-sigmasi2*np.matmul(D.T,si)+sigmasi2*sim1+sigmadi2*np.matmul(Gi[0:Nd,0:Lx,i].T,dobs[0:Nd,i:i+1]);
    j = Lx*i;
    e = Ai[0:Lx,0:Lx,i] - FTF[j:j+Lx,j:j+Lx];
    EAi = np.max(np.abs(e));
    if( EAi > EA ):
        EA = EAi;
    e = ai[0:Lx,i:i+1] - FTf[j:j+Lx,0:1];
    Eai =  np.max(np.abs(e))
    if( Eai > Ea ):
        Ea = Eai;
print("max interior error, A: %e and a: %e" % (EA, Ea) );

# final A and a
sLtm1 = np.zeros((Lx,1));
Ai[0:Lx,0:Lx,Lt-1] = sigmasi2*np.identity(Lx) + sigmadi2*np.matmul(Gi[0:Nd,0:Lx,Lt-1].T,Gi[0:Nd,0:Lx,Lt-1]);
ai[0:Lx,Lt-1:Lt]  =  sigmasi2*sLtm1           + sigmadi2*np.matmul(Gi[0:Nd,0:Lx,Lt-1].T,dobs[0:Nd,Lt-1:Lt]);
e = Ai[0:Lx,0:Lx,Lt-1] - FTF[M-Lx:M,M-Lx:M];
print("error in Alast: %e" % (np.max(np.abs(e))));
e = ai[0:Lx,Lt-1:Lt] - FTf[M-Lx:M,0:1];
print("error in alast: %e" % (np.max(np.abs(e))));

# B
B = -sigmasi2*D;
e = B - FTF[Lx:2*Lx,0:Lx];
print("error in B: %e" % (np.max(np.abs(e))));

# forward recursion
Ahinvi = np.zeros((Lx,Lx,Lt));
ahi = np.zeros((Lx,Lt));
Ahinvi[0:Lx,0:Lx,0] = la.inv(Ai[0:Lx,0:Lx,0]);
ahi[0:Lx,0] = ai[0:Lx,0];
for i in range(1,Lt):
    Ahinvi[0:Lx,0:Lx,i] = la.inv(Ai[0:Lx,0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],B.T)));
    ahi[0:Lx,i] = ai[0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],ahi[0:Lx,i-1]));
                        
# backward recursion
mi = np.zeros((Lx,Lt));
mi[0:Lx,Lt-1] = np.matmul(Ahinvi[0:Lx,0:Lx,Lt-1],ahi[0:Lx,Lt-1]);
for i in reversed(range(Lt-1)):
    mi[0:Lx,i] = np.matmul(Ahinvi[0:Lx,0:Lx,i],ahi[0:Lx,i]-np.matmul(B.T,mi[0:Lx,i+1]));

gda_draw(mi);
print("Caption: Estimated solution using the Thomas method.");

e = SGLS-mi;
print("difference between GLS and Thomas soln: %e" % (np.max(np.abs(e))));

fig1 = plt.figure(1,figsize=(12,8));
 
ax1 = plt.subplot(2, 2, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mtrue));
left=0; right=xmax;
top=tmax; bottom=0;
# for testing orientation of image
#ix=2; it=2; mtrue[ix,it]=dmax;
#ix=2; it=Lt-2; mtrue[ix,it]=dmax;
#ix=Lx-2; it=2; mtrue[ix,it]=dmax;
plt.imshow(np.flipud(mtrue.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('true');

ax2 = plt.subplot(2, 2, 2);
plt.axis([0.0,xmax,0.0,tmax])
ax2.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sdata));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sdata.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('data');

ax3 = plt.subplot(2, 2, 3);
plt.axis([0.0,xmax,0.0,tmax])
ax3.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(SGLS));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(SGLS.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('GLS');

ax4 = plt.subplot(2, 2, 4);
plt.axis([0.0,xmax,0.0,tmax])
ax4.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mi));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(mi.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('Thomas');
plt.show();
print("Caption: (Upper left) true solution, (upper right) data,");
print("(lower left) Estimated solution using Generalized least squares");
print("(lower right) Estimated solution using Thomas recursion.");
gdapy07_08
Caption: True solution mtrue, via recursion
Difference between recursion and m=Hinv*h: 5.551115e-16
Caption: The data
Caption: Solution of just the data equation
(Should look just like data, above.)
Caption: Prior Solution
(Should look just like true soln, above.)
error with respect to true soln: 1.374781e-10
 
Caption: Generalized Least Squares Solution
 
Thomas soln, error in A1: 9.094947e-13
error in a1: 0.000000e+00
max interior error, A: 2.273737e-13 and a: 0.000000e+00
error in Alast: 0.000000e+00
error in alast: 0.000000e+00
error in B: 0.000000e+00
Caption: Estimated solution using the Thomas method.
difference between GLS and Thomas soln: 1.443193e-12
Caption: (Upper left) true solution, (upper right) data,
(lower left) Estimated solution using Generalized least squares
(lower right) Estimated solution using Thomas recursion.
In [13]:
# gdapy07_09

# Same as gdapy07_08, but without the extensive error checking
# Thomas recursion used in data assimilation problem
# with thermal diffusion equation.

print("gdapy07_09");
 
# thermal constant
kappa = 0.1;
 
# size of blocks and matrices
Lx = 41;
Lt = 41;
M = Lx*Lt;
 
# prior error in data (moderately accurate)
sigmad = 0.05;
sigmad2 = sigmad**2;
sigmadi = 1.0/sigmad;
sigmadi2 = 1.0/sigmad2;
 
# prior error in diff eqn and bc (highly accurate)
sigmas = 1e-2;
sigmas2 = sigmas**2;
sigmasi = 1.0/sigmas;
sigmasi2 = 1.0/sigmas2;
 
# prior error in ic (poorly known)
sigmam0 = 1.0;
sigmam02 = sigmam0**2;
sigmam0i = 1.0/sigmam0;
sigmam0i2 = 1.0/sigmam02;
 
# x-axis
Dx = 1.0;
x = gda_cvec(Dx*np.linspace(0,Lx-1,Lx));
xmax = Dx*(Lx-1);
Dxr = 1.0/Dx;
Dxr2 = 1/(Dx**2);
ixm = int(floor(Lx/2));
xm = x[ixm,0];
 
# t-axis
Dt = 1.0;
t = gda_cvec(Dt*np.linspace(0,Lt-1,Lt));
tmax = Dt*(Lt-1);
Dtr = 1.0/Dt;
 
# lookup tables; must ensure times are contiguous
iofk = np.zeros((M,1),dtype=int);
jofk = np.zeros((M,1),dtype=int);
kofij = np.zeros((Lx,Lt));
k=0;
for j in range(Lt):
    for i in range(Lx):
        iofk[k,0]=i;
        jofk[k,0]=j;
        kofij[i,j]=k;
        k=k+1;

# 2nd derivative with zero value bc's
r = gda_cvec(-2.0,1.0,np.zeros((Lx-2,1)));
c = gda_cvec(-2.0,1.0,np.zeros((Lx-2,1)));
D2 = Dxr2*la.toeplitz(r.ravel(),c.ravel());
D2[0,0]=1.0;
D2[0,1]=0.0;
D2[Lx-1,Lx-2]=0.0;
D2[Lx-1,Lx-1]=1.0;
 
# differential operator, D
D = Dt*kappa*D2+np.identity(Lx);
 
# initial conditions
m0 = np.zeros((Lx,1));
sx = 2.0;
sx2 = sx**2;
m0 = np.exp( -0.5 * np.power(x-xm,2) / sx2 );
m0[0,0]=0.0;
m0[Lx-1,0]=0.0;
 
# execute the recursion
# to get the true solution
mtrue = np.zeros((Lx,Lt));
mtrue[0:Lx,0:1]=m0;
for i in range(1,Lt):
    mtrue[0:Lx,i:i+1] = np.matmul(D,mtrue[0:Lx,i-1:i]);

H = np.zeros((M,M));
h = np.zeros((M,1));
for i in range(Lt):
    j=Lx*i;
    H[j:j+Lx,j:j+Lx]= np.identity(Lx);
h[0:Lx,0:1] = m0;
 
for i in range(1,Lt):
    j=Lx*i;
    k=Lx*(i-1)
    H[j:j+Lx,k:k+Lx] = -D;
# alternate version of true solution
# checks H and h
malt = la.solve(H,h);
 
S=np.zeros((Lx,Lt));
for k in range(M):
    i=iofk[k,0];
    j=jofk[k,0];
    S[i,j]=malt[k,0];

# sample data
Nd = 5; # number of data per time slice
d = np.zeros((Nd,Lt));
dobs = np.zeros((Nd,Lt));
ido = np.zeros((Nd,Lt),dtype=int);
Sdata = np.zeros((Lx,Lt));
for j in range(1,Lt): # no data at t=0
    i = np.random.permutation(np.arange(2,Lx-1)); # permute [2, ..., Lx-2]
    i = i[0:Nd];
    ido[0:Nd,j]=i;
    d[0:Nd,j:j+1]=S[i,j:j+1]; # true data
    # observed data = true data + noise
    dobs[0:Nd,j:j+1]= d[0:Nd,j:j+1] + np.random.normal(loc=0.0,scale=sigmad,size=(Nd,1));
    Sdata[i,j:j+1]=dobs[0:Nd,j:j+1];

# individual data kernels
Gi = np.zeros((Nd,Lx,Lt));
for i in range(1,Lt):  # no data at t=0
    for j in range(Nd):
        k=ido[j,i];
        Gi[j,k,i]=1.0;

# full data kernel
G = np.zeros((Nd*(Lt-1),M));
dv = np.zeros((Nd*(Lt-1),1));
for i in range(1,Lt):
    j = (i-1)*Nd;
    k = i*Lx;
    G[j:j+Nd,k:k+Lx] = Gi[0:Nd,0:Lx,i];
    dv[j:j+Nd,0] = dobs[0:Nd,i];
    
sigmahi2 = gda_cvec(sigmam0i2*np.ones((Lx,1)), sigmasi2*np.ones((Lx*(Lt-1),1)));
HTH = np.matmul(H.T,np.matmul(np.diag(sigmahi2.ravel()),H));
HTH = 0.5*(HTH+HTH.T); # ensure symmetric
HTh = np.matmul(H.T,np.matmul(np.diag(sigmahi2.ravel()),h));
GTG = sigmadi2*np.matmul(G.T,G);
GTG = 0.5*(GTG+GTG.T); # ensure symmetric
GTd = sigmadi2*np.matmul(G.T,dv);
FTF = GTG+HTH;
FTf = HTh+GTd;

# Generalized least squares solution
mGLS = la.solve(FTF,FTf);
SGLS = np.zeros((Lx,Lt));
for k in range(M): # fold solution
    i=iofk[k,0];
    j=jofk[k,0];
    SGLS[i,j]=mGLS[k,0];

# setup for Thomas Recursion
Ai=np.zeros((Lx,Lx,Lt));
ai=np.zeros((Lx,Lt));
 
# A1 and a1
si = np.zeros((Lx,1));
sim1 = np.zeros((Lx,1));
Ai[0:Lx,0:Lx,0] = sigmasi2*np.matmul(D.T,D) + sigmam0i2*np.identity(Lx);
ai[0:Lx,0:1]  = -sigmasi2*np.matmul(D.T,si) + sigmam0i2*m0;

# interior A's and a's
for i in range(1,Lt-1):
    Ai[0:Lx,0:Lx,i]=sigmasi2*np.matmul(D.T,D)+sigmasi2*np.identity(Lx)+sigmadi2*np.matmul(Gi[0:Nd,0:Lx,i].T,Gi[0:Nd,0:Lx,i]);
    ai[0:Lx,i:i+1] =-sigmasi2*np.matmul(D.T,si)+sigmasi2*sim1+sigmadi2*np.matmul(Gi[0:Nd,0:Lx,i].T,dobs[0:Nd,i:i+1]);

# final A and a
sLtm1 = np.zeros((Lx,1));
Ai[0:Lx,0:Lx,Lt-1] = sigmasi2*np.identity(Lx) + sigmadi2*np.matmul(Gi[0:Nd,0:Lx,Lt-1].T,Gi[0:Nd,0:Lx,Lt-1]);
ai[0:Lx,Lt-1:Lt]  =  sigmasi2*sLtm1           + sigmadi2*np.matmul(Gi[0:Nd,0:Lx,Lt-1].T,dobs[0:Nd,Lt-1:Lt]);

# B
B = -sigmasi2*D;

# forward recursion
Ahinvi = np.zeros((Lx,Lx,Lt));
ahi = np.zeros((Lx,Lt));
Ahinvi[0:Lx,0:Lx,0] = la.inv(Ai[0:Lx,0:Lx,0]);
ahi[0:Lx,0] = ai[0:Lx,0];
for i in range(1,Lt):
    Ahinvi[0:Lx,0:Lx,i] = la.inv(Ai[0:Lx,0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],B.T)));
    ahi[0:Lx,i] = ai[0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],ahi[0:Lx,i-1]));
                        
# backward recursion
mi = np.zeros((Lx,Lt));
mi[0:Lx,Lt-1] = np.matmul(Ahinvi[0:Lx,0:Lx,Lt-1],ahi[0:Lx,Lt-1]);
for i in reversed(range(Lt-1)):
    mi[0:Lx,i] = np.matmul(Ahinvi[0:Lx,0:Lx,i],ahi[0:Lx,i]-np.matmul(B.T,mi[0:Lx,i+1]));

fig1 = plt.figure(1,figsize=(12,8));
 
ax1 = plt.subplot(2, 2, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mtrue));
left=0; right=xmax;
top=tmax; bottom=0;
# for testing orientation of image
#ix=2; it=2; mtrue[ix,it]=dmax;
#ix=2; it=Lt-2; mtrue[ix,it]=dmax;
#ix=Lx-2; it=2; mtrue[ix,it]=dmax;
plt.imshow(np.flipud(mtrue.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('true');

ax2 = plt.subplot(2, 2, 2);
plt.axis([0.0,xmax,0.0,tmax])
ax2.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sdata));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sdata.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('data');

ax3 = plt.subplot(2, 2, 3);
plt.axis([0.0,xmax,0.0,tmax])
ax3.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(SGLS));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(SGLS.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('GLS');

ax4 = plt.subplot(2, 2, 4);
plt.axis([0.0,xmax,0.0,tmax])
ax4.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mi));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(mi.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('Thomas');
plt.show();
print("Caption: (Upper left) true solution, (upper right) data,");
print("(lower left) Estimated solution using Generalized least squares");
print("(lower right) Estimated solution using Thomas recursion.");
gdapy07_09
Caption: (Upper left) true solution, (upper right) data,
(lower left) Estimated solution using Generalized least squares
(lower right) Estimated solution using Thomas recursion.
In [14]:
# gdapy07_10

# Thomas recursion and Kalman filtering used in the present-time
# data assimilation problem with the thermal diffusion equation.
# I also compute the global-time solution for comparison

print("gdapy07_10");
 
# thermal constant
kappa = 0.05;
 
# size of blocks and matrices
Lx = 41;
Lt = 41;
M = Lx*Lt;
 
# prior error in data
sigmad = 0.05;
sigmad2 = sigmad**2;
sigmadi = 1.0/sigmad;
sigmadi2 = 1.0/sigmad2;
 
# prior error in diff eqn and bc
sigmas = 1;
sigmas2 = sigmas**2;
sigmasi = 1.0/sigmas;
sigmasi2 = 1.0/sigmas2;
 
# prior error in ic
sigmam0 = 1.0;
sigmam02 = sigmam0**2;
sigmam0i = 1.0/sigmam0;
sigmam0i2 = 1.0/sigmam02;
 
# x-axis
Dx = 1.0;
x = gda_cvec(Dx*np.linspace(0,Lx-1,Lx));
xmax = Dx*(Lx-1);
Dxr = 1.0/Dx;
Dxr2 = 1/(Dx**2);
ixm = int(floor(Lx/2));
xm = x[ixm,0];
 
# t-axis
Dt = 1.0;
t = gda_cvec(Dt*np.linspace(0,Lt-1,Lt));
tmax = Dt*(Lt-1);
Dtr = 1.0/Dt;
 
# lookup tables; must ensure times are contiguous
iofk = np.zeros((M,1),dtype=int);
jofk = np.zeros((M,1),dtype=int);
kofij = np.zeros((Lx,Lt));
k=0;
for j in range(Lt):
    for i in range(Lx):
        iofk[k,0]=i;
        jofk[k,0]=j;
        kofij[i,j]=k;
        k=k+1;

# 2nd derivative with zero value bc's
r = gda_cvec(-2.0,1.0,np.zeros((Lx-2,1)));
c = gda_cvec(-2.0,1.0,np.zeros((Lx-2,1)));
D2 = Dxr2*la.toeplitz(r.ravel(),c.ravel());
D2[0,0]=1.0;
D2[0,1]=0.0;
D2[Lx-1,Lx-2]=0.0;
D2[Lx-1,Lx-1]=1.0;
 
# differential operator, D
D = Dt*kappa*D2+np.identity(Lx);
 
# initial conditions
m0 = np.zeros((Lx,1));
sx = 2.0;
sx2 = sx**2;
m0 = np.exp( -0.5 * np.power(x-xm,2) / sx2 );
m0[0,0]=0.0;
m0[Lx-1,0]=0.0;
 
# execute the recursion
# to get the true solution
mtrue = np.zeros((Lx,Lt));
mtrue[0:Lx,0:1]=m0;
for i in range(1,Lt):
    mtrue[0:Lx,i:i+1] = np.matmul(D,mtrue[0:Lx,i-1:i]);

H = np.zeros((M,M));
h = np.zeros((M,1));
for i in range(Lt):
    j=Lx*i;
    H[j:j+Lx,j:j+Lx]= np.identity(Lx);
h[0:Lx,0:1] = m0;
 
for i in range(1,Lt):
    j=Lx*i;
    k=Lx*(i-1)
    H[j:j+Lx,k:k+Lx] = -D;
# alternate version of true solution
# checks H and h
malt = la.solve(H,h);
 
S=np.zeros((Lx,Lt));
for k in range(M):
    i=iofk[k,0];
    j=jofk[k,0];
    S[i,j]=malt[k,0];

# sample data
Nd = 15; # number of data per time slice
d = np.zeros((Nd,Lt));
dobs = np.zeros((Nd,Lt));
ido = np.zeros((Nd,Lt),dtype=int);
Sd = np.zeros((Lx,Lt));
Sdata = np.zeros((Lx,Lt));
for j in range(1,Lt): # no data at t=0
    i = np.random.permutation(np.arange(2,Lx-1)); # permute [2, ..., Lx-2]
    i = i[0:Nd];
    ido[0:Nd,j]=i;
    d[0:Nd,j:j+1]=S[i,j:j+1]; # true data
    # observed data = true data + noise
    dobs[0:Nd,j:j+1]= d[0:Nd,j:j+1] + np.random.normal(loc=0.0,scale=sigmad,size=(Nd,1));
    Sd[i,j:j+1]=d[0:Nd,j:j+1];
    Sdata[i,j:j+1]=dobs[0:Nd,j:j+1];

# individual data kernels
Gi = np.zeros((Nd,Lx,Lt));
for i in range(1,Lt):  # no data at t=0
    for j in range(Nd):
        k=ido[j,i];
        Gi[j,k,i]=1.0;

# full data kernel
G = np.zeros((Nd*(Lt-1),M));
dv = np.zeros((Nd*(Lt-1),1));
for i in range(1,Lt):
    j = (i-1)*Nd;
    k = i*Lx;
    G[j:j+Nd,k:k+Lx] = Gi[0:Nd,0:Lx,i];
    dv[j:j+Nd,0] = dobs[0:Nd,i];
    
sigmahi2 = gda_cvec(sigmam0i2*np.ones((Lx,1)), sigmasi2*np.ones((Lx*(Lt-1),1)));
HTH = np.matmul(H.T,np.matmul(np.diag(sigmahi2.ravel()),H));
HTH = 0.5*(HTH+HTH.T); # ensure symmetric
HTh = np.matmul(H.T,np.matmul(np.diag(sigmahi2.ravel()),h));
GTG = sigmadi2*np.matmul(G.T,G);
GTG = 0.5*(GTG+GTG.T); # ensure symmetric
GTd = sigmadi2*np.matmul(G.T,dv);
FTF = GTG+HTH;
FTf = HTh+GTd;

# Generalized least squares solution
mGLS = la.solve(FTF,FTf);
SGLS = np.zeros((Lx,Lt));
for k in range(M): # fold solution
    i=iofk[k,0];
    j=jofk[k,0];
    SGLS[i,j]=mGLS[k,0];
    
# SOLUTION 1: Global-time solution by full Thomas Recursion

# setup for Thomas Recursion
Ai=np.zeros((Lx,Lx,Lt));
ai=np.zeros((Lx,Lt));
 
# A1 and a1
si = np.zeros((Lx,1));
sim1 = np.zeros((Lx,1));
Ai[0:Lx,0:Lx,0] = sigmasi2*np.matmul(D.T,D) + sigmam0i2*np.identity(Lx);
ai[0:Lx,0:1]  = -sigmasi2*np.matmul(D.T,si) + sigmam0i2*m0;

# interior A's and a's
for i in range(1,Lt-1):
    Ai[0:Lx,0:Lx,i]=sigmasi2*np.matmul(D.T,D)+sigmasi2*np.identity(Lx)+sigmadi2*np.matmul(Gi[0:Nd,0:Lx,i].T,Gi[0:Nd,0:Lx,i]);
    ai[0:Lx,i:i+1] =-sigmasi2*np.matmul(D.T,si)+sigmasi2*sim1+sigmadi2*np.matmul(Gi[0:Nd,0:Lx,i].T,dobs[0:Nd,i:i+1]);

# final A and a
sLtm1 = np.zeros((Lx,1));
Ai[0:Lx,0:Lx,Lt-1] = sigmasi2*np.identity(Lx) + sigmadi2*np.matmul(Gi[0:Nd,0:Lx,Lt-1].T,Gi[0:Nd,0:Lx,Lt-1]);
ai[0:Lx,Lt-1:Lt]  =  sigmasi2*sLtm1           + sigmadi2*np.matmul(Gi[0:Nd,0:Lx,Lt-1].T,dobs[0:Nd,Lt-1:Lt]);

# B
B = -sigmasi2*D;

# forward recursion
Ahinvi = np.zeros((Lx,Lx,Lt));
ahi = np.zeros((Lx,Lt));
Ahinvi[0:Lx,0:Lx,0] = la.inv(Ai[0:Lx,0:Lx,0]);
ahi[0:Lx,0] = ai[0:Lx,0];
for i in range(1,Lt):
    Ahinvi[0:Lx,0:Lx,i] = la.inv(Ai[0:Lx,0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],B.T)));
    ahi[0:Lx,i] = ai[0:Lx,i]-np.matmul(B,np.matmul(Ahinvi[0:Lx,0:Lx,i-1],ahi[0:Lx,i-1]));
                        
# backward recursion
mi = np.zeros((Lx,Lt));
mi[0:Lx,Lt-1] = np.matmul(Ahinvi[0:Lx,0:Lx,Lt-1],ahi[0:Lx,Lt-1]);
for i in reversed(range(Lt-1)):
    mi[0:Lx,i] = np.matmul(Ahinvi[0:Lx,0:Lx,i],ahi[0:Lx,i]-np.matmul(B.T,mi[0:Lx,i+1]));
    
    
# SOLUTION 2: Thomas present-time solution
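# note: the present-time estimate at step i is obtained by solving
# with AhiPT and ahiPT, which omit the sigmasi2*D.T*D (and matching
# -sigmasi2*D.T*si) terms that couple m_i to the next time step, so
# the estimate uses only data up to and including time i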
 
# B
B = -sigmasi2*D;
 
# A1 and a1
si = np.zeros((Lx,1));
sim1 = np.zeros((Lx,1));
 
A1 = sigmasi2*np.matmul(D.T,D) + sigmam0i2*np.identity(Lx);
a1  = -sigmasi2*np.matmul(D.T,si) + sigmam0i2*m0;
Ah1 = A1;
ah1 = a1;
mPT = np.zeros((Lx,Lt)); # present time solution
mPT[0:Lx,0:1] = m0;  # first soln is just initial condition
Ahim1 = Ah1;
ahim1 = ah1;
# interior A's and a's
for i in range(1,Lt):
    Gs = np.squeeze(Gi[0:Nd,0:Lx,i]);
    AiPT = sigmasi2*np.identity(Lx) + sigmadi2*np.matmul(Gs.T,Gs);
    Ai = sigmasi2*np.matmul(D.T,D) + AiPT;
    aiPT = sigmasi2*sim1 +  sigmadi2*np.matmul(Gs.T,dobs[0:Nd,i:i+1]);
    ai  = -sigmasi2*np.matmul(D.T,si) + aiPT;
    BAI = np.matmul(B,la.inv(Ahim1));
    BAIBT = np.matmul(BAI,B.T);
    AhiPT = AiPT - BAIBT;
    Ahi =   Ai   - BAIBT;
    ahiPT = aiPT - np.matmul(BAI,ahim1);
    ahi   = ai   - np.matmul(BAI,ahim1);
    eps = 1e-6*np.max(np.abs(AhiPT));
    mPT[0:Lx,i:i+1] = la.solve(AhiPT+eps*np.identity(Lx),ahiPT);
    Ahim1 = Ahi;
    ahim1 = ahi;

# SOLUTION 3: Present-time solution by Kalman filtering
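# the loop below implements the standard Kalman predict/update steps:
#   predict: mAi    = D*mK_(i-1) + s_(i-1)
#            covmAi = D*covm_(i-1)*D.T + sigmas2*I
#   update:  AK     = inv(covmAi) + sigmadi2*Gi.T*Gi
#            mK_i   = inv(AK)*( inv(covmAi)*mAi + sigmadi2*Gi.T*dobs_i )
#            covm_i = inv(AK)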
sim1 = np.zeros((Lx,1));
mK = np.zeros((Lx,Lt)); # Kalman solution
varm = np.zeros((Lx,Lt));
mK[0:Lx,0:1] = m0;  # first soln is just initial condition
covm1 = sigmam02*np.identity(Lx);
varm[0:Lx,0:1] = gda_cvec(np.diag(covm1));

covmAKim1 = covm1;
for i in range(1,Lt):
    mAi = np.matmul(D,mK[0:Lx,i-1:i])+sim1;
    covmAi = np.matmul(D,np.matmul(covmAKim1,D.T)) + sigmas2*np.identity(Lx);
    Gs = np.squeeze(Gi[0:Nd,0:Lx,i]);
    AK=la.inv(covmAi) + sigmadi2*np.matmul(Gs.T,Gs);
    AKinv=la.inv(AK);
    mK[0:Lx,i:i+1] = np.matmul(AKinv,la.solve(covmAi,mAi) + sigmadi2*np.matmul(Gs.T,dobs[0:Nd,i:i+1]));
    varm[0:Lx,i:i+1]=gda_cvec(np.diag(AKinv));
    covmAKim1 = AKinv;

fig1 = plt.figure(1,figsize=(12,8));

ax1 = plt.subplot(2, 2, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mtrue));
left=0; right=xmax;
top=tmax; bottom=0;
# for testing orientation of image
#ix=2; it=2; mtrue[ix,it]=dmax;
#ix=2; it=Lt-2; mtrue[ix,it]=dmax;
#ix=Lx-2; it=2; mtrue[ix,it]=dmax;
plt.imshow(np.flipud(mtrue.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('true');

ax2 = plt.subplot(2, 2, 2);
plt.axis([0.0,xmax,0.0,tmax])
ax2.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mi));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(mi.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('GLS');

ax3 = plt.subplot(2, 2, 3);
plt.axis([0.0,xmax,0.0,tmax])
ax3.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mPT));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(mPT.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('PT');

ax4 = plt.subplot(2, 2, 4);
plt.axis([0.0,xmax,0.0,tmax])
ax4.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mK));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(mK.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('Kalman');
plt.show();
print("Caption: Solutons. (Upper left) true , (upper right) Generalized Least Squares");
print("(lower left) Thomas Present-time, (lower right) Kalman");

fig2 = plt.figure(2,figsize=(12,8));

ax1 = plt.subplot(1, 2, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mtrue));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud((mi-mK).T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('mGLS-mK');

ax2 = plt.subplot(1, 2, 2);
plt.axis([0.0,xmax,0.0,tmax])
ax2.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(mi));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud((mPT-mK).T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel('x');
plt.ylabel('t');
plt.title('mPT-mK');
plt.show();
print("Caption: Errors (Left) Thomas minus Kalman, (right) Thomas Present minus Kalman");

fig3 = plt.figure(3,figsize=(12,8));

ax1 = plt.subplot(1, 2, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sd));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sd.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel("x");
plt.ylabel("t");
plt.title("dtrue");

ax2 = plt.subplot(1, 2, 2);
plt.axis([0.0,xmax,0.0,tmax])
ax2.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
dmax = np.max(np.abs(Sdata));
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(Sdata.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel("x");
plt.ylabel("t");
plt.title("dobs");
plt.show();
print("Caption: Data (Left) True, (right) Observed");


fig4 = plt.figure(4,figsize=(12,8));
ax1 = plt.subplot(1, 1, 1);
plt.axis([0.0,xmax,0.0,tmax])
ax1.invert_yaxis();
mycmap=matplotlib.colormaps['jet'];
SEM=np.sqrt(varm);
dmax = 10.0;
left=0; right=xmax;
top=tmax; bottom=0;
plt.imshow(np.flipud(SEM.T), cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.xlabel("x");
plt.ylabel("t");
plt.title("sqrt(varm)");
plt.show();
print("Caption: Standard error of the solution");
gdapy07_10
Caption: Solutions. (Upper left) true, (upper right) Generalized Least Squares
(lower left) Thomas Present-time, (lower right) Kalman
Caption: Errors. (Left) global Thomas minus Kalman, (right) present-time Thomas minus Kalman
Caption: Data (Left) True, (right) Observed
Caption: Standard error of the solution