In [1]:
# gdapy14_00 clears all variables and import various modules
# you must run this cell first, before running any of others

# note that these examples use inverse theory techniques that
# are only covered in later chapters of the book

# History:
# 2022/11/25 - Created by W. Menke
# 2023/04/20 - Addition of cells translated from MATLAB(R)
# 2024/05/23 -  Patches to accomodate new verions of numpy, scipy, matplotlib
#               patched dot product to return scalar value
#               las.bicg keyword tol changes to rtol

# A reset deletes any previously-created variables from memory
%reset -f

# import various packages

import numpy as np                    # matrices & etc
from matplotlib import pyplot as plt  # general plotting 
from matplotlib import patches        # plotting shapes
from datetime import date             # date and time arithmetic
from math import exp, pi, sin, cos, tan, atan2, sqrt, floor, log10, nan   # math functions
import scipy.linalg as la             # linear algebra functions
import os                             # operating system
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
import scipy.signal as sg
import scipy.stats as st
import scipy.sparse.linalg as las
from scipy import sparse
import matplotlib

# function to make a numpy N-by-1 column vector
# c=eda_cvec(v1, v2, ...) from a list of several
# array-like entities v1, v2, including a number
# a list of numbers, a tuple of numbers, an N-by-0 np array
# and a N-by-1 np array. The function
# also insures that, if v is an np array, that
# c is a copy, as contrasted to a view, of it
# It promotes integers to floats, and integers
# and floats to complex, by context.
# This version concatenates many argments,
# whereas c=eda_cvec1(v1) takes just one argiment.
# I recommend always using eda_cvec(v1, v2, ...)
def gda_cvec(*argv):
    t = int;
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        Nt = Nt + N;
        if( N==0 ):
            continue; # skip vector of zero length
        if (t==int) and isinstance(v[0,0],float):
            t=float;
        elif isinstance(v[0,0],complex):
            t=complex;
    w = np.zeros((Nt,1),dtype=t);
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        #w[Nt:Nt+N,0] = v[0:N,0];
        w[Nt:Nt+N,0:1] = v;  # patch 20230418 was #w[Nt:Nt+N,0] = v[0:N,0];
        Nt = Nt + N;
    return w;

# function to make a numpy N-by-1 column vector
# c=gda_cvec1(v) from entity v that is array-like,
# including a number, a list of numbers, a tuple
# of numbers, an N-by-0 np array and a N-by1 np array.
# It promotes integers to floats, and integers
# and floats to complex, by context. The function
# also insures that, if v is an np array, that
# c is a copy, as contrasted to a view, of it.
# This version takes just one input argment.
# whereas c=gda_cvec(v1,v2,...) concatenates
# many argiments.
def gda_cvec1(v):
    if isinstance(v, int) or isinstance(v, np.int32):
        w = np.zeros((1,1),dtype=int);
        w[0,0] = v;
        return w;
    elif isinstance(v, float):
        w = np.zeros((1,1),dtype=float);
        w[0,0] = v;
        return w;
    elif isinstance(v, complex):
        w = np.zeros((1,1),dtype=complex);
        w[0,0] = v;
        return w;
    elif isinstance(v, np.ndarray):
        s = np.shape(v);
        if len(s) == 1:
            return np.copy(np.reshape(v,(s[0],1)));
        else:
            [r,c]=s;
            if( c==1 ):
                return(np.copy(v));
            elif(r==1):
                return(np.copy(v.T));
            else:
                raise TypeError("gda_cvec: %d by %d ndarray not allowed" % (r, c));
    elif isinstance(v, list):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: list contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(v); # patch v -> np.array(v)
        return w;
    elif isinstance(v, tuple):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: tuple contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(list(v)); # patch v -> np.array(list(v));
        return w;
    else:
        raise TypeError("gda_cvec: %s not supported" % type(v));


        
# gda_draw function makes a "pictorial matrix equation"
# arguments are vectors, matrices and strings
# which are plotted in the order that the appear
# except that strings starting with 'title ' are plotted
# under the subseqeunt matrix or vector
# always returns a status of 1
def gda_draw(*argv):
    DOCOLOR=True;
    if( DOCOLOR ):
        bwcmap = matplotlib.colormaps['jet'];
    else:
        bw = np.zeros((256,4));
        v = 0.9*(256 - np.linspace( 0, 255, 256 ))/255;
        bw[:,0] = v;
        bw[:,1] = v;
        bw[:,2] = v;
        bw[:,3] = np.ones(256);
        bwcmap = ListedColormap(bw);
    # size of plot
    W = 16;
    H = 4;
    fig1 = plt.figure(1);
    # figsize width and height in inches
    fig1.set_size_inches(W,H);
    ax1 = plt.subplot(1,1,1);
    plt.axis([0, W, -H/2, H/2]);
    plt.axis('off');
    LM = W/6;    # matrix width and heoght
    LV = W/40;   # vector width
    FS = 0.12;    # character width
    TO = 0.4;    # title vertical offset
    SP = 0.2;    # space between objects
    LS = 0.2;    # leading space
    p = LS; # starting x-position
    istitle=0; # flags presence of a title
    for a in argv:
        if isinstance(a,np.ndarray):
            sh = np.shape(a);
            if len(sh) == 1:  # conversion to nx1 array
                n = sh[0];
                m = 1;
                ap = a;
                a = np.zeros((n,1));
                a[:,0] = ap;
            else:
                n = sh[0];
                m = sh[1];
            if m==1:
                pold=p;
                left=p;
                right=p+LV;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LV;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
            else:
                pold=p;
                left=p;
                right=p+LM;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LM;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
        elif isinstance(a,str):
            ns = len(a);
            istitle=0;
            if( ns>=6 ):
                if 'title ' in a[0:6]:
                    istitle=1;
                    titlestr=a[6:];
            if( istitle != 1):
                plt.text(p,0,a);
                p = p + ns*FS + SP;
    plt.show();
    return 1;

# bandpass filter, used in seismological example, but hand
# in a variety of settings involving time series
def gda_chebyshevfilt(d, Dt, flow, fhigh):
    # (dout,u,v)=gda_chebyshevfilt(d, Dt, flow, fhigh);
    # chebyshev IIR bandpass filter
    # d - input array of data
    # Dt - sampling interval
    # flow - low pass frequency, Hz
    # fhigh - high pass frequency, Hz
    # dout - output array of data
    # u - the numerator filter
    # v - the denominator filter
    # these filters can be used again using dout=filter(u,v,din);

    # make sure input timeseries is a column vector
    s = np.shape(d);
    N = s[0];
    if(N==1):
        dd = np.zeros((N,1));
        dd[:,0] = d;
    else:
        dd=d;
        
    # sampling rate
    rate=1/Dt;

    # ripple parameter, set to ten percent
    ripple=0.1;  

    # normalise frequency
    fl=2.0*flow/rate;
    fh=2.0*fhigh/rate;

    # center frequency 
    cf = 4 * tan( (fl*pi/2) ) * tan( (fh*pi/2) );

    # bandwidth
    bw = 2 * ( tan( (fh*pi/2) ) - tan( (fl*pi/2) ) );

    # ripple parameter factor
    rpf = sqrt((sqrt((1.0+1.0/(ripple*ripple))) + 1.0/ripple));
    a = 0.5*(rpf-1.0/rpf);
    b = 0.5*(rpf+1.0/rpf);

    u=np.zeros((5,1));
    v=np.zeros((5,1));
    theta = 3*pi/4;
    sr = a * cos(theta);
    si = b * sin(theta);
    es = sqrt(sr*sr+si*si);
    tmp= 16.0 - 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw - 4.0*bw*cf*sr + cf*cf;
    v[0,0] = 1.0;
    v[1,0] = 4.0*(-16.0 + 8.0*bw*sr - 2.0*bw*cf*sr + cf*cf)/tmp;
    v[2,0] = (96.0 - 16.0*cf - 8.0*es*es*bw*bw + 6.0*cf*cf)/tmp;
    v[3,0] = (-64.0 - 32.0*bw*sr + 8.0*bw*cf*sr + 4.0*cf*cf)/tmp;
    v[4,0] = (16.0 + 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw + 4.0*bw*cf*sr + cf*cf)/tmp;
    tmp = 4.0*es*es*bw*bw/tmp;
    u[0,0] = tmp;
    u[1,0] = 0.0;
    u[2,0] = -2.0*tmp;
    u[3,0] = 0.0;
    u[4,0] = tmp;

    dout = sg.lfilter(u.ravel(),v.ravel(),dd.ravel());
    return (gda_cvec(dout),gda_cvec(u),gda_cvec(v));

# function to perform the multiplication FT (F v)
def gda_FTFmul(v):
    # this function is used by bicongugate gradient to
    # solve the generalized least squares problem Fm=F.
    # Note that "F" must be called "gdaFsparse".
    global gdaFsparse;
    s = np.shape(v);
    if(len(s)==1):
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    # weirdly, "*" multiplies sparse matrices just fine
    temp = gdaFsparse*vv;
    return gdaFsparse.transpose()*temp;


# function to perform the multiplication FT f
def gda_FTFrhs(f):
    # this function is used by bicongugate gradient to
    # solve the generalized least squares problem Fm=F.
    # Note that "F" must be called "gdaFsparse".
    global gdaFsparse;
    return gdaFsparse.transpose()*f;


# implements (GTG + epilon I) m = (GT d) when G is a sparse matrix
def gda_dlsmul(v):
    # this function is used by bicongugate gradient to
    # solve the damped least squares problem [Gm=F; epsilon*Im=0].
    # Note that "G" must be called "gdaGsparse".
    global gdaGsparse, gdaepsilon;
    s = np.shape(v);
    if(len(s)==1):
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    # impliments (G'G + epsilon I) v = G'(G v) + epsilon v
    # weirdly, "*" multiplies sparse matrices just fine
    temp = gdaGsparse*vv;
    return gdaGsparse.transpose()*temp + gdaepsilon*vv;

# function to perform the multiplication FT f
def gda_dlsrhs(d):
    # this function is used by bicongugate gradient to
    # solve the generalized least squares problem Fm=F.
    # Note that "G" must be called "gdaGsparse".
    global gdaGsparse, gdaepsilon;
    return gdaGsparse.transpose()*d;
In [3]:
# gdapy14_01
 
# Radon transform example.  This version uses
# an image containing smooth features

# Note: The inversion of the Radom Transform
# requires working with 2D Fourier transforms,
# which is nitty-gritty stuff and requires
# extensive knowledge of their properties
 
# image mtrue(i,j) is N by N, in (x=i,y=j) space,
# range of both x, y is +/- 1

print("gdapy14_01");

N = 256;
 
# independent variable x
Nx = N;
Nx2 = int(Nx/2);
xmin = -1.0;
xmax =  1.0;
Dx = (xmax-xmin)/Nx;
x = gda_cvec( xmin + Dx*np.linspace(0,Nx-1,Nx));
 
# independent variable y
Ny = N;
Ny2 = int(Ny/2);
ymin = -1.0;
ymax =  1.0;
Dy = (ymax-ymin)/Ny;
y = gda_cvec(ymin + Dy*np.linspace(0,Ny-1,Ny));
 
# true image m(x,y)
# test image is a sum of several Normal curves
xbar1=0.2;
ybar1=0.0;
s1=0.1;
mtrue = np.exp(-(np.power(x-xbar1,2)*np.ones((1,Ny)) + np.ones((Nx,1))*np.power(y-ybar1,2).T)/(2.0*s1*s1));
xbar2=-0.3;
ybar2=0.0;
s2=0.05;
mtrue = np.exp(-(np.power(x-xbar2,2)*np.ones((1,Ny)) + np.ones((Nx,1))*np.power(y-ybar2,2).T)/(2.0*s2*s2)) + mtrue;
xbar3=0;
ybar3=0.2;
s3=0.025;
mtrue = np.exp(-(np.power(x-xbar3,2)*np.ones((1,Ny)) + np.ones((Nx,1))*np.power(y-ybar3,2).T)/(2.0*s3*s3)) + mtrue;

# raveled version of mtrue
Lmtrue=np.reshape(mtrue, (Nx*Ny,) );
 
# some statistics, to compare with reconstructed image
print("m has minimum value %f" % np.min(mtrue) );
print("m has maximum value %f" % np.max(mtrue) );
print(" ");

# plot true image
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, xmax, 0, ymax] );
left=0;
right=xmax;
bottom=xmax;
top=0;
plt.imshow( mtrue, cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('x');
plt.ylabel('y');
plt.show();
print("Caption: True image.");

# Radon transform, R(i,j) is in (u=i, q=theta=j) space
# range of u is +/- 1 (note corners of box cur off)
# range of q is 0 to pi
 
# independent variable u
Nu = N;
umin = -1.0;
umax = 1.0;
Du = (umax-umin)/Nu;
u = gda_cvec(umin + Du*np.linspace(0,Nu-1,Nu));
 
# independent variable q
Nq = N;
qmin = -pi/2.0;
qmax =  pi/2.0;
Dq = (qmax-qmin)/(Nq-1); # note includes both -pi/2 and pi/2
q = gda_cvec(qmin + Dq*np.linspace(0,Nq-1,Nq)); # for ease of interpolation
 
# independent variable s
# integration over s, range +/- 1
Ns = 4*N;
smin = -1.0;
smax = 1.0;
Ds =(smax-smin)/Ns;
s = gda_cvec(smin + Ds*np.linspace(0,Ns-1,Ns));
 
# brute force Radon transform via ray sums
R = np.zeros((Nu,Nq));
# loop over theta
for i in range(Nq):
    
    # object here is top build Nu by Ns array of image, m,
    # evaluated at proper values of (u, s)
 
    # (x,y) from (u,s); these are Nu by Ns matrices
    # so that x(i,j) and y(i,j) are the (x,y) coordinates of mtrue(u(i),s(j))
    qi = q[i,0];
    sqi = sin(qi);
    cqi = cos(qi);
    xp = u*np.ones((1, Ns))*sqi - np.ones((Nu,1))*s.T*cqi
    yp = u*np.ones((1, Ns))*cqi + np.ones((Nu,1))*s.T*sqi;
 
    # indices corresponding to (x, y)
    ixp = np.floor((xp-xmin)/Dx).astype(int);
    iyp = np.floor((yp-ymin)/Dy).astype(int);
    
    # Convert Nu by Ns arrays to vectors
    Lixp = np.reshape( ixp, (Nu*Ns,1) );
    Liyp = np.reshape( iyp, (Nu*Ns,1) );
 
    # handle out of range x indices
    kk = np.where(Lixp<0);
    jxp1 = kk[0];
    if( jxp1.any() ):
        Lixp[jxp1,0] = 0;
    kk = np.where(Lixp>=Nx);
    jxp1 = kk[0];
    if( jxp1.any() ):
        Lixp[jxp1,0] = Nx-1;
 
    # handle out of range y indices
    jyp1 = np.where(Liyp<0);
    jyp1 = kk[0];
    if( jyp1.any() ):
        Liyp[jyp1,0] = 0;
    jyp1 = np.where(Liyp<0);
    kk = np.where(iyp>=Ny);
    jyp1 = kk[0];
    if( jyp1.any() ):
        Liyp[jyp1,0] = Ny-1;
    
    # generate linear index into image
    L = np.ravel_multi_index( (Lixp.ravel(), Liyp.ravel()), (Nx, Ny), mode="clip" );
                   
    # sample image, mtrue, and rearrange it back into Nu by Ns 
    S = np.reshape(Lmtrue[L],(Nu,Ns));
    
    # integrate (sum) S(u,s) over s to get R(u) for fixed q
    R[0:Nu,i:i+1] = gda_cvec(Ds*np.sum(S,axis=1));

# plot the radon transform
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [qmin, qmax, umin, umax] );
left=qmin;
right=qmax;
bottom=umax;
top=umin;
dmax=np.max(np.abs(R));
plt.imshow( R, cmap=mycmap, vmin=0.0, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.gca().invert_yaxis();
plt.xlabel('theta');
plt.ylabel('u');
plt.show();
print("Caption: Radon transform of true image");
print("  ");

# statistics of the Radon transform
print("R has minimum value %f" % (np.min(R)) );
print("R has maximum value %f" % (np.max(R)) );

# Inverse Radon Transform via Fourier transforms
 
# reorder array so that s=0, currenty at row N/2, is at row 1.
R = np.roll(R, -Nx2+1, axis=0);
#now take 1D fourier transform over columns of R
Rt = np.fft.fft(R,axis=0);
 
# invert Radon transform via central slice theorem
 
# Fourier transform of image
mtt=np.zeros((Nx,Ny),dtype=complex);
 
# fourier transform u -> ku
kumin=0.0;
kumax = 1.0/(2.0*Du);
Dku=kumax/(Nu/2.0);
Nku2 = int(Nu/2+1); # non-negative frequuencies
kup = gda_cvec(Dku*np.linspace(0,Nku2-1,Nku2));
kun = -np.flipud(kup[1:Nku2-1,0:1]);
ku= gda_cvec( kup, kun );

# fourier transform x -> kx
kxmin=0.0;
kxmax = 1/(2.0*Dx);
Dkx=kxmax/(Nx/2.0);
Nkx2 = int(Nx/2+1); # non-negative frequuencies
kxp = gda_cvec(Dkx*np.linspace(0,Nkx2-1,Nkx2));
kxn = -np.flipud(kxp[1:Nkx2-1,0:1]);
kx= gda_cvec( kxp, kxn );

# fourier transform y -> ky
kymin=0;
kymax = 1.0/(2.0*Dy);
Dky=kymax/(Ny/2.0);
Nky2 = int(Ny/2+1); # non-negative frequuencies
kyp = gda_cvec(Dky*np.linspace(0,Nky2-1,Nky2));
kyn = -np.flipud(kyp[1:Nky2-1,0:1]);
ky= gda_cvec( kyp, kyn );
 
# Now use the Fourier Slice theorem. All the
# work is in sampling the radon transform at
# a series of points that correspond to wavenumber
# pairs on a rectangular grid.  That requires
# the Radon tranform to be interpolated.

piby2 = pi/2.0;
 
for i in range(Nx):
    for j in range(Nky2): # only need left side, will rebuild right using symmetry
    
        # theta calculation
        theta = atan2( kx[i,0], ky[j,0] );
        if( theta > piby2 ): # map large thetas onto negative thetas
            theta=pi-theta;
            thflag=1;
        else:
            thflag=0;
        
        # radial wavenumber
        kr = sqrt(kx[i,0]**2 + ky[j,0]**2);
        # index into randon transform array
        if( kr > kumax ):
            continue; # ignore points that are out of bounds
            
        # treat indices as real numbers
        krindex = (kr-kumin)/Dku;
        thindex = (theta-qmin)/Dq;
        if( thindex<0.0 ):
            thindex=0.0;
        elif( thindex>=Nq ):
            thindex=Nq-1;
                
        # bracket real number indices with integers
        
        # radial index
        A = int(floor(krindex));
        B = int(floor(krindex+1));
        if( B >= Nku2 ):
            A = Nku2-2;
            B = Nku2-1;
        
        # theta index
        C = int(floor(thindex));
        D = int(floor(thindex+1));
        if( D >= Nq ):
            C=Nq-2;
            D=Nq-1;
        
        # Lagrangian polynomial interpoaltion
        # note that the indices are separated by unity
        FC  = (D-thindex);
        CKR = ((B-krindex)*Rt[A,C]+(krindex-A)*Rt[B,C]);
        FD = (thindex-C);
        DKR = ((B-krindex)*Rt[A,D]+(krindex-A)*Rt[B,D]);
        tmp = (FC*CKR+FD*DKR);
        if (thflag==1):
             mtt[i,j]=np.conj(tmp);
        else:
             mtt[i,j]=tmp;

# Fourier transform of image needs to be phase shifted
# else origin is in the wrong place
xshift = (xmax-xmin)/2.0;
yshift = (ymax-ymin)/2.0;
phase = 2*pi*(kx*np.ones((1,Ny))*xshift + np.ones((Nx,1))*(ky.T)*yshift);
mtt = np.multiply(mtt,np.cos(phase)+complex(0.0,1.0)*np.sin(phase));
 
# impose all necessary symmetries required for a real image
# these four elements must be real
# MARLAB)R)
# mtt(1,1)=real(mtt(1,1));
# mtt(Nx/2+1,1)=real(mtt(Nx/2+1,1));
# mtt(1,Ny/2+1)=real(mtt(1,Ny/2+1,1));
# mtt(Nx/2+1,Ny/2+1)=real(mtt(Nx/2+1,Ny/2+1));
mtt[0,0]=np.real(mtt[0,0]);
mtt[Nx2,0]=np.real(mtt[Nx2,0]);
mtt[0,Ny2]=np.real(mtt[0,Ny2]);
mtt[Nx2,Ny2]=np.real(mtt[Nx2,Ny2]);
# bottoms of these two columns must be conjugates of top
# MATLAB(R)
# mtt(Nx:-1:Nx/2+2,1)=conj(mtt(2:Nx/2,1));
# mtt(Nx:-1:Nx/2+2,Ny/2+1)=conj(mtt(2:Nx/2,Ny/2+1));
mtt[Nx2+1:Nx,0:1]=np.flipud(np.conj(mtt[1:Nx2,0:1]));
mtt[Nx2+1:Nx,Ny2:Ny2+1]=np.conj(mtt[1:Nx2,Ny2:Ny2+1]);
# right hand side flipped conjugate of left hand side
#  MATLAB(R)
#for m = (2:Ny/2)
#  mp=Ny-m+2;
#  mtt(1,mp) = conj(mtt(1,m));
#  mtt(2:Nx,mp) = flipud(conj(mtt(2:Nx,m)));
#end
for m in range(1,Ny):
    mp=Ny-m;
    mtt[0,mp] = np.conj(mtt[0,m]);
    mtt[1:Nx,mp:mp+1] = np.flipud(np.conj(mtt[1:Nx,m:m+1]));

# now take inverse fourier transform
# Note: normalization not necessarily correct for non-square images
mest = np.fft.ifft2(mtt)*(N/2);
mest = np.real(mest); # throw away imaginary part (which should be zero)
 
# plot reconstructed image
fig3 = plt.figure(3,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, xmax, 0, ymax] );
left=0;
right=xmax;
bottom=xmax;
top=0;
plt.imshow( mest, cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('x');
plt.ylabel('y');
plt.show();
print("Caption: True image.");
print("  ");

# some statistics, to compare with reconstructed image
print("Minimum value of image: true %f estimated: %f" %  (np.min(mtrue), np.min(mest)) );
print("Maximum value of image: true %f estimated: %f" %  (np.max(mtrue), np.max(mest)) );
gdapy14_01
m has minimum value 0.000000
m has maximum value 1.009415
 
No description has been provided for this image
Caption: True image.
No description has been provided for this image
Caption: Radon transform of true image
  
R has minimum value 0.000000
R has maximum value 0.375511
No description has been provided for this image
Caption: True image.
  
Minimum value of image: true 0.000000 estimated: -0.024783
Maximum value of image: true 1.009415 estimated: 0.996982
In [4]:
# gdapy14_02
 
# Radon transform example.  This version uses
# an image from a jpg file

# Note: The inversion of the Radom Transform
# requires working with 2D Fourier transforms,
# which is nitty-gritty stuff and requires
# extensive knowledge of their properties
 
# image mtrue(i,j) is N by N, in (x=i,y=j) space,
# range of both x, y is +/- 1

print("gdapy14_02");

N = 256;
 
# independent variable x
Nx = N;
Nx2 = int(Nx/2);
xmin = -1.0;
xmax =  1.0;
Dx = (xmax-xmin)/Nx;
x = gda_cvec( xmin + Dx*np.linspace(0,Nx-1,Nx));
 
# independent variable y
Ny = N;
Ny2 = int(Ny/2);
ymin = -1.0;
ymax =  1.0;
Dy = (ymax-ymin)/Ny;
y = gda_cvec(ymin + Dy*np.linspace(0,Ny-1,Ny));
 
# true image m(x,y) is from a file
mtrue = 256-plt.imread("../Data/magma.tif");

# raveled version of mtrue
Lmtrue=np.reshape(mtrue, (Nx*Ny,) );
 
# some statistics, to compare with reconstructed image
print("m has minimum value %f" % np.min(mtrue) );
print("m has maximum value %f" % np.max(mtrue) );
print(" ");

# plot true image
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, xmax, 0, ymax] );
left=0;
right=xmax;
bottom=xmax;
top=0;
plt.imshow( mtrue, cmap=mycmap, vmin=0, vmax=256, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('x');
plt.ylabel('y');
plt.show();
print("Caption: True image.");

# Radon transform, R(i,j) is in (u=i, q=theta=j) space
# range of u is +/- 1 (note corners of box cur off)
# range of q is 0 to pi
 
# independent variable u
Nu = N;
umin = -1.0;
umax = 1.0;
Du = (umax-umin)/Nu;
u = gda_cvec(umin + Du*np.linspace(0,Nu-1,Nu));
 
# independent variable q
Nq = N;
qmin = -pi/2.0;
qmax =  pi/2.0;
Dq = (qmax-qmin)/(Nq-1); # note includes both -pi/2 and pi/2
q = gda_cvec(qmin + Dq*np.linspace(0,Nq-1,Nq)); # for ease of interpolation
 
# independent variable s
# integration over s, range +/- 1
Ns = 4*N;
smin = -1.0;
smax = 1.0;
Ds =(smax-smin)/Ns;
s = gda_cvec(smin + Ds*np.linspace(0,Ns-1,Ns));
 
# brute force Radon transform via ray sums
R = np.zeros((Nu,Nq));
# loop over theta
for i in range(Nq):
    
    # object here is top build Nu by Ns array of image, m,
    # evaluated at proper values of (u, s)
 
    # (x,y) from (u,s); these are Nu by Ns matrices
    # so that x(i,j) and y(i,j) are the (x,y) coordinates of mtrue(u(i),s(j))
    qi = q[i,0];
    sqi = sin(qi);
    cqi = cos(qi);
    xp = u*np.ones((1, Ns))*sqi - np.ones((Nu,1))*s.T*cqi
    yp = u*np.ones((1, Ns))*cqi + np.ones((Nu,1))*s.T*sqi;
 
    # indices corresponding to (x, y)
    ixp = np.floor((xp-xmin)/Dx).astype(int);
    iyp = np.floor((yp-ymin)/Dy).astype(int);
    
    # Convert Nu by Ns arrays to vectors
    Lixp = np.reshape( ixp, (Nu*Ns,1) );
    Liyp = np.reshape( iyp, (Nu*Ns,1) );
 
    # handle out of range x indices
    kk = np.where(Lixp<0);
    jxp1 = kk[0];
    if( jxp1.any() ):
        Lixp[jxp1,0] = 0;
    kk = np.where(Lixp>=Nx);
    jxp1 = kk[0];
    if( jxp1.any() ):
        Lixp[jxp1,0] = Nx-1;
 
    # handle out of range y indices
    jyp1 = np.where(Liyp<0);
    jyp1 = kk[0];
    if( jyp1.any() ):
        Liyp[jyp1,0] = 0;
    jyp1 = np.where(Liyp<0);
    kk = np.where(iyp>=Ny);
    jyp1 = kk[0];
    if( jyp1.any() ):
        Liyp[jyp1,0] = Ny-1;
    
    # generate linear index into image
    L = np.ravel_multi_index( (Lixp.ravel(), Liyp.ravel()), (Nx, Ny), mode="clip" );
                   
    # sample image, mtrue, and rearrange it back into Nu by Ns 
    S = np.reshape(Lmtrue[L],(Nu,Ns));
    
    # integrate (sum) S(u,s) over s to get R(u) for fixed q
    R[0:Nu,i:i+1] = gda_cvec(Ds*np.sum(S,axis=1));

# plot the radon transform
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [qmin, qmax, umin, umax] );
left=qmin;
right=qmax;
bottom=umax;
top=umin;
dmin=np.min(R);
dmax=np.max(R);
plt.imshow( R, cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.gca().invert_yaxis();
plt.xlabel('theta');
plt.ylabel('u');
plt.show();
print("Caption: Radon transform of true image");
print("  ");

# statistics of the Radon transform
print("R has minimum value %f" % (np.min(R)) );
print("R has maximum value %f" % (np.max(R)) );

# Inverse Radon Transform via Fourier transforms
 
# reorder array so that s=0, currenty at row N/2, is at row 1.
R = np.roll(R, -Nx2+1, axis=0);
#now take 1D fourier transform over columns of R
Rt = np.fft.fft(R,axis=0);
 
# invert Radon transform via central slice theorem
 
# Fourier transform of image
mtt=np.zeros((Nx,Ny),dtype=complex);
 
# fourier transform u -> ku
kumin=0.0;
kumax = 1.0/(2.0*Du);
Dku=kumax/(Nu/2.0);
Nku2 = int(Nu/2+1); # non-negative frequuencies
kup = gda_cvec(Dku*np.linspace(0,Nku2-1,Nku2));
kun = -np.flipud(kup[1:Nku2-1,0:1]);
ku= gda_cvec( kup, kun );

# fourier transform x -> kx
kxmin=0.0;
kxmax = 1/(2.0*Dx);
Dkx=kxmax/(Nx/2.0);
Nkx2 = int(Nx/2+1); # non-negative frequuencies
kxp = gda_cvec(Dkx*np.linspace(0,Nkx2-1,Nkx2));
kxn = -np.flipud(kxp[1:Nkx2-1,0:1]);
kx= gda_cvec( kxp, kxn );

# fourier transform y -> ky
kymin=0;
kymax = 1.0/(2.0*Dy);
Dky=kymax/(Ny/2.0);
Nky2 = int(Ny/2+1); # non-negative frequuencies
kyp = gda_cvec(Dky*np.linspace(0,Nky2-1,Nky2));
kyn = -np.flipud(kyp[1:Nky2-1,0:1]);
ky= gda_cvec( kyp, kyn );
 
# Now use the Fourier Slice theorem. All the
# work is in sampling the radon transform at
# a series of points that correspond to wavenumber
# pairs on a rectangular grid.  That requires
# the Radon tranform to be interpolated.

piby2 = pi/2.0;
 
for i in range(Nx):
    for j in range(Nky2): # only need left side, will rebuild right using symmetry
    
        # theta calculation
        theta = atan2( kx[i,0], ky[j,0] );
        if( theta > piby2 ): # map large thetas onto negative thetas
            theta=pi-theta;
            thflag=1;
        else:
            thflag=0;
        
        # radial wavenumber
        kr = sqrt(kx[i,0]**2 + ky[j,0]**2);
        # index into randon transform array
        if( kr > kumax ):
            continue; # ignore points that are out of bounds
            
        # treat indices as real numbers
        krindex = (kr-kumin)/Dku;
        thindex = (theta-qmin)/Dq;
        if( thindex<0.0 ):
            thindex=0.0;
        elif( thindex>=Nq ):
            thindex=Nq-1;
                
        # bracket real number indices with integers
        
        # radial index
        A = int(floor(krindex));
        B = int(floor(krindex+1));
        if( B >= Nku2 ):
            A = Nku2-2;
            B = Nku2-1;
        
        # theta index
        C = int(floor(thindex));
        D = int(floor(thindex+1));
        if( D >= Nq ):
            C=Nq-2;
            D=Nq-1;
        
        # Lagrangian polynomial interpoaltion
        # note that the indices are separated by unity
        FC  = (D-thindex);
        CKR = ((B-krindex)*Rt[A,C]+(krindex-A)*Rt[B,C]);
        FD = (thindex-C);
        DKR = ((B-krindex)*Rt[A,D]+(krindex-A)*Rt[B,D]);
        tmp = (FC*CKR+FD*DKR);
        if (thflag==1):
             mtt[i,j]=np.conj(tmp);
        else:
             mtt[i,j]=tmp;

# Fourier transform of image needs to be phase shifted
# else origin is in the wrong place
xshift = (xmax-xmin)/2.0;
yshift = (ymax-ymin)/2.0;
phase = 2*pi*(kx*np.ones((1,Ny))*xshift + np.ones((Nx,1))*(ky.T)*yshift);
mtt = np.multiply(mtt,np.cos(phase)+complex(0.0,1.0)*np.sin(phase));
 
# impose all necessary symmetries required for a real image
# these four elements must be real
# MARLAB)R)
# mtt(1,1)=real(mtt(1,1));
# mtt(Nx/2+1,1)=real(mtt(Nx/2+1,1));
# mtt(1,Ny/2+1)=real(mtt(1,Ny/2+1,1));
# mtt(Nx/2+1,Ny/2+1)=real(mtt(Nx/2+1,Ny/2+1));
mtt[0,0]=np.real(mtt[0,0]);
mtt[Nx2,0]=np.real(mtt[Nx2,0]);
mtt[0,Ny2]=np.real(mtt[0,Ny2]);
mtt[Nx2,Ny2]=np.real(mtt[Nx2,Ny2]);
# bottoms of these two columns must be conjugates of top
# MATLAB(R)
# mtt(Nx:-1:Nx/2+2,1)=conj(mtt(2:Nx/2,1));
# mtt(Nx:-1:Nx/2+2,Ny/2+1)=conj(mtt(2:Nx/2,Ny/2+1));
mtt[Nx2+1:Nx,0:1]=np.flipud(np.conj(mtt[1:Nx2,0:1]));
mtt[Nx2+1:Nx,Ny2:Ny2+1]=np.conj(mtt[1:Nx2,Ny2:Ny2+1]);
# right hand side flipped conjugate of left hand side
#  MATLAB(R)
#for m = (2:Ny/2)
#  mp=Ny-m+2;
#  mtt(1,mp) = conj(mtt(1,m));
#  mtt(2:Nx,mp) = flipud(conj(mtt(2:Nx,m)));
#end
for m in range(1,Ny):
    mp=Ny-m;
    mtt[0,mp] = np.conj(mtt[0,m]);
    mtt[1:Nx,mp:mp+1] = np.flipud(np.conj(mtt[1:Nx,m:m+1]));

# now take inverse fourier transform
# Note: normalization not necessarily correct for non-square images
mest = np.fft.ifft2(mtt)*(N/2);
mest = np.real(mest); # throw away imaginary part (which should be zero)
 
# plot reconstructed image
fig3 = plt.figure(3,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, xmax, 0, ymax] );
left=0;
right=xmax;
bottom=xmax;
top=0;
plt.imshow( mest, cmap=mycmap, vmin=0, vmax=256, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('x');
plt.ylabel('y');
plt.show();
print("Caption: True image.");
print("  ");

# some statistics, to compare with reconstructed image
print("Minimum value of image: true %f estimated: %f" %  (np.min(mtrue), np.min(mest)) );
print("Maximum value of image: true %f estimated: %f" %  (np.max(mtrue), np.max(mest)) );
gdapy14_02
m has minimum value 32.000000
m has maximum value 243.000000
 
No description has been provided for this image
Caption: True image.
No description has been provided for this image
Caption: Radon transform of true image
  
R has minimum value 260.714844
R has maximum value 383.480469
No description has been provided for this image
Caption: True image.
  
Minimum value of image: true 32.000000 estimated: 33.766253
Maximum value of image: true 243.000000 estimated: 257.126252
In [7]:
# gdapy14_03
 
# gradient descent inversion of d(x)=Lm(x),
# where L is a linear operator. The gradient of
# error dE/dm is calculated using the adjoint method
# The linear operator is 0.1*(D + 2*I) where D is a
# first derivative and I is an integral
 
print("gdapy14_03");

# auxiliary variable x
M=101;
N=M;
Dx=1.0;
x = gda_cvec(Dx*np.linspace(0,M-1,M)/(M-1));
xmax=x[M-1,0];
 
# the true model function m(x) that is zero at the boundaries
mtrue = np.sin(4.0*pi*x/xmax)+0.5*np.sin(7.0*pi*x/xmax)+0.25*np.sin(13.0*pi*x/xmax);
 
# derivative D and integral I operators
tr = gda_cvec( 1.0, -1.0, np.zeros((M-2,1)) );
tc = gda_cvec( 1.0, np.zeros((M-1,1)) );
D = (1.0/Dx)*la.toeplitz( tr.ravel(), tc.ravel() );
tr = np.ones((M,1));
tc = gda_cvec( 1.0, np.zeros((M-1,1)) )
I = Dx*la.toeplitz( tr.ravel(), tc.ravel() );
 
# data is a linear operator L on the model parameters, d=Lm
# where the linear operator contains both a derivative and an intergal:
# d = 0.1*( dm/dx + 2 * (integral 0 x) m dm )
L = 0.1*(D + 2*I);
dtrue = np.matmul(L,mtrue);
 
# trial solution
mgo = np.ones((M,1));
mgo1 = mgo;
 
# maximum iterations
Niter=100000;
 
# save error history
Ehist = np.zeros((Niter,1));
 
# error and its gradient at the trial solution
dgo = np.matmul(L,mgo);
Ego = np.matmul((dtrue-dgo).T,(dtrue-dgo)); Ego=Ego[0,0];
dEdmo = -2.0*Dx*np.matmul(L.T,(dtrue-dgo));
tau=0.5;
c1=0.0001;
alpha=0.9;
Ehist[0,0] = Ego;
 
# gradient descent method
for k in range(1,Niter):

    tmp = np.matmul(dEdmo.T,dEdmo); tmp=tmp[0,0];
    v = -dEdmo / sqrt(tmp);
        
    # backstep
    for kk in range(10):
        mg = mgo+alpha*v;
        dg = np.matmul(L,mg);
        Eg = np.matmul((dtrue-dg).T,(dtrue-dg)); Eg=Eg[0,0];
        dEdm= -2.0*np.matmul(L.T,(dtrue-dg));
        if( Eg <= (Ego + c1*alpha*np.matmul(v.T,dEdmo)) ):
            break;
        alpha = tau*alpha;

    # change in solution
    tmp = np.matmul((mg-mgo).T,(mg-mgo)); tmp=tmp[0,0];
    Dmg = sqrt(tmp);
    
    # update
    mgo=mg;
    dgo = dg;
    Ego = Eg;
    dEdmo = dEdm;
    Ehist[k,0] = Ego;
    
    # plot one intermediate result
    if( k==40 ):
       mgo2=np.copy(mgo);
    
    # terminate on small change
    if( Dmg < 1.0e-6 ):
        break;

maxiter=k;
print("%d iterations" % (maxiter) );
 
# plot the true model parameters
fig1 = plt.figure(1,figsize=(12,5));   # smallish figure
ax1 = plt.subplot(1, 1, 1);
dmax = np.max(np.abs(mtrue));                    
plt.axis( [0, xmax, -1.1*dmax, 1.1*dmax ] );
plt.plot(x,mtrue,"k-",lw=6);
plt.plot(x,mgo1,"g:",lw=3);
plt.plot(x,mgo,"g-",lw=3);
plt.plot(x,mgo2,"g--",lw=3);
plt.xlabel("x");
plt.ylabel("m(x)");
plt.show();
print("Caption: True solution (black), starting solution (green dotted line),");
print("solution in progress (green dashed curve), estimated solution (green curve).");

# plot the data
fig2 = plt.figure(2,figsize=(12,5));   # smallish figure
ax1 = plt.subplot(1, 1, 1);
dmax = np.max(np.abs(dtrue));                    
plt.axis( [0, xmax, -1.1*dmax, 1.1*dmax ] );
plt.plot(x,dtrue,"r-",lw=6);
plt.plot(x,dgo,"k-",lw=3);
plt.xlabel("x");
plt.ylabel("m(x)");
plt.show();
print("Caption: True data (black curve), predicted data (red curve).");

# plot error history
fig3 = plt.figure(3,figsize=(12,5));   # smallish figure
ax1 = plt.subplot(1, 1, 1);
dmax = np.max(np.abs(dtrue));                    
plt.axis( [1.0, 100.0, -4.0, 0.0 ] );
plt.plot(np.linspace(0,100,101),np.log10(Ehist[0:101,0:1]/Ehist[0,0]),"k-",lw=3);
plt.xlabel("iteration i");
plt.ylabel("log10 error, E(i)");
print("Caption: Prediction error E Eas a function of iteration number i.");
gdapy14_03
15890 iterations
No description has been provided for this image
Caption: True solution (black), starting solution (green dotted line),
solution in progress (green dashed curve), estimated solution (green curve).
No description has been provided for this image
Caption: True data (black curve), predicted data (red curve).
Caption: Prediction error E Eas a function of iteration number i.
No description has been provided for this image
In [8]:
# gdapy14_04
 
# simple 1D backprojection example
# the relationship d(x) = L m(x) is "inveted"
# using the back-propagation approximation m=L'd
# (where L' is the adjoint of L).  In this
# example, L is the integral from 0 to x, Linv
# is the first derivative, and its adjoint is the
# integral from infinity to x
 
print("gdapy14_04");

# independent variable x
M=201;
Dx=1.0;
x = gda_cvec(Dx*np.linspace(0,M-1,M));
xmax=x[M-1,0];
 
# true model
sigma=12.0;
sigma2=sigma**2;
xbar1 = 3.0*xmax/8.0;
xbar2 = 5.0*xmax/8.0;
f1=1.0;
f2=2.0;
mtrue = np.multiply(np.sin(f1*x),np.exp(-np.power(x-xbar1,2)/(2*sigma2)));
mtrue = mtrue + np.multiply(np.sin(f2*x),np.exp(-np.power(x-xbar2,2)/(2*sigma2)));
mtrue=mtrue-np.mean(mtrue);
 
# data is integral of model
dobs = Dx*np.cumsum(mtrue);
 
# plot true model
fig1 = plt.figure(1,figsize=(12,5));   # smallish figure
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0.0, M, np.min(mtrue), np.max(mtrue) ] );
plt.plot( x, mtrue, "k-", lw= 2 );
plt.xlabel("x");
plt.ylabel("m(x)");
plt.show();
print("Caption: True solution.");

# exact solution is m = Linv d
# where Linv the derivative operator d/dx
mest1 = np.diff(dobs)/Dx;
 
# plot exact solution
fig2 = plt.figure(2,figsize=(12,5));   # smallish figure
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0.0, M, np.min(mtrue), np.max(mtrue) ] );
plt.plot( x[1:M,0], mest1, "k-", lw= 2 );
plt.xlabel("x");
plt.ylabel("m(x)");
plt.show();
print("Caption: Estimated solution using exact inversion.");

# solution via backprojection
mest2 = np.flipud(Dx*np.cumsum(np.flipud(dobs)));
 
# plot back-projected solution
fig3 = plt.figure(3,figsize=(12,5));   # smallish figure
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0.0, M, np.min(mtrue), np.max(mtrue) ] );
plt.plot( x, mest2, "k-", lw= 2 );
plt.xlabel("x");
plt.ylabel("m(x)");
plt.show();
print("Caption: Estimated solution using back-projection.");
gdapy14_04
No description has been provided for this image
Caption: True solution.
No description has been provided for this image
Caption: Estimated solution using exact inversion.
No description has been provided for this image
Caption: Estimated solution using back-projection.
In [10]:
# gdapy14_05
 
# backprojection in a a simple tomography example
# The data are "travel times", that is integrals
# of m(x,y) along straight line rays between sources
# and receivers on the edges of a square region.
 
print("gdapy14_05");
 
# shortest ray allowed
Rmin=5;
 
# true model m(x,y) is read from a file
Sbig=plt.imread("../Data/magma.tif");
Nybig, Nxbig = np.shape(Sbig);
Sbig = -(Sbig-128.0*np.ones((Nybig, Nxbig)));
                
# decimate the model
idec=4;
Nx=floor(Nxbig/idec);
Ny=floor(Nybig/idec);
Nx2 = int(Nx/2);
Ny2 = int(Ny/2);
S=Sbig[0:Nybig:idec, 0:Nxbig:idec];
 
# independent variable x
xmin = 0.0;
xmax = float(Nx);
dx=(xmax-xmin)/(Nx-1);
x = gda_cvec(xmin+dx*np.linspace(0,Nx-1,Nx));
Dx = xmax-xmin;
 
# independent variable y
ymin = 0.0;
ymax = Ny;
dy=(ymax-ymin)/(Ny-1);
y = gda_cvec(ymin+dy*np.linspace(0,Ny-1,Ny));
Dy = ymax-ymin;
 
# rearrage model into vector
# order of model parameters in vector: index = (ix-1)*Ny + iy
mtrue = np.zeros((Nx*Ny,1));
rowindex = np.zeros((Nx*Ny,1),dtype=int);
colindex = np.zeros((Nx*Ny,1),dtype=int);
for ix in range(Nx):
    for iy in range(Ny):
        q = ix*Ny + iy;
        mtrue[q,0]=S[ix,iy];
        rowindex[q,0]=ix;
        colindex[q,0]=iy;

# plot true model
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( S, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
plt.show();
print("Caption: True model.");
print(" ");

# define sources and receivers on the edges of the box
Nside = 50;
Ns = 4*Nside;
xs = np.zeros((Ns, 2));
# side 1
xs[0:Nside,0:1] = (xmin+dx/10.0)*np.ones((Nside,1));
xs[0:Nside,1:2] = gda_cvec(ymin+Dy*np.linspace(0,Nside-1,Nside)/(Nside-1));
# side  2
xs[Nside:2*Nside,0:1] = (xmax-dx/10)*np.ones((Nside,1));
xs[Nside:2*Nside,1:2] = gda_cvec(ymin+Dy*np.linspace(0,Nside-1,Nside)/(Nside-1));
# side 3
xs[2*Nside:3*Nside,0:1] = gda_cvec(xmin+Dx*np.linspace(0,Nside-1,Nside)/(Nside-1));
xs[2*Nside:3*Nside,1:2] = (ymin+dy/10.0)*np.ones((Nside,1));
# side 4
xs[3*Nside:4*Nside,0:1] = gda_cvec(xmin+Dx*np.linspace(0,Nside-1,Nside)/(Nside-1));
xs[3*Nside:4*Nside,1:2] = (ymax-dy/10.0)*np.ones((Nside,1));
 
# source receiver pairs
N = int(Ns*Ns/2);  # bigger than the actual number of rays
ray = np.zeros((N,4));
k=0; # ray counter
for i in range(Ns-1):
    for j in range(i+1,Ns):
        R=sqrt( (xs[i,0]-xs[j,0])**2+(xs[i,1]-xs[j,1])**2 );
        if (R<Rmin): # exclude really short rays
            continue;
        ray[k,0] = xs[i,0];
        ray[k,1] = xs[i,1];
        ray[k,2] = xs[j,0];
        ray[k,3] = xs[j,1];
        k = k+1;
N=k;
print("%d rays" % (N));
ray = np.copy(ray[0:N,0:4]);
 
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( S, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
for k in range(0,Ns):
    plt.plot( xs[k,0], xs[k,1], "k^", lw=1 );
for k in range(N):
    if( np.random.randint(low=0,high=100)==0):
        plt.plot( gda_cvec(ray[k,0], ray[k,2]), gda_cvec(ray[k,1], ray[k,3]), "k-", lw=1 );
plt.show();
print("Caption: Stations (triangles) and every 100th ray (lines).");
print("  ");

# build data kernel
# order of model parameters in vector: index = (ix-1)*Ny + iy
M = Nx*Ny;
# use row-column-value method, instead of gdaGsparse=spalloc(N,M,2*N*Nx);
NELEMENTS = 2*N*Nx;
Gr = np.zeros((NELEMENTS,1),dtype=int);
Gc = np.zeros((NELEMENTS,1),dtype=int);
Gv = np.zeros((NELEMENTS,1));
Nel=0;
Nr = 2000;
binmin=0;
binmax=Nx*Ny;
rowsum=np.zeros((N,1)); # row sums of the data kernel, needed for backprojection
for k in range(N):
    # also use build non-sparse row Gk first method
    Gk = np.zeros((M,1));
    x1 = ray[k,0];
    y1 = ray[k,1];
    x2 = ray[k,2];
    y2 = ray[k,3];
    r = sqrt( (x2-x1)**2 + (y2-y1)**2 );
    dr = r/Nr;
    xv = x1 + (x2-x1)*np.linspace(0,Nr-1,Nr)/Nr;
    yv = y1 + (y2-y1)*np.linspace(0,Nr-1,Nr)/Nr;
    ixv = np.floor( Nx*(xv-xmin)/Dx ).astype(int);
    iyv = np.floor( Ny*(yv-ymin)/Dy ).astype(int);
    qv = ixv*Ny + iyv;
    # now count the ray segments in each pixel of the
    # image, and use the count to increment the appropriate
    # element of G.  The use of the histogram() method to do
    # the counting is a bit weird, but it seems to work
    h, e = np.histogram(qv, bins=M, range=(binmin,binmax));
    h = gda_cvec(h);
    kk = np.where(h != 0 );
    icount = kk[0];
    Gk[icount,0:1]=Gk[icount,0:1]+h[icount,0:1]*dr;
    rowsum[k,0]=np.sum(Gk);
    for i in range(M):
        if( Gk[i,0] != 0.0 ):
            Gr[Nel,0]=k;
            Gc[Nel,0]=i;
            Gv[Nel,0]=Gk[i,0];
            Nel=Nel+1;

# delete unused elements
Gr=Gr[0:Nel,0:1];
Gc=Gc[0:Nel,0:1];
Gv=Gv[0:Nel,0:1];

print("Data kernel has %d non-zero elements" % (Nel));

# build sparse matrix
gdaGsparse=sparse.coo_matrix((Gv.ravel(),(Gr.ravel(),Gc.ravel())),shape=(N,M));
# note: don't clear Gr, Gc, Gv yet, will use them later

# compute true travel times
d = gdaGsparse*mtrue;  # * OK sing G is sparse

# define linear operator needed for biconjugate gradient solver
gdaepsilon = 1.0e-6;
LO=las.LinearOperator(shape=(M,M),matvec=gda_dlsmul,rmatvec=gda_dlsmul);
# solve using conjugate gradient algorithm
mest = np.zeros((M,1));
tol = 1e-12;     # tolerance
maxit = 3*(N+M); # maximum number of iterations allowed

# solve least squares problem Fm=f by biconjugate gradients
GTd=gda_dlsrhs(d);
q=las.bicg(LO,GTd,rtol=tol,maxiter=maxit);
mest1 = gda_cvec(q[0]);

# inversion of data by back projection
# normalize each row of the data kernel to unity
# with compensatory normalization of the data
rowsumr = np.reciprocal(rowsum);
kk = np.where( np.logical_not(np.isfinite(rowsumr)) );
k = kk[0];
if( k.any() ):
    rowsumr[k,0]=0.0;

dp = np.multiply(d, rowsumr);
# manipulaste row-col-value arrays instead of
for el in range(Nel):
    k = Gr[el,0];
    Gv[el,0] = Gv[el,0] * rowsumr[k,0];

GP=sparse.coo_matrix((Gv.ravel(),(Gr.ravel(),Gc.ravel())),shape=(N,M));
del Gr;
del Gc;
del Gv;

# backprojected solution
mest2=GP.transpose()*dp;

# rearrange m back into image
Sest1 = np.zeros((Nx,Ny));
Sest2 = np.zeros((Nx,Ny));
for k in range(M):
    ix=rowindex[k,0];
    iy=colindex[k,0];
    Sest1[ix,iy]=mest1[k,0];
    Sest2[ix,iy]=mest2[k,0];
    
Sest2 = Sest2 - np.mean(Sest2);

# plot least squares model
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( Sest1, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
plt.show();
print("Caption: Damped least squares estimate of model.");

# plot back-projected model
fig3 = plt.figure(3,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
dmin=-100.0;
dmax=10.0;
plt.imshow( Sest2, cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
plt.show();
print("Caption: Backprojection estimate of model.");
gdapy14_05
No description has been provided for this image
Caption: True model.
 
19264 rays
No description has been provided for this image
Caption: Stations (triangles) and every 100th ray (lines).
  
Data kernel has 1171344 non-zero elements
No description has been provided for this image
Caption: Damped least squares estimate of model.
No description has been provided for this image
Caption: Backprojection estimate of model.
In [11]:
# gdapy14_06
 
# temperature / chemical reaction example
# adjoint method used to build data kernel
# and the problem is then solved usign least squares
 
print("gdapy14_06");

# time variable t
M=501;
N=M;
Dt=1.0;
t = gda_cvec(Dt*np.linspace(0,M-1,M));
tmax=t[M-1,0];
 
# differential equation (d/dt + c)u = f
# has green function G(t,t')=H(t-t')exp(-c(t-t'))
c = 0.04;
# Forward problem
 
# create a heat function m 
# allow two options, complex and simple
if( 1 ):
    # complicated m built out of Gaussians
    t1 = 30.0;
    sigma1 = 6.0;
    m = np.exp(-0.5*np.power(t-t1,2)/(sigma1**2));
    t1 = 50.0;
    sigma1 = 10.0;
    m = m+2.0*np.exp(-0.5*np.power(t-t1,2)/(sigma1**2));
    t1 = 80.0;
    sigma1 = 14.0;
    m = m+0.5*np.exp(-0.5*np.power(t-t1,2)/(sigma1**2));
    t1 = 200.0;
    sigma1 = 5.0;
    m = m+np.exp(-0.5*np.power(t-t1,2)/(sigma1**2));
else:
    # very simple m for test purposes
    m=np.zeros((M,1));
    m[int(1*M/4),0]=1.0;
    m[int(2*M/4),0]=0.5;

# perform the Green function integral to predict u
# use convolution function; faster than coding the integral
Ho = np.exp(-c*t);
u = gda_cvec(Dt*sg.convolve(Ho.ravel(),m.ravel())); # Green function integral is a convolution
u=u[0:M,0:1];
 
# integrate u to get P
b=1.0;
P = gda_cvec(Dt*b*np.cumsum(u.ravel()));
 
# data is P with random noise
dtrue = P;
sigmad = 0.1;
dobs = dtrue+np.random.normal(loc=0.0,scale=sigmad,size=(N,1));
 
# time series plots

# plot Green function for t'=10;
fig1 = plt.figure(1,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0, tmax, 0, 1] );
tp=30;
H = np.multiply(t>tp,np.exp(-c*(t-tp)));
plt.plot( t, H, "k-", lw=3 );
plt.xlabel("t");
plt.ylabel("m(t)");
plt.show();
print("Caption: Green function for tp=10");

# inverse problem
 
# adjoint differential equation (-d/dt + c)u = h
# has green function G(t,t')=H(t'-t)exp(c(t-t'))
 
# data kernel Gd
N=M;
Gd = np.zeros((N,M));
for i in range(N):
    for j in range(M):
        ti = i*Dt;
        tee = j*Dt;
        if( tee <= ti ):
            Gd[i,j]=Dt*(-b/c)*(exp(-c*(ti-tee))-1.0);
        else:
            Gd[i,j]=0;
            
# solve with dampled least squares
e2 = 1.0e-1;
GTG = np.matmul(Gd.T,Gd) + e2*np.identity(M);
GTd = np.matmul(Gd.T,dobs);
mest = la.solve(GTG,GTd);
# plot estimsted solution

# plot heat function
fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0, tmax, 0, 1.2*np.max(m)] );
plt.plot( t, m, "k-", lw=3 );
plt.plot( t, mest, "r--", lw=3 );
plt.xlabel("t");
plt.ylabel("m(t)");
plt.show();
print("Caption: True heat function, m(t) (black) and estimated heat");
print("heat function (red)");

# plot temperature function
fig3 = plt.figure(3,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1)
plt.axis( [0, tmax, 0, np.max(u)] );
plt.plot( t, u, "k-", lw=3 );
plt.xlabel("t");
plt.ylabel("u(t)");
plt.show();
print("Caption: Temperature function, u(t)");

# plot the observed data
fig4 = plt.figure(4,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1)
plt.axis( [0, tmax, 0, 1.1*np.max(dobs)] );
plt.plot( t, dobs, "k-", lw=3 );
plt.xlabel("t");
plt.ylabel("d(t)");
plt.show();
print("Caption: Observed data");

# plot adjoint green function for t'=10;
fig5 = plt.figure(5,figsize=(12,5));
plt.ax1 = plt.subplot(1, 1, 1);
plt.axis( [0, tmax, 0, 1] );
tp=100;
H = Dt*np.multiply(t<tp,np.exp(c*(t-tp)));
plt.plot( t, H, "k-", lw=3 );
plt.xlabel("t");
plt.ylabel("Q");
plt.show();
print("Caption: Adjoint Green function, Q(t,tau=100)");
 
# plot a few rowa of the data kernel
fig6 = plt.figure(6,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0, tmax, 0, 1.1*max(Gd[i,0:M]) ] );
i=int(1*N/4);
plt.plot( t, Gd[i:i+1,0:M].T, 'k-', 'LineWidth', 3 );
plt.xlabel("t");
plt.ylabel("g");
plt.show();
print("Caption: Row %d of the data kernel G" % (i) );
     
# plot a few rowa of the data kernel
fig7 = plt.figure(7,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0, tmax, 0, 1.1*max(Gd[i,0:M]) ] );
i=int(2*N/4);
plt.plot( t, Gd[i:i+1,0:M].T, 'k-', 'LineWidth', 3 );
plt.xlabel("t");
plt.ylabel("g");
plt.show();
print("Caption: Row %d of the data kernel G" % (i) );

# plot a few rowa of the data kernel
fig8 = plt.figure(8,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0, tmax, 0, 1.1*max(Gd[i,0:M]) ] );
i=int(3*N/4);
plt.plot( t, Gd[i:i+1,0:M].T, 'k-', 'LineWidth', 3 );
plt.xlabel("t");
plt.ylabel("g");
plt.show();
print("Caption: Row %d of the data kernel G" % (i) );

# draw the data kernel
gda_draw(Gd);
print("Caption: The data kernel G" );
gdapy14_06
No description has been provided for this image
Caption: Green function for tp=10
No description has been provided for this image
Caption: True heat function, m(t) (black) and estimated heat
heat function (red)
No description has been provided for this image
Caption: Temperature function, u(t)
No description has been provided for this image
Caption: Observed data
No description has been provided for this image
Caption: Adjoint Green function, Q(t,tau=100)
No description has been provided for this image
Caption: Row 125 of the data kernel G
No description has been provided for this image
Caption: Row 250 of the data kernel G
No description has been provided for this image
Caption: Row 375 of the data kernel G
No description has been provided for this image
Caption: The data kernel G
In [12]:
# gdapy14_07
 
# Uses heat flow problem to illustrate the use of
# the adjoint method to compute the data kernel dd/dm.
# This deriavtive is calculated two ways, using finite
# difference and using the adjoint method (achieving the
# same result, of course).
 
print("gdapy14_07");

# time t setup
N = 101;
Dt = 0.1;
i0 = int(floor(N/10));
t = gda_cvec(Dt*np.linspace(-i0+1,N-i0,N));
tmin=t[0,0];
tmax=t[N-1,0];

# reference solution is analytic
c0 = 0.7;
u0 = np.multiply(t>0,np.exp(-c0*t));
 
# the data is the b times the integral of the solution
# predicted data associated with the reference field
b = 0.5;
d0 = gda_cvec(b*Dt*np.cumsum(np.multiply(t>0.0,u0)));
 
# point scatterer at t0, that is
# c(t) = c0 + dm * delta(t-t0)
t0 = 0.5; # time of scatterer
it0 = int((t0-tmin)/Dt); # index of scatterer
print("Scatterer at time %.2f index %d wit u %.4f" % (t0, it0, u0[it0,0]) );
dm = 0.3; # amplitude of scatterer
# field perturbation according the the Born approximation
du = -dm * u0[it0,0] * np.multiply(t>t0,np.exp(-c0*(t-t0)));
u = u0 + du; # total field
d = gda_cvec(b*Dt*np.cumsum(np.multiply(t>0.0,u))); # predicted data
 
# plot the reference and total field
fig1 = plt.figure(1,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [tmin, tmax, -1.1, 1.1] );
plt.xlabel("time t");
plt.ylabel("u");
plt.plot( t, u, "k-", lw=3 );
plt.plot( t, u0, "r-", lw=2 );
plt.show();
print("Caption: Total field (red) and reference field (black).")
 
# plot the predicted data associated with u and u0
# plot the data
fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [tmin, tmax, -1.1, 1.1] );
plt.xlabel("time t");
plt.ylabel("d");
plt.plot( t, d, "r-", lw=3 );
plt.plot( t, d0, 'k-', 'LineWidth', 2 );
plt.show();
print("Caption: Total data (red) and reference data (black).")
 
# derivative by finite difference
dddm = (d-d0)/dm;
fig3 = plt.figure(3,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
yr = 1.1;
plt.axis( [tmin, tmax, -yr, yr] );
plt.plot( t, dddm, "k-", lw=3 );
plt.xlabel("time t");
plt.ylabel("dd/dm");
 
# derivative by adjoint formula
# which should (and does) exactly match the adjoint formula
# MATLAB(R): dddm2 = -(b/c0)*(t>0).*(t>t0).*(1-exp(-c0*(t-t0))).*exp(-c0*t0);
dddm2 = -(b/c0)*exp(-c0*t0)*np.ones((N,1));
dddm2 = np.multiply( dddm2, (t>0.0) );
dddm2 = np.multiply( dddm2, (t>t0) );
dddm2 = np.multiply( dddm2, np.ones((N,1))-np.exp(-c0*(t-t0)))
plt.plot( t, dddm2, "r-", lw=2 );
plt.show();
print("Caption: derivative dd/dm by finite differences (black) and the adjoint");
print("method (red).");
gdapy14_07
Scatterer at time 0.50 index 13 wit u 0.7558
No description has been provided for this image
Caption: Total field (red) and reference field (black).
No description has been provided for this image
Caption: Total data (red) and reference data (black).
No description has been provided for this image
Caption: derivative dd/dm by finite differences (black) and the adjoint
method (red).
In [13]:
# gdapy14_08
# data kernel for heat flow problem with time-variable thermal constant c(t) 

print("gdapy14_08");

# data constant
b = 0.5;
 
# time t axis
N = 101;
Dt = 0.1;
i0 = int(floor(N/10));
t = gda_cvec(Dt*np.linspace(-i0+1,N-i0,N));
tmin=t[0,0];
tmax=t[N-1,0];
 
# thermal constant
c00 = 0.7;
c01 = 0.2;
 
# reference c
c0 = c00*np.ones((N,1));
 
# true c
ctrue = c0+c01*np.multiply(np.sin(pi*t/tmax),t>0.0);

fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [tmin, tmax, 0, 1.25] );
plt.xlabel("time t");
plt.ylabel("c");
plt.plot( t, ctrue, "k-", lw=3);
plt.plot( t, c0, "r-", lw=2);
plt.show();
print("Caption: True c (black curve) and reference c (red curve).");

 
# true solution and correponding data
utrue = np.zeros((N,1));
utrue[i0,0]=1.0;
for i in range(i0+1,N):
    utrue[i,0] = utrue[i-1,0] - ctrue[i,0]*Dt*utrue[i-1,0];
dtrue = gda_cvec(b*Dt*np.cumsum(utrue.ravel()));
 
# reference solution and corresponding data
u0 = np.zeros((N,1));
u0[i0,0]=1.0;
for i in range(i0+1,N):
    u0[i,0] = u0[i-1,0] - c0[i,0]*Dt*u0[i-1,0];
d0 = gda_cvec(b*Dt*np.cumsum(u0.ravel()));
 
fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [tmin, tmax, -1.1, 1.1] );
plt.xlabel("time t");
plt.ylabel("u");
plt.plot( t, utrue, "k-", lw=3);
plt.plot( t, u0, "r-", lw=2);
plt.show();
print("Caption: True field u (black curve) and reference field (red curve).");

fig3 = plt.figure(3,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [tmin, tmax, -1.1, 1.1] );
plt.xlabel("time t");
plt.ylabel("d");
plt.plot( t, dtrue, "k-", lw=3);
plt.plot( t, d0, "r-", lw=2);
plt.show();
print("Caption: True data d (black curve) and reference data (red curve).");

# data kernel
ddidmj = np.zeros((N,N));
for i in range(N):  # loop over data points
 
    # solve adjoint equation
    # ( -d/dt + c0 ) lami = hi where hi = b H(ti-t) 
    hi = b*(t[i,0]*np.ones((N,1))>=t);
    lami = np.zeros((N,1));
    # descrete version of adjoint equation
    # (-lamj(k+1)+lamj(k))/Dt + c0 lamj(k+1) = hi
    # lamj(k) = lamj(k+1) - c0 = Dt lamj(k+1) + Dt hi
    for k in reversed(range(N-1)):
        lami[k,0] = lami[k+1,0] - c0[k+1,0]*Dt*lami[k+1,0] + Dt*hi[k+1,0];
        
    # perturbed equation is L u = 0 with
    # L = ( d/dt + c0 + mj delta(t-tj) )
    # so dLdmj = delta(t-tj)
    # and xi = - dLdmj u0
    
    # perform inner product
    # ddidmj = ( lami, dLdmi u0 ) = -( lami, delta(t-tj) u0 )  =
    # ddidmj = - lami(j) u0(j)
    
    ddidmj[i:i+1,0:N] = -np.multiply(lami, u0).T;

G = ddidmj;  # data kernel
 
gda_draw("title G",G);
print("Caption: Data kernel.");
gdapy14_08
No description has been provided for this image
Caption: True c (black curve) and reference c (red curve).
No description has been provided for this image
Caption: True field u (black curve) and reference field (red curve).
No description has been provided for this image
Caption: True data d (black curve) and reference data (red curve).
No description has been provided for this image
Caption: Data kernel.
In [14]:
# gdapy14_09
 
# solve inverse problem for the function c(t) in
# the cooling problem using adjoint methods
 
# the equation of cooling is
# ( d/dt + c(t) ) u = 0  with initial condition u(0)=1
 
# the true c(t) is a constant plus a sinusoid
 
# the problem is solved iteratively with Newton's
# methid using the linearized data kernel Gij = ddi/dmj
# calculated by adjoint methods.
 
# both the original equation and the adjoint equation
# are solved numerically with a very simple recursion based on
# Euler's method. A more accurate method, such as Runga
# Kutta could be substituted.
 
# At each iteration, the function c(t) is parameteried
# c = c0 + (sum over i) mi delta(t-ti)
# which implies that the update is c0 = c0 + m/Dt
# where the factor of Dt arises from the delta funcion
# that is, the integral of the mi delta(t-ti) over one
# pixel is mi, and the integral of mi/Dt over one pixel
# is also mi
 
# syntheic data are created by adding a small amount of
# random noise to the true data.
 
# The problem solved via generalized least squares, with
# both flatness and smallness prior information. A good
# solution is achieved in about five iterations.

print("gdapy14_09");

c0 = c00*np.ones((N,1));
cstart = c0;
 
# true c
ctrue = c0+c01*np.multiply(np.sin(pi*t/tmax),t>0.0);
 
# true solution and correponding data
utrue = np.zeros((N,1));
utrue[i0,0]=1.0;
for i in range(i0+1,N):
    utrue[i,0] = utrue[i-1,0] - ctrue[i,0]*Dt*utrue[i-1,0];
dtrue = gda_cvec(b*Dt*np.cumsum(utrue.ravel()));
 
sd = 0.0001;
dobs = dtrue + np.random.normal(loc=0.0,scale=sd,size=(N,1));
 
# flatness matrix for generalized least squares
tr = gda_cvec(-1.0, np.zeros((N-2,1)) );
tc = gda_cvec(-1.0, 1.0, np.zeros((N-2,1)) );
H = la.toeplitz(tr.ravel(),tc.ravel());
h1 = np.zeros((N-1,1));
h2 = np.zeros((N,1));
 
maxiter = 10;
for iter in range(maxiter):
 
    # reference solution and corresponding data
    u0 = np.zeros((N,1));
    u0[i0,0]=1.0;
    for i in range(i0+1,N):
        u0[i,0] = u0[i-1,0] - c0[i,0]*Dt*u0[i-1,0];
    d0 = gda_cvec(b*Dt*np.cumsum(u0.ravel()));
 
    ddidmj = np.zeros((N,N));
    for i in range(N):  # loop over data points
 
        # solve adjoint equation
        # ( -d/dt + c0 ) lami = hi where hi = b H(ti-t) 
        hi = b*(t[i,0]*np.ones((N,1))>=t);
        lami = np.zeros((N,1));
        # descrete version of adjoint equation
        # (-lamj(k+1)+lamj(k))/Dt + c0 lamj(k+1) = hi
        # lamj(k) = lamj(k+1) - c0 = Dt lamj(k+1) + Dt hi
        for k in reversed(range(N-1)):
            lami[k,0] = lami[k+1,0] - c0[k+1,0]*Dt*lami[k+1,0] + Dt*hi[k+1,0];
 
        # perturbed equation is L u = 0 with
        # L = ( d/dt + c0 + mj delta(t-tj) )
        # so dLdmj = delta(t-tj)
        # and xi = - dLdmj u0
 
        # perform inner product
        # ddidmj = ( lami, dLdmi u0 ) = -( lami, delta(t-tj) u0 )  =
        # ddidmj = - lami(j) u0(j)
 
        ddidmj[i:i+1,0:N] = -np.multiply( lami,u0 ).T;
 
    G = ddidmj;  # data kernel
 
    # inverse problem
    # G m = dtrue-d0
    Dd = dobs-d0;
    if( 1 ):
        Dh1 = h1-np.matmul(H,c0); # flatness applies to solution
    else:
        Dh1 = h1; # flatness applies to perturbation
    Dh0 = h2;  # smallness applied to the perturbation
    
    # generalized least squares with prior informaiton of flatness and smallness
    # Warning: epsi's hand-tuned pretty carefully
    epsi1 = 1e-2;
    epsi2 = 0.4e-1;

    # MATLAB(R) F = [G;   epsi1*H;       epsi2*eye(N) ];
    F = np.concatenate( (G,epsi1*H,epsi2*np.identity(N)), axis=0)
    # MATLAB(R) f = [Dd;  zeros(N-1,1);  zeros(N,1)   ];
    f = np.concatenate((Dd, epsi1*Dh1, epsi2*Dh0), axis=0);
    FTF = np.matmul(F.T,F);
    FTf = np.matmul(F.T,f);
    m = la.solve(FTF,FTf);
    c0 = c0 + m/Dt;  # factor of Dt from delta function
    
fig2 = plt.figure(2,figsize=(12,5));
ax1 = plt.subplot(1, 1, 1);
plt.axis( [tmin, tmax, 0, 1.25] );
plt.xlabel("time t");
plt.ylabel("c");
plt.plot( t, ctrue, "k-", lw=3);
plt.plot( t, cstart, "g--", lw=2);
plt.plot( t, c0, "r-", lw=2);
plt.show();
print("Caption: True c (black curve), estimated (red) and reference (green curve).");
    
gdapy14_09
No description has been provided for this image
Caption: True c (black curve), estimated (red) and reference (green curve).
In [15]:
# gdama14_10
# Data Assimilation example
# A insulated rod of length Lx cools because its ends
# are held at a temperature u(0)=u(Lx)=0.  Heat moves
# according to the thermal diffusion equation

print("gdapy14_10");

# x-axis
Dx = 1.0;
Lx = 128;
x = gda_cvec(np.linspace(0,Dx*(Lx-1),Lx));
xmax = x[Lx-1,0];
xmid = xmax/2.0;
M=Lx;

# x-axis
Dt = 1.0;
Lt = 128;
t = gda_cvec(np.linspace(0,Dt*(Lt-1),Lt));
tmax = t[Lt-1,0];

# thermal constant
kappa = 0.05/Dt;

# second derivative
r = np.concatenate( (gda_cvec(-2.0, 1.0), np.zeros((Lx-2,1))), axis=0 );
c = np.concatenate( (gda_cvec(-2.0,1.0), np.zeros((Lx-2,1))), axis=0 );
D2 = la.toeplitz( r.ravel(), c.ravel()) / (Dx**2);
D2[0,0]=1.0;
D2[0,1]=0.0;
D2[Lx-1,Lx-2]=0.0;
D2[Lx-1,Lx-1]=1.0;

# dynamics matrix
D = Dt*kappa*D2 + np.identity(Lx);

# source
sx = 5.0;
sx2 = sx**2;
m0 = np.exp( -0.5 * np.power(x-xmid,2)/ sx2 );

# true solution, by stepping the equation forward in time
utrue = np.zeros((Lx,Lt));
utrue[0:Lx,0:1] = m0;
for i in range(1,Lt):
    utrue[0:Lx,i:i+1] = np.matmul(D,utrue[0:Lx,i-1:i]);

# data positions and time are randomly assigned
Ndt = 35;  # data per time
N = (Lt-1)*Ndt; # total number of data
kd = np.zeros((Ndt,Lt-1),dtype=int);
for i in range(1,Lt):
    # no data at zero time and no data at boundaries
    j = np.random.permutation( range(1,Lx-1) );
    # index array kd gives locations of data
    kd[0:Ndt,i-1:i] = gda_cvec( np.sort(j[0:Ndt]) );
    
# although noise is defined everywhere, only values at
# data positions-times are used
sd = 0.1;
nse = np.random.normal(loc=0.0,scale=sd,size=(Lx,Lt));

# guessed source
sx = 2.0;  # half-width of Gaussian source
sx2 = sx**2;
mg = np.exp( -0.5 * np.power(x-xmid,2)/ sx2 );

Dm = 0.05;
maxiter = 25;
Emax = np.zeros((maxiter,1));
for iter in range(maxiter):

    # guessed solution, treat source as initial condition
    # and use recurion to propagate solution forward in
    # time
    u = np.zeros((Lx,Lt));
    u[0:Lx,0:1] = mg;
    for i in range(1,Lt):
        u[0:Lx,i:i+1] = np.matmul(D,u[0:Lx,i-1:i]);

    # error = observed temperature - predicted temperature
    # observed temperature = true temperature + noise
    e = np.zeros((Lx,Lt)); # although the error is defined
    # for all poitions-times, it is non-zero only at observation
    # points
    for i in range(1,Lt):
        k = kd[0:Ndt,i-1:i];
        e[k,i] = (utrue[k,i]+nse[k,i])-u[k,i];
    
    # total error
    emax=np.sum(np.power(e.ravel(),2));
    Emax[iter,0] = emax;
    
    # simplified gradent method
    if( (iter>0) and (emax>emaxlast) ):
        emaxlast = emax;
        Dm = Dm/2.0;
        continue;
    emaxlast = emax;

    # adjoint field, step backward in time, source is error
    lam = np.zeros((Lx,Lt));
    for i in reversed(range(1,Lt)):
        lam[0:Lx,i-1:i] = np.matmul(D,lam[0:Lx,i:i+1]) + Dt*e[0:Lx,i:i+1];
        
    # save starting u and e
    if( iter==0 ):
        u0 = u;
        e0 = e;
        lam0 = lam;

    # dE/dmi is adjoint field at t=0
    dEdm = -2.0*lam[0:Lx,0:1];
    # change in -dEdm direction
    mg = mg - Dm * dEdm;

# plot data
fig1 = plt.figure(1,figsize=(12,5));   # smallish figure
ax1 = plt.subplot(1, 1, 1);
plt.axis( [0, maxiter, 0, 1.1*np.max(Emax) ] );
plt.plot(np.linspace(0,maxiter-1,maxiter),Emax,"k-");
plt.xlabel('i');
plt.ylabel('E');
plt.show();

# plot error surface
fig2 = plt.figure(3,figsize=(12,5));

ax1=plt.subplot(2,4,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
left=0;
right=tmax;
bottom=xmax;
top=0;
plt.imshow( utrue, cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

ax1=plt.subplot(2,4,2);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
plt.imshow( u0, cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

ax1=plt.subplot(2,4,3);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
plt.imshow( np.abs(e0), cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

ax1=plt.subplot(2,4,4);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
vm = np.max(np.abs(lam0));
plt.imshow( lam0, cmap=mycmap, vmin=-vm, vmax=vm, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

ax1=plt.subplot(2,4,5);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
plt.imshow( utrue, cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

ax1=plt.subplot(2,4,6);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
plt.imshow( u, cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

ax1=plt.subplot(2,4,7);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
plt.imshow( np.abs(e), cmap=mycmap, vmin=0, vmax=1, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

ax1=plt.subplot(2,4,8);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [0, tmax, 0, xmax] );
plt.imshow( lam, cmap=mycmap, vmin=-vm, vmax=vm, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel('t');
plt.ylabel('x');

plt.show();
gdapy14_10
No description has been provided for this image
No description has been provided for this image
In [16]:
# gdapy11_11
# Supports Problem 11.6

print("gdapy14_11");

# In order for this script to actually write the datafile
# you must set dowrite to 1
dowrite=0;
 
# known constants
c0 = 0.7;
b = 0.4;
 
# time t axis
N = 101;
Dt = 0.1;
i0 = int(floor(N/10));
t = gda_cvec(Dt*np.linspace(-i0+1,N-i0,N));
tmin=t[0,0];
tmax=t[N-1,0];
 
# heterogeities
t01 = 1;
a1 = 0.1;
s1 = 0.25;
t02 = 4;
a2 = 0.05;
s2 = 0.5;
dm = a1*np.exp(-np.power(t-t01,2)/(2.0*s1*s1))+a2*np.exp(-np.power(t-t02,2)/(2.0*s2*s2));

fig1 = plt.figure(1,figsize=(12,5));
ax1=plt.subplot(1,1,1);
mr = 0.11;
plt.axis( [tmin, tmax, -mr, mr] );
plt.plot( t, dm, "k-", lw=3 );
plt.xlabel("time t");
plt.ylabel("dm");
plt.show();
print("Caption: model perturbation, dm");
 
# reference solution

u0 = np.multiply(t>0.0,np.exp(-c0*t));
fig2 = plt.figure(2,figsize=(12,5));
ax1=plt.subplot(1,1,1);
ur = 1.1;
plt.axis( [tmin, tmax, -ur, ur] );
plt.plot( t, u0, "k-", lw=3 );
plt.xlabel("time t");
plt.ylabel("u");

# sum up point scatterers to get perturbation in solution
du = np.zeros((N,1));
for i in range(N):
    t0i = t[i,0];
    dui = -dm[i,0]*u0[i,0]*np.multiply(t>t0i,np.exp(-c0*(t-t0i)));
    du = du + dui;
u = u0 + du;
plt.plot( t, u, "r-", lw=2 );
plt.show();
print("Caption: field u (red) and reference field u0 (black)");
 
# data predicted by reference solution
d0 = gda_cvec(b*Dt*np.cumsum(np.multiply(t>0,u0).ravel()));
# data predicted by heterogenous solution
d  = gda_cvec(b*Dt*np.cumsum(np.multiply(t>0,u).ravel()));

fig3 = plt.figure(3,figsize=(12,5));
ax1=plt.subplot(1,1,1);
dr = 1.1;
plt.axis( [tmin, tmax, -dr, dr] );
plt.plot( t, d0, "k-", lw=3 );
plt.plot( t, d, "r-", lw=3 );
plt.xlabel("time t");
plt.ylabel("d");
plt.show();
print("Caption: data d (red) and reference data d0 (black)");

# write files
if( dowrite ):
    D = np.concatenate((t,dm,d),axis=1);
    np.savetxt("../Data/gda1106_data.txt",D,delimiter="\t");
    np.savetxt("../TestFolder/gda1106_data.txt",D,delimiter="\t");
    print("Files written.")
gdapy14_11
No description has been provided for this image
Caption: model perturbation, dm
No description has been provided for this image
Caption: field u (red) and reference field u0 (black)
No description has been provided for this image
Caption: data d (red) and reference data d0 (black)
In [ ]: