In [79]:
# gdapy15_00 clears all variables and import various modules
# you must run this cell first, before running any of others

# note that these examples use inverse theory techniques that
# are only covered in later chapters of the book

# History:
# 2023/05/04 - Created by W. Menke from MATLAB(R) version
# 2024/05/23 -  Patches to accomodate new verions of numpy, scipy, matplotlib
#               patched dot product to return scalar value, and
#               las.bicg keyword tol changes to rtol, and
#               convert las.bicg to float, and
#               converted bicg() output to float, and
#               in gda15_03, reset maxit, and
#               changed interactive backend to QtAgg in gda15_12

# A reset deletes any previously-created variables from memory
%reset -f

# import various packages

import numpy as np                    # matrices & etc
from matplotlib import pyplot as plt  # general plotting 
from matplotlib import patches        # plotting shapes
from datetime import date             # date and time arithmetic
from math import exp, pi, sin, cos, tan, sqrt, floor, log10, nan, acos, atan, atan2, sinh   # math functions
from scipy.special import erf
import scipy.linalg as la             # linear algebra functions
import scipy.signal as sg             # signal processing
import scipy.special as sp            # special functions
import scipy.optimize as opt          # optimization functions such as linear programming
import os                             # operating system
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
import scipy.signal as sg
import scipy.stats as st
import scipy.sparse.linalg as las
from scipy.interpolate import griddata
from scipy import sparse
import matplotlib

# function to make a numpy N-by-1 column vector
# c=eda_cvec(v1, v2, ...) from a list of several
# array-like entities v1, v2, including a number
# a list of numbers, a tuple of numbers, an N-by-0 np array
# and a N-by-1 np array. The function
# also insures that, if v is an np array, that
# c is a copy, as contrasted to a view, of it
# It promotes integers to floats, and integers
# and floats to complex, by context.
# This version concatenates many argments,
# whereas c=eda_cvec1(v1) takes just one argiment.
# I recommend always using eda_cvec(v1, v2, ...)
def gda_cvec(*argv):
    t = int;
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        Nt = Nt + N;
        if( N==0 ):
            continue; # skip vector of zero length
        if (t==int) and isinstance(v[0,0],float):
            t=float;
        elif isinstance(v[0,0],complex):
            t=complex;
    w = np.zeros((Nt,1),dtype=t);
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        #w[Nt:Nt+N,0] = v[0:N,0];
        w[Nt:Nt+N,0:1] = v;  # patch 20230418 was #w[Nt:Nt+N,0] = v[0:N,0];
        Nt = Nt + N;
    return w;

# function to make a numpy N-by-1 column vector
# c=gda_cvec1(v) from entity v that is array-like,
# including a number, a list of numbers, a tuple
# of numbers, an N-by-0 np array and a N-by1 np array.
# It promotes integers to floats, and integers
# and floats to complex, by context. The function
# also insures that, if v is an np array, that
# c is a copy, as contrasted to a view, of it.
# This version takes just one input argment.
# whereas c=gda_cvec(v1,v2,...) concatenates
# many argiments.
def gda_cvec1(v):
    if isinstance(v, int) or isinstance(v, np.int32):
        w = np.zeros((1,1),dtype=int);
        w[0,0] = v;
        return w;
    elif isinstance(v, float):
        w = np.zeros((1,1),dtype=float);
        w[0,0] = v;
        return w;
    elif isinstance(v, complex):
        w = np.zeros((1,1),dtype=complex);
        w[0,0] = v;
        return w;
    elif isinstance(v, np.ndarray):
        s = np.shape(v);
        if len(s) == 1:
            return np.copy(np.reshape(v,(s[0],1)));
        else:
            [r,c]=s;
            if( c==1 ):
                return(np.copy(v));
            elif(r==1):
                return(np.copy(v.T));
            else:
                raise TypeError("gda_cvec: %d by %d ndarray not allowed" % (r, c));
    elif isinstance(v, list):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: list contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(v); # patch v -> np.array(v)
        return w;
    elif isinstance(v, tuple):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: tuple contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(list(v)); # patch v -> np.array(list(v));
        return w;
    else:
        raise TypeError("gda_cvec: %s not supported" % type(v));

        
# gda_draw function makes a "pictorial matrix equation"
# arguments are vectors, matrices and strings
# which are plotted in the order that the appear
# except that strings starting with 'title ' are plotted
# under the subseqeunt matrix or vector
# always returns a status of 1
def gda_draw(*argv):
    DOCOLOR=True;
    if( DOCOLOR ):
        bwcmap = matplotlib.colormaps['jet'];
    else:
        bw = np.zeros((256,4));
        v = 0.9*(256 - np.linspace( 0, 255, 256 ))/255;
        bw[:,0] = v;
        bw[:,1] = v;
        bw[:,2] = v;
        bw[:,3] = np.ones(256);
        bwcmap = ListedColormap(bw);
    # size of plot
    W = 16;
    H = 4;
    fig1 = plt.figure(1);
    # figsize width and height in inches
    fig1.set_size_inches(W,H);
    ax1 = plt.subplot(1,1,1);
    plt.axis([0, W, -H/2, H/2]);
    plt.axis('off');
    LM = W/6;    # matrix width and heoght
    LV = W/40;   # vector width
    FS = 0.12;    # character width
    TO = 0.4;    # title vertical offset
    SP = 0.2;    # space between objects
    LS = 0.2;    # leading space
    p = LS; # starting x-position
    istitle=0; # flags presence of a title
    for a in argv:
        if isinstance(a,np.ndarray):
            sh = np.shape(a);
            if len(sh) == 1:  # conversion to nx1 array
                n = sh[0];
                m = 1;
                ap = a;
                a = np.zeros((n,1));
                a[:,0] = ap;
            else:
                n = sh[0];
                m = sh[1];
            if m==1:
                pold=p;
                left=p;
                right=p+LV;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LV;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
            else:
                pold=p;
                left=p;
                right=p+LM;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LM;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
        elif isinstance(a,str):
            ns = len(a);
            istitle=0;
            if( ns>=6 ):
                if 'title ' in a[0:6]:
                    istitle=1;
                    titlestr=a[6:];
            if( istitle != 1):
                plt.text(p,0,a);
                p = p + ns*FS + SP;
    plt.show();
    return 1;

# bandpass filter, used in seismological example, but hand
# in a variety of settings involving time series
def gda_chebyshevfilt(d, Dt, flow, fhigh):
    # (dout,u,v)=gda_chebyshevfilt(d, Dt, flow, fhigh);
    # chebyshev IIR bandpass filter
    # d - input array of data
    # Dt - sampling interval
    # flow - low pass frequency, Hz
    # fhigh - high pass frequency, Hz
    # dout - output array of data
    # u - the numerator filter
    # v - the denominator filter
    # these filters can be used again using dout=filter(u,v,din);

    # make sure input timeseries is a column vector
    s = np.shape(d);
    N = s[0];
    if(N==1):
        dd = np.zeros((N,1));
        dd[:,0] = d;
    else:
        dd=d;
        
    # sampling rate
    rate=1/Dt;

    # ripple parameter, set to ten percent
    ripple=0.1;  

    # normalise frequency
    fl=2.0*flow/rate;
    fh=2.0*fhigh/rate;

    # center frequency 
    cf = 4 * tan( (fl*pi/2) ) * tan( (fh*pi/2) );

    # bandwidth
    bw = 2 * ( tan( (fh*pi/2) ) - tan( (fl*pi/2) ) );

    # ripple parameter factor
    rpf = sqrt((sqrt((1.0+1.0/(ripple*ripple))) + 1.0/ripple));
    a = 0.5*(rpf-1.0/rpf);
    b = 0.5*(rpf+1.0/rpf);

    u=np.zeros((5,1));
    v=np.zeros((5,1));
    theta = 3*pi/4;
    sr = a * cos(theta);
    si = b * sin(theta);
    es = sqrt(sr*sr+si*si);
    tmp= 16.0 - 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw - 4.0*bw*cf*sr + cf*cf;
    v[0,0] = 1.0;
    v[1,0] = 4.0*(-16.0 + 8.0*bw*sr - 2.0*bw*cf*sr + cf*cf)/tmp;
    v[2,0] = (96.0 - 16.0*cf - 8.0*es*es*bw*bw + 6.0*cf*cf)/tmp;
    v[3,0] = (-64.0 - 32.0*bw*sr + 8.0*bw*cf*sr + 4.0*cf*cf)/tmp;
    v[4,0] = (16.0 + 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw + 4.0*bw*cf*sr + cf*cf)/tmp;
    tmp = 4.0*es*es*bw*bw/tmp;
    u[0,0] = tmp;
    u[1,0] = 0.0;
    u[2,0] = -2.0*tmp;
    u[3,0] = 0.0;
    u[4,0] = tmp;

    dout = sg.lfilter(u.ravel(),v.ravel(),dd.ravel());
    return (gda_cvec(dout),gda_cvec(u),gda_cvec(v));

# function to perform the multiplication FT (F v)
def gda_FTFmul(v):
    # this function is used by bicongugate gradient to
    # solve the generalized least squares problem Fm=F.
    # Note that "F" must be called "gdaFsparse".
    global gdaFsparse;
    s = np.shape(v);
    if(len(s)==1):
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    # weirdly, "*" multiplies sparse matrices just fine
    temp = gdaFsparse*vv;
    return gdaFsparse.transpose()*temp;


# function to perform the multiplication FT f
def gda_FTFrhs(f):
    # this function is used by bicongugate gradient to
    # solve the generalized least squares problem Fm=F.
    # Note that "F" must be called "gdaFsparse".
    global gdaFsparse;
    return gdaFsparse.transpose()*f;

def gda_matrixmin(E):
    E1 = np.min(E,axis=0);
    k = np.argmin(E,axis=0);
    Emin = np.min(E1);
    ic = np.argmin(E1);
    ir = k[ic];
    return ir, ic, Emin;

def gda_Esurface( xc, yr, E, sigmad2 ):
# gda_Esurface: analysis of 2D error surface
# input:
#     E: matrix of errors xc increases with columns, yr with rows
#     xc, yr: corresponding vectors of (xc, yr) values; must be evenly-spaced
#     sigmad2: variance of data that went into E=e'*e with e=dobs-dpre
# output:
#    x0, y0: location of minimum in E
#     E0: value of E at minimum
#     covxy: covariance matrix
#     status: 1 on success, 0 on fail
# method: quadratic approximation

# default return values
    x0=0.0;
    y0=0.0;
    E0=0.0;
    covxy=np.zeros((2,2));
    status=0;
    Nx,i = np.shape(xc);
    Ny,i = np.shape(yr);
    Dx = xc[1,0]-xc[0,0];
    Dy = yr[1,0]-yr[0,0];

    ir,ic,Emin = gda_matrixmin(E)

    if( (ir<1) or (ir>(Ny-2)) or (ic<1) or (ic>(Nx-2)) ):
        # minimum not in interior
        return x0, y0, E0, covxy, status;

    # quadratic model with 9 data
    # E = m1 + m2*x + m3*y + m4*x*x/2 + m5*x*y + m6*y*y/2
    # dE/dx = m2 + m4*x + m5*y
    # dE/dy = m3 + m5*x + m6*y
    # at minimum
    # 0 = d1 + D2 [x,y]'  so [x,y] = -inv(D2)*d1;

    x = Dx*gda_cvec(-1, 0, 1, -1, 0, 1, -1, 0, 1);
    y = Dy*gda_cvec(-1, -1, -1, 0, 0, 0, 1, 1, 1);
    d = gda_cvec( E[ir-1,ic-1], E[ir-1,ic], E[ir-1,ic+1],
                  E[ir,ic-1],   E[ir,ic],   E[ir,ic+1],
                  E[ir+1,ic-1], E[ir+1,ic], E[ir+1,ic+1]);
    G = np.concatenate((np.ones((9,1)),x,y,
                        0.5*np.multiply(x,x),np.multiply(x,y),0.5*np.multiply(y,y)),axis=1);
    GTG = np.matmul(G.T,G);
    GTd = np.matmul(G.T,d);
    m = la.solve(GTG,GTd);
    d1 = gda_cvec(m[1,0], m[2,0] );
    D2 = np.array( [[m[3,0], m[4,0]], [m[4,0], m[5,0]]] );
    invD2 = la.inv(D2);
    v = -np.matmul(invD2,d1);
    x0 = xc[ic,0]+v[0,0];
    y0 = yr[ir,0]+v[1,0];
    #E0= m(1) +   m(2)*v(1) +     m(3)*v(2) +     m(4)*v(1)*v(1)/2 +         m(5)*v(1)*v(2) +       m(6)*v(2)*v(2)/2;
    E0 = m[0,0] + m[1,0]*v[0,0] + m[2,0]*v[1,0] + m[3,0]*v[0,0]*v[0,0]/2.0 + m[4,0]*v[0,0]*v[1,0] + m[5,0]*v[1,0]*v[1,0]/2.0;
    covxy = (sigmad2)*2.0*invD2;
    status=1;
    return x0, y0, E0, covxy, status;

# function to perform the "damped minimum length" multiplication
# (GGT + epsilon I )v = G (GT v) + epsilon v
def gda_dmlmul(v):
    # this function is used by bicongugate gradient to
    # solve the damped minimum length problem Gm=d.
    # Note that "G" must be called "gdaGsparse",
    # and epsilon must be called gdaepsilon
    global gdaGsparse, gdaepsilon;
    s = np.shape(v);
    if(len(s)==1):
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    # weirdly, "*" multiplies sparse matrices just fine
    temp = gdaGsparse.transpose()*vv;
    return (gdaepsilon*vv)+(gdaGsparse*temp);

def gda_filtermul(v):
    # this function is used by the bicongugate gradient solver to solve the
    # geneneralized least squares problem Fm=f with F=[G;eH] and f=[d;e*h],
    # where G is a toeplitz matrix built from the filter g
    # Note that "H" must be sparse and called "edaHsparse" and the
    # thefilter "g" bust be called edafilterg and must be a column vector
    # and that the parameter "e" must be called eda_e
    global gdaepsilon;
    global gdaHsparse;
    global gdafilterg;
    # rearrange as column-vector
    s = np.shape(v);
    M = s[0];
    if(len(s)==1): # convert to column-vector
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    N, i = np.shape(gdafilterg);
    temp1 = np.zeros((N+M-1,1));
    temp1[:,0] = np.convolve(gdafilterg.ravel(),vv.ravel());
    a = np.zeros((N,1));
    a[:,0] = temp1[0:N,0];
    b = gdaepsilon*(gdaHsparse*vv); 
    temp2 = np.zeros((N+N-1,1));
    temp2[:,0] = np.convolve(
        (np.flipud(gdafilterg)).ravel(),a.ravel());
    a2 = gda_cvec(temp2[N-1:N+M-1,0]);
    b2 = gdaepsilon*(gdaHsparse.transpose()*b);
    # a2+b2 = FT F v = GT G v + (e^**2) HT H v
    return (a2+b2);

def gda_filterrhs(dobs,h):
    # companion function to GLSFilterMul()
    # creates RHS of generalized least squares equattion
    # set up right hand side, F'f = GT qobs + e HT h
    # note that H must be sparse
    global gdaepsilon;
    global gdaHsparse;
    global gdafilterg;
    N,i = np.shape(dobs);
    i,M = np.shape(gdaHsparse);
    temp = gda_cvec( np.convolve(np.flipud(gdafilterg).ravel(),dobs.ravel()) );
    FTfa = gda_cvec( temp[N-1:N+M-1,0] );
    FTfb = (gdaepsilon**2)*(gdaHsparse.transpose()*h);
    FTf=FTfa+FTfb;
    return (FTf);

# implements (GTG + epilon I) m = (GT d) when G is a sparse matrix
def gda_dlsmul(v):
    # this function is used by bicongugate gradient to
    # solve the damped least squares problem [Gm=F; epsilon*Im=0].
    # Note that "G" must be called "gdaGsparse".
    global gdaGsparse, gdaepsilon;
    s = np.shape(v);
    if(len(s)==1):
        vv = np.zeros((s[0],1));
        vv[:,0] = v;
    else:
        vv=v;
    # impliments (G'G + epsilon I) v = G'(G v) + epsilon v
    # weirdly, "*" multiplies sparse matrices just fine
    temp = gdaGsparse*vv;
    return gdaGsparse.transpose()*temp + gdaepsilon*vv;

# function to perform the multiplication FT f
def gda_dlsrhs(d):
    # this function is used by bicongugate gradient to
    # solve the generalized least squares problem Fm=F.
    # Note that "G" must be called "gdaGsparse".
    global gdaGsparse, gdaepsilon;
    return gdaGsparse.transpose()*d;

def gdaarrow2(ax,rbar,mys,myl):
    # pretty crazy way to draw an arrowhead!
    rbarsq = np.matmul(rbar.T,rbar); rbarsq=rbarsq[0,0];
    tangent = rbar/sqrt(rbarsq);
    per1 = np.cross( tangent, gda_cvec(0.0, 0.0, 1.0), axis=0 );
    per1sq = np.matmul(per1.T,per1); per1sq=per1sq[0,0];
    per1 = per1/sqrt(per1sq);
    per2 = np.cross( tangent, per1, axis=0 );
    per2sq = np.matmul(per2.T,per2); per2sq=per2sq[0,0]
    per2 = per2/sqrt(per2sq);
    L = 0.05;
    px=gda_cvec(0.0,rbar[0,0]);
    py=gda_cvec(0.0,rbar[1,0]);
    pz=gda_cvec(0.0,rbar[2,0]);
    ax.plot3D( px.ravel(), py.ravel(), pz.ravel(), mys, lw=myl, zorder=10.0 );
    v1 = rbar - L*tangent + 0.25*L*per1;
    v2 = rbar - L*tangent - 0.25*L*per1;
    v3 = rbar - L*tangent + 0.25*L*per2;
    v4 = rbar - L*tangent - 0.25*L*per2;
    x0=gda_cvec(rbar[0,0], v1[0,0]);
    y0=gda_cvec(rbar[1,0], v1[1,0]);
    z0=gda_cvec(rbar[2,0], v1[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    x0=gda_cvec(rbar[0,0], v2[0,0]);
    y0=gda_cvec(rbar[1,0], v2[1,0]);
    z0=gda_cvec(rbar[2,0], v2[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    x0=gda_cvec(rbar[0,0], v3[0,0]);
    y0=gda_cvec(rbar[1,0], v3[1,0]);
    z0=gda_cvec(rbar[2,0], v3[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    x0=gda_cvec(rbar[0,0], v4[0,0]);
    y0=gda_cvec(rbar[1,0], v4[1,0]);
    z0=gda_cvec(rbar[2,0], v4[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    return 1;

def gdabox2(ax,xmin,xmax,ymin,ymax,zmin,zmax):
    ax.plot3D( gda_cvec(xmin,xmin).ravel(), gda_cvec(ymin,ymin).ravel(), gda_cvec(zmin,zmax).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmin,xmin).ravel(), gda_cvec(ymin,ymax).ravel(), gda_cvec(zmin,zmin).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmin,xmax).ravel(), gda_cvec(ymin,ymin).ravel(), gda_cvec(zmin,zmin).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmax,xmax).ravel(), gda_cvec(ymax,ymax).ravel(), gda_cvec(zmax,zmin).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmax,xmax).ravel(), gda_cvec(ymax,ymin).ravel(), gda_cvec(zmax,zmax).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmax,xmin).ravel(), gda_cvec(ymax,ymax).ravel(), gda_cvec(zmax,zmax).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmax,xmin).ravel(), gda_cvec(ymin,ymin).ravel(), gda_cvec(zmax,zmax).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmax,xmax).ravel(), gda_cvec(ymin,ymin).ravel(), gda_cvec(zmax,zmin).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmin,xmin).ravel(), gda_cvec(ymax,ymin).ravel(), gda_cvec(zmax,zmax).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmin,xmin).ravel(), gda_cvec(ymax,ymax).ravel(), gda_cvec(zmax,zmin).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmax,xmax).ravel(), gda_cvec(ymax,ymin).ravel(), gda_cvec(zmin,zmin).ravel(), "k-", lw=2, zorder=100.0 );
    ax.plot3D( gda_cvec(xmax,xmin).ravel(), gda_cvec(ymax,ymax).ravel(), gda_cvec(zmin,zmin).ravel(), "k-", lw=2, zorder=100.0 );
    return 1;

def gda_stereo( phi ):
    R = sin(phi)/(1.0+cos(phi));
    return R;
                  
In [49]:
# gdapy15_01
# image deblurring example
 
print("gdapy15_01");

# read in true image and demean it
mtrue = plt.imread("../Data/tern.tif").astype(float);
mtrue = mtrue-np.mean(mtrue);
I, J = np.shape(mtrue);
dmin = np.min(mtrue);
dmax = np.max(mtrue);

# plot closeup
I1=900;
I2=1700;
J1=600;
J2=1200;
 
# plot true image
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(2,3,1);
mycmap=cm.gray;
plt.axis( [0, J, 0, I] );
left=0;
right=J;
bottom=0;
top=I;
plt.imshow( mtrue, cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.xlabel('pixel j');
plt.ylabel('pixel i');
ax4=plt.subplot(2,3,4);
mycmap=cm.gray;
plt.axis( [J1, J2, I1, I2] );
left=0;
right=J;
bottom=0;
top=I;
plt.imshow( mtrue[I1:I2,J1:J2], cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.xlabel('pixel j');
plt.ylabel('pixel i');
 
# data is blurred version of image
dobs = np.zeros((I,J));

# deblurred image
mest = np.zeros((I,J));
 
# blur is over rows of image with this filter
Lb = 100;
 
# try 2 kinds of blurs
if( 0 ):
    blur = np.flipud(gda_cvec(np.linspace(1,Lb,Lb)/Lb)); # linear ramp-down
else:
    blur = np.ones((Lb,1))/Lb;  # boxcar

# crease sparse data kernel
# in this case, one colud use the simple command
# G = spdiags( ones(J,1)*blur', [0:Lb-1], J, J );
# however, thst only works when the blur is parallel
# to the rows or columns of the image.  So, instead
# I use the row-column-value method
NELEMENTS = Lb*J+20;
Gr = np.zeros((NELEMENTS,1),dtype=int);
Gc = np.zeros((NELEMENTS,1),dtype=int);
Gv = np.zeros((NELEMENTS,1));
Nel=0; # element counter
for i in range(J): # loop over model pixel
    # MATLAB(R) for j=(i:min(i+Lb-1,J))
    for j in range(i,min(i+Lb,J)):
        Gr[Nel,0] = i;
        Gc[Nel,0] = j;
        Gv[Nel,0] = blur[j-i,0];
        Nel=Nel+1;

# truncate
Gr=Gr[0:Nel,0:1];
Gc=Gc[0:Nel,0:1];
Gv=Gv[0:Nel,0:1]; 

# make sparse matrix
gdaGsparse=sparse.coo_matrix((Gv.ravel(),(Gr.ravel(),Gc.ravel())),shape=(J,J));
del Gr; # clear index arrays
del Gc;
del Gv;

# data is blurred version of image
for i in range(I):
    #                 Note * OK since G sparse
    dobs[i:i+1,0:J] = (gdaGsparse*(mtrue[i:i+1,0:J].T)).T;
    
# plot blurred image
ax2=plt.subplot(2,3,2);
mycmap=cm.gray;
plt.axis( [0, J, 0, I] );
left=0;
right=J;
bottom=0;
top=I;
plt.imshow( dobs, cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.xlabel('pixel j');
plt.ylabel('pixel i');
ax5=plt.subplot(2,3,5);
mycmap=cm.gray;
plt.axis( [J1, J2, I1, I2] );
left=0;
right=J;
bottom=0;
top=I;
plt.imshow( dobs[I1:I2,J1:J2], cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.xlabel('pixel j');
plt.ylabel('pixel i');

gdaepsilon=1.0e-6; # damping, just in case GGT is singular

# the sparse solver seems rather slow compared to the MATLAB
# implementation, so I have added a dense implentation
DOSPARSE = 0;
if( DOSPARSE ):
    # damped minimum length solution
    tol = 1e-4;      # tolerance
    maxit = 3*J;      # maximum number of iterations allowed
    # define linear operator needed for biconjugate gradient solver
    LO=las.LinearOperator(shape=(J,J),matvec=gda_dmlmul,rmatvec=gda_dmlmul);

    print("This part slow, so show progress: %d %d" % (i,I) );

    # damped minimum length  m = GT inv(GGT + epsiI) d
    # solved as              m = GTu  with  (GGT + epsiI)u=d
    for i in range(I): # process image row-wise
        if( (i%100)==0 ):
            print("%d out of %d" % (i,I) );
        dobsrow = (dobs[i:i+1,0:J]).T;
        q=las.bicg(LO,dobsrow,rtol=tol,maxiter=maxit);
        u = gda_cvec(q[0].astype(float));
        mestrow = (gdaGsparse.T)*u;
        mest[i:i+1,0:J]=mestrow.T;
else: # dense
    G = gdaGsparse.toarray();
    GGT = np.matmul(G,G.T)+gdaepsilon*np.identity(J);
    GMG = np.matmul(G.T,la.inv(GGT));
    for i in range(I): # process image row-wise
        dobsrow = (dobs[i:i+1,0:J]).T;
        mestrow = np.matmul(GMG,dobsrow);
        mest[i:i+1,0:J]=mestrow.T;
    
# plot blurred image
ax2=plt.subplot(2,3,3);
mycmap=cm.gray;
plt.axis( [0, J, 0, I] );
left=0;
right=J;
bottom=0;
top=I;
plt.imshow( mest, cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.xlabel('pixel j');
plt.ylabel('pixel i');
ax6=plt.subplot(2,3,6);
mycmap=cm.gray;
plt.axis( [J1, J2, I1, I2] );
left=0;
right=J;
bottom=0;
top=I;
plt.imshow( mest[I1:I2,J1:J2], cmap=mycmap, vmin=dmin, vmax=dmax, extent=(left,right,bottom,top) );
plt.xlabel('pixel j');
plt.ylabel('pixel i');
plt.show();
print("Caption: (left) True image, (middle) blurred image. (right)");
print("unblurred image.");

# rather brute force here
# plot generalized inverse and resolution matrix
if( DOSPARSE ):
    G = gdaGsparse.toarray();
    GGT = np.matmul(G,G.T)+gdaepsilon*np.identity(J);
    GMG = np.matmul(G.T,la.inv(GGT));
R = np.matmul(GMG,G);
gda_draw('title G', G, 'title GMG', GMG, 'title R', R);
print("Caption: Data kernel G (left), generlized inverse GMG (middle),");
print("Model resolution matrix (right)");

fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(2,1,1);
i,j = np.shape(GMG);
ic = int(i/2);
tmp = GMG[ic:ic+1,0:j].T;
dmax = np.max(np.abs(tmp));
plt.axis( [0, j, -1.1*dmax, 1.1*dmax] );
plt.xlabel('column index');
plt.ylabel('GMG');
plt.plot(gda_cvec(np.linspace(0,j-1,j)), tmp, 'k-', lw=2 );

ax2=plt.subplot(2,1,2);
i,j = np.shape(R);
ic = int(i/2);
tmp = R[ic:ic+1,0:j].T;
dmax = np.max(np.abs(tmp));
plt.axis( [0, j, -1.1*dmax, 1.1*dmax] );
plt.xlabel('column index');
plt.ylabel('R');
plt.plot(gda_cvec(np.linspace(0,j-1,j)), tmp, 'k-', lw=2 );
plt.show();
print("Caption: (Top) Central row of the generalized inverse GMG,");
print('(bottom) Central row the model resolution matrix R')
gdapy15_01
No description has been provided for this image
Caption: (left) True image, (middle) blurred image. (right)
unblurred image.
No description has been provided for this image
Caption: Data kernel G (left), generlized inverse GMG (middle),
Model resolution matrix (right)
No description has been provided for this image
Caption: (Top) Central row of the generalized inverse GMG,
(bottom) Central row the model resolution matrix R
In [51]:
# gdapy15_02
# deconvolution example, d=g*m and m=ginv*d
 
# this uses generalized least squares
# the data equation is G m = rhs
# the constraint equation is H m = h
# the combined equations are Fm = f
# solution uses the bicg() solver set up to
# solve FTF = FT f, but in a clever way so that
# FTF, G and f are never explicitly
 
# however, the code also implements the brute
# force solution m = (FTF)\(FT f) inside if statements
# for test purposes

print("gdapy15_02");

D = np.genfromtxt("../Data/airgun.txt", delimiter="\t");
Mo,i = np.shape(D);
# digitized airgun pulse, data after:
# Smith, SG, 
# Measurement of Airgun Waveforms,
# Geophys. J. R. astr. Soc. (1975) 42, 273-280.

t=D[0:Mo,0:1];  # time
d=D[0:Mo,1:2];  # this is the filter for which we seek an inverse filter
tmin = t[0,0];
tmax = t[Mo-1,0];
Dt = t[1,0]-t[0,0];
 
# pad with zeros
M=2*Mo;
t = gda_cvec(tmin+Dt*np.linspace(0,M-1,M));
gdag = gda_cvec( d, np.zeros((M-Mo,1)) );
tmax = t[M-1,0];
gmax = np.max(np.abs(gdag));
 
N=M; # inverse filter same length as filter
# plot g
fig1 = plt.figure(1,figsize=(12,5));
ax1=plt.subplot(1,1,1);
plt.axis( [tmin, tmax, -1.1*gmax, 1.1*gmax] );
plt.plot( t, gdag, "k-", lw=3 );
plt.xlabel("time, t");
plt.ylabel("g(t)");
plt.show();
print("Caption: Airgun pulse");

# I don't need G explicitly, but here it is for test purposes
test=1;
if( test ):
    G=np.zeros((N,M));
    for j in range(M):
        G[j:N,j]=gdag[0:N-j,0];

# set up H and h
K=2*M;
L=N+K;
# use row-col-value method to make sparse matrix H
# an alternative would be the command
Hr = np.zeros((3*L,1),dtype=int);
Hc = np.zeros((3*L,1),dtype=int);
Hv = np.zeros((3*L,1));
Nel=0;
 
# two sets of constaint equations, curvature and length
# epsilon parameters adjusted empirically
# curvature
for j in range(1,M):
    Hr[Nel,0] = j;
    Hc[Nel,0] = j-1;
    Hv[Nel,0] = 1.0;
    Nel=Nel+1;
for j in range(M):
    Hr[Nel,0] = j;
    Hc[Nel,0] = j;
    Hv[Nel,0] = -2.0;  
    Nel=Nel+1;
for j in range(M-1):
    Nel=Nel+1;
    Hr[Nel,0] = j;
    Hc[Nel,0] = j+1;
    Hv[Nel,0] = 1.0;
# damp size, especially towards the end
wt=100.0
for j in range(M):
    Nel=Nel+1;
    Hr[Nel,0] = j+M;
    Hc[Nel,0] = j;
    Hv[Nel,0] = wt*sqrt(j);

# truncate to actual length used
Hr = Hr[0:Nel,0:1];
Hc = Hc[0:Nel,0:1];
Hv = Hv[0:Nel,0:1];
gdaHsparse=sparse.coo_matrix((Hv.ravel(),(Hr.ravel(),Hc.ravel())),shape=(K,M));
del Hr; del Hc; del Hv;

# rhs of data equaltion is a spike delayed by Nd samples
# (a little delay sometimes leads to a better result)
rhs=np.zeros((N,1));
Nd=20;
rhs[Nd,0]=1.0;      # right hand side of convolution eqn
h=np.zeros((K,1));  # right hand side of prior info eqn

tol = 1e-10;     # tolerance
maxit = 3*L;     # maximum number of iterations allowed
# define linear operator needed for biconjugate gradient solver
gdaepsilon=0.001*np.max(np.abs(gdag));
gdafilterg = gdag;
LO=las.LinearOperator(shape=(M,M),matvec=gda_filtermul,rmatvec=gda_filtermul);
FTf = gda_filterrhs(rhs,h);
q=las.bicg(LO,FTf,rtol=tol,maxiter=maxit);
ginv = gda_cvec(q[0].astype(float));

if( test ): # don't need F, but here it is for test purposes
    H=gdaHsparse.toarray()
    F=np.concatenate( (G,gdaepsilon*H), axis=0 );
    gda_draw('title G',G,'title H',H);
    
if( test ): # don't need f, but here it is for test purposes
    f=np.zeros((L,1));
    f[0:N,0:1]=rhs;
    f[N:N+K,0:1]=gdaepsilon*h;

if( test ): # don't need brute force solution, but here it is for test purposes
    FTF = np.matmul(F.T,F);
    FTf = np.matmul(F.T,f);
    ginvbf = la.solve(FTF,FTf);

# don't need GT*rhs, but here it is for test purposes
if( test ):
    Gtrhs = np.matmul(G.T,rhs);

# plot inverse filter
fig2 = plt.figure(2,figsize=(12,5));
ax1=plt.subplot(1,1,1);
ginvmax = np.max(np.abs(ginv));
plt.axis( [tmin, tmax, -1.1*ginvmax, 1.1*ginvmax] );
plt.plot( t, ginv, "k-", lw=4 );
if( test ): # plot brute force result
    plt.plot( t, ginvbf, "r-", lw=2 );
plt.xlabel("time, t");
plt.ylabel("ginv}(t)");
plt.show();
print("Caption: Inverse filter");

# plot convolution of inverse filter and filter
c = np.convolve( ginv.ravel(), gdag.ravel() );
c = gda_cvec(c[0:N]);
fig3 = plt.figure(3,figsize=(12,5));
ax1=plt.subplot(1,1,1);
cmax = np.max(np.abs(c));
plt.axis( [tmin, tmax, -1.1*cmax, 1.1*cmax] );
plt.plot( t, c, "k-", lw=3 );
plt.xlabel("time, t");
plt.ylabel("ginv(t)*g(t)");
plt.show();
print("Caption: Deconvolved (spiked) airgun pulse.");

# error
E = np.matmul((rhs-c).T,(rhs-c)); E=E[0,0];
print("damping factor %f wt %f   prediction error %f" % (gdaepsilon,wt,E) );

# as an example, deconvolve three pulses with additive noise
sd=0.1;
glong = np.concatenate((3.0*gdag,2.0*gdag,gdag),axis=0)+np.random.normal(loc=0.0,scale=sd,size=(3*M,1));
clong = np.convolve(ginv.ravel(), glong.ravel() );
clong = gda_cvec(clong[0:3*N]);
 
# plot sequence of airgin pulses
fig4 = plt.figure(4,figsize=(12,5));
ax1=plt.subplot(1,1,1);
glongmax = np.max(np.abs(glong));
plt.axis( [tmin, tmin+3*(tmax-tmin), -1.1*glongmax, 1.1*glongmax] );
plt.plot(gda_cvec(tmin+Dt*np.linspace(0,3*M-1,3*M)),glong,"k-",lw=2 );
plt.xlabel("time, t");
plt.ylabel("d(t)");
plt.show();
print("Caption: Sequence of airgin pulses.");
 
# plot deconvolved data
fig5 = plt.figure(5,figsize=(12,5));
ax1=plt.subplot(1,1,1);
clongmax = np.max(np.abs(clong));
plt.axis( [tmin, tmin+3*(tmax-tmin), -clongmax, clongmax] );
plt.plot( gda_cvec(tmin+Dt*np.linspace(0,3*M-1,3*M)),clong,"k-",lw=2 );
plt.xlabel("time, t");
plt.ylabel("ginv(t)*d(t)");
plt.show();
print("Caption: Deconvolved airgun pulses");
gdapy15_02
No description has been provided for this image
Caption: Airgun pulse
No description has been provided for this image
No description has been provided for this image
Caption: Inverse filter
No description has been provided for this image
Caption: Deconvolved (spiked) airgun pulse.
damping factor 0.001000 wt 100.000000   prediction error 0.160612
No description has been provided for this image
Caption: Sequence of airgin pulses.
No description has been provided for this image
Caption: Deconvolved airgun pulses
In [52]:
# gdapy15_03
# correcting gravity cross-over errors
 
print("gdapy15_03");

# load gravity(lat,lon) data
D = np.genfromtxt("../Data/gravity.txt", delimiter="\t");
N,i = np.shape(D);
lon=D[0:N,0:1];
lat=D[0:N,1:2];
grav=D[0:N,2:3];
Ng, i = np.shape(lon);
 
# setup axes
mingrav = np.min(grav);
maxgrav = np.max(grav);
meangrav = np.mean(grav);
lonmin=np.min(lon);
lonmax=np.max(lon);
latmin=np.min(lat);
latmax=np.max(lat);
Nlon=np.sum(lat==lat[0,0]);
Nlat=np.sum(lon==lon[1,0]);
print("Dataset has %d lats and %d lons" % (Nlat, Nlon));
Dlon = (lonmax-lonmin)/(Nlon-1);
Dlat = (latmax-latmin)/(Nlat-1);

# put data into grid
# Note the messing about with trasnposes and
# flipup() to make it come out rightside up
LON=np.reshape(lon,(Nlat,Nlon));
LAT=np.reshape(lat,(Nlat,Nlon));
GRAV=np.reshape(grav,(Nlat,Nlon));

# plot true gravity
fig1 = plt.figure(1,figsize=(12,12));
ax1=plt.subplot(2,2,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [lonmin, lonmax, latmin, latmax] );
left=lonmin;
right=lonmax;
bottom=latmin;
top=latmax;
# test orientation
TEST=0;
if( TEST ):
    GRAV = mingrav*np.zeros((Nlon,Nlat));
    GRAV[0:5,0:40]=maxgrav; # 5 in lat, 40 in lon
plt.imshow( GRAV, cmap=mycmap, vmin=mingrav, vmax=maxgrav, extent=(left,right,bottom,top) );
plt.xlabel("longitude");
plt.ylabel("latitude");
plt.title('true gravity');

# plot true gravity
ax1=plt.subplot(2,2,2);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [lonmin, lonmax, latmin, latmax] );
left=lonmin;
right=lonmax;
bottom=latmin;
top=latmax;
plt.imshow( GRAV, cmap=mycmap, vmin=mingrav, vmax=maxgrav, extent=(left,right,bottom,top) );
plt.xlabel("longitude");
plt.ylabel("latitude");
plt.title('tracks');

# the tracks are synthetic; that is, constructed in this program
Ntracks = 35;
Ntlon = Nlon;
Dtlon = (lonmax-lonmin)/(Ntlon-1);
tlon = gda_cvec(lonmin + Dtlon*np.linspace(0,Ntlon-1,Ntlon));

# construct ascending tracks
NA = Ntracks;
Atracklat = np.zeros((Ntlon, NA));
C = 1.0;
L = 2.0*(lonmax-lonmin);
for i in range(NA):
    Atracklat[0:Ntlon,i:i+1] = tlon + C*np.cos(2.0*pi*tlon/L) + i/2.0 - 9.0;
    plt.plot( tlon, Atracklat[0:Ntlon,i:i+1], "k-", lw=1 );

# construct descending tracks
ND = Ntracks;
Dtracklat = np.zeros((Ntlon, NA));
C = 1.0;
L = 2.0*(lonmax-lonmin);
for i in range(ND):
    Dtracklat[0:Ntlon,i:i+1] = latmax - (tlon + C*np.cos(2.0*pi*tlon/L) + i/2.0 -9);
    plt.plot( tlon, Dtracklat[0:Ntlon,i:i+1], "k-", lw=1 );

M = NA+ND; # number of model parameters are number of tracks

# model parameters are gravity shifts (offets) along each track.
# they are randomply chosen.
# store the shifts in vector with ascending tracks first
sm=0.1;
mtrue = np.random.normal(loc=0.0,scale=sm*(maxgrav-mingrav)/2.0,size=(M,1));
 
# create a dataset of all the data beneath the tracks,
# but offset by the shifts
slonv = np.zeros((M*Ntlon,1));
slatv = np.zeros((M*Ntlon,1));
sgrav = np.zeros((M*Ntlon,1));
slon = tlon;
Nsamples=0;
for i in range(NA):  # ascending tracks
    slat = Atracklat[0:Ntlon,i:i+1];
    ii = (np.floor((slat-latmin)/Dlat)).astype(int);
    jj = (np.floor((slon-lonmin)/Dlon)).astype(int);
    for kk in range(Ntlon):
        if((ii[kk,0]>=0)&(ii[kk,0]<Nlat)&(jj[kk,0]>=0)&(jj[kk,0]<Nlon)):
            slonv[Nsamples,0] = slon[kk,0];
            slatv[Nsamples,0] = slat[kk,0];
            sgrav[Nsamples,0] = GRAV[ii[kk,0],jj[kk,0]]+mtrue[i,0];
            Nsamples=Nsamples+1;

for j in range(ND): # decending tracks
    slat = Dtracklat[0:Ntlon,j:j+1];
    ii = (np.floor((slat-latmin)/Dlat)).astype(int);
    jj = (np.floor((slon-lonmin)/Dlon)).astype(int);
    for kk in range(Ntlon):
        if((ii[kk,0]>=0)&(ii[kk,0]<Nlat)&(jj[kk,0]>=0)&(jj[kk,0]<Nlon)):
            slonv[Nsamples,0] = slon[kk,0];
            slatv[Nsamples,0] = slat[kk,0];
            sgrav[Nsamples,0] = GRAV[ii[kk,0],jj[kk,0]]+mtrue[NA+j,0];
            Nsamples=Nsamples+1;

# truncate
slonv=slonv[0:Nsamples,0:1];
slatv=slatv[0:Nsamples,0:1];
sgrav=sgrav[0:Nsamples,0:1];
print("%d samples to fill out Nlat by Nlon = %d plane"
      % (Nsamples, Nlat*Nlon) );

# interpolate data along tracks to a 2D image
# First: remove duplicates points
isorted = np.lexsort( (slonv.ravel(), slatv.ravel()) );
A = np.concatenate( (slonv, slatv, sgrav), axis=1 );
B = A[isorted,0:4];
Nsort,i=np.shape(B);
flag = np.ones((Nsort,1),dtype=int);
for i in range(1,Nsort):
    if( (B[i,0]==B[i-1,0]) & (B[i,1]==B[i-1,1]) ):
        flag[i,0]=0;

q = np.where(flag==1);
kk = q[0];
CC = B[kk,0:3];
NC,i = np.shape(CC);
obsgrav = griddata( (CC[0:NC,0], CC[0:NC,1]), CC[0:NC,2], (lon, lat),
                   method='cubic', fill_value=meangrav );
OBSGRAV = np.flipud(np.reshape(obsgrav,(Nlon,Nlat)));

# plot observed gravity
ax1=plt.subplot(2,2,3);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [lonmin, lonmax, latmin, latmax] );
left=lonmin;
right=lonmax;
bottom=latmin;
top=latmax;
plt.imshow( OBSGRAV, cmap=mycmap, vmin=mingrav, vmax=maxgrav, extent=(left,right,bottom,top) );
plt.xlabel("longitude");
plt.ylabel("latitude");
plt.title('obs gravity');

# as the track analysis section is complicated, but
# relatively fast, I just copied it in-total, rather
# than to edit our just the part that are really
# needed ...

# find all track intersections (cross-overs)
# Xlon(i,j) is lon where ascending track i and descending track j cross
Xlon = np.empty((NA,ND)) * np.nan;
# Xlat(i,j) is lat where ascending track i and descending track j cross
Xlat = np.empty((NA,ND))  * np.nan;
# Xgrav(i,j) is grav where ascending track i and descending track j cross
Xgrav = np.empty((NA,ND)) * np.nan;
# iiX, jjX are nearest indices in GRAV(iiX,iiJ) array
iiX = -np.ones((NA,ND));
jjX = -np.empty((NA,ND));
count=0;
for i in range(NA):
    for j in range(ND):
        signAD = np.sign(Atracklat[0:Ntlon,i:i+1]-Dtracklat[0:Ntlon,j:j+1]);
        sign1 = signAD[0,0];
        q = np.where( signAD != sign1 );
        if( not q[0].any() ): # no intersection
            continue;
        k2 = q[0][0];
        if( k2 == 0 ): # intersects at left edge; ignore
            continue;
        # linearly interpolate
        k1 = k2-1;
        A = Atracklat[k1,i]-Dtracklat[k1,j];
        B = Atracklat[k2,i]-Dtracklat[k2,j];
        slope = (B-A)/Dtlon;
        deltalon = -A/slope;
        Xlon[i,j] = tlon[k1,0]+deltalon;
        Xlat[i,j] = Xlon[i,j] + C*cos(2.0*pi*Xlon[i,j]/L) + (i/2.0-9.0);
        jjXp=int((Xlon[i,j]-lonmin)/Dlon);
        iiXp=int((Xlat[i,j]-latmin)/Dlat);
        if( (iiXp>=1) & (iiXp<=Nlat) & (jjXp>=1) & (jjXp<=Nlon) ):
            count=count+1;
            iiX[i,j] = iiXp;
            jjX[i,j] = jjXp;
            # check if these are right by making a dot on the
            # image and comparing a displayed version of the lat, lon
            # with the position of the dot
            debg = 0;
            if( debg & (count==20) ):
                GRAV[ iiXp, jjXp ] = mingrav;
                print("lon %f lat %f" % (Xlon[i,j], Xlat[i,j]) );
            Xgrav[i,j] = GRAV[iiXp, jjXp];
            # plot intersection point on map
            debg = 0;
            if( debg ):
                plt.plot( Xlon[i,j], Xlat[i,j], "r.", lw=2);

N = count; # number of data is number of intersections
 
# gravity values for each track at its intersection point
Agrav = np.zeros((N,1));
Dgrav = np.zeros((N,1));
# back indices to i,j
iA = np.zeros((N,1),dtype=int);
jD = np.zeros((N,1),dtype=int);
k=0;
for i in range(NA):
    for j in range(ND):
        if( iiX[i,j] == (-1) ):
            continue;
        Agrav[k,0] = Xgrav[i,j]+mtrue[i,0];
        Dgrav[k,0] = Xgrav[i,j]+mtrue[j+NA,0];
        iA[k,0]=i;
        jD[k,0]=j;
        k=k+1;
        
# data equations are that, at an intersection point,
# grav measured by ascending track - offset of that track =
# grav measured by decending track - offset of that track
# use sparse matrix created by the row-column-value method
NELEMENTS = 2*N+10;
Gr = np.zeros((NELEMENTS,1),dtype=int);
Gc = np.zeros((NELEMENTS,1),dtype=int);
Gv = np.zeros((NELEMENTS,1));
d = np.zeros((N,1)); # right hand side
Nel=0;
for k in range(N):  # loop row-wise
    Gr[Nel,0] = k;
    Gc[Nel,0] = iA[k,0];
    Gv[Nel,0] = 1.0;
    Nel=Nel+1;
    Gr[Nel,0] = k;
    Gc[Nel,0] = NA+jD[k,0];
    Gv[Nel,0] = -1.0;
    Nel=Nel+1;
    d[k,0] = Agrav[k,0]-Dgrav[k,0];
    debg = 0;
    if( debg ):
    	print("%d   %d %d   %f" % (k, iA[k,0], jD[k,0], d[k,0]) );

# make sparse matrix
gdaGsparse=sparse.coo_matrix((Gv.ravel(),(Gr.ravel(),Gc.ravel())),shape=(N,M));
del Gr; # clear index arrays
del Gc;
Gmax=np.max(np.abs(Gv));
del Gv;

# solve via damped least squares
gdaepsilon = 1.0e-5*Gmax;
tol = 1e-10;     # tolerance
maxit = 3*N;     # maximum number of iterations allowed
# define linear operator needed for biconjugate gradient solver
LO=las.LinearOperator(shape=(M,M),matvec=gda_dlsmul,rmatvec=gda_dlsmul);
GTd = gda_dlsrhs(d);
q=las.bicg(LO,GTd,rtol=tol,maxiter=maxit);
mest = gda_cvec(q[0].astype(float));

# recreate the dataset of all the data beneath the tracks,
# but correct the shifts
slonv = np.zeros((M*Ntlon,1));
slatv = np.zeros((M*Ntlon,1));
sgrav = np.zeros((M*Ntlon,1));
slon = tlon;
Nsamples=0;
for i in range(NA):  # ascending tracks
    slat = Atracklat[0:Ntlon,i:i+1];
    ii = (np.floor((slat-latmin)/Dlat)).astype(int);
    jj = (np.floor((slon-lonmin)/Dlon)).astype(int);
    for kk in range(Ntlon):
        if((ii[kk,0]>=0)&(ii[kk,0]<Nlat)&(jj[kk,0]>=0)&(jj[kk,0]<Nlon)):
            slonv[Nsamples,0] = slon[kk,0];
            slatv[Nsamples,0] = slat[kk,0];
            sgrav[Nsamples,0] = GRAV[ii[kk,0],jj[kk,0]]+mtrue[i,0]-mest[i,0];
            Nsamples=Nsamples+1;

for j in range(ND): # decending tracks
    slat = Dtracklat[0:Ntlon,j:j+1];
    ii = (np.floor((slat-latmin)/Dlat)).astype(int);
    jj = (np.floor((slon-lonmin)/Dlon)).astype(int);
    for kk in range(Ntlon):
        if((ii[kk,0]>=0)&(ii[kk,0]<Nlat)&(jj[kk,0]>=0)&(jj[kk,0]<Nlon)):
            slonv[Nsamples,0] = slon[kk,0];
            slatv[Nsamples,0] = slat[kk,0];
            sgrav[Nsamples,0] = GRAV[ii[kk,0],jj[kk,0]]+mtrue[NA+j,0]-mest[NA+j,0];
            Nsamples=Nsamples+1;

# truncate
slonv=slonv[0:Nsamples,0:1];
slatv=slatv[0:Nsamples,0:1];
sgrav=sgrav[0:Nsamples,0:1];

# interpolate data along tracks to a 2D image
# First: remove duplicates points
isorted = np.lexsort( (slonv.ravel(), slatv.ravel()) );
A = np.concatenate( (slonv, slatv, sgrav), axis=1 );
B = A[isorted,0:4];
Nsort,i=np.shape(B);
flag = np.ones((Nsort,1),dtype=int);
for i in range(1,Nsort):
    if( (B[i,0]==B[i-1,0]) & (B[i,1]==B[i-1,1]) ):
        flag[i,0]=0;

q = np.where(flag==1);
kk = q[0];
CC = B[kk,0:3];
NC,i = np.shape(CC);
obsgrav = griddata( (CC[0:NC,0], CC[0:NC,1]), CC[0:NC,2], (lon, lat),
                   method='cubic', fill_value=meangrav );
OBSGRAV = np.flipud(np.reshape(obsgrav,(Nlon,Nlat)));

# plot observed gravity
ax1=plt.subplot(2,2,4);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [lonmin, lonmax, latmin, latmax] );
left=lonmin;
right=lonmax;
bottom=latmin;
top=latmax;
plt.imshow( OBSGRAV, cmap=mycmap, vmin=mingrav, vmax=maxgrav, extent=(left,right,bottom,top) );
plt.xlabel("longitude");
plt.ylabel("latitude");
plt.title('pre gravity');

plt.show();
print("Caption: Maps of (top left) True gravity, (top right) tracks");
print("(bottom left) Observed gravity, (bottom right) predicted gravity.");

# plot observed gravity
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(2,2,4);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [lonmin, lonmax, latmin, latmax] );
left=lonmin;
right=lonmax;
bottom=latmin;
top=latmax;
plt.imshow( OBSGRAV, cmap=mycmap, vmin=mingrav, vmax=maxgrav, extent=(left,right,bottom,top) );
plt.xlabel("longitude");
plt.ylabel("latitude");
plt.show();
print("Caption: Predicted gravity.");
gdapy15_03
Dataset has 120 lats and 120 lons
4787 samples to fill out Nlat by Nlon = 14400 plane
No description has been provided for this image
Caption: Maps of (top left) True gravity, (top right) tracks
(bottom left) Observed gravity, (bottom right) predicted gravity.
No description has been provided for this image
Caption: Predicted gravity.
In [53]:
# gdapy15_04
 
# a simple tomography example with a 2D model m(x,y)
# the imaged region is a rectangle, the rays are straight lines
# sources and receivers are colocated on all four edges of the rectangle
 
print("gdapy15_04");
 
# shortest ray allowed
Rmin=5;
 
# true model m(x,y) is read from a file
Sbig=plt.imread("../Data/magma.tif");
Nybig, Nxbig = np.shape(Sbig);
Sbig = -(Sbig-128.0*np.ones((Nybig, Nxbig)));
                
# decimate the model
idec=4;
Nx=floor(Nxbig/idec);
Ny=floor(Nybig/idec);
Nx2 = int(Nx/2);
Ny2 = int(Ny/2);
S=Sbig[0:Nybig:idec, 0:Nxbig:idec];
 
# independent variable x
xmin = 0.0;
xmax = float(Nx);
dx=(xmax-xmin)/(Nx-1);
x = gda_cvec(xmin+dx*np.linspace(0,Nx-1,Nx));
Dx = xmax-xmin;
 
# independent variable y
ymin = 0.0;
ymax = Ny;
dy=(ymax-ymin)/(Ny-1);
y = gda_cvec(ymin+dy*np.linspace(0,Ny-1,Ny));
Dy = ymax-ymin;
 
# rearrage model into vector
# order of model parameters in vector: index = (ix-1)*Ny + iy
mtrue = np.zeros((Nx*Ny,1));
rowindex = np.zeros((Nx*Ny,1),dtype=int);
colindex = np.zeros((Nx*Ny,1),dtype=int);
for ix in range(Nx):
    for iy in range(Ny):
        q = ix*Ny + iy;
        mtrue[q,0]=S[ix,iy];
        rowindex[q,0]=ix;
        colindex[q,0]=iy;

# plot true model
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( S, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
plt.show();
print("Caption: True model.");
print(" ");

# define sources and receivers on the edges of the box
Nside = 50;
Ns = 4*Nside;
xs = np.zeros((Ns, 2));
# side 1
xs[0:Nside,0:1] = (xmin+dx/10.0)*np.ones((Nside,1));
xs[0:Nside,1:2] = gda_cvec(ymin+Dy*np.linspace(0,Nside-1,Nside)/(Nside-1));
# side  2
xs[Nside:2*Nside,0:1] = (xmax-dx/10)*np.ones((Nside,1));
xs[Nside:2*Nside,1:2] = gda_cvec(ymin+Dy*np.linspace(0,Nside-1,Nside)/(Nside-1));
# side 3
xs[2*Nside:3*Nside,0:1] = gda_cvec(xmin+Dx*np.linspace(0,Nside-1,Nside)/(Nside-1));
xs[2*Nside:3*Nside,1:2] = (ymin+dy/10.0)*np.ones((Nside,1));
# side 4
xs[3*Nside:4*Nside,0:1] = gda_cvec(xmin+Dx*np.linspace(0,Nside-1,Nside)/(Nside-1));
xs[3*Nside:4*Nside,1:2] = (ymax-dy/10.0)*np.ones((Nside,1));
 
# source receiver pairs
N = int(Ns*Ns/2);  # bigger than the actual number of rays
ray = np.zeros((N,4));
k=0; # ray counter
for i in range(Ns-1):
    for j in range(i+1,Ns):
        R=sqrt( (xs[i,0]-xs[j,0])**2+(xs[i,1]-xs[j,1])**2 );
        if (R<Rmin): # exclude really short rays
            continue;
        ray[k,0] = xs[i,0];
        ray[k,1] = xs[i,1];
        ray[k,2] = xs[j,0];
        ray[k,3] = xs[j,1];
        k = k+1;
N=k;
print("%d rays" % (N));
ray = np.copy(ray[0:N,0:4]);
 
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( S, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
for k in range(0,Ns):
    plt.plot( xs[k,0], xs[k,1], "k^", lw=1 );
for k in range(N):
    if( np.random.randint(low=0,high=100)==0):
        plt.plot( gda_cvec(ray[k,0], ray[k,2]), gda_cvec(ray[k,1], ray[k,3]), "k-", lw=1 );
plt.show();
print("Caption: Stations (triangles) and every 100th ray (lines).");
print("  ");

# build data kernel
# order of model parameters in vector: index = (ix-1)*Ny + iy
M = Nx*Ny;
# use row-column-value method, instead of gdaGsparse=spalloc(N,M,2*N*Nx);
NELEMENTS = 2*N*Nx;
Gr = np.zeros((NELEMENTS,1),dtype=int);
Gc = np.zeros((NELEMENTS,1),dtype=int);
Gv = np.zeros((NELEMENTS,1));
Nel=0;
Nr = 2000;
binmin=0;
binmax=Nx*Ny;
rowsum=np.zeros((N,1)); # row sums of the data kernel, needed for backprojection
for k in range(N):
    # also use build non-sparse row Gk first method
    Gk = np.zeros((M,1));
    x1 = ray[k,0];
    y1 = ray[k,1];
    x2 = ray[k,2];
    y2 = ray[k,3];
    r = sqrt( (x2-x1)**2 + (y2-y1)**2 );
    dr = r/Nr;
    xv = x1 + (x2-x1)*np.linspace(0,Nr-1,Nr)/Nr;
    yv = y1 + (y2-y1)*np.linspace(0,Nr-1,Nr)/Nr;
    ixv = np.floor( Nx*(xv-xmin)/Dx ).astype(int);
    iyv = np.floor( Ny*(yv-ymin)/Dy ).astype(int);
    qv = ixv*Ny + iyv;
    # now count the ray segments in each pixel of the
    # image, and use the count to increment the appropriate
    # element of G.  The use of the histogram() method to do
    # the counting is a bit weird, but it seems to work
    h, e = np.histogram(qv, bins=M, range=(binmin,binmax));
    h = gda_cvec(h);
    kk = np.where(h != 0 );
    icount = kk[0];
    Gk[icount,0:1]=Gk[icount,0:1]+h[icount,0:1]*dr;
    rowsum[k,0]=np.sum(Gk);
    for i in range(M):
        if( Gk[i,0] != 0.0 ):
            Gr[Nel,0]=k;
            Gc[Nel,0]=i;
            Gv[Nel,0]=Gk[i,0];
            Nel=Nel+1;

# delete unused elements
Gr=Gr[0:Nel,0:1];
Gc=Gc[0:Nel,0:1];
Gv=Gv[0:Nel,0:1];
Gmax = np.max(np.abs(Gv));

print("Data kernel has %d non-zero elements" % (Nel))

# build sparse matrix
gdaGsparse=sparse.coo_matrix((Gv.ravel(),(Gr.ravel(),Gc.ravel())),shape=(N,M));
del Gr; # clear index arrays
del Gc;
del Gv;

# compute true travel times
d = gdaGsparse*mtrue;  # * OK sing G is sparse

# define linear operator needed for biconjugate gradient solver
gdaepsilon = 1.0e-2*Gmax;
LO=las.LinearOperator(shape=(M,M),matvec=gda_dlsmul,rmatvec=gda_dlsmul);
# solve using conjugate gradient algorithm
mest = np.zeros((M,1));
tol = 1e-12;     # tolerance
maxit = 3*(N+M); # maximum number of iterations allowed

# solve least squares problem Fm=f by biconjugate gradients
GTd=gda_dlsrhs(d);
q=las.bicg(LO,GTd,rtol=tol,maxiter=maxit);
mest = gda_cvec(q[0].astype(float));

# rearrange m back into image
Sest = np.zeros((Nx,Ny));
for k in range(M):
    ix=rowindex[k,0];
    iy=colindex[k,0];
    Sest[ix,iy]=mest[k,0];

# plot least squares model
fig4 = plt.figure(4,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( Sest, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
plt.show();
print("Caption: Damped least squares estimate of model.");

# plot true travel times on a 2D diagram
# arranged by the rays's closest approach to the origin
# and its orientation
 
# want to plot traveltimes on NR by Ntheta grid
# independent variable R
NR = 100;
Rmin = 0.0;
Rmax = 2.0*sqrt((Dx/2)**2+(Dy/2)**2);
dR = (Rmax-Rmin)/(NR-1);
 
# independent variable theta
Ntheta = 100;
thetamin = -180.0;
thetamax = 180.0;
dtheta = (thetamax-thetamin)/(Ntheta-1);
 
# build traveltime grid for plotting purposes
TT = np.zeros((NR,Ntheta));
s = np.zeros((N,1));
R = np.zeros((N,1));
theta = np.zeros((N,1));
print("Ray diagram N: %d NR %d Ntheta %d", (N, NR, Ntheta));
for k in range(N):
    x1 = ray[k,0];
    y1 = ray[k,1];
    x2 = ray[k,2];
    y2 = ray[k,3];
    # compute closest approach to origin and angle
    xd = (x2-x1);
    yd = (y2-y1);
    s[k,0] = -(x1*xd+y1*yd)/(xd**2+yd**2);
    xs = x1+xd*s[k,0];
    ys = y1+yd*s[k,0];
    R[k,0] = sqrt( xs**2 + ys**2 );
    theta[k,0] = (180.0/pi)*atan2(ys,xs);
    iR = int((R[k,0]-Rmin)/dR);
    if( iR<0 ):
        iR=0;
    elif ( iR>=NR ):
        iR=NR-1;
    itheta = int((theta[k,0]-thetamin)/dtheta);
    if( itheta<0 ):
        itheta=0;
    elif ( itheta>=Ntheta ):
        itheta=Ntheta-1;
    TT[iR,itheta]=d[k,0];
    # out in data for reverse orientation of ray, for symmetery
    theta2 = (180.0/pi)*atan2(-ys,-xs);
    itheta = int((theta2-thetamin)/dtheta);
    if( itheta<0 ):
        itheta=0;
    elif ( itheta>=Ntheta ):
        itheta=Ntheta-1;
    TT[iR,itheta]=d[k,0];

# plot TT(R,theta)
maxTT = np.max(TT);
fig3 = plt.figure(3,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [thetamin, thetamax, Rmin, Rmax] );
left=thetamin;
right=thetamax;
bottom=Rmin;
top=Rmax;
plt.imshow( np.flipud(TT), cmap=mycmap, vmin=0, vmax=maxTT, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.gca().invert_yaxis();
plt.xlabel("theta");
plt.ylabel("R");
plt.show();
print("Caption: Traveltimes.");
gdapy15_04
No description has been provided for this image
Caption: True model.
 
19264 rays
No description has been provided for this image
Caption: Stations (triangles) and every 100th ray (lines).
  
Data kernel has 1171344 non-zero elements
No description has been provided for this image
Caption: Damped least squares estimate of model.
Ray diagram N: %d NR %d Ntheta %d (19264, 100, 100)
No description has been provided for this image
Caption: Traveltimes.
In [54]:
# gdapy15_05
 
# a simple tomography example with a 2D model m(x,y)
# the imaged region is a rectangle, the rays are straight lines
# sources and receivers are colocated on three of the four
# edges of the rectangle, a situation which is perhaps analagous
# to seismic tomography problems
 
print("gdapy15_05");
 
# shortest ray allowed
Rmin=5;
 
# true model m(x,y) is read from a file
Sbig=plt.imread("../Data/magma.tif");
Nybig, Nxbig = np.shape(Sbig);
Sbig = -(Sbig-128.0*np.ones((Nybig, Nxbig)));
                
# decimate the model
idec=4;
Nx=floor(Nxbig/idec);
Ny=floor(Nybig/idec);
Nx2 = int(Nx/2);
Ny2 = int(Ny/2);
S=Sbig[0:Nybig:idec, 0:Nxbig:idec];
 
# independent variable x
xmin = 0.0;
xmax = float(Nx);
dx=(xmax-xmin)/(Nx-1);
x = gda_cvec(xmin+dx*np.linspace(0,Nx-1,Nx));
Dx = xmax-xmin;
 
# independent variable y
ymin = 0.0;
ymax = Ny;
dy=(ymax-ymin)/(Ny-1);
y = gda_cvec(ymin+dy*np.linspace(0,Ny-1,Ny));
Dy = ymax-ymin;
 
# rearrage model into vector
# order of model parameters in vector: index = (ix-1)*Ny + iy
mtrue = np.zeros((Nx*Ny,1));
rowindex = np.zeros((Nx*Ny,1),dtype=int);
colindex = np.zeros((Nx*Ny,1),dtype=int);
for ix in range(Nx):
    for iy in range(Ny):
        q = ix*Ny + iy;
        mtrue[q,0]=S[ix,iy];
        rowindex[q,0]=ix;
        colindex[q,0]=iy;

# plot true model
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( S, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
plt.show();
print("Caption: True model.");
print(" ");

# define sources and receivers on the edges of the box
Nside = 50;
Ns = 3*Nside;
xs = np.zeros((Ns, 2));
# side 1
xs[0:Nside,0:1] = (xmin+dx/10.0)*np.ones((Nside,1));
xs[0:Nside,1:2] = gda_cvec(ymin+Dy*np.linspace(0,Nside-1,Nside)/(Nside-1));
# side  2
xs[Nside:2*Nside,0:1] = (xmax-dx/10)*np.ones((Nside,1));
xs[Nside:2*Nside,1:2] = gda_cvec(ymin+Dy*np.linspace(0,Nside-1,Nside)/(Nside-1));
# side 3
xs[2*Nside:3*Nside,0:1] = gda_cvec(xmin+Dx*np.linspace(0,Nside-1,Nside)/(Nside-1));
xs[2*Nside:3*Nside,1:2] = (ymin+dy/10.0)*np.ones((Nside,1));
 
# source receiver pairs
N = int(Ns*Ns/2);  # bigger than the actual number of rays
ray = np.zeros((N,4));
k=0; # ray counter
for i in range(Ns-1):
    for j in range(i+1,Ns):
        R=sqrt( (xs[i,0]-xs[j,0])**2+(xs[i,1]-xs[j,1])**2 );
        if (R<Rmin): # exclude really short rays
            continue;
        ray[k,0] = xs[i,0];
        ray[k,1] = xs[i,1];
        ray[k,2] = xs[j,0];
        ray[k,3] = xs[j,1];
        k = k+1;
N=k;
print("%d rays" % (N));
ray = np.copy(ray[0:N,0:4]);
 
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( S, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
for k in range(0,Ns):
    plt.plot( xs[k,0], xs[k,1], "k^", lw=1 );
for k in range(N):
    if( np.random.randint(low=0,high=100)==0):
        plt.plot( gda_cvec(ray[k,0], ray[k,2]), gda_cvec(ray[k,1], ray[k,3]), "k-", lw=1 );
plt.show();
print("Caption: Stations (triangles) and every 100th ray (lines).");
print("  ");

# build data kernel
# order of model parameters in vector: index = (ix-1)*Ny + iy
M = Nx*Ny;
# use row-column-value method, instead of gdaGsparse=spalloc(N,M,2*N*Nx);
NELEMENTS = 2*N*Nx;
Gr = np.zeros((NELEMENTS,1),dtype=int);
Gc = np.zeros((NELEMENTS,1),dtype=int);
Gv = np.zeros((NELEMENTS,1));
Nel=0;
Nr = 2000;
binmin=0;
binmax=Nx*Ny;
rowsum=np.zeros((N,1)); # row sums of the data kernel, needed for backprojection
for k in range(N):
    # also use build non-sparse row Gk first method
    Gk = np.zeros((M,1));
    x1 = ray[k,0];
    y1 = ray[k,1];
    x2 = ray[k,2];
    y2 = ray[k,3];
    r = sqrt( (x2-x1)**2 + (y2-y1)**2 );
    dr = r/Nr;
    xv = x1 + (x2-x1)*np.linspace(0,Nr-1,Nr)/Nr;
    yv = y1 + (y2-y1)*np.linspace(0,Nr-1,Nr)/Nr;
    ixv = np.floor( Nx*(xv-xmin)/Dx ).astype(int);
    iyv = np.floor( Ny*(yv-ymin)/Dy ).astype(int);
    qv = ixv*Ny + iyv;
    # now count the ray segments in each pixel of the
    # image, and use the count to increment the appropriate
    # element of G.  The use of the histogram() method to do
    # the counting is a bit weird, but it seems to work
    h, e = np.histogram(qv, bins=M, range=(binmin,binmax));
    h = gda_cvec(h);
    kk = np.where(h != 0 );
    icount = kk[0];
    Gk[icount,0:1]=Gk[icount,0:1]+h[icount,0:1]*dr;
    rowsum[k,0]=np.sum(Gk);
    for i in range(M):
        if( Gk[i,0] != 0.0 ):
            Gr[Nel,0]=k;
            Gc[Nel,0]=i;
            Gv[Nel,0]=Gk[i,0];
            Nel=Nel+1;

# delete unused elements
Gr=Gr[0:Nel,0:1];
Gc=Gc[0:Nel,0:1];
Gv=Gv[0:Nel,0:1];
Gmax = np.max(np.abs(Gv));
print("Data kernel has %d non-zero elements" % (Nel))

# build sparse matrix
gdaGsparse=sparse.coo_matrix((Gv.ravel(),(Gr.ravel(),Gc.ravel())),shape=(N,M));
del Gr; # clear index arrays
del Gc;
del Gv;

# compute true travel times
d = gdaGsparse*mtrue;  # * OK sing G is sparse

# define linear operator needed for biconjugate gradient solver
gdaepsilon = 0.1*Gmax;
LO=las.LinearOperator(shape=(M,M),matvec=gda_dlsmul,rmatvec=gda_dlsmul);
# solve using conjugate gradient algorithm
mest = np.zeros((M,1));
tol = 1e-5;     # tolerance
maxit = 3*(N+M); # maximum number of iterations allowed

# solve least squares problem Fm=f by biconjugate gradients
GTd=gda_dlsrhs(d);
q=las.bicg(LO,GTd,rtol=tol,maxiter=maxit);
mest = gda_cvec(q[0].astype(float));

# rearrange m back into image
Sest = np.zeros((Nx,Ny));
for k in range(M):
    ix=rowindex[k,0];
    iy=colindex[k,0];
    Sest[ix,iy]=mest[k,0];

# plot least squares model
fig4 = plt.figure(4,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, ymin, ymax] );
left=xmin;
right=xmax;
bottom=xmax;
top=xmin;
plt.imshow( Sest, cmap=mycmap, vmin=-128, vmax=128, extent=(left,right,bottom,top) );
plt.colorbar();
plt.gca().invert_yaxis();
plt.xlabel("y");
plt.ylabel("x");
plt.show();
print("Caption: Damped least squares estimate of model.");

# plot true travel times on a 2D diagram
# arranged by the rays's closest approach to the origin
# and its orientation
 
# want to plot traveltimes on NR by Ntheta grid
# independent variable R
NR = 100;
Rmin = 0.0;
Rmax = 2.0*sqrt((Dx/2)**2+(Dy/2)**2);
dR = (Rmax-Rmin)/(NR-1);
 
# independent variable theta
Ntheta = 100;
thetamin = -180.0;
thetamax = 180.0;
dtheta = (thetamax-thetamin)/(Ntheta-1);
 
# build traveltime grid for plotting purposes
TT = np.zeros((NR,Ntheta));
s = np.zeros((N,1));
R = np.zeros((N,1));
theta = np.zeros((N,1));
for k in range(N):
    x1 = ray[k,0];
    y1 = ray[k,1];
    x2 = ray[k,2];
    y2 = ray[k,3];
    # compute closest approach to origin and angle
    xd = (x2-x1);
    yd = (y2-y1);
    s[k,0] = -(x1*xd+y1*yd)/(xd**2+yd**2);
    xs = x1+xd*s[k,0];
    ys = y1+yd*s[k,0];
    R[k,0] = sqrt( xs**2 + ys**2 );
    theta[k,0] = (180.0/pi)*atan2(ys,xs);
    iR = int((R[k,0]-Rmin)/dR);
    if( iR<0 ):
        iR=0;
    elif ( iR>=NR ):
        iR=NR-1;
    itheta = int((theta[k,0]-thetamin)/dtheta);
    if( itheta<0 ):
        itheta=0;
    elif ( itheta>=Ntheta ):
        itheta=Ntheta-1;
    TT[iR,itheta]=d[k,0];
    # out in data for reverse orientation of ray, for symmetery
    theta2 = (180.0/pi)*atan2(-ys,-xs);
    itheta = int((theta2-thetamin)/dtheta);
    if( itheta<0 ):
        itheta=0;
    elif ( itheta>=Ntheta ):
        itheta=Ntheta-1;
    TT[iR,itheta]=d[k,0];

# plot TT(R,theta)
maxTT = np.max(TT);
fig3 = plt.figure(3,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [thetamin, thetamax, Rmin, Rmax] );
left=thetamin;
right=thetamax;
bottom=Rmin;
top=Rmax;
plt.imshow( np.flipud(TT), cmap=mycmap, vmin=0, vmax=maxTT, extent=(left,right,bottom,top) );
plt.colorbar(shrink=0.5);
plt.gca().invert_yaxis();
plt.xlabel("theta");
plt.ylabel("R");
plt.show();
print("Caption: Traveltimes.");
gdapy15_05
No description has been provided for this image
Caption: True model.
 
10713 rays
No description has been provided for this image
Caption: Stations (triangles) and every 100th ray (lines).
  
Data kernel has 613578 non-zero elements
No description has been provided for this image
Caption: Damped least squares estimate of model.
No description has been provided for this image
Caption: Traveltimes.
In [55]:
# gdapy15_06
 
# temperature inversion problem

# The data are a temperature profile made at
# a time tobs>=0.  The model parameters are the
# initial temperature profile (at time t0=0).
# The problem is solved many times, for
# different tobs's, so that one can examine
# how the ability to reconstruct the initial
# temperature degrades as the data are measured
# later and later in time.
 
# Thus, time in the plot is the time of observation,
# tobs. In all cases, the quantity being determined is
# the inital temperature distributon at the inital time,
# t0=0.
 
print("gdapy15_06");

# independent spartial variable x
Nx = 100;
xmin = -100.0;
xmax = 100.0;
Dx = (xmax-xmin)/(Nx-1);
x = gda_cvec(xmin + Dx*np.linspace(0,Nx-1,Nx));
 
# independent temporal variable t
Nt = 100;
tmin = 0.0;
tmax = 200.0;
Dt = (tmax-tmin)/(Nt-1);
t = gda_cvec(tmin + Dt*np.linspace(0,Nt-1,Nt));
 
# time/space matrices
tt = np.matmul(t,np.ones((1,Nx)));
xx = np.matmul(np.ones((Nt,1)),x.T);
 
# model parameters, one at each distance
s = 1.0;
mtrue = np.zeros((Nx,1));
L=5.0;
i1 = int(4.0*Nx/10.0);
i2 = int(6.0*Nx/10.0);
w=gda_cvec(np.linspace(i1,i2,i2-i1));
mtrue[i1:i2,0:1]=(2.0+np.cos(2.0*pi*(w-Nx/2.0)/L));
 
# the true data as a function of observation time and distance
# are constructed by evaluating an analytic solution
# to a heat flow equation
dtxtrue = np.zeros((Nt,Nx));
for i in range(Nx):
    # data kernel, Gij, gives temperature at xi due to source
    # at xj for a time, t0
    t0=t[i,0];
    if( t0==0.0 ):
        G=np.identity(Nx);
    else:
        X0 = np.matmul(x,np.ones((1,Nx)));
        XN = np.matmul(np.ones((Nx,1)),(x-Dx/2.0).T);
        XP = np.matmul(np.ones((Nx,1)),(x+Dx/2.0).T);
        erfA = sp.erf((X0-XN)/sqrt(t0));
        erfB = sp.erf((X0-XP)/sqrt(t0));
        G = 0.5*(erfA-erfB);
    dtxtrue[i:i+1,0:Nx] = np.matmul(G,mtrue).T;

# plot the true data
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, tmin, tmax] );
left=xmin;
right=xmax;
bottom=tmin;
top=tmax;
plt.imshow( np.flipud(dtxtrue), cmap=mycmap, vmin=0, vmax=np.max(dtxtrue), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("true temperature");
plt.colorbar();
plt.show();
print("Caption: True temperature as as function of time.");

# true model, replicated at all times
mtxtrue = np.matmul(np.ones((Nt,1)),mtrue.T);
 
# plot the true true model, replicated at all times
# plot the true data
fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, tmin, tmax] );
left=xmin;
right=xmax;
bottom=tmin;
top=tmax;
plt.imshow( np.flipud(mtxtrue), cmap=mycmap, vmin=0, vmax=np.max(dtxtrue), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("true temperature");
plt.colorbar();
plt.show();
print("Caption: True temperature as as function of time.");

# times at which to calculate resolution kernels
R1at = 10;
R2at = 40;
 
# damped minimum length case
# estimate the model using the data each time
mtxest = np.zeros((Nt,Nx));
for i in range(Nt):
    # true data
    dtrue=(dtxtrue[i:i+1,0:Nx]).T;
    
    # add noise
    sd = 0.01;
    dobs = dtrue + np.random.normal(loc=0.0,scale=sd,size=(Nx,1));
    
    # compute data kernel
    t0=t[i,0];
    if( t0==0.0 ):
        G=np.identity(Nx);
    else:
        X0 = np.matmul(x,np.ones((1,Nx)));
        XN = np.matmul(np.ones((Nx,1)),(x-Dx/2.0).T);
        XP = np.matmul(np.ones((Nx,1)),(x+Dx/2.0).T);
        erfA = sp.erf((X0-XN)/sqrt(t0));
        erfB = sp.erf((X0-XP)/sqrt(t0));
        G = 0.5*(erfA-erfB);
    
    # damped minimum length solution
    epsilon=1.0e-3;
    GGT = np.matmul(G,G.T)+epsilon*np.identity(Nx);
    GMG = np.matmul( G.T, la.inv(GGT) );
    mtxest[i:i+1,0:Nx] = (np.matmul(GMG,dobs)).T;
    
    # save resolving kernels
    if( i==R1at ):
        R1ML = GMG*G;
        print("Computed R1ML");
    elif (i==R2at):
        R2ML = GMG*G;
        print("Computed R2ML");
        
fig3 = plt.figure(3,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, tmin, tmax] );
left=xmin;
right=xmax;
bottom=tmin;
top=tmax;
plt.imshow( np.flipud(mtxest), cmap=mycmap, vmin=0, vmax=np.max(dtxtrue), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("ML mest");
plt.colorbar();
plt.show();
print("Caption: Minimum length solution.");

# Backus-Gilbert case
# estimate the model using the data each time
mtxest = np.zeros((Nt,Nx));
for i in range(Nt):
    # true data
    dtrue=dtxtrue[i:i+1,0:Nx].T;
    
    # add noise
    sd = 0.01;
    dobs = dtrue + np.random.normal(loc=0.0,scale=sd,size=(Nx,1));
    
    # compute data kernel
    t0=t[i,0];
    if( t0==0.0 ):
        G=np.identity(Nx);
    else:
        X0 = np.matmul(x,np.ones((1,Nx)));
        XN = np.matmul(np.ones((Nx,1)),(x-Dx/2.0).T);
        XP = np.matmul(np.ones((Nx,1)),(x+Dx/2.0).T);
        erfA = sp.erf((X0-XN)/sqrt(t0));
        erfB = sp.erf((X0-XP)/sqrt(t0));
        G = 0.5*(erfA-erfB);
    
    # construct Backus-Gilbert generalized inverse
    GMG = np.zeros((Nx,Nx));
    u = np.matmul(G,np.ones((Nx,1)));
    for k in range(Nx):
        w=np.power(np.linspace(0,Nx-1,Nx)-k,2);
        S = np.matmul(G,np.matmul(np.diag(w.ravel()),G));
        epsilon=1e-3;
        S = S+epsilon*np.identity(Nx);
        uSinv = np.matmul(u.T,la.inv(S));
        GMG[k:k+1,0:Nx] = uSinv / np.matmul(uSinv,u);
    
    # estimate solution
    mtxest[i:i+1,0:Nx] = (np.matmul(GMG,dobs)).T;
    
    # save resolving kernels
    if( i==R1at ):
        R1BG = GMG*G;
        print("Computed R1BG");
    elif (i==R2at):
        R2BG = GMG*G;
        print("Computed R2BG");
            
fig4 = plt.figure(4,figsize=(12,8));
ax1=plt.subplot(1,1,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, tmin, tmax] );
left=xmin;
right=xmax;
bottom=tmin;
top=tmax;
plt.imshow( np.flipud(mtxest), cmap=mycmap, vmin=0, vmax=np.max(dtxtrue), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("BG mest");
plt.colorbar();
plt.show();
print("Caption: Backus-Gilbert solution.");

# plot resolution kernels
fig5 = plt.figure(5,figsize=(12,12));

ax1=plt.subplot(2,2,1);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, xmin, xmax] );
left=xmin;
right=xmax;
bottom=xmin;
top=xmax;
plt.imshow( np.flipud(R1ML), cmap=mycmap, vmin=np.min(R1ML), vmax=np.max(R1ML), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("ML t=%.1f" % (t[R1at,0]) );
plt.colorbar();

ax1=plt.subplot(2,2,2);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, xmin, xmax] );
left=xmin;
right=xmax;
bottom=xmin;
top=xmax;
plt.imshow( np.flipud(R2ML), cmap=mycmap, vmin=np.min(R2ML), vmax=np.max(R2ML), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("ML t=%.1f" % (t[R2at,0]) );
plt.colorbar();

ax1=plt.subplot(2,2,3);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, xmin, xmax] );
left=xmin;
right=xmax;
bottom=xmin;
top=xmax;
plt.imshow( np.flipud(R1BG), cmap=mycmap, vmin=np.min(R1BG), vmax=np.max(R1BG), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("BG t=%.1f" % (t[R1at,0]) );
plt.colorbar();

ax1=plt.subplot(2,2,4);
mycmap=matplotlib.colormaps['jet'];
plt.axis( [xmin, xmax, xmin, xmax] );
left=xmin;
right=xmax;
bottom=xmin;
top=xmax;
plt.imshow( np.flipud(R2BG), cmap=mycmap, vmin=np.min(R2BG), vmax=np.max(R2BG), extent=(left,right,bottom,top) );
plt.gca().invert_yaxis();
plt.ylabel("observation time");
plt.xlabel("distance");
plt.title("BG t=%d" % (t[R2at,0]) );
plt.colorbar();

plt.show();
print("Caption Minimum Length (ML) and Backus-Gilbert (BG) resolution");
print("matrices using data collected at two different time");
gdapy15_06
No description has been provided for this image
Caption: True temperature as as function of time.
No description has been provided for this image
Caption: True temperature as as function of time.
Computed R1ML
Computed R2ML
No description has been provided for this image
Caption: Minimum length solution.
Computed R1BG
Computed R2BG
No description has been provided for this image
Caption: Backus-Gilbert solution.
No description has been provided for this image
Caption Minimum Length (ML) and Backus-Gilbert (BG) resolution
matrices using data collected at two different time
In [56]:
# gdapy15_07

# L1, L2 and Linf norm; fitting of a straight line
# L1 and Linf are via transformation to a linear
# programming problem; Ls is via least squares.

print("gdapy15_07");

# auxillary variable, z
N=10;
zmin = 0.0;
zmax = 1.0;
Dz = (zmax-zmin)/(N-1);
z = gda_cvec(zmin + Dz*np.linspace(0,N-1,N));
 
# set up for linear fit
M=2;
mtrue = gda_cvec( 1.0, 3.0 );
G = np.concatenate( (np.ones((N,1)), z), axis=1 );
dtrue = np.matmul(G,mtrue);
 
# syntehtic data, with noise drawn from a two-sided exponential
# distribution with variance sd**2.
sd = 0.4 * np.ones((N,1));
sdi = np.reciprocal(sd);
sdi2 = np.power(sdi,2);
r = np.random.exponential( scale=sd/sqrt(2), size=(N,1) );
s = 2.0*(np.random.randint(low=0,high=2,size=(N,1))-0.5);
dobs = dtrue +np.multiply(r,s);

# Part 1: L2 Norm
# least squares solution (sure the easiest!)
CI = np.diag(sdi2.ravel());
GTG = np.matmul(G.T,np.matmul(CI,G));
GTd = np.matmul(G.T,np.matmul(CI,dobs));
mls = la.solve(GTG,GTd);
dls = np.matmul(G,mls);
 
# Part 2: L1 norm
# set up for linear programming problem
# min f*x subject to A x <= b,  Aeq x = beq
 
# variables
# m = mp - mpp
# x = [mp', mpp', alpha', x', xp']
# with mp, mpp length M and alpha, x, xp, length N
L = 2*M+3*N;
x = np.zeros((L,1));
 
# f is length L
# minimize sum aplha(i)/sd(i)
f = np.zeros((L,1));
f[2*M:2*M+N]= np.reciprocal(sd);
 
# equality constraints
Aeq = np.zeros((2*N,L));
beq = np.zeros((2*N,1));
# first equation G(mp-mpp)+x-alpha=d
Aeq[0:N,0:M]             =  G;
Aeq[0:N,M:2*M]           = -G;
Aeq[0:N,2*M:2*M+N]       = -np.identity(N);
Aeq[0:N,2*M+N:2*M+2*N]   =  np.identity(N)
beq[0:N,0:1]             =  dobs;
# second equation G(mp-mpp)-xp+alpha=d
Aeq[N:2*N,0:M]               =  G;
Aeq[N:2*N,M:2*M]             = -G;
Aeq[N:2*N,2*M:2*M+N]         =  np.identity(N);
Aeq[N:2*N,2*M+2*N:2*M+3*N]   = -np.identity(N);
beq[N:2*N,0:1]               =  dobs;
 
# inequality constraints A x <= b
# part 1: everything positive
A = np.zeros((L+2*M,L));
b = np.zeros((L+2*M,1));
A[0:L,0:L] = -np.identity(L);
b[0:L,0:1] = np.zeros((L,1));
# part 2; mp and mpp have an upper bound.  Note: 
# Without this constraint, a potential problem is that
# mp and mpp are individually unbounded, though their
# difference, m=mp-mpp, is not.  Thus there might be cases
# where the algroithm strays to very large mp and mpp.
A[L:L+2*M,0:L] = np.eye(2*M,M=L);
mupperbound=10.0*np.max(np.abs(mls)); # might need to be adjusted for problem at hand
b[L:L+2*M,0:1] = mupperbound;

# solve linear programming problem
res = opt.linprog(c=f,A_ub=A,b_ub=b,A_eq=Aeq,b_eq=beq);
x = gda_cvec(res.x);
fmin = -res.fun;
status = res.success;
if( status ):
    print("L1 linear programming succeeded" );
else:
    print("L1 linear programming failed" );
mestL1 = x[0:M,0:1] - x[M:2*M,0:1];
dpreL1 = np.matmul(G,mestL1);

# Part 3: Linf Norm
# set up for linear programming problem
# min f*x subject to A x <= b,  Aeq x = beq
 
# variables
# m = mp - mpp
# x = [mp', mpp', alpha', x', xp']
# with mp, mpp length M; alpha length 1, x, xp, length N
L = 2*M+1+2*N;
x = np.zeros((L,1));
 
# f is length L
# minimize alpha
f = np.zeros((L,1));
f[2*M:2*M+1]=1.0;
 
# equality constraints
Aeq = np.zeros((2*N,L));
beq = np.zeros((2*N,1));
# first equation G(mp-mpp)+x-alpha=d
Aeq[0:N,0:M]             =  G;
Aeq[0:N,M:2*M]           = -G;
Aeq[0:N,2*M:2*M+1]       = -np.reciprocal(sd);
Aeq[0:N,2*M+1:2*M+1+N] = np.identity(N);
beq[0:N,0:1]             = dobs;
# second equation G(mp-mpp)-xp+alpha=d
Aeq[N:2*N,0:M]               =  G;
Aeq[N:2*N,M:2*M]             = -G;
Aeq[N:2*N,2*M:2*M+1]         =  np.reciprocal(sd);
Aeq[N:2*N,2*M+1+N:2*M+1+2*N] = -np.identity(N);
beq[N:2*N,0:1]               =  dobs;
 
# inequality constraints A x <= b
# part 1: everything positive
A = np.zeros((L+2*M,L));
b = np.zeros((L+2*M,1));
A[0:L,0:L] = -np.identity(L);
b[0:L,0:1] = np.zeros((L,1));
# part 2; mp and mpp have an upper bound.  Note: 
# A potential problem without this constraint is that
# mp and mpp are individually unbounded, though their
# difference, m=mp-mpp, is not.  Thus there might be cases
# where the algroithm strays to very large mp and mpp.
A[L:L+2*M,0:L] = np.eye(2*M,M=L);
mupperbound=10*np.max(np.abs(mls)); # might need to be adjusted for problem at hand
b[L:L+2*M,0:1] = mupperbound;
 
# solve linear programming problem
res = opt.linprog(c=f,A_ub=A,b_ub=b,A_eq=Aeq,b_eq=beq);
x = gda_cvec(res.x);
fmin = -res.fun;
status = res.success;
if( status ):
    print("Linf linear programming succeeded" );
else:
    print("Linf linear programming failed" );
mestLinf = x[0:M,0:1] - x[M:2*M,0:1];
dpreLinf = np.matmul(G,mestLinf);
 
# plot the observed and presicted data
fig1 = plt.figure(1,figsize=(12,5));
ax1=plt.subplot(1,1,1);
plt.axis( [zmin, zmax, 0.0, 5.0 ] );
plt.plot( z, dtrue, "k-", lw=3);
plt.plot( z, dpreL1, "g-", lw=3);
plt.plot( z, dls, "b-", lw=3);
plt.plot( z, dpreLinf, "r-", lw=3);
plt.plot( z, dobs, "ko", lw=3);
plt.xlabel("z");
plt.ylabel("d");
plt.show();
print("Caption: Linear fits to data. True data (black line),");
print("observed (crcles), predicted by L1 (green), predicted");
print("by L2 (blue), predicted by Linf (red).");
print(" ");
print("      m1        m1");
print("True: %.2f      %.2f" % (mtrue[0,0], mtrue[1,0]) );
print("L1:   %.2f      %.2f" % (mls[0,0], mls[1,0]) );
print("L2:   %.2f      %.2f" % (mestL1[0,0], mestL1[1,0]) );
print("Linf: %.2f      %.2f" % (mestLinf[0,0], mestLinf[1,0]) );
gdapy15_07
L1 linear programming succeeded
Linf linear programming succeeded
No description has been provided for this image
Caption: Linear fits to data. True data (black line),
observed (crcles), predicted by L1 (green), predicted
by L2 (blue), predicted by Linf (red).
 
      m1        m1
True: 1.00      3.00
L1:   0.84      3.46
L2:   0.97      3.03
Linf: 0.87      3.55
In [26]:
# gdapy15_08

# Depiction of a group of vectors in 3D space
# scattering about a central vector.
 
print("gdapy15_08");

xmin=0.0;
xmax=1.0;
ymin=0.0;
ymax=1.0;
zmin=0.0;
zmax=1.0;

# setup for 3D figure
fig1 = plt.figure(1,figsize=(12,5));   # smallish figure
ax = fig1.add_subplot(111, projection='3d');
plt.axis('off');
plt.grid(b=None);
ax.axes.set_xlim3d(left=xmin, right=xmax);
ax.axes.set_ylim3d(bottom=ymin, top=ymax);
ax.axes.set_zlim3d(bottom=zmin, top=zmax);
 
# 3D box
gdabox2(ax,xmin,xmax,ymin,ymax,zmin,zmax);

rbar=gda_cvec(1, 1, 1);       # the central unit vector
rbarsq = np.matmul(rbar.T,rbar); rbarsq=rbarsq[0,0];
rlen = sqrt(rbarsq);
rbar = rbar/rlen;

# Note: this is not a Fisher distribution, but it is azimuthually uniform
for i in range(20):
    rvec = np.random.uniform(low=-1.0, high=1., size=(3,1)); # random vector
    b = gda_cvec(np.cross(rvec.ravel(),rbar.ravel())); # vector perpendicular to rvec
    rnew = rbar + 0.15*b; # add small deviation to rvec
    rnewsq = np.matmul(rnew.T,rnew); rnewsq=rnewsq[0,0];
    rnew = rnew/sqrt(rnewsq); # normalize to unit length
    gdaarrow2( ax, rnew, "k-", 2 );
gdaarrow2( ax, rbar, "r-", 4 );
plt.show();
print("Caption: Vectors (black) scattering about a central vector (red).")
               
gdapy15_08
No description has been provided for this image
Caption: Vectors (black) scattering about a central vector (red).
In [29]:
# gdapy15_09
# depicts vector relationships in 3D space

# this Python version is much worse than my MATLAB(R)
# version, even after quite a lot of tinkering. Sorry
# about that - Bill Menke

def gdaarrowmod(ax, rend, rbar, mys,myl):
    # pretty crazy way to draw an arrowhead!
    rbarsq = np.matmul(rbar.T,rbar); rbarsq=rbarsq[0,0]
    tangent = rbar/sqrt(rbarsq);
    per1 = np.cross( tangent, gda_cvec(0.0, 0.0, 1.0), axis=0 );
    per1sq = np.matmul(per1.T,per1); per1sq=per1sq[0,0];
    per1 = per1/sqrt(per1sq);
    per2 = np.cross( tangent, per1, axis=0 );
    per2sq = np.matmul(per2.T,per2); per2sq=per2sq[0,0];
    per2 = per2/sqrt(per2sq);
    L = 0.05;
    px=gda_cvec(rend[0,0],rbar[0,0]);
    py=gda_cvec(rend[1,0],rbar[1,0]);
    pz=gda_cvec(rend[2,0],rbar[2,0]);
    ax.plot3D( px.ravel(), py.ravel(), pz.ravel(), mys, lw=myl, zorder=10.0 );
    v1 = rbar - L*tangent + 0.25*L*per1;
    v2 = rbar - L*tangent - 0.25*L*per1;
    v3 = rbar - L*tangent + 0.25*L*per2;
    v4 = rbar - L*tangent - 0.25*L*per2;
    x0=gda_cvec(rbar[0,0], v1[0,0]);
    y0=gda_cvec(rbar[1,0], v1[1,0]);
    z0=gda_cvec(rbar[2,0], v1[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    x0=gda_cvec(rbar[0,0], v2[0,0]);
    y0=gda_cvec(rbar[1,0], v2[1,0]);
    z0=gda_cvec(rbar[2,0], v2[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    x0=gda_cvec(rbar[0,0], v3[0,0]);
    y0=gda_cvec(rbar[1,0], v3[1,0]);
    z0=gda_cvec(rbar[2,0], v3[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    x0=gda_cvec(rbar[0,0], v4[0,0]);
    y0=gda_cvec(rbar[1,0], v4[1,0]);
    z0=gda_cvec(rbar[2,0], v4[2,0]);
    ax.plot3D( x0.ravel(), y0.ravel(), z0.ravel(), mys, lw=myl, zorder=10.0 );
    return 1;

print("gdapy15_09");

from matplotlib.colors import LightSource

xmin=-1.0;
xmax=1.0;
ymin=-1.0;
ymax=1.0;
zmin=-1.0;
zmax=1.0;

# setup for 3D figure
fig1 = plt.figure(1,figsize=(6,6));   # smallish figure
ax = fig1.add_subplot(111, projection='3d');
plt.axis('off');
plt.grid(b=None);
ax.axes.set_xlim3d(left=xmin, right=xmax);
ax.axes.set_ylim3d(bottom=ymin, top=ymax);
ax.axes.set_zlim3d(bottom=zmin, top=zmax);

# center of diagram
rbar = gda_cvec(0.0, 0.0, 0.0);

# draw sphere using wireframe technique
Nu = 210;
u0 = 2.0*pi*np.linspace(0,Nu-1,Nu)/(Nu-1);
Nv = 101;
v0 = 2.0*pi*np.linspace(0,Nv-1,Nv)/(Nv-1);
u, v = np.meshgrid(u0,v0);
radius=0.75;
x0 = radius*np.cos(u)*np.sin(v)+rbar[0,0];
y0 = radius*np.sin(u)*np.sin(v)+rbar[1,0];
z0 = radius*np.cos(v)+rbar[2,0];

ls = LightSource(azdeg=0, altdeg=10)
rgb = ls.shade(x0, plt.cm.Reds)
surf = ax.plot_surface(x0, y0, z0, linewidth=0, rstride=1, cstride=1, antialiased=True, facecolors=rgb, zorder=0.0);

# central vector
a1end = gda_cvec(1,1,1);
a1endsq = np.matmul(a1end.T,a1end); a1endsq=a1endsq[0,0];
a1end = radius*a1end/a1endsq;
a1 = 2.0*a1end;
gdaarrowmod( ax, a1end, a1, "k-", 4 );

# another vector
a2end = gda_cvec(1.8,1.81,1);
a2endsq = np.matmul(a2end.T,a2end); a2endsq=a2endsq[0,0];
a2end = radius*a2end/a2endsq;
a2 = 2.0*a2end;
gdaarrowmod( ax, a2end, a2, "b-", 4 );

# co-latitudinal arc from central vector to the other vector
a1dota2 = np.matmul(a1end.T,a2end); a1dota2=a1dota2[0,0];
th=180.0*acos(a1dota2)/pi;
Nth = int(th)+1;
alpha = gda_cvec(np.linspace(0,Nth-1,Nth)/(Nth-1));
arc1=np.zeros((Nth,3));
for i in range(Nth):
    ai = alpha[i,0];
    t = ai*a1end + (1.0-ai)*a2end;
    arc1[i:i+1,0:3] = t.T;
ax.plot3D( arc1[0:Nth,0:1].ravel(), arc1[0:Nth,1:2].ravel(), arc1[0:Nth,2:3].ravel(), "k-", lw=2, zorder=20.0 );

# longitudinal arc
lena1sq = np.matmul(a1end.T,a1end); lena1sq=lena1sq[0,0];
lena1 = sqrt(lena1sq);
lena2sq = np.matmul(a2end.T,a2end); lena2sq=lena2sq[0,0];
lena2 = sqrt(lena2sq);
a3 = gda_cvec(np.cross(a1end.ravel()/lena1,a2end.ravel()/lena2));
lena3sq = np.matmul(a3.T,a3); lena3sq=lena3sq[0,0];
lena3 = sqrt(lena3sq);
a3 = a3/lena3; # unit vector perpendicular to a1end and a2end
Nth=41; # must be odd
Nth2 = int(Nth/2);
alpha = gda_cvec(np.linspace(-Nth2,Nth2,Nth)/90.0);
arc2=np.zeros((Nth,3));
for i in range(Nth):
    ai = alpha[i,0];
    if( ai > 0 ):
        t = ai*a3 + (1.0-ai)*a2end/lena2;
    else:
        aia = abs(ai)
        t = aia*(-a3) + (1.0-aia)*a2end/lena2;
    lent = np.sqrt(np.matmul(t.T,t));
    arc1[i:i+1,0:3] = 1.01*(lena2/lent)*t.T;
ax.plot3D( arc1[0:Nth,0:1].ravel(), arc1[0:Nth,1:2].ravel(), arc1[0:Nth,2:3].ravel(), "k-", lw=4, zorder=20.0 );
gdapy15_09
No description has been provided for this image
In [57]:
# gdapy15_10
# plots Fisher distribution for two different
# values of the precision parameter
 
print("gdapy15_10");

# independent variable x
Nx = 101;
xmin = -pi;
xmax = pi;
Dx = (xmax-xmin)/(Nx-1);
x = gda_cvec(xmin + Dx*np.linspace(0,Nx-1,Nx));
 
# precision parameters
k1=1.0;
k2=5.0;
 
# Fisher p.d.f.'s
p1 = (k1/(4.0*pi*sinh(k1)))*np.exp(k1*np.cos(x));
p2 = (k2/(4.0*pi*sinh(k2)))*np.exp(k2*np.cos(x));

# plot from -pi to pi so they look symmetrical,
# like a Normal distribution
fig1 = plt.figure(1,figsize=(12,5));
ax1=plt.subplot(1,1,1);
plt.axis( [-3.5, 3.5, 0.0, 1.0] );
plt.xlabel("theta");
plt.ylabel("p(theta)");
plt.plot( x, p1, "r-", lw=3);
plt.plot( x, p2, "b-", lw=3);
plt.plot( gda_cvec(-pi,-pi), gda_cvec(0.0, 0.1), "k-", lw=2);
plt.plot( gda_cvec(pi,pi), gda_cvec(0.0, 0.1), "k-", lw=2);

print("Caption: Two Fisher pdfs on a sphere, with large (blue) and");
print("small (red) precision parameters.");
gdapy15_10
Caption: Two Fisher pdfs on a sphere, with large (blue) and
small (red) precision parameters.
No description has been provided for this image
In [58]:
# gdapy15_11
 
# example of estimating central vector of Fisher distribution
# using principle of Maximum Liklihood.  The data are P-axes of
# moment tensors of deep earthquakes on the Kurile-Kamchatka
# subduction zone.  The boostrap method is used to compute
# confidence limits of the aximuth and plunge of the central vectorgdaFsparse
 
print("gdapy15_11");

# plot set-up
fig1 = plt.figure(1,figsize=(8,8));
ax1=plt.subplot(1,1,1);
plt.axis([-1.5, 1.5, -1.5, 1.5]);
 
# draw stereonet
# steronet angles:
#     theta, angle E of N, so theta=0 is north
#     phi, vertical angle, 0 rhi=0 when down
theta = gda_cvec((2*pi/360)*np.linspace(0,360,361));
plt.plot( np.sin(theta), np.cos(theta), "k", lw=3 );
theta = (pi/180.0)*0.0;
plt.text( 1.05*sin(theta), 1.05*cos(theta), "N" );
theta = (pi/180.0)*90.0;
plt.text( 1.05*sin(theta), 1.05*cos(theta), "E" );
plotgrid=1;
if( plotgrid == 1 ):
    phi = (pi/180.0)*gda_cvec(30.0, 60.0);
    theta = gda_cvec((2.0*pi/360.0)*np.linspace(0,360,361));
    for p in phi.ravel():
        plt.plot(gda_stereo(p)*np.sin(theta),gda_stereo(p)*np.cos(theta),"k--",lw=2);
    theta = gda_cvec((pi/180.0)*np.linspace(0,330,12));
    r = gda_cvec(np.linspace(0,1,2));
    for t in theta.ravel():
        plt.plot( r*sin(t), r*cos(t), 'k--', lw=2 );

# load data from file

X = np.genfromtxt("../Data/moments.txt", delimiter="\t");
N,i = np.shape(X);
# file: lon lat depth mrr mtt mpp mrt mrp mtp iexp
#      1   2   3     4   5   6   7   8   9   10
# moment tensor elements as read from file:
RR=X[0:N,3:4]; TT=X[0:N,4:5]; PP=X[0:N,5:6];
RT=X[0:N,6:7]; RP=X[0:N,7:8]; TP=X[0:N,8:9];
 
# restrict calculation to data in a specific depth range
dmin=300; dmax=600; # depth range
kk=np.where( (X[0:N,2:3]>=dmin) & (X[0:N,2:3]<=dmax) );
j=gda_cvec(kk[0]);
N,i = np.shape(j);
Pgs=np.zeros((N,3));
 
# plot all vectors
k=0;
for i in j.ravel(): # loop over earthquakes
    
    # I'm using an [up, east, north] coordinate system
    # the CMT is in [R=up, T=south, P=east] coordinate system
    M = np.zeros((3,3)); # moment tensor
    M[0,0]=RR[i,0]; M[1,1]=PP[i,0]; M[2,2]=TT[i,0];
    M[0,1]=RP[i,0]; M[1,0]=RP[i,0];
    M[0,2]=-RT[i,0]; M[2,0]=-RT[i,0];
    M[1,2]=-TP[i,0]; M[2,1]=-TP[i,0];

    # extract eigenvectors.  Note ue of eigh() instead of eig(),
    # because M is symmetric.  Note eigh() sorts eigenvalues, whereas
    # eig() does not
    D,V = la.eigh(M);
 
    Pg = V[0:3,0:1]; # P-axis in [up, east, north] coordinate system, is third eigenvector
    if( Pg[0,0] > 0.0 ): # want axis to point down
        Pg=(-Pg);

    # save in array
    Pgs[k:k+1,0:3] = Pg.T;
    k = k+1;
    
    # angle east of north, north=0
    theta = atan2( Pg[1,0], Pg[2,0] );
    # vertical angle from down, down=0
    phi = atan( sqrt(Pg[1,0]**2 + Pg[2,0]**2)/ (-Pg[0,0]) );
    
    r = gda_stereo(phi);
    
    plt.plot( r*sin(theta), r*cos(theta), "ko", lw=4 );

# estimate central vector

Pgsum = gda_cvec(np.sum(Pgs,axis=0));
normsq = np.matmul(Pgsum.T,Pgsum); normsq=normsq[0,0];
norm = sqrt(normsq);
Pgsum = Pgsum/norm;

theta = atan2( Pgsum[1,0], Pgsum[2,0] );
phi = atan( sqrt(Pgsum[1,0]**2 + Pgsum[2,0]**2)/(-Pgsum[0,0]));
r = gda_stereo(phi);
plt.plot( r*sin(theta), r*cos(theta), "ro", lw=4 );

# bootstrap estimates saved in these arrays
Nboot=10000;
thetaboot=np.zeros((N,1));
phiboot=np.zeros((N,1));
 
# bootstrap calculation: resample data with duplications,
# and compute central vector estimate of each
for i in range(N):
    # resample
    Pboot = Pgs[np.random.randint(low=0,high=N,size=(N,)),0:3];
    # central vector calculation
    Pbootsum = gda_cvec(np.sum(Pboot,axis=0));
    normsq = np.matmul(Pbootsum.T,Pbootsum); normsq=normsq[0,0];
    norm = sqrt(normsq);
    Pbootsum = Pbootsum/norm;
    # convert to angles
    thetaboot[i,0] = atan2( Pbootsum[1,0], Pbootsum[2,0] );
    phiboot[i,0] = atan( sqrt(Pbootsum[1,0]**2 + Pbootsum[2,0]**2)/(-Pbootsum[0,0]));
    # save, plot as cyan dot
    r = gda_stereo(phiboot[i,0]);
    plt.plot( r*sin(thetaboot[i,0]), r*cos(thetaboot[i,0]), "c.", lw=2 );

# replot estimate, because dots from bootstrap obscure its symbol
r = gda_stereo(phi);
plt.plot( r*sin(theta), r*cos(theta), "ro", lw=4 );
plt.show();
print("Caption: Earthquake P-axes (cirlces) for a subduction zone displayed on a");
print("stereo net, with the Fisher estimate of the mean angle (red) and bootstrap");
print("confidence region (blue).");
print("  ");

# compute standard deviation as a quick estimate of confidence intervals
sigmatheta = np.std(thetaboot);
sigmaphi = np.std(phiboot);
 
# display results
print("Maximum Liklihood Estimate");
print("azimuthal angle (North=0), theta = %.2f +/- %.2f (2 sigma)" % (180.0*theta/pi, 2.0*180.0*sigmatheta/pi) );
print("vertical angle  (Down=0),  phi   = %.2f +/- %.2f (2 sigma)" % (180.0*phi/pi, 2.0*180.0*sigmaphi/pi) );
gdapy15_11
No description has been provided for this image
Caption: Earthquake P-axes (cirlces) for a subduction zone displayed on a
stereo net, with the Fisher estimate of the mean angle (red) and bootstrap
confidence region (blue).
  
Maximum Liklihood Estimate
azimuthal angle (North=0), theta = -74.37 +/- 9.00 (2 sigma)
vertical angle  (Down=0),  phi   = 42.12 +/- 7.81 (2 sigma)
In [59]:
# gdapy15_12
 
# model Mars rover Mosbauer spectra using both
# Gaussian and Lorentzian curves, fit via nonlinear
# least squares (Newton's Method).

# Note: The code requires you to click on the bottom of the
# peaks, and when you're done, to click to the left of the
# x-axis. This process defines the number of peaks and their
# starting positions and amplitudes.

#  interactive graphics from withing the Jupyter Notebook is a bit dicey
# if METHOD=1 fails, try METHOD=2 (or vice-versa)
print("gdapy15_12");

METHOD=1;

# get current backend
if( METHOD==1):
    bkend = matplotlib.get_backend();
    
#  use interactive plotting
if( METHOD==1 ):
    matplotlib.use('QtAgg');
else:
    %matplotlib qt5
    
# load data
D = np.genfromtxt("../Data/mars_soil.txt", delimiter="\t");
N,M = np.shape(D);
v=np.copy(D[0:N,0:1]);
d=np.copy(D[0:N,1:2]);
d=d/np.max(d); # normalize

# delete negative velocities
r = np.where(v>0.0); # find first index of t greater than tc
k = r[0];
v=v[k,0:1];
d=d[k,0:1];
N,M = np.shape(d);

# plot data

fig99 = plt.figure(99,figsize=(8,4)); # interactive figure
ax1 = plt.subplot(1,1,1);
plt.axis( [np.min(v)-1.0, np.max(v), np.min(d), np.max(d)] );
plt.plot(v,d,"r-",lw=2);
plt.plot(v,d,"ro",lw=3);
plt.xlabel("velocity, mm/s");
plt.ylabel("counts");
plt.title("click bottom of each peak, then left of v=0");

A = np.max(d);   # estimate of backgroind level
 
rtp = sqrt(2*pi);  # shorthand constant

# initialize arrays to hold peak info
MAXPEAKS=100;
a =  np.zeros((MAXPEAKS,1)); # amplitude a
v0 = np.zeros((MAXPEAKS,1)); # center position v0=
c =  np.zeros((MAXPEAKS,1)); # width c

# get approximate peak position and height from mouse click
K=0;
for k in range(MAXPEAKS):
    p = plt.ginput(1);  # returns list of (x,y) tupples
    if( p[0][0] < 0 ):
        break;
    a[k,0] = p[0][1]-A;   # amplitude a, center position v0, width c
    v0[k,0]= p[0][0];
    c[k,0]=0.1;
    temp = np.power(v-v0[k,0],2) + c[k,0]**2;        # plot one peak
    dpre = A+np.divide( a[k,0]*(c[k,0]**2), temp );
    plt.plot(v,dpre,"b:",lw=1);
    plt.draw();  # must draw to update plot
    K=K+1; # peak counter

plt.show();
plt.close();     # close interactive figure

# switch back to standard plotting
if( METHOD==1 ):
    matplotlib.use(bkend);
else:
    %matplotlib inline

# truncate arrays to actual number of clicks
print("%d peaks identfied" % (K));
a =  a[0:K, 0:1];
v0 = v0[0:K, 0:1];
c =  c[0:K, 0:1];

# save for Gaussian calculation
asave = np.copy(a);
v0save = np.copy(v0);
csave = np.copy(c);

# lorentzian curve of peak amplitude a, center velocity v0 and width c
# f(v) = a c^2 / ( (v-v0)^2 + c^2) )
# derivatives of Lorentzian (needed for Newton's method)
# df/da = c^2 / ( (v-v0)^2 + c^2) )
# df/dv0 = 2 a c^2 (v-v0) / ( (v-v0)^2 + c^2) )^2
# df/dc = 2 a c / ( (v-v0)^2 + c^2) ) - a c^3 / ( (v-v0)^2 + c^2) )^2
# 3 model parameters per lorentzian, (a, v0, c)

# Normal curve of peak amplitude a, center velocity v0 and width c
# f(v)   = ( a / ( sqrt(2 pi) c )) exp( -0.5 * (v-v0)^2 c^-2 )
# df/da  = ( 1 / ( sqrt(2 pi) c )) exp( -0.5 * (v-v0)^2 c^-2 )
# df/dv0 = ( a / ( sqrt(2 pi) c )) ((v - v0 )/(c^2)) exp( -0.5 * (v-v0)^2 c^-2 )
# df/dc =  ( a / ( sqrt(2 pi) c^2 )) (((v - v0)^2/(c^2))-1) exp( -0.5 * (v-v0)^2 c^-2 )
# 3 model parameters per Normal curve, (a, v0, c)

# model parameters
M=K*3;
# starting solution
m = np.concatenate( (a,v0,c), axis=0 );

# Newtons' method to determine lorentzian model parameters
MAXITER=20;
for iter in range(MAXITER):
    
    # predict data based on current estimate of (a,v0,c)
    dpre = A*np.ones((N,1));
    for i in range(K):
        temp = np.power(v-m[K+i,0],2) + m[2*K+i,0]**2;
        dpre = dpre + np.divide( m[i,0]*(m[2*K+i,0]**2), temp );

    # linearized data kernel (derivatives of dpre with respect to (a,v0,c))
    G=np.zeros((N,M));
    for i in range(K):
        temp = np.power(v-m[K+i,0],2) + m[2*K+i,0]**2;
        temp2 = np.power(temp,2);
        G[0:N,i:i+1] =          np.divide(m[2*K+i,0]**2, temp);                           # d/da
        G[0:N,K+i:K+i+1] =      2.0*m[i,0]*(m[2*K+i,0]**2)*np.divide(v-m[K+i,0], temp2);  # d/dv0
        temp3 = np.divide(2.0*m[i,0]*m[2*K+i,0],temp);
        temp4 = np.divide(2.0*m[i,0]*(m[2*K+i,0]**3),temp2);
        G[0:N:,2*K+i:2*K+i+1] = temp3 - temp4;                                            # d/dc

    # deviations in data
    dd = d - dpre;
    
    # total error
    E = np.matmul( dd.T, dd );
          
    # updated model
    GTG = np.matmul(G.T,G) + 0.001*np.identity(M);
    GTd = np.matmul(G.T,dd);
    dm = la.solve(GTG,GTd);
    mold=np.copy(m);
    m = m+dm;
    
    
    # least squares error
    if( iter==0 ):
        E0=E;
    
# final predicted data
dpre = A*np.ones((N,1));
for i in range(K):
    temp = np.power(v-m[K+i,0],2) + m[2*K+i,0]**2;
    dpre = dpre + np.divide(m[i,0]*(m[2*K+i,0]**2), temp );
    
# final error
dd = d - dpre;
E = np.matmul( dd.T, dd ); E=E[0,0];

# save lorentzian info
mL = np.copy(m);
dpreL = np.copy(dpre);
EL = E;

# plot data and prediction
fig2 = plt.figure(2,figsize=(8,4)); # non-interactive figure
ax1 = plt.subplot(1,1,1);      
plt.axis( [np.min(v), np.max(v), np.min(d), np.max(d)] );
plt.plot(v,d,"r-",lw=2);
plt.plot(v,d,"ro",lw=3);
plt.xlabel('velocity, mm/s');
plt.ylabel('counts');

plt.plot(v,dpreL, "g-",lw=3);

# Newtons' method to determine Gaussian model parameters
# MATLAB(R): m = [a'.*(rtp.*c'), v0', c']';
m = np.concatenate( (rtp*np.multiply(asave,csave),v0save,csave), axis=0 );
MAXITER=20;
for iter in range(MAXITER):
    # predict data based on current estimate of (a,v0,c)
    dpre = A*np.ones((N,1));
    for i in range(K):
        a0 = m[i,0];     # amplitude
        v0 = m[K+i,0];   # position
        c0 = m[2*K+i,0]; # std dev
        c02r = 1.0/(c0**2);
        n0 = 1.0/(rtp*c0);
        dpre = dpre + a0*n0*np.exp(-0.5*np.power(v-v0,2)*c02r);

    # linearized data kernel (derivatives of dpre with respect to (a,v0,c))
    G=np.zeros((N,M));
    for i in range(K):
        a0 = m[i,0];     # amplitude
        v0 = m[K+i,0];   # position
        c0 = m[2*K+i,0]; # std dev
        c0r = 1.0/c0;
        c02r = 1.0/(c0**2);
        n0 = 1.0/(rtp*c0);
        f = np.exp(-0.5*np.power(v-v0,2)*c02r);
        G[0:N,i:i+1]=n0*f; # d/da
        temp1 = (v - v0)*c02r;
        G[0:N,K+i:K+i+1]=a0*n0*np.multiply(temp1,f);
        temp2 = np.power(v - v0,2)*c02r - np.ones((N,1));
        G[0:N,2*K+i:2*K+i+1] = a0*n0*c0r*np.multiply(temp2,f);

    # deviations in data
    dd = d - dpre;
    
    # total error
    E = np.matmul( dd.T, dd ); E=E[0,0];
          
    # updated model
    epsi=0.001; # was 0.001
    GTG = np.matmul(G.T,G) + epsi*np.identity(M);
    GTd = np.matmul(G.T,dd);
    dm = la.solve(GTG,GTd);
    mold=np.copy(m);
    m = m+dm;
    
    # least squares error
    if( iter==0 ):
        E0=E;
    
# final predicted data
dpre = A*np.ones((N,1));
for i in range(K):
    a0 = m[i,0];     # amplitude
    v0 = m[K+i,0];   # position
    c0 = m[2*K+i,0]; # std dev
    c02r = 1.0/(c0**2);
    n0 = 1.0/(rtp*c0);
    dpre = dpre + a0*n0*np.exp(-0.5*np.power(v-v0,2)*c02r);
    
# final error
dd = d - dpre;
E = np.matmul( dd.T, dd ); E=E[0,0];

# save Gaussian info
mG = np.copy(m);
dpreG = np.copy(dpre);
EG = E;
plt.plot(v,dpreG, "b-",lw=3);

plt.show();

print("Lorentzian Parameters");
for i in range(K):
    print("%d %f %f %f" % (i,mL[i,0],mL[K+i,0],mL[2*K+i,0]) );
print("Lorentzian: final rms error: %f" % (sqrt(EL/N)) );
print(" ");
print("Gaussian Parameters");
for i in range(K):
    print("%d %f %f %f" % (i,mG[i,0],mG[K+i,0],mG[2*K+i,0]) );
print("Gaussian:   final rms error: %f" % (sqrt(EG/N)) );
print(" ");
gdapy15_12
10 peaks identfied
No description has been provided for this image
Lorentzian Parameters
0 -0.102593 1.759097 0.407391
1 -0.295406 3.104726 0.114698
2 -0.110407 3.617962 0.194304
3 -0.310497 4.274460 0.129160
4 -0.234740 5.454255 0.132734
5 -0.190866 6.343239 0.115596
6 -0.312464 7.541285 0.126498
7 -0.099213 6.845976 0.156691
8 -0.335230 8.733409 0.157521
9 -0.124274 10.480350 0.332168
Lorentzian: final rms error: 0.012264
 
Gaussian Parameters
0 -0.150644 1.937038 0.743166
1 -0.070712 3.101658 0.103610
2 -0.077040 3.624974 0.238095
3 -0.106454 4.279952 0.149182
4 -0.099296 5.462718 0.208316
5 -0.066506 6.327125 0.149660
6 -0.107642 7.543191 0.150298
7 -0.055329 6.868416 0.188404
8 -0.138156 8.724868 0.185961
9 -0.131487 10.422882 0.562367
Gaussian:   final rms error: 0.025199
 
In [60]:
# gdapy15_13
 
# tidal periods using Fast Fourier Transform
# to compute amplitude spectrum of sea height time series
 
print("gdapy15_13");

# load data
D = np.genfromtxt("../Data/tides.txt", delimiter="\t");
N,i = np.shape(D);
N = 2*int(N/2);   #  truncate to an even number of points
d = D[0:N,1:2];   #  sea surface in feet sampple every hour
d = d - np.mean(d); # demean
 
# t-axis in days
Dt = 1.0/24.0;
t = gda_cvec(Dt*np.linspace(0,N-1,N));
tmin=0.0;
tmax=Dt*(N-1);
 
fig1 = plt.figure(1,figsize=(12,8));
ax1=plt.subplot(1,1,1);
plt.axis( [0, 365, -10, 10]);
plt.plot(t,d,"k-",lw=2);
plt.xlabel("time (days)");
plt.ylabel("sea height (ft)");
plt.show();
print("Caption: Sea surface height time series (compete).");

fig2 = plt.figure(2,figsize=(12,8));
ax1=plt.subplot(1,1,1);
plt.axis( [150, 210, -5, 5]);
plt.plot(t,d,"k-",lw=2);
plt.xlabel("time (days)");
plt.ylabel("sea height (ft)");
plt.show();
print("Caption: Sea surface height time series (short).");

# frequency axis, cycles per day
fmax = 1.0/(2.0*Dt);
Df = fmax/(N/2);
M = int(N/2+1);  # non-negative frequencies
f = gda_cvec(Df*np.linspace(0,M-1,M));
p = np.zeros((M,1)); # periods
# correspondind periods (except zero frequence)
p[1:M,0:1] = np.reciprocal(f[1:M,0:1]);   
 
# hamming taper, a bell-shaped curve
a0 = 0.53836;
a1 = 0.46164;
ham = gda_cvec( a0 - a1 * np.cos( 2.0*pi*np.linspace(0,N-1,N)/(N-1)) );
 
fig3 = plt.figure(3,figsize=(12,8));
ax1=plt.subplot(1,1,1);
plt.axis( [tmin, tmax, 0, 1.1]);
plt.plot(t,ham,"k-",lw=2);
plt.xlabel('t');
plt.ylabel('w(t)');
plt.show();
print("Caption: Hamming taper.");

# amplitude spectrim
dp = np.multiply( ham, d-np.mean(d)); # remove mean and hamming taper

m = np.fft.fft(dp, axis=0); # fourier transform. Note axis=0 necessary
# compute a.s.d. Note normalization of sqrt(2/N) is appropriate for
# stationary timeseries
s = sqrt(2.0/N)*np.abs(m[0:M,0:1]);

fig4 = plt.figure(4,figsize=(12,8));
ax1=plt.subplot(1,1,1);
plt.axis( [0.0, 2.0, 0.0, 1.05*np.max(s) ]);
plt.plot(p[1:M,0:1],s[1:M,0:1],"k-",lw=2);
plt.xlabel("period (days)");
plt.ylabel("amplitude spectral density");
plt.show();
print("Caption: Amplitude spectral amplitude as a function of period.");
gdapy15_13
No description has been provided for this image
Caption: Sea surface height time series (compete).
No description has been provided for this image
Caption: Sea surface height time series (short).
No description has been provided for this image
Caption: Hamming taper.
No description has been provided for this image
Caption: Amplitude spectral amplitude as a function of period.
In [77]:
# gdapy15_14
 
# earthquake location example
# rectangular earth volume, straight line rays
# data are travel times of P and S waves
 
# #note: the problem is formulated here so that all
# the earthquakes are located simultaneously, with
# a single data kernel.  Thus allows for the possibility
# of later adding constraints thet involve several earthquakes
 
print("gdapy15_14");

gdaepsilon=1.0e-6;
 
# material constants
vpvs = 1.78;
vp=6.5;
vs=vp/vpvs;
 
# bounds of box
xmin=-10.0;
xmax=10.0;
ymin=-10.0;
ymax=10.0;
zmin=-10.0;
zmax=0.0;

# setup for 3D figure
fig1 = plt.figure(1,figsize=(12,12));
ax = fig1.add_subplot(111, projection='3d');
plt.axis('off');
plt.grid(b=None);
ax.axes.set_xlim3d(left=xmin, right=xmax);
ax.axes.set_ylim3d(bottom=ymin, top=ymax);
ax.axes.set_zlim3d(bottom=zmin, top=zmax);
 
# 3D box
gdabox2(ax,xmin,xmax,ymin,ymax,zmin,zmax);

# stations
xv = gda_cvec( -8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0 );
yv = gda_cvec( -8.0, -6.0, -4.0, -2.0, 0.0, 2.0, 4.0, 6.0, 8.0 );
Nx,i = np.shape(xv);
Ny,i = np.shape(yv);
sxm = np.matmul(xv,np.ones((1,9)));
sym = np.matmul(np.ones((Nx,1)),yv.T);
Ns = Nx*Ny;

# plot stations
sx = np.reshape(sxm,(Ns,1) );
sy = np.reshape(sym,(Ns,1) );
sz = (zmax+0.001)*np.ones((Ns,1));
ax.plot3D( sx.ravel(), sy.ravel(), sz.ravel(), 'k+', lw=3 );

# earthquakes
Ne = 30;  # number of earthquakes
M = 4*Ne; # 4 model parameters, x, y, z, and t0, per earthquake
extrue = np.random.uniform(low=xmin,high=xmax,size=(Ne,1));
eytrue = np.random.uniform(low=ymin,high=ymax,size=(Ne,1));
eztrue = np.random.uniform(low=zmin,high=zmax,size=(Ne,1));
t0true = np.random.uniform(low=0.0,high=0.2,size=(Ne,1));
mtrue = np.concatenate( (extrue, eytrue, eztrue, t0true), axis=0);

Nr = Ne*Ns;   # number of rays, that is, earthquake-stations pairs
N = 2*Ne*Ns;  # data: P and S wave for each earthquake at each sta
              # P times stored first in data array
    
# true data
# traveltime = length of ray divided by velocity
# ray is straight line connecting earthquake and station
dtrue=np.zeros((N,1));
for i in range(Ns): # loop over stations
    for j in range(Ne): # loop over earthquakes
        dx = mtrue[j,0]-sx[i,0];
        dy = mtrue[Ne+j,0]-sy[i,0];
        dz = mtrue[2*Ne+j,0]-sz[i,0];
        r = sqrt( (dx**2) + (dy**2) + (dz**2) );
        k=i*Ne+j;
        dtrue[k,0]=r/vp+mtrue[3*Ne+j,0];
        dtrue[Nr+k,0]=r/vs+mtrue[3*Ne+j,0];

# observed data is true data plus random noise
sd = 0.1;
dobs=dtrue+np.random.normal(loc=0.0,scale=sd,size=(N,1));

# inital guess of earthquake locations is random
mest = np.concatenate( (
    np.random.uniform(low=xmin,high=xmax,size=(Ne,1)),
    np.random.uniform(low=ymin,high=ymax,size=(Ne,1)),
    np.random.uniform(low=zmin+2,high=zmax-2,size=(Ne,1)),
    np.random.uniform(low=-0.1,high=0.1,size=(Ne,1)) ), axis=0 );

# Geiger's method
for iter in range(10):
    # row-col-value method of constructing a sparse matrix
    Gr = np.zeros((4*N,1),dtype=int);
    Gc = np.zeros((4*N,1),dtype=int);
    Gv = np.zeros((4*N,1));
    Nel=0;
    dpre = np.zeros((N,1));
    for i in range(Ns): # loop over stations
        for j in range(Ne): # loop over earthquakes
            dx = mest[j,0]-sx[i,0];
            dy = mest[Ne+j,0]-sy[i,0];
            dz = mest[2*Ne+j,0]-sz[i,0];
            r = sqrt( (dx**2) + (dy**2) + (dz**2) );
            k=i*Ne+j;
            dpre[k]=r/vp+mest[3*Ne+j];
            dpre[Nr+k]=r/vs+mest[3*Ne+j,0];
            # gdaG(k,j) = dx/(r*vp);
            Gr[Nel,0] = k;
            Gc[Nel,0] = j;
            Gv[Nel,0] = dx/(r*vp);
            Nel=Nel+1;
            # gdaG(k,Ne+j) = dy/(r*vp);
            Gr[Nel,0] = k;
            Gc[Nel,0] = Ne+j;
            Gv[Nel,0] = dy/(r*vp);
            Nel=Nel+1;
            # gdaG(k,2*Ne+j) = dz/(r*vp);
            Gr[Nel,0] = k;
            Gc[Nel,0] = 2*Ne+j;
            Gv[Nel,0] = dz/(r*vp);
            Nel=Nel+1;
            # gdaG(k,3*Ne+j) = 1;
            Gr[Nel,0] = k;
            Gc[Nel,0] = 3*Ne+j;
            Gv[Nel,0] = 1.0;
            Nel=Nel+1;
            # gdaG(Nr+k,j) = dx/(r*vs);
            Gr[Nel,0] = Nr+k;
            Gc[Nel,0] = j;
            Gv[Nel,0] = dx/(r*vs);
            Nel=Nel+1;
            # gdaG(Nr+k,Ne+j) = dy/(r*vs);
            Gr[Nel,0] = Nr+k;
            Gc[Nel,0] = Ne+j;
            Gv[Nel,0] = dy/(r*vs);
            Nel=Nel+1;
            # gdaG(Nr+k,2*Ne+j) = dz/(r*vs);
            Gr[Nel,0] = Nr+k;
            Gc[Nel,0] = 2*Ne+j;
            Gv[Nel,0] = dz/(r*vs);
            Nel=Nel+1;
            # gdaG(Nr+k,3*Ne+j) = 1;
            Gr[Nel,0] = Nr+k;
            Gc[Nel,0] = 3*Ne+j;
            Gv[Nel,0] = 1.0;
            Nel=Nel+1;
    
    # truncate to actual length
    Gr = Gr[0:Nel,0:1];
    Gc = Gc[0:Nel,0:1];
    Gv = Gv[0:Nel,0:1];

    # build sparse matrix
    gdaGsparse=sparse.coo_matrix((Gv.ravel(),(Gr.ravel(),Gc.ravel())),shape=(N,M));

    # delete lists
    del Gr; del Gc; del Gv;
    
    # define linear operator needed for biconjugate gradient solver
    LO=las.LinearOperator(shape=(M,M),matvec=gda_dlsmul,rmatvec=gda_dlsmul);
    
    # solve using cdamped least squares
    dd = dobs-dpre;
    GTd = gda_dlsrhs(dd);
    tol = 1e-12;     # tolerance
    maxit = 3*(N+M); # maximum number of iterations allowed
    # solve least squares problem Fm=f by biconjugate gradients
    q=las.bicg(LO,GTd,rtol=tol,maxiter=maxit);
    if( q[1] != 0 ):
        print('Warning: bicg() failed');
    dm = gda_cvec(q[0].astype(float));
    
    # update
    mest = mest+dm;
    
# final predicted data
dpre=np.zeros((N,1));
for i in range(Ns): # loop over stations
    for j in range(Ne): # loop over earthquakes
        dx = mest[j,0]-sx[i,0];
        dy = mest[Ne+j,0]-sy[i,0];
        dz = mest[2*Ne+j,0]-sz[i,0];
        r = sqrt( (dx**2) + (dy**2) + (dz**2) );
        k=i*Ne+j;
        dpre[k,0]=r/vp+mest[3*Ne+j,0];
        dpre[Nr+k,0]=r/vs+mest[3*Ne+j,0];
        
# display summary of results
expre = mest[0:Ne,0:1];
eypre = mest[Ne:2*Ne,0:1];
ezpre = mest[2*Ne:3*Ne,0:1];
t0pre = mest[3*Ne:4*Ne,0:1];
dd = dobs-dpre;
E = np.matmul(dd.T,dd); E=E[0,0];
print("RMS traveltime error: %f" % (sqrt(E/N)) );

Emx = np.matmul( (extrue-expre).T, extrue-expre); Emx=Emx[0,0];
Emy = np.matmul( (eytrue-eypre).T, eytrue-eypre); Emy=Emy[0,0];
Emz = np.matmul( (eztrue-ezpre).T, eztrue-ezpre); Emz=Emz[0,0];
Emt = np.matmul( (t0true-t0pre).T, t0true-t0pre); Emt=Emt[0,0];
print("RMS model misfit: x %.4f y %.4f z %.4f t0 %.4f" %
           (sqrt(Emx/Ne), sqrt(Emy/Ne), sqrt(Emz/Ne), sqrt(Emt/Ne)) );

# plot results in 3D space
ax.plot3D( extrue.ravel(), eytrue.ravel(), eztrue.ravel(), "ro", lw=4 );
dx = 0.2;
dy = 0.2;
dz = 0.5;
for i in range(Ne):
    ax.plot3D( gda_cvec(expre[i,0]-dx, expre[i,0]+dx).ravel(),
               gda_cvec(eypre[i,0], eypre[i,0]).ravel(),
               gda_cvec(ezpre[i,0], ezpre[i,0]).ravel(), "g-", lw=2 );
    ax.plot3D( gda_cvec(expre[i,0], expre[i,0]).ravel(),
               gda_cvec(eypre[i,0]-dy, eypre[i,0]+dy).ravel(),
               gda_cvec(ezpre[i,0], ezpre[i,0]).ravel(), 'g-', lw=2 );
    ax.plot3D( gda_cvec(expre[i,0], expre[i,0]).ravel(),
               gda_cvec(eypre[i,0], eypre[i,0]).ravel(),
               gda_cvec(ezpre[i,0]-dz, ezpre[i,0]+dz).ravel(), 'g-', lw=2 );
plt.show();
print("Caption: Earthquake location problem with stations (black circles),");
print("true eartquake locations (red) and estimated earthquake locations (green).");
gdapy15_14
RMS traveltime error: 0.101280
RMS model misfit: x 0.1195 y 0.0953 z 0.4504 t0 0.0296
No description has been provided for this image
Caption: Earthquake location problem with stations (black circles),
true eartquake locations (red) and estimated earthquake locations (green).
In [78]:
# gdapy12_15
 
# determine velocity structure v(x)=v0+v1(x)
# from frequencies of vibration
 
# unperturbed differetial equation -w^2 p = [v0+v1(x)]^2 d2p/dx2
# boundary conditions top: p=0 at x=0; bottom dp/dx=0 at x=h
 
# unperturbed solutions are analytic
# p(n,x) = A sin( (n-0.5) pi x / h ) n=1,2,3,...
# w(n) = (n-0.5)*pi*v0/h
# normalization so that (integral 0 to h) p(n,x) p(m,x) v0^(-2) dx = delta(n,m)
# A = sqrt( 2 v0^2 / h )
 
print("gdapy12_15");

# independent variable x
h=2.0;
v0=5.0;
Nx = 101;
xmin=0.0;
xmax=h;
Dx = (xmax-xmin)/(Nx-1);
x = gda_cvec(xmin + Dx*np.linspace(0,Nx-1,Nx));
 
# plot some wave p's and check normalization
fig1 = plt.figure(1,figsize=(12,5));

verbose=0;
if( verbose ):
    print("normalization (should be unity)");

for n in range (5):
    ax1=plt.subplot(5,1,n+1);

    p = sqrt(2.0*(v0**2)/h) * np.sin( ((n+1)-0.5) * pi * x / h );
    plt.plot(x,p,'k-',lw=3);
    I = Dx*np.sum(np.power(p,2))/(v0**2);
    if( verbose ):
        print("    n %d area %f" % (n,I) );
    plt.xlabel('x');
    plt.ylabel("p%d(x)"%(n));
                  
plt.show();
print("Caption, The first several eigenfunctions.");

# build a true v1
v1true = np.zeros((Nx,1));
v1true[19:30,0:1] = v1true[19:30,0:1] - (v0/10);
v1true[59:90,0:1] = v1true[59:90,0:1] + (v0/10);
v1true[69:80,0:1] = v1true[69:80,0:1] + (v0/10);

# unperturbed frequencies
N=200;
w0 = gda_cvec((np.linspace(1,N,N))-0.5)*pi*v0/h;

# perturbed frequencies
w1true = np.zeros((N,1));
for n in range(N):
    p = sqrt(2.0*(v0**2)/h) * np.sin( ((n+1)-0.5) * pi * x / h );
    w1true[n,0]=w0[n,0]*Dx*np.sum(np.multiply(v1true,np.power(p,2)))/(v0**3);

# observations are perturbed frequencies plus random noise
sigmad = 0.1*w0[0,0];
dobs = w1true + np.random.normal(loc=0.0,scale=sigmad,size=(N,1));

# ladder diagram; plot only a few frequencies
Nfew=10;
fig3 = plt.figure(3,figsize=(12,5));
ax1=plt.subplot(1,1,1);
plt.axis( [0, np.max(w0[0:Nfew,0:1]), 0.0, 1.0] );
for n in range(Nfew):
    plt.plot( gda_cvec(w0[n,0],w0[n,0]), gda_cvec(0.0,1.0) ,"k-", lw=3);
    plt.plot( gda_cvec(w0[n,0]+w1true[n,0], w0[n,0]+w1true[n,0]), gda_cvec(0.0,0.8) ,'r-', lw=3);
    plt.plot( gda_cvec(w0[n,0]+dobs[n,0],w0[n,0]+dobs[n,0]), gda_cvec(0.0,0.6) ,"g-", lw=3);
plt.xlabel("angular frequency w");
plt.show();
print("Caption: Ladder diagram showing unperturbed eigenfrequencies (black),");
print("true eigenfrequencies (red) and observed eigenfrequencies (green).");

# model parameters are velocity perturbations
M=Nx;
mest = np.zeros((M,1));
 
# data kernel is integral linking them
if( 0 ): # brute force way
    G = np.zeros((N,M));
    for i in range(N):
        for j in range(M):
            pij = sqrt(2.0*(v0**2)/h) * np.sin(((i+1)-0.5)*pi*x[j,0]/h);
            G[i,j] = Dx*w0[i,0]*(pij**2)/(v0**3);
else: # tensor product way
    w = gda_cvec(np.linspace(1,N,N));
    pijm = sqrt(2.0*(v0**2)/h) * np.sin( pi * np.matmul((w-0.5),x.T) / h );
    G = Dx*np.multiply(w0*np.ones((1,M)),np.power(pijm,2))/(v0**3);
    
# damped least squares solution, but weight lower frequencies more
e2 = 1.0e-6;
wr = gda_cvec(np.reciprocal(np.linspace(1,M,M)));
GTG = np.matmul(G.T,G) + e2*np.diag(wr.ravel());
GMG = np.matmul( la.inv(GTG), G.T);
mest = np.matmul(GMG,dobs);

R = np.matmul(GMG,G);

fig2 = plt.figure(2,figsize=(12,5));
ax1=plt.subplot(1,1,1);
plt.axis( [xmin, xmax, 0.0, 10.0] );
plt.plot( x, v0+v1true, "k-", lw=3 );
plt.plot( x, v0+mest, "r-", lw=3 );
plt.xlabel("x");
plt.ylabel("velocity m(x)");
plt.show();

gda_draw("title G",G,"title R",R);
print("Caption: (Left) Data kernel G, (Right) Model resolution matrix.");
gdapy12_15
No description has been provided for this image
Caption, The first several eigenfunctions.
No description has been provided for this image
Caption: Ladder diagram showing unperturbed eigenfrequencies (black),
true eigenfrequencies (red) and observed eigenfrequencies (green).
No description has been provided for this image
No description has been provided for this image
Caption: (Left) Data kernel G, (Right) Model resolution matrix.
In [ ]: