In [11]:
# gdapy02_00 clears all variables and imports various modules
# you must run this cell first, before running any of the others

# note that these examples use inverse theory techniques that
# are only covered in later chapters of the book

# History:
# 2022/11/14 - Created by W. Menke, using third edition MATLAB script as template
# 2024/05/23 -  Patches to accommodate new versions of numpy, scipy, matplotlib
#               patched dot product to return scalar value
#               patched front end for interactive graphics

# A reset deletes any previously-created variables from memory
%reset -f

# import various packages

import numpy as np                    # matrices & etc
from matplotlib import pyplot as plt  # general plotting 
from matplotlib import patches        # plotting shapes
from datetime import date             # date and time arithmetic
from math import exp, pi, sin, cos, tan, sqrt, floor   # math functions
import scipy.linalg as la             # linear algebra functions
import os                             # operating system
import matplotlib.cm as cm
from matplotlib.colors import ListedColormap
import scipy.signal as sg
import matplotlib

# function to make a numpy N-by-1 column vector
# c=gda_cvec(v1, v2, ...) from a list of several
# array-like entities v1, v2, ..., including a number,
# a list of numbers, a tuple of numbers, an N-by-0 np array
# and an N-by-1 np array. The function
# also ensures that, if v is an np array,
# c is a copy, as contrasted to a view, of it.
# It promotes integers to floats, and integers
# and floats to complex, by context.
# This version concatenates many arguments,
# whereas c=gda_cvec1(v1) takes just one argument.
# I recommend always using gda_cvec(v1, v2, ...)
def gda_cvec(*argv):
    t = int;
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        Nt = Nt + N;
        if( N==0 ):
            continue; # skip vector of zero length
        if (t==int) and isinstance(v[0,0],float):
            t=float;
        elif isinstance(v[0,0],complex):
            t=complex;
    w = np.zeros((Nt,1),dtype=t);
    Nt = 0;
    for a in argv:
        v = gda_cvec1(a);
        N,M = np.shape(v);
        w[Nt:Nt+N,0:1] = v;  # patch 20230418 was #w[Nt:Nt+N,0] = v[0:N,0];
        Nt = Nt + N;
    return w;

# function to make a numpy N-by-1 column vector
# c=gda_cvec1(v) from an entity v that is array-like,
# including a number, a list of numbers, a tuple
# of numbers, an N-by-0 np array and an N-by-1 np array.
# It promotes integers to floats, and integers
# and floats to complex, by context. The function
# also ensures that, if v is an np array,
# c is a copy, as contrasted to a view, of it.
# This version takes just one input argument,
# whereas c=gda_cvec(v1,v2,...) concatenates
# many arguments.
def gda_cvec1(v):
    if isinstance(v, int) or isinstance(v, np.int32):
        w = np.zeros((1,1),dtype=int);
        w[0,0] = v;
        return w;
    elif isinstance(v, float):
        w = np.zeros((1,1),dtype=float);
        w[0,0] = v;
        return w;
    elif isinstance(v, complex):
        w = np.zeros((1,1),dtype=complex);
        w[0,0] = v;
        return w;
    elif isinstance(v, np.ndarray):
        s = np.shape(v);
        if len(s) == 1:
            return np.copy(np.reshape(v,(s[0],1)));
        else:
            [r,c]=s;
            if( c==1 ):
                return(np.copy(v));
            elif(r==1):
                return(np.copy(v.T));
            else:
                raise TypeError("gda_cvec: %d by %d ndarray not allowed" % (r, c));
    elif isinstance(v, list):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: list contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(v); # patch v -> np.array(v)
        return w;
    elif isinstance(v, tuple):
        r = len(v);
        t = int;
        for vi in v:
            if isinstance(vi,int) or isinstance(vi, np.int32): #patch v->vi 20230418
                pass;
            elif isinstance(vi,float):
                t=float;
            elif isinstance(vi,complex):
                t=complex;
                break;
            else:
                raise TypeError("gda_cvec: tuple contains unsupported type %s" % type(vi));
        w = np.zeros((r,1),dtype=t);
        w[:,0] = np.array(list(v)); # patch v -> np.array(list(v));
        return w;
    else:
        raise TypeError("gda_cvec: %s not supported" % type(v));

        
# gda_draw function makes a "pictorial matrix equation"
# arguments are vectors, matrices and strings
# which are plotted in the order that they appear
# except that strings starting with 'title ' are plotted
# under the subsequent matrix or vector
# always returns a status of 1
def gda_draw(*argv):
    DOCOLOR=True;
    if( DOCOLOR ):
        bwcmap = matplotlib.colormaps['jet'];
    else:
        bw = np.zeros((256,4));
        v = 0.9*(256 - np.linspace( 0, 255, 256 ))/255;
        bw[:,0] = v;
        bw[:,1] = v;
        bw[:,2] = v;
        bw[:,3] = np.ones(256);
        bwcmap = ListedColormap(bw);
    # size of plot
    W = 16;
    H = 4;
    fig1 = plt.figure(1);
    # figsize width and height in inches
    fig1.set_size_inches(W,H);
    ax1 = plt.subplot(1,1,1);
    plt.axis([0, W, -H/2, H/2]);
    plt.axis('off');
    LM = W/6;    # matrix width and height
    LV = W/40;   # vector width
    FS = 0.12;    # character width
    TO = 0.4;    # title vertical offset
    SP = 0.2;    # space between objects
    LS = 0.2;    # leading space
    p = LS; # starting x-position
    istitle=0; # flags presence of a title
    for a in argv:
        if isinstance(a,np.ndarray):
            sh = np.shape(a);
            if len(sh) == 1:  # conversion to nx1 array
                n = sh[0];
                m = 1;
                ap = a;
                a = np.zeros((n,1));
                a[:,0] = ap;
            else:
                n = sh[0];
                m = sh[1];
            if m==1:
                pold=p;
                left=p;
                right=p+LV;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LV;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
            else:
                pold=p;
                left=p;
                right=p+LM;
                bottom=-LM/2;
                top=LM/2;
                plt.imshow( a, cmap=bwcmap, vmin=np.min(a), vmax=np.max(a), extent=(left,right,bottom,top) );
                p = p+LM;
                pm = (p+pold)/2;
                if istitle:
                    plt.text(pm,-(LM/2)-TO,titlestr,horizontalalignment='center');
                    istitle=0;
                p = p+SP;
        elif isinstance(a,str):
            ns = len(a);
            istitle=0;
            if( ns>=6 ):
                if 'title ' in a[0:6]:
                    istitle=1;
                    titlestr=a[6:];
            if( istitle != 1):
                plt.text(p,0,a);
                p = p + ns*FS + SP;
    plt.show();
    return 1;

# bandpass filter, used in the seismological example, but handy
# in a variety of settings involving time series
def gda_chebyshevfilt(d, Dt, flow, fhigh):
    # (dout,u,v)=gda_chebyshevfilt(d, Dt, flow, fhigh);
    # chebyshev IIR bandpass filter
    # d - input array of data
    # Dt - sampling interval
    # flow - low pass frequency, Hz
    # fhigh - high pass frequency, Hz
    # dout - output array of data
    # u - the numerator filter
    # v - the denominator filter
    # these filters can be reused via dout=sg.lfilter(u.ravel(),v.ravel(),din);

    # make sure input timeseries is a column vector
    s = np.shape(d);
    N = s[0];
    if(N==1):
        dd = np.zeros((N,1));
        dd[:,0] = d;
    else:
        dd=d;
        
    # sampling rate
    rate=1/Dt;

    # ripple parameter, set to ten percent
    ripple=0.1;  

    # normalise frequency
    fl=2.0*flow/rate;
    fh=2.0*fhigh/rate;

    # center frequency 
    cf = 4 * tan( (fl*pi/2) ) * tan( (fh*pi/2) );

    # bandwidth
    bw = 2 * ( tan( (fh*pi/2) ) - tan( (fl*pi/2) ) );

    # ripple parameter factor
    rpf = sqrt((sqrt((1.0+1.0/(ripple*ripple))) + 1.0/ripple));
    a = 0.5*(rpf-1.0/rpf);
    b = 0.5*(rpf+1.0/rpf);

    u=np.zeros((5,1));
    v=np.zeros((5,1));
    theta = 3*pi/4;
    sr = a * cos(theta);
    si = b * sin(theta);
    es = sqrt(sr*sr+si*si);
    tmp= 16.0 - 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw - 4.0*bw*cf*sr + cf*cf;
    v[0,0] = 1.0;
    v[1,0] = 4.0*(-16.0 + 8.0*bw*sr - 2.0*bw*cf*sr + cf*cf)/tmp;
    v[2,0] = (96.0 - 16.0*cf - 8.0*es*es*bw*bw + 6.0*cf*cf)/tmp;
    v[3,0] = (-64.0 - 32.0*bw*sr + 8.0*bw*cf*sr + 4.0*cf*cf)/tmp;
    v[4,0] = (16.0 + 16.0*bw*sr + 8.0*cf + 4.0*es*es*bw*bw + 4.0*bw*cf*sr + cf*cf)/tmp;
    tmp = 4.0*es*es*bw*bw/tmp;
    u[0,0] = tmp;
    u[1,0] = 0.0;
    u[2,0] = -2.0*tmp;
    u[3,0] = 0.0;
    u[4,0] = tmp;

    dout = sg.lfilter(u.ravel(),v.ravel(),dd.ravel());
    return (gda_cvec(dout),gda_cvec(u),gda_cvec(v));
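
# A minimal usage sketch of gda_chebyshevfilt (illustrative, not part of the
# original setup); the values below are hypothetical, chosen only for this demo
Dt_demo = 0.01;                          # sampling interval, s
N_demo = 256;                            # number of samples
spike = np.zeros((N_demo,1));            # unit spike timeseries
spike[N_demo//2,0] = 1.0;
spike_f, u_demo, v_demo = gda_chebyshevfilt(spike, Dt_demo, 1.0, 10.0);  # 1-10 Hz bandpass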
In [12]:
# gdapy02_01
# straight-line fit to global temperature data

# data from:
# Hansen, J., Mki. Sato, R. Ruedy, K. Lo, D.W. Lea, and M. Medina-Elizade,
# 2006: Global temperature change. Proc. Natl. Acad. Sci., 103, 14288-14293,
# doi:10.1073/pnas.0606291103. 

# load rectangular, tab-separated table of numbers into a N by M matrix D
D = np.genfromtxt("../Data/global_temp.txt", delimiter="\t");
N, M = np.shape(D);

t = np.copy( D[0:N,0:1] );  # time in column 1
d = np.copy( D[0:N,1:2] );  # temperature in column 2

M=2;                          # two model parameters, intercept and slope
G = np.zeros((N,M));          # start with zeros
G[0:N,0:1] = np.ones((N,1));  # first column of G
G[0:N,1:2] = t;               # second column of G

# The concatenate method is an alternative
# G = np.concatenate( (np.ones((N,1)),t), axis=1);

# solve inverse problem
GTG = np.matmul(G.T,G);    # G-transpose G
GTd = np.matmul(G.T,d);   # G-transpose d
m = la.solve(GTG,GTd);    # solve GT G m = GT d for m

dpre = np.matmul(G,m);

# plot the data and the predicted line
fig1 = plt.figure(1,figsize=(8,4));   # smallish figure
ax1 = plt.subplot(1, 1, 1);           # only one subplot
plt.plot(t,d,"k-");                   # plot d(t) with black line
plt.plot(t,d,"ro");                   # plot d(t) with black line
plt.xlabel("time (years)");           # label time axis
plt.plot(t,dpre,"b-",lw=3);           # plot dpre(t) with blue line
plt.show();                           # display plot

# estimate variance
e = d - dpre              # individual errors 
E = np.matmul(e.T, e); E=E[0,0]; # total least squares error
sd2 = E/N;                # posterior estimate of variance of data
Cm = sd2 * la.inv(GTG);   # posterior covariance of model parameters
print("intercept %.4f +/- %.4f (95%%)" % (m[0,0], 2*sqrt(Cm[0,0])));
print("slope     %.4f +/- %.4f (95%%)" % (m[1,0], 2*sqrt(Cm[1,1])));
intercept -36.1148 +/- 2.9200 (95%)
slope     0.0183 +/- 0.0015 (95%)
In [13]:
# gdapy02_02
# parabolic fit to global temperature data

# data from:
# Hansen, J., Mki. Sato, R. Ruedy, K. Lo, D.W. Lea, and M. Medina-Elizade,
# 2006: Global temperature change. Proc. Natl. Acad. Sci., 103, 14288-14293,
# doi:10.1073/pnas.0606291103. 

# load rectangular, tab-separated table of numbers into a N by M matrix D
D = np.genfromtxt("../Data/global_temp.txt", delimiter="\t");
N, M = np.shape(D);

t = np.copy( D[0:N,0:1] );  # time in column 1
d = np.copy( D[0:N,1:2] );  # temperature in column 2

t0 = t - t[0,0];            # time starting at zero

M=3;                          # three model parameters of a parabola
G = np.zeros((N,M));          # start with zeros
G[0:N,0:1] = np.ones((N,1));  # first column of G
G[0:N,1:2] = t0;               # second column of G
G[0:N,2:3] = np.power(t0,2);   # third column of G

# solve inverse problem
GTG = np.matmul(G.T,G);    # G-transpose G
GTd = np.matmul(G.T,d);   # G-transpose d
m = la.solve(GTG,GTd);    # solve GT G m = GT d for m

dpre = np.matmul(G,m);

# plot the data and the predicted line
fig1 = plt.figure(1,figsize=(8,4));   # smallish figure
ax1 = plt.subplot(1, 1, 1);           # only one subplot
plt.plot(t,d,"k-");                   # plot d(t) with black line
plt.plot(t,d,"ro");                   # plot d(t) with black line
plt.xlabel("time (years)");           # label time axis
plt.plot(t,dpre,"b-",lw=3);           # plot dpre(t) with blue line
plt.show();                           # display plot

# estimate variance
e = d - dpre              # individual errors 
E = np.matmul(e.T, e); E=E[0,0]; # total least squares error
sd2 = E/N;                # posterior estimate of variance of data
Cm = sd2 * la.inv(GTG);   # posterior covariance of model parameters
print("these coefficients for the parabola d = m1 + m1 (t-t0) + m2 (t-t0)^2");
print("where t0=%.0f" % (t[0,0]));
print("m1  %.6f +/- %.6f (95%%)" % (m[0,0], 2*sqrt(Cm[0,0])));
print("m2  %.6f +/- %.6f (95%%)" % (m[1,0], 2*sqrt(Cm[1,1])));
print("m3  %.6f +/- %.6f (95%%)" % (m[2,0], 2*sqrt(Cm[2,2])));
these coefficients for the parabola d = m1 + m2 (t-t0) + m3 (t-t0)^2
where t0=1965
m1  -0.075055 +/- 0.067589 (95%)
m2  0.013006 +/- 0.005581 (95%)
m3  0.000095 +/- 0.000096 (95%)
In [14]:
# gdapy02_03
#
# G for various cases
# Supports Sections 1.3.3 and 1.3.4

# set up z-axis
z = gda_cvec( 1, 2, 4, 8, 9, 12, 15, 20 );
N,M=np.shape(z);

# straight line case
M=2;
G=np.zeros((N,M));
G[0:N,0:1]=np.ones((N,1));
G[0:N,1:2]=z;

print("z-axis");
print(z.T);
print(" ");

print("Data Kernel for linear case");
print(G);
print(" ");

# quadratic curve case
M=3;
G=np.zeros((N,M));
G[0:N,0:1]=np.ones((N,1));
G[0:N,1:2]=z;
G[0:N,2:3]=np.power(z,2);

print("Data Kernel for quadratic curve case");
print(G);
print(" ");

# acoustic tomography case
N=8;
M=16;
G=np.zeros((N,M));
h=1;
for i in range(4):
    for j in range(4):
        # measurements over rows
        k = i*4 + j;
        G[i,k]=h;
        # measurements over columns
        k = j*4 + i;
        G[i+4,k]=h;

print("Data Kernel for acoustic tomography case");
print(G);
print(" ");


# CAT scan case
# let the image be square, from (0 to 1) in both x and y
# let it be divided into M=KxL pixels which constitute model parameters
# the pixel array is unwrapped using k=i+j*K
# let N rays be collected with endpoints at (x1,y1) and (x2,y2).
K=10;
L=10;
M=K*L;
N=3;    # only three rays in this example
Dx=1/K; # x dimension of pixel
Dy=1/L; # y dimension of pixel

# endpoints of rays
X1 = np.array( [ [0,0], [0,1], [0,0.5] ] ).T;
X2 = np.array( [ [1,1], [1,0], [1, 0.5] ] ).T;

# small increment along a ray
Ds = min(Dx,Dy)/10.0;

I = np.zeros((K,L)); # image of rays, for plotting purposes
                     # I(ix,iy) is set to 1 whenever at least
                     # one ray crosses pixel (ix,iy)
G = np.zeros((N,M)); # data kernel

# G[n,k] is the length of ray n in pixel k.
# This code uses an approximation to calculate it.
# Each ray is divided into many small increments of length ds
# that are smaller than a pixel.  G[n,k] is incremented
# by ds whenever the left-hand end of the increment is in pixel k,
# irrespective of whether the whole increment is in the pixel
# or not.

for n in range(N):  # ray n
    x1=X1[0:2,n:n+1]; x2=X2[0:2,n:n+1]; # end points of ray
    dx = x2-x1;
    R2 = np.matmul(dx.T,dx); R2=R2[0,0];
    R = sqrt(R2); # length of ray
    t = dx/R; # tangent to ray
    Nr = floor( R/Ds ); # number of ray segments
    x=x1; # starting point along ray
    for ir in range(Nr): # step along ray
        x = x1 + ir*Ds*t; # point along ray
        # indices of pixel containing the point
        px = floor( x[0,0] / Dx );
        if( px<0 ): # check that it is within bounds
            px=0;
        elif( px>(K-1) ):
            px=(K-1);
        py = floor( x[1,0] / Dy );
        if( py<0 ): # check that it is within bounds
            py=0;
        elif( py>(L-1) ):
            py=(L-1);
        # index in unwrapped vector of model parameters
        k=px+py*K;
        I[px,py] = 1.0;   # make image of rays
        G[n,k]=G[n,k]+Ds; # increment data kernel
        
gda_draw('title Rays', I);

print("first 5 rows of G, transposed");
print(G[0:5,0:M].T);
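
# A rough consistency check (not in the original script): because G[n,k] is
# built by summing increments of length Ds, each row of G should sum to
# approximately the length of the corresponding ray
for n in range(N):
    dxn = X2[0:2,n:n+1] - X1[0:2,n:n+1];
    Rn = sqrt(np.matmul(dxn.T,dxn)[0,0]);
    assert abs(np.sum(G[n,0:M]) - Rn) < 2.0*Ds;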
z-axis
[[ 1  2  4  8  9 12 15 20]]
 
Data Kernel for linear case
[[ 1.  1.]
 [ 1.  2.]
 [ 1.  4.]
 [ 1.  8.]
 [ 1.  9.]
 [ 1. 12.]
 [ 1. 15.]
 [ 1. 20.]]
 
Data Kernel for quadratic curve case
[[  1.   1.   1.]
 [  1.   2.   4.]
 [  1.   4.  16.]
 [  1.   8.  64.]
 [  1.   9.  81.]
 [  1.  12. 144.]
 [  1.  15. 225.]
 [  1.  20. 400.]]
 
Data Kernel for acoustic tomography case
[[1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1.]
 [1. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0.]
 [0. 1. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0.]
 [0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 1. 0.]
 [0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 1.]]
 
G, transposed (one column per ray)
[[0.15 0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.13 0.  ]
 [0.   0.   0.  ]
 [0.14 0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.14 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.14 0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.15 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.14 0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.14 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.14 0.   0.  ]
 [0.   0.14 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.1 ]
 [0.   0.   0.1 ]
 [0.   0.   0.11]
 [0.   0.   0.09]
 [0.   0.14 0.1 ]
 [0.14 0.   0.11]
 [0.   0.   0.09]
 [0.   0.   0.1 ]
 [0.   0.   0.1 ]
 [0.   0.   0.1 ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.14 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.14 0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.14 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.15 0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.14 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.14 0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.15 0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.   0.   0.  ]
 [0.13 0.   0.  ]]
In [24]:
# gdapy02_04
#
# model Mars rover Mossbauer spectra using Lorentzian curves
# supports Figure 1.4


# interactive graphics from within the Jupyter Notebook is a bit dicey
# if METHOD=1 fails, try METHOD=2 (or vice-versa)
METHOD=1;

# get current backend
if( METHOD==1):
    bkend = matplotlib.get_backend();
    
#  use interactive plotting
if( METHOD==1 ):
    matplotlib.use('QtAgg');  # 2024/05/23 changed from Qt5Agg
else:
    %matplotlib qt5
    
# load data
D = np.genfromtxt("../data/mars_soil.txt", delimiter="\t");
N,M = np.shape(D);
v=np.copy(D[0:N,0:1]);
d=np.copy(D[0:N,1:2]);
d=d/np.max(d); # normalize

# delete negative velocities
r = np.where(v>0.0); # find indices where the velocity v is positive
k = r[0];
v=v[k,0:1];
d=d[k,0:1];
N,M = np.shape(d);

# plot data

fig99 = plt.figure(99,figsize=(8,4)); # interactive figure
ax1 = plt.subplot(1,1,1);
plt.axis( [np.min(v)-1.0, np.max(v), np.min(d), np.max(d)] );
plt.plot(v,d,"r-",lw=2);
plt.plot(v,d,"ro",lw=3);
plt.xlabel("velocity, mm/s");
plt.ylabel("counts");
plt.title("click bottom of each peak, then left of v=0");

A = np.max(d);   # estimate of background level

# initialize arrays to hold peak info
MAXPEAKS=100;
a =  np.zeros((MAXPEAKS,1)); # amplitude a
v0 = np.zeros((MAXPEAKS,1)); # center position v0=
c =  np.zeros((MAXPEAKS,1)); # width c

# get approximate peak position and height from mouse click
for k in range(MAXPEAKS):
    p = plt.ginput(1);  # returns list of (x,y) tuples
    if( p[0][0] < 0 ):
        break;
    a[k,0] = p[0][1]-A;   # amplitude a, center position v0, width c
    v0[k,0]= p[0][0];
    c[k,0]=0.1;
    temp = np.power(v-v0[k,0],2) + c[k,0]**2;        # plot one peak
    dpre = A+np.divide( a[k,0]*(c[k,0]**2), temp );
    plt.plot(v,dpre,"b:",lw=1);
    plt.draw();  # must draw to update plot
    K=k+1; # peak counter

plt.show();
plt.close();     # close interactive figure

# switch back to standard plotting
if( METHOD==1 ):
    matplotlib.use(bkend);
else:
    %matplotlib inline

# truncate arrays to actual number of clicks
print("%d peaks identfied" % (K));
a =  a[0:K, 0:1];
v0 = v0[0:K, 0:1];
c =  c[0:K, 0:1];

# Lorentzian curve of peak amplitude a, center velocity v0 and width c
# f(v) = a c^2 / ( (v-v0)^2 + c^2 )
# derivatives of the Lorentzian (needed for Newton's method)
# df/da  = c^2 / ( (v-v0)^2 + c^2 )
# df/dv0 = 2 a c^2 (v-v0) / ( (v-v0)^2 + c^2 )^2
# df/dc  = 2 a c / ( (v-v0)^2 + c^2 ) - 2 a c^3 / ( (v-v0)^2 + c^2 )^2

# 3 model parameters per lorentzian, (a, v0, c)
# model parameters
M=K*3;
m = np.concatenate( (a,v0,c), axis=0 );

# Newton's method to determine model parameters
MAXITER=1;
for iter in range(MAXITER):
    
    # predict data based on current estimate of (a,v0,c)
    dpre = A*np.ones((N,1));
    for i in range(K):
        temp = np.power(v-m[K+i,0],2) + m[2*K+i,0]**2;
        dpre = dpre + np.divide( m[i,0]*(m[2*K+i,0]**2), temp );

    # linearized data kernel (derivatives of dpre with respect to (a,v0,c))
    G=np.zeros((N,M));
    for i in range(K):
        temp = np.power(v-m[K+i,0],2) + m[2*K+i,0]**2;
        temp2 = np.power(temp,2);
        G[0:N,i:i+1] =          np.divide(m[2*K+i,0]**2, temp);                           # d/da
        G[0:N,K+i:K+i+1] =      2.0*m[i,0]*(m[2*K+i,0]**2)*np.divide(v-m[K+i,0], temp2);  # d/dv0
        temp3 = np.divide(2.0*m[i,0]*m[2*K+i,0],temp);
        temp4 = np.divide(2.0*m[i,0]*(m[2*K+i,0]**3),temp2);
        G[0:N:,2*K+i:2*K+i+1] = temp3 - temp4;                                            # d/dc

    # deviations in data
    dd = d - dpre;
    
    # total error
    E = np.matmul( dd.T, dd ); E=E[0,0];
          
    # updated model
    GTGeI = np.matmul(G.T,G) + 0.001*np.identity(M);
    GTd = np.matmul(G.T,dd);
    dm = la.solve(GTGeI,GTd);
    mold=np.copy(m);
    m = m+dm;
    
    # least squares error
    if( iter==0 ):
        E0=E;
    
# final predicted data
dpre = A*np.ones((N,1));
for i in range(K):
    temp = np.power(v-m[K+i,0],2) + m[2*K+i,0]**2;
    dpre = dpre + np.divide(m[i,0]*(m[2*K+i,0]**2), temp );
    
# final error
dd = d - dpre;
E = np.matmul( dd.T, dd ); E=E[0,0];
print("final error in percent %.2f" % (100.0*E/E0) );

# plot data and prediction
fig2 = plt.figure(2,figsize=(8,4)); # non-interactive figure
ax1 = plt.subplot(1,1,1);      
plt.axis( [np.min(v), np.max(v), np.min(d), np.max(d)] );
plt.plot(v,d,"r-",lw=2);
plt.plot(v,d,"ro",lw=3);
plt.plot(v,dpre,"g-",lw=3);
plt.xlabel('velocity, mm/s');
plt.ylabel('counts');
plt.title('Fit of Lorentzian Peaks');
plt.show();
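
# A finite-difference check of the df/dv0 formula above (illustrative sketch,
# not in the original script; the test values are arbitrary)
a_t, v0_t, c_t, v_t = 1.0, 0.0, 0.5, 0.3;   # hypothetical test parameters
h_t = 1.0e-6;                               # finite-difference step
lor = lambda a_, v0_, c_: a_*c_**2/((v_t-v0_)**2+c_**2);
dfdv0_analytic = 2.0*a_t*c_t**2*(v_t-v0_t)/((v_t-v0_t)**2+c_t**2)**2;
dfdv0_numeric  = (lor(a_t,v0_t+h_t,c_t)-lor(a_t,v0_t-h_t,c_t))/(2.0*h_t);
assert abs(dfdv0_analytic-dfdv0_numeric) < 1.0e-6;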
10 peaks identified
final error in percent 28.96
In [26]:
# gdapy02_05
#
# plot for mixing example figure
# supports Figure 1.5

M=5; # number of elements
s1 = gda_cvec(1, 0, 2, 0.5, 3);   # endmember (factor) 1
s2 = gda_cvec(3, 0.7, 1, 2.5, 2); # endmember (factor) 2
# sample 1
x=0.1;
sx1 = x*s2 + (1-x)*s1;
# sample 2
x=0.3;
sx2 = x*s2 + (1-x)*s1;
# sample 3
x=0.5;
sx3 = x*s2 + (1-x)*s1;
# sample 4
x=0.7;
sx4 = x*s2 + (1-x)*s1;
# sample 5
x=0.9;
sx5 = x*s2 + (1-x)*s1;
# make some simple vector plots
gda_draw(' ', 'title A', s1, ' ', 'title s1',sx1, ' ', 'title s2',sx2, ' ',
            'title s3',sx3, ' ', 'title s4', sx4, ' ', 'title s5', sx5, ' ', 'title B',s2);
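
# A minimal sketch (not in the original script) recovering the mixing fraction
# of sample 3 by projecting it onto the endmember difference s2-s1
ds = s2 - s1;
xest = np.matmul(ds.T, sx3-s1)[0,0] / np.matmul(ds.T, ds)[0,0];
assert abs(xest - 0.5) < 1.0e-12;   # sample 3 was built with x=0.5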
In [27]:
# gdapy02_06
#
# filter example, using seismometer response as an exemplary filter
#
# Note: this routine uses fairly advanced techniques to
# construct a seismometer response filter that are not
# going to be explained in sufficient detail for a reader
# to follow. Sorry about that ... Note that the response
# (which is just a time series) is stored in the vector
# uoutf1() and in the file ../data/seismometer_response.txt.


# Note that this example defines two functions
# needed to handle the fairly complicated seismometer
# response functions

# FUNCTION 1
def gda_read_resp( respfile ):
# (Nzeroes, zero, Npoles, pole, F, SP, status ) = gda_read_resp( respfile );
# reads a single channel IRIS-style RESP file to extract the
# poles and zeros representation of an instrument/recorder
# displacement response filter.  This filter takes displacement in
# meters into digital counts.
# Warning: works for single channel RESP file only !!!
# Input:
# respfile: text file containing the RESP, which can be retrieved
#           from the IRIS DMC using the web-interface
#           http://service.iris.edu/irisws/resp/docs/1/builder/
#           and then cut-and-pasted into a text file
# Output:
# Nzeroes, number of zeroes
# zero, array of zeros
# Npoles, number of poles
# pole, array of poles
# F = A0 * SP, overall gain constant
# SP, product of sensitivities
# status: 0=fail, 1=succeed
#
# the following documentation on RESP files might be helpful
# http://ds.iris.edu/ds/nodes/dmc/data/formats/resp/#what-is-a-resp-file

# default values of returned variables
    zero = np.zeros((0,1),dtype=complex);
    pole = np.zeros((0,1),dtype=complex);
    Nzeroes = 0;
    Npoles = 0;
    F = 0.0;
    SP = 0.0;
    status = 0;
    
# open respfile
    fdr = open(respfile);

# A0
    A0 = 0.0;
    while True:
        line = fdr.readline();
        if not line:
            print("cant find A0 in %s" % (respfile) );
            fdr.close();
            return (Nzeroes, zero, Npoles, pole, F, SP, status );
        Nline = len(line);
        j1 = line.find("A0");
        j2 = line.find("normalization");
        j3 = line.find("+");
        if( (j1==(-1)) or (j2==(-1)) or (j3==(-1)) ):
            continue;
        A0 = float( line[j3+1:Nline] );
        break;
    if( A0==0.0 ):
        print( "cant find A0 in %s" % (respfile) );
        fdr.close();
        return (Nzeroes, zero, Npoles, pole, F, SP, status );

# Number of zeroes
    Nzeroes = 0;
    while True:
        line = fdr.readline();
        if not line:
            print("cant find A0 in %s" % (respfile) );
            fdr.close();
            return (Nzeroes, zero, Npoles, pole, F, SP, status );
        Nline = len(line);
        j1 = line.find("Number");
        j2 = line.find("zeroes");
        j3 = line.find(":");
        if( (j1==(-1)) or (j2==(-1)) or (j3==(-1)) ):
            continue;
        Nzeroes = int( line[j3+1:Nline] );
        break;
    if( Nzeroes==0 ):
        print("cant find Nzeroes in %s" % (respfile) );
        fdr.close();
        return (Nzeroes, zero, Npoles, pole, F, SP, status );

# Number of poles
    Npoles = 0;
    while True:
        line = fdr.readline();
        if not line:
            print("cant find A0 in %s" % (respfile) );
            fdr.close();
            return (Nzeroes, zero, Npoles, pole, F, SP, status );
        Nline = len(line);
        j1 = line.find("Number");
        j2 = line.find("poles");
        j3 = line.find(":");
        if( (j1==(-1)) or (j2==(-1)) or (j3==(-1)) ):
            continue;
        Npoles = int( line[j3+1:Nline] );
        break;
    if( Npoles==0 ):
        print("cant find Npoles in %s" % (respfile) );
        fdr.close();
        return (Nzeroes, zero, Npoles, pole, F, SP, status );

# skip 2 lines
    line = fdr.readline();
    line = fdr.readline();

# read zeroes
# an extra zero for displacement is added as zero[0,0]
    Nzeroes=Nzeroes+1;
    zero = np.zeros((Nzeroes,1),dtype=complex);
    for i in range(Nzeroes-1):
        line = fdr.readline();
        if not line:
            print("cant find zero %d in %s" % (i, respfile) );
            fdr.close();
            return (Nzeroes, zero, Npoles, pole, F, SP, status );
        Nline = len(line);
        v=line[11:Nline].split();
        Nv = len(v);
        if( Nv<3 ):
            print("cant read zero %d in %s" % (i, respfile) );
            fdr.close();
            return (Nzeroes, zero, Npoles, pole, F, SP, status );
        a = np.zeros((Nv,1));
        for j in range(Nv):
            a[j,0]=float(v[j]);
        zero[i+1,0] = complex(a[1,0],a[2,0]);
    
# skip 2 lines
    line = fdr.readline();
    line = fdr.readline();

# read poles
    pole = np.zeros((Npoles,1),dtype=complex);
    for i in range(Npoles):
        line = fdr.readline();
        if not line:
            print("cant find pole %d in %s" % (i, respfile) );
            fdr.close();
            return (Nzeroes, zero, Npoles, pole, F, SP, status );
        Nline = len(line);
        v=line[11:Nline].split();
        Nv = len(v);
        if( Nv<3 ):
            print("cant read pole %d in %s" % (i, respfile) );
            fdr.close();
            return (Nzeroes, zero, Npoles, pole, F, SP, status );
        a = np.zeros((Nv,1));
        for j in range(Nv):
            a[j,0]=float(v[j]);
        pole[i,0] = complex(a[1,0],a[2,0]);

# read sensitivities
    NSMAX = 100;
    Sx = np.zeros((NSMAX,1));
    NS = 0;
    while True:
        line = fdr.readline();
        if not line:
            break;
        Nline=len(line);
        j1 = line.find("Station:");
        # next two lines are a patch added by Menke, 2021-05-20
        # to handle case of duplicate station information
        if( not (j1==(-1)) ):
            break
        j1 = line.find("Sensitivity");
        j2 = line.find("+");
        if((j1==(-1)) or (j2==(-1))):
            continue;
        Sx[NS,0] = float( line[j2+1:Nline] );
        NS = NS+1;
    S = np.zeros((NS,1));
    S[:,0] = Sx[0:NS,0];

    fdr.close();

    if( NS<2 ):
        print("cant read sensitivities in %s" % (respfile) );
        return (Nzeroes, zero, Npoles, pole, F, SP, status );

    SP = np.prod( S[0:NS-1,0], axis=0 );
    E = abs(SP-S[NS-1,0])/S[NS-1,0];
    F = A0 * SP;

    if( E>1e-4 ):
        print("Error %s: last sensitivity not product of others (error %f)" % (respfile,E));
        for i in range(NS):
            print("S[%d] %e" % (i, S[i,0]) );
        print("product %e", (SP) );
        return (Nzeroes, zero, Npoles, pole, F, SP, status );
    
    status=1;
    return (Nzeroes, zero, Npoles, pole, F, SP, status );


# FUNCTION 2
def gda_apply_response( uin, Dt, respfile):
# (uout, status) = gda_apply_response( uin, Dt, respfile );
# apply an instrument response to a ground-motion timeseries
# input:
# uin: ground-motion timeseries (must have even length)
# Dt: sampling interval, seconds
# respfile: filename of IRIS-style single channel RESP file

# output: uout, timeseries in digital counts after the response has been applied
# warning: removing a response is an unstable process at low frequencies; you must bandpass filter sensibly
# status: 0=fail, 1=succeed

    # flag: 0=insert (apply), 1=remove
    flag = 0;  # hard-wired to apply response

    (N,i) = np.shape(uin);
    uout = np.zeros( (0,1) );
    status=0;

# read resp file to obtain poles and zeroes
    (Nzeroes, zero, Npoles, pole, F, SP, status ) = gda_read_resp( respfile );
    if( status==0 ):
        print("remove_response: can read response from file %s" % (respfile) );
        return (uout, status);
    
# generic frequency setup
    No2 = floor(N/2);  # integer version of N/2
    fmax=1/(2.0*Dt);   # nyquist frequency 
    Df=fmax/No2;       # frequency sampling
    Nf=No2+1;          # number of non-negative frequencies
    f=gda_cvec(Df*np.concatenate((np.linspace(0,No2,Nf),np.linspace(-No2+1,-1,No2-1)),axis=0));
    Dw=2*pi*Df;        # angular frequency increment
    w=2*pi*f;          # full (positive and negative) angular frequencies
    Nw=Nf;             # number of non-negative angular frequencies
    fpos=gda_cvec(Df * np.linspace(0,No2,Nf)); # non-negative frequencies
    wpos=2*pi*fpos;    # non-negative angular frequencies

# evaluate numerator of response
    numer = np.ones((Nw,1),dtype=complex);
    for ip in range(Nzeroes):
        numer = np.multiply( numer, ( complex(0.0,1.0) * wpos - zero[ip,0]) );

# evaluate denominator of response
    denom = np.ones((Nw,1),dtype=complex);
    for ip in range(Npoles):
        denom = np.multiply( denom , ( complex(0.0,1.0) * wpos - pole[ip,0] ) );

# construct reciprocal of response
    rpos = np.zeros((Nw,1),dtype=complex);
    if flag==1:
        av = np.absolute(numer);
        for i in range(Nw):
            if av[i,0]==0.0:
                rpos[i,0]=0.0;
            else:
                rpos[i,0] = (1/F) * denom[i,0] / numer[i,0];
    else:
        av = np.absolute(denom);
        for i in range(Nw):
            if av[i,0]==0.0:
                rpos[i,0]=0.0;
            else:
                rpos[i,0] = (F) * numer[i,0] / denom[i,0];
        
# artificially set some values to zero
    rpos[0,0]=complex(0.0,0.0);  # zero-frequency
    rpos[Nw-1,0]=complex(0.0,0.0); # nyquist frequency
    
# artificial zeroing of some values, in "remove" case
    if( flag==1 ):
        rmin = np.amax(rpos);
        absrpos = np.absolute(rpos);
        for i in range(Nw):
            if (absrpos[i,0] != 0) and (absrpos[i,0]<rmin):
                rmin = absrpos[i,0];
        rlimit = 10000.0*rmin;
        for i in range(Nw):
            if absrpos[i,0] > rlimit:
                rpos[i,0]=complex(0.0,0.0);

# fold out frequencies
    rfull=np.zeros((N,1),dtype=complex);
    rfull[0:Nw,0] = rpos.ravel();
    rfull[Nw:N,0] = np.conjugate(np.flipud(rpos[1:Nw-1])).ravel();

# change response of data
    ut = np.fft.fft(uin,axis=0);
    ut = np.multiply(ut,rfull);
    uout = np.real(np.fft.ifft(ut, axis=0));

    status = 1;
    return (uout, status);

# MAIN PART OF CODE
# response of the Broadband East component of station J59A
# of the US Transportable Array is in this file (in inscrutable
# IRIS RESP format).
respfile1="../data/TA_J59A_BHE.txt";
# read the IRIS RESP file for this seismometer
Nzeroes, zero, Npoles, pole, F, SP, status = gda_read_resp(respfile1);
# set up an input ground motion with a single small spike
N=100;
Dt = 0.025;
t = gda_cvec(np.linspace(0,Dt*(N-1),N));
uin = np.zeros((N,1));
uin[1,0] = 1.0e-6;
# apply station 1 response, filter result, and plot
uout1, status = gda_apply_response( uin, Dt, respfile1 );
flow = 0.005;
fhigh = 1.0;
uoutf1,u,v = gda_chebyshevfilt(uout1, Dt, flow, fhigh);
# save the response filter in a text file
DF = np.concatenate( (t,uoutf1), axis=1); 
np.savetxt("../TestFolder/response.txt",DF,delimiter="\t");


# plot
fig1 = plt.figure(1,figsize=(8,4));
ax1 = plt.subplot(2, 1, 1);
plt.axis( [0, Dt*(N-1), -np.abs(np.max(uin)), np.abs(np.max(uin)) ] );
plt.plot( t, uin, "k-", lw=3 );
plt.title("model parameters m");
plt.xlabel("time (s)");
plt.ylabel("velocity, microns");   
ax2 = plt.subplot(2, 1, 2);
plt.axis( [0, Dt*(N-1), -np.max(np.abs(uoutf1)), np.max(np.abs(uoutf1)) ] );
plt.plot( t, uoutf1, "k-", lw=3 );
plt.title('response 1');
plt.xlabel('time (s)');
plt.ylabel('digital counts');
plt.show();
     
#  set up an input seismogram with several small spikes
# and a smoothly varying function
N=1024;
Dt = 0.025;
t = gda_cvec(np.linspace(0.0,Dt*(N-1),N));
uin = np.zeros((N,1));
uin[floor(N/4),0] = (1e-6);
uin[floor(8+N/2),0] = 0.5*(1e-6);
uin[floor(17+N/2),0] = 0.5*(1e-6);
uin[750:850,0:1] = 0.5*(1e-6)*np.sin(pi*gda_cvec(np.linspace(750.0,849.0,100)-750.0)/100.0);

# apply the response
uout1,status= gda_apply_response( uin, Dt, respfile1 );
flow = 0.005;
fhigh = 1.0;
uoutf1,u,v = gda_chebyshevfilt(uout1, Dt, flow, fhigh);

# plot the complicated ground motion
fig2 = plt.figure(2,figsize=(8,4));
ax1 = plt.subplot(2, 1, 1);
plt.axis( [0, Dt*(N-1), -np.max(np.abs(uin)), np.max(np.abs(uin)) ] );
plt.plot( t, uin, "r-", lw=3 );
plt.title("model parameters m");
plt.xlabel("time (s)");
plt.ylabel("velocity, microns");
# plot the corresponding seismometer response
ax2 = plt.subplot(2, 1, 2);
plt.axis( [0, Dt*(N-1), -np.max(np.abs(uoutf1)), np.max(np.abs(uoutf1)) ] );
plt.plot( t, uoutf1, "b-", lw=3 );
plt.title("response 1");
plt.xlabel("time (s)");
plt.ylabel("digital counts");
     
In [28]:
# gdapy02_07
# plot example of filter, both as wiggles and a matrix
# supports figures 

#load exemplary filter (for a seismometer)
D = np.genfromtxt("../data/seismometer_response.txt", delimiter="\t");
N,M = np.shape(D);
t=np.copy(D[0:N,0:1]);
g=np.copy(D[0:N,1:2]);
Dt=t[1,0]-t[0,0];
maxabsg = np.max(np.abs(g));

# build data kernel corresponding to convolution by this filter
grow = np.zeros((1,N));
grow[0,0]=g[0,0];
G = la.toeplitz( g, grow );

# plot filter, as wiggles
fig2 = plt.figure(2,figsize=(12,6));

# select columns of G to be plotted
# note cast to int so can be used as array index
Nc = 6;
clist = gda_cvec(np.floor(np.linspace(0,N-2*Nc,Nc))).astype(int);  

for i in range(Nc):
    j = clist[i,0];
    ax1 = plt.subplot(1, Nc+2, i+1);
    plt.axis( [-maxabsg, maxabsg, t[0,0], t[N-1,0] ] );
    plt.plot(G[0:N,j:j+1],t,"k-",lw=3);
    plt.xlabel("g(t-%.1f)" % (j*Dt) );
    if( i==0 ):
        plt.ylabel("time (s)");
    plt.gca().invert_yaxis();

# build model parameters
m = np.zeros((N,1));
j = floor(N/5);
tj = gda_cvec(np.linspace(0,1,j));
m[0:j,0:1] = 0.5*1e-6*np.sin(pi*tj);
maxabsm = np.max(np.abs(m));

# plot input (model parameters, ground motion)
ax1 = plt.subplot(1, Nc+2, Nc+1);
plt.axis( [-maxabsm, maxabsm, t[0,0], t[N-1,0] ] );
plt.plot(m,t,"b-",lw=3);
plt.xlabel("m");
plt.gca().invert_yaxis();

d = np.matmul(G,m); # perform convolution
maxabsd = np.max(np.abs(d));
         
# plot output (data, seismometer response)
ax1 = plt.subplot(1, Nc+2, Nc+2);
plt.axis( [-maxabsd, maxabsd, t[0,0], t[N-1,0] ] );
plt.plot(d,t,"r-",lw=3);
plt.xlabel("d");
plt.gca().invert_yaxis();
plt.show();

# plot as matrix
gda_draw("title G", G, "title m", m, "=", "title d", d);
In [29]:
# gdapy02_08
# illustrations of different types of solutions

M = 2;
Dm = 1.0;
Nm = 101;
m = gda_cvec(np.linspace(0,Dm*(Nm-1),Nm));
mmin = m[0,0];
mmax = m[Nm-1,0];
mbar = gda_cvec(40.0, 60.0);
sm = gda_cvec(5.0, 10.0);
sm2 = np.power(sm,2);

fig1 = plt.figure(1,figsize=(12,6));

# solution is estimate and error bars
ax1 = plt.subplot(1, 3, 1);
ax1.set_aspect('equal');
plt.axis( [mmin, mmax, mmin, mmax] );
plt.plot( gda_cvec(mbar[0,0], mbar[0,0]), gda_cvec(mbar[1,0]-2*sm[1,0], mbar[1,0]+2*sm[1,0]), "r-", lw=3 );
plt.plot( gda_cvec(mbar[0,0]-2*sm[0,0], mbar[0,0]+2*sm[0,0]), gda_cvec(mbar[1,0], mbar[1,0]), 'r-', lw=3 );
plt.plot( mbar[0,0], mbar[1,0], "ko", lw=3 );
plt.xlabel("m_1");
plt.ylabel("m_2");

# solution is pdf
# use tensor product, m1 increases along rows, m2 along columns
p1 = np.exp(-0.5*np.power(m-mbar[0,0],2)/sm2[0,0]);
p2 = np.exp(-0.5*np.power(m-mbar[1,0],2)/sm2[1,0]);
p = np.matmul(p1,p2.T);
pnorm = (Dm**2)*np.sum(p);
p = p/pnorm;
ax2 = plt.subplot(1, 3, 2);
ax2.set_aspect('equal');
plt.axis( [mmin, mmax, mmin, mmax] );
mycmap = matplotlib.colormaps['jet'];
# trick for checking that orientation is right
# put a defect at m1=20, m2=50 and see if it plots correctly
p[20,40] = np.max(p);
plt.imshow( p.T, cmap=mycmap, vmin=np.min(p), vmax=np.max(p), extent=(mmin,mmax,mmax,mmin) );
plt.xlabel("m_1");
plt.ylabel("m_2");

N = 100;
m1real = np.random.normal( loc=mbar[0,0], scale=sm[0,0], size=(N,1) );
m2real = np.random.normal( loc=mbar[1,0], scale=sm[1,0], size=(N,1) );
ax3 = plt.subplot(1, 3, 3);
ax3.set_aspect('equal');
plt.axis( [mmin, mmax, mmin, mmax] );
plt.plot( m1real, m2real, 'k.', lw=3 );
plt.xlabel('m_1');
plt.ylabel('m_2');
plt.show();
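
# A rough sanity check (not in the original script): the sample statistics of
# the realizations should be near mbar and sm (only approximately, since N=100)
m1mean = np.mean(m1real);   # should be near mbar[0,0]=40
m2stddev = np.std(m2real);  # should be near sm[1,0]=10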
In [30]:
# gdapy02_09
#
# range of p.d.f.'s, from simple to complicated

# m-axis
Dm = 0.1;
N = 101;
m1 = gda_cvec(np.linspace(0,Dm*(N-1),N));
mmin=m1[0,0];
mmax=m1[N-1,0];

# unimodal distribution
sm = 1.0;
sm2 = sm**2;
m1bar = 5.0;
p1a = np.exp(-0.5*np.power(m1-m1bar,2)/sm2);
norm = Dm*np.sum(p1a);
p1a = p1a/norm;

# bimodal distribution
sma = 0.7;
sma2 = sma**2;
m1bara = 3.0;
smb = 1.4;
smb2 = smb**2;
m1barb = 8.0;
p1b = np.exp(-0.5*np.power(m1-m1bara,2)/sma2)+0.4*np.exp(-0.5*np.power(m1-m1barb,2)/smb2);
norm = Dm*np.sum(p1b);
p1b = p1b/norm;

# absurdly complicated distribution
p1c = np.zeros((N,1));
for i in range(10):
    Ac=np.random.uniform(low=0,high=1);
    m1barc=np.random.uniform(low=0,high=10);
    smc=np.random.uniform(low=0.05,high=1);
    smc2=smc**2;
    tmp=Ac*np.exp(-0.5*np.power(m1-m1barc,2)/smc2);
    p1c = p1c + tmp;
norm = Dm*np.sum(p1c);
p1c = p1c/norm;

fig2 = plt.figure(2,figsize=(12,6));

# plot unimodal distribution
ax1 = plt.subplot(1, 3, 1);
plt.axis( [mmin, mmax, 0, 0.5 ] );
plt.plot(m1,p1a,"b-",lw=3);
plt.xlabel("m");
plt.ylabel("p(m)");

# plot bimodal distribution
ax1 = plt.subplot(1, 3, 2);
plt.axis( [mmin, mmax, 0, 0.5 ] );
plt.plot(m1,p1b,"b-",lw=3);
plt.xlabel("m");
plt.ylabel("p(m)");

# plot complicated distribution
ax1 = plt.subplot(1, 3, 3);
plt.axis( [mmin, mmax, 0, 0.5 ] );
plt.plot(m1,p1c,'b-',lw=3);
plt.xlabel("m");
plt.ylabel("p(m)");
In [ ]: