% gdama08
% W. Menke (2024), Geophysical Data Analysis and Inverse Theory
% with MATLAB(R) or Python, 5th Edition
% Code History:
% 2022-10-01, Imported from 4th Edition by W. Menke
% 2023-04-06, Edit and integration with text
% gdama08_01
%
% upper/lower bounds on localized average using linear programming
% data kernel G composed of arithmetic average of model parameters
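%
% For each averaging vector f, linprog solves the linear program
%    minimize f'*m  subject to  G*m = dobs  and  lb <= m <= ub,
% which gives a lower bound on the localized average f'*m; minimizing
% -f'*m and negating the result gives the corresponding upper bound.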
 
clear all;
fprintf('gdama08_01\n');
 
% data equation: mean of d
M=21; % must be odd
N=1;
mtrue = random('Uniform',-1,1,M,1);
mtrue = mtrue - mean(mtrue);
G=ones(N,M)/M;
dtrue=G*mtrue;
dobs=dtrue;
 
% upper bound: mi <= 1
ub = ones(M,1);
 
% lower bound: mi >= -1
lb = -ones(M,1);
 
M2 = floor(M/2)+1;
L=0; % counter
I=zeros(M2,1); fmin=zeros(M2,1); fmax=zeros(M2,1); % preallocate
options = optimset('linprog');
options.Display = 'off'; % this option turns off annoying messages
for i=(0:floor(M/2))
    L=L+1;
    % averaging vector centered at the middle of the model (index M2),
    % with width 2*i+1
    f = zeros(M,1);
    f(M2-i:M2+i) = 1;
    I(L)=sum(f);
    f=f/I(L);
    [mest1, fmin(L)] = linprog( f, [], [], G, dobs, lb, ub, options );
    [mest2, fmax(L)] = linprog( -f, [], [], G, dobs, lb, ub, options );
end
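
% optional sanity check (not part of the original script): the widest
% average (width M) coincides with the single data-kernel row, so its
% lower and upper bounds should both equal the observed datum
fprintf('width %d bounds: [%.4f, %.4f], dobs = %.4f\n', I(L), fmin(L), -fmax(L), dobs);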
 
figure();
set(gca,'LineWidth',3);
hold on;
axis( [1, M, 0, 1.5] );
plot(I,-fmax, 'k-', 'LineWidth', 2);
plot(I,-fmax, 'ko', 'LineWidth', 4);
xlabel('width');
ylabel('bound on |<m>|');
fprintf('Caption: Bound on the absolute value of the mean of several adjacent model parameters,\n');
fprintf('as a function of the width of the averaging kernel.\n');
 
% gdama08_02
%
% upper/lower bounds on localized average using linear programming
% data kernel G composed of decaying exponentials
 
clear all;
fprintf('gdama08_02\n');
 
% model, m(z): a slowly increasing linear ramp
M=101;
zmin=0;
zmax=10;
Dz=(zmax-zmin)/(M-1);
z=zmin+Dz*[0:M-1]';
mtrue = 0.5+0.03*z;
 
% experiment: exponential smoothing of model
N=40;
G = zeros(N,M);
for i = (1:N)
    j = floor(i*M/N );
    % G(i,1:j) = [j:-1:1]/j;
    G(i,1:j)=[j:-1:1]/j+random('Normal',0,0.15,1,j);
end
 
fprintf('det %f\n',det(G*G'));
% example output: det 0.040484
 
% observed data is true data plus noise
sd=0.001;
dtrue = G*mtrue;
dobs = dtrue + random('Normal',0,sd,N,1);
 
% draw the data kernel
gda_draw(dtrue,'caption d','=',G,'caption G',mtrue,'caption m');
fprintf('Caption: Graphical depiction of the equation dtrue = G mtrue\n');
 
% minimum length solution
epsilon=1e-12;
GMG = G'/(G*G'+epsilon*eye(N,N));
mest = GMG * dobs;
Rres = GMG*G;
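
% The estimate above uses the damped minimum-length generalized inverse,
% GMG = G'*inv(G*G' + epsilon*I), and Rres = GMG*G is the corresponding
% model resolution matrix. Optional check (not part of the original
% script): because the problem is underdetermined and epsilon is tiny,
% the estimate should reproduce the data almost exactly.
fprintf('prediction error |dobs - G*mest| = %e\n', norm(dobs-G*mest));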
 
% plot
figure();
clf;
 
% plot scale
pmmin=-0.5;
pmmax=1.5;
 
% plot the true model and the minimum length estimate
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [zmin, zmax, pmmin, pmmax] );
plot( z, mtrue, 'r-', 'LineWidth', 2);
plot( z, mest, 'b-', 'LineWidth', 2);
xlabel('z');
ylabel('m');
 
% upper bound: mi <= 1
mub = ones(M,1);
 
% lower bound: mi >= -1
mlb = -ones(M,1);
 
% half width of averaging kernel
hw = 10;
 
options = optimset('linprog');
options.Display = 'off'; % this option turns off annoying messages
 
% compute upper, lower bounds on average
amin=zeros(M,1);
amax=zeros(M,1);
for i=(1:M)
    % averaging vector centered at index i with half-width hw,
    % truncated at the ends of the model
    a = zeros(M,1);
    j=i-hw;
    if( j<1 )
        j=1;
    end
    k=i+hw;
    if(k>M)
        k=M;
    end
    a(j:k)=1/(k-j+1);
    [mest1, amin(i)] = linprog( a, [], [], G, dobs, mlb, mub, options );
    [mest2, amax(i)] = linprog( -a, [], [], G, dobs, mlb, mub, options );
    amax(i)=-amax(i);
end
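
% optional sanity check (not part of the original script): the same local
% averages of the true model should lie within (or very near, given the
% small data noise) the computed bounds; movmean with an odd window of
% 2*hw+1 shrinks the window at the edges, matching the truncation above
atrue = movmean(mtrue,2*hw+1);
nok = sum( (atrue >= amin-0.01) & (atrue <= amax+0.01) );
fprintf('true local averages within bounds: %d of %d\n', nok, M);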
 
% plot bounds
plot( z, amin, 'k-', 'LineWidth', 2);
plot( z, amax, 'k-', 'LineWidth', 2);
fprintf('Caption: Exemplary inverse problem with model parameters m(z), showing\n');
fprintf('true solution (red), damped least squares solution (blue), and\n');
fprintf('upper and lower bounds (black)\n');
 
% gdama08_03
%
% Squeezing example
% The data kernel is a series of decaying exponentials in
% an auxiliary variable z and is underdetermined. Three
% models, all of which fit the data to similar accuracy, are
% constructed: simple damped least squares, a solution
% squeezed shallow ("up"), and a solution squeezed deep ("dn").
 
% Note: Because the data are noisy, with noise that randomly
% varies between runs, you may have to run the script a few
% times until the least squares solution (black curve in the
% top panel of figure 2) is a good approximation to the true
% model (blue curve).
 
clear all;
fprintf('gdama08_03\n');
 
% auxiliary variable, z
M=101;
zmin=0;
zmax=10;
Dz=(zmax-zmin)/(M-1);
z=zmin+Dz*[0:M-1]';
 
% true solution is a boxcar
mtrue = zeros(M,1);
i1 = floor( 5*M/11)+1;
i2 = floor( 7*M/11)+1;
ic = floor(0.5*(i1+i2));
zc = z(ic);
mtrue(i1:i2) = 1.0;
 
% data kernel is declining exponentials, normalized
% so that they compute a weighted average of the model
N=floor(M/2);
G = zeros(N,M);
c0=0.01;
for i = (1:N)
    c = c0*(i-1);
    v = exp(-c*z);
    vn = sum(v);
    G(i,1:M)=v'/vn;
end
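
% optional check (not part of the original script): each row of G holds
% the normalized weights exp(-c*z)/sum(exp(-c*z)), so every row should
% sum to one
fprintf('max |row sum - 1| = %e\n', max(abs(sum(G,2)-1)));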
 
% draw the data kernel
gda_draw(G,'caption G');
fprintf('Caption: Graphical depiction of the data kernel G\n');
 
% create synthetic data by adding random noise to the
% true data
sd=0.0001;
dtrue = G*mtrue;
dobs = dtrue + random('Normal',0,sd,N,1);
 
% damped least squares
epsilon2 = 1e-9;
W = eye(M,M);
mest0 = (G'*G + epsilon2*W)\(G'*dobs);
e = dobs - G*mest0;
E0 = (e'*e); % error
Ed = (dobs'*dobs); % energy in data, for comparison
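
% The estimate mest0 minimizes the damped misfit
%    E(m) = (dobs-G*m)'*(dobs-G*m) + epsilon2 * m'*W*m,
% whose normal equations are (G'*G + epsilon2*W)*m = G'*dobs. With W = I
% this is ordinary damped least squares; the squeezed solutions below use
% the same formula with a z-dependent W.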
 
% squeeze up: the weights are low to the left
% of z=zc and high to the right of it. The error
% function erf(z) ramps up smoothly from -1 for
% z<<0 to +1 for z>>0, so I can use a scaled
% and shifted version of it to achieve weights
% that smoothly ramp up/down from one constant
% value to another at the point zc. (Any
% sigmoid-shaped function would have sufficed;
% Matlab's sigmf() would be fine, too.)
 
epsilon2 = 5e-9;
n=2;
A=1;
B=10;
Wup = diag((B+1)+B*erf((z-zc)/A)); % weights grow with z
mest_up = (G'*G + epsilon2*Wup)\(G'*dobs);
e = dobs - G*mest_up;
Eup = e'*e;
 
% squeeze down
epsilon2 = 5e-9;
Wdn = diag((B+1)-B*erf((z-zc)/A)); % weights decline with z
mest_dn = (G'*G + epsilon2*Wdn)\(G'*dobs);
e = dobs - G*mest_dn;
Edn = e'*e;
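
% optional check (not part of the original script): erf ranges from -1 to
% +1, so the diagonal weights range from about (B+1)-B = 1 up to
% (B+1)+B = 2*B+1, i.e. damping is roughly 21 times stronger on the
% penalized side
fprintf('Wup diagonal range: [%.2f, %.2f]\n', min(diag(Wup)), max(diag(Wup)));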
 
% I have adjusted all the coefficients by trial and error
% so that E0/Ed is about 1e-6 (a very good fit) and
% Eup/E0 and Edn/E0 are both about 1.05 (almost as good)
fprintf('Normalized error %.2e ratio: up %.3f dn %.3f\n', E0/Ed, Eup/E0, Edn/E0 );
% example output: Normalized error 4.21e-07 ratio: up 1.081 dn 1.101
 
% plot true and unsqueezed models
figure();
clf;
subplot(3,1,1);
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [zmin, zmax, -1.2, 1.2] );
plot( [zmin, zmax]', [0, 0]', 'k:', 'LineWidth', 2 );
plot( z, mtrue, 'b-', 'LineWidth', 3 );
plot( z, mest0, 'k-', 'LineWidth', 2 );
xlabel('z');
ylabel('m(z)');
 
% plot unsqueezed and squeezed models
subplot(3,1,2);
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [zmin, zmax, -1.2, 1.2] );
plot( [zmin, zmax]', [0, 0]', 'k:', 'LineWidth', 2 );
plot( z, mest_up, 'r-', 'LineWidth', 2 );
plot( z, mest_dn, 'g-', 'LineWidth', 2 );
xlabel('z');
ylabel('m(z)');
 
% plot weights
subplot(3,1,3);
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
w = diag(Wup);
axis( [zmin, zmax, 0, 1.1*max(w)] );
plot( z, w, 'r-', 'LineWidth', 2 );
w = diag(Wdn);
plot( z, w, 'g-', 'LineWidth', 2 );
xlabel('z');
ylabel('W');
fprintf('Caption: (Top) True solution m(z) (blue) and damped least squares solution (black).\n');
fprintf('(Middle) Upward squeezed solution (red) and downward squeezed solution (green).\n');
fprintf('(Bottom) Weighting function for the upward squeeze (red) and the downward squeeze (green).\n');