% gdama10
% W. Menke (2024), Geophysical Data Analysis and Inverse Theory
% with MATLAB(R) or Python, 5th Edition
% Code History:
% 2022-10-01, Imported from 4th Edition by W. Menke
% 2023-04-07, Edit and integration with text
% gdama10_01
% comparison of Normal and Exponential pdfs
 
clear all;
fprintf('gdama10_01\n');
gdama10_01
 
% d axis
N=101;
dmin = -5;
dmax = 5;
Dd = (dmax-dmin)/(N-1);
d = dmin + Dd*[0:N-1]';
 
% exponential distribution
dbar=0;
sd=1;
pE = (1/sqrt(2)) * (1/sd)* exp( -sqrt(2)*abs((d-dbar))/sd );
AEcheck = Dd*sum(pE);
dbarEcheck = Dd*sum(d.*pE);
sdEcheck = sqrt(Dd*sum((d-dbarEcheck).^2 .* pE));
 
% Normal distribution
pN = (1/sqrt(2*pi)) * (1/sd)* exp( -0.5*((d-dbar).^2)/(sd^2) );
ANcheck = Dd*sum(pN);
dbarNcheck = Dd*sum(d.*pN);
sdNcheck = sqrt(Dd*sum((d-dbarNcheck).^2 .* pN));
 
% check area, mean and stddev to make sure they are as expected
fprintf('exact A %f dbar %f sd %f\n', 1, dbar, sd);
exact A 1.000000 dbar 0.000000 sd 1.000000
fprintf('PE A %f dbar %f sd %f\n', AEcheck, dbarEcheck, sdEcheck);
PE A 1.000875 dbar -0.000000 sd 0.986603
fprintf('PN A %f dbar %f sd %f\n', ANcheck, dbarNcheck, sdNcheck);
PN A 1.000000 dbar 0.000000 sd 0.999994
 
% plot distribution
figure(1);
clf;
set(gca,'LineWidth',3);
hold on;
set(gca,'FontSize',14);
axis( [dmin, dmax, 0, 1] );
plot(d, pE, 'r-', 'LineWidth', 3 );
plot(d, pN, 'b-', 'LineWidth', 3 );
xlabel('d');
ylabel('p(d)');
fprintf('Caption: Exponential pdf (red) and Normal pdf (blue) with the same\n');
Caption: Exponential pdf (red) and Normal pdf (blue) with the same
fprintf('mean and variance.\n');
mean and variance.
 
% realizations of the two-sided exponential distribution, using MATLAB's
% one-sided exponential random number generator
Nr = 10000;
mu = sd/sqrt(2); % scale factor
rs = (2*(random('unid',2,Nr,1)-1)-1); % random sign
re = random('exponential',mu,Nr,1); % random 1-sided exp
dr = dbar + rs.*re;
% histogram
bins=-5+10*[0:100]'/100;
Db=bins(2)-bins(1);
pdest=hist(dr,bins)/(Nr*Db);
% max likelihood estimate of mean and stddev
dbarest = median(dr);
sdest = (sqrt(2)/Nr)*sum(abs(dr-dbarest));
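% (optional comparison, not in the original script) for contrast, the
% maximum likelihood estimates under a Normal model are the sample mean
% and the rms deviation; dbarN2 and sdN2 are names used only here
dbarN2 = mean(dr);
sdN2 = sqrt(sum((dr-dbarN2).^2)/Nr);
fprintf('Normal ML estimates: mean %f sd %f\n', dbarN2, sdN2);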
 
% plot histogram
figure();
clf;
set(gca,'LineWidth',3);
hold on;
set(gca,'FontSize',14);
axis( [-5, 5, 0, max(pE)] );
plot(bins,pdest,'k-','LineWidth',3);
plot(d, pE, 'r-', 'LineWidth', 2 );
xlabel('d');
ylabel('p(d)');
fprintf('Caption: Exponential pdf (red) and histogram (black) of an ensemble of realizations\n');
Caption: Exponential pdf (red) and histogram (black) of an ensemble of realizations
fprintf('drawn from it.\n');
drawn from it.
 
% some further info for verification purposes
fprintf('Maximum likelihood estimates for exponential distribution\n');
Maximum likelihood estimates for exponential distribution
fprintf('mean %f sd %f\n', dbarest, sdest);
mean 0.000034 sd 1.009892
% gdama10_02
% comparison of L1 and L2 error in estimating the mean of data
 
clear all;
fprintf('gdama10_02\n');
gdama10_02
 
figure();
clf;
 
% this is a one-dimensional grid search
% define limits of m, the mean of the data
M=1001;
mmin = 0;
mmax = 2;
Dm = (mmax-mmin)/(M-1);
m = mmin + Dm*[0:M-1]';
 
% randomly generate the data
N=12;
d = random('Uniform', mmin, mmax, N, 1 );
 
% L1 norm with even number of data
E1 = zeros(M,1);
for i=(1:M)
e=d-(m(i)*ones(N,1));
E1(i)=sum(abs(e));
end
id = floor((d-mmin)/Dm)+1;
 
% plot E(m) for this case
subplot(1,3,1)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
mminp=0;
mmaxp=2;
axis( [mminp, mmaxp, 0, 10] );
plot(m, E1, 'r-', 'LineWidth', 3 );
plot(m(id), E1(id), 'ko', 'LineWidth', 3 );
xlabel('m');
ylabel('E(m)');
 
% L1 norm with odd number of data, using the
% same data as previously, except omitting last point
N=N-1;
d = d(1:N);
E2 = zeros(M,1);
for i=(1:M)
e=d-(m(i)*ones(N,1));
E2(i)=sum(abs(e));
end
id = floor((d-mmin)/Dm)+1;
 
% plot E(m) for this case
subplot(1,3,2)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
mminp=0;
mmaxp=2;
axis( [mminp, mmaxp, 0, 10] );
plot(m, E2, 'r-', 'LineWidth', 3 );
plot(m(id), E2(id), 'ko', 'LineWidth', 3 );
xlabel('m');
ylabel('E(m)');
 
% L2 norm
E3 = zeros(M,1);
for i=(1:M)
e=d-(m(i)*ones(N,1));
E3(i)=sqrt(sum(e.^2));
end
id = floor((d-mmin)/Dm)+1;
 
% plot E(m) for this case
subplot(1,3,3)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
mminp=0;
mmaxp=2;
axis( [mminp, mmaxp, 0, 10] );
plot(m, E3, 'r-', 'LineWidth', 3 );
plot(m(id), E3(id), 'ko', 'LineWidth', 3 );
xlabel('m');
ylabel('sqrt E(m)');
 
fprintf('Caption: Error E(m) for estimating the mean of the data (data values marked by circles), for (left) L1 error, even\n');
Caption: Error E(m) for estimating the mean of the data (data values marked by circles), for (left) L1 error, even
fprintf('number of points, (middle) L1 error, odd number of points, and (right) L2 error.\n');
number of points, (middle) L1 error, odd number of points, and (right) L2 error.
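% (optional check, not in the original script) the grid-search minimizers
% should match the sample median (L1 norm, odd N) and the sample mean
% (L2 norm) to within the grid spacing Dm; iE2 and iE3 are names used only here
[Emin2, iE2] = min(E2);
[Emin3, iE3] = min(E3);
fprintf('L1 minimizer %f sample median %f\n', m(iE2), median(d));
fprintf('L2 minimizer %f sample mean %f\n', m(iE3), mean(d));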
% gdama10_03
%
% L1 norm inverse problem, overdetermined case
% solved by transformation to a linear programming problem
 
clear all;
fprintf('gdama10_03\n');
gdama10_03
 
% auxiliary variable, z
N=20;
zmin = 0;
zmax = 1;
Dz = (zmax-zmin)/(N-1);
z = zmin + Dz*[0:N-1]';
 
% set up for cubic fit
M=4;
mtrue = [4, 3, 2, 1]';
G = [ones(N,1), z, z.^2, z.^3];
dtrue = G*mtrue;
sd = 1 * ones(N,1);
dobs=zeros(N,1);
 
% add exponentially-distributed random noise
for i=(1:N)
r=random('exp',sd(i)/sqrt(2)).*(2*(random('unid',2)-1.5));
dobs(i) = dtrue(i) + r;
end
 
% outlier
dobs(N)=5;
 
% least squares solution (sure easier!)
mls = (G'*G)\(G'*dobs);
dls = G*mls;
 
% L1 solution
% set up for linear programming problem
% min f*x subject to A x <= b, Aeq x = beq
 
% variables
% m = mp - mpp
% x = [mp', mpp', alpha', x', xp']
% with mp, mpp length M and alpha, x, xp, length N
L = 2*M+3*N; % number of unknowns
x = zeros(L,1); % unknowns in vector x
 
% f is length L
% minimize sum alpha(i)/sd(i)
f = zeros(L,1);
f(2*M+1:2*M+N)=1./sd;
 
% equality constraints
Aeq = zeros(2*N,L);
beq = zeros(2*N,1);
% first equation G(mp-mpp)+x-alpha=d
Aeq(1:N,1:M) = G;
Aeq(1:N,M+1:2*M) = -G;
Aeq(1:N,2*M+1:2*M+N) = -eye(N,N);
Aeq(1:N,2*M+N+1:2*M+2*N) = eye(N,N);
beq(1:N) = dobs;
% second equation G(mp-mpp)-xp+alpha=d
Aeq(N+1:2*N,1:M) = G;
Aeq(N+1:2*N,M+1:2*M) = -G;
Aeq(N+1:2*N,2*M+1:2*M+N) = eye(N,N);
Aeq(N+1:2*N,2*M+2*N+1:2*M+3*N) = -eye(N,N);
beq(N+1:2*N) = dobs;
 
% inequality constraints A x <= b
% part 1: everything positive
A = zeros(L+2*M,L);
b = zeros(L+2*M,1);
A(1:L,:) = -eye(L,L);
b(1:L) = zeros(L,1);
% part 2: mp and mpp have an upper bound. Note:
% Without this constraint, a potential problem is that
% mp and mpp are individually unbounded, even though their
% difference, m=mp-mpp, is not. Thus there might be cases
% where the algorithm strays to very large mp and mpp.
A(L+1:L+2*M,:) = eye(2*M,L);
mupperbound=10*max(abs(mls)); % might need to be adjusted for problem at hand
b(L+1:L+2*M) = mupperbound;
 
% solve linear programming problem
[x, fmin] = linprog(f,A,b,Aeq,beq);
Optimal solution found.
fmin=-fmin;
mest = x(1:M) - x(M+1:2*M);
dpre = G*mest;
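% (optional check, not in the original script) the L1 estimate should have a
% weighted L1 misfit no larger than that of the least squares estimate;
% EL1pre and EL1ls are names used only here
EL1pre = sum(abs(dobs-dpre)./sd);
EL1ls = sum(abs(dobs-dls)./sd);
fprintf('weighted L1 misfit: L1 solution %f, L2 solution %f\n', EL1pre, EL1ls);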
 
% plot true, observed & predicted data
figure(1);
clf;
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [zmin, zmax, 0, max(dtrue) ] );
plot( z, dtrue, 'r-', 'Linewidth', 2);
plot( z, dpre, 'g-', 'Linewidth', 3);
plot( z, dls, 'b-', 'Linewidth', 2);
plot( z, dobs, 'ro', 'Linewidth', 2);
xlabel('z');
ylabel('d');
fprintf('Caption: Observed data (circles) scatter around a cubic polynomial (red)\n');
Caption: Observed data (circles) scatter around a cubic polynomial (red)
fprintf('and are fit by L1 (green) and L2 (blue) methods.\n');
and are fit by L1 (green) and L2 (blue) methods.
 
% gdama10_04
% example of solving an L1 problem by iterating
% a weighted solution to an L2 problem. The example chosen here is
% [d1 d2 ... dN]' = [1 1 ... 1]' m
% since we know that the solution is the mean when dobs is
% Normally-distributed, that is, mest=mean(dobs) and that the
% solution is the median when dobs is exponentially-distributed,
% that is mest=median(dobs). The quantity f = (dmean-mest)/(dmean-dmedian)
% is used as a quality of solution indicator. It measures how
% much closer the solution is to the median than the mean.
% The script creates the empirical probability density function p(f).
 
% Note: The script is a bit sluggish because the L1 problem
% is being solved many times to generate a histogram
 
clear all;
fprintf('gdama10_04\n');
gdama10_04
 
% Allows you to toggle between different ways of initializing
% the solution.
USEL2 = 1;
USETRUE = 0;
USEZERO = 0;
USEONE = 0;
 
% Allows you to toggle between the simple formula for the weights and
% Frommlet & Nuel's (2016) more complicated (but equivalent and
% more numerically stable) version
USEFN = 1;
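% (optional check, not in the original script) for the values n=1, gamma=2,
% delta=1e-5 used below, the simple formula w=(|e|^gamma+delta^gamma)^((n-2)/gamma)
% and Frommlet & Nuel's log1p form should agree; nchk, gchk, dchk, echk,
% wsimple and wFN are names used only here
nchk=1; gchk=2; dchk=1e-5;
echk=1e-3; % a test residual larger than delta
wsimple = (echk^gchk + dchk^gchk)^((nchk-2)/gchk);
wFN = (dchk^(nchk-2))*exp(((nchk-2)/gchk)*log1p((echk/dchk)^gchk));
fprintf('simple weight %f Frommlet & Nuel weight %f\n', wsimple, wFN);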
 
% true solution
mtrue = 0;
 
% setup for histogram
Nbins=101;
binleft = (-2);
binright = 2;
Dbin=(binright-binleft);
bins = binleft + Dbin*[0:Nbins-1]'/(Nbins-1);
 
Nreal = 100; % number of realizations
f = zeros(Nreal,1); % f = (dmean-mest)/(dmean-dmedian)
for j=(1:Nreal) % loop over realizations
 
% exponentially-distributed noise
N = 101; % must be odd so that median is unique
 
% generate exponentially-distributed random noise
smp = 0.1;
mu = smp/sqrt(2);
rsign = (2*(random('unid',2,N,1)-1.5));
dn = rsign.*random('exponential',mu,N,1);
 
% set up problem
G = ones(N,1); % data kernel says d(i) = 1 m(1)
dtrue = G*mtrue; % true data
dobs = dtrue + dn; % observed data is true data plus noise
dmean=mean(dobs); % sample mean is poor estimate of m(1)
dmedian = median(dobs); % median is good estimate of m(1)
H = abs( dmean - dmedian ); % for plotting purposes
 
Ni=50; % number of iterations of solution
if( j==1 ) % plot solution as a function of iteration,
% but only for first realization
figure(1);
clf;
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, Ni+1, min(dmean,dmedian)-H/4, max(dmean,dmedian)+H/4] );
plot( [0; Ni], [dmean, dmean], 'k:', 'LineWidth', 2 );
plot( [0; Ni], [dmedian, dmedian], 'k-', 'LineWidth', 2 );
xlabel('iteration k');
ylabel('m(k)');
end
 
% setup for iterative solution
delta = 1e-5;
gamma = 2;
n = 1;
% initial solution
if( USEL2 == 1 )
W = eye(N);
mest = (G'*W*G)\(G'*W*dobs); % L2 solution
elseif( USETRUE == 1 )
mest = mtrue; % true solution
elseif( USEZERO == 1 )
mest = 0; % zero solution
elseif( USEONE == 1 )
mest = 1; % unity solution
end
for i = (1:Ni) % iterative solution
e = dobs-G*mest; % prediction error
ae = abs(e);
if( USEFN ) % Frommlet & Nuel's (2016) weight formula
w = zeros(N,1);
for k=(1:N)
if( ae(k)>=delta )
w(k)=(delta^(n-2))*exp(((n-2)/gamma)*log1p((ae(k)/delta)^gamma));
else
w(k)=(ae(k)^(n-2))*exp(((n-2)/gamma)*log1p((delta/ae(k))^gamma));
end
end
else % simple weight formula
w = ((ae.^gamma) + (delta^gamma) ).^((n-2)/gamma); % weight matrix
end
W = diag(w);
mest = (G'*W*G)\(G'*W*dobs); % L2 solution
if( j==1 ) % plot, but only first realization
plot( [i], [mest], 'ko', 'LineWidth', 2 );
end
end % end loop over iterations in solution
 
% f is a quality of solution indicator
% the solution should be the median, not the mean
% f is the normalized closeness of the solution to the median
f(j) = (dmean-mest)/(dmean-dmedian);
 
end % end loop over realizations
fprintf('Caption: Evolution of solution m as a function of iteration number k.\n');
Caption: Evolution of solution m as a function of iteration number k.
fprintf('Mean (dashed line) and median (solid line) are shown for reference\n');
Mean (dashed line) and median (solid line) are shown for reference
 
 
% probability density for f
p = hist( f, bins );
p = p/(Dbin*sum(p));
 
figure(2);
clf;
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
pmax = 0.3;
axis( [binleft, binright, 0, pmax] );
plot( [0;0], [0;pmax], 'k:', 'LineWidth', 2 );
plot( [1;1], [0;pmax], 'k:', 'LineWidth', 2 );
plot( bins(2:end-1), p(2:end-1), 'k-', 'LineWidth', 3 );
xlabel('f');
ylabel('p(f)');
title('probability density function p(f)');
 
fprintf('Caption: Empirical pdf of the solution quality parameter f for %d trials\n',Nreal);
Caption: Empirical pdf of the solution quality parameter f for 100 trials
fprintf('of the reweighting method. The pdf is strongly peaked at the median f=1,\n');
of the reweighting method. The pdf is strongly peaked at the median f=1,
fprintf('and has negligible probability at the mean f=0.\n');
and has negligible probability at the mean f=0.
 
% gdama10_05
%
% Linf norm inverse problem, overdetermined case
% via transformation to a linear programming problem
 
clear all;
fprintf('gdama10_05\n');
gdama10_05
 
% auxiliary variable, z
N=20;
zmin = 0;
zmax = 1;
Dz = (zmax-zmin)/(N-1);
z = zmin + Dz*[0:N-1]';
 
% set up for cubic fit
M=4;
mtrue = [4, 3, 2, 1]';
G = [ones(N,1), z, z.^2, z.^3];
dtrue = G*mtrue;
sd = 1 * ones(N,1);
dobs=zeros(N,1);
 
for i=(1:N)
r=random('exp',sd(i)/sqrt(2)).*(2*(random('unid',2)-1.5));
dobs(i) = dtrue(i) + r;
end
 
% outlier
dobs( floor(0.37*N) ) = 2.5;
 
% least squares solution (sure easier!)
mls = (G'*G)\(G'*dobs);
dls = G*mls;
 
% Linf solution
% set up for linear programming problem
% min f*x subject to A x <= b, Aeq x = beq
 
% variables
% m = mp - mpp
% x = [mp', mpp', alpha', x', xp']
% with mp, mpp length M; alpha length 1, x, xp, length N
L = 2*M+1+2*N; % number of unknowns
x = zeros(L,1); % unknowns in vector x
 
% f is length L
% minimize alpha
f = zeros(L,1);
f(2*M+1:2*M+1)=1;
 
% equality constraints
Aeq = zeros(2*N,L);
beq = zeros(2*N,1);
% first equation G(mp-mpp) - alpha./sd + x = d
Aeq(1:N,1:M) = G;
Aeq(1:N,M+1:2*M) = -G;
Aeq(1:N,2*M+1:2*M+1) = -1./sd;
Aeq(1:N,2*M+1+1:2*M+1+N) = eye(N,N);
beq(1:N) = dobs;
% second equation G(mp-mpp) + alpha./sd - xp = d
Aeq(N+1:2*N,1:M) = G;
Aeq(N+1:2*N,M+1:2*M) = -G;
Aeq(N+1:2*N,2*M+1:2*M+1) = 1./sd;
Aeq(N+1:2*N,2*M+1+N+1:2*M+1+2*N) = -eye(N,N);
beq(N+1:2*N) = dobs;
 
% inequality constraints A x <= b
% part 1: everything positive
A = zeros(L+2*M,L);
b = zeros(L+2*M,1);
A(1:L,:) = -eye(L,L);
b(1:L) = zeros(L,1);
% part 2: mp and mpp have an upper bound. Note:
% Without this constraint, a potential problem is that
% mp and mpp are individually unbounded, even though their
% difference, m=mp-mpp, is not. Thus there might be cases
% where the algorithm strays to very large mp and mpp.
A(L+1:L+2*M,:) = eye(2*M,L);
mupperbound=10*max(abs(mls)); % might need to be adjusted for problem at hand
b(L+1:L+2*M) = mupperbound;
 
% solve linear programming problem
[x, fmin] = linprog(f,A,b,Aeq,beq);
Optimal solution found.
fmin=-fmin;
mest = x(1:M) - x(M+1:2*M);
dpre = G*mest;
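% (optional check, not in the original script) the Linf estimate should have a
% maximum weighted residual no larger than that of the least squares estimate;
% Einfpre and Einfls are names used only here
Einfpre = max(abs(dobs-dpre)./sd);
Einfls = max(abs(dobs-dls)./sd);
fprintf('max weighted residual: Linf solution %f, L2 solution %f\n', Einfpre, Einfls);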
 
% plot
figure();
clf;
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [zmin, zmax, 0, max(dtrue) ] );
plot( z, dtrue, 'r-', 'Linewidth', 2);
plot( z, dpre, 'g-', 'Linewidth', 3);
plot( z, dls, 'b-', 'Linewidth', 2);
plot( z, dobs, 'ro', 'Linewidth', 2);
xlabel('z');
ylabel('d');
fprintf('Caption: Observed data (circles) scatter around a cubic curve (red).\n');
Caption: Observed data (circles) scatter around a cubic curve (red).
fprintf('Data are fit with L2 (blue) and Linf (green) methods.\n');
Data are fit with L2 (blue) and Linf (green) methods.
% gdama10_06
 
% An example of solving a minimum length problem under
% the L0 and L1 norms, by the iterative reweighting algorithm
% The example chosen here is
% d(t) = g(t) * m(t) where * is convolution
% The convolution is represented by a Toeplitz matrix G
clear all;
fprintf('gdama10_06\n');
gdama10_06
 
% time variable t
N=100;
M=N;
t = [0:N-1]';
 
% a gaussian pulse g(t)
t0 = 10;
s = 2.5;
g = exp( -((t-t0).^2)/(2*s^2) ); % gaussian g(t)
 
% matrix equivalent to convolution by g
G = toeplitz( g, [g(1), zeros(1,N-1)] );
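% (optional check, not in the original script) multiplying by G should agree
% with MATLAB's conv() truncated to the first N samples; mchk and dconv are
% names used only here
mchk = random('Normal',0,1,N,1);
dconv = conv(g,mchk);
fprintf('max |G*m - conv(g,m)|: %e\n', max(abs(G*mchk-dconv(1:N))));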
 
% true solution is a collection of spikes
mtrue = zeros(N,1);
mtrue(5)=1; mtrue(20)=0.5; mtrue(40)=0.25;
 
dtrue = G*mtrue; % true data
 
% observed data is true data plus random noise
sd = 0.05;
dobs = dtrue+random('Normal',0,sd,N,1);
 
% setup for L1 norm
n = 1;
delta = 1e-5;
gamma = 2;
mu = 0.01;
w = ones(M,1);
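% (explanatory note, not in the original script) each pass below solves the
% damped weighted least squares system (G'G + mu*diag(w)) m = G'd; with the
% weights updated to w(k) ~ |m(k)|^(n-2), the damping term m'*diag(w)*m
% approximates sum(|m(k)|^n), so repeated reweighting drives the solution
% toward the Ln-norm-regularized estimate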
for j=(1:10) % reweighting algorithm
mest = (G'*G + mu*diag(w))\(G'*dobs);
dpre = G*mest;
e = dobs-dpre;
E = e'*e;
if( j==1 ) % save L2 solution
mL2 = mest;
EL2 = E;
end
am = abs(mest); % Frommlet & Nuel's (2016) weight formula
for k=(1:M)
if( am(k)>=delta )
w(k)=(delta^(n-2))*exp(((n-2)/gamma)*log1p((am(k)/delta)^gamma));
else
w(k)=(am(k)^(n-2))*exp(((n-2)/gamma)*log1p((delta/am(k))^gamma));
end
end
end
mL1 = mest;
EL1 = E;
 
% setup for L0 norm (actually 0.1 norm)
n=0.1;
mu = 0.01;
epsi = 1.0e-6;
w = ones(M,1);
for j=(1:10) % reweighting algorithm
mest = (G'*G + mu*diag(w))\(G'*dobs);
dpre = G*mest;
e = dobs-dpre;
E = e'*e;
am = abs(mest); % Frommlet & Nuel's (2016) weight formula
for k=(1:M)
if( am(k)>=delta )
w(k)=(delta^(n-2))*exp(((n-2)/gamma)*log1p((am(k)/delta)^gamma));
else
w(k)=(am(k)^(n-2))*exp(((n-2)/gamma)*log1p((delta/am(k))^gamma));
end
end
end
mL0 = mest;
EL0 = E;
 
fprintf('EL2 %f EL1 %f EL0 %f sd %f\n', EL2, EL1, EL0, sd );
EL2 0.208761 EL1 0.225852 EL0 0.302935 sd 0.050000
 
figure();
clf;
subplot(3,1,1); % plot g(t)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, N-1, -0.5, 1.5] );
xlabel('t (s)');
ylabel('g(t)');
%title('gtrue (black)');
plot( t, g, 'k-', 'LineWidth', 4);
 
subplot(3,1,3); % plot d(t)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, N-1, -0.5, 1.5] );
xlabel('t (s)');
ylabel('d(t)');
%title('dtrue (black), dobs (red), dpre (green)');
plot( t, dtrue, 'k-', 'LineWidth', 6);
plot( t, dobs, 'r-', 'LineWidth', 2);
plot( t, dpre, 'g-', 'LineWidth', 2);
 
subplot(3,1,2); % plot m(t)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, N-1, -0.5, 1.0] );
xlabel('t (s)');
ylabel('m(t)');
%title('mtrue (black), L2 (green) L1 (blue), L0 (red)');
plot( t, mtrue, 'k-', 'LineWidth', 4);
plot( t, mL2, 'g-', 'LineWidth', 4);
plot( t, mL1, 'b-', 'LineWidth', 4);
plot( t, mL0, 'r-', 'LineWidth', 2);
fprintf('Caption: (Top) Filter g(t). (Middle) True solution m (black) and estimates by\n');
Caption: (Top) Filter g(t). (Middle) True solution m (black) and estimates by
fprintf('L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)\n');
L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)
fprintf('and predicted by L0, L1 and L2 methods (all identical, all green)\n');
and predicted by L0, L1 and L2 methods (all identical, all green)
 
figure(); % close up of one of the spikes
clf;
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, 10, -0.5, 1.0] );
xlabel('t (s)');
ylabel('m(t)');
title('mtrue (black), L2 (green) L1 (blue), L0 (red)');
plot( t, mtrue, 'k-', 'LineWidth', 4);
plot( t, mL2, 'g-', 'LineWidth', 4);
plot( t, mL1, 'b-', 'LineWidth', 4);
plot( t, mL0, 'r-', 'LineWidth', 2);
fprintf('Caption: Enlargement of previous figure\n');
Caption: Enlargement of previous figure
fprintf('(Top) Filter g(t). (Middle) True solution m (black) and estimates by\n');
(Top) Filter g(t). (Middle) True solution m (black) and estimates by
fprintf('L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)\n');
L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)
fprintf('and predicted by L0, L1 and L2 methods (all identical, all green)\n');
and predicted by L0, L1 and L2 methods (all identical, all green)
% gdama10_07
 
% An example of solving a minimum gradient problem under
% the L0 and L1 norms using the iterative reweighting algorithm
% (The L1 solution is the TV solution).
% The example chosen here is
% d(t) = g(t) * m(t) where * is convolution
% The convolution is represented by a Toeplitz matrix G
 
clear all;
fprintf('gdama10_07\n');
gdama10_07
 
% time variable t
N=100;
M=N;
t = [0:N-1]';
 
% gaussian pulse g(t)
t0 = 10;
s = 2.5;
g = exp( -((t-t0).^2)/(2*s^2) ); % gaussian g(t)
 
% matrix equivalent to convolution by g
G = toeplitz( g, [g(1), zeros(1,N-1)] );
 
% true solution is a boxcar function
mtrue = zeros(N,1);
mtrue(30:60)=0.8;
 
% true data
dtrue = G*mtrue;
 
% observed data is true data plus random noise
sd = 0.05;
dobs = dtrue+random('Normal',0,sd,N,1);
 
% first difference operator
D = toeplitz( [-1; zeros(M-2,1)], [-1, 1, zeros(1,M-2) ] );
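% (optional check, not in the original script) D should implement the first
% difference, so D*m and MATLAB's diff(m) should agree; mchk is a name used
% only here
mchk = random('Normal',0,1,M,1);
fprintf('max |D*m - diff(m)|: %e\n', max(abs(D*mchk-diff(mchk))));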
 
% setup for L1 norm
n = 1;
delta = 1e-5;
gamma = 2;
mu = 0.01;
w = ones(M-1,1);
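% (explanatory note, not in the original script) here the damping term is
% (D*m)'*diag(w)*(D*m); with the weights updated to w(k) ~ |[D*m](k)|^(n-2)
% it approximates sum(|[D*m](k)|^n), which for n=1 is the total variation of m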
for j=(1:10) % reweighting algorithm
mest = (G'*G + mu*D'*diag(w)*D)\(G'*dobs);
dpre = G*mest;
e = dobs-dpre;
E = e'*e;
if( j==1 ) % save L2 solution
mL2 = mest;
EL2 = E;
end
v = D*mest;
av = abs(v); % Frommlet & Nuel's (2016) weight formula
for k=(1:M-1)
if( av(k)>=delta )
w(k)=(delta^(n-2))*exp(((n-2)/gamma)*log1p((av(k)/delta)^gamma));
else
w(k)=(av(k)^(n-2))*exp(((n-2)/gamma)*log1p((delta/av(k))^gamma));
end
end
end
mL1 = mest;
EL1 = E;
 
% setup for L0 norm (actually 0.1 norm)
n=0.1;
mu = 0.01;
epsi = 1.0e-6;
w = ones(M-1,1);
for j=(1:10) % reweighting algorithm
mest = (G'*G + mu*D'*diag(w)*D)\(G'*dobs);
dpre = G*mest;
e = dobs-dpre;
E = e'*e;
v = D*mest;
av = abs(v); % Frommlet & Nuel's (2016) weight formula
for k=(1:M-1)
if( av(k)>=delta )
w(k)=(delta^(n-2))*exp(((n-2)/gamma)*log1p((av(k)/delta)^gamma));
else
w(k)=(av(k)^(n-2))*exp(((n-2)/gamma)*log1p((delta/av(k))^gamma));
end
end
end
mL0 = mest;
EL0 = E;
 
% print out summary statistics
fprintf('EL2 %f EL1 %f EL0 %f sd %f\n', EL2, EL1, EL0, sd );
EL2 0.116816 EL1 0.130686 EL0 0.176502 sd 0.050000
 
figure();
clf;
 
subplot(3,1,1); % plot g(t)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, N-1, -0.5, 1.5] );
xlabel('t (s)');
ylabel('g(t)');
%title('gtrue (black)');
plot( t, g, 'k-', 'LineWidth', 4);
 
subplot(3,1,3); % plot d(t)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, N-1, -5, 10] );
xlabel('t (s)');
ylabel('d(t)');
%title('dtrue (black), dobs (red), dpre (green)');
plot( t, dtrue, 'k-', 'LineWidth', 6);
plot( t, dobs, 'r-', 'LineWidth', 2);
plot( t, dpre, 'g-', 'LineWidth', 2);
 
subplot(3,1,2); % plot m(t)
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [0, N-1, -0.5, 1.0] );
xlabel('t (s)');
ylabel('m(t)');
%title('mtrue (black), L2 (green) L1 (blue), L0 (red)');
plot( t, mtrue, 'k-', 'LineWidth', 4);
plot( t, mL2, 'g-', 'LineWidth', 4);
plot( t, mL1, 'b-', 'LineWidth', 4);
plot( t, mL0, 'r-', 'LineWidth', 2);
fprintf('Caption: (Top) Filter g(t). (Middle) True solution m (black) and estimates by\n');
Caption: (Top) Filter g(t). (Middle) True solution m (black) and estimates by
fprintf('L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)\n');
L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)
fprintf('and predicted by L0, L1 and L2 methods (all identical, all green)\n');
and predicted by L0, L1 and L2 methods (all identical, all green)
 
figure(); % close up of edge of boxcar
clf;
set(gca,'LineWidth',3);
set(gca,'FontSize',14);
hold on;
axis( [25, 35, -0.5, 1.0] );
xlabel('t (s)');
ylabel('m(t)');
%title('mtrue (black), L2 (green) L1 (blue), L0 (red)');
plot( t, mtrue, 'k-', 'LineWidth', 4);
plot( t, mL2, 'g-', 'LineWidth', 4);
plot( t, mL1, 'b-', 'LineWidth', 4);
plot( t, mL0, 'r-', 'LineWidth', 2);
fprintf('Caption: Enlargement of previous figure.\n');
Caption: Enlargement of previous figure.
fprintf('(Top) Filter g(t). (Middle) True solution m (black) and estimates by\n');
(Top) Filter g(t). (Middle) True solution m (black) and estimates by
fprintf('L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)\n');
L0 (red), L1 (blue) and L2 (green) methods. (Bottom) True data d=m*g (black)
fprintf('and predicted by L0, L1 and L2 methods (all identical, all green)\n');
and predicted by L0, L1 and L2 methods (all identical, all green)