% gdama07
% W. Menke (2024), Geophysical Data Analysis and Inverse Theory
% with MATLAB(R) or Python, 5th Edition
% Code History:
% 2022-10-01, Imported from 4th Edition by W. Menke
% 2023-04-05, Edit and integration with text
% 2023-04-23, A few corrections
% 2024-03-06, Corrections to 07_05 and 07_06 to BC's, construction
% of f and plotting scale for errors
% gdama07_01
% makes random images with exponential and Gaussian autocorrelations
clear all;
fprintf('gdama07_01\n');
% note: in order to prevent overwriting file '../Data/random_fields.txt'
% the output filename is changed to '../Data/my_random_fields.txt'
% field should have variance sigmam2
sigmam2=1;
sigmam = sqrt(sigmam2);
% field should have autocorrelation with scale factor, s
s = 5; % scale factor
s2 = s^2; % scale factor squared
% standard set-up of position x and wavenumber kx-axis
Nx=40;
Nkx=Nx/2+1;
Dx=1;
x=Dx*[0:Nx-1]';
xmax = Dx*(Nx-1);
kxmax=2*pi/(2*Dx);
Dkx=kxmax/(Nx/2);
kx=Dkx*[0:Nx/2,-Nx/2+1:-1]';
ixmid = floor(Nx/2);
% standard set-up of position y and wavenumber ky-axis
Ny=40;
Nky=Ny/2+1;
Dy=1;
y=Dy*[0:Ny-1]';
ymax = Dy*(Ny-1);
kymax=2*pi/(2*Dy);
Dky=kymax/(Ny/2);
ky=Dky*[0:Ny/2,-Ny/2+1:-1]';
iymid = floor(Ny/2);
% PART 1: exponential autocorrelation, Cxy (unnormalized)
CxyE = zeros(Nx,Ny); % target autocovariance (unnormalized)
for ix=(1:Nx)
for iy=(1:Ny)
r = abs(x(ix)-x(ixmid))+abs(y(iy)-y(iymid));
CxyE(ix,iy) = exp(-r/s);
end
end
% target covariance
c = CxyE;
% make m(x,y) with desired autocovariance, c using fact that
% amplitude spectrum of autocorrelation is power spectrum of data
sqrtabsctt = abs(fft2(c)).^0.5; % target asd of m
n = random('Normal',0.0,1.0,Nx,Ny); % random noise
m = ifft2(sqrtabsctt.*fft2(n)); % shape the spectrum of n into m
sigmamest = std(m(:)); % sqrt variance
m = (sigmam/sigmamest)*m; % normalize to correct variance
% save result to variable with a better name
SEtrue = m;
CxyEest = ifft2(abs(fft2(SEtrue)).^2); % unnormalized autocorrelation
% put into more reasonable order
CxyEest2=zeros(Nx,Ny);
CxyEest2(Nx-Nkx+1:Nx,Ny-Nky+1:Ny) = CxyEest(1:Nkx,1:Nky);
CxyEest2(1:Nkx-2,Ny-Nky+1:Ny) = CxyEest(Nkx+1:Nx,1:Nky);
CxyEest2(1:Nkx-2,1:Nky-2) = CxyEest(Nkx+1:Nx,Nky+1:Ny);
CxyEest2(Nkx-1:Nx,1:Nky-2) = CxyEest(1:Nkx,Nky+1:Ny);
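% Optional check (an illustrative sketch using the variables defined above):
% the same spectral-shaping idea in 1D; a field with target autocovariance
% c1 is made by scaling the spectrum of white noise by sqrt(|FFT(c1)|)
c1 = exp(-abs(x-x(ixmid))/s); % 1D exponential autocovariance
n1 = random('Normal',0.0,1.0,Nx,1); % white noise
m1 = real(ifft(sqrt(abs(fft(c1))).*fft(n1))); % shaped field
m1 = (sigmam/std(m1))*m1; % rescale to variance sigmam2
fprintf('1D check: std of shaped field %.3f (target %.3f)\n',std(m1),sigmam);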
% PART 2: Gaussian autocorrelation, Cxy (unnormalized)
CxyG = zeros(Nx,Ny); % target autocovariance (unnormalized)
for ix=(1:Nx)
for iy=(1:Ny)
r2 = abs(x(ix)-x(ixmid))^2+abs(y(iy)-y(iymid))^2;
CxyG(ix,iy) = exp(-0.5*r2/s2);
end
end
% target auto-covariance
c = CxyG;
% make m(x,y) with desired autocovariance, c using fact that
% amplitude spectrum of autocorrelation is power spectrum of data
sqrtabsctt = abs(fft2(c)).^0.5; % target asd of m
% n = random('Normal',0.0,1.0,Nx,Ny); % use same random noise
m = ifft2(sqrtabsctt.*fft2(n)); % shape the spectrum of n into m
sigmamest = std(m(:)); % sqrt variance
m = (sigmam/sigmamest)*m; % normalize to correct variance
% save result to variable with a better name
SGtrue = m;
CxyGest = ifft2(abs(fft2(SGtrue)).^2); % unnormalized autocorrelation
% put into more reasonable order
CxyGest2=zeros(Nx,Ny);
CxyGest2(Nx-Nkx+1:Nx,Ny-Nky+1:Ny) = CxyGest(1:Nkx,1:Nky);
CxyGest2(1:Nkx-2,Ny-Nky+1:Ny) = CxyGest(Nkx+1:Nx,1:Nky);
CxyGest2(1:Nkx-2,1:Nky-2) = CxyGest(Nkx+1:Nx,Nky+1:Ny);
CxyGest2(Nkx-1:Nx,1:Nky-2) = CxyGest(1:Nkx,Nky+1:Ny);
% randomly sample images
N = 256;
ix = randi(Nx,N,1);
iy = randi(Ny,N,1);
xd = x(ix);
yd = y(iy);
dE = zeros(N,1);
dG = zeros(N,1);
for i=(1:N)
dE(i) = SEtrue(ix(i),iy(i));
dG(i) = SGtrue(ix(i),iy(i));
end
gda_draw(CxyE,'caption true',CxyEest2,'caption est');
fprintf('Caption: 2D exponential autocovariance, (left) true, (right) realized\n');
gda_draw(CxyG,'caption true',CxyGest2,'caption est');
fprintf('Caption: 2D Gaussian autocovariance, (left) true, (right) realized\n');
figure();
clf;
subplot(1,2,1);
hold on;
axis([0,xmax,0,ymax])
set(gca,'Ydir','reverse');
% ix=40, iy=20; Strue=zeros(Nx,Ny), Strue(ix,iy)=1; to check orientation
colormap('jet');
imagesc([0,xmax],[0,ymax],SEtrue');
plot(xd,yd,'k.');
xlabel('x');
ylabel('y');
subplot(1,2,2);
hold on;
axis([0,xmax,0,ymax])
set(gca,'Ydir','reverse');
colormap('jet');
imagesc([0,xmax],[0,ymax],SGtrue');
plot(xd,yd,'k.');
xlabel('x');
ylabel('y');
fprintf('(left) 2D field with exponential autocovariance, (right) 2D field\n');
fprintf('with Gaussian autocovariance. Both fields are randomly sampled (dots)\n');
dlmwrite( '../Data/my_random_fields.txt',[xd,yd,dE,dG],'delimiter','\t');
% gdama07_02
% comparison of three versions of [cov m]
% and their operator versions D=[cov m]^(-1/2)
clear all
fprintf('gdama07_02\n');
% x-axis
N=100;
Dx=1;
x=Dx*[0:N-1]';
xmax = Dx*(N-1);
% first derivative
D1=toeplitz( [1; -1; zeros(N-2,1) ], [1,zeros(1,N-1)] );
D1(1,N)=1; % modify to make boundary condition symmetric
D1 = (1/Dx)*D1;
% second and fourth derivative
D2 = D1*D1;
D4 = D2*D2;
% Exponential Covariance Matrix
s=10/xmax;
C = zeros(N,N);
for i=(1:N)
for j=(1:N)
y = s*Dx*abs(i-j);
C(i,j)=exp(-y);
end
end
% corresponding D
D = (1/s)*D1+eye(N);
% corresponding covariance
DTD = D'*D;
C2 = inv(DTD);
% save
CE=C;
CE2=C2;
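% Optional check (an illustrative sketch using the variables defined above):
% compare the normalized central columns of the exact exponential covariance
% CE and the covariance CE2 implied by the operator D
icchk = floor(N/2);
echk = max(abs(CE(:,icchk)/max(CE(:,icchk)) - CE2(:,icchk)/max(CE2(:,icchk))));
fprintf('exponential case: max normalized central-column difference %.3f\n',echk);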
% Long-Tailed Bell Covariance Matrix
s=20/xmax;
C = zeros(N,N);
for i=(1:N)
for j=(1:N)
y = s*Dx*abs(i-j);
C(i,j)=(1+y)*exp(-y);
end
end
% corresponding D
D = (1/s^2)*D2-eye(N);
% corresponding covariance
DTD = D'*D;
C2 = inv(DTD);
% save
CL=C;
CL2=C2;
% Short-Tailed Bell (Gaussian) Covariance Matrix
s=20/xmax;
C = zeros(N,N);
for i=(1:N)
for j=(1:N)
y = s*Dx*abs(i-j);
C(i,j)=exp(-y^2);
end
end
% corresponding D
D = eye(N) - (1/4)*(1/s^2)*D2 +(1/32)*(1/s^4)*D4;
% corresponding covariance
DTD = D'*D;
C2 = inv(DTD);
% save
CG=C;
CG2=C2;
fprintf('Exact covariance\n');
% gda_draw( CE, 'caption CE', CL, 'caption CL',CG,'caption CG');
fprintf('Covariance implied by D\n');
% gda_draw( CE2, 'caption CE', CL2, 'caption CL',CG2,'caption CG');
figure();
clf;
subplot(2,3,1);
hold on;
axis ij
axis off
axis([1,N,1,N]);
set(gca,'LineWidth',2);
set(gca,'FontSize',14);
colormap('jet');
caxis([-1,1]);
imagesc(CE);
subplot(2,3,2);
hold on;
axis ij
axis off
axis([1,N,1,N]);
set(gca,'LineWidth',2);
set(gca,'FontSize',14);
colormap('jet');
caxis([-1,1]);
imagesc(CL);
subplot(2,3,3);
hold on;
axis ij
axis off
axis([1,N,1,N]);
colormap('jet');
caxis([-1,1]);
imagesc(CG);
subplot(2,3,4);
hold on;
axis ij
axis off
axis([1,N,1,N]);
colormap('jet');
caxis([-1,1]);
imagesc(CE2/max(max(CE2)));
subplot(2,3,5);
hold on;
axis ij
axis off
axis([1,N,1,N]);
colormap('jet');
caxis([-1,1]);
imagesc(CL2/max(max(CL2)));
subplot(2,3,6);
hold on;
axis ij
axis off
axis([1,N,1,N]);
colormap('jet');
caxis([-1,1]);
imagesc(CG2/max(max(CG2)));
fprintf('Caption: (Top) true autocovariance (left) exponential (middle) long-tailed bell (right) Gaussian.\n');
fprintf('Caption: (bottom) approximate autocovariance (left) exponential (middle) long-tailed bell (right) Gaussian.\n');
% plot middle column
figure();
clf;
subplot(3,1,1);
hold on;
axis([0,xmax,0,1])
ic = floor(N/2);
plot(x,CE(1:N,ic)/max(CE(1:N,ic)),'r-','Linewidth',2);
plot(x,CE2(1:N,ic)/max(CE2(1:N,ic)),'g--','Linewidth',3);
xlabel('row i');
ylabel('Cij');
subplot(3,1,2);
hold on;
axis([0,xmax,0,1])
ic = floor(N/2);
plot(x,CL(1:N,ic)/max(CL(1:N,ic)),'r-','Linewidth',2);
plot(x,CL2(1:N,ic)/max(CL2(1:N,ic)),'g--','Linewidth',3);
xlabel('row i');
ylabel('Cij');
subplot(3,1,3);
hold on;
axis([0,xmax,0,1])
ic = floor(N/2);
plot(x,CG(1:N,ic)/max(CG(1:N,ic)),'r-','Linewidth',2);
plot(x,CG2(1:N,ic)/max(CG2(1:N,ic)),'g--','Linewidth',3);
xlabel('row i');
ylabel('Cij');
fprintf('Caption: Central row of autocovariance matrix. (Top) exponential, with exact (red) and approximate (green).\n');
fprintf('(Middle) Long-tailed bell, with exact (red) and approximate (green).\n');
fprintf('(Bottom) Gaussian, with exact (red) and approximate (green).\n');
% gdama07_03
% Exploration of variance of Gaussian Process Regression
% The solution of a generalized least squares problem is
% mest = GMG dobs + (I- GMG G) m0
% and covariance
% cov_mest = sigma2d GMG GMG' + (I- GMG G) cov_m (I- GMG G)' = c1 + c2
% and variance
% vari_mest = diag(c1) + diag(c2) = v1 + v2
% this code explores the relative contributions of v1 and v2
clear all;
fprintf('gdama07_03\n');
% the true random field has variance sigmam2
sigmam=1;
sigmam2 = sigmam^2;
% field should have a Gaussian autocorrelation with scale factor, s
s = 5; % scale factor
s2 = s^2;
% standard set-up of position x and wavenumber kx-axis
Nx=256;
Nkx=Nx/2+1;
Dx=1;
x=Dx*[0:Nx-1]';
xmax = Dx*(Nx-1);
kxmax=2*pi/(2*Dx);
Dkx=kxmax/(Nx/2);
kx=Dkx*[0:Nx/2,-Nx/2+1:-1]';
ixmid = floor(Nkx);
% evaluate the Gaussian autocovariance, centered in the interval
c = zeros(Nx,1); % target autocovariance (unnormalized)
for ix=(1:Nx)
r=Dx*abs(ix-Nkx);
c(ix) = exp(-0.5*(r^2)/s2);
end
% make m(x) with desired autocovariance, c using fact that
% amplitude spectrum of autocorrelation is power spectrum of data
sqrtabsctt = sqrt(abs(fft(c))); % target asd of m
n = random('Normal',0.0,1.0,Nx,1); % random noise
mtrue = real(ifft(sqrtabsctt.*fft(n))); % shape the spectrum of n into mtrue
sigmamest = std(mtrue); % sqrt variance
mtrue = (sigmam/sigmamest)*mtrue; % normalize to correct variance
% sample data, pattern of expanding space between samples
Ns=16;
id=[];
for i = (1:Ns)
idi = (i-1)*Ns+(1:i:Ns);
id = [id, idi];
end
N=length(id);
xd = x(id);
dtrue = mtrue(id);
% data kernel
M=Nx;
G = zeros(N,M);
for i=(1:N)
G(i, id(i) ) = 1.0;
end
% variance of data
sigmad = 0.1;
sigmad2 = sigmad^2;
% plot the true model and the true data
figure();
clf;
hold on;
set(gca,'LineWidth',2);
axis([0,xmax,-4*sigmam,4*sigmam]);
plot(x,mtrue,'k-','LineWidth',2);
plot(xd,dtrue,'ko','LineWidth',1);
xlabel('x');
ylabel('m(x) and d(x)');
% control-control covariance matrix
Ccc = zeros(N,N);
for i=(1:N)
for j=(1:N)
r = abs(xd(i)-xd(j));
Ccc(i,j) = sigmam2*exp(-0.5*(r^2)/s2);
end
end
% target-control covariance matrix
Ctc = zeros(M,N);
for i=(1:M)
for j=(1:N)
r = abs(x(i)-xd(j));
Ctc(i,j) = sigmam2*exp(-0.5*(r^2)/s2);
end
end
% target-target covariance matrix
Ctt = zeros(M,M);
for i=(1:M)
for j=(1:M)
r = abs(x(i)-x(j));
Ctt(i,j) = sigmam2*exp(-0.5*(r^2)/s2);
end
end
% generalized inverse & resolution matrix
GMG = (Ctc / (Ccc + sigmad2*eye(N) ));
R = GMG * G;
IMR = eye(M)-R;
% variance of estimates
v1 = diag(sigmad2* GMG * GMG'); % part associated with data
v2 = diag(IMR * Ctt * IMR'); % part associated with prior information
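% Optional summary (an illustrative sketch using the variables defined above):
% relative size of the two contributions to the posterior variance
fprintf('mean of v1 (data part) %.4f, mean of v2 (prior part) %.4f\n',mean(v1),mean(v2));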
% noisy data
dobs = dtrue + random('normal',0,sigmad,N,1);
% solution
mest = GMG * dobs;
plot(x,mest,'r:','LineWidth',2);
fprintf('Caption: True data (circles) and true model (black curve), estimated model (red curve)\n');
figure();
clf;
hold on;
set(gca,'LineWidth',2);
axis([0,xmax,0,sigmam]);
xlabel('x');
ylabel('sigma');
% many realizations of the solution, each time with new data
% noise, and holding m0 at zero
NR = 10000;
MEST = zeros(M,NR);
for i = (1:NR)
dobsir = dtrue + random('normal',0,sigmad,N,1);
MEST(:,i) = GMG * dobsir;
end
% standard deviation of these solutions
S1 = zeros(M,1);
for i=(1:M)
S1(i) = std(MEST(i,:));
end
plot(x,S1,'k-','LineWidth',3);
plot(x,sqrt(v1),'y:','LineWidth',1);
fprintf('Caption: Part of standard error of solution arising from measurement error\n');
fprintf('(solid) from ensemble of realizations (dotted) from formula\n');
figure();
clf;
hold on;
set(gca,'LineWidth',2);
axis([0,xmax,0,sigmam]);
xlabel('x');
ylabel('sigma');
% many realizations of the solution, each time with new data
% noise, and creating a new m0 with correct covariance
NR = 10000;
MEST = zeros(M,NR);
for i = (1:NR)
dobsir = dtrue + random('normal',0,sigmad,N,1);
n = random('Normal',0.0,1.0,Nx,1); % random noise
m0 = real(ifft(sqrtabsctt.*fft(n))); % shape the spectrum of n into m0
sigmamest = std(m0); % sqrt variance
m0 = (sigmam/sigmamest)*m0; % normalize to correct variance
MEST(:,i) = GMG * dobsir + IMR * m0;
end
S2 = zeros(M,1);
for i=(1:M)
S2(i) = std(MEST(i,:));
end
plot(x,S2,'k-','LineWidth',3);
plot(x,sqrt(v1+v2),'y:','LineWidth',1);
fprintf('Caption: Complete standard error of solution\n');
fprintf('(solid) from ensemble of realizations (dotted) from formula\n');
figure();
clf;
hold on;
set(gca,'LineWidth',2);
axis([0,xmax,0,sigmam]);
xlabel('x');
ylabel('sigma mest');
plot(x,S1,'k-','LineWidth',2);
plot(x,S2,'r-','LineWidth',2);
fprintf('Caption: Comparison of the part of standard error of solution arising\n');
fprintf('from measurement error (solid) and the complete standard error (red)\n');
% gdama07_04
% Gaussian Process Regression to reconstruct field from data
% in file '../Data/random_fields.txt'. Two versions, with
% exponential and Gaussian auto-covariance, are made.
clear all;
fprintf('gdama07_04\n');
% field should have variance gamma2
gamma2=1;
gamma = sqrt(gamma2);
% field should have autocorrelation with scale factor, s
s = 5; % scale factor
s2 = s^2; % scale factor squared
% load data (the fields sampled at random locations)
D = load( '../Data/random_fields.txt');
xd=D(:,1);
yd=D(:,2);
dE=D(:,3);
dG=D(:,4);
N = length(xd);
% add noise to data
sd = 0.01;
sd2 = sd^2;
n = random('normal',0.0,sd,N,1);
dE = dE+n;
dG = dG+n;
% x-axis
Nx=41;
Nkx=Nx/2+1;
Dx=1;
x=Dx*[0:Nx-1]';
xmax = Dx*(Nx-1);
% y-axis
Ny=41;
Nky=Ny/2+1;
Dy=1;
y=Dy*[0:Ny-1]';
ymax = Dy*(Ny-1);
M = Nx*Ny; % total number of model parameters
% lookup tables
iofk = zeros(M,1);
jofk = zeros(M,1);
kofij = zeros(Nx,Ny);
k=0;
for i=(1:Nx)
for j=(1:Ny)
k=k+1;
iofk(k)=i;
jofk(k)=j;
kofij(i,j)=k;
end
end
% positions of model parameters
xm = zeros(M,1);
ym = zeros(M,1);
for k=(1:M)
xm(k)=x(iofk(k));
ym(k)=y(jofk(k));
end
% covariance matrices, Exponential case
Ccc = zeros(N,N);
for i=(1:N)
for j=(1:N)
r = abs(xd(i)-xd(j)) + abs(yd(i)-yd(j));
Ccc(i,j) = gamma2*exp(-r/s);
end
end
Ctc = zeros(M,N);
for i=(1:M)
for j=(1:N)
r = abs(xm(i)-xd(j)) + abs(ym(i)-yd(j));
Ctc(i,j) = gamma2*exp(-r/s);
end
end
% solution
epsi = 1.0e-2;
% mest = (Ctc / (Ccc + epsi*eye(N) )) * dE;
% the two-step version below is more efficient
u = (Ccc + epsi*eye(N) ) \ dE;
mest = Ctc * u;
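% Optional check (an illustrative sketch using the variables defined above):
% the two-step backslash form and the matrix-division form give the same estimate
mestchk = (Ctc / (Ccc + epsi*eye(N) )) * dE;
fprintf('max difference between the two solution forms %e\n',max(abs(mest-mestchk)));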
% posterior covariance
Ctt = zeros(M,M);
for i=(1:M)
for j=(1:M)
r = abs(xm(i)-xm(j)) + abs(ym(i)-ym(j));
Ctt(i,j) = gamma2*exp(-r/s);
end
end
Cttest = Ctt - (Ctc/(Ccc + epsi*eye(N)))*(Ctc');
sigmatcpos = sqrt(diag(Cttest));
% fold into image
Sest = zeros(Nx,Ny);
sigmaest = zeros(Nx,Ny);
for k=(1:M)
Sest(iofk(k),jofk(k))=mest(k);
sigmaest(iofk(k),jofk(k))=sigmatcpos(k);
end
figure();
clf;
subplot(1,3,1);
hold on
axis([0,xmax,0,xmax])
set(gca,'Ydir','reverse');
caxis([-2,2]);
colormap('jet');
imagesc([0,xmax],[0,ymax],Sest');
plot(xd,yd,'k.');
xlabel('x');
ylabel('y');
colorbar;
subplot(1,3,2);
hold on
axis([0,xmax,0,xmax])
set(gca,'Ydir','reverse');
caxis([-2,2]);
colormap('jet');
imagesc([0,xmax],[0,ymax],Sest');
xlabel('x');
ylabel('y');
colorbar;
subplot(1,3,3);
hold on
axis([0,xmax,0,xmax])
set(gca,'Ydir','reverse');
caxis([-2,2]);
colormap('jet');
imagesc([0,xmax],[0,ymax],sigmaest');
xlabel('x');
ylabel('y');
colorbar;
fprintf('Caption: Reconstruction of 2D field, presuming exponential autocovariance\n');
fprintf('(left) Estimated field (colors) with data (dots) superimposed\n');
fprintf('(middle) Estimated field (colors). (Right) Standard error of the estimate\n');
% estimated data
dEest = zeros(N,1);
for k=(1:N)
i = floor(Nx*xd(k)/xmax)+1;
if(i<1)
i=1;
elseif (i>Nx)
i=Nx;
end
j = floor(Ny*yd(k)/ymax)+1;
if(j<1)
j=1;
elseif (j>Ny)
j=Ny;
end
dEest(k) = Sest(i,j);
end
figure();
clf;
hold on;
plot(dE,dEest,'k.','LineWidth',2)
xlabel('dobs');
ylabel('dpre');
fprintf('Caption: Scatter plot of observed and predicted data\n');
% covariance matrices, Gaussian case
Ccc = zeros(N,N);
for i=(1:N)
for j=(1:N)
r2 = abs(xd(i)-xd(j))^2 + abs(yd(i)-yd(j))^2;
Ccc(i,j) = gamma2*exp(-0.5*r2/s2);
end
end
Ctc = zeros(M,N);
for i=(1:M)
for j=(1:N)
r2= abs(xm(i)-xd(j))^2 + abs(ym(i)-yd(j))^2;
Ctc(i,j) = gamma2*exp(-0.5*r2/s2);
end
end
% solution
epsi = 1.0e-2;
u = (Ccc + epsi*eye(N) ) \ dG;
mest = Ctc * u;
% mest = (Ctc / (Ccc + epsi*eye(N) )) * dG;
% posterior covariance
Ctt = zeros(M,M);
for i=(1:M)
for j=(1:M)
r2= abs(xm(i)-xm(j))^2 + abs(ym(i)-ym(j))^2;
Ctt(i,j) = gamma2*exp(-0.5*r2/s2);
end
end
Cttest = Ctt - (Ctc/(Ccc + epsi*eye(N)))*(Ctc');
sigmatcpos = sqrt(diag(Cttest));
% fold into image
Sest = zeros(Nx,Ny);
sigmaest = zeros(Nx,Ny);
for k=(1:M)
Sest(iofk(k),jofk(k))=mest(k);
sigmaest(iofk(k),jofk(k))=sigmatcpos(k);
end
figure();
subplot(1,3,1);
hold on;
axis([0,xmax,0,xmax])
set(gca,'Ydir','reverse');
caxis([-2,2]);
colormap('jet');
imagesc([0,xmax],[0,ymax],Sest');
colorbar;
plot(xd,yd,'k.');
xlabel('x');
ylabel('y');
subplot(1,3,2);
hold on
axis([0,xmax,0,xmax])
set(gca,'Ydir','reverse');
caxis([-2,2]);
colormap('jet');
imagesc([0,xmax],[0,ymax],Sest');
colorbar;
xlabel('x');
ylabel('y');
subplot(1,3,3);
hold on
axis([0,xmax,0,xmax])
set(gca,'Ydir','reverse');
caxis([-2,2]);
colormap('jet');
imagesc([0,xmax],[0,ymax],sigmaest');
colorbar;
xlabel('x');
ylabel('y');
fprintf('Caption: Reconstruction of 2D field, presuming Gaussian autocovariance\n');
fprintf('(left) Estimated field (colors) with data (dots) superimposed\n');
fprintf('(middle) Estimated field (colors). (Right) Standard error of the estimate\n');
% estimated data
dGest = zeros(N,1);
for k=(1:N)
i = floor(Nx*xd(k)/xmax)+1;
if(i<1)
i=1;
elseif (i>Nx)
i=Nx;
end
j = floor(Ny*yd(k)/ymax)+1;
if(j<1)
j=1;
elseif (j>Ny)
j=Ny;
end
dGest(k) = Sest(i,j);
end
figure();
clf;
hold on
plot(dG,dGest,'k.','LineWidth',2)
xlabel('dobs');
ylabel('dpre');
fprintf('Caption: Scatter plot of observed and predicted data\n');
% gdama07_05
% data assimilation by Generalized Least Squares
% for the thermal diffusion equation
% This version uses non-sparse matrices
clear all;
fprintf('gdama07_05\n');
% prior error in data (moderately accurate)
sigmad = 0.1;
sigmad2 = sigmad^2;
sigmadi = 1.0/sigmad;
% prior error in diff eqn (highly accurate)
sigmaeqn = 1e-4;
sigmaeqn2 = sigmaeqn^2;
sigmaeqni = 1.0/sigmaeqn;
% prior error in bc (highly accurate)
sigmabc = 1e-4;
sigmabc2 = sigmabc^2;
sigmabci = 1.0/sigmabc;
% prior error in ic (poorly accurate)
sigmaic = 1.0;
sigmaic2 = sigmaic^2;
sigmaici = 1.0/sigmaic;
% x-axis
Lx = 41;
Dx = 1.0;
x = Dx*[0:Lx-1]';
xmax = Dx*(Lx-1);
Dxr2 = 1/(Dx^2);
ixm = floor(Lx/2)+1;
xm = x(ixm);
% t-axis
Lt = 41;
Dt = 1.0;
t = Dt*[0:Lt-1]';
tmax = Dt*(Lt-1);
Dtr = 1/(Dt);
M = Lx*Lt;
% lookup tables
iofk = zeros(M,1);
jofk = zeros(M,1);
kofij = zeros(Lx,Lt);
k=0;
for i=(1:Lx)
for j=(1:Lt)
k=k+1;
iofk(k)=i;
jofk(k)=j;
kofij(i,j)=k;
end
end
D = zeros(M,M);
s = zeros(M,1);
% thermal constant
kappa = 0.1;
% row counter
nr = 0;
% interior
for n=(2:Lx-1)
for m=(2:Lt)
nr = nr+1;
nm = kofij(n,m);
nmommo = kofij(n-1,m-1);
npommo = kofij(n+1,m-1);
nmmo = kofij(n,m-1);
D(nr,nmmo) = (-Dtr+2.0*kappa*Dxr2)*sigmaeqni;
D(nr,nmommo) = -kappa*Dxr2*sigmaeqni;
D(nr,npommo) = -kappa*Dxr2*sigmaeqni;
D(nr,nm) = Dtr*sigmaeqni;
s(nr) = 0.0;
end
end
% boundary conditions
for m=(1:Lt)
nr = nr+1;
% left
n=1;
nm = kofij(n,m);
D(nr,nm) = sigmabci;
s(nr) = 0.0;
% right
nr = nr+1;
n=Lx;
nm = kofij(n,m);
D(nr,nm) = sigmabci;
s(nr) = 0.0;
end
nrnoic = nr;
% initial conditions
m0 = zeros(Lx,1);
sx = 2;
sx2 = sx^2;
m0 = exp( - 0.5 * ((x-xm).^2) / sx2 );
m0(1)=0;
m0(Lx)=0;
for n=(2:Lx-1)
nr = nr+1;
m = 1;
nm = kofij(n,m);
D(nr,nm) = 1.0*sigmaici;
s(nr) = m0(n)*sigmaici;
end
fprintf('D: expected %d rows, got %d\n',M,nr);
% true solution
mtrue = D\s;
Strue = zeros(Lx,Lt);
for k=(1:M)
i = iofk(k);
j = jofk(k);
Strue(i,j)=mtrue(k);
end
% randomly sample images
Nd = 15;
N = Nd*Lx;
ix = randi(Lx,N,1);
it = randi(Lt-1,N,1)+1; % no data at t=0
xd = x(ix);
td = t(it);
dtrue = zeros(N,1);
for i=(1:N)
dtrue(i) = Strue(ix(i),it(i));
end
% add noise to data
nse = random('Normal',0.0,sigmad,N,1);
dobs = dtrue + nse;
% data kernel
G = zeros(N,M);
for i=(1:N)
k = kofij( ix(i), it(i) );
G(i,k) = 1.0;
end
% generalized least squares
% the full right-hand side s is used, so that prior information
% on the initial conditions enters the solution
F = [D;G*sigmadi];
f = [s;dobs*sigmadi]; % correction, s replaces zeros(nr,1)
mest = (F'*F)\(F'*f);
dpre = G*mest;
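% Optional check (an illustrative sketch using the variables defined above):
% rms of the weighted prior-information and data residuals of the GLS fit
rDchk = D*mest - s; % prior part (rows already weighted by the sigma inverses)
rGchk = sigmadi*(G*mest - dobs); % weighted data part
fprintf('rms weighted residuals: prior %.3e, data %.3e\n',sqrt(mean(rDchk.^2)),sqrt(mean(rGchk.^2)));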
figure();
clf;
hold on;
plot(dobs,dpre,'k.');
xlabel('dobs');
ylabel('dpre');
fprintf('Caption: Scatter plot of observed and predicted data\n');
Sest = zeros(Lx,Lt);
for k=(1:M)
i = iofk(k);
j = jofk(k);
Sest(i,j)=mest(k);
end
m0est = zeros(Lx,1);
for i=(1:Lx)
m0est(i)=Sest(i,1);
end
figure();
clf;
hold on;
plot(x,m0,'k-');
plot(x,m0est,'r--');
xlabel('x');
ylabel('m0');
fprintf('Caption: True (black) and estimated (red) solution m0=m(t=0)\n');
Sdata = zeros(Lx,Lt);
for i=(1:N)
Sdata(ix(i),it(i))=dobs(i);
end
figure();
clf;
subplot(2,2,1);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Strue');
colorbar;
xlabel('x');
ylabel('t');
title('true');
subplot(2,2,2);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sdata');
colorbar;
xlabel('x');
ylabel('t');
title('data');
subplot(2,2,3);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sest');
colorbar;
xlabel('x');
ylabel('t');
title('est');
subplot(2,2,4);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-1.2,1.2]); % correction to plotting scale
colormap('jet');
imagesc([0,xmax],[0,tmax],(Strue-Sest)');
colorbar;
xlabel('x');
ylabel('t');
title('error');
fprintf('Caption: (top left) True solution, (top right) observed data\n');
fprintf('(bottom left) Estimated solution (bottom right) error\n');
% gdama07_06
% data assimilation by Generalized Least Squares
% for the thermal diffusion equation
% This version uses sparse matrices and the biconjugate gradient solver
clear all
fprintf('gdama07_06\n');
% global variable for sparse version of F
clear gdaFsparse;
global gdaFsparse;
% prior error in data (moderately accurate)
sigmad = 0.1;
sigmad2 = sigmad^2;
sigmadi = 1.0/sigmad;
% prior error in diff eqn (highly accurate)
sigmaeqn = 1e-2;
sigmaeqn2 = sigmaeqn^2;
sigmaeqni = 1.0/sigmaeqn;
% prior error in bc (highly accurate)
sigmabc = 1e-2;
sigmabc2 = sigmabc^2;
sigmabci = 1.0/sigmabc;
% prior error in ic (poorly accurate)
sigmaic = 1.0;
sigmaic2 = sigmaic^2;
sigmaici = 1.0/sigmaic;
% x-axis
Lx = 101;
Dx = 1.0;
x = Dx*[0:Lx-1]';
xmax = Dx*(Lx-1);
Dxr2 = 1/(Dx^2);
ixm = floor(Lx/2)+1;
xm = x(ixm);
% t-axis
Lt = 101;
Dt = 1.0;
t = Dt*[0:Lt-1]';
tmax = Dt*(Lt-1);
Dtr = 1/(Dt);
M = Lx*Lt;
% lookup tables
iofk = zeros(M,1);
jofk = zeros(M,1);
kofij = zeros(Lx,Lt);
k=0;
for i=(1:Lx)
for j=(1:Lt)
k=k+1;
iofk(k)=i;
jofk(k)=j;
kofij(i,j)=k;
end
end
P=0;
Pmax = 100000;
Fr = zeros(Pmax,1);
Fc = zeros(Pmax,1);
Fv = zeros(Pmax,1);
s = zeros(M,1);
% thermal constant
kappa = 0.1;
% row counter
nr = 0;
% interior
for n=(2:Lx-1)
for m=(2:Lt)
nr = nr+1;
nm = kofij(n,m);
nmommo = kofij(n-1,m-1);
npommo = kofij(n+1,m-1);
nmmo = kofij(n,m-1);
% D(nr,nmmo) = (-Dtr+2.0*kappa*Dxr2)*sigmaeqni;
P=P+1; % increment element number
Fr(P,1)=nr; % row index
Fc(P,1)=nmmo; % column index
Fv(P,1)=(-Dtr+2.0*kappa*Dxr2)*sigmaeqni; % value
% D(nr,nmommo) = -kappa*Dxr2*sigmaeqni;
P=P+1; % increment element number
Fr(P,1)=nr; % row index
Fc(P,1)=nmommo; % column index
Fv(P,1)=-kappa*Dxr2*sigmaeqni; % value
% D(nr,npommo) = -kappa*Dxr2*sigmaeqni;
P=P+1; % increment element number
Fr(P,1)=nr; % row index
Fc(P,1)=npommo; % column index
Fv(P,1)=-kappa*Dxr2*sigmaeqni; % value
% D(nr,nm) = Dtr*sigmaeqni;
P=P+1; % increment element number
Fr(P,1)=nr; % row index
Fc(P,1)=nm; % column index
Fv(P,1)=Dtr*sigmaeqni; % value
s(nr) = 0.0;
end
end
% boundary conditions
for m=(1:Lt)
nr = nr+1;
% left
n=1;
nm = kofij(n,m);
npom = kofij(n+1,m);
% D(nr,nm) = sigmabci;
P=P+1; % increment element number
Fr(P,1)=nr; % row index
Fc(P,1)=nm; % column index
Fv(P,1)=sigmabci; % value
s(nr) = 0.0;
% right
nr = nr+1;
n=Lx;
nm = kofij(n,m);
% D(nr,nm) = sigmabci;
P=P+1; % increment element number
Fr(P,1)=nr; % row index
Fc(P,1)=nm; % column index
Fv(P,1)=sigmabci; % value; correction factor of (-Dtr) removed
s(nr) = 0.0;
end
nrnoic = nr;
% initial conditions
m0 = zeros(Lx,1);
sx = 2;
sx2 = sx^2;
m0 = exp( - 0.5 * ((x-xm).^2) / sx2 );
m0(1)=0;
m0(Lx)=0;
for n=(2:Lx-1)
nr = nr+1;
m = 1;
nm = kofij(n,m);
% D(nr,nm) = 1.0*sigmaici;
P=P+1; % increment element number
Fr(P,1)=nr; % row index
Fc(P,1)=nm; % column index
Fv(P,1)=1.0*sigmaici; % value
s(nr) = m0(n)*sigmaici;
end
fprintf('D: expected %d rows, got %d\n',M,nr);
nel = 4*(Lx-2)*(Lt-1) + 2*Lt + (Lx-2);
fprintf('D: expected %d elements, got %d\n',nel,P);
% build sparse array (don't truncate, don't clear, because later
% we will add the data kernel)
gdaFsparse = sparse(Fr(1:P),Fc(1:P),Fv(1:P),M,M);
% true solution
tol = 1e-5; % tolerance
maxit = 3*(M+M); % maximum number of iterations allowed
FTf = gda_FTFrhs(s);
mtrue=bicg(@gda_FTFmul,FTf,tol,maxit);
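% Optional check (an illustrative sketch using the variables defined above):
% rms residual of the weighted system D*mtrue = s solved with bicg
rchk = gdaFsparse*mtrue - s;
fprintf('rms residual of sparse solve %e\n',sqrt(mean(rchk.^2)));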
Strue = zeros(Lx,Lt);
for k=(1:M)
i = iofk(k);
j = jofk(k);
Strue(i,j)=mtrue(k);
end
gda_draw(Strue);
% randomly sample images
Nd = 15;
N = Nd*Lx;
ix = randi(Lx,N,1);
it = randi(Lt-1,N,1)+1; % no data at t=0
xd = x(ix);
td = t(it);
dtrue = zeros(N,1);
for i=(1:N)
dtrue(i) = Strue(ix(i),it(i));
end
% add noise to data
nse = random('Normal',0.0,sigmad,N,1);
dobs = dtrue + nse;
% build data kernel G on the fly, adding it to the bottom of Fr,Fc,Fv
for i=(1:N)
k = kofij( ix(i), it(i) );
% G(i,k) = 1.0;
P=P+1; % increment element number
Fr(P,1)=i+M; % row index
Fc(P,1)=k; % column index
Fv(P,1)=1.0*sigmadi; % value
end
nel = 4*(Lx-2)*(Lt-1) + 2*Lt + (Lx-2) + N;
fprintf('D+G: expected %d elements, got %d\n',nel,P);
% new sparse matrix
gdaFsparse = sparse(Fr(1:P),Fc(1:P),Fv(1:P),M+N,M);
clear Fr Fc Fv; % delete no longer needed arrays
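% Optional check (an illustrative sketch using the variables defined above):
% element count of the assembled sparse matrix; note that sparse() sums
% duplicate (row,column) pairs, so nnz can be less than P when two data
% happen to share the same sample point
fprintf('nnz(gdaFsparse) = %d, elements supplied = %d\n',nnz(gdaFsparse),P);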
% least squares solution
tol = 1e-5; % tolerance
maxit = 3*(M+N); % maximum number of iterations allowed
% the full right-hand side s is used, so that prior ic information enters the solution
f = [s; dobs*sigmadi ]; % correction: s replaces zeros(M,1)
FTf = gda_FTFrhs(f);
mest=bicg(@gda_FTFmul,FTf,tol ,maxit);
Sest = zeros(Lx,Lt);
for k=(1:M)
i = iofk(k);
j = jofk(k);
Sest(i,j)=mest(k);
end
% obs data on (x,t) plane, predicted data
Sdata = zeros(Lx,Lt);
dpre = zeros(N,1);
for i=(1:N)
Sdata(ix(i),it(i))=dobs(i);
k = kofij(ix(i),it(i));
dpre(i) = mest(k);
end
figure();
clf;
hold on;
plot(dobs,dpre,'k.');
xlabel('dobs');
ylabel('dpre');
fprintf('Caption: Scatter plot of observed and predicted data\n');
m0est = zeros(Lx,1);
for i=(1:Lx)
m0est(i)=Sest(i,1);
end
figure();
clf;
hold on;
plot(x,m0,'k-');
plot(x,m0est,'r--');
xlabel('x');
ylabel('m0');
fprintf('Caption: True (black) and estimated (red) solution m0=m(t=0)\n');
figure();
clf;
subplot(2,2,1);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Strue');
colorbar;
xlabel('x');
ylabel('t');
title('true');
subplot(2,2,2);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sdata');
colorbar;
xlabel('x');
ylabel('t');
title('data');
subplot(2,2,3);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sest');
colorbar;
xlabel('x');
ylabel('t');
title('est');
subplot(2,2,4);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-1.2,1.2]); % correction to plotting scale
colormap('jet');
imagesc([0,xmax],[0,tmax],(Strue-Sest)');
colorbar;
xlabel('x');
ylabel('t');
title('error');
fprintf('Caption: (top left) True solution, (top right) observed data\n');
fprintf('(bottom left) Estimated solution (bottom right) error\n');
% gdama07_07
% Thomas recursion used to solve a MxM block tri-diagonal system
% system has Lt variable diagonal blocks, Ai, each Lx by Lx
% and constant off-diagonal blocks, Lx by Lx, all randomly generated
clear all
fprintf('gdama07_07\n');
% size of blocks and matrices
Lx = 5;
Lt = 30;
M = Lx*Lt;
% build random symmetric block tridiagonal matrix, A
% variable diagonal blocks
A = zeros(M,M);
Ai = zeros(Lx,Lx,Lt);
for i=(1:Lt)
At = random('normal',0,1,Lx,Lx);
Ai(:,:,i) = 0.5*(At+At');
j = Lx*(i-1)+1; % upper left element in A
A(j:j+Lx-1,j:j+Lx-1) = Ai(:,:,i);
end
% constant off-diagonal blocks
B = random('normal',0,1,Lx,Lx);
for i=(2:Lt)
j = Lx*(i-1)+1; % upper left element in A
k = Lx*(i-2)+1;
A(j:j+Lx-1,k:k+Lx-1) = B(:,:);
A(k:k+Lx-1,j:j+Lx-1) = B(:,:)';
end
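% Optional check (an illustrative sketch using the matrix A defined above):
% A should be symmetric by construction
fprintf('max asymmetry of A: %e\n',max(max(abs(A-A'))));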
% true solution
mtrue = random('normal',0,1,M,1);
% right hand side
a = A*mtrue;
ai = zeros(Lx,Lt);
for i=(1:Lt)
j = Lx*(i-1)+1; % first element in a
ai(:,i) = a(j:j+Lx-1);
end
% forward recursion
Ahinvi = zeros(Lx,Lx,Lt);
ahi = zeros(Lx,Lt);
Ahinvi(:,:,1) = inv(squeeze(Ai(:,:,1)));
ahi(:,1) = ai(:,1);
for i=(2:Lt)
Ahinvi(:,:,i) = inv( Ai(:,:,i) - B*squeeze(Ahinvi(:,:,i-1))*(B') );
ahi(:,i) = ai(:,i) - B*squeeze(Ahinvi(:,:,i-1))*ahi(:,i-1);
end
% backward recursion
mi = zeros(Lx,Lt);
mi(:,Lt) = squeeze(Ahinvi(:,:,Lt))*ahi(:,Lt);
for i=(Lt-1:-1:1)
mi(:,i) = squeeze(Ahinvi(:,:,i)) * (ahi(:,i)-B'*mi(:,i+1));
end
% assemble solution
m = zeros(M,1);
for i=(1:Lt)
j = Lx*(i-1)+1; % first element in m
m(j:j+Lx-1)=mi(1:Lx,i);
end
% error
e = mtrue-m;
E = sqrt(e'*e/M);
fprintf('rms error %e\n',E);
% gdama07_08
% Thomas recursion used in data assimilation problem
% with thermal diffusion equation.
% This version has extensive error checking.
% I've deleted all the error checking in the version in the next section
clear all
fprintf('gdama07_08\n');
% thermal constant
kappa = 0.1;
% size of blocks and matrices
Lx = 41;
Lt = 41;
M = Lx*Lt;
% prior error in data (moderately accurate)
sigmad = 0.05;
sigmad2 = sigmad^2;
sigmadi = 1.0/sigmad;
sigmadi2 = 1/sigmad2;
% prior error in diff eqn and bc (highly accurate)
sigmas = 1e-2;
sigmas2 = sigmas^2;
sigmasi = 1.0/sigmas;
sigmasi2 = 1.0/sigmas2;
% prior error in ic (poorly accurate)
sigmam0 = 1.0;
sigmam02 = sigmam0^2;
sigmam0i = 1.0/sigmam0;
sigmam0i2 = 1.0/sigmam02;
% x-axis
Dx = 1.0;
x = Dx*[0:Lx-1]';
xmax = Dx*(Lx-1);
Dxr = 1/Dx;
Dxr2 = 1/(Dx^2);
ixm = floor(Lx/2)+1;
xm = x(ixm);
% t-axis
Dt = 1.0;
t = Dt*[0:Lt-1]';
tmax = Dt*(Lt-1);
Dtr = 1/(Dt);
% lookup tables, must ensure times contiguous
iofk = zeros(M,1);
jofk = zeros(M,1);
kofij = zeros(Lx,Lt);
k=0;
for j=(1:Lt)
for i=(1:Lx)
k=k+1;
iofk(k)=i;
jofk(k)=j;
kofij(i,j)=k;
end
end
% 2nd derivative with zero value bc's
D2 = Dxr2*toeplitz([-2;1;zeros(Lx-2,1)],[-2,1,zeros(1,Lx-2)]);
D2(1,1)=1.0;
D2(1,2)=0.0;
D2(Lx,Lx-1)=0.0;
D2(Lx,Lx)=1.0;
% differential operator, D
D = Dt*kappa*D2+eye(Lx);
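% Optional check (an illustrative sketch using the constants defined above):
% the interior update of the explicit time step m(t+Dt)=D*m(t) is stable
% when kappa*Dt/Dx^2 <= 1/2
fprintf('stability number kappa*Dt/Dx^2 = %.2f (should not exceed 0.5)\n',kappa*Dt*Dxr2);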
% initial conditions
m0 = zeros(Lx,1);
sx = 2;
sx2 = sx^2;
m0 = exp( - 0.5 * ((x-xm).^2) / sx2 );
m0(1)=0;
m0(Lx)=0;
% execute the recursion
% to get the true solution
mtrue = zeros(Lx,Lt);
mtrue(:,1)=m0;
for i=(2:Lt)
mtrue(:,i) = D*mtrue(:,i-1);
end
gda_draw(mtrue);
fprintf('Caption: recursive solution\n');
H = zeros(M,M);
h = zeros(M,1);
for i=(1:Lt)
j=Lx*(i-1)+1;
H(j:j+Lx-1,j:j+Lx-1)= eye(Lx);
end
h(1:Lx) = m0;
for i=(2:Lt)
j=Lx*(i-1)+1;
k=Lx*(i-2)+1;
H(j:j+Lx-1,k:k+Lx-1) = -D;
end
% alternate version of true solution
% checks H and h
malt = H\h;
S=zeros(Lx,Lt);
for k=(1:M)
i=iofk(k);
j=jofk(k);
S(i,j)=malt(k);
end
% error
e = S-mtrue;
fprintf('Difference between recursion and m=Hinv*h: %e\n', max(abs(e(:))));
% sample data
Nd = 5; % number of data per time slice
d = zeros(Nd,Lt);
dobs = zeros(Nd,Lt);
id = zeros(Nd,Lt);
Sdata = zeros(Lx,Lt);
for j=(2:Lt) % no data at t=1
i = randperm(Lx-4,Nd)'+2; % no data at x=1,2,Lx-1,or Lx
id(:,j)=i;
d(:,j)=S(i,j); % true data
% observed data = true data + noise
dobs(:,j)= d(:,j) + random('normal',0,sigmad,Nd,1);
Sdata(i,j)=dobs(:,j);
end
gda_draw(Sdata);
fprintf('Caption: observed data\n');
% individual data kernels
Gi = zeros(Nd,Lx,Lt);
for i=(2:Lt) % no data at t=1
for j=(1:Nd)
k=id(j,i);
Gi(j,k,i)=1.0;
end
end
% full data kernel
G = zeros(Nd*(Lt-1),M);
dv = zeros(Nd*(Lt-1),1);
for i=(2:Lt)
j = (i-2)*Nd+1;
k = (i-1)*Lx+1;
G(j:j+Nd-1,k:k+Lx-1) = squeeze(Gi(:,:,i));
dv(j:j+Nd-1,1) = dobs(:,i);
end
sigmahi2 = [sigmam0i2*ones(Lx,1); sigmasi2*ones(Lx*(Lt-1),1)];
HTH = H'*diag(sigmahi2)*H;
HTh = H'*diag(sigmahi2)*h;
GTG = sigmadi2*G'*G;
GTd = sigmadi2*G'*dv;
FTF = GTG+HTH;
FTf = HTh+GTd;
% check solution of data eqn alone as a way of checking GTG and GTd
mest = (GTG+1e-8*eye(M))\(GTd);
Sest = zeros(Lx,Lt);
for k=(1:M) %
i=iofk(k);
j=jofk(k);
Sest(i,j)=mest(k,1);
end
gda_draw(Sest);
fprintf('Caption: The data, recovered by solving d=Gm with damped least squares\n');
mGLS = FTF\FTf;
SGLS = zeros(Lx,Lt);
for k=(1:M) %
i=iofk(k);
j=jofk(k);
SGLS(i,j)=mGLS(k,1);
end
gda_draw(SGLS);
fprintf('Caption: estimated solution using generalized least squares\n');
% setup for Thomas Recursion
Ai=zeros(Lx,Lx,Lt);
ai=zeros(Lx,Lt);
% A1 and a1
si = zeros(Lx,1);
sim1 = zeros(Lx,1);
Ai(:,:,1) = sigmasi2*D'*D + sigmam0i2*eye(Lx);
ai(:,1) = -sigmasi2*D'*si + sigmam0i2*m0;
e = Ai(:,:,1) - FTF(1:Lx,1:Lx);
fprintf('error in A1: %e\n', max(abs(e(:))));
e = ai(:,1) - FTf(1:Lx,1);
fprintf('error in a1: %e\n', max(abs(e(:))));
% interior A's and a's
EA = 0.0;
Ea = 0.0;
for i=(2:Lt-1)
Ai(:,:,i) = sigmasi2*D'*D + sigmasi2*eye(Lx) + sigmadi2*Gi(:,:,i)'*Gi(:,:,i);
ai(:,i) = -sigmasi2*D'*si + sigmasi2*sim1 + sigmadi2*Gi(:,:,i)'*dobs(:,i);
j = Lx*(i-1)+1;
e = Ai(:,:,i) - FTF(j:j+Lx-1,j:j+Lx-1);
EAi = max(abs(e(:)));
if( EAi > EA )
EA = EAi;
end
e = ai(:,i) - FTf(j:j+Lx-1,1);
Eai = max(abs(e(:)));
if( Eai > Ea )
Ea = Eai;
end
end
fprintf('max interior error, A: %e and a: %e\n', EA, Ea);
% final A and a
sLtm1 = zeros(Lx,1);
Ai(:,:,Lt) = sigmasi2*eye(Lx) + sigmadi2*Gi(:,:,Lt)'*Gi(:,:,Lt);
ai(:,Lt) = sigmasi2*sLtm1 + sigmadi2*Gi(:,:,Lt)'*dobs(:,Lt);
e = Ai(:,:,Lt) - FTF(M-Lx+1:M,M-Lx+1:M);
fprintf('error in Alast: %e\n', max(abs(e(:))));
e = ai(:,Lt) - FTf(M-Lx+1:M,1);
fprintf('error in alast: %e\n', max(abs(e(:))));
% B
B = -sigmasi2*D;
e = B - FTF(Lx+1:2*Lx,1:Lx);
fprintf('error in B: %e\n', max(abs(e(:))));
% forward recursion
Ahinvi = zeros(Lx,Lx,Lt);
ahi = zeros(Lx,Lt);
Ahinvi(:,:,1) = inv(squeeze(Ai(:,:,1)));
ahi(:,1) = ai(:,1);
for i=(2:Lt)
Ahinvi(:,:,i) = inv( Ai(:,:,i) - B*squeeze(Ahinvi(:,:,i-1))*(B') );
ahi(:,i) = ai(:,i) - B*squeeze(Ahinvi(:,:,i-1))*ahi(:,i-1);
end
% backward recursion
mi = zeros(Lx,Lt);
mi(:,Lt) = squeeze(Ahinvi(:,:,Lt))*ahi(:,Lt);
for i=(Lt-1:-1:1)
mi(:,i) = squeeze(Ahinvi(:,:,i)) * (ahi(:,i)-B'*mi(:,i+1));
end
gda_draw(mi);
fprintf('Caption: Estimated solution using the Thomas method\n');
e = SGLS-mi;
fprintf('difference between GLS and Thomas soln: %e\n', max(abs(e(:))));
figure();
clf;
subplot(2,2,1);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],mtrue');
colorbar;
xlabel('x');
ylabel('t');
title('true');
subplot(2,2,2);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sdata');
colorbar;
xlabel('x');
ylabel('t');
title('data');
subplot(2,2,3);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],SGLS');
colorbar;
xlabel('x');
ylabel('t');
title('GLS');
subplot(2,2,4);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],mi');
colorbar;
xlabel('x');
ylabel('t');
title('Thomas');
fprintf('Caption: (Upper left) true solution, (upper right) data,\n');
fprintf('(lower left) Estimated solution using Generalized least squares\n');
fprintf('(lower right) Estimated solution using Thomas recursion.\n');
% gdama07_09
% Thomas recursion used in data assimilation problem
% with thermal diffusion equation.
% The previous section is the same code with extensive error checking.
% I've deleted all the error checking in this version
clear all
fprintf('gdama07_09\n');
% thermal constant
kappa = 0.1;
% size of blocks and matrices
Lx = 41;
Lt = 41;
M = Lx*Lt;
% prior error in data (moderately accurate)
sigmad = 0.05;
sigmad2 = sigmad^2;
sigmadi = 1.0/sigmad;
sigmadi2 = 1/sigmad2;
% prior error in diff eqn and bc (highly accurate)
sigmas = 1e-2;
sigmas2 = sigmas^2;
sigmasi = 1.0/sigmas;
sigmasi2 = 1.0/sigmas2;
% prior error in ic (poorly accurate)
sigmam0 = 1.0;
sigmam02 = sigmam0^2;
sigmam0i = 1.0/sigmam0;
sigmam0i2 = 1.0/sigmam02;
% x-axis
Dx = 1.0;
x = Dx*[0:Lx-1]';
xmax = Dx*(Lx-1);
Dxr = 1/Dx;
Dxr2 = 1/(Dx^2);
ixm = floor(Lx/2)+1;
xm = x(ixm);
% t-axis
Dt = 1.0;
t = Dt*[0:Lt-1]';
tmax = Dt*(Lt-1);
Dtr = 1/(Dt);
% lookup tables, must ensure times contiguous
iofk = zeros(M,1);
jofk = zeros(M,1);
kofij = zeros(Lx,Lt);
k=0;
for j=(1:Lt)
for i=(1:Lx)
k=k+1;
iofk(k)=i;
jofk(k)=j;
kofij(i,j)=k;
end
end
% 2nd derivative with zero value bc's
D2 = Dxr2*toeplitz([-2;1;zeros(Lx-2,1)],[-2,1,zeros(1,Lx-2)]);
D2(1,1)=1.0;
D2(1,2)=0.0;
D2(Lx,Lx-1)=0.0;
D2(Lx,Lx)=1.0;
% differential operator, D
D = Dt*kappa*D2+eye(Lx);
% initial conditions
m0 = zeros(Lx,1);
sx = 2;
sx2 = sx^2;
m0 = exp( - 0.5 * ((x-xm).^2) / sx2 );
m0(1)=0;
m0(Lx)=0;
% execute the recursion
% to get the true solution
mtrue = zeros(Lx,Lt);
mtrue(:,1)=m0;
for i=(2:Lt)
mtrue(:,i) = D*mtrue(:,i-1);
end
% sample data
Nd = 15; % number of data per time slice
d = zeros(Nd,Lt);
dobs = zeros(Nd,Lt);
id = zeros(Nd,Lt);
Sdata = zeros(Lx,Lt);
for j=(2:Lt) % no data at t=1
i = randperm(Lx-4,Nd)'+2; % no data at x=1,2,Lx-1,or Lx
id(:,j)=i;
d(:,j)=mtrue(i,j); % true data
% observed data = true data + noise
dobs(:,j)=d(:,j)+random('normal',0,sigmad,Nd,1);
Sdata(i,j)=dobs(:,j);
end
% individual data kernels
Gi = zeros(Nd,Lx,Lt);
for i=(2:Lt) % no data at t=1
for j=(1:Nd)
k=id(j,i);
Gi(j,k,i)=1.0;
end
end
% setup for Thomas Recursion
Ai=zeros(Lx,Lx,Lt);
ai=zeros(Lx,Lt);
% A1 and a1
si = zeros(Lx,1);
sim1 = zeros(Lx,1);
Ai(:,:,1) = sigmasi2*D'*D + sigmam0i2*eye(Lx);
ai(:,1) = -sigmasi2*D'*si + sigmam0i2*m0;
% interior A's and a's
for i=(2:Lt-1)
Ai(:,:,i) = sigmasi2*D'*D + sigmasi2*eye(Lx) + sigmadi2*Gi(:,:,i)'*Gi(:,:,i);
ai(:,i) = -sigmasi2*D'*si + sigmasi2*sim1 + sigmadi2*Gi(:,:,i)'*dobs(:,i);
end
% final A and a
sLtm1 = zeros(Lx,1);
Ai(:,:,Lt) = sigmasi2*eye(Lx) + sigmadi2*Gi(:,:,Lt)'*Gi(:,:,Lt);
ai(:,Lt) = sigmasi2*sLtm1 + sigmadi2*Gi(:,:,Lt)'*dobs(:,Lt);
% B
B = -sigmasi2*D;
% forward recursion
Ahinvi = zeros(Lx,Lx,Lt);
ahi = zeros(Lx,Lt);
Ahinvi(:,:,1) = inv(squeeze(Ai(:,:,1)));
ahi(:,1) = ai(:,1);
for i=(2:Lt)
Ahinvi(:,:,i) = inv( Ai(:,:,i) - B*squeeze(Ahinvi(:,:,i-1))*(B') );
ahi(:,i) = ai(:,i) - B*squeeze(Ahinvi(:,:,i-1))*ahi(:,i-1);
end
% backward recursion
mi = zeros(Lx,Lt);
mi(:,Lt) = squeeze(Ahinvi(:,:,Lt))*ahi(:,Lt);
for i=(Lt-1:-1:1)
mi(:,i) = squeeze(Ahinvi(:,:,i)) * (ahi(:,i)-B'*mi(:,i+1));
end
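% Optional check (an illustrative sketch using the variables defined above):
% rms difference between the true field and the Thomas-recursion estimate
echk = mtrue - mi;
fprintf('rms error of Thomas estimate %e\n',sqrt(mean(echk(:).^2)));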
figure();
clf;
subplot(2,2,1);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],mtrue');
colorbar;
xlabel('x');
ylabel('t');
title('true');
subplot(2,2,2);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sdata');
colorbar;
xlabel('x');
ylabel('t');
title('data');
subplot(2,2,3);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],mi');
colorbar;
xlabel('x');
ylabel('t');
title('Thomas');
subplot(2,2,4);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],(mtrue-mi)');
colorbar;
xlabel('x');
ylabel('t');
title('Error');
fprintf('Caption: (Upper left) true solution, (upper right) data,\n');
fprintf('(lower left) Estimated solution using Thomas recursion\n');
fprintf('(lower right) True solution minus that estimated using Thomas recursion.\n');
% gdama07_10
% Thomas recursion and Kalman filtering used in the present time
% data assimilation problem with the thermal diffusion equation.
% I also compute the global time solution for comparison
clear all
fprintf('gdama07_10\n');
% thermal constant
kappa = 0.05;
% size of blocks and matrices
Lx = 41;
Lt = 41;
M = Lx*Lt;
% prior error in data
sigmad = 0.05;
sigmad2 = sigmad^2;
sigmadi = 1.0/sigmad;
sigmadi2 = 1/sigmad2;
% prior error in diff eqn and bc
sigmas = 1;
sigmas2 = sigmas^2;
sigmasi = 1.0/sigmas;
sigmasi2 = 1.0/sigmas2;
% prior error in ic
sigmam0 = 1;
sigmam02 = sigmam0^2;
sigmam0i = 1.0/sigmam0;
sigmam0i2 = 1.0/sigmam02;
% x-axis
Dx = 1.0;
x = Dx*[0:Lx-1]';
xmax = Dx*(Lx-1);
Dxr = 1/Dx;
Dxr2 = 1/(Dx^2);
ixm = floor(Lx/2)+1;
xm = x(ixm);
% t-axis
Dt = 1.0;
t = Dt*[0:Lt-1]';
tmax = Dt*(Lt-1);
Dtr = 1/(Dt);
% lookup tables, must ensure times contiguous
iofk = zeros(M,1);
jofk = zeros(M,1);
kofij = zeros(Lx,Lt);
k=0;
for j=(1:Lt)
for i=(1:Lx)
k=k+1;
iofk(k)=i;
jofk(k)=j;
kofij(i,j)=k;
end
end
% 2nd derivative with zero value bc's
D2 = Dxr2*toeplitz([-2;1;zeros(Lx-2,1)],[-2,1,zeros(1,Lx-2)]);
D2(1,1)=1.0;
D2(1,2)=0.0;
D2(Lx,Lx-1)=0.0;
D2(Lx,Lx)=1.0;
% differential operator, D
D = Dt*kappa*D2+eye(Lx);
DTD = (D'*D);
% initial conditions
m0 = zeros(Lx,1);
sx = 2;
sx2 = sx^2;
m0 = exp( - 0.5 * ((x-xm).^2) / sx2 );
m0(1)=0;
m0(Lx)=0;
% execute the recursion
% to get the true solution
mtrue = zeros(Lx,Lt);
mtrue(:,1)=m0;
for i=(2:Lt)
mtrue(:,i) = D*mtrue(:,i-1);
end
% sample data
Nd = 15; % number of data per time slice
d = zeros(Nd,Lt);
dobs = zeros(Nd,Lt);
id = zeros(Nd,Lt);
Sdata = zeros(Lx,Lt);
Sd = zeros(Lx,Lt);
for j=(2:Lt) % no data at t=1
i = randperm(Lx-2,Nd)'+1; % no data at x=1 or Lx
id(:,j)=i;
d(:,j)=mtrue(i,j); % true data
% observed data = true data + noise
nse=random('normal',0,sigmad,Nd,1);
dobs(:,j)=mtrue(i,j)+nse;
Sd(i,j)=d(:,j);
Sdata(i,j)=dobs(:,j);
end
% individual data kernels
Gi = zeros(Nd,Lx,Lt);
for i=(2:Lt) % no data at t=1
for j=(1:Nd)
k=id(j,i);
Gi(j,k,i)=1.0;
end
end
% SOLUTION 1: Global time solution by full Thomas Recursion
% setup for Thomas Recursion
Ai=zeros(Lx,Lx,Lt);
ai=zeros(Lx,Lt);
% A1 and a1
si = zeros(Lx,1);
sim1 = zeros(Lx,1);
Ai(:,:,1) = sigmasi2*DTD + sigmam0i2*eye(Lx);
ai(:,1) = -sigmasi2*D'*si + sigmam0i2*m0;
% interior A's and a's
for i=(2:Lt-1)
Gs = squeeze(Gi(:,:,i));
Ai(:,:,i) = sigmasi2*DTD + sigmasi2*eye(Lx) + sigmadi2*(Gs'*Gs);
ai(:,i) = -sigmasi2*D'*si + sigmasi2*sim1 + sigmadi2*Gs'*dobs(:,i);
end
% final A and a
sLtm1 = zeros(Lx,1);
Gs = squeeze(Gi(:,:,Lt));
Ai(:,:,Lt) = sigmasi2*eye(Lx) + sigmadi2*(Gs'*Gs);
ai(:,Lt) = sigmasi2*sLtm1 + sigmadi2*Gs'*dobs(:,Lt);
% B
B = -sigmasi2*D;
% forward recursion
Ahinvi = zeros(Lx,Lx,Lt);
ahi = zeros(Lx,Lt);
Ahinvi(:,:,1) = inv(squeeze(Ai(:,:,1)));
ahi(:,1) = ai(:,1);
for i=(2:Lt)
Ahinvi(:,:,i) = inv( Ai(:,:,i) - (B*squeeze(Ahinvi(:,:,i-1))*B') );
ahi(:,i) = ai(:,i) - B*squeeze(Ahinvi(:,:,i-1))*ahi(:,i-1);
end
% backward recursion
mi = zeros(Lx,Lt);
mi(:,Lt) = squeeze(Ahinvi(:,:,Lt))*ahi(:,Lt);
for i=(Lt-1:-1:1)
mi(:,i) = squeeze(Ahinvi(:,:,i)) * (ahi(:,i)-B'*mi(:,i+1));
end
% SOLUTION 2: Thomas present time solution
% B
B = -sigmasi2*D;
% A1 and a1
si = zeros(Lx,1);
sim1 = zeros(Lx,1);
A1 = sigmasi2*(D'*D) + sigmam0i2*eye(Lx);
a1 = -sigmasi2*D'*si + sigmam0i2*m0;
Ah1 = A1;
ah1 = a1;
mPT = zeros(Lx,Lt); % present time solution
mPT(:,1) = m0; % first soln is just initial condition
Ahim1 = Ah1;
ahim1 = ah1;
% interior A's and a's
for i=(2:Lt)
Gs = squeeze(Gi(:,:,i));
AiPT = sigmasi2*eye(Lx) + sigmadi2*(Gs'*Gs);
Ai = sigmasi2*(D'*D) + AiPT;
aiPT = sigmasi2*sim1 + sigmadi2*Gs'*dobs(:,i);
ai = -sigmasi2*D'*si + aiPT;
BAI = B/Ahim1;
BAIBT = BAI*B';
AhiPT = AiPT - BAIBT;
Ahi = Ai - BAIBT;
ahiPT = aiPT - BAI*ahim1;
ahi = ai - BAI*ahim1;
epsPT = 1e-6*max(abs(AhiPT(:))); % small damping (avoids shadowing built-in eps)
mPT(:,i) = (AhiPT+epsPT*eye(Lx))\ahiPT;
Ahim1 = Ahi;
ahim1 = ahi;
end
% SOLUTION 3: Present time solution by Kalman Filtering
sim1 = zeros(Lx,1);
mK = zeros(Lx,Lt); % Kalman solution
varm = zeros(Lx,Lt);
mK(:,1) = m0; % first soln is just initial condition
covm1 = sigmam02*eye(Lx);
varm(:,1) = diag(covm1);
covmAKim1 = covm1;
for i=(2:Lt)
mAi = D*mK(:,i-1)+sim1;
covmAi = D*covmAKim1*D' + sigmas2*eye(Lx);
Gs = squeeze(Gi(:,:,i));
AK=inv(covmAi) + sigmadi2*(Gs'*Gs);
mK(:,i) = AK\( covmAi\mAi + sigmadi2*Gs'*dobs(:,i) );
AKinv=inv(AK);
varm(:,i)=diag(AKinv);
covmAKim1 = AKinv;
end
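% Optional check (an illustrative sketch using the variables defined above):
% maximum difference between the Thomas present-time and Kalman solutions
% (the same difference is plotted in a figure below)
fprintf('max |mPT - mK| = %e\n',max(abs(mPT(:)-mK(:))));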
figure();
clf;
subplot(2,2,1);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],mtrue');
colorbar;
xlabel('x');
ylabel('t');
title('true');
subplot(2,2,2);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],mi');
colorbar;
xlabel('x');
ylabel('t');
title('GLS');
subplot(2,2,3);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],mPT');
colorbar;
xlabel('x');
ylabel('t');
title('Thomas PT');
subplot(2,2,4);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],(mK)');
colorbar;
xlabel('x');
ylabel('t');
title('Kalman');
fprintf('Caption: Solutions. (Upper left) true, (upper right) Generalized Least Squares,\n');
fprintf('(lower left) Thomas Present-time, (lower right) Kalman\n');
figure();
clf;
subplot(1,2,1);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],(mi-mK)');
colorbar;
xlabel('x');
ylabel('t');
title('mi-mK');
subplot(1,2,2);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],(mPT-mK)');
colorbar;
xlabel('x');
ylabel('t');
title('mPT-mK');
fprintf('Caption: Differences. (Left) Thomas global-time (GLS) minus Kalman, (right) Thomas present-time minus Kalman\n');
figure();
clf;
subplot(1,2,1);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sd');
colorbar;
xlabel('x');
ylabel('t');
title('dtrue');
subplot(1,2,2);
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([-0.2,1.2]);
colormap('jet');
imagesc([0,xmax],[0,tmax],Sdata');
colorbar;
xlabel('x');
ylabel('t');
title('dobs');
fprintf('Caption: Data (Left) true, (right) observed\n');
figure();
clf;
hold on;
axis equal
axis([0,xmax,0,tmax])
set(gca,'Ydir','reverse');
caxis([0,5]);
colormap('jet');
imagesc([0,xmax],[0,tmax],sqrt(varm)');
colorbar;
xlabel('x');
ylabel('t');
title('sqrt(varm)');
fprintf('Caption: Standard error of the solution\n');