clear all

Repeat the experiment setup from the Wiener filter example, now for the design of an adaptive filter of order 3

Generate N = 2*N_realiz*N0 samples (see below for the significance of N_realiz and N0)

N_realiz = 100;
N0 = 1000;
N = 2*N_realiz * N0;
% initialize d and x
d = zeros(N,1); x = zeros(N,1);
% Generate independent random variable v_1
% Generate N i.i.d. (independent and identically distributed) samples with distribution N(0, 0.27), i.e. zero mean and variance 0.27
v1= sqrt(0.27)*randn(N,1);
% Generate the variable  d
a = 0.8458;
d(1) = 0;
for n= 2:N
    d(n) = -a*d(n-1) +v1(n);
end
% Generate the variable  x
b = -0.9458;
x(1) = 0;
for n= 2:N
    x(n) = -b*x(n-1) +d(n);
end
% Generate the variable  u
v2 = sqrt(0.1)*randn(N,1);
u = x(:) + v2(:);
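
For reference, the data model implemented by the code above is the pair of first-order recursions

$$d(n) = -a\,d(n-1) + v_1(n), \qquad x(n) = -b\,x(n-1) + d(n), \qquad u(n) = x(n) + v_2(n),$$

with $a = 0.8458$, $b = -0.9458$, and $v_1(n) \sim N(0,\,0.27)$, $v_2(n) \sim N(0,\,0.1)$ i.i.d. The adaptive filter designed below estimates $d(n)$ from the observations $u(n)$, $u(n-1)$, $u(n-2)$.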

Compute the correlations needed for the optimal Wiener design and for steepest descent (they are not needed in the LMS algorithm)

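The loops below form the sample estimates, over the index set $K = \{N/4, \ldots, N\}$,

$$\hat r_u(\tau) = \frac{1}{|K|}\sum_{n\in K} u(n)\,u(n-\tau), \qquad \hat r_{du}(\tau) = \frac{1}{|K|}\sum_{n\in K} d(n)\,u(n-\tau), \qquad \tau = 0,1,2.$$
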
k = (N/4):N;  % interval over which to compute the correlations (the first quarter of the data is discarded)
for tau = 0:2
    r_u1(tau+1) = sum(u(k).*u(k-tau))/length(k);
end
r_u1
for tau = 0:2
    r_ud1(tau+1) = sum(d(k).*u(k-tau))/length(k);
end
r_ud1
r_u1 =

    1.0848    0.4852    0.8331


r_ud1 =

    0.5243   -0.4445    0.3734

Wiener-Hopf design using estimated correlations from the data

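The Wiener filter solves the normal (Wiener-Hopf) equations $R_u\,w_o = p_{du}$, i.e. $w_o = R_u^{-1}p_{du}$, where $R_u$ is the 3x3 symmetric Toeplitz autocorrelation matrix built from $\hat r_u(0), \hat r_u(1), \hat r_u(2)$ and $p_{du} = [\hat r_{du}(0),\,\hat r_{du}(1),\,\hat r_{du}(2)]^T$. The backslash operator below solves this linear system directly, without forming the matrix inverse; the same matrix could also be built with toeplitz(r_u1).
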
Estimated correlation matrix

R1 = [r_u1(1) r_u1(2) r_u1(3);
      r_u1(2) r_u1(1) r_u1(2);
      r_u1(3) r_u1(2) r_u1(1)];
% Estimated cross-correlation vector
pd = r_ud1(:);
w_WHd = R1\pd
w_WHd =

    0.7362
   -0.8001
    0.1367

Stability condition for convergence of the LMS and steepest descent algorithms

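The steepest descent recursion converges (and LMS converges in the mean, under the usual independence assumptions) only if the step size satisfies

$$0 < \mu < \frac{2}{\lambda_{max}},$$

where $\lambda_{max}$ is the largest eigenvalue of the autocorrelation matrix; the code below estimates this bound from $R_1$.
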
[Q Lambda] = eig(R1);

lambda_max = max(diag(Lambda))
mu_max = 2/lambda_max % mu_max is the maximum step size allowed for the steepest descent algorithm
lambda_max =

    2.3042


mu_max =

    0.8680

Run the steepest descent algorithm for three values of step size

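Steepest descent moves the weight vector in the direction of the negative gradient of the mean-square error, which for this quadratic cost gives the deterministic update

$$w(k+1) = w(k) + \mu\,\bigl(p - R_u\,w(k)\bigr),$$

exactly the adaptation equation implemented in the loop below.
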
R_u = R1; % the estimated autocorrelation matrix
p= pd; % the cross-correlation vector
s_factors = [1/2, 1/10, 1/100] % scaling factors for the step size (mu = mu_max * s_factors(i_ss))
figure(3),clf
W_o = w_WHd; % use this as a reference when plotting the results
for i_ss = 1:3
    W = [-1;-1;1]; % initial value of the parameter vector
    mu = mu_max * s_factors(i_ss); % step size for this run
    Wt = W; % Wt will record the evolution of vector W
    nWt = []; % nWt will record the squared parameter error at each iteration
    for k=1:10000 % Iterate the adaptation step 10000 times
        W=W +mu*(p-R_u*W); % Adaptation Equation ! Quite simple!
        Wt=[Wt W]; % Wt records the evolution of vector W
        nWt = [nWt ((W-W_o)'*(W-W_o)/3)]; % nWt records the normalized squared error ||W-W_o||^2/3
    end % Is W the Wiener Filter?

    [Wt(:,end) w_WHd]; % final steepest-descent weights next to the Wiener solution (display suppressed)
    figure(3)
    semilogy(nWt),hold on
end
legend([num2str(s_factors(1)) '*\mu_{max}'],...
    [num2str(s_factors(2)) '*\mu_{max}'],...
    [num2str(s_factors(3)) '*\mu_{max}'])
s_factors =

    0.5000    0.1000    0.0100

LMS algorithm

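LMS replaces the exact correlations used by steepest descent with instantaneous estimates built from the current samples, so each new sample produces the update

$$y(n) = w^T(n)\,\mathbf{u}(n), \qquad e(n) = d(n) - y(n), \qquad w(n+1) = w(n) + \mu\,e(n)\,\mathbf{u}(n),$$

with $\mathbf{u}(n) = [u(n),\,u(n-1),\,u(n-2)]^T$. The learning curves below are averaged over N_realiz independent segments of N0 samples each.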
s_factors = [1/2, 1/10, 1/30];
i_ss = 3;
mu = mu_max * s_factors(i_ss); % fix the step size to mu_max/30
figure(4),clf
W_o = w_WHd; % use this as the ideal value of the parameters when plotting the results
% N0 = 1000;
nWt0 = zeros(N0,1); % accumulate here the parameter error ||W-W_o||^2 (averaged over realizations when plotting)
nJ0 = zeros(N0,1); % accumulate here the squared filter error J = e^2 (averaged over realizations when plotting)
for i_realiz = 1:N_realiz
    % use as data for realization i_realiz the N0 samples starting at index (N/2)+(i_realiz-1)*N0
    indices = (N/2)+(i_realiz-1)*N0 + (1:N0);
    dn = d(indices);
    un  = u(indices);
    % W = randn(3,1); % alternative: random initialization
    W = [-1;-1;1]; % initialize the vector of parameters (same starting point as steepest descent)
    Wt = W; % Wt will record the evolution of vector W
    nWt = zeros(N0,1); J = zeros(N0,1); y = zeros(N0,1); e = zeros(N0,1); % preallocate per-sample records
    for k=3:N0 % Iterate the adaptation step over the N0 samples (start at k = 3 so that un(k-2) exists)
        % first compute the output of the filter with the current
        % parameters
        y(k) = W(1)*un(k) + W(2)*un(k-1) + W(3)*un(k-2);
        e(k) = dn(k) - y(k);
        J(k) = e(k)^2;
        W = W +mu*e(k)*[un(k);un(k-1);un(k-2)]; % Adaptation Equation ! Quite simple!
        Wt=[Wt W]; % Wt records the evolution of vector W
        nWt(k) = ((W-W_o)'*(W-W_o)/3); % nWt records the normalized squared error ||W-W_o||^2/3
    end %
    nWt0 = nWt0 + nWt;
    nJ0 = nJ0 + J;
end
figure(4)
semilogy(nWt0/N_realiz)
xlabel('Iteration number')
ylabel('Norm of parameter error')
title('Learning curve ||w-w_o||^2')
figure(5)
semilogy(nJ0/N_realiz)
xlabel('Iteration number')
ylabel('Squared filter error')
title('Learning curve J = e^2')