
Commit

Initial commit
tabassm1 authored and tabassm1 committed Mar 19, 2018
1 parent f165a89 commit 0536851
Showing 6 changed files with 211 additions and 0 deletions.
Binary file added Demo.mlx
Binary file not shown.
61 changes: 61 additions & 0 deletions Example.m
@@ -0,0 +1,61 @@
%% Example
%% Direction-of-Arrival Estimation with a Uniform Linear Array in a Compressed Beamforming Application
%
% This example shows three cases in which SAEN successfully recovers the
% exact support, whereas Lasso fails to do so in all three. Thanks to using
% an \alpha other than unity, the elastic net recovers the exact support once.
% NOTE: These three cases are for set-up 4, taken from the 1000 cases of
% Table IV in the paper.
% Code by Muhammad Naveed Tabassum and Esa Ollila, Aalto University. <2018>

% First, generate the design matrix (i.e., dictionary) X for 'p' possible
% look directions (angles) using a uniform linear array (ULA) composed of
% 'n' sensor elements. The ULA has an inter-element spacing of half a wavelength.

clearvars; close all hidden; clc;

p = 180; % possible look directions
n = 40; % #of Sensors in ULA
TH_deg = 180*(0:p-1)/p - 90;
X = (1/sqrt(n)) * exp( 1i*pi*(0:n-1)'*sin(deg2rad(TH_deg)) );
sdX = sqrt( sum(X.*conj(X)) );
X = bsxfun(@rdivide, X, sdX);
load('seed_data.mat');

%%
sigVar = sum(srcPow.^2) / K; % signal variance = average source power = (|s1|^2+|s2|^2+...+|sK|^2)/K
noiseVar = 10^(-0.1*SNRdB)*sigVar; % noise variance based on SNR-level in dB.
[~, srcIndx] = ismember(srcLoc, TH_deg);
Xs = X(:,srcIndx);
m = 51; % number of grid points for the EN tuning parameter alpha
AL = linspace(1, 5e-1, m); % Grid of 'm' values for the elastic net tuning parameter.
ALGOz = {'SAEN', 'EN', 'Lasso'};
rng(s);
nERS = zeros(length(ALGOz), 1); % no. of exact recoveries of support (ERS)
for mc = 1:3
srcTrueK = srcPow.*exp(1i*unifrnd(0,2*pi, [K 1])); % signal = |s|exp(j*U(0,2*pi))
cscg_noise = sqrt(noiseVar/2)*(randn(n, 1) + 1i*randn(n, 1)); % circularly symmetric complex gaussian (CSCG) noise
y = Xs*srcTrueK + cscg_noise; % Noisy observations

% SAEN
[betaK_saen] = saen(y,X,K,AL);
nzi = find(abs(betaK_saen));
if sum(ismember(srcIndx, nzi))==K && length(nzi)<=K
nERS(1) = nERS(1) + 1;
end

% EN
[betaK_en,~,~] = cpwwen(y,X,K,AL);
nzi = find(abs(betaK_en));
if sum(ismember(srcIndx, nzi))==K && length(nzi)<=K
nERS(2) = nERS(2) + 1;
end

% Lasso
[betaK_lasso,~,~] = cpwwen(y,X,K);
nzi = find(abs(betaK_lasso));
if sum(ismember(srcIndx, nzi))==K && length(nzi)<=K
nERS(3) = nERS(3) + 1;
end
end
table(nERS, 'rowNames', ALGOz, 'VariableNames', {'Exact_support_recovery'})
74 changes: 74 additions & 0 deletions clarswlasso.m
@@ -0,0 +1,74 @@
function [lam,A,B1] = clarswlasso(y,X, intcpt,scaleX,maxp,w)
%-< Complex-valued LARS for Weighted Lasso (c-LARS-WLasso) >----
% Finds the knots (and respective solutions) for WLasso
% using the complex-valued extension of the LARS method.
%-< Outputs >-----
% lam = {lam0,...,lamK}, i.e., set of knot-values from lam0 to lamK
% A = support set in the predictor-entering order
% B1 = set of solutions using {lam0,...,lamK} for Lasso(alpha=1 in EN)
% Code by Muhammad Naveed Tabassum and Esa Ollila, Aalto University. <2018>
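%-< Usage (a minimal sketch; inputs named as in cpwwen.m) >-----
%   maxp = min(K+1, n);
%   [lam, A, B1] = clarswlasso(y, X, 0, 0, maxp); % no centering/rescaling
%   betaK = B1(:, end);                           % solution at the last knot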

[n, p] = size(X);

if nargin < 6, w = ones(p,1); end
if nargin < 5, maxp = min(n,p); end
if nargin < 4, scaleX=1; end
if nargin < 3, intcpt=1; end

if intcpt % if intercept is in the model, center the data
meanX= mean(X); meany = mean(y);
X = bsxfun(@minus, X, meanX);
y = y-meany;
end
if scaleX
% Default: scale X to have unit column norms
sdX = sqrt(sum(X.*conj(X)));
X = bsxfun(@rdivide, X, sdX);
end

X = X./repmat(w',n,1); % for weighted Lasso from Adaptive Lasso paper (section 3.5)
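% (This reparameterization follows the adaptive Lasso (Zou, 2006): solve a
% standard Lasso on the rescaled columns x_j/w_j, then map the solution back
% by dividing by w at the end of this function.)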
lam = zeros(1,maxp);
B1 = zeros(p,maxp);
beta = zeros(p,1);
r = y - X*beta; % residual vector
c = X'*r; % correlation of predictors with residual vector
[lam0, A] = max(abs(c)); % compute lambda0
lam(1)=lam0;
Delta = zeros(p,1);
for k=2:maxp
delta = (1/lam(k-1))*(X(:,A) \ r) ;
Delta(A) = delta;
notA = setdiff(1:p,A);
al = zeros(1,p);
for ell=notA
cl = X(:,ell)'*r;
bl = X(:,ell)'*(X(:,A)*delta);
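% Step size t at which predictor 'ell' attains the same correlation
% magnitude as the active set: |cl - t*bl| = lam(k-1) - t. Squaring
% both sides gives the real quadratic ax2*t^2 + bx1*t + cx0 = 0 below.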
ax2 = abs(bl)^2 - 1;
bx1 = 2*(lam(k-1)-real(cl*conj(bl)));
cx0 = abs(cl)^2 - lam(k-1)^2;
qp = [ax2, bx1, cx0];
ALz = roots(qp);

if all(ALz>0)
al(ell) = min(ALz);
else
al(ell) = subplus(max(ALz));
end

if ~isreal(ALz)
fprintf('complex root\n');
al(ell) = 0;
end
c(ell) = cl - al(ell)*bl;
end
[almin, tmp] = min(al(notA));
j = notA(tmp);
lam(k) = lam(k-1) - almin;
A = [A, j];
beta = beta + almin*Delta;
B1(:,k) = beta;
r = y - X*beta;
end
B1 = B1./repmat(w,1,maxp); % from Adaptive Lasso paper (section 3.5)
if scaleX, B1 = bsxfun(@rdivide,B1, sdX'); end
if intcpt, B1 = [meany - meanX*B1; B1]; end
57 changes: 57 additions & 0 deletions cpwwen.m
@@ -0,0 +1,57 @@
function [betaK,Lam,A] = cpwwen(y,X,K,AL,w,debias)
%-< Complex-valued Pathwise Weighted Elastic Net (c-PW-WEN) >----
% Finds the knots (and respective solutions) for WEN using c-LARS-WLasso.
%-< Outputs >-----
% betaK = Final K-sparse WEN estimate
% Lam = {lam0,...,lamK}, i.e., set of knot-values from lam0 to lamK
% A = support set in the predictor-entering order
% Code by Muhammad Naveed Tabassum and Esa Ollila, Aalto University. <2018>
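%-< Usage (a minimal sketch; 'y', 'X', 'K', 'AL' as in Example.m) >-----
%   [betaK, Lam, A] = cpwwen(y, X, K, AL); % WEN over the alpha-grid AL
%   [betaK, Lam, A] = cpwwen(y, X, K);     % default: Lasso (alpha = 1)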

[n, p] = size(X);
if nargin < 6, debias = true; end % Default case is to debias the solution
if nargin < 5, w = ones(p,1); end % Default case is uniformly weighted
if nargin < 4, AL = 1; end % Default case is Lasso (\alpha=1)

maxp = min(K+1, n);
m = length(AL);
RSS = zeros(1, m);
Lam = zeros(maxp, m); % EN knots for the m alphas
A = zeros(maxp, m); % Predictor-entering orders for the m alphas
B = zeros(p, m);

[lam,A(:,1),B1] = clarswlasso(y,X,0,0,maxp,w);
Lam(:,1) = lam; B(:,1) = B1(:,maxp);
lamen0 = lam; A(1,:) = A(1,1); % 1st entering variable is the same for all alphas
Xa = X(:, abs(B(:,1))~=0); % active-set of predictors
b_ls = Xa \ y; % LSE for current active-set
r = y - Xa*b_ls; % Residual for current active-set
RSS(1) = r'*r;
ya = [y; zeros(p,1)]; % augmented form of y
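% The EN is computed as a Lasso on augmented data (Zou & Hastie, 2005):
% X gains sqrt(lambda2)*eye(p) rows and y gains p zeros, where
% lambda2 = lamen0(jj)*(1-al) is the ridge part of the EN penalty (see Xa below).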

for ii= 2:m
al = AL(ii);
Aen = zeros(maxp-1,1); Ben = zeros(size(B1));
lamen = zeros(maxp,1); lamen(1) = lam(1)/al; % lam(alpha=1)/alpha_i
for jj=2:maxp
Xa = [X; sqrt(lamen0(jj)*(1-al))*eye(p)]; % augmented form of X
[tmpL,tmpA,tmpB] = clarswlasso(ya,Xa,0,0,jj,w);
lamen(jj) = tmpL(jj)/al; Aen(jj-1) = tmpA(jj);
eta_k = lamen(jj)*(1-al);
Ben(:,jj) = (1+eta_k)*tmpB(:,jj); % correction for double shrinkage
end
A(2:end,ii) = Aen; B(:,ii) = Ben(:,maxp);
Lam(:,ii) = lamen; lamen0 = lamen;

Xa = X(:, abs(B(:,ii))~=0); % active-set of predictors
b_ls = Xa \ y; % LSE for current active-set
r = y - Xa*b_ls; % Residual for current active-set
RSS(ii) = r'*r;
end
ali = find(RSS<=min(RSS),1); % best \alpha's index
betaK = B(:,ali); % Final K-sparse solution
Lam = Lam(:,ali); % Knots (lambda values)
A = A(:,ali); % Order of predictors in which they entered in solution

if debias % debiased solution using LSE of active predictors
betaK(A(1:end-1)) = X(:,A(1:end-1)) \ y;
end
19 changes: 19 additions & 0 deletions saen.m
@@ -0,0 +1,19 @@
function [betaK] = saen(y,X,K,AL)
%-< Sequential adaptive elastic net (SAEN) approach >----
% Finds the SAEN estimate using c-PW-WEN algorithm.
%-< Outputs >-----
% betaK = Final K-sparse SAEN estimate
% Code by Muhammad Naveed Tabassum and Esa Ollila, Aalto University. <2018>
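%-< Usage (a minimal sketch; 'y', 'X', 'K', 'AL' as in Example.m) >-----
%   betaK = saen(y, X, K, AL); % K-sparse SAEN estimate over the alpha-grid AL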

if nargin < 4, AL = 1; end % Default case is Lasso (\alpha=1)
[~,p] = size(X);
betaK = zeros(p,1);
w = ones(p,1); % initial weights
supp = 1:p; % initial support set
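% Three stages with support sizes 3K, 2K and K: each stage runs c-PW-WEN on
% the surviving predictors; the next stage's weights are the inverse
% magnitudes of the current estimates, and debiasing is done only at the
% final stage (itr==1).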
for itr=3:-1:1
[beta0,~,A] = cpwwen(y,X(:,supp), itr*K, AL,w, itr==1);
nzi = A(1:end-1);
w = 1 ./ abs(beta0(nzi)); % current weights
supp = supp(nzi); % current support set
end
betaK(supp) = beta0(nzi); % Final K-sparse solution
Binary file added seed_data.mat
Binary file not shown.
