
ckbs_nonlinear: A Simple Example and Test

Syntax
[ok] = get_started_ok(draw_plot)

Running This Example
Change into the example directory, start octave (or matlab), and then execute the commands
 
	test_path
	get_started_ok(true)


draw_plot
If this optional argument is true, a plot is drawn displaying the results. If draw_plot is not present or false, no plot is drawn.

ok
If the return value ok is true, the test passed, otherwise the test failed.

Simulated State Vector
The state vector for this example is in  \B{R}^n and the sequence of state vector values is simulated with  x_{j,k} = 1 for  j = 1 , \ldots , n and  k = 1 , \ldots , N .

Transition Model
We simulate imperfect knowledge of this dynamical system using the persistence model. To be specific,  \[
     x_k = x_{k-1} + w_k
\] 
where  w_k \sim \B{N} ( 0 , Q_k ) and the variance  Q_k = I_n (where  I_n is the  n \times n identity matrix). The routine persist_g.m calculates the mean for  x_k given  x_{k-1} . Note that the initial state estimate is returned by this routine as  \hat{x} = g_1 (x) and has  \hat{x}_{j,1} = 1 for  j = 1 , \ldots , n . Also note that the corresponding variance is  Q_1 = I_n .
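
The actual persist_g.m is documented under Call Back Functions below. As a rough illustration only (not the distributed source), a persistence transition routine consistent with the calling convention [gk, Gk] = persist_g(k, xk1) and the global persist_g_info used in the source code below could be sketched as follows; the value Gk = I_n returned at k = 1 is an illustrative assumption.

	function [gk, Gk] = persist_g(k, xk1)
		global persist_g_info
		initial = persist_g_info.initial;
		n       = length(initial);
		if k == 1
			gk = initial;   % g_1 returns the initial state estimate
		else
			gk = xk1;       % persistence model: the mean of x_k is x_{k-1}
		end
		Gk = eye(n);        % derivative of gk w.r.t. xk1 (choice at k = 1 is illustrative)
	end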

Measurements
For this example, the measurements are direct observations of the state  \[
     z_k = x_k + v_k
\] 
where  v_k \sim \B{N} ( 0 , R_k ) and the variance  R_k = I_n . The routine direct_h.m calculates the mean for  z_k , given the value of  x_k .
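
Similarly, direct_h.m is documented under Call Back Functions below. As an illustration only (not the distributed source), a direct measurement routine matching the calling convention [hk, Hk] = direct_h(k, xk) and the global direct_h_info used in the source code below might look like this:

	function [hk, Hk] = direct_h(k, xk)
		global direct_h_info
		index = direct_h_info.index;   % state components that are observed
		m     = length(index);
		n     = length(xk);
		hk    = xk(index);             % mean of z_k given x_k
		Hk    = zeros(m, n);           % derivative of hk with respect to xk
		for i = 1 : m
			Hk(i, index(i)) = 1;
		end
	end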

Constraints
For this example, the constraints are  \[
     0.5 \leq x_{j,k} \leq 1.5
\] 
for  j = 1 , \ldots , n and  k = 1 , \ldots , N . The routine box_f.m represents these constraints in the form  f_k ( x_k ) \leq 0 as expected by ckbs_nonlinear .
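
box_f.m is documented under Call Back Functions below. The following sketch (an illustration only, not the distributed source) shows one way such a routine could express the box constraints in the required form, using the calling convention [fk, Fk] = box_f(k, xk) and the global box_f_info set up in the source code below:

	function [fk, Fk] = box_f(k, xk)
		global box_f_info
		lower = box_f_info.lower;   % lower bounds (0.5 in this example)
		upper = box_f_info.upper;   % upper bounds (1.5 in this example)
		index = box_f_info.index;   % constrained components of xk
		p     = length(index);
		n     = length(xk);
		% fk <= 0 is equivalent to lower <= xk(index) <= upper
		fk    = [ lower - xk(index) ; xk(index) - upper ];
		% derivative of fk with respect to xk
		Fk    = zeros(2 * p, n);
		for i = 1 : p
			Fk(i,     index(i)) = -1;
			Fk(i + p, index(i)) = +1;
		end
	end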

Call Back Functions
persist_g.m ckbs_nonlinear: Example of Persistence Transition Function
direct_h.m ckbs_nonlinear: Example Direct Measurement Model
box_f.m ckbs_nonlinear: Example of Box Constraints

Source Code
 
function [ok] = get_started_ok(draw_plot)
if nargin < 1
	draw_plot = false;
end
% --------------------------------------------------------------------------
max_itr = 20;      % maximum number of iterations
epsilon = 1e-5;    % convergence criteria
N       = 40;      % number of time points
n       = 1;       % number of state vector components per time point
sigma_r = 1;       % standard deviation of measurement noise
sigma_q = 1;       % standard deviation of transition noise
sigma_e = 1;       % standard deviation of initial state estimate
initial = ones(n, 1); % estimate for initial state value
randn('seed', 4321)
% ---------------------------------------------------------------------------
% Level of tracing during optimization
if draw_plot
	level = 1;
else
	level = 0;
end
m       = n;       % number of measurements per time point
ell     = 2 * n;   % number of constraints
index   = 1 : n;   % index vector
% ---------------------------------------------------------------------------
% global information used by box_f
global box_f_info
box_f_info.lower = 0.5 * ones(n, 1);
box_f_info.upper = 1.5 * ones(n, 1);
box_f_info.index = index;
% ---------------------------------------------------------------------------
% global information used by persist_g
global persist_g_info
persist_g_info.initial = initial;
% ---------------------------------------------------------------------------
% global information used by direct_h
global direct_h_info
direct_h_info.index = index;
% ---------------------------------------------------------------------------
% simulated true trajectory 
x_true  = ones(n, N);
% initial vector during optimization process
x_in    = 0.5 * ones(n, N);
% simulated measurement values and inverse variances
rinv    = zeros(m, m, N);
qinv    = zeros(n, n, N);
z       = zeros(m, N);
for k = 1 : N
	xk           = x_true(:, k);
	ek           = randn(m, 1);
	hk           = direct_h(k, xk);
	z(:, k)      = hk + ek;
	qinv(:,:, k) = eye(n) / (sigma_q * sigma_q);
	rinv(:,:, k) = eye(m) / (sigma_r * sigma_r);
end
qinv(:,:,1)       = eye(n) / (sigma_e * sigma_e);
% ----------------------------------------------------------------------
% call back functions
f_fun = @ box_f;
g_fun = @ persist_g;
h_fun = @ direct_h;
% ----------------------------------------------------------------------
% call optimizer
[x_out, u_out, info] = ckbs_nonlinear( ...
	f_fun,    ...
	g_fun,    ...
	h_fun,    ...
	max_itr,  ...
	epsilon,  ...
	x_in,     ...
	z,        ...
	qinv,     ...
	rinv,     ...
	level     ...
);
% ----------------------------------------------------------------------
ok     = size(info, 1) <= max_itr;   % one row of info per iteration of the optimizer
%
% Set up affine problem corresponding to x_out (for a change in x).
% Check constraint feasibility and complementarity.
f_out  = zeros(ell, N);
g_out  = zeros(n,   N);
h_out  = zeros(m,   N);
df_out = zeros(ell, n, N);
dg_out = zeros(n, n,   N);
dh_out = zeros(m, n,   N);
xk1    = zeros(n, 1);
for k = 1 : N
	xk       = x_out(:, k);
	uk       = u_out(:, k);
	[fk, Fk] = f_fun(k, xk);
	[gk, Gk] = g_fun(k, xk1);
	[hk, Hk] = h_fun(k, xk);
	%
	ok       = ok & all( fk <= epsilon);              % feasibility
	ok       = ok & all( abs(fk) .* uk <= epsilon );  % complementarity
	%
	f_out(:, k)    = fk;
	g_out(:, k)    = gk - xk;
	h_out(:, k)    = hk;
	df_out(:,:, k) = Fk;
	dg_out(:,:, k) = Gk;
	dh_out(:,:, k) = Hk;
	xk1 = xk;
end
ok     = ok & all( all( u_out >= 0 ) );
%
% Compute gradient of unconstrained affine problem at dx equal to zero
% and then check the gradient of the Lagrangian
dx_out = zeros(n, N);
grad  = ckbs_sumsq_grad(dx_out, z, g_out, h_out, dg_out, dh_out, qinv, rinv);
for k = 1 : N
	uk  = u_out(:, k);
	Fk  = df_out(:,:, k);
	dk  = grad(:, k);
	%
	ok  = ok & all( abs(Fk' * uk + dk) <= epsilon );
end
% ----------------------------------------------------------------------
if draw_plot
	figure(1);
	clf
	hold on
	time = 1 : N;
	plot(time, x_true(1,:),    'b-');
	plot(time, x_out(1,:),     'g-');
	plot(time, z(1,:),         'ko'); 
	plot(time, 0.5*ones(1,N),  'r-');
	plot(time, 1.5*ones(1,N),  'r-');
	title('Position: blue=truth, green=estimate, red=constraint, o=data');
	axis( [ 0 , N , -1, 3 ] )
	hold off
	return
end

Input File: example/nonlinear/get_started_ok.m