## Solution of lab session on state estimation

## Import

import numpy as np
import matplotlib.pyplot as plt
import control as clt


## Parameters

# Moment of inertia [kg m^2 / s^2]
J = 0.01

# Friction coefficient [N m s]
b = 0.1

# DC-motor field gain [N m / A]
Km = 0.01

# Armature resistance [Ohm]
Ra = 1

# Armature inductance [H]
La = 1

# Sampling time [s]
Tc = 0.01

# Discrete-time model (forward-Euler discretization).
# State: x = [angular velocity, armature current]; input: armature voltage.
A = np.array([[1 - Tc * b / J, Tc * Km / J],
              [-Tc * Km / La, 1 - Tc * Ra / La]])
B = np.array([[0],
              [Tc / La]])
# Only the angular velocity is measured.
C = np.array([[1, 0]])

# System order
n = 2


# Loading data: true states X, clean measurements Y, inputs U,
# plus the bias value and the biased measurements Ybias.
datafile = np.load('data_kf_dcmotor.npz')
X, Y, U = datafile['X'], datafile['Y'], datafile['U']
bias = datafile['bias']
Yb = datafile['Ybias']


# Plot data

# Time axis: N samples from 0 to Tend (inclusive) at step Tc.
Tend = 8
T = np.arange(0, Tend + Tc, Tc)
N = T.size
fig = plt.figure()
ax1 = plt.subplot(2, 1, 1)
ax1.plot(T, Y)
ax1.set_xlabel("$t$")
ax1.set_ylabel("$y(t)$")
# fixed typo: "mesurement" -> "measurement"
ax1.set_title("velocity measurement $y(t)$ [rad/sec]")
ax2 = plt.subplot(2, 1, 2)
ax2.plot(T, U)
ax2.set_xlabel("$t$")
ax2.set_ylabel("$u(t)$")
ax2.set_title("input voltage $u(t)$ [V]")
fig.tight_layout()
plt.show()

# Kalman filter initialization
stdw = 1                      # process-noise standard deviation
Q = np.array([[stdw ** 2]])   # process-noise covariance
stdv = 0.1                    # measurement-noise standard deviation
R = stdv ** 2                 # measurement-noise variance (scalar)
# Noise-input matrix: the process noise enters like the voltage input.
G = np.array([[0],
              [Tc / La]])
# Initial state estimate and covariance.
x0 = np.array([[0.5],
               [0.5]])
P0 = np.eye(n)
# Storage for the filtered states and the covariance diagonals.
xhat = np.zeros((N, n))
Pkfd = np.zeros((N, n))
xp = x0.copy()
Pp = P0.copy()


# Kalman filter recursion (correction followed by prediction)
for k in range(N):
    # --- measurement update (correction) ---
    K = Pp @ C.T @ np.linalg.inv(C @ Pp @ C.T + R)   # Kalman gain
    innov = Y[k] - C @ xp                            # innovation
    x = xp + K * innov
    P = Pp @ (np.eye(n) - C.T @ K.T)
    # --- time update (prediction) ---
    xp = A @ x + B * U[k]
    Pp = A @ P @ A.T + G @ Q @ G.T
    # Store filtered estimate and covariance diagonal.
    xhat[k, :] = x.T
    Pkfd[k, :] = np.diag(P)


# Asymptotic Kalman filter: constant steady-state gain from dlqe.
Kinf = clt.dlqe(A, G, C, Q, R)[0]
xhatinf = np.zeros((N, n))
xpinf = x0.copy()
for k in range(N):
    # Correction with the fixed gain Kinf.
    xinf = xpinf + Kinf * (Y[k] - C @ xpinf)
    # Prediction.
    xpinf = A @ xinf + B * U[k]
    xhatinf[k, :] = xinf.T


# Plots: true states vs. time-varying and asymptotic KF estimates.
fig = plt.figure()
axv = plt.subplot(2, 1, 1)
axv.plot(T, X[:, 0], label=r'$x_1(t)$')
axv.plot(T, xhat[:, 0], label=r'$\hat{x}_1(t)$')
axv.plot(T, xhatinf[:, 0], label=r'$\hat{x}^{\infty}_1(t)$')
axv.set_xlabel("$t$")
axv.set_ylabel("$x_1(t)$")
axv.set_title("Velocity $x_1(t)$")
axv.legend()
axc = plt.subplot(2, 1, 2)
axc.plot(T, X[:, 1], label=r'$x_2(t)$')
axc.plot(T, xhat[:, 1], label=r'$\hat{x}_2(t)$')
axc.plot(T, xhatinf[:, 1], label=r'$\hat{x}^{\infty}_2(t)$')
axc.set_xlabel("$t$")
axc.set_ylabel("$x_2(t)$")
axc.set_title("Current $x_2(t)$")
axc.legend()
fig.tight_layout()

# Estimation errors with 3-sigma confidence intervals from the KF covariance.
fig = plt.figure()
axe1 = plt.subplot(2, 1, 1)
axe1.plot(T, xhat[:, 0] - X[:, 0], 'b', label=r'$x_1(t)-\hat{x}_1(t)$')
axe1.plot(T, 3 * np.sqrt(Pkfd[:, 0]), 'r', label='confidence intervals')
axe1.plot(T, -3 * np.sqrt(Pkfd[:, 0]), 'r')
axe1.set_xlabel("$t$")
axe1.set_ylabel(r"$x_1(t)-\hat{x}_1(t)$")
axe1.set_title(r'Error $x_1(t)-\hat{x}_1(t)$ and confidence intervals')
axe1.legend()
axe2 = plt.subplot(2, 1, 2)
axe2.plot(T, xhat[:, 1] - X[:, 1], 'b', label=r'$x_2(t)-\hat{x}_2(t)$')
axe2.plot(T, 3 * np.sqrt(Pkfd[:, 1]), 'r', label='confidence intervals')
axe2.plot(T, -3 * np.sqrt(Pkfd[:, 1]), 'r')
axe2.set_xlabel("$t$")
axe2.set_ylabel(r"$x_2(t)-\hat{x}_2(t)$")
axe2.set_title(r'Error $x_2(t)-\hat{x}_2(t)$ and confidence intervals')
axe2.legend()
fig.tight_layout()
plt.show()

# Biased measurements: plot the velocity measurement affected by a constant bias.
fig = plt.figure()
ax = plt.subplot()
ax.plot(T, Yb)
ax.set_xlabel("$t$")
ax.set_ylabel("$y(t)$")
ax.set_title("biased velocity measurement [rad/s]")


# Kalman filter initialization: state augmented with the (constant) measurement
# bias, modeled as a random walk with tiny driving noise stdwd.
Z = np.array([[0, 0]])
Ab = np.block([[A, Z.T],
               [Z, 1]])          # bias dynamics: d(k+1) = d(k)
Bb = np.block([[B],
               [0]])             # input does not affect the bias
Cb = np.block([[C, 1]])          # bias adds directly to the measurement
stdwd = 1e-6                     # bias random-walk standard deviation
Qb = np.array([[stdw ** 2, 0],
               [0, stdwd ** 2]])
Gb = np.array([[0, 0],
               [Tc / La, 0],
               [0, 1]])
R = stdv ** 2
# Initial augmented estimate (bias guessed at 1) and covariance.
x0b = np.array([[0.5],
                [0.5],
                [1]])
P0b = np.eye(n + 1)
xhat = np.zeros((N, n + 1))
Pkfd = np.zeros((N, n + 1))
xp = x0b.copy()
Pp = P0b.copy()


# Kalman filter recursion on the augmented system, driven by the biased data.
for k in range(N):
    # --- measurement update (correction) ---
    K = Pp @ Cb.T @ np.linalg.inv(Cb @ Pp @ Cb.T + R)   # Kalman gain
    innov = Yb[k] - Cb @ xp                             # innovation
    x = xp + K * innov
    P = Pp @ (np.eye(n + 1) - Cb.T @ K.T)
    # --- time update (prediction) ---
    xp = Ab @ x + Bb * U[k]
    Pp = Ab @ P @ Ab.T + Gb @ Qb @ Gb.T
    # Store filtered estimate and covariance diagonal.
    xhat[k, :] = x.T
    Pkfd[k, :] = np.diag(P)


# Plots: true states and bias vs. their augmented-KF estimates.
fig = plt.figure()
axv = plt.subplot(3, 1, 1)
axv.plot(T, X[:, 0], label='$x_1(t)$')
axv.plot(T, xhat[:, 0], label=r'$\hat{x}_1(t)$')
axv.set_xlabel("$t$")
axv.set_ylabel("$x_1(t)$")
axv.set_title("Velocity $x_1(t)$")
axv.legend(loc='lower right')
axc = plt.subplot(3, 1, 2)
axc.plot(T, X[:, 1], label='$x_2(t)$')
axc.plot(T, xhat[:, 1], label=r'$\hat{x}_2(t)$')
axc.set_xlabel("$t$")
axc.set_ylabel("$x_2(t)$")
axc.set_title("Current $x_2(t)$")
axc.legend(loc='lower right')
axd = plt.subplot(3, 1, 3)
axd.plot(T, bias * np.ones(N), label='$d$')
axd.plot(T, xhat[:, 2], label=r'$\hat{d}(t)$')
axd.set_xlabel("$t$")
axd.set_ylabel("$x_3(t)$")
axd.set_title("Bias $x_3(t)$")
axd.legend(loc='lower right')
fig.tight_layout()

# Estimation errors of the augmented KF with 3-sigma confidence intervals.
fig = plt.figure()
ax2 = plt.subplot(3, 1, 1)
ax2.plot(T, xhat[:, 0] - X[:, 0], 'b', label=r'$x_1(t)-\hat{x}_1(t)$')
ax2.plot(T, 3 * np.sqrt(Pkfd[:, 0]), 'r', label='confidence intervals')
ax2.plot(T, -3 * np.sqrt(Pkfd[:, 0]), 'r')
ax2.set_xlabel("$t$")
ax2.set_title(r'Error $x_1(t)-\hat{x}_1(t)$ and confidence intervals')
ax2.legend(loc='upper right')
ax2 = plt.subplot(3, 1, 2)
ax2.plot(T, xhat[:, 1] - X[:, 1], 'b', label=r'$x_2(t)-\hat{x}_2(t)$')
ax2.plot(T, 3 * np.sqrt(Pkfd[:, 1]), 'r', label='confidence intervals')
ax2.plot(T, -3 * np.sqrt(Pkfd[:, 1]), 'r')
ax2.set_xlabel("$t$")
ax2.set_title(r'Error $x_2(t)-\hat{x}_2(t)$ and confidence intervals')
ax2.legend(loc='upper right')
# NOTE(fix): a stray fig.tight_layout() used to sit here, before the third
# subplot was added; a single call after all subplots exist is sufficient.
ax2 = plt.subplot(3, 1, 3)
ax2.plot(T, xhat[:, 2] - bias * np.ones(N), 'b', label=r'$b-\hat{b}(t)$')
ax2.plot(T, 3 * np.sqrt(Pkfd[:, 2]), 'r', label='confidence intervals')
ax2.plot(T, -3 * np.sqrt(Pkfd[:, 2]), 'r')
ax2.set_xlabel("$t$")
ax2.set_title(r'Error $b-\hat{b}(t)$ and confidence intervals')
ax2.legend(loc='upper right')
fig.tight_layout()
plt.show()

# Kalman filter that ignores the bias: the original 2-state filter run on the
# biased measurements Yb, for comparison with the augmented filter.
xhatnb = np.zeros((N, n))
Pkfdnb = np.zeros((N, n))
xpnb = x0.copy()
Ppnb = P0.copy()
for k in range(N):
    # --- measurement update (correction) ---
    K = Ppnb @ C.T @ np.linalg.inv(C @ Ppnb @ C.T + R)   # Kalman gain
    innov = Yb[k] - C @ xpnb                             # innovation
    xnb = xpnb + K * innov
    Pnb = Ppnb @ (np.eye(n) - C.T @ K.T)
    # --- time update (prediction) ---
    xpnb = A @ xnb + B * U[k]
    Ppnb = A @ Pnb @ A.T + G @ Q @ G.T
    # Store filtered estimate and covariance diagonal.
    xhatnb[k, :] = xnb.T
    Pkfdnb[k, :] = np.diag(Pnb)


# Plots: true states vs. augmented-KF and bias-blind KF estimates.
fig = plt.figure()
axv = plt.subplot(2, 1, 1)
axv.plot(T, X[:, 0], label='$x_1(t)$')
axv.plot(T, xhat[:, 0], label=r'$\hat{x}_1(t)$')
axv.plot(T, xhatnb[:, 0], label=r'$\hat{x}^{nb}_1(t)$')
axv.set_xlabel("$t$")
axv.set_ylabel("$x_1(t)$")
axv.set_title("Velocity $x_1(t)$")
axv.legend(loc='lower right')
axc = plt.subplot(2, 1, 2)
axc.plot(T, X[:, 1], label='$x_2(t)$')
axc.plot(T, xhat[:, 1], label=r'$\hat{x}_2(t)$')
axc.plot(T, xhatnb[:, 1], label=r'$\hat{x}^{nb}_2(t)$')
axc.set_xlabel("$t$")
axc.set_ylabel("$x_2(t)$")
axc.set_title("Current $x_2(t)$")
axc.legend(loc='lower right')
fig.tight_layout()

# Error comparison: augmented KF (solid) vs. bias-blind KF (dashed),
# each with its own 3-sigma confidence intervals.
fig = plt.figure()
ax2 = plt.subplot(2, 1, 1)
ax2.plot(T, xhat[:, 0] - X[:, 0], 'b', label=r'$x_1(t)-\hat{x}_1(t)$')
ax2.plot(T, 3 * np.sqrt(Pkfd[:, 0]), 'r', label='confidence intervals')
ax2.plot(T, -3 * np.sqrt(Pkfd[:, 0]), 'r')
ax2.plot(T, xhatnb[:, 0] - X[:, 0], 'b--', label=r'$x_1(t)-\hat{x}^{nb}_1(t)$')
# fixed garbled label: was 'conf. int. no biasidence intervals'
ax2.plot(T, 3 * np.sqrt(Pkfdnb[:, 0]), 'r--', label='conf. int. no bias')
ax2.plot(T, -3 * np.sqrt(Pkfdnb[:, 0]), 'r--')
ax2.set_xlabel("$t$")
ax2.set_title(r'Error $x_1(t)-\hat{x}_1(t)$ and confidence intervals')
ax2.legend(loc='upper right')
ax2 = plt.subplot(2, 1, 2)
ax2.plot(T, xhat[:, 1] - X[:, 1], 'b', label=r'$x_2(t)-\hat{x}_2(t)$')
ax2.plot(T, 3 * np.sqrt(Pkfd[:, 1]), 'r', label='confidence intervals')
ax2.plot(T, -3 * np.sqrt(Pkfd[:, 1]), 'r')
ax2.plot(T, xhatnb[:, 1] - X[:, 1], 'b--', label=r'$x_2(t)-\hat{x}^{nb}_2(t)$')
ax2.plot(T, 3 * np.sqrt(Pkfdnb[:, 1]), 'r--', label='conf. int. no bias')
ax2.plot(T, -3 * np.sqrt(Pkfdnb[:, 1]), 'r--')
ax2.set_xlabel("$t$")
ax2.set_title(r'Error $x_2(t)-\hat{x}_2(t)$ and confidence intervals')
ax2.legend(loc='upper right')
fig.tight_layout()
plt.show()



