Code covered by the BSD License  

Recurrent Fuzzy Neural Network (RFNN) Library for Simulink
12 Aug 2013 (Updated)

Dynamic, recurrent fuzzy neural network (RFNN) for on-line supervised learning.

function [out,Xt,str,ts] = rfnn_mimo_grid(~,Xt,u,flag,itaVector,alphaVector, NumInVars,NumInTerms,NumOutVars,x0,T)

% This program is an implementation of the on-line RFNN (MIMO) system.
% The structure of the network is determined by the user.
% The input space is partitioned using the grid-type method.
% All parameters of the network are estimated by Gradient Descent (GD)
% through error backpropagation.

NumRules = NumInTerms^NumInVars;  % Grid-Type Input Space Partitioning.
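% Example: with NumInVars = 2 inputs and NumInTerms = 3 fuzzy terms per
% input, the grid partition produces 3^2 = 9 rules, one for every
% combination of one term from each input.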
         
%  ----------------------- Initial Information --------------
if flag==0
  
  ns = 4*NumInVars*NumInTerms + NumOutVars*NumRules;  % network states: mean2, sigma2, Theta2, Out2 and W
  nds = 3*NumInVars*NumInTerms + NumOutVars*NumRules; % momentum states: dmean2, dsigma2, dTheta2 and dW
  ninps = NumInVars+NumOutVars+1;                     % inputs to the S-function block: [x; e; learning-enable]
  out = [0,ns+nds,NumOutVars+ns+nds,ninps,0,1,1];     % [cont. states, disc. states, outputs, inputs, reserved, direct feedthrough, sample times]
  str = [];                                           % str is always an empty matrix
  ts = T;                                             % sample time
  Xt = x0;                                            % initial state vector
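  % Consequently, x0 must be a column vector of length ns+nds, packed in
  % the same order used for Xt below (mean2, sigma2, Theta2, W, Out2,
  % then the four momentum blocks).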
  
%  ----------------------- State Derivatives -----------------
elseif flag == 2
   
   x = u(1:NumInVars);                        % current network inputs
   e = u(NumInVars+1:NumInVars+NumOutVars);   % output error signal
   learning = u(NumInVars+NumOutVars+1);      % learning-enable flag

if learning == 1 
    
  % Learning Rates
   ita1 = itaVector(1); ita2 = itaVector(2);
   ita3 = itaVector(3); ita4 = itaVector(4);
  
  % Momentum Constants.
   alpha1 = alphaVector(1); alpha2 = alphaVector(2); 
   alpha3 = alphaVector(3); alpha4 = alphaVector(4);
  
  % Unroll the states:  
   off=1;
   off_end=NumInVars*NumInTerms;
   mean2=reshape(Xt(off:off_end),NumInVars,NumInTerms);  
   
   off=off_end+1;
   off_end=off + NumInVars*NumInTerms-1;
   sigma2=reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   off=off_end+1;
   off_end=off+NumInVars*NumInTerms-1;
   Theta2=reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   off=off_end+1;
   off_end=off + NumOutVars*NumRules-1;
   W = reshape(Xt(off:off_end),NumOutVars,NumRules);
      
   off=off_end+1;
   off_end=off+NumInVars*NumInTerms-1;
   Out2 = reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   % Unroll the differential states:
   off=off_end+1;
   off_end=off + NumInVars*NumInTerms-1;
   dmean2=reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   off=off_end+1;
   off_end=off + NumInVars*NumInTerms-1;
   dsigma2=reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   off=off_end+1;
   off_end=off + NumInVars*NumInTerms-1;
   dTheta2=reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   off=off_end+1;
   off_end=off + NumOutVars*NumRules-1;
   dW = reshape(Xt(off:off_end),NumOutVars,NumRules);
         
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  %             FEEDFORWARD OPERATION             %
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % LAYER 2 - INPUT TERM NODES
  Out2_pr = Out2;
  In2 = x*ones(1,NumInTerms) + Out2_pr.*Theta2;
  Out2 = exp(-((In2-mean2)./sigma2).^2);
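  % Each term node applies a Gaussian membership function. The feedback
  % term Out2_pr.*Theta2 adds the node's previous activation to its input,
  % which is what makes the network recurrent (dynamic):
  %   Out2(i,j) = exp(-((x(i) + Theta2(i,j)*Out2_prev(i,j) - mean2(i,j))/sigma2(i,j))^2)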
 
% LAYER 3 - RULE (PRODUCT) NODES
precond = comb(Out2);
 Out3 = prod(precond,2);
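 % comb (a helper distributed with the library, not shown here) expands the
 % NumInVars-by-NumInTerms membership matrix into a NumRules-by-NumInVars
 % matrix holding, for each rule, the membership value contributed by each
 % input; the firing strength Out3(k) is then the product across row k.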
 
%%%%%%%%%%%% END OF NETWORK FUNCTIONALITY SECTION %%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%           PARAMETER LEARNING SECTION          %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% BACKWARD PASS. Error Backpropagation
% LAYER 3
delta3 = e.'*W;
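% e.'*W propagates the output error back through the consequent weights,
% giving a 1-by-NumRules error signal, one entry per rule node.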

% LAYER 2
Q = zeros(NumInVars,NumInTerms,NumRules);
for i = 1:NumInVars
    for j = 1:NumInTerms
        for k = 1:NumRules
            if Out2(i,j) == precond(k,i) && Out2(i,j) ~= 0
                Q(i,j,k) = (Out3(k)/Out2(i,j))*delta3(k);
            end
        end
    end
end
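% Q(i,j,k) is the error reaching term node (i,j) through rule k: the rule
% firing strength with the node's own membership factored out, weighted by
% the rule error delta3(k). Summing over the rule dimension below gives
% each term node's total error signal.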

 ThetaE = sum(Q,3);
              
% LAYER 2 PARAMETER ADJUSTMENT BY GRADIENT DESCENT.  
deltamean2  =  2*ThetaE.*Out2.*(In2-mean2)./((sigma2).^2);
deltasigma2 =  2*ThetaE.*Out2.*((In2-mean2).^2)./(sigma2.^3);
deltaTheta2 = -2*ThetaE.*Out2.*(In2-mean2).*Out2_pr./sigma2.^2; 
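% These are the chain-rule derivatives of the Gaussian membership function
% with respect to its mean, width and recurrent weight, each scaled by the
% accumulated term-node error ThetaE.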

dmean2 = ita2*deltamean2 + alpha2*dmean2;
mean2  = mean2 + dmean2;

dsigma2 = ita3*deltasigma2 + alpha3*dsigma2;
sigma2  = sigma2 + dsigma2;

dTheta2 = ita4*deltaTheta2 + alpha4*dTheta2;
Theta2  = Theta2 + dTheta2;

% LAYER 4 PARAMETER ADJUSTMENT
deltaW = e*Out3.';
dW = ita1*deltaW + alpha1*dW;
W  = W + dW;
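% All four updates follow the generalized delta rule with momentum:
%   dP(t) = ita*deltaP + alpha*dP(t-1),   P <- P + dP(t),
% where ita is the learning rate and alpha the momentum constant.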

%%%%%%%%%%%   END OF PARAMETER LEARNING PROCESS %%%%%%%%%
% State Vector Storage.
% Xt = [mean2 sigma2 Theta2 W Out2 dmean2 dsigma2 dTheta2 dW];

Xt = [ reshape(mean2,NumInVars*NumInTerms,1);
       reshape(sigma2,NumInVars*NumInTerms,1);
       reshape(Theta2,NumInVars*NumInTerms,1);
       reshape(W,NumOutVars*NumRules,1);
       reshape(Out2,NumInVars*NumInTerms,1);
       reshape(dmean2,NumInVars*NumInTerms,1);
       reshape(dsigma2,NumInVars*NumInTerms,1);
       reshape(dTheta2,NumInVars*NumInTerms,1);
       reshape(dW,NumOutVars*NumRules,1) ];
end

out = Xt;    % next discrete-state vector is the block output at flag 2

%  ----------------------- Outputs -------------------------
elseif flag == 3
   
  % Unpack the network's parameters first...
   off=1;
   off_end=NumInVars*NumInTerms;
   mean2=reshape(Xt(off:off_end),NumInVars,NumInTerms);  
   
   off=off_end+1;
   off_end=off + NumInVars*NumInTerms-1;
   sigma2=reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   off=off_end+1;
   off_end=off+NumInVars*NumInTerms-1;
   Theta2 = reshape(Xt(off:off_end),NumInVars,NumInTerms);
   
   off=off_end+1;
   off_end=off + NumOutVars*NumRules - 1;
   W = reshape(Xt(off:off_end),NumOutVars,NumRules);
   
   off=off_end+1;
   off_end=off+NumInVars*NumInTerms-1;
   Out2 = reshape(Xt(off:off_end),NumInVars,NumInTerms);
         
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  %             FEEDFORWARD OPERATION             %
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  % LAYER 2 - INPUT TERM NODES
  Out2_pr = Out2;

  x = u(1:NumInVars);
  In2 = x*ones(1,NumInTerms) + Out2_pr.*Theta2;
  Out2 = exp(-((In2-mean2)./sigma2).^2);
 
% LAYER 3 - RULE (PRODUCT) NODES
precond = comb(Out2);
 Out3 = prod(precond,2);
 
 % LAYER 4 
  outact = W*Out3;

  % Block Outputs Vector Formation.
   out=[outact;Xt];            
     
else
   out = [];    % nothing to do for any other flag value
end
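The helper function comb is distributed with the library but not reproduced above. Its behaviour can be inferred from how it is used: precond = comb(Out2) must return a NumRules-by-NumInVars matrix whose k-th row holds, for each input, the membership value that input contributes to rule k, so that prod(precond,2) yields the rule firing strengths and precond(k,i) can be matched against Out2(i,j). Below is a minimal sketch of one such grid enumeration, written for illustration; it is not the library's actual code:

function precond = comb(Out2)
% Enumerate every grid combination of input terms. Out2 is the
% NumInVars-by-NumInTerms matrix of membership values; the result is a
% NumRules-by-NumInVars matrix (NumRules = NumInTerms^NumInVars) whose
% k-th row lists each input's membership value under rule k.
[NumInVars,NumInTerms] = size(Out2);
NumRules = NumInTerms^NumInVars;
precond = zeros(NumRules,NumInVars);
for k = 1:NumRules
    idx = k - 1;                       % mixed-radix decoding of rule index
    for i = 1:NumInVars
        j = mod(idx,NumInTerms) + 1;   % term of input i selected by rule k
        idx = floor(idx/NumInTerms);
        precond(k,i) = Out2(i,j);
    end
end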

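As a quick usage sketch, the S-function can also be exercised directly from the MATLAB prompt (with comb on the path). All sizes, learning rates and initial values below are illustrative, not values recommended by the library:

% Illustrative sizes: 2 inputs, 3 terms per input, 1 output.
NumInVars = 2;  NumInTerms = 3;  NumOutVars = 1;
NumRules  = NumInTerms^NumInVars;
ns  = 4*NumInVars*NumInTerms + NumOutVars*NumRules;
nds = 3*NumInVars*NumInTerms + NumOutVars*NumRules;

% Initial parameters, packed in the order expected by the block.
mean0  = repmat(linspace(-1,1,NumInTerms),NumInVars,1);  % grid centres
sigma0 = 0.5*ones(NumInVars,NumInTerms);                 % positive term widths
theta0 = zeros(NumInVars,NumInTerms);                    % recurrent weights
W0     = zeros(NumOutVars,NumRules);                     % consequent weights
x0 = [mean0(:); sigma0(:); theta0(:); W0(:);
      zeros(NumInVars*NumInTerms,1);                     % stored Out2
      zeros(nds,1)];                                     % momentum terms

ita = [0.1 0.1 0.1 0.1];  alpha = [0.01 0.01 0.01 0.01];  T = 0.01;

% flag 0: sizes vector and initial state.
[sizes,Xt] = rfnn_mimo_grid([],[],[],0,ita,alpha,NumInVars,NumInTerms,NumOutVars,x0,T);

% One learning update (flag 2) followed by an output evaluation (flag 3).
u  = [0.5; -0.3; 0.1; 1];   % [x; e; learning-enable]
Xt = rfnn_mimo_grid([],Xt,u,2,ita,alpha,NumInVars,NumInTerms,NumOutVars,x0,T);
y  = rfnn_mimo_grid([],Xt,u,3,ita,alpha,NumInVars,NumInTerms,NumOutVars,x0,T);
disp(y(1:NumOutVars))       % the network output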