Code covered by the BSD License  

Highlights from
DEM: Diffused expectation maximisation for image segmentation

image thumbnail

DEM: Diffused expectation maximisation for image segmentation

by

G. Boccignone, M. Ferraro, P. Napoletano

Color image segmentation using a variant of the Expectation-Maximization (EM) algorithm.

gmmdem(mix, x, options, diffoptions)
function [mix, options, errlog] = gmmdem(mix, x, options, diffoptions)
%GMMDEM	DEM algorithm for Gaussian mixture model.
%
%	Description
%	[MIX, OPTIONS, ERRLOG] = GMMDEM(MIX, X, OPTIONS, DIFFOPTIONS)
%   uses the algorithm described in the paper:
%
%   G Boccignone, M Ferraro, P Napoletano
%   Diffused expectation maximisation for image segmentation
%   Electronics Letters 40 (18), 1107-1108
%
%   describing a variant of the Expectation
%	Maximization (EM) algorithm of Dempster et al. to estimate the parameters
%	of a Gaussian mixture model defined by a data structure MIX.
%   The method is useful to model an image as a finite mixture, where each
%   mixture component corresponds to a region class.
%
%   To such purpose, the E-step and M-step of the EM algorithm
%   are coupled with a D-step consisting in anisotropic diffusion on
%   posterior probabilities of classes
%   in order to account for the spatial dependencies among pixels.
%
%	The matrix X represents the data whose expectation is maximized, with
%	each row corresponding to a vector.    The optional parameters have
%	the following interpretations.
%
%	OPTIONS(1) is set to 1 to display error values; also logs error
%	values in the return argument ERRLOG. If OPTIONS(1) is set to 0, then
%	only warning messages are displayed.  If OPTIONS(1) is -1, then
%	nothing is displayed.
%
%	OPTIONS(3) is a measure of the absolute precision required of the
%	error function at the solution. If the change in log likelihood
%	between two steps of the EM algorithm is less than this value, then
%	the function terminates.
%
%	OPTIONS(5) is set to 1 if a covariance matrix is reset to its
%	original value when any of its singular values are too small (less
%	than MIN_COVAR which has the value eps).   With the default value of
%	0 no action is taken.
%
%	OPTIONS(14) is the maximum number of iterations; default 100.
%
%   DIFFOPTIONS is a structure containing the parameters for a
%   PERONA-MALIK type anisotropic diffusion:
%     .rows, .cols       image dimensions (so that rows*cols == size(X,1))
%     .kappa             conduction (edge-stopping) parameter
%     .lambda            integration constant of the diffusion update
%     .iter              number of diffusion iterations per D-step
%     .PeronaMalikEq     1 = exponential conduction, 2 = rational conduction
%
%	The optional return value OPTIONS contains the final error value
%	(i.e. data log likelihood) in OPTIONS(8).
%
%	See also
%	GMM, GMMINIT, GMMEM
%
%
%	Coded as a modified version of the original Netlab GMMEM function
%   written by Ian T Nabney (1996-2001)

% Check that inputs are consistent
errstring = consist(mix, 'gmm', x);
if ~isempty(errstring)
  error(errstring);
end

[ndata, xdim] = size(x);

% Sort out the options
if (options(14))
  niters = options(14);
else
  niters = 100;
end

display = options(1);
store = 0;
if (nargout > 2)
  store = 1;	% Store the error values to return them
  errlog = zeros(1, niters);
end
test = 0;
if options(3) > 0.0
  test = 1;	% Test log likelihood for termination
end

check_covars = 0;
if options(5) >= 1
  if display >= 0
    disp('check_covars is on');
  end
  check_covars = 1;	% Ensure that covariances don't collapse
  MIN_COVAR = eps;	% Minimum singular value of covariance matrix
  init_covars = mix.covars;
end

% Diffusion (D-step) parameters
rows  = diffoptions.rows;
cols  = diffoptions.cols;
kappa = diffoptions.kappa;
lambda= diffoptions.lambda;

% Main loop of algorithm
for n = 1:niters

  %%%%%%%%%%%%%%%%%%%%% E-step
  % Calculate posteriors based on old parameters
  [post, act] = gmmpost(mix, x);


  %%%%%%%%%%%%%%%%%%%%% D-step
  %  Diffusion step  over posteriors
  post2=post'; %K x N

  for k = 1:mix.ncentres
    % transforming posteriors in a posterior probability map
    % of image dimension
    hmap = reshape(post2(k,:),rows,cols);

    % Perona-Malik  anisotropic diffusion
    % on the k-th class posterior probability map
    for i = 1:diffoptions.iter
      % Construct diffl which is the same as hmap but
      % has an extra padding of zeros around it.
      diffl = zeros(rows+2, cols+2);
      diffl(2:rows+1, 2:cols+1) = hmap;

      % North, South, East and West differences
      deltaN = diffl(1:rows,2:cols+1)   - hmap;
      deltaS = diffl(3:rows+2,2:cols+1) - hmap;
      deltaE = diffl(2:rows+1,3:cols+2) - hmap;
      deltaW = diffl(2:rows+1,1:cols)   - hmap;

      % Setting conduction coefficient types
      if diffoptions.PeronaMalikEq == 1
        % Exponential edge-stopping function (Perona-Malik g1)
        cN = exp(-(deltaN/kappa).^2);
        cS = exp(-(deltaS/kappa).^2);
        cE = exp(-(deltaE/kappa).^2);
        cW = exp(-(deltaW/kappa).^2);
      elseif diffoptions.PeronaMalikEq == 2
        % Rational edge-stopping function (Perona-Malik g2)
        cN = 1./(1 + (deltaN/kappa).^2);
        cS = 1./(1 + (deltaS/kappa).^2);
        cE = 1./(1 + (deltaE/kappa).^2);
        cW = 1./(1 + (deltaW/kappa).^2);
      else
        % Without this guard, cN..cW would be undefined below and the
        % failure would surface as an obscure "undefined variable" error.
        error('Unknown value %g for diffoptions.PeronaMalikEq (must be 1 or 2)', ...
          diffoptions.PeronaMalikEq);
      end

      % discretized Perona-Malik's eq. (3) in the Electronics Letters paper
      hmap = hmap + lambda*(cN.*deltaN + cS.*deltaS + cE.*deltaE + cW.*deltaW);
    end
    %reshaping the diffused map in the k x N vector
    post2(k,:) = reshape(hmap,1,rows*cols);
  end
  % [0,1] normalizing the probability  vector
  post2=normalise(post2,1);

  post=post2'; % netlab N x K vector
  %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


  % Calculate error value if needed
  if (display || store || test)
    prob = act*(mix.priors)';
    % Error value is negative log likelihood of data
    e = - sum(log(prob));
    if store
      errlog(n) = e;
    end
    if display > 0
      fprintf(1, 'Cycle %4d  Error %11.6f\n', n, e);
    end
    if test
      if (n > 1 && abs(e - eold) < options(3))
        options(8) = e;
        return;
      else
        eold = e;
      end
    end
  end

  %%%%%%%%%%%%%%%%%%%%% M-step
  % Adjust the new estimates for the parameters
  new_pr = sum(post, 1);
  new_c = post' * x;

  % Now move new estimates to old parameter vectors
  mix.priors = new_pr ./ ndata;

  mix.centres = new_c ./ (new_pr' * ones(1, mix.nin));

  switch mix.covar_type
  case 'spherical'
    n2 = dist2(x, mix.centres);
    v = zeros(1, mix.ncentres);   % preallocate instead of growing in the loop
    for j = 1:mix.ncentres
      v(j) = (post(:,j)'*n2(:,j));
    end
    mix.covars = ((v./new_pr))./mix.nin;
    if check_covars
      % Ensure that no covariance is too small
      for j = 1:mix.ncentres
        if mix.covars(j) < MIN_COVAR
          mix.covars(j) = init_covars(j);
        end
      end
    end
  case 'diag'
    for j = 1:mix.ncentres
      diffs = x - (ones(ndata, 1) * mix.centres(j,:));
      mix.covars(j,:) = sum((diffs.*diffs).*(post(:,j)*ones(1, ...
        mix.nin)), 1)./new_pr(j);
    end
    if check_covars
      % Ensure that no covariance is too small
      for j = 1:mix.ncentres
        if min(mix.covars(j,:)) < MIN_COVAR
          mix.covars(j,:) = init_covars(j,:);
        end
      end
    end
  case 'full'
    for j = 1:mix.ncentres
      diffs = x - (ones(ndata, 1) * mix.centres(j,:));
      diffs = diffs.*(sqrt(post(:,j))*ones(1, mix.nin));
      mix.covars(:,:,j) = (diffs'*diffs)/new_pr(j);
    end
    if check_covars
      % Ensure that no covariance is too small
      for j = 1:mix.ncentres
        if min(svd(mix.covars(:,:,j))) < MIN_COVAR
          mix.covars(:,:,j) = init_covars(:,:,j);
        end
      end
    end
  case 'ppca'
    for j = 1:mix.ncentres
      diffs = x - (ones(ndata, 1) * mix.centres(j,:));
      diffs = diffs.*(sqrt(post(:,j))*ones(1, mix.nin));
      [tempcovars, tempU, templambda] = ...
        ppca((diffs'*diffs)/new_pr(j), mix.ppca_dim);
      if length(templambda) ~= mix.ppca_dim
        error('Unable to extract enough components');
      else
        mix.covars(j) = tempcovars;
        mix.U(:, :, j) = tempU;
        mix.lambda(j, :) = templambda;
      end
    end
    if check_covars
      % Ensure that no covariance is too small.
      % BUGFIX: the original code tested mix.covars(j) with the stale loop
      % index j (always the last component), so only one component was
      % ever checked; loop over all components as the other branches do.
      for j = 1:mix.ncentres
        if mix.covars(j) < MIN_COVAR
          mix.covars(j) = init_covars(j);
        end
      end
    end
  otherwise
    error(['Unknown covariance type ', mix.covar_type]);
  end
end

% Maximum number of iterations reached without satisfying the
% log-likelihood termination test: report the final error value.
options(8) = -sum(log(gmmprob(mix, x)));
if (display >= 0)
  disp(maxitmess);
end
  

Contact us