clear all;
close all;
clc

% -------------------------------------------------------------------------
%
%   This file reads in the data files from a cue integration experiment in a
%   directory and performs an analysis using the MLE_Analysis.m file.
%   Please consult the documentation MLEtoolbox_documentation.pdf or type 
%   'help MLE_Analysis' for information on how to prepare the data.
%
%   Note that this analysis assumes you have used a 2AFC paradigm in which
%   a standard stimulus is compared to a comparison stimulus on each trial
%   (that is nrStim = 2).
%   Type "help MLE_Analysis" or "help MLE_SingleSubject" for further
%   options.
%
%   by Marieke Rohde, 2014
%   marieke.rohde@uni-bielefeld.de
%
%   patch added by Loes van Dam, 2015
%   - added the nrStim question (number of stimuli/intervals per trial) and related handling
%   - use filesep to delimit directories such that it works on all systems
%
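%   The analysis expects one data file per participant in the selected
%   directory: either .mat files (the first variable found in the file is
%   used) or .txt files that load() can read as a numeric matrix with one
%   row per trial. See 'help MLE_Analysis' for the required column layout.
%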
% -------------------------------------------------------------------------


% query user for data directory
dataDir =  uigetdir('','Please Select Folder with experimental results'); 
dataDir = [dataDir filesep];
files = dir(dataDir);
alldata =[];

% count the actual data files separately, in case the directory also
% contains other files

datafileindex = 1;

% loop through all files in the directory
for index = 1:length(files)
    try
        % .mat files: load the file and extract the first variable it contains
        if ~files(index).isdir && length(files(index).name) >= 4 ...
                && strcmp(files(index).name(end-3:end), '.mat')
            data = load([dataDir files(index).name]);
            var = fieldnames(data);
            data = data.(var{1});   % dynamic field access instead of eval
            
            % append data from all data files and add subject number as
            % additional column
            alldata = [alldata; data ones(size(data,1),1)*datafileindex];
            datafileindex = datafileindex+1;
            disp(['Reading in ' (files(index).name)]);
        end
        % .txt files: load directly as a numeric matrix
        if ~files(index).isdir && length(files(index).name) >= 4 ...
                && strcmp(files(index).name(end-3:end), '.txt')
            data = load([dataDir files(index).name]);
            
            % append data from all data files and add subject number as
            % additional column
            alldata = [alldata; data ones(size(data,1),1)*datafileindex];
            datafileindex = datafileindex+1;
            disp(['Reading in ' (files(index).name)]);
        end
    catch
        disp(['There was a problem reading in the file ' files(index).name ...
            ' - do all files have the correct and identical format?'])
    end
end
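
% At this point alldata holds the stacked trials from all files, with one
% extra final column giving the subject/file index, e.g. (the layout of the
% other columns depends on the experiment, see 'help MLE_Analysis'):
%   alldata = [ dataFile1  1*ones(n1,1)
%               dataFile2  2*ones(n2,1)
%               ...                    ];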

% ask the user how many stimuli/intervals were compared in each trial. E.g.,
% if two stimuli are compared, the measured JND represents sqrt(2) times the
% noise in a single signal.

nrStim = questdlg('Nr of intervals/stimuli in each trial? (usually 2)', ...
	'', ...
	'1','2','2');
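
% Illustration of the nrStim correction (hypothetical numbers): in a
% two-interval task both stimuli contribute independent noise, so the
% measured JND overestimates the single-stimulus noise by a factor of
% sqrt(2), e.g.
%   measuredJND = 2.0;                   % JND from the psychometric fit
%   sigmaSingle = measuredJND/sqrt(2);   % noise attributable to one stimulus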


% call MLE_Analysis with all the data - the fifth argument indicates 
% how much visual output (plotting) is desired.

[results,pop] = MLE_Analysis(alldata,str2double(nrStim),0,0,[0,0,1]);
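
% Fields of the results structure as used below (see 'help MLE_Analysis' for
% the full description): results.singleCues{i} holds the single-cue fits per
% noise level (noiselevels, params = [PSE sigma], params_CI), and
% results.cueCombos{1} holds the combined-cue noiselevels, empirical and
% predicted weights and sigmas, and their *_CI confidence intervals.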

% generate text output that summarizes the results on the screen 

fprintf('\n\n*************************** \n');
fprintf('*** Results single cues *** \n');
fprintf('*************************** \n\n');
for i = 1:size(results.singleCues,2)
    for j = 1: size(results.singleCues{1,i}.noiselevels,2)
        fprintf(['Cue: ' int2str(i) ' noise level: ' num2str(results.singleCues{1,i}.noiselevels(j)) '\n\n'])
        fprintf([' PSE: ' num2str(results.singleCues{1,i}.params(j,1))  ',  conf. interval ['   ...
            num2str(results.singleCues{1,i}.params_CI(j,1,1)) ' ' num2str(results.singleCues{1,i}.params_CI(j,1,2)) ']\n']);
        fprintf([' sigma: ' num2str(results.singleCues{1,i}.params(j,2))  ',  conf. interval ['   ...
            num2str(results.singleCues{1,i}.params_CI(j,2,1)) ' ' num2str(results.singleCues{1,i}.params_CI(j,2,2)) ']\n\n']);
        sigmas(j,i,:) = results.singleCues{1,i}.params_CI(j,2,:);   % keep the single-cue sigma CIs for the comparisons below
    end
end

fprintf('\n****************************************** \n');
fprintf('*** Predictions and results multimodal ***\n');
fprintf('******************************************\n');
for j = 1: size(results.cueCombos{1,1}.noiselevels,1)
    fprintf('\n** ')
    for i = 1: size(results.cueCombos{1,1}.noiselevels,2)
        fprintf(['noise cue ' int2str(i) ': ' num2str(results.cueCombos{1,1}.noiselevels(j,i)) '  '])
        
        % pick the single-cue sigma CI obtained at the noise level used in
        % this cue-combination condition
        sigmas_b(j,i,:) = sigmas((results.singleCues{1,i}.noiselevels == results.cueCombos{1,1}.noiselevels(j,i)),i,:);
    end
    fprintf('\n\n')
    
    
    fprintf('Weights: \n');
    for i = 1: size(results.cueCombos{1,1}.weights,2)
        fprintf(['\n cue ' int2str(i) ':\n   predicted ' num2str(results.cueCombos{1,1}.predicted_weights(j,i)) ...
              ',  conf. interval [' num2str(results.cueCombos{1,1}.predicted_weights_CI(j,i,1)) ' ' num2str(results.cueCombos{1,1}.predicted_weights_CI(j,i,2)) ...
            ']\n   empirical '  num2str(results.cueCombos{1,1}.weights(j,i)) ... 
              ',  conf. interval [' num2str(results.cueCombos{1,1}.weights_CI(j,i,1)) ' ' num2str(results.cueCombos{1,1}.weights_CI(j,i,2)) ']\n'])
    
        % empirical weight not significantly different from the MLE prediction
        % (i.e., the predicted and empirical confidence intervals overlap)?
        significance_weight(j,i)= results.cueCombos{1,1}.predicted_weights_CI(j,i,1)<results.cueCombos{1,1}.weights_CI(j,i,2) ...
            && results.cueCombos{1,1}.predicted_weights_CI(j,i,2)>results.cueCombos{1,1}.weights_CI(j,i,1) ;
    end
    fprintf('\n');

    fprintf('Sigma: \n\n');
    fprintf(['   predicted ' num2str(results.cueCombos{1,1}.predicted_sigma(j))  ... 
              ',  conf. interval [' num2str(results.cueCombos{1,1}.predicted_sigma_CI(j,1)) ' ' num2str(results.cueCombos{1,1}.predicted_sigma_CI(j,2)) ']']); 
    fprintf(['\n   empirical ' num2str(results.cueCombos{1,1}.sigma(j))  ... 
              ',  conf. interval [' num2str(results.cueCombos{1,1}.sigma_CI(j,1)) ' ' num2str(results.cueCombos{1,1}.sigma_CI(j,2)) ']\n'])
    fprintf('\n');
    
    % combined sigma significantly smaller than the better (smaller) unimodal sigma?
    significance_sigma(j,1) = results.cueCombos{1,1}.sigma_CI(j,2)<min(sigmas_b(j,:,1),[],2);
    
    % not significantly larger than the prediction?
    significance_sigma(j,2) = results.cueCombos{1,1}.sigma_CI(j,1)<results.cueCombos{1,1}.predicted_sigma_CI(j,2);
    
    % not significantly smaller than the prediction?
    significance_sigma(j,3) = results.cueCombos{1,1}.sigma_CI(j,2)>results.cueCombos{1,1}.predicted_sigma_CI(j,1);
    
end

fprintf('\n***************************************** \n');
fprintf('*** Significance of results (summary) ***\n');
fprintf('*****************************************\n');

fprintf(['\nResults for ' int2str(results.nrSub(1)) ' participants (' int2str(results.nrSub(2)-results.nrSub(1)) ' discarded)\n']);

for j = 1: size(results.cueCombos{1,1}.noiselevels,1)
    fprintf('\n** ')
    for i = 1: size(results.cueCombos{1,1}.noiselevels,2)
        fprintf(['noise cue ' int2str(i) ': ' num2str(results.cueCombos{1,1}.noiselevels(j,i)) '  '])
    end
    fprintf('\n')
    
    for i = 1: size(results.cueCombos{1,1}.weights,2)
        fprintf(['\n weight cue ' int2str(i) ': '])
        if significance_weight(j,i)
            fprintf(' optimal ');
        else
            fprintf(' not optimal ');
        end
    end
    
    % the three sigma tests above are concatenated into a '0'/'1' string and
    % mapped to the labels used in Rohde, van Dam & Ernst (2015)
    label = '';   % combinations not listed below stay unlabeled
    switch  char(significance_sigma(j,:) + 48)
        case '111'
            label = 'optimal';
        case '101'
            label = 'near-optimal';
        case '011'
            label = 'ambiguous';
        case '110'
            label = 'super-optimal';
        case '001'
            label = 'not optimal';
    end
    
    fprintf(['\n\n sigma ' label])
    
    fprintf('\n\n')
    
end
fprintf('(see Rohde, van Dam & Ernst, Multisensory Research (2015) for an explanation of the labels)\n\n');