diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..f4f4aea
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# Tell Git to automatically handle EOL conversion for files it detects as text.
+* text=auto
diff --git a/README.md b/README.md
index f8360ea..4aed955 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,2 @@
-# AOCompObserver
-Computational observer calculations for our AO psychopysical experiments
+# AOMicroRepeat
+AO psychophysics analysis
diff --git a/analysisDir/combinedData.mat b/analysisDir/combinedData.mat
new file mode 100644
index 0000000..2175733
Binary files /dev/null and b/analysisDir/combinedData.mat differ
diff --git a/code/CheckDataSnippet.m b/code/CheckDataSnippet.m
new file mode 100644
index 0000000..2847328
--- /dev/null
+++ b/code/CheckDataSnippet.m
@@ -0,0 +1,11 @@
+
+% Load a QUEST data file and run this to find the maximum trial intensity with
+% an incorrect response (catch trials, marked by NaN threshold, excluded).
+% Intensity is raw log10, not LUT corrected.
+index = find(response_matrix == 0 & ~isnan(theThreshold) );
+max(trial_matrix(index))
+
+% Load a MOCS data file and run this to find the maximum trial intensity with
+% an incorrect response (response == 0). Intensity is raw log10, not LUT corrected.
+index = find(response_vector == 0);
+max(trial_vector(index))
+unique(trial_vector)
\ No newline at end of file
diff --git a/code/CombineTrials.m b/code/CombineTrials.m
new file mode 100644
index 0000000..aba8546
--- /dev/null
+++ b/code/CombineTrials.m
@@ -0,0 +1,312 @@
+%% CombineTrials
+%
+% Combine all of the psychophysical data files into one big .mat
+% file. We do this to simplify the other programs, since the logic
+% here involves lots of checks that everything is as it should be.
+%
+% Also identify catch trials in QUEST runs, which are somewhat obscurely
+% indicated, and fix them up so later programs don't have to worry about
+% this.
+%
+% Although this might look like it is splitting the data into two halves, that
+% actually happens elsewhere, and here all the data are stored under all the splits.
+% This is a vestige from the time this code was part of the full data analysis loop, and
+% if we were better people we'd skip this step here. Maybe we will become better at some
+% point.
+%
+% The output of this program is a mat file 'combinedData.mat' that gets stored in the
+% analysis output tree, and that contains one big cell array with all of the data.
+%
+% See also: FitTrials, FigureN
+
+%% Initialize
+close all; clear all;
+
+%% Get path to data
+%
+% Path to data tree on this machine
+%
+% This is set up by the TbTb local hook file, but you can also do it
+% by setting the relevant preferences one time on your machine,
+% for example like this:
+%   setpref('AOMicroRepeat','dataDir','C:\Users\niveg\Aguirre-Brainard Lab Dropbox\Nivedhitha Govindasamy\AO-Microperimetry Repeatability Paper\Data_for_paper\David_code_analysis\New_analysis_20250912\dataDir');
+%   setpref('AOMicroRepeat','analysisDir','C:\Users\niveg\Aguirre-Brainard Lab Dropbox\Nivedhitha Govindasamy\AO-Microperimetry Repeatability Paper\Data_for_paper\David_code_analysis\figure-rerun');
+%
+% Do not hard code directories into this program.
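+% A minimal sketch (not part of the original pipeline; variable names here are
+% illustrative) of how downstream code can read the combined file. The cell
+% indices are {participant, diameter, session, split, method}, matching the
+% loop order below:
+%
+%   s = load(fullfile(analysisDir,'combinedData.mat'));
+%   trialsByRun = s.all_trials_unpacked{1,1,1,1,1};  % one [intensity response] row per trial
+%   intensities = trialsByRun(:,1);                  % raw log10 intensities
+%   responses   = trialsByRun(:,2);                  % 0 = incorrect, per CheckDataSnippet.m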
+dataDir = getpref('AOMicroRepeat','dataDir');
+
+%% Also analysis output dir, same idea
+analysisDir = getpref('AOMicroRepeat','analysisDir');
+
+%% Some parameters
+log0Value = -3.5;
+
+%% Define what data we are analyzing here
+%
+% Generally speaking, this routine loops over everything and does its thing
+%
+% Define subjects
+theParticipants = {'11002' '11108' '11118' '11119' '11125'};
+
+% Define sessions (1 or 2)
+theSessions = [1 2];
+
+% Define session splits
+%
+% We just do 'All' in the loop, and then hand code in the 'Group1' and 'Group2' splits,
+% because the way the code is structured makes that easier to write clearly. Be sure not
+% to change this order because it is used in FitTrials.m
+% theSplits = {'All','Group1','Group2'};
+theSplits = {'All'};
+
+% Define sizes (8 or 43)
+theDiameters = [8 43];
+
+% When we create COMBINED data below, we count
+% on this being as it is. Don't change without care.
+theMethods = {'MOCS' 'QUEST', 'COMBINED'};
+
+%% Get the AOM lookup table info.
+%
+% This is loaded and saved here with the data, so that the programs that
+% read the output of this program don't have to go and find it.
+%
+% As far as I can guess:
+% Column 1: nominal linear intensities
+% Column 2: LUT linear intensities
+% Column 3: nominal log10 intensities
+% Column 4: looks like a non-linear mapping from 10 to 8 bits
+AOM = load('green_AOM_LUT_processing');
+
+%% Lock the rng
+rng(0);
+
+%% Loop over everything
+tableRow = 1;
+for pp = 1:length(theParticipants)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            for hh = 1:length(theSplits)
+                checkSessionDate = [];
+                checkSessionTime = [];
+                MOCSFileTimes = [];
+                QUESTFileTimes = [];
+                runOrderSplitThisSessionForBothMethods = Shuffle([1 1 2 2]);
+                for mm = 1:length(theMethods)
+
+                    % Store info for what we are analyzing in this run
+                    theMethod{tableRow,1} = theMethods{mm};
+                    theSubject{tableRow,1} = theParticipants{pp};
+                    theDiameter(tableRow,1) = theDiameters(dd);
+                    theSession(tableRow,1) = theSessions(ss);
+                    theSplit{tableRow,1} = theSplits{hh};
+
+                    % Handle MOCS and QUEST from the data
+                    if (strcmp(theMethods{mm},'MOCS') | strcmp(theMethods{mm},'QUEST'))
+
+                        % Form path to the directory with the data sitting in it
+                        pathToData = fullfile(dataDir,theSubject{tableRow},['Session' num2str(theSession(tableRow))],['Size' num2str(theDiameter(tableRow))],theMethod{tableRow});
+
+                        % Get list of data files
+                        dirOffset = 1;
+                        trial_videos = dir(fullfile(pathToData,'*.mat'));
+                        num_trial_videos = length(trial_videos(dirOffset:end));
+                        if (num_trial_videos ~= 4)
+                            error('Expect 4 data files');
+                        end
+
+                        % Read and concatenate the data. Also initialize the
+                        % two split accumulators (indices 2 and 3) that get
+                        % filled run-by-run below.
+                        all_trials{pp,dd,ss,hh,mm} = {};
+                        all_trials_unpacked{pp,dd,ss,hh,mm} = [];
+                        all_trials_unpacked{pp,dd,ss,2,mm} = [];
+                        all_trials_unpacked{pp,dd,ss,3,mm} = [];
+                        fprintf('\tReading videos\n');
+                        for i = 1:num_trial_videos
+                            % Get check date from filename. This should be the
+                            % same for all the MOCS and QUEST files from the
+                            % same subject session. The method is the inner
+                            % loop variable, so we can do the check here.
+                            if (isempty(checkSessionDate))
+                                checkSessionDate = trial_videos(i+dirOffset-1).name(7:16);
+                            else
+                                if (~strcmp(checkSessionDate,trial_videos(i+dirOffset-1).name(7:16)))
+                                    error('Not all data files from same session are on the same date');
+                                end
+                            end
+
+                            % Check that QUEST file times are later than the last
+                            % MOCS time. This code assumes that it runs to
+                            % completion on the same calendar day on which it
+                            % was started, which will almost always be true.
+                            %
+                            % Count underscores to find the start of the time
+                            % string.
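+                            % (For example, the padding below turns a raw time
+                            % string '9_5' into '09_05', which datetime() can
+                            % then parse with the HH_mm format.)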
+                            nUnderscore = 0;
+                            for cc = 1:length(trial_videos(i+dirOffset-1).name)
+                                if (trial_videos(i+dirOffset-1).name(cc) == '_')
+                                    nUnderscore = nUnderscore + 1;
+                                end
+                                if (nUnderscore == 4)
+                                    timeStartIndex = cc + 1;
+                                    break;
+                                end
+                            end
+                            fileTimeStr = trial_videos(i+dirOffset-1).name(timeStartIndex:timeStartIndex+4);
+
+                            % Handle the way single digit time numbers get written
+                            % in the filename. Brute force this. Ugh.
+                            if (fileTimeStr(2) == '_')
+                                fileTimeStr = ['0' fileTimeStr];
+                            end
+                            if (fileTimeStr(5) == '_')
+                                fileTimeStr(5) = fileTimeStr(4);
+                                fileTimeStr(4) = '0';
+                            end
+                            fileTimeStr = fileTimeStr(1:5);
+
+                            % Convert to a format MATLAB can operate on
+                            fileTime = datetime(fileTimeStr,'InputFormat','HH_mm');
+                            if (strcmp(theMethod(tableRow),'MOCS'))
+                                MOCSFileTimes = [MOCSFileTimes ; fileTime];
+                            else
+                                QUESTFileTimes = [QUESTFileTimes ; fileTime];
+                            end
+
+                            % Read the data file
+                            % fprintf('Reading file %s\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+                            loadedData{pp,dd,ss,hh,mm,i} = load(fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+
+                            % Grab the trial data we need. Also do some
+                            % MOCS and QUEST specific reality checks
+                            if (isfield(loadedData{pp,dd,ss,hh,mm,i},'trial_vector'))
+                                % It's a MOCS data file by what's in it
+                                if (~strcmp(theMethod{tableRow},'MOCS'))
+                                    fprintf('File %s\n\tUnder QUEST, appears to be MOCS\n\tTrials: %d\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name),length(loadedData{pp,dd,ss,hh,mm,i}.trial_vector));
+                                end
+                                if (length(loadedData{pp,dd,ss,hh,mm,i}.trial_vector) ~= 90 & length(loadedData{pp,dd,ss,hh,mm,i}.trial_vector) ~= 100)
+                                    fprintf('File %s: Wrong number of trials in MOCS data file\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+                                end
+                                all_trials{pp,dd,ss,hh,mm}{i,1} = loadedData{pp,dd,ss,hh,mm,i}.trial_vector;
+                                all_trials{pp,dd,ss,hh,mm}{i,2} = loadedData{pp,dd,ss,hh,mm,i}.response_vector;
+
+                                % Reality check on MOCS stimulus levels.
+                                % These should be the same across runs
+                                % within a session, I think, but are not
+                                % always. This doesn't error out, but does
+                                % report the unexpected cases.
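+                                % (Caveat: the any(~=) comparison below assumes
+                                % the two unique() vectors have equal length;
+                                % runs with different numbers of levels would
+                                % throw a size-mismatch error rather than print
+                                % this report.)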
+                                for j = i-1:-1:1
+                                    if (any(unique(loadedData{pp,dd,ss,hh,mm,j}.trial_vector) ~= unique(loadedData{pp,dd,ss,hh,mm,i}.trial_vector)))
+                                        fprintf('\t\tMOCS mismatch in trial levels across runs\n')
+                                        fprintf('\t\t%s, session %d, size %d, run loaded #%d vs run loaded #%d\n',theParticipants{pp},theSessions(ss), theDiameters(dd),i,j);
+                                        fprintf('\t\tFile loaded #%d: %s\n',i,fullfile(trial_videos(i+dirOffset-1).name));
+                                        fprintf('\t\tFile loaded #%d: %s\n',j,fullfile(trial_videos(j+dirOffset-1).name));
+                                    end
+                                end
+
+                            elseif (isfield(loadedData{pp,dd,ss,hh,mm,i},'trial_matrix'))
+                                % It's a QUEST data file by what's in it
+                                if (~strcmp(theMethod{tableRow},'QUEST'))
+                                    fprintf('File %s\n\tUnder MOCS, appears to be QUEST\n\tTrials: %d\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name),length(loadedData{pp,dd,ss,hh,mm,i}.trial_matrix));
+                                end
+                                if (length(loadedData{pp,dd,ss,hh,mm,i}.trial_matrix) ~= 44)
+                                    fprintf('File %s: Wrong number of trials in QUEST data file\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+                                end
+                                all_trials{pp,dd,ss,hh,mm}{i,1} = loadedData{pp,dd,ss,hh,mm,i}.trial_matrix;
+                                questCatchTrialIndex = find(isnan(loadedData{pp,dd,ss,hh,mm,i}.theThreshold));
+                                if (length(questCatchTrialIndex) ~= 4)
+                                    fprintf('Wrong number of catch trials in QUEST data file\n');
+                                end
+                                all_trials{pp,dd,ss,hh,mm}{i,1}(questCatchTrialIndex) = log0Value;
+                                all_trials{pp,dd,ss,hh,mm}{i,2} = loadedData{pp,dd,ss,hh,mm,i}.response_matrix;
+                            else
+                                error('Do not understand data format');
+                            end
+
+                            % Get/check number of trials and make sure they
+                            % are the same for each run within
+                            % method/session.
+                            if (i == 1)
+                                nTrialsPerSession = length(all_trials{pp,dd,ss,hh,mm}{i,1});
+                            else
+                                if (length(all_trials{pp,dd,ss,hh,mm}{i,1}) ~= nTrialsPerSession)
+                                    error('Trial mismatch across runs');
+                                end
+                            end
+
+                            % Concatenate across runs into one long pair of vectors.
+                            all_trials_unpacked{pp,dd,ss,hh,mm} = [all_trials_unpacked{pp,dd,ss,hh,mm} ; [all_trials{pp,dd,ss,hh,mm}{i,1} all_trials{pp,dd,ss,hh,mm}{i,2}] ];
+
+                            % Do the splits for this session. Half the runs go into one
+                            % split, half into the other. What the pre-registration
+                            % says we'll do is: "To measure intra-session
+                            % variation in sensitivity threshold, the data from each
+                            % session will be randomly split in half by experimental run,
+                            % psychometric functions will be fit to each half of the data
+                            % and thresholds will be estimated and compared for each
+                            % condition." This is not completely clear about whether we
+                            % should keep MOCS and QUEST paired runs together, but we
+                            % decided that is most sensible.
+
+                            % Make sure hh == 1 here, because the logic now depends on it
+                            % never being bigger than 1.
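+                            % (Split indices used below: 2 = 'Group1' and
+                            % 3 = 'Group2', matching the order in the
+                            % commented-out theSplits above and in FitTrials.m.)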
+                            if (hh ~= 1)
+                                error('Someone added splits, but the logic does not support that anymore here');
+                            end
+                            % Accumulate this run into its own split.
+                            if (runOrderSplitThisSessionForBothMethods(i) == 1)
+                                all_trials_unpacked{pp,dd,ss,2,mm} = [all_trials_unpacked{pp,dd,ss,2,mm} ; [all_trials{pp,dd,ss,hh,mm}{i,1} all_trials{pp,dd,ss,hh,mm}{i,2}] ];
+                            else
+                                all_trials_unpacked{pp,dd,ss,3,mm} = [all_trials_unpacked{pp,dd,ss,3,mm} ; [all_trials{pp,dd,ss,hh,mm}{i,1} all_trials{pp,dd,ss,hh,mm}{i,2}] ];
+                            end
+
+                        end
+
+                        % Checks
+                        if (strcmp(theMethod{tableRow},'MOCS') & size(MOCSFileTimes,1) ~= 4)
+                            error('Wrong number of MOCS files somewhere in data tree');
+                        end
+                        if (strcmp(theMethod{tableRow},'QUEST') & size(QUESTFileTimes,1) ~= 4)
+                            error('Wrong number of QUEST files somewhere in data tree');
+                        end
+                        if (strcmp(theMethod{tableRow},'QUEST'))
+                            QUESTFileTimesSorted = sort(QUESTFileTimes);
+                            MOCSFileTimesSorted = sort(MOCSFileTimes);
+                            for ff = 1:size(MOCSFileTimes,1)
+                                if (QUESTFileTimesSorted(ff) < MOCSFileTimesSorted(ff))
+                                    fprintf('File time order error\n');
+                                end
+                            end
+                        end
+                    else
+                        % Concatenate MOCS and QUEST data into COMBINED. If we're here,
+                        % mm indexes COMBINED and we know that indices 1 and 2 index MOCS
+                        % and QUEST.
+                        if (~strcmp(theMethods{mm},'COMBINED') | ~strcmp(theMethods{1},'MOCS') | ~strcmp(theMethods{2},'QUEST') )
+                            error('Code counts on specific order of methods in theMethods, and someone has changed that');
+                        end
+                        if (~strcmp(theSplits{hh},'All'))
+                            error('In this program, the only split explicitly specified should be ''All''');
+                        end
+
+                        % Check that hh never exceeds 1
+                        if (hh ~= 1)
+                            error('Someone added splits, but the logic does not support that anymore here');
+                        end
+
+                        % Combine across methods for 'All' and the two groups,
+                        % pairing each split's MOCS data with that same split's
+                        % QUEST data.
+                        all_trials_unpacked{pp,dd,ss,hh,mm} = [all_trials_unpacked{pp,dd,ss,hh,1} ; all_trials_unpacked{pp,dd,ss,hh,2}];
+                        all_trials_unpacked{pp,dd,ss,2,mm} = [all_trials_unpacked{pp,dd,ss,2,1} ; all_trials_unpacked{pp,dd,ss,2,2}];
+                        all_trials_unpacked{pp,dd,ss,3,mm} = [all_trials_unpacked{pp,dd,ss,3,1} ; all_trials_unpacked{pp,dd,ss,3,2}];
+
+                    end
+
+                    % Bump table row
+                    tableRow = tableRow + 1;
+                end
+            end
+        end
+    end
+end
+
+% Save out one nice big combined file
+save(fullfile(analysisDir,'combinedData.mat'),'all_trials','all_trials_unpacked','log0Value','theParticipants','theDiameters','theSessions','theSplits','theMethods','AOM','-v7.3');
+
+
diff --git a/code/CombineTrialsDHB.m b/code/CombineTrialsDHB.m
new file mode 100644
index 0000000..84efd09
--- /dev/null
+++ b/code/CombineTrialsDHB.m
@@ -0,0 +1,264 @@
+%% CombineTrialsDHB
+%
+% Combine all of the psychophysical data files into one big .mat
+% file. We do this to simplify the other programs, since the logic
+% here involves lots of checks that everything is as it should be.
+%
+% Also identify catch trials in QUEST runs, which are somewhat obscurely
+% indicated, and fix them up so later programs don't have to worry about
+% this.
+%
+% Although this might look like it is splitting the data into two halves, that
+% actually happens elsewhere, and here all the data are stored under all the splits.
+% This is a vestige from the time this code was part of the full data analysis loop, and
+% if we were better people we'd skip this step here. Maybe we will become better at some
+% point.
+%
+% The output of this program is a mat file 'combinedData.mat' that gets stored in the
+% analysis output tree, and that contains one big cell array with all of the data.
+%
+% See also: FitTrials, FigureN
+
+%% Initialize
+close all; clear all;
+
+%% Get path to data
+%
+% Path to data tree on this machine
+%
+% This is set up by the TbTb local hook file, but
+% you can also use
+%   setpref('AOMicroRepeat','dataDir',theDataDir)
+% to do this, where theDataDir is the path to the
+% files.
+%
+% In the code example I got, this was
+%   path = 'W:\Data\11125\20231103\AO_Psychophysics\MOCS\8\group2';
+% but that does not match the actual data tree I received for 11002.
+dataDir = getpref('AOMicroRepeat','dataDir');
+
+%% Also analysis output dir, same idea
+analysisDir = getpref('AOMicroRepeat','analysisDir');
+
+%% Some parameters
+log0Value = -3.5;
+
+%% Define what data we are analyzing here
+%
+% Generally speaking, this routine loops over everything and does its thing
+%
+% Define subjects
+theParticipants = {'11002' '11108' '11118' '11119' '11125'};
+
+% Define sessions (1 or 2)
+theSessions = [1 2];
+
+% Define session splits
+theSplits = {'All', 'FirstHalf', 'SecondHalf'};
+
+% Define sizes (8 or 43)
+theDiameters = [8 43];
+
+% Define methods ('MOCS' or 'QUEST')
+%
+% When we create COMBINED data below, we count
+% on this being as it is. Don't change without care.
+theMethods = {'MOCS' 'QUEST', 'COMBINED'};
+
+%% Get the AOM lookup table info.
+%
+% This is loaded and saved here with the data, so that the programs that
+% read the output of this program don't have to go and find it.
+%
+% As far as I can guess:
+% Column 1: nominal linear intensities
+% Column 2: LUT linear intensities
+% Column 3: nominal log10 intensities
+% Column 4: looks like a non-linear mapping from 10 to 8 bits
+AOM = load('green_AOM_LUT_processing');
+
+%% Loop over everything
+tableRow = 1;
+for pp = 1:length(theParticipants)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            for hh = 1:length(theSplits)
+                checkSessionDate = [];
+                MOCSFileTimes = [];
+                QUESTFileTimes = [];
+                for mm = 1:length(theMethods)
+
+                    % Store info for what we are analyzing in this run
+                    theMethod{tableRow,1} = theMethods{mm};
+                    theSubject{tableRow,1} = theParticipants{pp};
+                    theDiameter(tableRow,1) = theDiameters(dd);
+                    theSession(tableRow,1) = theSessions(ss);
+                    theSplit{tableRow,1} = theSplits{hh};
+
+                    % Handle MOCS and QUEST from the data
+                    if (strcmp(theMethods{mm},'MOCS') | strcmp(theMethods{mm},'QUEST'))
+
+                        % Form path to the directory with the data sitting in it
+                        pathToData = fullfile(dataDir,theSubject{tableRow},['Session' num2str(theSession(tableRow))],['Size' num2str(theDiameter(tableRow))],theMethod{tableRow});
+
+                        % Get list of data files
+                        dirOffset = 1;
+                        trial_videos = dir(fullfile(pathToData,'*.mat'));
+                        num_trial_videos = length(trial_videos(dirOffset:end));
+                        if (num_trial_videos ~= 4)
+                            error('Expect 4 data files');
+                        end
+
+                        % Read and concatenate the data
+                        all_trials{pp,dd,ss,hh,mm} = {};
+                        all_trials_unpacked{pp,dd,ss,hh,mm} = [];
+                        fprintf('\tReading videos\n');
+                        for i = 1:num_trial_videos
+                            % Get check date from filename. This should be the
+                            % same for all the MOCS and QUEST files from the
+                            % same subject session. The method is the inner
+                            % loop variable, so we can do the check here.
+                            if (isempty(checkSessionDate))
+                                checkSessionDate = trial_videos(i+dirOffset-1).name(7:16);
+                            else
+                                if (~strcmp(checkSessionDate,trial_videos(i+dirOffset-1).name(7:16)))
+                                    error('Not all data files from same session are on the same date');
+                                end
+                            end
+
+                            % Check that QUEST file times are later than the last
+                            % MOCS time.
+                            % This code assumes that it runs to
+                            % completion on the same calendar day on which it
+                            % was started, which will almost always be true.
+                            %
+                            % Count underscores to find the start of the time
+                            % string.
+                            nUnderscore = 0;
+                            for cc = 1:length(trial_videos(i+dirOffset-1).name)
+                                if (trial_videos(i+dirOffset-1).name(cc) == '_')
+                                    nUnderscore = nUnderscore + 1;
+                                end
+                                if (nUnderscore == 4)
+                                    timeStartIndex = cc + 1;
+                                    break;
+                                end
+                            end
+                            fileTimeStr = trial_videos(i+dirOffset-1).name(timeStartIndex:timeStartIndex+4);
+
+                            % Handle the way single digit time numbers get written
+                            % in the filename. Brute force this. Ugh.
+                            if (fileTimeStr(2) == '_')
+                                fileTimeStr = ['0' fileTimeStr];
+                            end
+                            if (fileTimeStr(5) == '_')
+                                fileTimeStr(5) = fileTimeStr(4);
+                                fileTimeStr(4) = '0';
+                            end
+                            fileTimeStr = fileTimeStr(1:5);
+
+                            % Convert to a format MATLAB can operate on
+                            fileTime = datetime(fileTimeStr,'InputFormat','HH_mm');
+                            if (strcmp(theMethod(tableRow),'MOCS'))
+                                MOCSFileTimes = [MOCSFileTimes ; fileTime];
+                            else
+                                QUESTFileTimes = [QUESTFileTimes ; fileTime];
+                            end
+
+                            % Read the data file
+                            % fprintf('Reading file %s\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+                            loadedData{pp,dd,ss,hh,mm,i} = load(fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+
+                            % Grab the trial data we need. Also do some
+                            % MOCS and QUEST specific reality checks
+                            if (isfield(loadedData{pp,dd,ss,hh,mm,i},'trial_vector'))
+                                % It's a MOCS data file by what's in it
+                                if (~strcmp(theMethod{tableRow},'MOCS'))
+                                    fprintf('File %s\n\tUnder QUEST, appears to be MOCS\n\tTrials: %d\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name),length(loadedData{pp,dd,ss,hh,mm,i}.trial_vector));
+                                end
+                                if (length(loadedData{pp,dd,ss,hh,mm,i}.trial_vector) ~= 90 & length(loadedData{pp,dd,ss,hh,mm,i}.trial_vector) ~= 100)
+                                    fprintf('File %s: Wrong number of trials in MOCS data file\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+                                end
+                                all_trials{pp,dd,ss,hh,mm}{i,1} = loadedData{pp,dd,ss,hh,mm,i}.trial_vector;
+                                all_trials{pp,dd,ss,hh,mm}{i,2} = loadedData{pp,dd,ss,hh,mm,i}.response_vector;
+
+                                % Reality check on MOCS stimulus levels.
+                                % These should be the same across runs
+                                % within a session, I think, but are not
+                                % always. This doesn't error out, but does
+                                % report the unexpected cases.
+                                for j = i-1:-1:1
+                                    if (any(unique(loadedData{pp,dd,ss,hh,mm,j}.trial_vector) ~= unique(loadedData{pp,dd,ss,hh,mm,i}.trial_vector)))
+                                        fprintf('\t\tMOCS mismatch in trial levels across runs\n')
+                                        fprintf('\t\t%s, session %d, size %d, run loaded #%d vs run loaded #%d\n',theParticipants{pp},theSessions(ss), theDiameters(dd),i,j);
+                                        fprintf('\t\tFile loaded #%d: %s\n',i,fullfile(trial_videos(i+dirOffset-1).name));
+                                        fprintf('\t\tFile loaded #%d: %s\n',j,fullfile(trial_videos(j+dirOffset-1).name));
+                                    end
+                                end
+
+                            elseif (isfield(loadedData{pp,dd,ss,hh,mm,i},'trial_matrix'))
+                                % It's a QUEST data file by what's in it
+                                if (~strcmp(theMethod{tableRow},'QUEST'))
+                                    fprintf('File %s\n\tUnder MOCS, appears to be QUEST\n\tTrials: %d\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name),length(loadedData{pp,dd,ss,hh,mm,i}.trial_matrix));
+                                end
+                                if (length(loadedData{pp,dd,ss,hh,mm,i}.trial_matrix) ~= 44)
+                                    fprintf('File %s: Wrong number of trials in QUEST data file\n',fullfile(pathToData,trial_videos(i+dirOffset-1).name));
+                                end
+                                all_trials{pp,dd,ss,hh,mm}{i,1} = loadedData{pp,dd,ss,hh,mm,i}.trial_matrix;
+                                questCatchTrialIndex = find(isnan(loadedData{pp,dd,ss,hh,mm,i}.theThreshold));
+                                if (length(questCatchTrialIndex) ~= 4)
+                                    fprintf('Wrong number of catch trials in QUEST data file\n');
+                                end
+                                all_trials{pp,dd,ss,hh,mm}{i,1}(questCatchTrialIndex) = log0Value;
+                                all_trials{pp,dd,ss,hh,mm}{i,2} = loadedData{pp,dd,ss,hh,mm,i}.response_matrix;
+                            else
+                                error('Do not understand data format');
+                            end
+
+                            % Get/check number of trials and make sure they
+                            % are the same for each run within
+                            % method/session.
+                            if (i == 1)
+                                nTrialsPerSession = length(all_trials{pp,dd,ss,hh,mm}{i,1});
+                            else
+                                if (length(all_trials{pp,dd,ss,hh,mm}{i,1}) ~= nTrialsPerSession)
+                                    error('Trial mismatch across runs');
+                                end
+                            end
+
+                            % Concatenate across runs into one long pair of
+                            % vectors.
+                            all_trials_unpacked{pp,dd,ss,hh,mm} = [all_trials_unpacked{pp,dd,ss,hh,mm} ; [all_trials{pp,dd,ss,hh,mm}{i,1} all_trials{pp,dd,ss,hh,mm}{i,2}] ];
+                        end
+
+                        if (strcmp(theMethod{tableRow},'MOCS') & size(MOCSFileTimes,1) ~= 4)
+                            error('Wrong number of MOCS files somewhere in data tree');
+                        end
+                        if (strcmp(theMethod{tableRow},'QUEST') & size(QUESTFileTimes,1) ~= 4)
+                            error('Wrong number of QUEST files somewhere in data tree');
+                        end
+                        if (strcmp(theMethod{tableRow},'QUEST'))
+                            QUESTFileTimesSorted = sort(QUESTFileTimes);
+                            MOCSFileTimesSorted = sort(MOCSFileTimes);
+                            for ff = 1:size(MOCSFileTimes,1)
+                                if (QUESTFileTimesSorted(ff) < MOCSFileTimesSorted(ff))
+                                    fprintf('File time order error\n');
+                                end
+                            end
+                        end
+                    else
+                        % Concatenate MOCS and QUEST data into COMBINED
+                        all_trials_unpacked{pp,dd,ss,hh,mm} = [all_trials_unpacked{pp,dd,ss,hh,1} ; all_trials_unpacked{pp,dd,ss,hh,2}];
+                    end
+
+                    % Bump table row
+                    tableRow = tableRow + 1;
+                end
+            end
+        end
+    end
+end
+
+% Save out one nice big combined file
+save(fullfile(analysisDir,'combinedData.mat'),'all_trials','all_trials_unpacked','log0Value','theParticipants','theDiameters','theSessions','theSplits','theMethods','AOM','-v7.3');
+
+
diff --git a/code/Figure3.m b/code/Figure3.m
new file mode 100644
index 0000000..229602e
--- /dev/null
+++ b/code/Figure3.m
@@ -0,0 +1,258 @@
+%% Make Figure 3 for the paper
+%
+% TODO
+% a) Make 3a and 3b match in terms of size, fonts, etc.
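+% (FigureSetup, called below, is assumed to load the fitted results into
+% dataTable and to define the shared plot settings used in this script,
+% e.g. limMin, limMax, markerSize, and analysisDir.)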
+
+%% Clear
+clear; close all;
+
+%% Output variant
+outputVariant = 'SlopeFree1';
+
+%% Set up for the figures and read data
+FigureSetup;
+
+%% Get all MOCS data for full sessions
+theSubjects = unique(dataTable.Subject);
+theDiameters = unique(dataTable.Diameter);
+theSessions = unique(dataTable.Session);
+for pp = 1:length(theSubjects)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss)) & ...
+                strcmp(dataTable.Method,'MOCS') & strcmp(dataTable.Split,'All');
+            if (sum(index) ~= 1)
+                error('Have not properly set up condition to pick out just one sensitivity');
+            end
+            sensitivityMOCS(pp,dd,ss) = -dataTable.CorrectedThreshold_dB_(index);
+            CILower_MOCS(pp,dd,ss) = -dataTable.CIHigh(index); % negated sensitivity, so the high value corresponds to the lower CI
+            CIUpper_MOCS(pp,dd,ss) = -dataTable.CILow(index);  % negated sensitivity, so the low value corresponds to the higher CI
+            xneg(pp,dd,ss) = sensitivityMOCS(pp,dd,ss) - CIUpper_MOCS(pp,dd,ss);
+            xpos(pp,dd,ss) = sensitivityMOCS(pp,dd,ss) - CILower_MOCS(pp,dd,ss);
+
+        end
+    end
+end
+
+%% Get all QUEST data for full sessions
+for pp = 1:length(theSubjects)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss)) & ...
+                strcmp(dataTable.Method,'QUEST') & strcmp(dataTable.Split,'All');
+            if (sum(index) ~= 1)
+                error('Have not properly set up condition to pick out just one sensitivity');
+            end
+            sensitivityQUEST(pp,dd,ss) = -dataTable.CorrectedThreshold_dB_(index);
+            CILower_QUEST(pp,dd,ss) = -dataTable.CIHigh(index); % negated sensitivity, so the high value corresponds to the lower CI
+            CIUpper_QUEST(pp,dd,ss) = -dataTable.CILow(index);  % negated sensitivity, so the low value corresponds to the higher CI
+            yneg(pp,dd,ss) = sensitivityQUEST(pp,dd,ss) - CIUpper_QUEST(pp,dd,ss);
+            ypos(pp,dd,ss) = sensitivityQUEST(pp,dd,ss) - CILower_QUEST(pp,dd,ss);
+
+        end
+    end
+end
+
+%% Make Figure 3a
+plotSize = [100 100 400 400];
+f = figure('Position',plotSize); clf; hold on
+
+errorbar(sensitivityMOCS(:,1,1),sensitivityQUEST(:,1,1),yneg(:,1,1),ypos(:,1,1),xneg(:,1,1),xpos(:,1,1),'bo','MarkerFaceColor','b','MarkerSize',markerSize);
+errorbar(sensitivityMOCS(:,1,2),sensitivityQUEST(:,1,2),yneg(:,1,2),ypos(:,1,2),xneg(:,1,2),xpos(:,1,2),'b^','MarkerFaceColor','b','MarkerSize',markerSize);
+errorbar(sensitivityMOCS(:,2,1),sensitivityQUEST(:,2,1),yneg(:,2,1),ypos(:,2,1),xneg(:,2,1),xpos(:,2,1),'ro','MarkerFaceColor','r','MarkerSize',markerSize);
+errorbar(sensitivityMOCS(:,2,2),sensitivityQUEST(:,2,2),yneg(:,2,2),ypos(:,2,2),xneg(:,2,2),xpos(:,2,2),'r^','MarkerFaceColor','r','MarkerSize',markerSize);
+
+plot([limMin limMax],[limMin limMax],'k:','LineWidth',2);
+xlabel('MOCS Sensitivity (dB)', 'FontWeight','bold', 'FontSize', 12, 'FontName', 'Times New Roman');
+ylabel('QUEST Sensitivity (dB)','FontWeight','bold', 'FontSize', 12, 'FontName', 'Times New Roman');
+legend( ...
+    {sprintf('Session %d, %d pixels',theSessions(1),theDiameters(1)) ; ...
+    sprintf('Session %d, %d pixels',theSessions(2),theDiameters(1)) ; ...
+    sprintf('Session %d, %d pixels',theSessions(1),theDiameters(2)) ; ...
+    sprintf('Session %d, %d pixels',theSessions(2),theDiameters(2)) ; ...
+    sprintf('Line of Equality')
+    }, ...
+    'Location','SouthEast');
+axis('square');
+
+lgd = legend('show');
+lgd.FontSize = 8;
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold')
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+axis([limMin limMax limMin limMax]);
+ticks = 16:2:32;
+xticks(ticks);
+yticks(ticks);
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure3a.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure3a.png', '-dpng', '-r600');
+
+print(gcf,fullfile(analysisDir,outputVariant,'Figure3a.pdf'),'-dpdf','-fillpage','-r600');
+
+%% t-tests
+[~,p(1,1)] = ttest(sensitivityMOCS(:,1,1),sensitivityQUEST(:,1,1));
+[~,p(1,2)] = ttest(sensitivityMOCS(:,1,2),sensitivityQUEST(:,1,2));
+[~,p(2,1)] = ttest(sensitivityMOCS(:,2,1),sensitivityQUEST(:,2,1));
+[~,p(2,2)] = ttest(sensitivityMOCS(:,2,2),sensitivityQUEST(:,2,2));
+fprintf('MOCS vs QUEST t-test p values\n');
+for dd = 1:length(theDiameters)
+    for ss = 1:length(theSessions)
+        fprintf('\t%d pixels, session %d, p = %0.3f\n',theDiameters(dd),theSessions(ss),p(dd,ss));
+    end
+end
+
+%% Wilcoxon signed-rank test
+[p(1,1),h,~] = signrank(sensitivityMOCS(:,1,1),sensitivityQUEST(:,1,1));
+[p(1,2),h,~] = signrank(sensitivityMOCS(:,1,2),sensitivityQUEST(:,1,2));
+[p(2,1),h,~] = signrank(sensitivityMOCS(:,2,1),sensitivityQUEST(:,2,1));
+[p(2,2),h,~] = signrank(sensitivityMOCS(:,2,2),sensitivityQUEST(:,2,2));
+% Results
+fprintf('MOCS vs QUEST Wilcoxon signed-rank test p values\n');
+for dd = 1:length(theDiameters)
+    for ss = 1:length(theSessions)
+        fprintf('\t%d pixels, session %d, p = %0.3f\n',theDiameters(dd),theSessions(ss),p(dd,ss));
+    end
+end
+
+%% Figure 3b (Bland-Altman plots), MOCS vs QUEST
+% Recall that indices are subject, size (8 and 43), session (1 and 2)
+Session1_8pixels_M = sensitivityMOCS(:,1,1);
+Session2_8pixels_M = sensitivityMOCS(:,1,2);
+Session1_43pixels_M = sensitivityMOCS(:,2,1);
+Session2_43pixels_M = sensitivityMOCS(:,2,2);
+Session1_8pixels_Q = sensitivityQUEST(:,1,1);
+Session2_8pixels_Q = sensitivityQUEST(:,1,2);
+Session1_43pixels_Q = sensitivityQUEST(:,2,1);
+Session2_43pixels_Q = sensitivityQUEST(:,2,2);
+
+[mean_Session1_8pixels, diff_Session1_8pixels] = calculate_bland_altman(Session1_8pixels_M, Session1_8pixels_Q);
+[mean_Session2_8pixels, diff_Session2_8pixels] = calculate_bland_altman(Session2_8pixels_M, Session2_8pixels_Q);
+[mean_Session1_43pixels, diff_Session1_43pixels] = calculate_bland_altman(Session1_43pixels_M, Session1_43pixels_Q);
+[mean_Session2_43pixels, diff_Session2_43pixels] = calculate_bland_altman(Session2_43pixels_M, Session2_43pixels_Q);
+
+mean_diff_Session1_8pixels = mean(diff_Session1_8pixels);
+std_diff_Session1_8pixels = std(diff_Session1_8pixels);
+mean_diff_Session2_8pixels = mean(diff_Session2_8pixels);
+std_diff_Session2_8pixels = std(diff_Session2_8pixels);
+mean_diff_Session1_43pixels = mean(diff_Session1_43pixels);
+std_diff_Session1_43pixels = std(diff_Session1_43pixels);
+mean_diff_Session2_43pixels = mean(diff_Session2_43pixels);
+std_diff_Session2_43pixels = std(diff_Session2_43pixels);
+
+%% Plot each comparison separately
+
+%% (3b) Session 1, 8 pixels
+plotSize = [100 100 200 400];
+figure('Position', plotSize);
+scatter(mean_Session1_8pixels, diff_Session1_8pixels, 25, 'blue', 'filled', 'o', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS8S1'); % Plot Bland-Altman data for each comparison
+LoA_Session1_8pixels = [mean_diff_Session1_8pixels - 1.96 * std_diff_Session1_8pixels, mean_diff_Session1_8pixels + 1.96 * ...
+    std_diff_Session1_8pixels];
+line([18, 23], [mean_diff_Session1_8pixels, mean_diff_Session1_8pixels], 'Color', 'blue', 'LineWidth', 4, 'LineStyle', '-', 'DisplayName', 'Mean Difference (SS8, S1)'); % Plot mean differences and limits of agreement
+line([18, 23], [mean_diff_Session1_8pixels + 1.96 * std_diff_Session1_8pixels, mean_diff_Session1_8pixels + 1.96 * std_diff_Session1_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Upper Limit (SS8, S1)');
+line([18, 23], [mean_diff_Session1_8pixels - 1.96 * std_diff_Session1_8pixels, mean_diff_Session1_8pixels - 1.96 * std_diff_Session1_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Lower Limit (SS8, S1)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [16, 24];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+
+xlabel({'Mean (MOCS,';'QUEST) Sensitivity (dB)'});
+ylabel('(MOCS - QUEST) Sensitivity (dB)');
+
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure3b.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure3b.png', '-dpng', '-r600');
+print(gcf,fullfile(analysisDir,outputVariant,'Figure3b.pdf'),'-dpdf');
+
+%% (3c) Session 2, 8 pixels
+figure('Position', plotSize);
+scatter(mean_Session2_8pixels, diff_Session2_8pixels, 25, 'blue', 'filled', '^', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS8S2');
+line([18, 23], [mean_diff_Session2_8pixels, mean_diff_Session2_8pixels], 'Color', 'blue', 'LineWidth', 4, 'LineStyle', '--', 'DisplayName', 'Mean Difference (SS8, S2)');
+line([18, 23], [mean_diff_Session2_8pixels + 1.96 * std_diff_Session2_8pixels, mean_diff_Session2_8pixels + 1.96 * std_diff_Session2_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Upper Limit (SS8, S2)');
+line([18, 23], [mean_diff_Session2_8pixels - 1.96 * std_diff_Session2_8pixels, mean_diff_Session2_8pixels - 1.96 * std_diff_Session2_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Lower Limit (SS8, S2)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [16, 24];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+LoA_Session2_8pixels = [mean_diff_Session2_8pixels - 1.96 * std_diff_Session2_8pixels,mean_diff_Session2_8pixels + 1.96 * std_diff_Session2_8pixels];
+xlabel({'Mean (MOCS,';'QUEST) Sensitivity (dB)'});
+ylabel('(MOCS - QUEST) Sensitivity (dB)');
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure3c.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure3c.png', '-dpng', '-r600');
+%% (3d) Session 1, 43 pixels
+figure('Position', plotSize);
+scatter(mean_Session1_43pixels, diff_Session1_43pixels, 25, 'red', 'filled', 'o', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS43S1');
+line([26, 30], [mean_diff_Session1_43pixels, mean_diff_Session1_43pixels], 'Color', 'red', 'LineWidth', 4, 'LineStyle', '-', 'DisplayName', 'Mean Difference (SS43, S1)');
+line([26, 30], [mean_diff_Session1_43pixels - 1.96 * std_diff_Session1_43pixels, mean_diff_Session1_43pixels - 1.96 * std_diff_Session1_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Lower Limit (SS43, S1)');
+line([26, 30], [mean_diff_Session1_43pixels + 1.96 * std_diff_Session1_43pixels, mean_diff_Session1_43pixels + 1.96 * std_diff_Session1_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '-', ...
+    'DisplayName', 'Upper Limit (SS43, S1)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [24, 32];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+ticks = 24:2:32;
+xticks(ticks);
+LoA_Session1_43pixels = [mean_diff_Session1_43pixels - 1.96 * std_diff_Session1_43pixels,mean_diff_Session1_43pixels + 1.96 * std_diff_Session1_43pixels];
+
+xlabel({'Mean (MOCS,';'QUEST) Sensitivity (dB)'});
+ylabel('(MOCS - QUEST) Sensitivity (dB)');
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure3d.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure3d.png', '-dpng', '-r600');
+%% (3e) Session 2, 43 pixels
+figure('Position', plotSize);
+scatter(mean_Session2_43pixels, diff_Session2_43pixels, 25, 'red', 'filled', '^', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS43S2');
+line([26, 30], [mean_diff_Session2_43pixels, mean_diff_Session2_43pixels], 'Color', 'red', 'LineWidth', 4, 'LineStyle', '--', 'DisplayName', 'Mean Difference (SS43, S2)');
+line([26, 30], [mean_diff_Session2_43pixels + 1.96 * std_diff_Session2_43pixels, mean_diff_Session2_43pixels + 1.96 * std_diff_Session2_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Upper Limit (SS43, S2)');
+line([26, 30], [mean_diff_Session2_43pixels - 1.96 * std_diff_Session2_43pixels, mean_diff_Session2_43pixels - 1.96 * std_diff_Session2_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Lower Limit (SS43, S2)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [24, 32];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+ticks = 24:2:32;
+xticks(ticks);
+LoA_Session2_43pixels = [mean_diff_Session2_43pixels - 1.96 * std_diff_Session2_43pixels,mean_diff_Session2_43pixels + 1.96 * std_diff_Session2_43pixels];
+xlabel({'Mean (MOCS,';'QUEST) Sensitivity (dB)'});
+ylabel('(MOCS - QUEST) Sensitivity (dB)');
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure3e.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure3e.png', '-dpng', '-r600');
+
+%% t-test for 8 vs 43 pixel stimulus
+[~,p(1,1)] = ttest(Session1_8pixels_M,Session1_43pixels_M);
+[~,p(1,2)] = ttest(Session1_8pixels_Q,Session1_43pixels_Q);
+[~,p(2,1)] = ttest(Session2_8pixels_M,Session2_43pixels_M);
+[~,p(2,2)] = ttest(Session2_8pixels_Q,Session2_43pixels_Q);
+theSplits = {'MOCS','QUEST'}; % to compare 8 vs 43 pixel stimulus sizes for the two testing paradigms across the two sessions
+fprintf('Within-session (8 vs 43 pixels) t-test p values\n');
+for dd = 1:length(theSplits)
+    for ss = 1:length(theSessions)
+        fprintf('\t%s, session %d, p = %0.3f\n',theSplits{dd},theSessions(ss),p(dd,ss));
+    end
+end
+%% Print limits of agreement
+fprintf('MOCS vs QUEST : Session1, 8 pixels, LoA: %.2f, %.2f\n', LoA_Session1_8pixels);
+fprintf('MOCS vs QUEST : Session1, 43 pixels, LoA: %.2f, %.2f\n', LoA_Session1_43pixels);
+fprintf('MOCS vs QUEST : Session2, 8 pixels, LoA: %.2f, %.2f\n', LoA_Session2_8pixels);
+fprintf('MOCS vs QUEST : Session2, 43 pixels, LoA: %.2f, %.2f\n', LoA_Session2_43pixels);
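Note: calculate_bland_altman is called in Figure3.m and Figure4.m but is not
included in this diff. A minimal sketch of what it is assumed to do (per-pair
means and differences of two paired measurement vectors, after Bland & Altman):

    function [pairMeans, pairDiffs] = calculate_bland_altman(x, y)
        % x, y: column vectors of paired measurements (e.g., MOCS and QUEST
        % sensitivities for the same subjects). Returns the per-pair mean
        % and per-pair difference plotted in the Bland-Altman panels.
        pairMeans = (x + y) / 2;
        pairDiffs = x - y;
    end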
diff --git a/code/Figure4.m b/code/Figure4.m
new file mode 100644
index 0000000..0e65b21
--- /dev/null
+++ b/code/Figure4.m
@@ -0,0 +1,251 @@
+% Figure 4
+clear; close all;
+
+%% Output variant
+outputVariant = 'SlopeFree1';
+%% Set up for the figures and read data
+FigureSetup;
+%% Get Group1 data for the combined trials (all participants, 2 sessions, and 2 stimulus sizes)
+theSubjects = unique(dataTable.Subject);
+theDiameters = unique(dataTable.Diameter);
+theSessions = unique(dataTable.Session);
+
+for pp = 1:length(theSubjects)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss)) & ...
+                strcmp(dataTable.Method,'COMBINED') & strcmp(dataTable.Split,'Group1');
+            if (sum(index) ~= 1)
+                error('Have not properly set up condition to pick out just one sensitivity');
+            end
+            sensitivityGroup1(pp,dd,ss) = -dataTable.CorrectedThreshold_dB_(index);
+            CILower_Group1(pp,dd,ss) = -dataTable.CIHigh(index); % negated sensitivity, so the high value corresponds to the lower CI
+            CIUpper_Group1(pp,dd,ss) = -dataTable.CILow(index);  % negated sensitivity, so the low value corresponds to the higher CI
+            xneg(pp,dd,ss) = sensitivityGroup1(pp,dd,ss) - CIUpper_Group1(pp,dd,ss);
+            xpos(pp,dd,ss) = sensitivityGroup1(pp,dd,ss) - CILower_Group1(pp,dd,ss);
+        end
+    end
+end
+
+%% Get Group2 data for the combined trials (all participants, 2 sessions, and 2 stimulus sizes)
+theSubjects = unique(dataTable.Subject);
+theDiameters = unique(dataTable.Diameter);
+theSessions = unique(dataTable.Session);
+for pp = 1:length(theSubjects)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss)) & ...
+                strcmp(dataTable.Method,'COMBINED') & strcmp(dataTable.Split,'Group2');
+            if (sum(index) ~= 1)
+                error('Have not properly set up condition to pick out just one sensitivity');
+            end
+            sensitivityGroup2(pp,dd,ss) = -dataTable.CorrectedThreshold_dB_(index);
+            CILower_Group2(pp,dd,ss) = -dataTable.CIHigh(index); % negated sensitivity, so the high value corresponds to the lower CI
+            CIUpper_Group2(pp,dd,ss) = -dataTable.CILow(index);  % negated sensitivity, so the low value corresponds to the higher CI
+            yneg(pp,dd,ss) = sensitivityGroup2(pp,dd,ss) - CIUpper_Group2(pp,dd,ss);
+            ypos(pp,dd,ss) = sensitivityGroup2(pp,dd,ss) - CILower_Group2(pp,dd,ss);
+        end
+    end
+end
+%% Make figure 4a
+plotSize = [100, 100, 400, 400];
+f = figure('Position',plotSize); clf; hold on
+
+errorbar(sensitivityGroup1(:,1,1),sensitivityGroup2(:,1,1),yneg(:,1,1),ypos(:,1,1),xneg(:,1,1),xpos(:,1,1),'bo','MarkerFaceColor','b','MarkerSize',markerSize);
+errorbar(sensitivityGroup1(:,1,2),sensitivityGroup2(:,1,2),yneg(:,1,2),ypos(:,1,2),xneg(:,1,2),xpos(:,1,2),'b^','MarkerFaceColor','b','MarkerSize',markerSize);
+errorbar(sensitivityGroup1(:,2,1),sensitivityGroup2(:,2,1),yneg(:,2,1),ypos(:,2,1),xneg(:,2,1),xpos(:,2,1),'ro','MarkerFaceColor','r','MarkerSize',markerSize);
+errorbar(sensitivityGroup1(:,2,2),sensitivityGroup2(:,2,2),yneg(:,2,2),ypos(:,2,2),xneg(:,2,2),xpos(:,2,2),'r^','MarkerFaceColor','r','MarkerSize',markerSize);
+
+
+plot([limMin limMax],[limMin limMax],'k:','LineWidth',2);
+set(gca, 'FontName', 'Arial')
+legend( ...
+    {sprintf('Session %d, %d pixels',theSessions(1),theDiameters(1)) ; ...
+    sprintf('Session %d, %d pixels',theSessions(2),theDiameters(1)) ; ...
+    sprintf('Session %d, %d pixels',theSessions(1),theDiameters(2)) ; ...
+    sprintf('Session %d, %d pixels',theSessions(2),theDiameters(2)) ; ...
+    sprintf('Line of Equality')}, ...
+    'Location','SouthEast');
+axis('square');
+lgd = legend('show');
+lgd.FontSize = 8;
+ax = gca;
+% set(gca, 'FontName', 'Arial','Fontweight', 'Bold')
+xlabel('Group 1 sensitivity (dB)','FontName','Arial','FontWeight','Bold');
+ylabel('Group 2 sensitivity (dB)','FontName','Arial','FontWeight','Bold');
+ax = gca; % Get current axes handle
+ax.XAxis.FontWeight = 'bold'; % Make x-axis tick labels bold
+ax.YAxis.FontWeight = 'bold'; % Make y-axis tick labels bold
+ticks = 16:2:32;
+xticks(ticks);
+yticks(ticks);
+
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+axis([limMin limMax limMin limMax]);
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure4a.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure4a.png', '-dpng', '-r600');
+
+%% t-tests
+[~,p(1,1)] = ttest(sensitivityGroup1(:,1,1),sensitivityGroup2(:,1,1));
+[~,p(1,2)] = ttest(sensitivityGroup1(:,1,2),sensitivityGroup2(:,1,2));
+[~,p(2,1)] = ttest(sensitivityGroup1(:,2,1),sensitivityGroup2(:,2,1));
+[~,p(2,2)] = ttest(sensitivityGroup1(:,2,2),sensitivityGroup2(:,2,2));
+fprintf('Within-session t-test p values\n');
+for dd = 1:length(theDiameters)
+    for ss = 1:length(theSessions)
+        fprintf('\t%d pixels, session %d, p = %0.3f\n',theDiameters(dd),theSessions(ss),p(dd,ss));
+    end
+end
+
+%% Wilcoxon signed-rank test
+[p(1,1),h,~] = signrank(sensitivityGroup1(:,1,1),sensitivityGroup2(:,1,1));
+[p(1,2),h,~] = signrank(sensitivityGroup1(:,1,2),sensitivityGroup2(:,1,2));
+[p(2,1),h,~] = signrank(sensitivityGroup1(:,2,1),sensitivityGroup2(:,2,1));
+[p(2,2),h,~] = signrank(sensitivityGroup1(:,2,2),sensitivityGroup2(:,2,2));
+% Results
+fprintf('Within-session Wilcoxon signed-rank test p values\n');
+for dd = 1:length(theDiameters)
+    for ss = 1:length(theSessions)
+        fprintf('\t%d pixels, session %d, p = %0.3f\n',theDiameters(dd),theSessions(ss),p(dd,ss));
+    end
+end
+%% Figure 4b (Bland-Altman plots), Group1 vs Group2
+% Recall that indices are subject, size (8 and 43), session (1 and 2)
+Session1_8pixels_G1 = sensitivityGroup1(:,1,1);
+Session2_8pixels_G1 = sensitivityGroup1(:,1,2);
+Session1_43pixels_G1 = sensitivityGroup1(:,2,1);
+Session2_43pixels_G1 = sensitivityGroup1(:,2,2);
+Session1_8pixels_G2 = sensitivityGroup2(:,1,1);
+Session2_8pixels_G2 = sensitivityGroup2(:,1,2);
+Session1_43pixels_G2 = sensitivityGroup2(:,2,1);
+Session2_43pixels_G2 = sensitivityGroup2(:,2,2);
+
+[mean_Session1_8pixels, diff_Session1_8pixels] = calculate_bland_altman(Session1_8pixels_G1, Session1_8pixels_G2);
+[mean_Session2_8pixels, diff_Session2_8pixels] = calculate_bland_altman(Session2_8pixels_G1, Session2_8pixels_G2);
+[mean_Session1_43pixels, diff_Session1_43pixels] = calculate_bland_altman(Session1_43pixels_G1, Session1_43pixels_G2);
+[mean_Session2_43pixels, diff_Session2_43pixels] = calculate_bland_altman(Session2_43pixels_G1, Session2_43pixels_G2);
+
+mean_diff_Session1_8pixels = mean(diff_Session1_8pixels);
+std_diff_Session1_8pixels = std(diff_Session1_8pixels);
+mean_diff_Session2_8pixels = mean(diff_Session2_8pixels);
+std_diff_Session2_8pixels = std(diff_Session2_8pixels);
+mean_diff_Session1_43pixels = mean(diff_Session1_43pixels);
+std_diff_Session1_43pixels = std(diff_Session1_43pixels);
+mean_diff_Session2_43pixels = mean(diff_Session2_43pixels);
+std_diff_Session2_43pixels = std(diff_Session2_43pixels);
+
+%% Figure 4b
+
+% Plot Bland-Altman data for each comparison
+plotSize = [100 100 200 400];
+figure('Position', plotSize);
+scatter(mean_Session1_8pixels, ...
+%% Figure 4b
+
+% Plot Bland-Altman data for each comparison
+plotSize = [100 100 200 400];
+figure('Position', plotSize);
+scatter(mean_Session1_8pixels, diff_Session1_8pixels, 25, 'blue', 'filled', 'o', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS8S1');
+hold on;
+line([18, 23], [mean_diff_Session1_8pixels, mean_diff_Session1_8pixels], 'Color', 'blue', 'LineWidth', 4, 'LineStyle', '-', 'DisplayName', 'Mean Difference (SS8, S1)');
+line([18, 23], [mean_diff_Session1_8pixels + 1.96 * std_diff_Session1_8pixels, mean_diff_Session1_8pixels + 1.96 * std_diff_Session1_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Upper Limit (SS8, S1)');
+line([18, 23], [mean_diff_Session1_8pixels - 1.96 * std_diff_Session1_8pixels, mean_diff_Session1_8pixels - 1.96 * std_diff_Session1_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Lower Limit (SS8, S1)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [16, 24];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+LoA_Session1_8pixels = [mean_diff_Session1_8pixels - 1.96 * std_diff_Session1_8pixels, mean_diff_Session1_8pixels + 1.96 * std_diff_Session1_8pixels];
+xlabel({'Mean (Groups 1, 2)';'Sensitivity (dB)'});
+ylabel('Group 1 - Group 2 Sensitivity (dB)');
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure4b.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure4b.png', '-dpng', '-r600');
+
+%% Figure 4c
+
+figure('Position', plotSize);
+scatter(mean_Session2_8pixels, diff_Session2_8pixels, 25, 'blue', 'filled', '^', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS8S2');
+hold on;
+line([18, 23], [mean_diff_Session2_8pixels, mean_diff_Session2_8pixels], 'Color', 'blue', 'LineWidth', 4, 'LineStyle', '--', 'DisplayName', 'Mean Difference (SS8, S2)');
+line([18, 23], [mean_diff_Session2_8pixels + 1.96 * std_diff_Session2_8pixels, mean_diff_Session2_8pixels + 1.96 * std_diff_Session2_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Upper Limit (SS8, S2)');
+line([18, 23], [mean_diff_Session2_8pixels - 1.96 * std_diff_Session2_8pixels, mean_diff_Session2_8pixels - 1.96 * std_diff_Session2_8pixels], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Lower Limit (SS8, S2)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [16, 24];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+LoA_Session2_8pixels = [mean_diff_Session2_8pixels - 1.96 * std_diff_Session2_8pixels, mean_diff_Session2_8pixels + 1.96 * std_diff_Session2_8pixels];
+xlabel({'Mean (Groups 1, 2)';'Sensitivity (dB)'});
+ylabel('Group 1 - Group 2 Sensitivity (dB)');
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure4c.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure4c.png', '-dpng', '-r600');
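+
+% The limits of agreement computed above are the standard Bland-Altman band,
+% mean difference +/- 1.96 SD of the differences, i.e. the interval expected to
+% contain about 95% of the between-group differences if they are roughly normal:
+%   LoA = meanDiff + [-1.96 1.96] * stdDiff;   % sketch; matches the LoA_* vectors
+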
+%% Figure 4d
+figure('Position', plotSize);
+scatter(mean_Session1_43pixels, diff_Session1_43pixels, 25, 'red', 'filled', 'o', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS43S1');
+hold on;
+line([24, 32], [mean_diff_Session1_43pixels, mean_diff_Session1_43pixels], 'Color', 'red', 'LineWidth', 4, 'LineStyle', '-', 'DisplayName', 'Mean Difference (SS43, S1)');
+line([24, 32], [mean_diff_Session1_43pixels - 1.96 * std_diff_Session1_43pixels, mean_diff_Session1_43pixels - 1.96 * std_diff_Session1_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Lower Limit (SS43, S1)');
+line([24, 32], [mean_diff_Session1_43pixels + 1.96 * std_diff_Session1_43pixels, mean_diff_Session1_43pixels + 1.96 * std_diff_Session1_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Upper Limit (SS43, S1)');
+LoA_Session1_43pixels = [mean_diff_Session1_43pixels - 1.96 * std_diff_Session1_43pixels, mean_diff_Session1_43pixels + 1.96 * std_diff_Session1_43pixels];
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 18)
+ax.XLim = [22, 32];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure4d.pdf'),'pdf');
+xlabel({'Mean (Groups 1, 2)';'Sensitivity (dB)'});
+ylabel('Group 1 - Group 2 Sensitivity (dB)');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure4d.png', '-dpng', '-r600');
+
+%% Figure 4e
+
+figure('Position', plotSize);
+scatter(mean_Session2_43pixels, diff_Session2_43pixels, 25, 'red', 'filled', '^', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS43S2');
+hold on;
+line([24, 32], [mean_diff_Session2_43pixels, mean_diff_Session2_43pixels], 'Color', 'red', 'LineWidth', 4, 'LineStyle', '--', 'DisplayName', 'Mean Difference (SS43, S2)');
+line([24, 32], [mean_diff_Session2_43pixels + 1.96 * std_diff_Session2_43pixels, mean_diff_Session2_43pixels + 1.96 * std_diff_Session2_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Upper Limit (SS43, S2)');
+line([24, 32], [mean_diff_Session2_43pixels - 1.96 * std_diff_Session2_43pixels, mean_diff_Session2_43pixels - 1.96 * std_diff_Session2_43pixels], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '--', 'DisplayName', 'Lower Limit (SS43, S2)');
+LoA_Session2_43pixels = [mean_diff_Session2_43pixels - 1.96 * std_diff_Session2_43pixels, mean_diff_Session2_43pixels + 1.96 * std_diff_Session2_43pixels];
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 18)
+ax.XLim = [22, 32];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure4e.pdf'),'pdf');
+xlabel({'Mean (Groups 1, 2)';'Sensitivity (dB)'});
+ylabel('Group 1 - Group 2 Sensitivity (dB)');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure4e.png', '-dpng', '-r600');
+
+%% t-test for 8 vs 43 pixel stimulus
+% p is indexed (group, session) to match the print loop below.
+[~,p(1,1)] = ttest(Session1_8pixels_G1,Session1_43pixels_G1);
+[~,p(1,2)] = ttest(Session2_8pixels_G1,Session2_43pixels_G1);
+[~,p(2,1)] = ttest(Session1_8pixels_G2,Session1_43pixels_G2);
+[~,p(2,2)] = ttest(Session2_8pixels_G2,Session2_43pixels_G2);
+theSplits = [1,2]; % Group 1 and Group 2, compared between stimulus sizes for the two sessions
+fprintf('Within-Session (8 vs 43 pixels) t-test p values\n');
+for dd = 1:length(theSplits)
+    for ss = 1:length(theSessions)
+        fprintf('\t Group %d, session %d, p = %0.3f\n',theSplits(dd),theSessions(ss),p(dd,ss));
+    end
+end
+
+%% Print limits of agreement
+fprintf('Group1 Vs Group2 : Session1, 8 pixels, LoA: %.2f, %.2f\n', LoA_Session1_8pixels);
+fprintf('Group1 Vs Group2 : Session1, 43 pixels, LoA: %.2f, %.2f\n', LoA_Session1_43pixels);
+fprintf('Group1 Vs Group2 : Session2, 8 pixels, LoA: %.2f, %.2f\n', LoA_Session2_8pixels);
+fprintf('Group1 Vs Group2 : Session2, 43 pixels, LoA: %.2f, %.2f\n', LoA_Session2_43pixels);
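+
+% A worked reminder of what ttest does above: the two-input call is the paired,
+% two-sided t-test, with each row one subject, so it is equivalent to a
+% one-sample t-test on the per-subject differences. Sketch:
+%   d = Session1_8pixels_G1 - Session1_43pixels_G1;
+%   [~, pExample] = ttest(d);   % same p as ttest(Session1_8pixels_G1, Session1_43pixels_G1)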
diff --git a/code/Figure5.m b/code/Figure5.m
new file mode 100644
index 0000000..1bd2c47
--- /dev/null
+++ b/code/Figure5.m
@@ -0,0 +1,160 @@
+%% Session 1 vs Session 2 plots
+
+%% Clear
+clear; close all;
+
+%% Output variant
+outputVariant = 'SlopeFree1';
+
+%% Set up for the figures and read data
+FigureSetup;
+
+%% Get all Session 1 and Session 2 data for combined trials
+theSubjects = unique(dataTable.Subject);
+theDiameters = unique(dataTable.Diameter);
+theSessions = unique(dataTable.Session);
+for pp = 1:length(theSubjects)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss) & ...
+                strcmp(dataTable.Method,'COMBINED') & strcmp(dataTable.Split,'All'));
+            if (sum(index) ~= 1)
+                error('Have not properly set up condition to pick out just one sensitivity');
+            end
+            sensitivitySessionwise(pp,dd,ss) = -dataTable.CorrectedThreshold_dB_(index);
+            CILower_SessionWise(pp,dd,ss) = -dataTable.CIHigh(index); % negative of threshold, so the high threshold CI value corresponds to the lower sensitivity CI bound
+            CIUpper_SessionWise(pp,dd,ss) = -dataTable.CILow(index);  % negative of threshold, so the low threshold CI value corresponds to the higher sensitivity CI bound
+            neg(pp,dd,ss) = sensitivitySessionwise(pp,dd,ss) - CILower_SessionWise(pp,dd,ss);
+            pos(pp,dd,ss) = CIUpper_SessionWise(pp,dd,ss) - sensitivitySessionwise(pp,dd,ss);
+        end
+    end
+end
+
+%% Figure 5a
+plotSize = [100 100 400 400];
+f = figure('Position',plotSize); clf; hold on
+errorbar(sensitivitySessionwise(:,1,1),sensitivitySessionwise(:,1,2),neg(:,1,2),pos(:,1,2),neg(:,1,1),pos(:,1,1),'bo','MarkerFaceColor','b','MarkerSize',markerSize);
+errorbar(sensitivitySessionwise(:,2,1),sensitivitySessionwise(:,2,2),neg(:,2,2),pos(:,2,2),neg(:,2,1),pos(:,2,1),'ro','MarkerFaceColor','r','MarkerSize',markerSize);
+plot([limMin limMax],[limMin limMax],'k:','LineWidth',2);
+xlabel('Session 1 Sensitivity (dB)');
+ylabel('Session 2 Sensitivity (dB)');
+legend( ...
+    {sprintf('%d arcmin',theDiameters(1)) ; ...
+    sprintf('%d arcmin',theDiameters(2)) ; ...
+    'Line of Equality'}, ...
+    'Location','SouthEast');
+axis('square');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold')
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+ticks = 16:2:32;
+xticks(ticks);
+yticks(ticks);
+lgd = legend('show');
+lgd.FontSize = 12;
+axis([limMin limMax limMin limMax]);
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure5a.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure5a.png', '-dpng', '-r600');
+
+%% t-tests for between session comparison
+[~,p(1,1)] = ttest(sensitivitySessionwise(:,1,1),sensitivitySessionwise(:,1,2));
+[~,p(1,2)] = ttest(sensitivitySessionwise(:,2,1),sensitivitySessionwise(:,2,2));
+
+fprintf('Session 1 vs Session 2 t-test p values\n');
+for dd = 1:length(theDiameters)
+    fprintf('\t%d arcmin, p = %0.3f\n',theDiameters(dd),p(dd));
+end
+
+%% Wilcoxon signed-rank test
+[p(1,1),h,~] = signrank(sensitivitySessionwise(:,1,1),sensitivitySessionwise(:,1,2));
+[p(1,2),h,~] = signrank(sensitivitySessionwise(:,2,1),sensitivitySessionwise(:,2,2));
+
+% Results
+fprintf('Session 1 vs Session 2 p values (Wilcoxon signed-rank test)\n');
+for dd = 1:length(theDiameters)
+    fprintf('\t%d arcmin, p = %0.3f\n',theDiameters(dd),p(dd));
+end
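+
+% signrank is the nonparametric counterpart of the paired t-test above; it
+% tests whether the median of the paired differences is zero. Sketch with
+% hypothetical numbers (not data from this study):
+%   a = [24.1 25.3 23.8 26.0 24.9]; b = [24.5 25.1 24.2 25.7 25.3];
+%   pEx = signrank(a, b);   % two-sided signed-rank p value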
+
+%% Figure 5b (Bland-Altman plot)
+Session1_8arcmin = sensitivitySessionwise(:,1,1);
+Session2_8arcmin = sensitivitySessionwise(:,1,2);
+Session1_43arcmin = sensitivitySessionwise(:,2,1);
+Session2_43arcmin = sensitivitySessionwise(:,2,2);
+
+[mean_a, diff_a] = calculate_bland_altman(Session1_8arcmin, Session2_8arcmin);
+[mean_b, diff_b] = calculate_bland_altman(Session1_43arcmin, Session2_43arcmin);
+
+mean_diff_SS8 = mean(diff_a);
+std_diff_SS8 = std(diff_a);
+
+mean_diff_SS43 = mean(diff_b);
+std_diff_SS43 = std(diff_b);
+
+plotSize = [100 100 200 400];
+figure('Position', plotSize);
+
+% Plot Bland-Altman data for each comparison
+scatter(mean_a, diff_a, 25, 'blue', 'filled', 'o', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS8');
+hold on;
+line([18, 23], [mean_diff_SS8, mean_diff_SS8], 'Color', 'blue', 'LineWidth', 4, 'LineStyle', '-', 'DisplayName', 'Mean Difference SS8');
+line([18, 23], [mean_diff_SS8 + 1.96 * std_diff_SS8, mean_diff_SS8 + 1.96 * std_diff_SS8], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Upper Limit (SS8)');
+line([18, 23], [mean_diff_SS8 - 1.96 * std_diff_SS8, mean_diff_SS8 - 1.96 * std_diff_SS8], 'Color', 'blue', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Lower Limit (SS8)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [16, 24];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+LOA_8pixels = [mean_diff_SS8 - 1.96 * std_diff_SS8, mean_diff_SS8 + 1.96 * std_diff_SS8];
+xlabel({'Mean (Session 1, Session 2)'; 'Sensitivity (dB)'});
+ylabel('(Session 1 - Session 2) Sensitivity (dB)');
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure5b.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure5b.png', '-dpng', '-r600');
+
+%% Figure 5c
+
+figure('Position', plotSize);
+scatter(mean_b, diff_b, 25, 'red', 'filled', 'o', 'MarkerFaceAlpha', 0.6, 'DisplayName', 'SS43');
+hold on;
+line([26, 30], [mean_diff_SS43, mean_diff_SS43], 'Color', 'red', 'LineWidth', 4, 'LineStyle', '-', 'DisplayName', 'Mean Difference SS43');
+line([26, 30], [mean_diff_SS43 - 1.96 * std_diff_SS43, mean_diff_SS43 - 1.96 * std_diff_SS43], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Lower Limit (SS43)');
+line([26, 30], [mean_diff_SS43 + 1.96 * std_diff_SS43, mean_diff_SS43 + 1.96 * std_diff_SS43], 'Color', 'red', 'LineWidth', 2, 'LineStyle', '-', 'DisplayName', 'Upper Limit (SS43)');
+ax = gca;
+set(gca, 'FontName', 'Arial','FontWeight','bold','FontSize', 12)
+ax.XLim = [24, 32];
+ax.YLim = [-2.5, 2.5];
+ax.XAxis.LineWidth = 2;
+ax.YAxis.LineWidth = 2;
+ax.XAxis.FontSize = 12;
+ax.YAxis.FontSize = 12;
+ticks = 24:2:32;
+xticks(ticks);
+LOA_43pixels = [mean_diff_SS43 - 1.96 * std_diff_SS43, mean_diff_SS43 + 1.96 * std_diff_SS43];
+xlabel({'Mean (Session 1, Session 2)'; 'Sensitivity (dB)'});
+ylabel('(Session 1 - Session 2) Sensitivity (dB)');
+% saveas(gcf,fullfile(analysisDir,outputVariant,'Figure5c.pdf'),'pdf');
+set(gcf, 'PaperPositionMode', 'auto');
+print(gcf, 'figure5c.png', '-dpng', '-r600');
+
+%% t-test for 8 vs 43 arcmin stimulus
+[~,p(1,1)] = ttest(Session1_8arcmin,Session1_43arcmin);
+[~,p(1,2)] = ttest(Session2_8arcmin,Session2_43arcmin);
+
+fprintf('Within-Session (8 vs 43 arcmin) t-test p values\n');
+for ss = 1:length(theSessions)
+    fprintf('\t Session %d, p = %0.3f\n',theSessions(ss),p(ss));
+end
+
+%% Print limits of agreement
+fprintf('Session 1 Vs Session 2 : 8 pixels, LoA: %.2f, %.2f\n', LOA_8pixels);
+fprintf('Session 1 Vs Session 2 : 43 pixels, LoA: %.2f, %.2f\n', LOA_43pixels);
\ No newline at end of file
diff --git a/code/FigureSetup.m b/code/FigureSetup.m
new file mode 100644
index 0000000..2e2bb54
--- /dev/null
+++ b/code/FigureSetup.m
@@ -0,0 +1,28 @@
+%% FigureSetup
+%
+% Set some parameters that we want to keep the same across figures and read in the output
+% of the analysis. Saves redoing this for each figure script.
+
+%% Parameters
+plotFont = 'Times';
+axisFontSize = 12;
+labelFontSize = 12;
+titleFontSize = 20;
+legendFontSize = 12;
+markerSize = 5;
+% plotSize = [100, 100,600,600];
+limMin = 16; limMax = 32;
+
+%% Path to analysis output dir
+%
+% The setpref call only needs to be run once on a given machine; the
+% preference persists across MATLAB sessions.
+setpref('New_analysis_20250912','analysisDir','C:\Users\niveg\Aguirre-Brainard Lab Dropbox\Nivedhitha Govindasamy\AO-Microperimetry Repeatability Paper\Data_for_paper\David_code_analysis\New_analysis_20250912\analysisDir');
+analysisDir = getpref('New_analysis_20250912','analysisDir');
+% analysisDir = getpref('AOMicroRepeat','analysisDir');
+
+%% Read table of fit information
+wState = warning('off','MATLAB:table:ModifiedAndSavedVarnames');
+dataTable = readtable(fullfile(analysisDir,outputVariant,'fitTable.xlsx'),'FileType','spreadsheet'); %,'VariableNamingRule','preserve');
+warning(wState);
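+
+% Note: suppressing MATLAB:table:ModifiedAndSavedVarnames means spreadsheet
+% headers like 'Corrected Threshold (dB)' come back as valid MATLAB names
+% (e.g. dataTable.CorrectedThreshold_dB_), which is what the figure scripts
+% index. To keep the original headers instead, one could use:
+%   dataTable = readtable(fullfile(analysisDir,outputVariant,'fitTable.xlsx'), ...
+%       'FileType','spreadsheet','VariableNamingRule','preserve');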
diff --git a/code/FitPsychometricFunction.m b/code/FitPsychometricFunction.m
new file mode 100644
index 0000000..b6f32cb
--- /dev/null
+++ b/code/FitPsychometricFunction.m
@@ -0,0 +1,83 @@
+% FitPsychometricFunction
+%
+% Massage data, fit and plot the psychometric function
+function [plot_log_intensities,plot_psychometric,corrected_psychometric, ...
+    log_threshold,corrected_log_threshold,psiParamsValues,h] = FitPsychometricFunction(log_intensity,responses, ...
+    log0Value,thresholdCriterion,convert_to_db,make_plot,figure_vis, ...
+    guessUpper,lapseUpper,slopeLower,slopeUpper)
+
+% Sort data and get what we need. Map -Inf log intensities (zero intensity
+% trials) to the value we use for log10(0).
+sorted_data = sortrows([log_intensity responses],1);
+sorted_log_intensities = sorted_data(:,1);
+sorted_log_intensities(isinf(sorted_log_intensities)) = log0Value;
+sorted_responses = sorted_data(:,2);
+
+% Get unique stimulus levels and data for those
+[unique_log_intensities] = unique(sorted_log_intensities,'legacy');
+num_presented = zeros(size(unique_log_intensities));
+for i = 1:length(num_presented)
+    num_presented(i) = length(find(sorted_log_intensities == unique_log_intensities(i)));
+    num_seen(i) = sum(sorted_responses(find(sorted_log_intensities == unique_log_intensities(i))));
+end
+
+% Fit using a wrapper around Palamedes
+if (make_plot)
+    fprintf('\t\tCalling Palamedes wrapper ... ');
+end
+[plot_log_intensities,plot_psychometric,corrected_psychometric, ...
+    log_threshold,corrected_log_threshold, psiParamsValues] = ...
+    fitPsychometric(unique_log_intensities,num_seen',num_presented,thresholdCriterion,convert_to_db,guessUpper,lapseUpper,slopeLower,slopeUpper);
+if (make_plot)
+    fprintf('done\n');
+end
+
+% Plot
+if (make_plot)
+    if (convert_to_db)
+        maxIntensityLog = 1;
+        minIntensityLog = -36;
+    else
+        maxIntensityLog = 0.1;
+        minIntensityLog = -3.6;
+    end
+
+    fontsize = 14; fwidth = 3; fheight = 3;
+    h = figure('Units', 'inches','Position', [400 200 fwidth fheight],'visible',figure_vis); hold on;
+    a0 = gca;
+    set(a0,'FontName','Arial','FontSize',14);
+    if (convert_to_db)
+        xlabel('Trial intensity (dB)','FontSize',fontsize);
+    else
+        xlabel('Log trial intensity (au)','FontSize',fontsize);
+    end
+    ylabel('Fraction seen','FontSize',fontsize);
+    xlim([minIntensityLog maxIntensityLog]);
+    ylim([0 1]);
+    set(a0,'FontSize',fontsize);
+    set(a0,'LineWidth',1,'TickLength',[0.025 0.025]);
+    set(a0,'Color','none');
+    set(h,'Color',[1 1 1]);
+    set(h,'PaperPositionMode','auto');
+    set(h, 'renderer', 'painters');
+    baseMarkerSize = 5;
+    sizeNormalizer = 40;
+    for tt = 1:length(unique_log_intensities)
+        markerSize = 4+round(baseMarkerSize*num_presented(tt)/sizeNormalizer);
+        plot(unique_log_intensities(tt), num_seen(tt)' ./ num_presented(tt)','ro','MarkerSize',markerSize,'MarkerFaceColor','r');
+    end
+
+    plot(plot_log_intensities,corrected_psychometric,'k-','LineWidth',3,'HandleVisibility', 'off');
+    plot(plot_log_intensities,plot_psychometric,'r:','LineWidth',2,'HandleVisibility', 'off');
+    plot([minIntensityLog corrected_log_threshold],[thresholdCriterion thresholdCriterion],'k:','LineWidth',2,'HandleVisibility', 'off');
+    plot([corrected_log_threshold corrected_log_threshold],[0 thresholdCriterion],'k:','LineWidth',2,'HandleVisibility', 'off');
+
+else
+    h = [];
+end
+
+end
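+
+% For reference, the per-level aggregation above is equivalent to this compact
+% sketch using accumarray (same inputs, same result):
+%   [uLevels, ~, g] = unique(sorted_log_intensities);
+%   nPresented = accumarray(g, 1);              % trials per unique level
+%   nSeen = accumarray(g, sorted_responses);    % 'seen' responses per level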
diff --git a/code/FitTrials.m b/code/FitTrials.m
new file mode 100644
index 0000000..f333d02
--- /dev/null
+++ b/code/FitTrials.m
@@ -0,0 +1,535 @@
+%% FitTrials
+%
+% Read in the combined data file and fit it all.
+
+%% Initialize
+close all hidden; clear all;
+
+%% Variant
+%
+% The variant is a string that determines a set of fitting parameters. Output for each
+% variant is stored separately. As an example, maybe we want to allow the slope of the
+% psychometric function to be a free parameter versus fix it to a particular value, or
+% maybe we want to experiment with different limits on the guess or lapse rate. The
+% variant string lets you look up how the parameters for each variant were set, and
+% then find the output for that variant.
+outputVariant = 'SlopeFree1';
+
+%% Some parameters
+guessUpper = 0.05;
+log0TrialThreshold = -3;
+thresholdCriterion = 0.78;
+nBootstraps = 500;
+convertToDb = true;
+
+% This parameter determines how much above threshold a trial intensity
+% needs to be for us to use it to estimate lapse rate. Different numerical
+% values apply depending on whether you are using log10 units or dB. Indeed, this sort of
+% numerical adjustment happens in other places below.
+if (convertToDb)
+    lapseEstIntensityThreshold = 6.0;
+else
+    lapseEstIntensityThreshold = 0.6;
+end
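+
+% Unit convention sketch: dB here means 10*log10(intensity), so every log10
+% quantity scales by a factor of 10 in dB (hence 6.0 dB vs 0.6 log10 above):
+%   exampleLog10 = -2.5;
+%   exampleDb = 10*exampleLog10;   % -25 dB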
+
+%% Determine other parameters based on variant
+%
+% Note that locking the lapse rate to 0 for the fixed-slope variant leads
+% to some pathological fits, for reasons I don't fully understand.
+% Finesse by using 1%, which is small enough for practical
+% purposes.
+%
+% To get fixed slope values, first run with 'SlopeFree', and then
+% use SummarizePFSlopes to get useful info, including the
+% mean slopes from the slope free fits.
+switch (outputVariant)
+    case 'SlopeFree'
+        if (convertToDb)
+            slopeLower8 = 0.5;
+            slopeUpper8 = 7;
+            slopeLower43 = 0.5;
+            slopeUpper43 = 7;
+        else
+            slopeLower8 = 5;
+            slopeUpper8 = 70;
+            slopeLower43 = 5;
+            slopeUpper43 = 70;
+        end
+        lapseUpper = 0.05;
+    case 'SlopeFree1'
+        if (convertToDb)
+            slopeLower8 = 0.5;
+            slopeUpper8 = 7;
+            slopeLower43 = 0.5;
+            slopeUpper43 = 7;
+        else
+            slopeLower8 = 5;
+            slopeUpper8 = 70;
+            slopeLower43 = 5;
+            slopeUpper43 = 70;
+        end
+        lapseUpper = 0.01;
+    case 'SlopeFixed'
+        if (convertToDb)
+            slopeLower8 = 1.08;
+            slopeUpper8 = 1.08;
+            slopeLower43 = 2.69;
+            slopeUpper43 = 2.69;
+        else
+            slopeLower8 = 10.8;
+            slopeUpper8 = 10.8;
+            slopeLower43 = 26.9;
+            slopeUpper43 = 26.9;
+        end
+        lapseUpper = 0.01;
+end
+
+%% Figure visibility
+%
+% Maybe making invisible figures will avoid crashes?
+% They still get saved out, so they are easy to look at later.
+figureVis = 'off';
+
+%% Read combined data produced by CombineTrials
+%
+% See comments in CombineTrials about how to set preferences for where the data and analysis
+% directories live. Do not hard-code the paths elsewhere; the setpref calls below only
+% need to be run once on a given machine.
+setpref('AOMicroRepeat','dataDir','C:\Users\niveg\Aguirre-Brainard Lab Dropbox\Nivedhitha Govindasamy\AO-Microperimetry Repeatability Paper\Data_for_paper\David_code_analysis\New_analysis_20250912\dataDir');
+setpref('AOMicroRepeat','analysisDir','C:\Users\niveg\Aguirre-Brainard Lab Dropbox\Nivedhitha Govindasamy\AO-Microperimetry Repeatability Paper\Data_for_paper\David_code_analysis\figure-rerun');
+analysisDir = getpref('AOMicroRepeat','analysisDir');
+d = load(fullfile(analysisDir,'combinedData.mat'),'all_trials','all_trials_unpacked','log0Value','theParticipants','theDiameters','theSessions','theSplits','theMethods','AOM');
+all_trials_unpacked = d.all_trials_unpacked;
+log0Value = d.log0Value;
+theParticipants = d.theParticipants;
+theDiameters = d.theDiameters;
+theSessions = d.theSessions;
+theMethods = d.theMethods;
+AOM = d.AOM;
+
+% Check and handle how theSplits comes in and goes
+% out. This is tricky handshaking between this routine
+% and CombineTrials. Despite d.theSplits saying there
+% is just one split, there are 3, with the latter two being
+% the session-based splits done according to the pre-registration.
+%
+% The way things are coded is a bit kluged up, but we check like
+% mad that our various assumptions hold.
+if (length(d.theSplits) > 1 | ~strcmp(d.theSplits,'All'))
+    error('Unexpected form of d.theSplits. Did you re-run CombineTrials?');
+end
+theSplits = {'All' 'Group1' 'Group2'};
+
+%% Freeze rng seed for repeatability
+rng(101);
+
+%% Loop over everything
+tableRow = 1;
+for pp = 1:length(theParticipants)
+    for dd = 1:length(theDiameters)
+        for ss = 1:length(theSessions)
+            for hh = 1:length(theSplits)
+                checkSessionDate = [];
+                MOCSFileTimes = [];
+                QUESTFileTimes = [];
+                for mm = 1:length(theMethods)
+
+                    % Store info for what we are analyzing in this run
+                    theMethod{tableRow,1} = theMethods{mm};
+                    theSubject{tableRow,1} = theParticipants{pp};
+                    theDiameter(tableRow,1) = theDiameters(dd);
+                    theSession(tableRow,1) = theSessions(ss);
+                    theSplit{tableRow,1} = theSplits{hh};
+
+                    % LUT correct the trial data
+                    fprintf('\tLUT correcting trial data\n');
+
+                    % Clear out variables from previous times through
+                    % the loop.
+                    clear log_intensity_nominal lin_intensity_nominal lin_intensity_rounded log_intensity_rounded log_intensity_rounded_chk
+                    clear lut_row_index dac_intensity_lut lin_intensity_lut
+
+                    % Do the lookup table conversion
+                    fprintf('\tEntering LUT correction loop over trials ... ')
+                    for i = 1:size(all_trials_unpacked{pp,dd,ss,hh,mm},1)
+                        % Dir for this output
+                        pathToAnalysis = fullfile(analysisDir,outputVariant,theSubject{tableRow},['Session' num2str(theSession(tableRow))],['Size' num2str(theDiameter(tableRow))],theMethod{tableRow},theSplit{tableRow});
+                        if (~exist(pathToAnalysis,"dir"))
+                            mkdir(pathToAnalysis);
+                        end
+
+                        % Get the nominal log intensity from the data
+                        log_intensity_nominal(i) = all_trials_unpacked{pp,dd,ss,hh,mm}(i,1);
+
+                        % Convert to nominal linear intensity
+                        lin_intensity_nominal(i) = 10.^log_intensity_nominal(i);
+
+                        % Log10 trial values below log0TrialThreshold (-3) get rounded
+                        % down to 0 intensity. Deal with this.
+                        if (log_intensity_nominal(i) < log0TrialThreshold)
+                            log_intensity_nominal(i) = log0Value;
+                            lin_intensity_nominal(i) = 0;
+                        end
+
+                        % Round the nominal linear intensity. This should match a row
+                        % of the first column of the LUT.
+                        lin_intensity_rounded(i) = round(lin_intensity_nominal(i)*1000)/1000;
+
+                        % Get the rounded log intensity, which we think is what
+                        % is in the third column of the LUT. Except that if
+                        % the linear intensity is 0, then the entry of the
+                        % third column is zero and not -Inf. Handle
+                        % this for the check, and map the -Inf to the
+                        % log value we use for 0 for further analysis.
+                        log_intensity_rounded(i) = log10(lin_intensity_rounded(i));
+                        log_intensity_rounded_chk(i) = log_intensity_rounded(i);
+                        if (lin_intensity_rounded(i) == 0)
+                            log_intensity_rounded(i) = log0Value;
+                            log_intensity_rounded_chk(i) = 0;
+                        end
+
+                        % Compute the row index into the LUT based on the linear intensity
+                        lut_row_index(i) = lin_intensity_rounded(i)*1000+1;
+
+                        % Get the corrected linear intensity by
+                        % averaging the nominal linear intensities that
+                        % correspond to the DAC value actually used.
+                        % Since we don't know precisely what the AOM
+                        % does in this case, because the LUT was created
+                        % by interpolation of measurements that were
+                        % made long ago, that seems as clever as
+                        % anything else I can think of. One could also
+                        % use the min or the max of the ambiguous set
+                        % of values, or any weighted average.
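+                        %
+                        % Concretely (a sketch with a hypothetical DAC code,
+                        % not real LUT values): if several LUT rows share one
+                        % DAC code, their column-1 linear intensities are averaged:
+                        %   rows = find(AOM.green_AOM_lut(:,4) == someDacCode);
+                        %   linCorrected = mean(AOM.green_AOM_lut(rows,1));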
+ dac_intensity_lut(i) = AOM.green_AOM_lut(lut_row_index(i),4); + lutRowIndex = find(AOM.green_AOM_lut(:,4) == dac_intensity_lut(i) ); + if (isempty(lutRowIndex)) + error('No LUT rows correspond to trial dac intensity'); + end + lin_intensity_lut(i) = mean(AOM.green_AOM_lut(lutRowIndex,1)); + + % Convert to log, taking log10(0) to be the + % log0Value + if (lin_intensity_lut(i) == 0) + log_intensity_lut(i) = log0Value; + else + log_intensity_lut(i) = log10(lin_intensity_lut(i)); + end + + % Sanity checks + if (abs(lin_intensity_rounded(i)-AOM.green_AOM_lut(lut_row_index(i),1)) > 1e-5) + error('Do not understand lut table and/or its indexing'); + end + if (abs(log_intensity_rounded_chk(i)-AOM.green_AOM_lut(lut_row_index(i),3)) > 1e-5) + error('Do not understand lut table and/or its indexing'); + end + + % Tag corrected log and linear intensities into the + % all_trials_unpacked variable that we save + all_trials_unpacked{pp,dd,ss,hh,mm}(i,3) = lin_intensity_lut(i); + all_trials_unpacked{pp,dd,ss,hh,mm}(i,4) = log_intensity_lut(i); + end + fprintf('\tdone\n'); + + + % Figure to make sure LUT conversion is basically + % the identity. Uncomment if you want to look at + % this. + h1 = figure('visible',figureVis); subplot(1,2,1); hold on; + plot(lin_intensity_nominal,lin_intensity_lut,'ro','MarkerFaceColor','r'); + axis('square'); axis([0 1 0 1]); + plot([0 1],[0 1],'k:'); + xlabel('Linear intensity nominal'); + ylabel('Linear intensity LUT'); + title({ sprintf('%s, %s, session %d, split %s, size %d',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow},theDiameter(tableRow)) ; ... + ''},'FontSize',10); + subplot(1,2,2); hold on; + plot(lin_intensity_nominal,lin_intensity_lut,'ro','MarkerFaceColor','r'); + axis('square'); + if (theDiameters(dd) == min(theDiameters)) + axis([0 0.1 0 0.1]); + plot([0 0.1],[0 0.1],'k:'); + else + axis([0 0.01 0 0.01]); + plot([0 0.01],[0 0.01],'k:'); + end + xlabel('Linear intensity nominal'); + ylabel('Linear intensity LUT'); + title({ sprintf('%s, %s, session %d, split %s, size %d',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow},theDiameter(tableRow)) ; ... + ''},'FontSize',10); + drawnow; + saveas(h1,fullfile(pathToAnalysis,'quantizationFig.tif'),'tif'); + + h2 = figure('visible',figureVis); hold on; + if (convertToDb) + plot(10*log_intensity_nominal,10*log_intensity_lut,'ro','MarkerFaceColor','r'); + plot([-36 1],[-36 1],'k:'); + xlabel('Intensity nominal (dB)'); + ylabel('Intensity LUT (dB)'); + axis([-36 1 -36 1]); + else + plot(log_intensity_nominal,log_intensity_lut,'ro','MarkerFaceColor','r'); + plot([-3.6 0.1],[-3.6 0.1],'k:'); + xlabel('Log10 intensity nominal'); + ylabel('Log10 intensity LUT'); + axis([-3.6 0.1 -3.6 0.1]); + end + title({ sprintf('%s, %s, session %d, split %s, size %d',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow},theDiameter(tableRow)) ; ... + ''},'FontSize',10); + axis('square'); + drawnow; +% saveas(h2,fullfile(pathToAnalysis,'quantizationLogFig.tif'),'tif'); + print(gcf,fullfile(pathToAnalysis,'quantizationLogFig.tif'), '-dpng', '-r600'); % saves current figure as PNG at 600 dpi + + % Here is the format of all_trials_unpacked + % + % These values are not adjusted for power measurements. + % they are scaled with the instrument maximum having a + % linear intensity of 1. 
+ % + % Column 1 - nominal log10 intensity + % Column 2 - 1 = seen, 0 = not seen + % Column 3 - lut corrected linear intensity + % Column 4 - lut corrected log intensity + trial_log_intensities = all_trials_unpacked{pp,dd,ss,hh,mm}(:,4); + trial_responses = all_trials_unpacked{pp,dd,ss,hh,mm}(:,2); + + % Convert to dB? + if (convertToDb) + trial_log_intensities = 10*trial_log_intensities; + log0ValueUse = 10*log0Value; + end + + % Staircase plot. We only do this for full sessions and only for MOCS + % and QUEST, not COMBINED. + % + % We take advantage of the fact that we know the data + % were concatenated run-by-run. Maybe not the most secure coding, but + % OK at least for now. + switch (theSplit{tableRow}) + case 'All' + if (strcmp(theMethod{tableRow},'MOCS') | strcmp(theMethod{tableRow},'QUEST')) + nRuns = 4; + nTrialsPerRun = length(trial_responses)/nRuns; + if (round(nTrialsPerRun) ~= nTrialsPerRun) + error('Do not understand how data were concatenated'); + end + for zz = 1:nRuns + run_log_intensities = trial_log_intensities((zz-1)*nTrialsPerRun+1:zz*nTrialsPerRun); + run_responses = trial_responses((zz-1)*nTrialsPerRun+1:zz*nTrialsPerRun); + fontsize = 14; fwidth = 3; fheight = 3; + hs = figure('Units', 'inches','Position', [400 200 fwidth fheight]); hold on; + set(gca,'FontName','Arial','FontSize',14); + ax.LineWidth = 2; + ax.LineWidth = 2; + runIndicator = 1:nTrialsPerRun; + plot(runIndicator,run_log_intensities,'r','LineWidth',2); + index = find(run_responses == 1); + plot(runIndicator(index),run_log_intensities(index),'o','MarkerSize',4,'Color',[0.5 1 0.5],'MarkerFaceColor',[0.5 1 0.5]); + index = find(run_responses == 0); + plot(runIndicator(index),run_log_intensities(index),'^','MarkerSize',4,'Color',[0.5 0.5 1],'MarkerFaceColor',[0.5 0.5 1]); + xlabel('Trial number','FontSize',14) + if (convertToDb) + ylabel('Trial intensity (dB)','FontSize',14); + ylim([-35 -15]); + else + ylabel('Log trial intensity (au)','FontSize',14); + ylim([-3.6 0.1]); + end + xlim(([0 100])); + title({ sprintf('Subject %s, %s, session %d, split %s',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow}) ; ... + sprintf('Diameter %d arcmin, run %d',theDiameter(tableRow),zz) ; ''},'FontSize',20); + drawnow; + saveas(hs,fullfile(pathToAnalysis,sprintf('staircasePlot_run%d.tif',zz)),'tif'); + title(''); + drawnow; +% saveas(hs,fullfile(pathToAnalysis,sprintf('staircasePlotNoTitle_run%d.tif',zz)),'tif'); + print(gcf, fullfile(pathToAnalysis,sprintf('staircasePlotNoTitle_run%d.tif',zz)), '-dpng', '-r600'); % saves current figure as PNG at 600 dpi + + end + end + end + + % Set beta for this size + switch (theDiameter(tableRow)) + case 8 + slopeLowerUse(tableRow,1) = slopeLower8; + slopeUpperUse(tableRow,1) = slopeUpper8; + case 43 + slopeLowerUse(tableRow,1) = slopeLower43; + slopeUpperUse(tableRow,1) = slopeUpper43; + otherwise + end + guessUpperUse(tableRow,1) = guessUpper; + lapseUpperUse(tableRow,1) = lapseUpper; + + % Fit the psychometric function. The fitting routine makes + % a plot and we adjust the title here for things the + % fitting routine doesn't know about. + fprintf('\tCalling top level PF fit routine\n'); + [plot_log_intensities,plot_psychometric,corrected_psychometric, ... + log_threshold,corrected_log_threshold,psiParamsValues,h] = FitPsychometricFunction(trial_log_intensities,trial_responses,log0ValueUse,thresholdCriterion, ... 
+ convertToDb,true,figureVis,guessUpperUse(tableRow),lapseUpperUse(tableRow),slopeLowerUse(tableRow),slopeUpperUse(tableRow)); + fprintf('\tDone with top level PF fit\n'); + if (convertToDb) + title({ sprintf('Subject %s, %s, session %d, split %s',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow}) ; ... + sprintf('Diameter %d arcmin, threshold (dB) %0.2f',theDiameter(tableRow),corrected_log_threshold) ; ... + sprintf('Slope %0.1f, guess %0.2f, lapse %0.2f',psiParamsValues(2),psiParamsValues(3),psiParamsValues(4)) ; ''},'FontSize',20); + else + title({ sprintf('Subject %s, %s, session %d, split %s',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow}) ; ... + sprintf('Diameter %d arcmin, log threshold %0.2f',theDiameter(tableRow),corrected_log_threshold) ; ... + sprintf('Slope %0.1f, guess %0.2f, lapse %0.2f',psiParamsValues(2),psiParamsValues(3),psiParamsValues(4)) ; ''},'FontSize',20); + end + drawnow; + saveas(h,fullfile(pathToAnalysis,'psychometricFcn.tif'),'tif'); + + % Estimate guess and lapse rates from the data + index = find(trial_log_intensities == log0ValueUse); + if (~isempty(index)) + guessEstFromData(tableRow,1) = mean(trial_responses(index)); + else + guessEstFromData(tableRow,1) = -1; + end + nTrialsGuessEstFromData(tableRow,1) = length(index); + index = find(trial_log_intensities > corrected_log_threshold + lapseEstIntensityThreshold); + if (~isempty(index)) + lapseEstFromData(tableRow,1) = 1-mean(trial_responses(index)); + else + lapseEstFromData(tableRow,1) = -1; + end + nTrialsLapseEstFromData(tableRow,1) = length(index); + + % Bootstrap the data + fprintf('\tBootstrapping data\n') + nTrialsForBootstrap = length(trial_log_intensities); + bootstrap_log_intensities = zeros(nTrialsForBootstrap,nBootstraps); + bootstrap_responses = zeros(nTrialsForBootstrap,nBootstraps); + switch(theMethod{tableRow}) + case {'QUEST', 'COMBINED'} + % For QUEST and COMBINED, we draw with replacement from all + % the trials except the catch trials, which we draw from separately. + for bb = 1:nBootstraps + temp_boot_intensities = []; + temp_boot_responses = []; + + % Catch trials + uindex = find(trial_log_intensities == log0ValueUse); + utemp_intensities = trial_log_intensities(uindex); + utemp_responses = trial_responses(uindex); + bootIndex = randi(length(uindex),length(uindex),1); + temp_boot_intensities = [temp_boot_intensities ; utemp_intensities(bootIndex)]; + temp_boot_responses = [temp_boot_responses ; utemp_responses(bootIndex)]; + + % Non catch trials + uindex = find(trial_log_intensities ~= log0ValueUse); + utemp_intensities = trial_log_intensities(uindex); + utemp_responses = trial_responses(uindex); + bootIndex = randi(length(uindex),length(uindex),1); + temp_boot_intensities = [temp_boot_intensities ; utemp_intensities(bootIndex)]; + temp_boot_responses = [temp_boot_responses ; utemp_responses(bootIndex)]; + + bootstrap_log_intensities(:,bb) = temp_boot_intensities; + bootstrap_responses(:,bb) = temp_boot_responses; + end + clear lutRowIndex + + case 'MOCS' + % For MOCS, we respect the experimental design + % and bootstrap each stimulus intensity separately. 
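+                            % Per-level sketch of the resampling below
+                            % (hypothetical single level someLevel):
+                            %   r = trial_responses(trial_log_intensities == someLevel);
+                            %   rBoot = r(randi(numel(r), numel(r), 1));   % resample with replacement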
+                            unique_log_intensities = unique(trial_log_intensities);
+                            for bb = 1:nBootstraps
+                                temp_boot_intensities = [];
+                                temp_boot_responses = [];
+                                for uu = 1:length(unique_log_intensities)
+                                    uindex = find(trial_log_intensities == unique_log_intensities(uu));
+                                    utemp_intensities = trial_log_intensities(uindex);
+                                    utemp_responses = trial_responses(uindex);
+
+                                    bootIndex = randi(length(uindex),length(uindex),1);
+                                    temp_boot_intensities = [temp_boot_intensities ; utemp_intensities(bootIndex)];
+                                    temp_boot_responses = [temp_boot_responses ; utemp_responses(bootIndex)];
+                                end
+                                bootstrap_log_intensities(:,bb) = temp_boot_intensities;
+                                bootstrap_responses(:,bb) = temp_boot_responses;
+                            end
+                        otherwise
+                            error('Unknown method specified');
+                    end
+
+                    % Bootstrap the fits and extract the CI. Add the CI to the plot as
+                    % a thick blue horizontal bar.
+                    fprintf('\tFitting bootstrapped data\n');
+                    for bb = 1:nBootstraps
+                        if (rem(bb,100) == 0)
+                            fprintf('\t\tBootstrap fit %d of %d\n',bb,nBootstraps);
+                        end
+                        [~,~,~,~,boot_corrected_threshold(bb),~,~] = FitPsychometricFunction(bootstrap_log_intensities(:,bb),bootstrap_responses(:,bb),log0ValueUse,thresholdCriterion, ...
+                            convertToDb,false,false,guessUpperUse(tableRow),lapseUpperUse(tableRow),slopeLowerUse(tableRow),slopeUpperUse(tableRow));
+                    end
+                    fprintf('\tDone fitting bootstrapped data\n');
+                    boot_quantiles = quantile(boot_corrected_threshold,[0.025 0.5 0.975]);
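+                    % boot_quantiles([1 3]) is the bootstrap percentile 95% CI
+                    % (2.5th and 97.5th percentiles of the refit thresholds),
+                    % and boot_quantiles(2) is the bootstrap median. Sketch:
+                    %   ci = quantile(bootVals, [0.025 0.975]);   % bootVals is a hypothetical vector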
+                    % figure(h);
+                    plot([boot_quantiles(1) boot_quantiles(3)],[thresholdCriterion thresholdCriterion],'b','LineWidth',4);
+                    saveas(h,fullfile(pathToAnalysis,'psychometricFcnCI.tif'),'tif');
+
+                    % Save a version without our informative title, for
+                    % paper figures
+                    title('');
+% saveas(h,fullfile(pathToAnalysis,'psychometricFcnCINoTitle.tif'),'tif');
+                    print(gcf,fullfile(pathToAnalysis,'psychometricFcnCINoTitle.tif'), '-dpng', '-r600'); % saves current figure as PNG at 600 dpi
+
+                    % Save what we learned
+                    fprintf('\tSaving ... ');
+                    this_all_trials_unpacked = all_trials_unpacked{pp,dd,ss,hh,mm};
+                    save(fullfile(pathToAnalysis,'fitOutput.mat'),'this_all_trials_unpacked','plot_log_intensities','plot_psychometric', ...
+                        'corrected_psychometric','log_threshold','corrected_log_threshold','psiParamsValues','boot_corrected_threshold','boot_quantiles','-v7.3');
+                    fprintf('done\n');
+
+                    % Accumulate data
+                    theTableLogThreshold(tableRow,1) = log_threshold;
+                    theTableCorrectedLogThreshold(tableRow,1) = corrected_log_threshold;
+                    theTableCorrectedLogThresholdBootMedian(tableRow,1) = boot_quantiles(2);
+                    theTableCorrectedBootCILow(tableRow,1) = boot_quantiles(1);
+                    theTableCorrectedBootCIHigh(tableRow,1) = boot_quantiles(3);
+                    theTablePsiParamsValues(tableRow,:) = psiParamsValues;
+                    theTableThresholdCriterion(tableRow,1) = thresholdCriterion;
+
+                    % Clear and close
+                    clear bootstrap_log_intensities bootstrap_responses boot_corrected_threshold boot_quantiles
+                    clear dac_intensity_lut lin_intensity_lut lin_intensity_nominal lin_intensity_rounded
+                    clear log_intensity_lut log_intensity_nominal log_intensity_rounded log_intensity_rounded_chk lut_row_index
+                    clear plot_log_intensities plot_psychometric shuffleIndex
+                    clear temp_boot_intensities temp_boot_responses trial_log_intensities trial_responses
+
+                    % Bump table row
+                    tableRow = tableRow + 1;
+                end
+            end
+        end
+    end
+
+    % Close figures for this subject
+    close all hidden;
+
+    % Could make summary figures for this subject here
+
+end
+
+% Write out full analysis data table
+fprintf('\nWriting xlsx file ... ')
+if (convertToDb)
+    tableVariableNames = {'Subject','Diameter','Session','Method','Split','Threshold (dB)','Corrected Threshold (dB)','CI Low','CI High', 'Alpha','Beta','Guess','Lapse', ...
+        'Criterion','SlopeLimitLow','SlopeLimitHigh','guessLimit','lapseLimit','guessEstFromData','nTrialsGuessEstFromData','lapseEstFromData','nTrialsLapseEstFromData'};
+    thresholdPlaces = 1;
+else
+    tableVariableNames = {'Subject','Diameter','Session','Method','Split','Log10 Threshold','Corrected Log10 Threshold','CI Low','CI High', 'Alpha','Beta','Guess','Lapse', ...
+        'Criterion','SlopeLimitLow','SlopeLimitHigh','guessLimit','lapseLimit','guessEstFromData','nTrialsGuessEstFromData','lapseEstFromData','nTrialsLapseEstFromData'};
+    thresholdPlaces = 2;
+end
+dataTable = table(theSubject,theDiameter,theSession,theMethod,theSplit, ...
+    round(theTableLogThreshold,thresholdPlaces),round(theTableCorrectedLogThreshold,thresholdPlaces),round(theTableCorrectedBootCILow,thresholdPlaces), round(theTableCorrectedBootCIHigh,thresholdPlaces), ...
+    round(theTablePsiParamsValues(:,1),2),round(theTablePsiParamsValues(:,2),2), round(theTablePsiParamsValues(:,3),3), round(theTablePsiParamsValues(:,4),3), ...
+    theTableThresholdCriterion, ...
+    slopeLowerUse,slopeUpperUse,guessUpperUse,lapseUpperUse, ...
+    round(guessEstFromData,3), nTrialsGuessEstFromData,round(lapseEstFromData,3),nTrialsLapseEstFromData, ...
+    'VariableNames',tableVariableNames);
+writetable(dataTable,fullfile(analysisDir,outputVariant,'fitTable.xlsx'),'FileType','spreadsheet');
+fprintf('done\n');
+
diff --git a/code/FitTrialsDHB.m b/code/FitTrialsDHB.m
new file mode 100644
index 0000000..08f676c
--- /dev/null
+++ b/code/FitTrialsDHB.m
@@ -0,0 +1,533 @@
+%% FitTrialsDHB
+%
+% Read in the combined data file and fit it all.
+
+%% Initialize
+close all hidden; clear all;
+
+%% Variant
+%
+% The variant is a string that determines a set of fitting parameters. Output for each
+% variant is stored separately. As an example, maybe we want to allow the slope of the
+% psychometric function to be a free parameter versus fix it to a particular value, or
+% maybe we want to experiment with different limits on the guess or lapse rate. The
+% variant string lets you look up how the parameters for each variant were set, and
+% then find the output for that variant.
+outputVariant = 'SlopeFree1';
+
+%% Some parameters
+guessUpper = 0.05;
+log0TrialThreshold = -3;
+thresholdCriterion = 0.78;
+nBootstraps = 500;
+convertToDb = true;
+
+% This parameter determines how much above threshold a trial intensity
+% needs to be for us to use it to estimate lapse rate. Different numerical
+% values apply depending on whether you are using log10 units or dB. Indeed, this sort of
+% numerical adjustment happens in other places below.
+if (convertToDb)
+    lapseEstIntensityThreshold = 6.0;
+else
+    lapseEstIntensityThreshold = 0.6;
+end
+
+%% Determine other parameters based on variant
+%
+% Note that locking the lapse rate to 0 for the fixed-slope variant leads
+% to some pathological fits, for reasons I don't fully understand.
+% Finesse by using 1%, which is small enough for practical
+% purposes.
+%
+% To get fixed slope values, first run with 'SlopeFree', and then
+% use SummarizePFSlopes to get useful info, including the
+% mean slopes from the slope free fits.
+switch (outputVariant) + case 'SlopeFree' + if (convertToDb) + slopeLower8 = 0.5; + slopeUpper8 = 7; + slopeLower43 = 0.5; + slopeUpper43 = 7; + else + slopeLower8 = 5; + slopeUpper8 = 70; + slopeLower43 = 5; + slopeUpper43 = 70; + end + lapseUpper = 0.05; + case 'SlopeFree1' + if (convertToDb) + slopeLower8 = 0.5; + slopeUpper8 = 7; + slopeLower43 = 0.5; + slopeUpper43 = 7; + else + slopeLower8 = 5; + slopeUpper8 = 70; + slopeLower43 = 5; + slopeUpper43 = 70; + end + lapseUpper = 0.01; + case 'SlopeFixed' + if (convertToDb) + slopeLower8 = 1.08; + slopeUpper8 = 1.08; + slopeLower43 = 2.69; + slopeUpper43 = 2.69; + else + slopeLower8 = 10.8; + slopeUpper8 = 10.8; + slopeLower43 = 26.9; + slopeUpper43 = 26.9; + end + lapseUpper = 0.01; +end + +%% Figure visibility +% +% Maybe making invisible figures will avoid crashes? +% They still get saved out so easy to look at later. +figureVis = 'off'; + +%% Read combined data produced by CombineData +analysisDir = getpref('AOMicroRepeat','analysisDir'); +d = load(fullfile(analysisDir,'combinedData.mat'),'all_trials','all_trials_unpacked','log0Value','theParticipants','theDiameters','theSessions','theSplits','theMethods','AOM'); +all_trials_unpacked = d.all_trials_unpacked; +log0Value = d.log0Value; +theParticipants = d.theParticipants; +theDiameters = d.theDiameters; +theSessions = d.theSessions; +theSplits = d.theSplits; +theMethods = d.theMethods; +AOM = d.AOM; + +%% Freeze rng seed for repeatability +rng(101); + +%% Loop over everything +tableRow = 1; +for pp = 1:length(theParticipants) + for dd = 1:length(theDiameters) + for ss = 1:length(theSessions) + for hh = 1:length(theSplits) + checkSessionDate = []; + MOCSFileTimes = []; + QUESTFileTimes = []; + for mm = 1:length(theMethods) + + % Store info for what we are analyzing in this run + theMethod{tableRow,1} = theMethods{mm}; + theSubject{tableRow,1} = theParticipants{pp}; + theDiameter(tableRow,1) = theDiameters(dd); + theSession(tableRow,1) = theSessions(ss); + theSplit{tableRow,1} = theSplits{hh}; + + % If just checking, break and don't do anything else + fprintf('\tLUT correcting trial data\n'); + + % Clear out variables from previous time through + % the loop. + clear log_intensity_nominal lin_intensity_nominal lin_intensity_rounded log_intensity_nominal log_intensity_rounded log_intensity_rounded_chk + clear lut_row_index dac_intensity_lut lin_intensity_lut + % Do the lookup table conversion + fprintf('\tEntering LUT correction loop over trials ... ') + for i = 1:size(all_trials_unpacked{pp,dd,ss,hh,mm},1) + % Dir for this output + pathToAnalysis = fullfile(analysisDir,outputVariant,theSubject{tableRow},['Session' num2str(theSession(tableRow))],['Size' num2str(theDiameter(tableRow))],theMethod{tableRow},theSplit{tableRow}); + if (~exist(pathToAnalysis,"dir")) + mkdir(pathToAnalysis); + end + + % Get the nominal log intensity from the data + log_intensity_nominal(i) = all_trials_unpacked{pp,dd,ss,hh,mm}(i,1); + + % Convert to nominal lin intensity + lin_intensity_nominal(i) = 10.^log_intensity_nominal(i); + + % Log10 trial values less than 3 get rounded + % down to 0 intensity. Deal with this. + if (log_intensity_nominal(i) < log0TrialThreshold) + log_intensity_nominal(i) = log0Value; + lin_intensity_nominal(i) = 0; + end + + % Round linear intensity nominal. Should match a row first column of lut + lin_intensity_rounded(i) = round(lin_intensity_nominal(i)*1000)/1000; + + % Get the rounded log intensity, which we think is what + % is in the third column of the lut. 
Except that if + % the linear intensity is 0, then the entry of the + % third column is zero and not -Inf. Handle + % this for the check, and map the -Inf to the + % log value we use for 0 for further analysis. + log_intensity_rounded(i) = log10(lin_intensity_rounded(i)); + log_intensity_rounded_chk(i) = log_intensity_rounded(i); + if (lin_intensity_rounded(i) == 0) + log_intensity_rounded(i) = log0Value; + log_intensity_rounded_chk(i) = 0; + end + + % Compute the row index into the lut based on the linear intensity + lut_row_index(i) = lin_intensity_rounded(i)*1000+1; + + % Get the corrected linear intensity by + % averaging the nominal linear intensities that + % correspond to the DAC value actually used. + % Since we don't know precisely what the AOM + % does in this case, since the LUT was created + % by interpolation of measurements that were + % made long ago, that seems as clever as + % anything else I can think of. One could also + % use the min or the max of the ambiguous set + % of values, or any weighted average. + dac_intensity_lut(i) = AOM.green_AOM_lut(lut_row_index(i),4); + lutRowIndex = find(AOM.green_AOM_lut(:,4) == dac_intensity_lut(i) ); + if (isempty(lutRowIndex)) + error('No LUT rows correspond to trial dac intensity'); + end + lin_intensity_lut(i) = mean(AOM.green_AOM_lut(lutRowIndex,1)); + + % Convert to log, taking log10(0) to be the + % log0Value + if (lin_intensity_lut(i) == 0) + log_intensity_lut(i) = log0Value; + else + log_intensity_lut(i) = log10(lin_intensity_lut(i)); + end + + % Sanity checks + if (abs(lin_intensity_rounded(i)-AOM.green_AOM_lut(lut_row_index(i),1)) > 1e-5) + error('Do not understand lut table and/or its indexing'); + end + if (abs(log_intensity_rounded_chk(i)-AOM.green_AOM_lut(lut_row_index(i),3)) > 1e-5) + error('Do not understand lut table and/or its indexing'); + end + + % Tag corrected log and linear intensities into the + % all_trials_unpacked variable that we save + all_trials_unpacked{pp,dd,ss,hh,mm}(i,3) = lin_intensity_lut(i); + all_trials_unpacked{pp,dd,ss,hh,mm}(i,4) = log_intensity_lut(i); + end + fprintf('\tdone\n'); + + % Figure to make sure LUT conversion is basically + % the identity. Uncomment if you want to look at + % this. + h1 = figure('visible',figureVis); subplot(1,2,1); hold on; + plot(lin_intensity_nominal,lin_intensity_lut,'ro','MarkerFaceColor','r'); + axis('square'); axis([0 1 0 1]); + plot([0 1],[0 1],'k:'); + xlabel('Linear intensity nominal'); + ylabel('Linear intensity LUT'); + title({ sprintf('%s, %s, session %d, split %s, size %d',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow},theDiameter(tableRow)) ; ... + ''},'FontSize',10); + subplot(1,2,2); hold on; + plot(lin_intensity_nominal,lin_intensity_lut,'ro','MarkerFaceColor','r'); + axis('square'); + if (theDiameters(dd) == min(theDiameters)) + axis([0 0.1 0 0.1]); + plot([0 0.1],[0 0.1],'k:'); + else + axis([0 0.01 0 0.01]); + plot([0 0.01],[0 0.01],'k:'); + end + xlabel('Linear intensity nominal'); + ylabel('Linear intensity LUT'); + title({ sprintf('%s, %s, session %d, split %s, size %d',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow},theDiameter(tableRow)) ; ... 
+ ''},'FontSize',10); + drawnow; + saveas(h1,fullfile(pathToAnalysis,'quantizationFig.tif'),'tif'); + + h2 = figure('visible',figureVis); hold on; + if (convertToDb) + plot(10*log_intensity_nominal,10*log_intensity_lut,'ro','MarkerFaceColor','r'); + plot([-36 1],[-36 1],'k:'); + xlabel('Intensity nominal (dB)'); + ylabel('Intensity LUT (dB)'); + axis([-36 1 -36 1]); + else + plot(log_intensity_nominal,log_intensity_lut,'ro','MarkerFaceColor','r'); + plot([-3.6 0.1],[-3.6 0.1],'k:'); + xlabel('Log10 intensity nominal'); + ylabel('Log10 intensity LUT'); + axis([-3.6 0.1 -3.6 0.1]); + end + title({ sprintf('%s, %s, session %d, split %s, size %d',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow},theDiameter(tableRow)) ; ... + ''},'FontSize',10); + axis('square'); + drawnow; + saveas(h2,fullfile(pathToAnalysis,'quantizationLogFig.tif'),'tif'); + + % Here is the format of all_trials_unpacked + % + % These values are not adjusted for power measurements. + % they are scaled with the instrument maximum having a + % linear intensity of 1. + % + % Column 1 - nominal log10 intensity + % Column 2 - 1 = seen, 0 = not seen + % Column 3 - lut corrected linear intensity + % Column 4 - lut corrected log intensity + % + % Split the data + % + % This currently does not respect the details of the experimental + % design (e.g. does not split the trials for each MOCS level). + % It's possible this should be moved into CombineTrials.m + fprintf('\tSplitting data as specified\n') + nTrials = size(all_trials_unpacked{pp,dd,ss,hh,mm},1); + shuffleIndex = Shuffle(1:nTrials); + switch (theSplit{tableRow}) + case 'All' + dataIndex = 1:nTrials; + case 'FirstHalf' + dataIndex = shuffleIndex(1:round(nTrials/2)); + case 'SecondHalf' + dataIndex = shuffleIndex(round(nTrials/2)+1:nTrials); + otherwise + error('Unknown split specified'); + end + all_trials_unpacked{pp,dd,ss,hh,mm} = all_trials_unpacked{pp,dd,ss,hh,mm}(dataIndex,:); + nTrials = size(all_trials_unpacked{pp,dd,ss,hh,mm},1); + + % Extract the core data to fit + trial_log_intensities = all_trials_unpacked{pp,dd,ss,hh,mm}(:,4); + trial_responses = all_trials_unpacked{pp,dd,ss,hh,mm}(:,2); + + % Convert to dB? + if (convertToDb) + trial_log_intensities = 10*trial_log_intensities; + log0ValueUse = 10*log0Value; + end + + % Staircase plot. We only do this for full sessions and only for MOCS + % and QUEST, not COMBINED. + % + % We take advantage of the fact that we know the data + % were concatenated run-by-run. Maybe not the most secure coding, but + % OK at least for now. 
+ switch (theSplit{tableRow}) + case 'All' + if (strcmp(theMethod{tableRow},'MOCS') | strcmp(theMethod{tableRow},'QUEST')) + nRuns = 4; + nTrialsPerRun = length(trial_responses)/nRuns; + if (round(nTrialsPerRun) ~= nTrialsPerRun) + error('Do not understand how data were concatenated'); + end + for zz = 1:nRuns + run_log_intensities = trial_log_intensities((zz-1)*nTrialsPerRun+1:zz*nTrialsPerRun); + run_responses = trial_responses((zz-1)*nTrialsPerRun+1:zz*nTrialsPerRun); + hs = figure('Visible',figureVis); hold on + set(gca,'FontName','Helvetica','FontSize',14); + runIndicator = 1:nTrialsPerRun; + plot(runIndicator,run_log_intensities,'r','LineWidth',2); + index = find(run_responses == 1); + plot(runIndicator(index),run_log_intensities(index),'o','MarkerSize',8,'Color',[0.5 1 0.5],'MarkerFaceColor',[0.5 1 0.5]); + index = find(run_responses == 0); + plot(runIndicator(index),run_log_intensities(index),'^','MarkerSize',8,'Color',[0.5 0.5 1],'MarkerFaceColor',[0.5 0.5 1]); + xlabel('Trial number','FontSize',18) + if (convertToDb) + ylabel('Trial intensity (dB)','FontSize',18); + ylim([-36 1]); + else + ylabel('Log trial intensity (au)','FontSize',18); + ylim([-3.6 0.1]); + end + xlim(([0 100])); + title({ sprintf('Subject %s, %s, session %d, split %s',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow}) ; ... + sprintf('Diameter %d arcmin, run %d',theDiameter(tableRow),zz) ; ''},'FontSize',20); + drawnow; + saveas(hs,fullfile(pathToAnalysis,sprintf('staircasePlot_run%d.tif',zz)),'tif'); + title(''); + drawnow; + saveas(hs,fullfile(pathToAnalysis,sprintf('staircasePlotNoTitle_run%d.tif',zz)),'tif'); + end + end + end + + % Set beta for this size + switch (theDiameter(tableRow)) + case 8 + slopeLowerUse(tableRow,1) = slopeLower8; + slopeUpperUse(tableRow,1) = slopeUpper8; + case 43 + slopeLowerUse(tableRow,1) = slopeLower43; + slopeUpperUse(tableRow,1) = slopeUpper43; + otherwise + end + guessUpperUse(tableRow,1) = guessUpper; + lapseUpperUse(tableRow,1) = lapseUpper; + + % Fit the psychometric function. The fitting routine makes + % a plot and we adjust the title here for things the + % fitting routine doesn't know about. + fprintf('\tCalling top level PF fit routine\n'); + [plot_log_intensities,plot_psychometric,corrected_psychometric, ... + log_threshold,corrected_log_threshold,psiParamsValues,h] = FitPsychometricFunction(trial_log_intensities,trial_responses,log0ValueUse,thresholdCriterion, ... + convertToDb,true,figureVis,guessUpperUse(tableRow),lapseUpperUse(tableRow),slopeLowerUse(tableRow),slopeUpperUse(tableRow)); + fprintf('\tDone with top level PF fit\n'); + if (convertToDb) + title({ sprintf('Subject %s, %s, session %d, split %s',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow}) ; ... + sprintf('Diameter %d arcmin, threshold (dB) %0.2f',theDiameter(tableRow),corrected_log_threshold) ; ... + sprintf('Slope %0.1f, guess %0.2f, lapse %0.2f',psiParamsValues(2),psiParamsValues(3),psiParamsValues(4)) ; ''},'FontSize',20); + else + title({ sprintf('Subject %s, %s, session %d, split %s',theSubject{tableRow},theMethod{tableRow},theSession(tableRow),theSplit{tableRow}) ; ... + sprintf('Diameter %d arcmin, log threshold %0.2f',theDiameter(tableRow),corrected_log_threshold) ; ... 
+ sprintf('Slope %0.1f, guess %0.2f, lapse %0.2f',psiParamsValues(2),psiParamsValues(3),psiParamsValues(4)) ; ''},'FontSize',20); + end + drawnow; + saveas(h,fullfile(pathToAnalysis,'psychometricFcn.tif'),'tif'); + + % Estimate guess and lapse rates from the data + index = find(trial_log_intensities == log0ValueUse); + if (~isempty(index)) + guessEstFromData(tableRow,1) = mean(trial_responses(index)); + else + guessEstFromData(tableRow,1) = -1; + end + nTrialsGuessEstFromData(tableRow,1) = length(index); + index = find(trial_log_intensities > corrected_log_threshold + lapseEstIntensityThreshold); + if (~isempty(index)) + lapseEstFromData(tableRow,1) = 1-mean(trial_responses(index)); + else + lapseEstFromData(tableRow,1) = -1; + end + nTrialsLapseEstFromData(tableRow,1) = length(index); + + % Bootstrap the data + fprintf('\tBootstrapping data\n') + nTrialsForBootstrap = length(trial_log_intensities); + bootstrap_log_intensities = zeros(nTrialsForBootstrap,nBootstraps); + bootstrap_responses = zeros(nTrialsForBootstrap,nBootstraps); + switch(theMethod{tableRow}) + case {'QUEST', 'COMBINED'} + % For QUEST and COMBINED, we draw with replacement from all + % the trials except the catch trials, which we draw from separately. + for bb = 1:nBootstraps + temp_boot_intensities = []; + temp_boot_responses = []; + + % Catch trials + uindex = find(trial_log_intensities == log0ValueUse); + utemp_intensities = trial_log_intensities(uindex); + utemp_responses = trial_responses(uindex); + bootIndex = randi(length(uindex),length(uindex),1); + temp_boot_intensities = [temp_boot_intensities ; utemp_intensities(bootIndex)]; + temp_boot_responses = [temp_boot_responses ; utemp_responses(bootIndex)]; + + % Non catch trials + uindex = find(trial_log_intensities ~= log0ValueUse); + utemp_intensities = trial_log_intensities(uindex); + utemp_responses = trial_responses(uindex); + bootIndex = randi(length(uindex),length(uindex),1); + temp_boot_intensities = [temp_boot_intensities ; utemp_intensities(bootIndex)]; + temp_boot_responses = [temp_boot_responses ; utemp_responses(bootIndex)]; + + bootstrap_log_intensities(:,bb) = temp_boot_intensities; + bootstrap_responses(:,bb) = temp_boot_responses; + end + clear lutRowIndex + + case 'MOCS' + % For MOCS, we respect the experimental design + % and bootstrap each stimulus intensity separately. + unique_log_intensities = unique(trial_log_intensities); + for bb = 1:nBootstraps + temp_boot_intensities = []; + temp_boot_responses = []; + for uu = 1:length(unique_log_intensities) + uindex = find(trial_log_intensities == unique_log_intensities(uu)); + utemp_intensities = trial_log_intensities(uindex); + utemp_responses = trial_responses(uindex); + + bootIndex = randi(length(uindex),length(uindex),1); + temp_boot_intensities = [temp_boot_intensities ; utemp_intensities(bootIndex)]; + temp_boot_responses = [temp_boot_responses ; utemp_responses(bootIndex)]; + end + bootstrap_log_intensities(:,bb) = temp_boot_intensities; + bootstrap_responses(:,bb) = temp_boot_responses; + end + otherwise + error('Unknown method specified'); + end + + % Bootstrap the fits and extract CI. Add CI to plot a + % a thick blue horizontal bar. + fprintf('\tFitting bootstrapped data\n'); + for bb = 1:nBootstraps + if (rem(bb,100) == 0) + fprintf('\t\tBootstrap fit %d of %d\n',bb,nBootstraps); + end + [~,~,~,~,boot_corrected_threshold(bb),~,~] = FitPsychometricFunction(bootstrap_log_intensities(:,bb),bootstrap_responses(:,bb),log0ValueUse,thresholdCriterion, ... 
+ convertToDb,false,false,guessUpperUse(tableRow),lapseUpperUse(tableRow),slopeLowerUse(tableRow),slopeUpperUse(tableRow));
+ end
+ fprintf('\tDone fitting bootstrapped data\n');
+ boot_quantiles = quantile(boot_corrected_threshold,[0.025 0.5 0.975]);
+ % figure(h);
+ plot([boot_quantiles(1) boot_quantiles(3)],[thresholdCriterion thresholdCriterion],'b','LineWidth',4);
+ saveas(h,fullfile(pathToAnalysis,'psychometricFcnCI.tif'),'tif');
+
+ % Save a version without our informative title, for
+ % paper figures
+ title('');
+ saveas(h,fullfile(pathToAnalysis,'psychometricFcnCINoTitle.tif'),'tif');
+
+ % Save what we learned
+ fprintf('\tSaving ... ');
+ this_all_trials_unpacked = all_trials_unpacked{pp,dd,ss,hh,mm};
+ save(fullfile(pathToAnalysis,'fitOutput.mat'),'this_all_trials_unpacked','plot_log_intensities','plot_psychometric', ...
+ 'corrected_psychometric','log_threshold','corrected_log_threshold','psiParamsValues','boot_corrected_threshold','boot_quantiles','-v7.3');
+ fprintf('done\n');
+
+ % Accumulate data
+ theTableLogThreshold(tableRow,1) = log_threshold;
+ theTableCorrectedLogThreshold(tableRow,1) = corrected_log_threshold;
+ theTableCorrectedLogThresholdBootMedian(tableRow,1) = boot_quantiles(2);
+ theTableCorrectedBootCILow(tableRow,1) = boot_quantiles(1);
+ theTableCorrectedBootCIHigh(tableRow,1) = boot_quantiles(3);
+ theTablePsiParamsValues(tableRow,:) = psiParamsValues;
+ theTableThresholdCriterion(tableRow,1) = thresholdCriterion;
+
+ % Clear and close
+ clear bootstrap_log_intensities bootstrap_responses boot_corrected_threshold boot_quantiles
+ clear dac_intensity_lut lin_intensity_lut lin_intensity_nominal lin_intensity_rounded
+ clear log_intensity_lut log_intensity_nominal log_intensity_rounded log_intensity_rounded_chk lut_row_index
+ clear plot_log_intensities plot_psychometric shuffleIndex
+ clear temp_boot_intensities temp_boot_responses trial_log_intensities trial_responses
+
+ % Bump table row
+ tableRow = tableRow + 1;
+ end
+ end
+ end
+ end
+
+ % Close figures this subject
+ close all hidden;
+
+ % Could make summary figures for this subject here
+
+end
+
+% Write out full analysis data table
+fprintf('\nWriting xlsx file ... ')
+if (convertToDb)
+ tableVariableNames = {'Subject','Diameter','Session','Method','Split','Threshold (dB)','Corrected Threshold (dB)','CI Low','CI High', 'Alpha','Beta','Guess','Lapse', ...
+ 'Criterion','SlopeLimitLow','SlopeLimitHigh','guessLimit','lapseLimit','guessEstFromData','nTrialsGuessEstFromData','lapseEstFromData','nTrialsLapseEstFromData'};
+ thresholdPlaces = 1;
+else
+ tableVariableNames = {'Subject','Diameter','Session','Method','Split','Log10 Threshold','Corrected Log10 Threshold','CI Low','CI High', 'Alpha','Beta','Guess','Lapse', ...
+ 'Criterion','SlopeLimitLow','SlopeLimitHigh','guessLimit','lapseLimit','guessEstFromData','nTrialsGuessEstFromData','lapseEstFromData','nTrialsLapseEstFromData'};
+ thresholdPlaces = 2;
+end
+dataTable = table(theSubject,theDiameter,theSession,theMethod,theSplit, ...
+ round(theTableLogThreshold,thresholdPlaces),round(theTableCorrectedLogThreshold,thresholdPlaces),round(theTableCorrectedBootCILow,thresholdPlaces), round(theTableCorrectedBootCIHigh,thresholdPlaces), ...
+ round(theTablePsiParamsValues(:,1),2),round(theTablePsiParamsValues(:,2),2), round(theTablePsiParamsValues(:,3),3), round(theTablePsiParamsValues(:,4),3), ...
+ theTableThresholdCriterion, ...
+ slopeLowerUse,slopeUpperUse,guessUpperUse,lapseUpperUse, ...
+ round(guessEstFromData,3), nTrialsGuessEstFromData,round(lapseEstFromData,3),nTrialsLapseEstFromData, ... + 'VariableNames',tableVariableNames); +writetable(dataTable,fullfile(analysisDir,outputVariant,'fitTable.xlsx'),'FileType','spreadsheet'); +fprintf('done\n'); + diff --git a/code/SummarizePFSlopes.m b/code/SummarizePFSlopes.m new file mode 100644 index 0000000..c4f4e7e --- /dev/null +++ b/code/SummarizePFSlopes.m @@ -0,0 +1,121 @@ + +%% Clear +clear; close all; + +%% Output variant +outputVariant = 'SlopeFree'; + +%% Parameters +axisFontSize = 14; +labelFontSize = 18; +titleFontSize = 20; +legendFontSize = 14; +markerSize = 12; + +%% Path to analysis output dir +analysisDir = getpref('AOMicroRepeat','analysisDir'); + +%% Read table of fit information +wState = warning('off','MATLAB:table:ModifiedAndSavedVarnames'); +dataTable = readtable(fullfile(analysisDir,outputVariant,'fitTable.xlsx'),'FileType','spreadsheet'); %,'VariableNamingRule','preserve'); +warning(wState); + +%% Get all MOCS beta for full sessions at 8 arcmin +theDiameter = 8; +theSubjects = unique(dataTable.Subject); +theDiameters = unique(dataTable.Diameter); +theSessions = unique(dataTable.Session); +for pp = 1:length(theSubjects) + for dd = find(theDiameters == theDiameter) + for ss = 1:length(theSessions) + index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss) & ... + strcmp(dataTable.Method,'MOCS') & strcmp(dataTable.Split,'All')); + betaMOCS(pp,dd,ss) = dataTable.Beta(index); + lapseFromFitMOCS(pp,dd,ss) = dataTable.Lapse(index); + lapseFromDataMOCS(pp,dd,ss) = dataTable.lapseEstFromData(index); + lapseNTrialsFromDataMOCS(pp,dd,ss) = dataTable.nTrialsLapseEstFromData(index); + guessFromFitMOCS(pp,dd,ss) = dataTable.Guess(index); + guessFromDataMOCS(pp,dd,ss) = dataTable.guessEstFromData(index); + guessNTrialsFromDataMOCS(pp,dd,ss) = dataTable.nTrialsGuessEstFromData(index); + end + end +end +fprintf('MOCS, size %d, mean beta %0.2f, stdev %0.3f, lapse from fit %0.3f, lapse from data %0.3f (%0.1f), guess from fit %0.3f, guess from data %0.3f (%0.1f)\n', ... + theDiameter,mean(betaMOCS(:)),std(betaMOCS(:)), ... + mean(lapseFromFitMOCS(:)),mean(lapseFromDataMOCS(:)),mean(lapseNTrialsFromDataMOCS(:)), ... + mean(guessFromFitMOCS(:)),mean(guessFromDataMOCS(:)),mean(guessNTrialsFromDataMOCS(:))); + +%% Get all QUEST beta for full sessions at 8 arcmin +theSubjects = unique(dataTable.Subject); +theDiameters = unique(dataTable.Diameter); +theSessions = unique(dataTable.Session); +for pp = 1:length(theSubjects) + for dd = find(theDiameters == theDiameter) + for ss = 1:length(theSessions) + index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss) & ... 
+ strcmp(dataTable.Method,'QUEST') & strcmp(dataTable.Split,'All')); + betaQUEST(pp,dd,ss) = dataTable.Beta(index); + lapseFromFitQUEST(pp,dd,ss) = dataTable.Lapse(index); + lapseFromDataQUEST(pp,dd,ss) = dataTable.lapseEstFromData(index); + lapseNTrialsFromDataQUEST(pp,dd,ss) = dataTable.nTrialsLapseEstFromData(index); + guessFromFitQUEST(pp,dd,ss) = dataTable.Guess(index); + guessFromDataQUEST(pp,dd,ss) = dataTable.guessEstFromData(index); + guessNTrialsFromDataQUEST(pp,dd,ss) = dataTable.nTrialsGuessEstFromData(index); + end + end +end +index = find(lapseNTrialsFromDataQUEST(:) >= 10); +fprintf('QUEST, size %d, mean beta %0.2f, stdev %0.3f, lapse from fit %0.3f, lapse from data %0.3f (%0.1f), guess from fit %0.3f, guess from data %0.3f (%0.1f)\n', ... + theDiameter,mean(betaQUEST(:)),std(betaQUEST(:)), ... + mean(lapseFromFitQUEST(:)),mean(lapseFromDataQUEST(index)),mean(lapseNTrialsFromDataQUEST(index)), ... + mean(guessFromFitQUEST(:)),mean(guessFromDataQUEST(:)),mean(guessNTrialsFromDataQUEST(:))); + +%% Get all MOCS beta for full sessions at 43 arcmin +theDiameter = 43; +theSubjects = unique(dataTable.Subject); +theDiameters = unique(dataTable.Diameter); +theSessions = unique(dataTable.Session); +for pp = 1:length(theSubjects) + for dd = find(theDiameters == theDiameter) + for ss = 1:length(theSessions) + index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss) & ... + strcmp(dataTable.Method,'MOCS') & strcmp(dataTable.Split,'All')); + betaMOCS(pp,dd,ss) = dataTable.Beta(index); + lapseFromFitMOCS(pp,dd,ss) = dataTable.Lapse(index); + lapseFromDataMOCS(pp,dd,ss) = dataTable.lapseEstFromData(index); + lapseNTrialsFromDataMOCS(pp,dd,ss) = dataTable.nTrialsLapseEstFromData(index); + guessFromFitMOCS(pp,dd,ss) = dataTable.Guess(index); + guessFromDataMOCS(pp,dd,ss) = dataTable.guessEstFromData(index); + guessNTrialsFromDataMOCS(pp,dd,ss) = dataTable.nTrialsGuessEstFromData(index); + end + end +end +fprintf('MOCS, size %d, mean beta %0.2f, stdev %0.3f, lapse from fit %0.3f, lapse from data %0.3f (%0.1f), guess from fit %0.3f, guess from data %0.3f (%0.1f)\n', ... + theDiameter,mean(betaMOCS(:)),std(betaMOCS(:)), ... + mean(lapseFromFitMOCS(:)),mean(lapseFromDataMOCS(:)),mean(lapseNTrialsFromDataMOCS(:)), ... + mean(guessFromFitMOCS(:)),mean(guessFromDataMOCS(:)),mean(guessNTrialsFromDataMOCS(:))); + +%% Get all QUEST beta for full sessions at 43 arcmin +theSubjects = unique(dataTable.Subject); +theDiameters = unique(dataTable.Diameter); +theSessions = unique(dataTable.Session); +for pp = 1:length(theSubjects) + for dd = find(theDiameters == theDiameter) + for ss = 1:length(theSessions) + index = strcmp(dataTable.Subject,theSubjects{pp}) & (dataTable.Diameter == theDiameters(dd)) & (dataTable.Session == theSessions(ss) & ... 
+ strcmp(dataTable.Method,'QUEST') & strcmp(dataTable.Split,'All'));
+ betaQUEST(pp,dd,ss) = dataTable.Beta(index);
+ lapseFromFitQUEST(pp,dd,ss) = dataTable.Lapse(index);
+ lapseFromDataQUEST(pp,dd,ss) = dataTable.lapseEstFromData(index);
+ lapseNTrialsFromDataQUEST(pp,dd,ss) = dataTable.nTrialsLapseEstFromData(index);
+ guessFromFitQUEST(pp,dd,ss) = dataTable.Guess(index);
+ guessFromDataQUEST(pp,dd,ss) = dataTable.guessEstFromData(index);
+ guessNTrialsFromDataQUEST(pp,dd,ss) = dataTable.nTrialsGuessEstFromData(index);
+ end
+ end
+end
+index = find(lapseNTrialsFromDataQUEST(:) >= 10);
+fprintf('QUEST, size %d, mean beta %0.2f, stdev %0.3f, lapse from fit %0.3f, lapse from data %0.3f (%0.1f), guess from fit %0.3f, guess from data %0.3f (%0.1f)\n', ...
+ theDiameter,mean(betaQUEST(:)),std(betaQUEST(:)), ...
+ mean(lapseFromFitQUEST(:)),mean(lapseFromDataQUEST(index)),mean(lapseNTrialsFromDataQUEST(index)), ...
+ mean(guessFromFitQUEST(:)),mean(guessFromDataQUEST(:)),mean(guessNTrialsFromDataQUEST(:)));
diff --git a/code/TerminalSnippets.m b/code/TerminalSnippets.m
new file mode 100644
index 0000000..aff24bb
--- /dev/null
+++ b/code/TerminalSnippets.m
@@ -0,0 +1,16 @@
+% Terminal snippets to open various psychometric functions
+
+cd /Users/dhb/Aguirre-Brainard\ Lab\ Dropbox/David\ Brainard/AOPY_analysis/AOMicroRepeat
+cd SlopeFree
+cd SlopeFixed
+
+open */*/Size43/COMBINED/All/psycho*CI.tif
+
+open */*/*/MOCS/All/psycho*CI.tif
+open */*/Size8/MOCS/All/psycho*CI.tif
+open */*/Size8/QUEST/All/psycho*CI.tif
+open */*/Size43/MOCS/All/psycho*CI.tif
+open */*/Size43/QUEST/All/psycho*CI.tif
+
+open */*/Size8/COMBINED/All/psycho*CI.tif */*/Size43/COMBINED/All/psycho*CI.tif
+
diff --git a/code/blandAltmanPlots/BlandAltman.m b/code/blandAltmanPlots/BlandAltman.m
new file mode 100644
index 0000000..1c70c0a
--- /dev/null
+++ b/code/blandAltmanPlots/BlandAltman.m
@@ -0,0 +1,870 @@
+%BlandAltman Draws a Bland-Altman and correlation graph for two datasets.
+%
+% BlandAltman(Data1, Data2) - Data1 and Data2 have to be of the same size
+% and can be grouped for display purposes. 3rd dimension is encoded by
+% colors and 2nd dimension by symbols. The 1st dimension contains
+% measurements within the groups.
+%
+% BlandAltman(Data1,Data2,Label) - Names of data sets. Formats can be
+% - {'Name1'}
+% - {'Name1', 'Name2'}
+% - {'Name1', 'Name2', 'Units'}
+%
+% BlandAltman(Data1,Data2,Label,Tit) - Specifies the title to display at
+% the top of the figure.
+%
+% BlandAltman(Data1,Data2,Label,Tit,GNames) - Specifies the names of the
+% groups for the legend.
+%
+% BlandAltman(Fig, ...) - specify a figure handle in which the
+% Bland-Altman and correlation will be displayed.
+%
+% BlandAltman(ah, ...) - specify an axes which will be replaced by the
+% Bland-Altman and correlation axes.
+%
+% rpc = BlandAltman(...) - return the coefficient of reproducibility
+% (1.96 times the standard deviation of the differences).
+%
+% [rpc fig] = BlandAltman(...) - also return the figure handles.
+%
+% [rpc fig sstruct] = BlandAltman(...) - also return the structure of
+% statistics for the analysis.
+%
+% BlandAltman(..., GNames, Parameter, Value) - call with parameter and
+% value pairs using the following parameters:
+%
+% 'corrInfo' - specifies what information to display on the correlation
+% plot, as a cell array of strings in order from top to bottom. The
+% following codes are available:
+% - 'eq' - slope and intercept equation
+% - 'r' - Pearson r-value
+% - 'r2' - Pearson r-value squared
+% - 'p' - Pearson correlation p-value
+% - 'rho' - Spearman rho value
+% - 'rho (p)' - Spearman rho value and p-value
+% - 'R' - Coefficient of correlation. See R2.
+% - 'R2' - Coefficient of determination. Equal to coefficient of
+% correlation (R) squared. As opposed to Pearson correlation, which
+% is used to qualify agreement between datasets, coefficient of
+% determination is used to qualify a model (linear regression of
+% the correlation plot) fit.
+% - 'SSE' - sum of squared error for the linear regression fit (note
+% that this is not the same as SSE for the Bland-Altman analysis)
+% - 'RMSE' - root mean squared error for the linear regression fit
+% (note that this is not the same as RMSE for the Bland-Altman
+% analysis)
+% - 'n' - number of data points used
+% {default = {'eq';'r2';'SSE';'n'} }
+%
+% 'baInfo' - specifies what information to display on the Bland-Altman plot,
+% similar to corrInfo, but with the following codes:
+% - 'SD' - standard deviation of the differences
+% - 'RPC' - reproducibility coefficient (1.96*SD)
+% - 'LOA' - limits of agreement (1.96*SD) - same as RPC but different labelling
+% - 'RPC(%)' - reproducibility coefficient and % of values
+% - 'LOA(%)' - limits of agreement and % of values
+% - 'CV' - coefficient of variation (SD of mean values in %)
+% - 'IQR' - interquartile range.
+% - 'RPCnp' - RPC estimate based on IQR (non-parametric statistics) where
+% RPCnp = 1.45*IQR ~ RPC (if distribution of differences is
+% normal).
+% See: Peck, Olsen and Devore, Introduction to Statistics and
+% Data Analysis. Nelson Education, 2011.
+% - 'ks' - Kolmogorov-Smirnov test that difference-data is Gaussian
+% - 'kurtosis' - kurtosis test that difference-data is Gaussian
+% - 'skewness' - skewness test results
+% - 'SSE' - sum of squares of the differences (note that this is not
+% the same as SSE for the correlation analysis)
+% - 'RMSE' - root mean square of the differences (note that this is
+% not the same as RMSE for the correlation analysis)
+% {default = {'RPC(%)';'CV'} }
+%
+% 'axesLimits' - specifies the axes limits:
+% - scalar - lower limit (e.g. 0)
+% - [min max] - specifies minimum and maximum
+% - 'tight' - minimum and maximum of data.
+% - 'auto' - plot default. {default}
+% - 'auto0' - same as auto with 0,0 minima.
+%
+% 'colors' - specify the order of group colors. (e.g. 'brg' for blue,
+% red, green) or RGB columns. {default = 'brgmcky'}
+%
+% 'symbols' - specify the order of symbols. (e.g. 'sod.' for squares,
+% circles, diamonds, dots). Alternatively can be set to 'Num' to display
+% the subject number. {default = 'sodp^v'};
+%
+% 'markerSize' - set the size of the symbols on the plot (or font size
+% if using 'Num' mode for symbols). {default is 4}
+%
+% 'data1Mode' - how to treat data set 1:
+% - 'Compare' - data sets 1 and 2 are being compared. Means of data1
+% and data2 are used for x-coordinates on Bland-Altman. {default}
+% - 'Truth' - data set 1 is considered a true reference by which data2
+% is being evaluated. Data 1 values are used for x-coordinates on
+% Bland-Altman.
+%
+% 'forceZeroIntercept' - force the y-intercept of the linear fit on the
+% correlation analysis to zero. {default is 'off'}
+%
+% 'showFitCI' - show fit line confidence intervals on correlation plot.
+% {default is 'off'};
+%
+% 'diffValueMode' - Units for differences:
+% - 'Absolute' - same units as the data {default}
+% - 'relative' - differences are normalized to the reference data
+% (mean or data 1 depending on the data1Mode option).
+% - 'percent' - same as relative, but in percent units.
+%
+% 'baYLimMode' - Mode for setting y-lim on BA axes.
+% - 'Auto' - Automatically fit to the data.
+% - 'Square' - Preserve 1:1 aspect ratio with x-axis and 0 is
+% centered. {default}
+% - [min max] - specifies minimum and maximum
+%
+% 'baStatsMode' - Statistical analysis mode for Bland-Altman (differences).
+% - 'Normal' - normal (Gaussian) distributed statistics
+% - 'Gaussian' - same as 'Normal'.
+% - 'Non-parametric' - non-parametric statistics.
+% * NOTE: Gaussian distribution is tested using the Kolmogorov-
+% Smirnov test. If the data seem to violate the assumption of
+% distribution type, a warning message is generated.
+%
+% 'legend' - show legend.
+% - 'On' - show legend {default}
+% - 'Off' - don't display the legend.
+%
+% See also correlationPlot
+
+% By Ran Klein, The Ottawa Hospital, Department of Nuclear Medicine
+
+% 2013-02-20 RK Added unit labeling in SSE and RPC labels.
+% 2013-02-20 RK Switched to equal scaling on BA y axis.
+% 2014-01-15 RK Added colors and symbols input arguments.
+% 2016-08-10 RK Major overhaul with addition of parameter-value pairs to
+% support the following new features: data1Mode,
+% forceZeroIntercept, showFitCI, diffValueMode, baYlimMode,
+% baStatsMode.
+% 2017-05-05 RK Added support for data consisting of nan values.
+% 2017-09-05 RK Separated out correlationPlot function so that it can be
+% called independently of BA (unsupported feature for now).
+% Corrected a bug where baYLimMode did not work, and improved
+% labelling in absence of units.
+% 2018-12-12 RK Formally added correlationPlot function to only plot the
+% correlation plot without the Bland-Altman.
+% Text is moved into place if axes limits change.
+% 2019-08-23 RK Replaced ranksum with signrank as data is paired
+% (feedback from Smilla on Mathworks Community credited).
+% 2024-01-15 RK Added baYLimMode = [min max] - specifies minimum and
+% maximum of the Bland-Altman y-axis.
+% 2024-01-17 RK Added SSE and RMSE for differences on the Bland-Altman
+% plots.
+% 2024-01-17 RK Added coefficient of determination and coefficient of
+% correlation (R2 and R respectively) to correlation
+% analysis.
+
+function [rpc, fig, stats] = BlandAltman(varargin)
+
+[fig, data, params] = ParseInputArguments(varargin{:});
+[cAH, baAH, fig, drawAreaPos] = ConfigAxes(fig, params.processCorrelationOnly);
+
+% Correlation plot
+stats = CalcCorrelationStats(data, params);
+PlotCorrelation(cAH, data, params);
+params = FormatPlotAxes(cAH, data, params);
+DisplayCorrelationStats(cAH, params, stats);
+
+addTitle(cAH, baAH, drawAreaPos, params);
+if strcmpi(params.Legend,'On')
+ addLegend(cAH, drawAreaPos, params);
+end
+
+if strcmpi(params.processCorrelationOnly,'On')
+ rpc = [];
+ return
+end
+
+% Bland-Altman plot of differences
+[stats, data, params] = CalcBAStats(stats, data, params);
+params = PlotBA(baAH, data, stats, params);
+DisplayBAStats(baAH, params, stats)
+
+rpc = stats.rpc;
+
+%% Helper functions
+
+function [fig, data, params] = ParseInputArguments(varargin)
+
+if nargin<1
+ msgID='BlandAltman:narginchk:notEnoughInputs';
+ error(msgID,'No inputs specified. At least two equal sized datasets are required.')
+end
+
+% If 1st parameter is a figure/axes handle then all other parameters are
+% shifted by one.
+if isscalar(varargin{1}) && numel(varargin{1})==1 && ishandle(varargin{1})
+ shift = 1;
+ fig = varargin{1};
+else % no figure/handle is specified. A new figure will be created.
+ shift = 0;
+ fig = [];
+end
+
+% followed by two data sets of equal size
+data.set1 = varargin{shift+1};
+data.set2 = varargin{shift+2};
+if nargin>=shift+3
+ label = varargin{shift+3};
+else
+ label = '';
+end
+if nargin>=shift+4
+ params.tit = varargin{shift+4};
+else
+ params.tit = '';
+end
+if nargin>=shift+5
+ params.gnames = varargin{shift+5};
+else
+ params.gnames = '';
+end
+
+% default values
+params.corrInfo = {'eq';'r2';'SSE';'n'};
+params.baInfo = {'RPC(%)';'CV'};
+params.defaultBaInfo = true;
+params.axesLimits = 'auto';
+params.colors = 'brgmcky';
+params.symbols = 'sodp^v';
+params.markerSize = 4;
+params.data1TreatmentMode = 'Compare';
+params.forceZeroIntercept = 'off';
+params.showFitCI = 'off';
+params.baYLimMode = 'Squared';
+params.baStatsMode = 'Normal';
+params.diffValueMode = 'Absolute';
+params.processCorrelationOnly = 'Off';
+params.Legend = 'On';
+axesLimitsSpecified = false;
+
+% parse parameter value pair options
+i = shift+6;
+while length(varargin)>i
+ parameter = varargin{i};
+ val = varargin{i+1};
+ switch upper(parameter)
+ case 'CORRINFO'
+ if ischar(val)
+ params.corrInfo = {val};
+ else
+ params.corrInfo = val;
+ end
+
+ case 'BAINFO'
+ if ischar(val)
+ params.baInfo = {val};
+ else
+ params.baInfo = val;
+ end
+ params.defaultBaInfo = false;
+ case 'AXESLIMITS'
+ params.axesLimits = val;
+ axesLimitsSpecified = true;
+ case 'COLORS', params.colors = val;
+ case 'SYMBOLS', params.symbols = val;
+ case 'MARKERSIZE', params.markerSize = val;
+ case 'DATA1MODE', params.data1TreatmentMode = val; % use the 'Compare' mean of data1 and data2 or 'Truth' data1
+ case 'FORCEZEROINTERCEPT', params.forceZeroIntercept = val;
+ case 'SHOWFITCI', params.showFitCI = val;
+ case 'BASTATSMODE', params.baStatsMode = val;
+ case 'DIFFVALUEMODE', params.diffValueMode = val;
+ case 'BAYLIMMODE', params.baYLimMode = val;
+ case 'PROCESSCORRELATIONONLY', params.processCorrelationOnly = val;
+ case 'LEGEND', params.Legend = val;
+ otherwise
+ msgID='BlandAltman:narginchk:unknownParameterName';
+ error(msgID, ['Unknown parameter name ''' parameter ''' encountered'])
+
+ end % of switch statement
+ i = i+2;
+end
+
+% Default axes mode for a correlation plot only does not assume equal x and
+% y axis limits
+if ~axesLimitsSpecified && strcmpi(params.processCorrelationOnly, 'On')
+ params.axesLimits = 'tightNonsquare';
+end
+
+s = size(data.set1);
+switch length(s)
+ case 1
+ s = [s 1 1];
+ case 2
+ s = [s 1];
+ case 3
+ otherwise
+ msgID='BlandAltman:narginchk:dataDimensionTooLarge';
+ error(msgID, 'Data have too many dimensions. Only 1 to 3 dimensions supported.');
+end
+
+% reformat data as an array of elements and store grouping number
+params.numElementsPerGroup = s(1); % number of elements in each group
+params.numGroups = numel(data.set1)/params.numElementsPerGroup;
+params.numGroupsBySymbol = s(2);
+params.numGroupsByColor = s(3);
+
+if ~ischar(params.colors)
+ if size(params.colors,2)~=3
+ if size(params.colors,1)==3
+ params.colors = params.colors';
+ else
+ msgID='BlandAltman:narginchk:unsupportedColorCode';
+ error(msgID, 'Cannot interpret color codes. Colors must be specified in either character codes or RGB');
+ end
+ end
+elseif size(params.colors,1)==1
+ params.colors = params.colors';
+end
+
+%% Display statistics on the correlation plot
+function DisplayCorrelationStats(cAH, params, stats)
+corrtext = {};
+for i = 1:length(params.corrInfo)
+ switch upper(params.corrInfo{i})
+ case 'EQ'
+ if stats.intercept>=0
+ corrtext = [corrtext; ['y=' mynum2str(stats.slope,3,2) 'x+' mynum2str(stats.intercept,3)]];
+ else
+ corrtext = [corrtext; ['y=' mynum2str(stats.slope,3,2) 'x' mynum2str(stats.intercept,3)]];
+ end
+ case 'R2'
+ if strcmp(params.corrInfo{i},'r2')
+ corrtext = [corrtext; ['r^2=' mynum2str(stats.r2,4)]];
+ else
+ corrtext = [corrtext; ['R^2=' mynum2str(stats.R2,4)]];
+ end
+ case 'R'
+ if strcmp(params.corrInfo{i},'r')
+ corrtext = [corrtext; ['r=' mynum2str(stats.r,4)]];
+ else
+ corrtext = [corrtext; ['r=' mynum2str(stats.R,4)]];
+ end
+ case 'P', corrtext = [corrtext; ['p=' num2str(stats.corrP,2)]];
+ case 'RHO', corrtext = [corrtext; ['rho=' mynum2str(stats.rho,4,4)]];
+ case 'RHO (P)', corrtext = [corrtext; ['rho=' mynum2str(stats.rho,4,4) ' (p=' mynum2str(stats.rhoP,2) ')']];
+ case 'SSE', corrtext = [corrtext; ['SSE=' mynum2str(stats.linearRegressionSSE,2) params.unitsStr]];
+ case 'RMSE', corrtext = [corrtext; ['RMSE=' mynum2str(stats.linearRegressionRMSE,2) params.unitsStr]];
+ case 'N', corrtext = [corrtext; ['n=' mynum2str(stats.N,4,0)]];
+ end
+end
+text(params.axesLimits(1),params.axesLimits(4),...
+ corrtext,'HorizontalAlignment','left','VerticalAlignment','top','parent',cAH,'tag','CorrInfoText');
+addlistener(cAH.XRuler, 'MarkedClean', @(src,event)updateCorrAxesCallback(src,event) );
+
+%% Callback function to keep results in place if the Correlation plot axis limits change
+function updateCorrAxesCallback(ah, ~)
+ah = get(ah, 'Parent');
+a = axis(ah);
+ch = get(ah,'children');
+set(findobj(ch, 'flat', 'tag','CorrInfoText'),'Position', [a(1) a(4)]);
+
+
+
+%% Calculate statistics for BA analysis
+function [stats, data, params] = CalcBAStats(stats, data, params)
+
+if strcmpi(params.data1TreatmentMode,'Compare')
+ data.maskedBaRefData = mean([data.maskedSet1,data.maskedSet2],2);
+ data.baRefData = mean([data.set1,data.set2],2);
+else
+ data.maskedBaRefData = data.maskedSet1; % previous version was calculated as RPC/mean of data.
+ data.baRefData = data.set1;
+end
+switch upper(params.diffValueMode)
+ case 'ABSOLUTE'
+ data.maskedDifferences = data.maskedSet2-data.maskedSet1;
+ data.differences = data.set2-data.set1;
+ case 'RELATIVE'
+ data.maskedDifferences = (data.maskedSet2-data.maskedSet1) ./ data.maskedBaRefData;
+ data.differences = (data.set2-data.set1) ./ data.baRefData;
+ case 'PERCENT'
+ data.maskedDifferences = (data.maskedSet2-data.maskedSet1) ./ data.maskedBaRefData*100;
+ data.differences = (data.set2-data.set1) ./ data.baRefData*100;
+end
+stats.differenceSTD = std(data.maskedDifferences);
+stats.differenceMean = mean(data.maskedDifferences);
+stats.differenceMedian = median(data.maskedDifferences);
+[~, stats.differenceMeanP] = ttest(data.maskedDifferences,0);
+stats.differenceMedianP = signrank(data.set1,data.set2);
+stats.differenceSSE = sum(data.maskedDifferences.^2); % added new SSE - this one is for the differences, as opposed to those of the linear regression in the correlation plot
+stats.differenceRMSE = sqrt(stats.differenceSSE/numel(data.maskedDifferences)); % likewise for RMSE
+stats.rpc = 1.96*stats.differenceSTD;
+stats.CV = 100*stats.differenceSTD/mean((data.maskedSet1+data.maskedSet2)/2);
+stats.rpcPercent = 1.96*std(data.maskedDifferences ./ data.maskedBaRefData)*100; % previous version was calculated as RPC/mean of data.
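+% To make the definitions above concrete (a sketch, not part of the
+% analysis pipeline): for paired measurements a and b with differences
+% d = b - a,
+%   rpc = 1.96*std(d)   % half-width of the 95% limits of agreement,
+%                       % i.e. mean(d) +/- rpc under a normal assumption
+%   cv  = 100*std(d)/mean((a+b)/2)   % difference SD as a % of the mean signal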
+stats.IQR = iqr(data.maskedDifferences);
+stats.rpcNP = stats.IQR * 1.45; % estimate of RPC if distribution was Gaussian: see: R. Peck, C. Olsen, and J. Devore, Introduction to Statistics and Data Analysis. Nelson Education, 2011.
+
+if stats.differenceSTD 0.05;
diff --git a/code/blandAltmanPlots/BlandAltmanDemo.m b/code/blandAltmanPlots/BlandAltmanDemo.m
new file mode 100644
index 0000000..c0b330a
--- /dev/null
+++ b/code/blandAltmanPlots/BlandAltmanDemo.m
@@ -0,0 +1,131 @@
+% BlandAltmanDemo - demonstrate calling BlandAltman.m
+%
+% By Ran Klein
+% 2016-08-10 RK Major overhaul of Bland-Altman.m and new functionality is
+% demonstrated.
+% 2018-12-12 RK Added support for correlation plot only
+% (CorrelationPlot.m).
+% Examples added for multiple analyses on a single figure.
+% 2024-01-17 RK Added example 4 for creating a figure with multiple
+% Bland-Altman plots
+
+
+
+%% Generate parameters for data generation
+clear;
+noise = 10/100; % percent
+npatients = 30; % number of patients
+bias = 1; % slope bias
+
+% Heart muscle territories per patient
+territories = {'LAD','LCx','RCA'};
+nterritories = length(territories);
+
+% Patient states during measurement
+states = {'Rest','Stress'};
+nstates = length(states);
+
+% Real flow values
+restFlow = 0.7*(1+0.2*randn(npatients,nterritories));
+stressFlow = 2*(1+0.2*randn(npatients,nterritories));
+
+% Test support for nan values in the data
+if 0
+ restFlow(3,1) = nan;
+end
+
+%% Example 1
+% Baseline data with noise
+data1 = cat(3, restFlow.*(1+noise*randn(npatients,nterritories)), stressFlow.*(1+noise*randn(npatients,nterritories)));
+% Follow-up data with noise and a bias
+data2 = bias * cat(3, restFlow.*(1+noise*randn(npatients,nterritories)), stressFlow.*(1+noise*randn(npatients,nterritories)));
+
+% BA plot parameters
+tit = 'Flow Repeatability'; % figure title
+gnames = {territories, states}; % names of groups in data {dimension 1 and 2}
+label = {'Baseline Flow','Follow-up Flow','mL/min'}; % Names of data sets
+corrinfo = {'n','SSE','r2','eq'}; % stats to display on correlation scatter plot
+BAinfo = {'RPC(%)','ks'}; % stats to display on Bland-Altman plot
+limits = 'auto'; % how to set the axes limits
+if 1 % colors for the data sets may be set as:
+ colors = 'br'; % character codes
+else
+ colors = [0 0 1;... % or RGB triplets
+ 1 0 0];
+end
+
+% Generate figure with symbols
+[cr, fig, statsStruct] = BlandAltman(data1, data2,label,tit,gnames,'corrInfo',corrinfo,'baInfo',BAinfo,'axesLimits',limits,'colors',colors, 'showFitCI','on');
+
+% Generate figure with numbers of the data points (patients) and fixed
+% Bland-Altman difference data axes limits
+BlandAltman(data1, data2,label,[tit ' (numbers, forced 0 intercept, and fixed BA y-axis limits)'],gnames,'corrInfo',corrinfo,'axesLimits',limits,'colors',colors,'symbols','Num','baYLimMode','square','forceZeroIntercept','on')
+
+
+% Generate figure with differences presented as percentages and confidence
+% intervals on the correlation plot
+BAinfo = {'RPC'};
+BlandAltman(data1, data2,label,[tit ' (show fit confidence intervals and differences as percentages)'],gnames,'diffValueMode','percent', 'showFitCI','on','baInfo',BAinfo)
+
+
+% Display statistical results that were returned from analyses
+disp('Statistical results:');
+disp(statsStruct);
+% * Note that a non-Gaussian warning may result for the first two analyses as
+% the two data sets (rest and stress) make the difference data marginally
+% Gaussian (two overlapping Gaussians). This should not be the case with
+% the third analysis as it is in percent difference of the means.
+
+%% Example 2 - using non-Gaussian data and one figure with 2 analyses
+disp('Analysis with non-Gaussian distribution data')
+% Baseline data with non-Gaussian noise
+data1 = cat(3, restFlow.*(1+noise*rand(npatients,nterritories)), stressFlow.*(1+noise*rand(npatients,nterritories)));
+% Follow-up data with non-Gaussian noise and a bias
+data2 = bias * cat(3, restFlow.*(1+noise*rand(npatients,nterritories)), stressFlow.*(1+noise*rand(npatients,nterritories)));
+
+figure('Color','w','Units','centimeters', 'Position',[2 2 20 20])
+ah1 = subplot(211);
+ah2 = subplot(212);
+
+BAinfo = {'RPC(%)','ks'};
+
+[cr, fig, statsStruct] = BlandAltman(ah1, data1, data2,label,[tit ' (inappropriate Gaussian stats)'],gnames,'corrInfo',corrinfo,'baInfo',BAinfo,'axesLimits',limits,'colors',colors);
+% A warning should appear indicating detection of non-Gaussian
+% distribution.
+
+% Repeat analysis using non-parametric analysis; no warning should appear.
+BAinfo = {'RPCnp','ks'};
+[cr, fig, statsStruct] = BlandAltman(ah2, data1, data2,label,[tit ' (using non-parametric stats)'],gnames,'corrInfo',corrinfo,'baInfo',BAinfo,'axesLimits',limits,'colors',colors,'baStatsMode','non-parametric');
+% Keep in mind that alpha is set to 0.05 so there is a 1/20 chance of false
+% warnings.
+
+
+%% Example 3 - Repeat last analysis, but only with correlation
+[cr, fig, statsStruct] = correlationPlot(data1, data2,label,[tit ' (using non-parametric stats)'],gnames,'corrInfo',corrinfo,'colors',colors);
+
+%% Example 4 - Two Bland-Altman plots in one figure
+% This is a bit of a hack, as it does not (yet) support a BlandAltman-only mode,
+% so draw on a temporary figure and copy only the BA axes elements.
+fig = figure('Color','w','Units','centimeters', 'Position',[2 2 20 10]);
+ah1 = subplot(121);
+ah2 = subplot(122);
+tempFig = figure;
+
+BAinfo = {'RPC(%)','ks','SSE','RMSE'};
+baYLim = [-1 1]; % Set the Y axis (difference) limits on the Bland-Altman plots.
+
+[~, tempFig] = BlandAltman(tempFig, data1, data2, label, tit, gnames,'corrInfo',corrinfo,'baInfo',BAinfo,'axesLimits',limits,'colors',colors,'baYlimMode',baYLim);
+ah = copyobj(findobj(get(tempFig,'children'),'flat','tag','Bland Altman Plot'),fig);
+ah.Position = ah1.Position;
+delete(ah1)
+title(ah,'Baseline')
+
+% Repeat analysis with more variance on data2.
+clf(tempFig);
+[~, tempFig] = BlandAltman(tempFig, data1, data2.*(1+0.1*randn(size(data2))), label,[tit ' (with noise)'], gnames,'corrInfo',corrinfo,'baInfo',BAinfo,'axesLimits',limits,'colors',colors,'baYlimMode',baYLim);
+ah = copyobj(findobj(get(tempFig,'children'),'flat','tag','Bland Altman Plot'),fig);
+ah.Position = ah2.Position;
+delete(ah2)
+title(ah,'With noise')
+close(tempFig)
+
diff --git a/code/blandAltmanPlots/correlationPlot.m b/code/blandAltmanPlots/correlationPlot.m
new file mode 100644
index 0000000..4752067
--- /dev/null
+++ b/code/blandAltmanPlots/correlationPlot.m
@@ -0,0 +1,12 @@
+% correlationPlot Wrapper function for Bland-Altman to generate only the
+% correlation plot. For more information please refer to the help on
+% BlandAltman.
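+%
+% Example (hypothetical data, shown only to illustrate the calling
+% convention, which mirrors BlandAltman's):
+%   data1 = randn(30,1);
+%   data2 = data1 + 0.1*randn(30,1);
+%   [r, fig, stats] = correlationPlot(data1, data2, {'Measure A','Measure B'});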
+%
+% See also BlandAltman
+
+% By Ran Klein, The Ottawa Hospital, Department of Nuclear Medicine
+
+function [r, fig, stats] = correlationPlot(varargin)
+varargin = [varargin, 'ProcessCorrelationOnly','On'];
+[~, fig, stats] = BlandAltman(varargin{:});
+r = stats.r;
diff --git a/code/blandAltmanPlots/license.txt b/code/blandAltmanPlots/license.txt
new file mode 100644
index 0000000..a0d3d34
--- /dev/null
+++ b/code/blandAltmanPlots/license.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2017, Ran Klein
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution
+
+* Neither the name of The Ottawa Hospital nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/code/blandAltmanPlots/mynum2str.m b/code/blandAltmanPlots/mynum2str.m
new file mode 100644
index 0000000..81912bb
--- /dev/null
+++ b/code/blandAltmanPlots/mynum2str.m
@@ -0,0 +1,58 @@
+% str = mynum2str(data, sigfig, maxdec) - converts a number to a string
+%
+% mynum2str examples (sigfig=3 and maxdec=2)
+% 123456.0 --> 123000
+% 123.4560 --> 123
+% 12.34560 --> 12.3
+% 1.234560 --> 1.23
+% 0.1234560 --> 0.12
+
+% By Ran Klein 2012???
+% Modified:
+% 2013-12-31 RK Added NA result for empty data.
+
+function str = mynum2str(data, sigfig, maxdec)
+if isempty(data) || ~isfinite(data)
+ str = 'NA';
+ return
+end
+
+if nargin<2
+ sigfig = 3; % number of significant figures to display (before and after .)
+end
+if nargin<3
+ maxdec = 2; % maximum of decimals (after the .)
+end
+
+if isempty(sigfig)
+ sigfig = floor(log10(abs(data)))+ 1 + maxdec;
+end
+e = max(-sigfig+1,floor(log10(abs(data))));
+prec = max(-maxdec,e-sigfig+1); % precision
+p = round(data/10^(prec))*10^(prec);
+str = num2str(p);
+i = find(str=='.');
+% number of figures after the decimal
+if isempty(i)
+ i = length(str)+1;
+ na = 0;
+else
+ na = length(str)-i;
+end
+% number of figures before the decimal
+t = str(1:i-1);
+if strcmpi(t,'0')
+ nb = 0; % a preceding zero doesn't count
+else
+ nb = sum(t>='0' & t<='9');
+end
+
+% number of trailing zeros to add
+n = min(sigfig-(na+nb), maxdec - na);
+if n>0
+ if i>length(str)
+ str = [str '.'];
+ end
+ str = [str '0'*ones(1,n)];
+end
\ No newline at end of file
diff --git a/code/blandAltmanPlots/polyfitZero.m b/code/blandAltmanPlots/polyfitZero.m
new file mode 100644
index 0000000..e6732fd
--- /dev/null
+++ b/code/blandAltmanPlots/polyfitZero.m
@@ -0,0 +1,68 @@
+function [p,S,mu] = polyfitZero(x,y,degree)
+% POLYFITZERO Fit polynomial to data, forcing y-intercept to zero.
+% P = POLYFITZERO(X,Y,N) is similar to POLYFIT(X,Y,N) except that the
+% y-intercept is forced to zero, i.e. P(N) = 0. In the same way as
+% POLYFIT, the coefficients, P(1:N-1), fit the data Y best in the least-
+% squares sense. You can also use Y = POLYVAL(PZERO,X) to evaluate the
+% polynomial because the output is the same as POLYFIT.
+%
+% [P,S,MU] = POLYFITZERO() Return structure, S, similar to POLYFIT for use
+% with POLYVAL to calculate error estimates of predictions with P.
+%
+% [P,S,MU] = POLYFITZERO() Scale X by std(X), returns MU = [0,std(X)].
+%
+% See also POLYVAL, POLYFIT
+%
+% Also see POLYFITN by John D'Errico
+%
+
+% Copyright (c) 2013 Mark Mikofski
+% Version 1-1, 2013-10-15
+% add delta output
+% center and scale
+% Version 1-0, 2011-06-29
+
+%% check args
+% X & Y should be numbers
+assert(isnumeric(x) && isnumeric(y),'polyfitZero:notNumeric', ...
+ 'X and Y must be numeric.')
+dim = numel(x); % number of elements in X
+% DEGREE should be scalar positive number between 1 & 10 inclusive
+assert(isnumeric(degree) && isscalar(degree) && degree>0 && degree<=10, ...
+ 'polyfitZero:degreeOutOfRange', ...
+ 'DEGREE must be an integer between 1 and 10.')
+% DEGREE must be less than number of elements in X & Y
+assert(degree<dim, 'DEGREE must be less than the number of elements in X and Y.')
+if nargout > 2
+ mu = [0; std(x)];
+ x = (x - mu(1))/mu(2);
+end
+% using pow() is actually as fast or faster than looping, same # of flops!
+z = zeros(dim,degree);
+for n = 1:degree
+ z(:,n) = x.^(degree-n+1);
+end
+p = z\y; % solve
+p = [p;0]; % set y-intercept to zero
+%% error estimates
+% attribution: this is based on code from POLYFIT by The MathWorks Inc.
+if nargout > 1
+ V = [z,ones(dim,1)]; % append constant term for Vandermonde matrix
+ % Return upper-triangular factor of QR-decomposition for error estimates
+ R = triu(qr(V,0));
+ r = y - V*p;
+ S.R = R(1:size(R,2),:);
+ S.df = max(0,length(y) - (degree+1));
+ S.normr = norm(r);
+end
+p = p'; % polynomial output is row vector by convention
+end
diff --git a/code/calculate_bland_altman.m b/code/calculate_bland_altman.m
new file mode 100644
index 0000000..6fbc3ea
--- /dev/null
+++ b/code/calculate_bland_altman.m
@@ -0,0 +1,7 @@
+%% Function to calculate mean and differences
+%
+% This is useful for creating Bland-Altman plots
+function [mean_val, diff_val] = calculate_bland_altman(x1, x2)
+ mean_val = (x1 + x2) / 2;
+ diff_val = x1 - x2;
+end
\ No newline at end of file
diff --git a/code/fitPsychometric.m b/code/fitPsychometric.m
new file mode 100644
index 0000000..53e3288
--- /dev/null
+++ b/code/fitPsychometric.m
@@ -0,0 +1,96 @@
+% fitPsychometric
+%
+% Wrapper into Palamedes that fits the logistic with parameters reasonable
+% for our data.
+function [fit_log_intensity,fit_psychometric,corrected_fit_psychometric, ...
+ log_threshold,corrected_log_threshold,psiParamsValues] = fitPsychometric(log_intensity,num_pos,out_of_num,threshold_criterion,convert_to_db, ...
+ guessUpper,lapseUpper,slopeLower,slopeUpper)
+%
+% Usage:
+% [fit_log_intensity,fit_psychometric,corrected_fit_psychometric, ...
+% log_threshold,corrected_log_threshold,psiParamsValues] = fitPsychometric(log_intensity,num_pos,out_of_num,threshold_criterion)
+%
+% Inputs:
+% log_intensity : (vector) log10 stimulus levels tested
+% num_pos : (vector) for each stimulus level tested, the number of trials a positive response was given
+% out_of_num : (vector) for each stimulus level tested, the total number of trials
+% threshold_criterion : [scalar] criterion for threshold
+%
+% History:
+% 07/05/21 amn Wrote it.
+% 06/28/25 dhb Adapted for this project
+
+% Calculate x-axis values to plot
+%
+% Stimulus values to plot for a smooth fit.
+if (convert_to_db)
+ fit_log_intensity = linspace(-35,0,1000);
+else
+ fit_log_intensity = linspace(-3.5,0,1000);
+end
+
+% Psychometric function form (alternative: PAL_Gumbel).
+PF = @PAL_Logistic;
+
+% 'paramsFree' is a boolean vector that determines what parameters get
+% searched over (1: free parameter, 0: fixed parameter).
+paramsFree = [1 1 1 1];
+
+% Set up starting points:
+if (convert_to_db)
+ searchGrid.alpha = -35:.1:0;
+ searchGrid.beta = slopeLower:0.1:slopeUpper;
+else
+ searchGrid.alpha = -3.5:.01:0;
+ searchGrid.beta = slopeLower:1:slopeUpper;
+end
+searchGrid.gamma = linspace(0,guessUpper,3);
+searchGrid.lambda = linspace(0,lapseUpper,6);
+
+% Set up guess and lapse limits.
+guessLimits = [0 guessUpper];
+lapseLimits = [0 lapseUpper];
+
+% Set up standard options for Palamedes search.
+options = PAL_minimize('options');
+
+% Fit with Palamedes Toolbox.
+[psiParamsValues] = PAL_PFML_Fit(log_intensity,num_pos,out_of_num,searchGrid,paramsFree,PF, ...
+ 'lapseLimits',lapseLimits,'guessLimits',guessLimits,'searchOptions',options,'gammaEQlambda',false, ...
+ 'checkLimits',false);
+
+% Check whether beta is bigger or smaller than we would like,
+% constrain it if so.
+if (psiParamsValues(2) > max(searchGrid.beta))
+ searchGrid.beta = max(searchGrid.beta);
+ paramsFree = [1 0 1 1];
+ [psiParamsValues] = PAL_PFML_Fit(log_intensity,num_pos,out_of_num,searchGrid,paramsFree,PF, ...
+ 'lapseLimits',lapseLimits,'guessLimits',guessLimits,'searchOptions',options,'gammaEQlambda',false, ...
+ 'checkLimits',false);
+ % disp('Overrun on beta')
+ % psiParamsValues
+elseif (psiParamsValues(2) < min(searchGrid.beta))
+ searchGrid.beta = min(searchGrid.beta);
+ paramsFree = [1 0 1 1];
+ [psiParamsValues] = PAL_PFML_Fit(log_intensity,num_pos,out_of_num,searchGrid,paramsFree,PF, ...
+ 'lapseLimits',lapseLimits,'guessLimits',guessLimits,'searchOptions',options,'gammaEQlambda',false, ...
+ 'checkLimits',false);
+ % disp('Underrun on beta')
+ % psiParamsValues
+end
+
+% Get fitted curve values.
+fit_psychometric = PF(psiParamsValues,fit_log_intensity);
+
+% Calculate threshold: the log stimulus level at which the fitted
+% psychometric function reaches the threshold criterion (e.g., 0.7602).
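+% The 'inverse' call below asks the PF for the stimulus value that yields a
+% given performance level; as an illustrative sketch (values hypothetical),
+% PAL_Logistic(psiParamsValues,0.78,'inverse') would return the log
+% intensity at which the fitted function predicts 78% positive responses.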
+log_threshold = PF(psiParamsValues,threshold_criterion,'inverse'); + +% Now correct for guessing and lapse rate +psiParamsValuesCorrect = psiParamsValues; +psiParamsValuesCorrect(3) = 0; +psiParamsValuesCorrect(4) = 0; +corrected_fit_psychometric = PF(psiParamsValuesCorrect,fit_log_intensity); +corrected_log_threshold = PF(psiParamsValuesCorrect,threshold_criterion,'inverse'); + +end \ No newline at end of file diff --git a/code/green_AOM_LUT_processing.mat b/code/green_AOM_LUT_processing.mat new file mode 100644 index 0000000..a308e5a Binary files /dev/null and b/code/green_AOM_LUT_processing.mat differ diff --git a/configuration/AOCompObserver.json b/configuration/AOCompObserver.json deleted file mode 100644 index c856605..0000000 --- a/configuration/AOCompObserver.json +++ /dev/null @@ -1,28 +0,0 @@ -[ - { - "flavor": "", - "hook": "", - "importance": "", - "localHookTemplate": "", - "name": "ISETBioCSFGenerator", - "pathPlacement": "append", - "subfolder": "", - "toolboxRoot": "", - "type": "include", - "update": "", - "url": "" - }, - { - "flavor": "", - "hook": "", - "importance": "", - "localHookTemplate": "configuration/AOCompObserverLocalHookTemplate.m", - "name": "AOCompObserver", - "pathPlacement": "append", - "subfolder": "", - "toolboxRoot": "", - "type": "git", - "update": "", - "url": "https://github.com/brainardlab/AOCompObserver.git" - } -] diff --git a/configuration/AOMicroRepeat.json b/configuration/AOMicroRepeat.json new file mode 100644 index 0000000..a3f3672 --- /dev/null +++ b/configuration/AOMicroRepeat.json @@ -0,0 +1,80 @@ +[ + { + "flavor": "", + "hook": "", + "importance": "", + "localHookTemplate": "", + "name": "BrainardLabToolbox", + "pathPlacement": "append", + "subfolder": "", + "toolboxRoot": "", + "type": "include", + "update": "", + "url": "" + }, + { + "flavor": "", + "hook": "", + "importance": "", + "localHookTemplate": "", + "name": "Psychtoolbox-3", + "pathPlacement": "append", + "subfolder": "Psychtoolbox", + "toolboxRoot": "", + "type": "include", + "update": "", + "url": "" + }, + { + "flavor": "", + "hook": "", + "importance": "", + "localHookTemplate": "", + "name": "Palamedes", + "pathPlacement": "append", + "subfolder": "", + "toolboxRoot": "", + "type": "include", + "update": "never", + "url": "" + }, + { + "flavor": "", + "hook": "", + "importance": "", + "localHookTemplate": "", + "name": "UnitTestToolbox", + "pathPlacement": "append", + "subfolder": "", + "toolboxRoot": "", + "type": "include", + "update": "", + "url": "" + }, + { + "flavor": "", + "hook": "", + "importance": "", + "localHookTemplate": "", + "name": "ExampleTestToolbox", + "pathPlacement": "append", + "subfolder": "", + "toolboxRoot": "", + "type": "include", + "update": "", + "url": "" + }, + { + "flavor": "", + "hook": "", + "importance": "", + "localHookTemplate": "configuration/AOMicroRepeatLocalHookTemplate.m", + "name": "AOMicroRepeat", + "pathPlacement": "append", + "subfolder": "", + "toolboxRoot": "", + "type": "git", + "update": "", + "url": "https://github.com/brainardlab/AOMicroRepeat.git" + } +] diff --git a/configuration/AOCompObserverLocalHookTemplate.m b/configuration/AOMicroRepeatLocalHookTemplate.m similarity index 53% rename from configuration/AOCompObserverLocalHookTemplate.m rename to configuration/AOMicroRepeatLocalHookTemplate.m index b9d2da8..b059fe8 100644 --- a/configuration/AOCompObserverLocalHookTemplate.m +++ b/configuration/AOMicroRepeatLocalHookTemplate.m @@ -13,8 +13,7 @@ % to match what is true on your computer. 
%% Say hello -theProject = 'AOCompObserver'; -fprintf('Running %s local hook\n',theProject); +theProject = 'AOMicroRepeat'; %% Remove old preferences if (ispref(theProject)) @@ -27,25 +26,34 @@ projectName = theProject; projectBaseDir = tbLocateProject(theProject); -%% Figure out where baseDir for other kinds of data files is. -sysInfo = GetComputerInfo(); -switch (sysInfo.localHostName) - case 'eagleray' - % DHB's desktop - baseDir = fullfile(filesep,'Volumes','Users1','Dropbox (Aguirre-Brainard Lab)'); +if (exist('GetComputerInfo','file')) + sysInfo = GetComputerInfo(); + switch (sysInfo.localHostName) - otherwise - % Some unspecified machine, try user specific customization - switch(sysInfo.userShortName) - % Could put user specific things in, but at the moment generic - % is good enough. - otherwise - baseDir = fullfile('/Users',sysInfo.userShortName,'Dropbox (Aguirre-Brainard Lab)'); - end + otherwise + % Some unspecified machine, try user specific customization + switch(sysInfo.userShortName) + % Could put user specific things in, but at the moment generic + % is good enough. + otherwise + % Some unspecified machine, try our generic approach, + % which works on a mac to find the dropbox for business + % path. + if ismac + dbJsonConfigFile = '~/.dropbox/info.json'; + fid = fopen(dbJsonConfigFile); + raw = fread(fid,inf); + str = char(raw'); + fclose(fid); + val = jsondecode(str); + baseDir = val.business.path; + end + end + end end %% Set preferences for project i/o - +% % This is where the psychophysical data is stored dataDir = fullfile(baseDir,'AOPY_data',theProject); diff --git a/twoSpot/computeSummation.m b/twoSpot/computeSummation.m deleted file mode 100644 index d46b3fe..0000000 --- a/twoSpot/computeSummation.m +++ /dev/null @@ -1,225 +0,0 @@ -function computeSummation(options) -% Compute summation curve for circular spots. -% -% Description: -% Use ISETBioCSFGenerator to run out a summation curve for circular -% spots. -% - -% History: -% 03/16/21 dhb Wrote it from color threshold example in ISETBioCSFGenerator. - -%% Pick up optional arguments -arguments - options.defocusDiopters (1,1) double = 0.05; - options.pupilDiameterMm (1,1) double = 6; - options.visualizeMosaicResponses (1,1) logical = false; - options.testing (1,1) logical = false; - options.write (1,1) logical = true; - options.verbose (1,1) logical = false; - options.smoothingParam (1,1) double = 0.995; -end - -%% Clear and close -close all; ieInit; - -%% Directory stuff -baseProject = 'AOCompObserver'; -analysisBaseDir = getpref(baseProject,'analysisDir'); - -%% Testing or running out full computations? -if (options.testing) - nPixels = 128; - nTrialsTest = 64; - minTrials = 1024; - diameterList = [0.25 0.5 1 2]/60; -else - nPixels = 256; - nTrialsTest = 32; - minTrials = 5000; - diameterList = logspace(log10(0.15),log10(2),20)/60; -end - -% Ancillary stimulus parameters -% -% Define basic parameters of the AO stimulus -% -% Measured power was 76 nW for 1.5 by 1 degree field. -% Need to adjust for what it would have been for same -% radiance with a different size field. -wls = 400:10:750; -spotWavelengthNm = 550; -fieldSizeDegs = 0.25; -pupilDiameterMm = options.pupilDiameterMm; -defocusDiopters = options.defocusDiopters; -analysisOutDir = fullfile(analysisBaseDir,sprintf('Summation_%s_%d',num2str(round(1000*defocusDiopters)),pupilDiameterMm)); -if (~exist(analysisOutDir,'dir')) - mkdir(analysisOutDir); -end - -% Set up two spot params -circularSpotParams = struct(... - 'type', 'basic', ... 
% type - 'viewingDistanceMeters', 2, ... % viewing distance: in meters - 'wls', 400:10:750, ... % wavelength support of the primary SPDs: in nanometers - 'stimDiameter', 0, ... % stimulus spot diameter in degrees. - 'spotWl', spotWavelengthNm, ... % spot wavelength: in nm - 'spotFWHM', 20, ... % spot full width at half max: in nm - 'spotBgDegs', fieldSizeDegs, ... % spot background: in degrees - 'spotBgPowerUW', (fieldSizeDegs^2)*76/1000*(2/3), ... % spot background power: in uW - 'imagingWl', 750, ... % imaging beam wavelength: in nm - 'imagingFWHM', 20, ... % imaging beam full width at half max: in nm - 'imagingBgPowerUW', 0, ... % imaging beam power entering eye: in uW - 'fovDegs', fieldSizeDegs, ... % spatial: scene field of view, in degrees - 'pixelsNum', nPixels, ... % spatial: desired size of stimulus in pixels - 'temporalModulation', 'flashed', ... % temporal modulation mode: choose between {'flashed'} - 'temporalModulationParams', struct(... % temporal: modulation params struct - 'stimOnFrameIndices', [1], ... % params relevant to the temporalModulationMode - 'stimDurationFramesNum', 1), ... % params relevant to the temporalModulationMode - 'frameDurationSeconds', 3/16, ... % temporal: frame duration, in seconds - 'pupilDiameterMm', pupilDiameterMm ... % pupil diameter mm - ); - -%% Create neural response engine -% -% This calculations isomerizations in a patch of cone mosaic with Poisson -% noise, and includes optical blur. -%neuralParams = nreAOPhotopigmentExcitationsWithNoEyeMovements; -neuralParams = nreAOPhotopigmentExcitationsWithNoEyeMovementsCMosaic; - -% Set optics params -neuralParams.opticsParams.wls = wls; -neuralParams.opticsParams.pupilDiameterMM = pupilDiameterMm; -neuralParams.opticsParams.defocusAmount = defocusDiopters; -neuralParams.opticsParams.accommodatedWl = spotWavelengthNm; -neuralParams.opticsParams.zCoeffs = zeros(66,1); -neuralParams.opticsParams.defeatLCA = false; -neuralParams.verbose = options.verbose; - -% Cone params -neuralParams.coneMosaicParams.fovDegs = fieldSizeDegs; -theNeuralEngine = neuralResponseEngine(@nreAOPhotopigmentExcitationsWithNoEyeMovementsCMosaic, neuralParams); - -%% Instantiate the PoissonTAFC responseClassifierEngine -% -% PoissonTAFC makes decision by performing the Poisson likelihood ratio test -% Also set up parameters associated with use of this classifier. -classifierEngine = responseClassifierEngine(@rcePoissonTAFC); -classifierPara = struct('trainFlag', 'none', ... - 'testFlag', 'random', ... - 'nTrain', 1, 'nTest', nTrialsTest); - -%% Parameters for threshold estimation/quest engine -% The actual threshold varies enough with the different engines that we -% need to adjust the contrast range that Quest+ searches over, as well as -% the range of psychometric function slopes. Threshold limits are computed -% as 10^-logThreshLimitVal. -thresholdPara = struct('logThreshLimitLow', 3, ... - 'logThreshLimitHigh', 0, ... - 'logThreshLimitDelta', 0.02, ... - 'slopeRangeLow', 1, ... - 'slopeRangeHigh', 50, ... - 'slopeDelta', 2.5); - -% Parameter for running the QUEST+ -% -% See t_thresholdEngine.m for more on options of the two different mode of -% operation (fixed numer of trials vs. adaptive). See t_thresholdEngine and -% questThresholdEngine for more info. -% -% Here we set maxTrial = minTrial, and don't use an additinal stop -% criterion, so fundamentally we just run minTrials trials. This is total -% trials evaluated, not number of separate constrasts. 
So if you increase -% nTrialsTest, you'll get fewer contrasts tested unless you also increase -% minTrials in proportion. -questEnginePara = struct('minTrial', minTrials, 'maxTrial', minTrials, ... - 'numEstimator', 1, 'stopCriterion', []); - -% Create a static two-spot AO scene with a particular incr-decr direction, -% and other relevant parameters -% Compute function handle for two-spot AO stimuli -sceneComputeFunction = @sceAOCircularSpot; - -%% Compute threshold for each spatial direction -% -% See toolbox/helpers for functions createGratingScene computeThresholdTAFC -nDirs = length(diameterList); -dataFig = figure(); -logThreshold = zeros(1, nDirs); - -for ii = 1:nDirs - - % Instantiate a sceneEngine with the above sceneComputeFunctionHandle - % and the custom params. - circularSpotParams.stimDiameter = diameterList(ii); - circularSpotScene = sceneEngine(sceneComputeFunction, circularSpotParams); - - % Compute the threshold for our grating scene with the previously - % defined neural and classifier engine. This function does a lot of - % work, see t_tresholdEngine and the function itself, as well as - % function computePerformanceTAFC. - [logThreshold(ii), questObj] = ... - computeThresholdTAFC(circularSpotScene, theNeuralEngine, classifierEngine, classifierPara, ... - thresholdPara, questEnginePara, 'visualizeAllComponents', options.visualizeMosaicResponses, ... - 'extraVerbose',options.verbose); - - % Plot stimulus - figure(dataFig); - subplot(nDirs, 3, ii * 3 - 2); - visualizationContrast = 1.0; - [theSceneSequence{ii}] = circularSpotScene.compute(visualizationContrast); - circularSpotScene.visualizeStaticFrame(theSceneSequence{ii}, ... - 'skipOutOfGamutCheck', true); - - % Plot optical image - subplot(nDirs, 3, ii * 3 - 1); - visualizationContrast = 1.0; - [theSceneSequence{ii}] = circularSpotScene.compute(visualizationContrast); - circularSpotScene.visualizeStaticFrame(theSceneSequence{ii}, ... - 'skipOutOfGamutCheck', true, ... - 'opticalImageInsteadOfScene', theNeuralEngine.neuralPipeline.optics); - - % Plot data and psychometric curve - % with a marker size of 2.5 - subplot(nDirs, 3, ii * 3 ); - questObj.plotMLE(2.5); - drawnow; -end -set(dataFig, 'Position', [0, 0, 800, 800]); -if (options.write) - print(dataFig,fullfile(analysisOutDir,sprintf('CompObsPsychoFig.tiff')), '-dtiff'); -end - -% Convert returned log threshold to linear threshold -threshold = 10 .^ logThreshold; - -% Threshold energy -thresholdEnergy = pi*((diameterList/2).^2).*threshold; - -%% Smooth curve through the data. MATLAB's smoothing spline. -% Vary smoothness parameter to control. Bigger is less smooth. -% Want this pretty big. 
-diameterPlot = linspace(diameterList(1),diameterList(end),100); -fObj = fit(log10(60*diameterList)',log10(thresholdEnergy)','smoothingspline','smoothingparam',options.smoothingParam); -log10ThresholdEnergySmooth = feval(fObj,log10(60*diameterPlot)); - -%% Plot -theSummationFig = figure; clf; hold on -plot(log10(60*diameterList),log10(thresholdEnergy),'ro','MarkerFaceColor','r','MarkerSize',12); -plot(log10(60*diameterPlot),log10ThresholdEnergySmooth,'b','LineWidth',3); -xlabel('Log Spot Diameter (minutes)'); -ylabel('Log Threshold Energy'); -set(theSummationFig, 'Position', [800, 0, 600, 800]); -if (options.write) - print(theSummationFig, fullfile(analysisOutDir,sprintf('CompSummation.tiff')), '-dtiff'); -end - -%% Save -close(dataFig); -close(theSummationFig); -if (options.write) - save(fullfile(analysisOutDir,'CompObserver')); -end - -end - diff --git a/twoSpot/computeTwoSpotContour.m b/twoSpot/computeTwoSpotContour.m deleted file mode 100644 index 590f9aa..0000000 --- a/twoSpot/computeTwoSpotContour.m +++ /dev/null @@ -1,264 +0,0 @@ -function computeTwoSpotContour(options) -% Compute isothreshold contour for mixtures of incremental and decremental spots. -% -% Description: -% Use ISETBioCSFGenerator to run out an isothreshold contour in the -% incr-decr contrast plane. This example uses an ideal Poisson -% TAFC observer. -% - -% History: -% 03/16/21 dhb Wrote it from color threshold example in ISETBioCSFGenerator. - -%% Pick up optional arguments -arguments - options.defocusDiopters (1,1) double = 0.05; - options.pupilDiameterMm (1,1) double = 6; - options.visualizeStimulus (1,1) logical = false; - options.visualizeMosaicResponses (1,1) logical = false; - options.testing (1,1) logical = false; - options.write (1,1) logical = true; - options.verbose (1,1) logical = false; - options.spotWidthDegs (1,1) double = 1.286/60; - options.spotHeightDegs (1,1) double = 1/60; - options.spotVerticalSepDegs (1,1) double = 1/60 - options.degsPerPixel = 1/415; - options.conditionName (1,1) string = 'IncrDecr1'; - options.angleList = []; -end - -error('Fix defocus should be converted to microns before putting into zcoeffs'); - -%% Clear and close -close all; ieInit; - -%% Directory stuff -baseProject = 'AOCompObserver'; -analysisBaseDir = getpref(baseProject,'analysisDir'); - -%% Testing or running out full computations? -if (options.testing) - nPixels = 128; - nTrialsTest = 64; - nQuestTrials = 20*nTrialsTest; - nQuestEstimators = 1; - if (isempty(options.angleList)) - angleList = [0 45 135 180 225]; - else - angleList = options.angleList; - end -else - nPixels = 256; - nTrialsTest = 128; - nQuestTrials = 60*nTrialsTest; - nQuestEstimators = 2; - if (isempty(options.angleList)) - angleList = [0 22.5 45 67.5 90 112.5 135 157.5 180 202.5 225 247.5 270 292.5 315 337.5]; - else - angleList = options.angleList; - end -end - -% Ancillary stimulus parameters -% -% Define basic parameters of the AO stimulus -% -% Measured power was 76 nW for 1.5 by 1 degree field. -% Need to adjust for what it would have been for same -% radiance with a different size field. -wls = 400:10:750; -spotWavelengthNm = 550; -fieldSizeDegs = 0.25; -pupilDiameterMm = options.pupilDiameterMm; -defocusDiopters = options.defocusDiopters; -analysisOutDir = fullfile(analysisBaseDir,sprintf('%s_%s_%d',options.conditionName,num2str(round(1000*defocusDiopters)),pupilDiameterMm)); -if (~exist(analysisOutDir,'dir')) - mkdir(analysisOutDir); -end - -% Set up two spot params -twoSpotParams = struct(... - 'type', 'basic', ... 
-    'viewingDistanceMeters', 2, ...                   % viewing distance: in meters
-    'wls', 400:10:750, ...                            % wavelength support of the primary SPDs: in nanometers
-    'stimAngle', 0, ...                               % stimulus angle in incr/decr plane
-    'spotWl', spotWavelengthNm, ...                   % spot wavelength: in nm
-    'spotFWHM', 20, ...                               % spot full width at half max: in nm
-    'spotWidthDegs', options.spotWidthDegs, ...       % spot width: in degrees
-    'spotHeightDegs', options.spotHeightDegs, ...     % spot height: in degrees
-    'spotVerticalSepDegs', options.spotVerticalSepDegs, ... % spot center to center vertical separation: in degrees
-    'spotHorizontalSepDegs', 0, ...                   % spot center to center horizontal separation: in degrees
-    'spotBgDegs', fieldSizeDegs, ...                  % spot background: in degrees
-    'spotBgPowerUW', (fieldSizeDegs^2)*76/1000*(2/3), ... % spot background power in uW: 76 nW measured over a 1.5 deg^2 field, scaled to fieldSizeDegs^2 at the same radiance (76/1000 converts nW to uW; 2/3 = 1/(1.5 deg^2))
-    'imagingWl', 750, ...                             % imaging beam wavelength: in nm
-    'imagingFWHM', 20, ...                            % imaging beam full width at half max: in nm
-    'imagingBgPowerUW', 0, ...                        % imaging beam power entering eye: in uW
-    'fovDegs', fieldSizeDegs, ...                     % spatial: scene field of view, in degrees
-    'pixelsNum', nPixels, ...                         % spatial: desired size of stimulus in pixels
-    'temporalModulation', 'flashed', ...              % temporal modulation mode: choose between {'flashed'}
-    'temporalModulationParams', struct(...            % temporal: modulation params struct
-        'stimOnFrameIndices', [1], ...                % params relevant to the temporalModulationMode
-        'stimDurationFramesNum', 1), ...              % params relevant to the temporalModulationMode
-    'frameDurationSeconds', 3/16, ...                 % temporal: frame duration, in seconds
-    'pupilDiameterMm', pupilDiameterMm ...            % pupil diameter: in mm
-    );
-
-%% Create neural response engine
-%
-% This computes isomerizations in a patch of cone mosaic with Poisson
-% noise, and includes optical blur.
-%neuralParams = nreAOPhotopigmentExcitationsWithNoEyeMovements;
-neuralParams = nreAOPhotopigmentExcitationsWithNoEyeMovementsCMosaic;
-
-% Set optics params
-neuralParams.opticsParams.wls = wls;
-neuralParams.opticsParams.pupilDiameterMM = pupilDiameterMm;
-neuralParams.opticsParams.defocusAmount = defocusDiopters;
-neuralParams.opticsParams.accommodatedWl = spotWavelengthNm;
-neuralParams.opticsParams.zCoeffs = zeros(66,1);
-neuralParams.opticsParams.defeatLCA = false;
-neuralParams.verbose = options.verbose;
-
-% Cone params
-neuralParams.coneMosaicParams.fovDegs = fieldSizeDegs;
-theNeuralEngine = neuralResponseEngine(@nreAOPhotopigmentExcitationsWithNoEyeMovementsCMosaic, neuralParams);
-
-%% Instantiate the PoissonTAFC responseClassifierEngine
-%
-% PoissonTAFC makes its decision by performing the Poisson likelihood ratio
-% test. Also set up parameters associated with use of this classifier.
-classifierEngine = responseClassifierEngine(@rcePoissonTAFC);
-classifierPara = struct('trainFlag', 'none', ...
-    'testFlag', 'random', ...
-    'nTrain', 1, 'nTest', nTrialsTest);
-
-%% Parameters for threshold estimation/quest engine
-% The actual threshold varies enough with the different engines that we
-% need to adjust the contrast range that Quest+ searches over, as well as
-% the range of psychometric function slopes. Threshold limits are computed
-% as 10^-logThreshLimitVal.
-thresholdPara = struct('logThreshLimitLow', 3, ...
-    'logThreshLimitHigh', 1, ...
-    'logThreshLimitDelta', 0.02, ...
-    'slopeRangeLow', 1/20, ...
-    'slopeRangeHigh', 50/20, ...
-    'slopeDelta', 2.5/20, ...
-    'thresholdCriterion', 0.81606);
-
-% Parameters for running QUEST+
-% See t_thresholdEngine.m for more on the options for the two different
-% modes of operation (fixed number of trials vs. adaptive)
-questEnginePara = struct( ...
-    'qpPF',@qpPFWeibullLog, ...
-    'minTrial', nQuestTrials, ...
-    'maxTrial', nQuestTrials, ...
-    'numEstimator', nQuestEstimators, ...
-    'stopCriterion', 0.05);
-
-% Create a static two-spot AO scene with a particular incr-decr direction,
-% and other relevant parameters
-% Compute function handle for two-spot AO stimuli
-sceneComputeFunction = @sceAOTwoSpot;
-
-%% Compute threshold for each spatial direction
-%
-% See toolbox/helpers for functions createGratingScene and computeThresholdTAFC
-nDirs = length(angleList);
-dataFig = figure();
-logThreshold = zeros(1, nDirs);
-whichFrameToVisualize = 3;
-for ii = 1:nDirs
-
-    % Instantiate a sceneEngine with the above sceneComputeFunction handle
-    % and the custom params.
-    twoSpotParams.stimAngle = angleList(ii);
-    twoSpotScene = sceneEngine(sceneComputeFunction, twoSpotParams);
-
-    % Compute the threshold for our two-spot scene with the previously
-    % defined neural and classifier engines. This function does a lot of
-    % work; see t_thresholdEngine and the function itself, as well as
-    % function computePerformanceTAFC.
-    [logThreshold(ii), questObj] = ...
-        computeThresholdTAFC(twoSpotScene, theNeuralEngine, classifierEngine, classifierPara, ...
-        thresholdPara, questEnginePara, 'visualizeStimulus',options.visualizeStimulus,'visualizeAllComponents', options.visualizeMosaicResponses, ...
-        'extraVerbose',options.verbose);
-
-    % Plot stimulus
-    figure(dataFig);
-    subplot(nDirs, 3, ii * 3 - 2);
-    visualizationContrast = 1.0;
-    [theSceneSequence{ii}] = twoSpotScene.compute(visualizationContrast);
-    twoSpotScene.visualizeStaticFrame(theSceneSequence{ii}, ...
-        'skipOutOfGamutCheck', true);
-
-    % Plot optical image
-    subplot(nDirs, 3, ii * 3 - 1);
-    visualizationContrast = 1.0;
-    [theSceneSequence{ii}] = twoSpotScene.compute(visualizationContrast);
-    twoSpotScene.visualizeStaticFrame(theSceneSequence{ii}, ...
-        'skipOutOfGamutCheck', true, ...
-        'opticalImageInsteadOfScene', theNeuralEngine.neuralPipeline.optics);
-
-    % Plot data and psychometric curve
-    % with a marker size of 2.5
-    subplot(nDirs, 3, ii * 3);
-    questObj.plotMLE(2.5);
-    drawnow;
-end
-set(dataFig, 'Position', [0, 0, 800, 800]);
-if (options.write)
-    print(dataFig,fullfile(analysisOutDir,sprintf('CompObsPsychoFig.tiff')), '-dtiff');
-end
-
-% Convert returned log threshold to linear threshold
-threshold = 10 .^ logThreshold;
-
-% Threshold cone contrasts
-thresholdContrasts = [threshold.*cosd(angleList) ; threshold.*sind(angleList)];
-
-%% Fit an ellipse to the data. See EllipseTest and FitEllipseQ.
-%
-% The use of scaleFactor to scale up the data and scale down the fit by the
-% same amount is fmincon black magic. Doing this puts the objective
-% function into a better range for the default size of search steps.
-%
-% When true, the 'lockAngleAt0' flag below constrains the ellipse to line
-% up with the x and y axes; here it is false, so the ellipse angle is free.
-% It doesn't make very much difference in this case.
scaleFactor = 1/(max(abs(thresholdContrasts(:))));
-[compEllParams,compFitA,compFitAinv,compFitQ] = FitEllipseQ(scaleFactor*thresholdContrasts(1:2,:),'lockAngleAt0',false, ...
-    'initialParams',[0.9 1.1 45]);
-nThetaEllipse = 200;
-circleIn2D = UnitCircleGenerate(nThetaEllipse);
-fitEllipse = PointsOnEllipseQ(compFitQ,circleIn2D)/scaleFactor;
-
-%% Plot
-contrastLim = 0.05;
-theContourFig = figure; clf; hold on
-plot(fitEllipse(1,:),fitEllipse(2,:),'r','LineWidth',3);
-plot([-contrastLim contrastLim],[0 0],'k:','LineWidth',1);
-plot([0 0],[-contrastLim contrastLim],'k:','LineWidth',1);
-xlabel('Contrast 1');
-ylabel('Contrast 2');
-set(theContourFig, 'Position', [800, 0, 600, 800]);
-xlim([-contrastLim contrastLim]); ylim([-contrastLim contrastLim]);
-axis('square');
-if (options.write)
-    print(theContourFig, fullfile(analysisOutDir,sprintf('CompObsEllipse.tiff')), '-dtiff');
-end
-
-% Add comp data to plot
-plot(thresholdContrasts(1,:), thresholdContrasts(2,:), 'ok', 'MarkerFaceColor','k', 'MarkerSize',12);
-if (options.write)
-    print(theContourFig, fullfile(analysisOutDir,sprintf('CompObsEllipseWithData.tiff')), '-dtiff');
-end
-
-%% Save
-spotWidthPixels = round(options.spotWidthDegs/options.degsPerPixel);
-spotHeightPixels = round(options.spotHeightDegs/options.degsPerPixel);
-spotVerticalSepPixels = round(options.spotVerticalSepDegs/options.degsPerPixel);
-
-close(dataFig);
-close(theContourFig);
-if (options.write)
-    save(fullfile(analysisOutDir,sprintf('CompObserver_%d_%d_%d',spotHeightPixels,spotWidthPixels,spotVerticalSepPixels-spotHeightPixels)));
-end
diff --git a/twoSpot/plotTwoSpotContours.m b/twoSpot/plotTwoSpotContours.m
deleted file mode 100644
index 0b18ce7..0000000
--- a/twoSpot/plotTwoSpotContours.m
+++ /dev/null
@@ -1,155 +0,0 @@
-% Make some plots of the computational observer results
-%
-% Description:
-%    Load and plot the computational observer results for the two spot
-%    experiment, across a variety of defocus values and pupil sizes.
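-%
-% Each ellipse below is rescaled to best match a base ellipse in a
-% least-squares sense. For column vectors a and b, MATLAB's a\b returns the
-% scalar x minimizing norm(a*x - b), i.e. x = (a'*b)/(a'*a), so the
-% expression (theEllipseNorm'\theBaseEllipseNorm') below is the single
-% radial scale factor that best maps each contour onto the base contour.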
-
-% Clear
-clear; close all;
-
-% Parameters
-defocusDioptersList = [0 0.05 0.10 0.15];
-pupilDiameterMmList = [2 3 4 5 6 7];
-testing = false;
-
-% Directory stuff
-baseProject = 'AOCompObserver';
-analysisBaseDir = getpref(baseProject,'analysisDir');
-
-% Load the data for each defocus/pupil combination
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        pupilDiameterMm = pupilDiameterMmList(pp);
-        defocusDiopters = defocusDioptersList(dd);
-
-        analysisOutDir = fullfile(analysisBaseDir,sprintf('IncrDecr1_%s_%d',num2str(round(1000*defocusDiopters)),pupilDiameterMm));
-        theData{dd,pp} = load(fullfile(analysisOutDir,'CompObserver'));
-    end
-end
-
-%% Plot as a function of pupil size
-%
-% Choose one defocus
-plotDefocusDiopters = 0;
-
-% Set up and plot
-theColors = ['r' 'g' 'k' 'b' 'c' 'm'];
-colorIndex = 1;
-thePupilSizeFig = figure; clf; hold on
-defocusIndex = find(defocusDioptersList == plotDefocusDiopters);
-legendText = {};
-for pp = 1:length(pupilDiameterMmList)
-    theEllipse = theData{defocusIndex,pp}.fitEllipse;
-    theEllipseNorm = vecnorm(theEllipse);
-    if (pp == 1)
-        theBaseEllipse = theEllipse;
-        theBaseEllipseNorm = vecnorm(theBaseEllipse);
-    end
-    subplot(1,2,1); hold on
-    plot(theEllipse(1,:),theEllipse(2,:),theColors(colorIndex),'LineWidth',3);
-
-    % Scaled ellipse
-    scaleEllipse = (theEllipseNorm'\theBaseEllipseNorm')*theEllipse;
-    subplot(1,2,2); hold on
-    plot(scaleEllipse(1,:),scaleEllipse(2,:),theColors(colorIndex),'LineWidth',3);
-
-    % Bump color index
-    colorIndex = colorIndex+1;
-    if (colorIndex > length(theColors))
-        colorIndex = 1;
-    end
-
-    % Legend text
-    legendText{pp} = sprintf('%d mm',pupilDiameterMmList(pp));
-
-end
-
-% Finish up the plot
-set(thePupilSizeFig, 'Position', [800, 0, 1000, 600]);
-contrastLim = 0.10;
-subplot(1,2,1);
-plot([-contrastLim contrastLim],[0 0],'k:','LineWidth',1);
-plot([0 0],[-contrastLim contrastLim],'k:','LineWidth',1);
-xlabel('Contrast 1');
-ylabel('Contrast 2');
-xlim([-contrastLim contrastLim]); ylim([-contrastLim contrastLim]);
-axis('square');
-title(sprintf('Defocus %g D, raw scale',plotDefocusDiopters));
-legend(legendText,'Location','NorthEast');
-
-subplot(1,2,2);
-plot([-contrastLim contrastLim],[0 0],'k:','LineWidth',1);
-plot([0 0],[-contrastLim contrastLim],'k:','LineWidth',1);
-xlabel('Contrast 1');
-ylabel('Contrast 2');
-xlim([-contrastLim contrastLim]); ylim([-contrastLim contrastLim]);
-axis('square');
-title(sprintf('Defocus %g D, same scale',plotDefocusDiopters));
-legend(legendText,'Location','NorthEast');
-
-%% Plot as a function of defocus
-%
-% Choose one pupil size
-plotPupilSizeMm = 7;
-
-% Set up and plot
-theColors = ['r' 'g' 'k' 'b' 'c' 'm'];
-colorIndex = 1;
-legendIndex = 1;
-theDioptersFig = figure; clf; hold on
-pupilDiameterIndex = find(pupilDiameterMmList == plotPupilSizeMm);
-legendText = {};
-for dd = length(defocusDioptersList):-1:1
-    theEllipse = theData{dd,pupilDiameterIndex}.fitEllipse;
-    theEllipseNorm = vecnorm(theEllipse);
-    if (dd == length(defocusDioptersList))
-        theBaseEllipse = theEllipse;
-        theBaseEllipseNorm = vecnorm(theBaseEllipse);
-    end
-    subplot(1,2,1); hold on
-    plot(theEllipse(1,:),theEllipse(2,:),theColors(colorIndex),'LineWidth',3);
-
-    % Scaled ellipse
-    scaleEllipse = (theEllipseNorm'\theBaseEllipseNorm')*theEllipse;
-    subplot(1,2,2); hold on
-    plot(scaleEllipse(1,:),scaleEllipse(2,:),theColors(colorIndex),'LineWidth',3);
-
-    % Bump color index
-    colorIndex = colorIndex+1;
-    if (colorIndex > length(theColors))
-        colorIndex = 1;
-    end
-
-    % Add to legend text
-    legendText{legendIndex} = sprintf('%0.2f D',defocusDioptersList(dd));
-    legendIndex = legendIndex+1;
-
-end
-
-% Finish up the plot
-set(theDioptersFig, 'Position', [800, 0, 1000, 600]);
-contrastLim = 0.02;
-subplot(1,2,1);
-plot([-contrastLim contrastLim],[0 0],'k:','LineWidth',1);
-plot([0 0],[-contrastLim contrastLim],'k:','LineWidth',1);
-xlabel('Contrast 1');
-ylabel('Contrast 2');
-xlim([-contrastLim contrastLim]); ylim([-contrastLim contrastLim]);
-axis('square');
-title(sprintf('Pupil diameter %d mm, raw scale',plotPupilSizeMm));
-legend(legendText,'Location','NorthEast');
-
-subplot(1,2,2);
-plot([-contrastLim contrastLim],[0 0],'k:','LineWidth',1);
-plot([0 0],[-contrastLim contrastLim],'k:','LineWidth',1);
-xlabel('Contrast 1');
-ylabel('Contrast 2');
-xlim([-contrastLim contrastLim]); ylim([-contrastLim contrastLim]);
-axis('square');
-title(sprintf('Pupil diameter %d mm, normalized',plotPupilSizeMm));
-legend(legendText,'Location','NorthEast');
-
-% if (options.write)
-%     print(theContourFig, fullfile(analysisOutDir,sprintf('CompObsEllipse.tiff')), '-dtiff');
-% end
-
diff --git a/twoSpot/runAllComputeSummation.m b/twoSpot/runAllComputeSummation.m
deleted file mode 100644
index 513a45c..0000000
--- a/twoSpot/runAllComputeSummation.m
+++ /dev/null
@@ -1,36 +0,0 @@
-% Run out a bunch of computational observers
-%
-% Description:
-%    Run out the computational observer for the circular spot summation
-%    experiment with a variety of defocus and pupil sizes.
-
-% Set parameters depending on mode
-testMode = true;
-if (testMode)
-    defocusDioptersList = [0];
-    pupilDiameterMmList = [7];
-    testing = true;
-    write = true;
-    verbose = true;
-    smoothingParam = 0.995;
-    visualizeMosaicResponses = true;
-else
-    defocusDioptersList = [0 0.05 0.10 0.15];
-    pupilDiameterMmList = [2 3 4 5 6 7];
-    % Override the full lists above for this particular run
-    defocusDioptersList = [0];
-    pupilDiameterMmList = [3];
-    testing = false;
-    write = true;
-    verbose = false;
-    smoothingParam = 0.995;
-    visualizeMosaicResponses = false;
-end
-
-% Do them
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeSummation('defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'testing',testing,'write',write,'verbose',verbose,'smoothingParam',smoothingParam, ...
-            'visualizeMosaicResponses', visualizeMosaicResponses);
-    end
-end
\ No newline at end of file
diff --git a/twoSpot/runAllComputeTwoSpotContours.m b/twoSpot/runAllComputeTwoSpotContours.m
deleted file mode 100644
index bf6aad2..0000000
--- a/twoSpot/runAllComputeTwoSpotContours.m
+++ /dev/null
@@ -1,298 +0,0 @@
-% Run out a bunch of computational observers
-%
-% Description:
-%    Run out the computational observer for the two spot experiment with a
-%    variety of defocus and pupil sizes.
-%
-% NOTE: When updating, you need to change the condition-name strings as
-%       well as the numbers used in the computations. It would be better
-%       to generate the strings from the numbers, as in the sketch below.
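-%
-% A minimal sketch of that refactor for the 7x9 conditions, assuming the
-% computeTwoSpotContour interface used below (sepPixelList is a hypothetical
-% variable name; the repeated blocks below do the same thing explicitly):
-%
-%   sepPixelList = [0 1 2 3 4 5 6 7 8 12 13 16 20 24 35 45];
-%   for ss = 1:length(sepPixelList)
-%       % Generate both the separation and its condition-name string
-%       spotVertSepDegs = spotHeightDegs + sepPixelList(ss)*degsPerPixel;
-%       for dd = 1:length(defocusDioptersList)
-%           for pp = 1:length(pupilDiameterMmList)
-%               computeTwoSpotContour('conditionName',sprintf('7_9_%d',sepPixelList(ss)), ...
-%                   'defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-%                   'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs, ...
-%                   'spotVerticalSepDegs',spotVertSepDegs, ...
-%                   'testing',testing,'write',write,'verbose',verbose);
-%           end
-%       end
-%   end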
-
-%% Set parameters depending on mode
-testMode = false;
-if (testMode)
-    defocusDioptersList = [0];
-    pupilDiameterMmList = [7];
-    testing = true;
-    write = false;
-    verbose = true;
-else
-    defocusDioptersList = [0 0.05 0.10 0.15];
-    pupilDiameterMmList = [7];
-    testing = false;
-    write = true;
-    verbose = false;
-end
-
-%% Degrees per pixel
-degsPerPixel = 1/415;
-
-% 7x9, various separations, optics
-spotHeightDegs = 7*degsPerPixel;
-spotWidthDegs = 9*degsPerPixel;
-spotVertSepDegs = spotHeightDegs + 0*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_0','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 1*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_1','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 2*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_2','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 3*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_3','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 4*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_4','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 5*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_5','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 6*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_6','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 7*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_7','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 8*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_8','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 12*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_12','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 13*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_13','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 16*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_16','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 20*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_20','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 24*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_24','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 35*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_35','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 45*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_45','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-%% 6x8
-spotHeightDegs = 6*degsPerPixel;
-spotWidthDegs = 8*degsPerPixel;
-spotVertSepDegs = spotHeightDegs + 0*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_0','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 1*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_1','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 2*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_2','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 3*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_3','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 4*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_4','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 5*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_5','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 6*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_6','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 8*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_8','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 12*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_12','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 16*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_16','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 20*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_20','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-spotVertSepDegs = spotHeightDegs + 24*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','6_8_24','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
-
-%% 5x7
-spotHeightDegs = 5*degsPerPixel;
-spotWidthDegs = 7*degsPerPixel;
-spotVertSepDegs = spotHeightDegs + 0*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','5_7_0','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose);
-    end
-end
\ No newline at end of file
diff --git a/twoSpot/tutorial/t_AOtwospot.m b/twoSpot/tutorial/t_AOtwospot.m
deleted file mode 100644
index 368a5bd..0000000
--- a/twoSpot/tutorial/t_AOtwospot.m
+++ /dev/null
@@ -1,41 +0,0 @@
-% Tutorial wrapper for the two spot computational observer
-%
-% Description:
-%    Run the computational observer for the two spot experiment; by default
-%    this runs a single quick test case (one defocus, one pupil size, one
-%    stimulus angle).
-%
-% NOTE: When updating, you need to change the condition-name strings as
-%       well as the numbers used in the computations. It would be better
-%       to generate the strings from the numbers.
-
-%% Set parameters depending on mode
-testMode = true;
-if (testMode)
-    defocusDioptersList = [0.05];
-    pupilDiameterMmList = [7];
-    testing = true;
-    write = false;
-    verbose = true;
-else
-    defocusDioptersList = [0 0.05 0.10 0.15];
-    pupilDiameterMmList = [7];
-    testing = false;
-    write = true;
-    verbose = false;
-end
-
-%% Degrees per pixel
-degsPerPixel = 1/415;
-
-% 7x9, optics
-spotHeightDegs = 7*degsPerPixel;
-spotWidthDegs = 9*degsPerPixel;
-spotVertSepDegs = spotHeightDegs + 0*degsPerPixel;
-for dd = 1:length(defocusDioptersList)
-    for pp = 1:length(pupilDiameterMmList)
-        computeTwoSpotContour('conditionName','7_9_0','defocusDiopters',defocusDioptersList(dd),'pupilDiameterMm',pupilDiameterMmList(pp), ...
-            'spotWidthDegs',spotWidthDegs,'spotHeightDegs',spotHeightDegs,'spotVerticalSepDegs',spotVertSepDegs, ...
-            'testing',testing,'write',write,'verbose',verbose,'visualizeMosaicResponses',true,'visualizeStimulus',false,'angleList',330);
-    end
-end
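
The error guard near the top of computeTwoSpotContour (above) notes that defocus must be converted from diopters to microns before it is placed into zCoeffs. A minimal helper sketch for that conversion, based on the standard OSA relation between equivalent defocus M (diopters), the Zernike defocus coefficient c (microns), and pupil radius r (mm), M = 4*sqrt(3)*c/r^2; the function name is illustrative and not part of this repository:

function defocusMicrons = defocusDioptersToMicrons(defocusDiopters, pupilDiameterMm)
% Convert defocus in diopters to the OSA Zernike defocus coefficient in
% microns for the given pupil diameter, by inverting M = 4*sqrt(3)*c / r^2.
    pupilRadiusMm = pupilDiameterMm/2;
    defocusMicrons = defocusDiopters * pupilRadiusMm^2 / (4*sqrt(3));
end

For example, 0.05 D over a 7 mm pupil corresponds to roughly 0.088 microns of Zernike defocus.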