# nws_tools.py - Collection of network creation/processing/analysis/plotting routines
#
# Author: Stefan Fuertinger [stefan.fuertinger@esi-frankfurt.de]
# Created: December 22 2014
# Last modified: <2017-11-23 16:45:52>
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import natsort
import os
import csv
import inspect
import fnmatch
try:
    from scipy import weave
except ImportError:
    import weave
from mpl_toolkits.mplot3d import Axes3D, proj3d
from matplotlib.patches import FancyArrowPatch, Circle
from matplotlib.colors import Normalize, colorConverter, LightSource
import math
from recipes import myglob
##########################################################################################
def strengths_und(CIJ):
"""
Compute nodal strengths in an undirected graph
Parameters
----------
CIJ : NumPy 2darray
Undirected binary/weighted connection matrix
Returns
-------
st : NumPy 1darray
Nodal strength vector
Notes
-----
This function does *not* do any error checking and assumes you know what you are doing
See also
--------
strengths_und.m : in the Brain Connectivity Toolbox (BCT) for MATLAB, currently available
`here <https://sites.google.com/site/bctnet/>`_
bctpy : An unofficial Python port of the BCT is currently available at the
`Python Package Index <https://pypi.python.org/pypi/bctpy>`_
and can be installed using `pip`.
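    Examples
    --------
    A minimal toy illustration (the 3-node weight matrix below is made up for this
    sketch and is not part of the original documentation):

    >>> W = np.array([[0., 2., 1.],
    ...               [2., 0., 0.],
    ...               [1., 0., 0.]])
    >>> strengths_und(W).tolist()
    [3.0, 2.0, 1.0]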
"""
return np.sum(CIJ,axis=0)
##########################################################################################
def degrees_und(CIJ):
"""
Compute nodal degrees in an undirected graph
Parameters
----------
CIJ : NumPy 2darray
Undirected binary/weighted connection matrix
Returns
-------
deg : NumPy 1darray
Nodal degree vector
Notes
-----
This function does *not* do any error checking and assumes you know what you are doing
See also
--------
degrees_und.m : in the Brain Connectivity Toolbox (BCT) for MATLAB, currently available
`here <https://sites.google.com/site/bctnet/>`_
bctpy : An unofficial Python port of the BCT is currently available at the
`Python Package Index <https://pypi.python.org/pypi/bctpy>`_
and can be installed using `pip`.
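    Examples
    --------
    A minimal toy illustration (same made-up 3-node weight matrix as in the
    `strengths_und` example; illustrative only):

    >>> W = np.array([[0., 2., 1.],
    ...               [2., 0., 0.],
    ...               [1., 0., 0.]])
    >>> degrees_und(W).tolist()
    [2, 1, 1]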
"""
return (CIJ != 0).sum(1)
##########################################################################################
def density_und(CIJ):
"""
Compute the connection density of an undirected graph
Parameters
----------
CIJ : NumPy 2darray
Undirected binary/weighted connection matrix
Returns
-------
den : float
density (fraction of present connections to possible connections)
Notes
-----
This function does *not* do any error checking and assumes you know what you are doing
See also
--------
density_und.m : in the Brain Connectivity Toolbox (BCT) for MATLAB, currently available
`here <https://sites.google.com/site/bctnet/>`_
bctpy : An unofficial Python port of the BCT is currently available at the
`Python Package Index <https://pypi.python.org/pypi/bctpy>`_
and can be installed using `pip`.
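    Examples
    --------
    A minimal toy illustration (made-up 3-node weight matrix with 2 of the 3 possible
    edges present, hence a density of 2/3):

    >>> W = np.array([[0., 2., 1.],
    ...               [2., 0., 0.],
    ...               [1., 0., 0.]])
    >>> round(density_und(W), 2)
    0.67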
"""
N = CIJ.shape[0] # no. of nodes
K = (np.triu(CIJ,1)!=0).sum() # no. of edges
return K/((N**2 - N)/2.0)
##########################################################################################
def get_corr(txtpath,corrtype='pearson',sublist=[],**kwargs):
"""
Compute pair-wise statistical dependence of time-series
Parameters
----------
txtpath : str
Path to directory holding ROI-averaged time-series dumped in `txt` files.
The following file-naming convention is required `sNxy_bla_bla.txt`,
where `N` is the group id (1,2,3,...), `xy` denotes the subject number
(01,02,...,99 or 001,002,...,999) and everything else is separated
by underscores. The files will be read in lexicographic order,
i.e., `s101_1.txt`, `s101_2.txt`,... or `s101_Amygdala.txt`, `s101_Beemygdala`,...
See Notes for more details.
corrtype : str
Specifier indicating which type of statistical dependence to use to compute
pairwise dependence. Currently supported options are
`pearson`: the classical zero-lag Pearson correlation coefficient
(see NumPy's `corrcoef` for details)
`mi`: (normalized) mutual information
(see the docstring of `mutual_info` in this module for details)
sublist : list or NumPy 1darray
List of subject codes to process, e.g., `sublist = ['s101','s102']`.
By default all subjects found in `txtpath` will be processed.
**kwargs : keyword arguments
Additional keyword arguments to be passed on to the function computing
the pairwise dependence (currently either NumPy's `corrcoef` or `mutual_info`
in this module).
Returns
-------
res : dict
Dictionary with fields:
corrs : NumPy 3darray
`N`-by-`N` matrices of pair-wise regional statistical dependencies
of `numsubs` subjects. Format is `corrs.shape = (N,N,numsubs)` such that
`corrs[:,:,i]` = `N x N` statistical dependence matrix of `i`-th subject
bigmat : NumPy 3darray
Tensor holding unprocessed time series of all subjects. Format is
`bigmat.shape = (tlen,N,numsubs)` where `tlen` is the maximum
time-series-length across all subjects (if time-series of different
lengths were used in the computation, any unfilled entries in `bigmat`
will be NumPy `nan`'s, see Notes for details) and `N` is the number of
regions (=nodes in the networks).
sublist : list of strings
List of processed subjects specified by `txtpath`, e.g.,
`sublist = ['s101','s103','s110','s111','s112',...]`
Notes
-----
Per-subject time-series do not necessarily have to be of the same length across
a subject cohort. However, all ROI-time-courses *within* the same subject must have
the same number of entries.
For instance, all ROI-time-courses in `s101` can have 140 entries, and time-series
of `s102` might have 130 entries. The remaining 10 values "missing" for `s102` are
filled with `NaN`'s in `bigmat`. However, if `s101_2.txt` contains 140 data-points while only
130 entries are found in `s101_3.txt`, the code will raise a `ValueError`.
See also
--------
    corrcoef : Pearson product-moment correlation coefficients computed in NumPy
mutual_info : Compute (normalized) mutual information coefficients
"""
# Make sure `txtpath` doesn't contain nonsense and points to an existing location
if not isinstance(txtpath,(str,unicode)):
raise TypeError('Input has to be a string specifying the path to the txt-file directory!')
txtpath = str(txtpath)
if txtpath.find("~") == 0:
txtpath = os.path.expanduser('~') + txtpath[1:]
if not os.path.isdir(txtpath):
raise ValueError('Invalid directory: '+txtpath+'!')
# Check `corrtype`
if not isinstance(corrtype,(str,unicode)):
raise TypeError('Statistical dependence type input must be a string, not '+type(corrtype).__name__+'!')
if corrtype != 'mi' and corrtype != 'pearson':
raise ValueError("Currently, only Pearson and (N)MI supported!")
# Check `sublist`
if not isinstance(sublist,(list,np.ndarray)):
raise TypeError('Subject codes have to be provided as Python list/NumPy 1darray, not '+type(sublist).__name__+'!')
if len(np.array(sublist).shape) != 1:
raise ValueError("Subject codes have to be provided as 1-d list/array!")
# Get length of `sublist` (to see if a subject list was provided)
numsubs = len(sublist)
# Get list of all txt-files in `txtpath` and order them lexicographically
if txtpath[-1] == ' ' or txtpath[-1] == os.sep: txtpath = txtpath[:-1]
txtfiles = natsort.natsorted(myglob(txtpath,"s*.[Tt][Xx][Tt]"), key=lambda y: y.lower())
if len(txtfiles) < 2: raise ValueError('Found fewer than 2 text files in '+txtpath+'!')
# If no subject-list was provided, take first subject to get the number of ROIs to be processed
if numsubs == 0:
# Search from left in file-name for first "s" (naming scheme: sNxy_bla_bla_.txt)
firstsub = txtfiles[0]
firstsub = firstsub.replace(txtpath+os.sep,'')
s_in_name = firstsub.find('s')
# The characters right of "s" until the first "_" are the subject identifier
udrline = firstsub[s_in_name::].find('_')
subject = firstsub[s_in_name:s_in_name+udrline]
# Generate list of subjects
sublist = [subject]
for fl in txtfiles:
if fl.count(subject) == 0:
s_in_name = fl.rfind('s')
udrline = fl[s_in_name::].find('_')
subject = fl[s_in_name:s_in_name+udrline]
sublist.append(subject)
# Update `numsubs`
numsubs = len(sublist)
# Prepare output message
msg = "Found "
else:
# Just take the first entry of user-provided subject list
subject = sublist[0]
# Prepare output message
msg = "Processing "
# Talk to the user
print msg+str(numsubs)+" subjects: "+"".join(sb+", " for sb in sublist)[:-2]
# Check if the number of ROIs is consistent across subjects
nrois = np.zeros((numsubs,),dtype=int)
txtflstr = ''.join(txtfiles)
for ns, sub in enumerate(sublist):
nrois[ns] = txtflstr.count(sub+"_")
nroisu = np.unique(nrois).astype(int)
if nroisu.size > 1:
if nroisu.min() == 0:
bad_subs = ""
else:
bad_subs = "Found "
for nsu in nroisu:
if nsu == 0:
bad_subs += "No data found for Subject(s) "
else:
bad_subs += str(nsu)+" regions in Subject(s) "
bad_subs += "".join(sublist[idx]+", " for idx in np.where(nrois == nsu)[0])
msg = "Inconsisten number of time-series across subjects! "+bad_subs[:-2]
raise ValueError(msg)
else:
numregs = nroisu[0]
# Get (actual) number of subjects
numsubs = len(sublist)
# Scan files to find time-series length
tlens = np.zeros((numsubs,),dtype=int)
for k in xrange(numsubs):
roi = 0
for fl in txtfiles:
if fl.count(sublist[k]+"_"): # make sure we differentiate b/w "s1_*.txt" and "s10_.txt"...
try:
ts_vec = np.loadtxt(fl)
except:
raise ValueError("Cannot read file "+fl)
if roi == 0:
tlens[k] = ts_vec.size # Subject's first TS sets our reference length
if ts_vec.size != tlens[k]:
raise ValueError("Error reading file: "+fl+\
" Expected a time-series of length "+str(tlens[k])+", "+
"but actual length is "+str(ts_vec.size))
roi += 1
# Check the lengths of the detected time-series
if tlens.min() <= 2:
        raise ValueError('Time-series of Subject '+sublist[tlens.argmin()]+' is empty or has fewer than 3 entries!')
# Allocate tensor to hold all time series
bigmat = np.zeros((tlens.max(),numregs,numsubs)) + np.nan
# Allocate tensor holding statistical dependence matrices of all subjects
corrs = np.zeros((numregs,numregs,numsubs))
# Ready to do this...
print "Extracting data and calculating "+corrtype.upper()+" coefficients"
# Cycle through subjects and save per-subject time series data column-wise
for k in xrange(numsubs):
col = 0
for fl in txtfiles:
if fl.count(sublist[k]+"_"):
ts_vec = np.loadtxt(fl)
bigmat[:tlens[k],col,k] = ts_vec
col += 1
# Compute statistical dependence based on corrtype
if corrtype == 'pearson':
corrs[:,:,k] = np.corrcoef(bigmat[:tlens[k],:,k],rowvar=0,**kwargs)
elif corrtype == 'mi':
corrs[:,:,k] = mutual_info(bigmat[:tlens[k],:,k],**kwargs)
# Happy breakdown
print "Done"
return {'corrs':corrs, 'bigmat':bigmat, 'sublist':sublist}
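# Hypothetical usage sketch for `get_corr` (the directory and subject codes below are
# made up and merely illustrate the required `sNxy_*.txt` file-naming convention):
#
#     res = get_corr("~/roi_timeseries", corrtype="pearson", sublist=["s101", "s102"])
#     corrs, bigmat, subs = res["corrs"], res["bigmat"], res["sublist"]
#     print corrs.shape      # -> (N, N, 2): one N-by-N correlation matrix per subject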
##########################################################################################
def corrcheck(*args,**kwargs):
"""
Sanity checks for statistical dependence matrices
Parameters
----------
Dynamic : Usage as follows
corrcheck(A) : input is NumPy 2darray
shows some statistics for the statistical dependence matrix `A`
corrcheck(A,label) : input is NumPy 2darray and `['string']`
shows some statistics for the matrix `A` and uses
`label`, a list containing one string, as title in figures.
corrcheck(A,B,C,...) : input are many NumPy 2darrays
shows some statistics for the statistical dependence matrices `A`, `B`, `C`,....
corrcheck(A,B,C,...,label) : input are many NumPy 2darrays and a list of strings
shows some statistics for the statistical dependence matrices `A`, `B`, `C`,....
and uses the list of strings `label` to generate titles in figures.
Note that `len(label)` has to be equal to the number of
input matrices.
corrcheck(T) : input is NumPy 3darray
shows some statistics for statistical dependence matrices stored
in the tensor `T`. The storage scheme has to be
`T[:,:,0] = A`
`T[:,:,1] = B`
`T[:,:,2] = C`
etc.
where `A`, `B`, `C`,... are matrices.
corrcheck(T,label) : input is NumPy 3darray and list of strings
shows some statistics for matrices stored
in the tensor `T`. The storage scheme has to be
`T[:,:,0] = A`
`T[:,:,1] = B`
`T[:,:,2] = C`
etc.
where `A`, `B`, `C`,... are matrices. The list of strings `label`
is used to generate titles in figures. Note that `len(label)`
has to be equal to `T.shape[2]`
corrcheck(...,title='mytitle') : input is any of the above
        same as above and uses the string `mytitle` as the window name for figures.
Returns
-------
Nothing : None
Notes
-----
None
See also
--------
None
"""
# Plotting params used later (max. #plots per row)
cplot = 5
# Sanity checks
myin = len(args)
if myin == 0:
raise ValueError('At least one input required!')
# Assign global name for all figures if provided by additional keyword argument `title`
figtitle = kwargs.get('title',None);
nofigname = False
if figtitle is None:
nofigname = True
else:
if not isinstance(figtitle,(str,unicode)):
raise ValueError('Figure title must be a string!')
# If labels have been provided, extract them now
if isinstance(args[-1],(list)):
myin -= 1
labels = args[-1]
usrlbl = 1
elif isinstance(args[-1],(str,unicode)):
myin -= 1
labels = [args[-1]]
usrlbl = 1
else:
usrlbl = 0
# Try to get shape of input
if not isinstance(args[0],np.ndarray):
raise TypeError("Expected NumPy array(s) as input, found "+type(args[0]).__name__+"!")
szin = len(args[0].shape)
# If input is a list of matrices, store them in a tensor
if szin == 2:
rw,cl = args[0].shape
if (rw != cl) or (min(args[0].shape)==1):
raise ValueError('Input matrices must be square!')
corrs = np.zeros((rw,cl,myin))
for i in xrange(myin):
if not isinstance(args[i],np.ndarray):
raise TypeError("All but last input must be NumPy arrays!")
try:
corrs[:,:,i] = args[i]
except:
raise ValueError('All input matrices must be real and of the same size!')
# If input is a tensor, there's not much to do
elif szin == 3:
if myin > 1: raise ValueError('Not more than one input tensor supported!')
shv = args[0].shape
if (min(shv[0],shv[1]) == 1) or (shv[0]!=shv[1]):
raise ValueError('Input tensor must be of the format N-by-N-by-k!')
corrs = args[0]
else:
        raise TypeError('Input has to be either a matrix/matrices or an N-by-N-by-k tensor!')
# Count number of matrices and get their dimension
nmat = corrs.shape[-1]
N = corrs.shape[0]
# Check if those matrices are real and "reasonable"
if not np.issubdtype(corrs.dtype, np.number) or not np.isreal(corrs).all():
raise ValueError("Input arrays must be real-valued!")
if np.isfinite(corrs).min() == False:
raise ValueError("All matrices must be real without NaNs or Infs!")
# Check if we're dealing with Pearson or NMI matrices (or something completely unexpected)
cmin = corrs.min(); cmax = corrs.max()
if cmax > 1 or cmin < -1:
msg = "WARNING: Input has to have values between -1/+1 or 0/+1. Found "+str(cmin)+" to "+str(cmax)
print msg
maxval = 1
if corrs.min() < 0:
minval = -1
else:
minval = 0
# If labels have been provided, check if we got enough of'em; if there are no labels, generate defaults
if (usrlbl):
if len(labels) != nmat:
raise ValueError('Numbers of labels and matrices do not match up!')
for lb in labels:
if not isinstance(lb,(str,unicode)):
raise ValueError('Labels must be provided as list of strings or a single string!')
else:
labels = ['Matrix '+str(i+1) for i in xrange(nmat)]
# Set subplot params and turn on interactive plotting
rplot = int(np.ceil(nmat/cplot))
if nmat <= cplot: cplot = nmat
plt.ion()
    # Now let's actually do something and plot the statistical dependence matrices (warn if a matrix is not symmetric)
fig = plt.figure(figsize=(8,8))
if nofigname: figtitle = fig.canvas.get_window_title()
fig.canvas.set_window_title(figtitle+': '+str(N)+' Nodes',)
for i in xrange(nmat):
plt.subplot(rplot,cplot,i+1)
im = plt.imshow(corrs[:,:,i],cmap='jet',interpolation='nearest',vmin=minval,vmax=maxval)
plt.axis('off')
plt.title(labels[i])
if issym(corrs[:,:,i]) == False:
print "WARNING: "+labels[i]+" is not symmetric!"
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.draw()
# Plot statistical dependence histograms
meanval = np.mean([minval,maxval])
idx = np.nonzero(np.triu(np.ones((N,N)),1))
NN = (N**2 - N)/2
fig = plt.figure(figsize=(8,8))
if nofigname: figtitle = fig.canvas.get_window_title()
fig.canvas.set_window_title(figtitle+': '+"Statistical Dependence Histograms")
bars = []; ylims = []
for i in xrange(nmat):
cvec = corrs[idx[0],idx[1],i]
[corrcount,corrbins] = np.histogram(cvec,bins=20,range=(minval,maxval))
bars.append(plt.subplot(rplot,cplot,i+1))
plt.bar(corrbins[:-1],corrcount/NN,width=np.abs(corrbins[0]-corrbins[1]))
ylims.append(bars[-1].get_ylim()[1])
plt.xlim(minval,maxval)
plt.xticks((minval,meanval,maxval),(str(minval),str(meanval),str(maxval)))
plt.title(labels[i])
if np.mod(i+1,cplot) == 1: plt.ylabel('Frequency')
ymax = max(ylims)
for mybar in bars: mybar.set_ylim(top=ymax)
plt.draw()
# Show negative correlations (for Pearson matrices)
if minval < 0:
fig = plt.figure(figsize=(8,8))
if nofigname: figtitle = fig.canvas.get_window_title()
fig.canvas.set_window_title(figtitle+': '+"Negative Correlations Are BLACK")
for i in xrange(nmat):
plt.subplot(rplot,cplot,i+1)
plt.imshow((corrs[:,:,i]>=0).astype(float),cmap='gray',interpolation='nearest',vmin=0,vmax=1)
plt.axis('off')
plt.title(labels[i])
plt.draw()
# Diversity
fig = plt.figure(figsize=(8,8))
if nofigname: figtitle = fig.canvas.get_window_title()
fig.canvas.set_window_title(figtitle+': '+"Diversity of Statistical Dependencies")
xsteps = np.arange(1,N+1)
stems = []; ylims = []
for i in xrange(nmat):
stems.append(plt.subplot(rplot,cplot,i+1))
varc = np.var(corrs[:,:,i],0,ddof=1)
plt.stem(xsteps,varc)
ylims.append(stems[-1].get_ylim()[1])
plt.xlim(-1,N+1)
plt.xticks((0,N),('1',str(N)))
plt.title(labels[i])
ymax = max(ylims)
for mystem in stems: mystem.set_ylim(top=ymax)
plt.draw()
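# Hypothetical usage sketch for `corrcheck` (matrix names and labels below are made up;
# `A` and `B` are assumed to be real, square NumPy arrays of identical shape):
#
#     corrcheck(A, B, ["Subject A", "Subject B"], title="Pearson matrices")
#     # or, with k matrices stacked in a tensor T of shape (N, N, k):
#     corrcheck(T, ["Matrix " + str(i + 1) for i in xrange(T.shape[2])])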
##########################################################################################
def get_meannw(nws,percval=0.0):
"""
Helper function to compute group-averaged networks
Parameters
----------
nws : NumPy 3darray
`N`-by-`N` connection matrices of `numsubs` subjects. Format is `nws.shape = (N,N,numsubs)`
such that `nws[:,:,i] = N x N` connection matrix of `i`-th subject
percval : float
Percentage value, such that connections not present in at least `percval`
percent of subjects are not considered, thus `0 <= percval <= 1`.
Default setting is `percval = 0.0`
Returns
-------
mean_wghted : NumPy 2darray
`N`-by-`N` mean value matrix of `numsubs` matrices stored in `nws` where
only connections present in at least `percval` percent of subjects
are considered
percval : float
Percentage value used to generate `mean_wghted`
Notes
-----
If the current setting of `percval` leads to a disconnected network,
    the code decreases `percval` in 5% steps to ensure connectedness of the group-averaged graph.
The concept of using only a certain percentage of edges present in subjects was taken from [1]_.
See also
--------
None
References
----------
.. [1] M. van den Heuvel, O. Sporns. Rich-Club Organization of the Human Connectome.
J. Neurosci, 31(44) 15775-15786, 2011.
"""
# Sanity checks
arrcheck(nws,'tensor','nws')
scalarcheck(percval,'percval',bounds=[0,1])
# Get shape of input tensor
N = nws.shape[0]
numsubs = nws.shape[-1]
# Remove self-connections
nws = rm_selfies(nws)
# Allocate memory for binary/weighted group averaged networks
mean_binary = np.zeros((N,N))
mean_wghted = np.zeros((N,N))
# Compute mean network and keep increasing `percval` until we get a connected mean network
docalc = True
while docalc:
# Reset matrices
mean_binary[:] = 0
mean_wghted[:] = 0
# Cycle through subjects to compute average network
for i in xrange(numsubs):
mean_binary = mean_binary + (nws[:,:,i]!=0).astype(float)
mean_wghted = mean_wghted + nws[:,:,i]
# Kick out connections not present in at least `percval%` of subjects (in binary and weighted NWs)
mean_binary = (mean_binary/numsubs > percval).astype(float)
mean_wghted = mean_wghted/numsubs * mean_binary
# Check connectedness of mean network
if degrees_und(mean_binary).min() == 0:
print "WARNING: Mean network disconnected for `percval` = "+str(np.round(1e2*percval))+"%"
if percval > 0:
print "Decreasing `percval` by 5%..."
percval -= 0.05
print "New value for `percval` is now "+str(np.round(1e2*percval))+"%"
else:
msg = "Mean network disconnected for `percval` = 0%. That means at least one node is "+\
"disconnected in ALL per-subject networks..."
raise ValueError(msg)
else:
docalc = False
return mean_wghted, percval
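# Hypothetical usage sketch (`corrs` is assumed to be an N-by-N-by-numsubs array,
# e.g., the `corrs` field returned by `get_corr` above):
#
#     mean_nw, used_percval = get_meannw(corrs, percval=0.75)
#     # `used_percval` may end up below 0.75: the routine lowers `percval` in 5% steps
#     # whenever the requested edge-agreement level would disconnect the group-averaged network.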
##########################################################################################
def rm_negatives(corrs):
"""
Remove negative entries from connection matrices
Parameters
----------
corrs : NumPy 3darray
An array of `K` matrices of dimension `N`-by-`N`. Format is `corrs.shape = (N,N,K)`,
such that `corrs[:,:,i]` is the `i`-th `N x N` matrix
Returns
-------
nws : NumPy 3darray
Same format as input tensor but `corrs >= 0`.
Notes
-----
None
See also
--------
None
"""
# Sanity checks
arrcheck(corrs,'tensor','corrs')
# See how many matrices are stacked in the array
K = corrs.shape[-1]
# Zero diagonals of matrices
for i in xrange(K):
np.fill_diagonal(corrs[:,:,i],0)
# Remove negative entries
nws = (corrs > 0)*corrs
# Check if we lost some nodes...
ndnum = str(corrs.shape[0])
for i in xrange(K):
deg = degrees_und(corrs[:,:,i])
if deg.min() == 0:
badidx = np.nonzero(deg==deg.min())[0]
print "WARNING: In network "+str(i)+" a total of "+str(badidx.size)+" out of "+ndnum+\
" node(s) got disconnected, namely vertices #"+str(badidx)
return nws
##########################################################################################
def rm_selfies(conns):
"""
Remove self-connections from connection matrices
Parameters
----------
conns : NumPy 3darray
An array of `K` connection matrices of dimension `N`-by-`N`. Format is `conns.shape = (N,N,K)`,
such that `conns[:,:,i]` is the `i`-th `N x N` connection matrix
Returns
-------
nws : NumPy 3darray
Same format as input array but `np.diag(conns[:,:,k]).min() = 0.0`.
Notes
-----
None
See also
--------
None
"""
# Sanity checks
arrcheck(conns,'tensor','conns')
# Create output quantity and zero its diagonals
nws = conns.copy()
for i in xrange(nws.shape[-1]):
np.fill_diagonal(nws[:,:,i],0)
return nws
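# Hypothetical usage sketch for the two cleanup helpers above (`corrs` as returned by
# `get_corr`): zero the diagonals first, then discard negative weights.
#
#     nws = rm_negatives(rm_selfies(corrs))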
##########################################################################################
def thresh_nws(nws,userdens=None,percval=0.0,force_den=False,span_tree=False):
"""
Threshold networks based on connection density
Parameters
----------
nws : NumPy 3darray
Undirected `N`-by-`N` (un)weighted connection matrices of `numsubs` subjects.
Format is `corrs.shape = (N,N,numsubs)` such that `corrs[:,:,i] = N x N`
connection matrix of `i`-th subject
userdens : int
By default, the input networks are thresholded down to the lowest common
connection density without disconnecting any nodes in the networks using
a relative thresholding strategy (`force_den = False` and `span_tree = False`).
If `userdens` is provided and `span_tree = False`, then `userdens`
is used as target density in the relative thresholding strategy. However,
if `userdens` is below the minimum density before networks fragment,
it will not be used unless `force_den = True`.
If `span_tree = True` and `userdens` is `None`, then maximum spanning
trees will be returned for all input networks. If `userdens` is provided,
the spanning trees will be populated with the strongest connections
found in the original networks up to the desired edge density.
For both relative thresholding and maximum spanning tree density reduction,
`userdens` should be either `None` or an integer between 0 and 100.
See Notes below for more details.
percval : float
Percentage value for computing mean network averaged across all thresholded
graphs, such that connections not present in at least `percval`
percent of subjects are not considered (`0 <= percval <= 1`).
Default setting is `percval = 0.0`. See `get_meannw` for details.
force_den : bool
If `force_den = True` relative thresholding is applied to the networks
until all graphs hit the desired density level defined by the user
even if nodes get disconnected in the process. This argument has no
effect if `span_tree = True`. By default, `force_den = False`.
span_tree : bool
If `span_tree` is `True` density reduction is performed by constructing maximum
spanning trees. If `userdens` is `None`, only spanning trees for all input networks
will be returned. If `userdens` is provided, spanning trees will be populated
with the strongest connections found in the original networks up to the
        desired edge density. Note that `force_den` is ignored if `span_tree` is `True`.
Returns
-------
Dictionary holding computed quantities. The fields of the dictionary depend upon
the values of the optional keyword arguments `userdens` and `span_tree`.
res : dict
Dictionary with fields
th_nws : NumPy 3darray
Sparse networks. Format is the same as for `nws`
(Not returned if `userdens` is `None` and `span_tree = True`).
den_values : NumPy 1darray
Density values of the networks stored in `th_nws`, such that `den_values[i]`
is the edge density of the graph `th_nws[:,:,i]`
(not returned if `userdens` is `None` and `span_tree = True`).
th_mnw : NumPy 2darray
Mean network averaged across all sparse networks `th_nws`
(not returned if `userdens` is `None` and `span_tree = True`).
mnw_percval: float
Percentage value used to compute `th_mnw` (see documentation of `get_meannw` for
details, not returned if `userdens` is `None` and `span_tree = True`).
tau_levels : NumPy 1darray
Cutoff values used in the relative thresholding strategy to compute
`th_nws`, i.e., `tau_levels[i]` is the threshold that generated
network `th_nws[:,:,i]` (only returned if `span_tree = False`).
nws_forest : NumPy 3darray
Maximum spanning trees calculated for all input networks
(only returned if `span_tree = True`).
mean_tree : NumPy 2darray
Mean spanning tree averaged across all spanning trees stored in
`nws_forest` (only returned if `span_tree = True`).
mtree_percval : float
Percentage value used to compute `mean_tree` (see documentation of `get_meannw` for
details, only returned if `span_tree = True`).
Notes
-----
This routine uses either a relative thresholding strategy or a maximum spanning tree
approach to decrease the density of a given set of input networks.
During relative thresholding (`span_tree = False`) edges are discarded based on their value relative to the
maximum edge weight found across all networks beginning with the weakest links. By default,
the thresholding algorithm uses the lowest common connection density across all input networks
before a node is disconnected as target edge density. That means, if networks `A`, `B` and `C`
can be thresholded down to 40%, 50% and 60% density, respectively, without disconnecting any
nodes, then the lowest common density for thresholding `A`, `B` and `C` together is 60%.
In this case the raw network `A` already has a density of 60% or lower, which is thus excluded
from thresholding and the original network is copied into `th_nws`. If a density level
is provided by the user, then the code tries to use it unless it violates connectedness
of all thresholded networks - in this case the lowest common density of all networks is used,
unless `force_den = True` which causes the code to employ the user-provided density level
for thresholding, disconnecting nodes from the networks in the process.
The maximum spanning tree approach (`span_tree = True`) can be interpreted as the inverse of relative
thresholding. Instead of chipping away weak edges in the input networks until a target density
is met (or nodes disconnect), a minimal backbone of the network is calculated and then
populated with the strongest connections found in the original network until a desired
edge density level is reached. The backbone of the network is calculated by computing the graph's maximum
spanning tree, that connects all nodes with the minimum number of maximum-weight edges.
Note, that unless each edge has a distinct unique weight value a graph has numerous different
maximum spanning trees. Thus, the spanning trees computed by this routine are usually *not* unique,
and consequently the thresholded networks may not be unique either (particularly for low
density levels, for which the computed populated networks are very similar to the underlying spanning trees).
Thus, in contrast to the more common relative thresholding strategy, this bottom-up approach
    makes it possible to reduce a given network's density to an almost arbitrary level
(>= density of the maximum spanning tree) without disconnecting nodes. However, unlike relative
thresholding, the computed sparse networks are not necessarily unique and strongly depend
    on the initial maximum spanning tree. Note that if `userdens` is `None`, only maximum spanning
trees will be computed.
The code below relies on the routine `get_meannw` in this module to compute the group-averaged
    network. Further, maximum spanning trees are calculated using `backbone_wu.m` from the
Brain Connectivity Toolbox (BCT) for MATLAB via Octave. Thus, it requires Octave to be installed
with the BCT in its search path. Further, `oct2py` is needed to launch an Octave instance
from within Python.
See also
--------
get_meannw : Helper function to compute group-averaged networks
backbone_wu : in the Brain Connectivity Toolbox (BCT) for MATLAB, currently available
`here <https://sites.google.com/site/bctnet/>`_
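    Examples
    --------
    A hypothetical call (no verifiable output is shown since the result depends on the
    input data, hence the skip directives; `nws` is assumed to be an N-by-N-by-numsubs
    array of non-negative, symmetric connection matrices):

    >>> res = thresh_nws(nws, userdens=20)                   # doctest: +SKIP
    >>> th_nws, taus = res['th_nws'], res['tau_levels']      # doctest: +SKIP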
"""
# Sanity checks
arrcheck(nws,'tensor','nws')
if userdens is not None:
scalarcheck(userdens,'userdens',kind='int',bounds=[0,100])
scalarcheck(percval,'percval',bounds=[0,1])
if not isinstance(force_den,bool):
raise TypeError("The optional argument `force_den` has to be Boolean!")
if not isinstance(span_tree,bool):
raise TypeError("The optional argument `span_tree` has to be Boolean!")
if force_den and span_tree:
print "\nWARNING: The flag `foce_den` has no effect if `span_tree == True`!"
# Try to import `octave` from `oct2py`
if span_tree:
try:
from oct2py import octave
except:
errmsg = "Could not import octave from oct2py! "+\
"To compute the maximum spanning tree octave must be installed and in the search path. "+\
"Furthermore, the Brain Connectivity Toolbox (BCT) for MATLAB must be installed "+\
"in the octave search path. "
raise ImportError(errmsg)
# Get dimension of per-subject networks
N = nws.shape[0]
numsubs = nws.shape[-1]
# Zero diagonals and check for symmetry
for i in xrange(numsubs):
np.fill_diagonal(nws[:,:,i],0)
if issym(nws[:,:,i]) == False:
raise ValueError("Matrix "+str(i)+" is not symmetric!")
# Get max. and min. weights (min weight should be >= 0 otherwise the stuff below makes no sense...)
maxw = nws.max()
if nws.min() < 0:
raise ValueError('Only non-negative weights supported!')
# Allocate vector for original densities
raw_den = np.zeros((numsubs,))
# Compute densities of raw networks
for i in xrange(numsubs):
raw_den[i] = density_und(nws[:,:,i])
# Compute min/max density in raw data
min_raw = int(np.floor(1e2*raw_den.min()))
max_raw = int(np.ceil(1e2*raw_den.max()))
# Break if a nw has density zero or if max. density is below desired dens.
if min_raw == 0:
raise ValueError('Network '+str(raw_den.argmin())+' has density 0%!')
if userdens >= max_raw:
print "All networks have density lower than desired density "+str(userdens)+"%"
th_mnw,mnw_percval = get_meannw(nws,percval)
res_dict = {'th_nws':nws, 'den_values': raw_den, \
'th_mnw': th_mnw, 'mnw_percval': mnw_percval}
# The structure of `backbone_wu.m` requires *exact* symmetry...
if span_tree:
nws_forest = np.zeros(nws.shape)
for i in xrange(numsubs):
mnw = nws[:,:,i].squeeze()
mnw = np.triu(mnw,1)
nws_forest[:,:,i] = octave.backbone_wu(mnw + mnw.T,2)
mean_tree, mtree_percval = get_meannw(nws_forest,percval)
res_dict['nws_forest'] = nws_forest
res_dict['mean_tree'] = mean_tree
res_dict['mtree_percval'] = mtree_percval
else:
res_dict['tau_levels'] = None
return res_dict
# Inform user about minimal/maximal density in raw data
print "\nRaw data has following density values: \n"
print "\tMinimal density: "+str(min_raw)+"%"
print "\tMaximal density: "+str(max_raw)+"%"
# Allocate space for output (needed for both regular thresholding and de-foresting)
th_nws = np.zeros(nws.shape)
den_values = np.zeros((numsubs,))
th_mnw = np.zeros((N,N))
# Maximum spanning tree shenanigans
if span_tree:
# Allocate space for the spanning trees
nws_forest = np.zeros(nws.shape)
# If no target density was provided, just compute trees and get out of here
if userdens is None:
print "\nCalculating maximum spanning trees..."
for i in xrange(numsubs):
mnw = nws[:,:,i].squeeze()
mnw = np.triu(mnw,1)
nws_forest[:,:,i] = octave.backbone_wu(mnw + mnw.T,2)
mean_tree,mtree_percval = get_meannw(nws_forest,percval)
return {'nws_forest': nws_forest, 'mean_tree': mean_tree, 'mtree_percval': mtree_percval}
else:
# The edge density `d` of an undirected network is given by
# (1) `d = 2*K/(N**2 - N)`,
# where `K` denotes the number of edges in the network. Thus, `K` can be approximated by
# (2) `N*avdg/2`,
            # with `avdg` denoting the average nodal degree in the graph (divide by two
            # so that undirected links i <-> j are not counted twice as i -> j and j -> i).
# Thus, substituting (2) for `K` in (1) and re-arranging terms yields
# `avdg = d*(N - 1)`. Thus, for a user-provided density value, we can compute
# the associated average degree of the wanted target network as
# avdg = np.round(userdens/100*(N**2 - N)/N)
avdg = np.round(userdens/100*(N - 1))
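            # Illustrative sanity check of the formula above (numbers made up): for
            # N = 100 nodes and userdens = 10, avdg = round(0.10*99) = 10, i.e. every
            # node keeps on average 10 of its 99 possible connections.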
print "\nReducing network densities to "+str(userdens)+"% by inversely populating maximum spanning trees..."
# Use this average degree value to cut down input networks to desired density
for i in xrange(numsubs):
mnw = nws[:,:,i].squeeze()
mnw = np.triu(mnw,1)
raw_dper = int(np.round(1e2*raw_den[i]))
if raw_dper <= userdens:
print "Density of raw network #"+str(i+1)+" is "+str(raw_dper)+"%"+\
" which is already lower than thresholding density of "+str(userdens)+"%"
print "Returning original unthresholded network"
th_nws[:,:,i] = nws[:,:,i].copy()
den_values[i] = raw_den[i]
nws_forest[:,:,i] = octave.backbone_wu(mnw + mnw.T.squeeze(),2)
else:
nws_forest[:,:,i], th_nws[:,:,i] = octave.backbone_wu(mnw + mnw.T, avdg, nout=2)
den_values[i] = density_und(th_nws[:,:,i])
mean_tree,mtree_percval = get_meannw(nws_forest,percval)
# Populate results dictionary with method-specific quantities
res_dict = {'nws_forest': nws_forest, 'mean_tree': mean_tree, 'mtree_percval': mtree_percval}
# Here the good ol' relative weight thresholding
else:
# Allocate space for thresholds and thresholding stepsize
tau_levels = np.zeros((numsubs,))
dt = 1e-3
# Compute minimal admissible density per network
for i in xrange(numsubs):
mnw = nws[:,:,i]
tau = mnw.max(axis=0).min()
mnw = mnw*(mnw >= tau)
th_nws[:,:,i] = mnw.copy()
den_values[i] = density_und(mnw)
tau_levels[i] = tau - dt
# Compute minimal density before fragmentation across all subjects
densities = np.round(1e2*den_values)
print "\nMinimal admissible densities of per-subject networks are as follows: "