-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpublications.html
More file actions
1099 lines (941 loc) · 80.8 KB
/
publications.html
File metadata and controls
1099 lines (941 loc) · 80.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<!-- This site was created in Webflow. http://www.webflow.com -->
<!-- Last Published: Fri Mar 09 2018 17:05:10 GMT+0000 (UTC) -->
<!-- lang="en": the visible page content (title, navigation, publication list) is English,
     so screen readers should use English pronunciation rules. -->
<html data-wf-page="59a4444cf0b9de0001da1f74" data-wf-site="59a4444cf0b9de0001da1f73" lang="en">
<head>
<meta charset="utf-8">
<title>Coen de Vente • Publications</title>
<meta property="og:title" content="Coen de Vente • Publications" />
<meta content="width=device-width, initial-scale=1" name="viewport">
<meta content="Webflow" name="generator">
<link href="css/normalize.css?1747294411" rel="stylesheet" type="text/css">
<link href="css/webflow.css?1747294411" rel="stylesheet" type="text/css">
<link href="css/websify.webflow.css?1747294411" rel="stylesheet" type="text/css">
<script src="https://ajax.googleapis.com/ajax/libs/webfont/1.4.7/webfont.js" type="text/javascript"></script>
<script type="text/javascript">
  // Asynchronously load the Google-hosted font families used by this site
  // (Roboto and Raleway, all weights) via the WebFont loader included above.
  (function () {
    var families = [
      "Roboto:100,100italic,300,300italic,regular,italic,500,500italic,700,700italic,900,900italic",
      "Raleway:100,100italic,200,200italic,300,300italic,regular,italic,500,500italic,600,600italic,700,700italic,800,800italic,900,900italic"
    ];
    WebFont.load({ google: { families: families } });
  })();
</script>
<!--[if lt IE 9]><script src="https://cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv.min.js" type="text/javascript"></script><![endif]-->
<script type="text/javascript">
  // Webflow feature detection: tag <html> with "w-mod-js" (JS is enabled)
  // and, when a touch interface is present, also with "w-mod-touch".
  (function (win, doc) {
    var root = doc.documentElement;
    var prefix = " w-mod-";
    root.className += prefix + "js";
    if ("ontouchstart" in win || (win.DocumentTouch && doc instanceof DocumentTouch)) {
      root.className += prefix + "touch";
    }
  })(window, document);
</script>
<link href="images/favicon.png" rel="shortcut icon" type="image/x-icon">
<link href="images/favicon.png" rel="apple-touch-icon">
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-K0837QE6PJ"></script>
<script>
// Google Analytics 4 bootstrap (gtag.js); the async loader script above
// consumes window.dataLayer.
// NOTE: gtag() must push the raw `arguments` object (not a spread/array) —
// GA's snippet relies on it, so this function must stay exactly as written.
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-K0837QE6PJ');
</script>
<script src="https://cdn.jsdelivr.net/npm/lazyload@2.0.0-beta.1/lazyload.js"></script>
<meta name="description" content="Hi! I'm Coen, researcher who is enthusiastic about machine learning, computer vision and health.">
<meta property="og:description" content="Hi! I'm Coen, researcher who is enthusiastic about machine learning, computer vision and health.">
</head>
<body class="body">
<div data-collapse="medium" data-animation="default" data-duration="400" data-ix="changecolorscroll" class="navbar colored w-nav">
  <div class="container w-container">
    <!-- Brand wrapper is a <div>, not an <a>: the original nested the logo
         link inside another anchor (invalid HTML — anchors cannot contain
         anchors) and never closed the outer one. The styling classes stay
         on the wrapper so the rendered appearance is unchanged. -->
    <div class="brand w-nav-brand">
      <a href="index.html" class="navlink w-nav-link no-text-transform text-logo">Coen de Vente</a>
    </div>
    <nav role="navigation" class="nav-menu colored w-nav-menu" aria-label="Primary">
      <a href="index.html" class="navlink w-nav-link">Home</a>
      <a href="projects.html" class="navlink w-nav-link">Posts</a>
      <a href="publications.html" class="navlink w-nav-link" aria-current="page">Publications</a>
      <!-- <a href="cv.html" class="navlink over w-nav-link">CV</a> -->
      <a href="about.html" class="navlink over w-nav-link">About</a>
      <a href="contact.html" class="navlink w-nav-link">Contact</a>
    </nav>
    <div class="menu-button w-nav-button">
      <div class="colored hamburgericon w-icon-nav-menu"></div>
    </div>
  </div>
</div><!-- <div class="bgtarget headshort"></div> -->
<div class="section-6">
  <div class="w-row">
    <!-- Page title banner -->
    <div class="w-col" style="width: 1400px; max-width: 100vw;">
      <h1 class="heading-8">Publications</h1>
    </div>
  </div>
</div>
<div class="section-7 section-move-up">
<div class="w-dyn-list">
<div class="collection-list colophome listforshowall w-dyn-items w-row">
<div class="div-block-27 sameheight0source">
<div class="row-5 w-row">
<div data-ix="flowinslow" class="column-9 w-col w-col-8">
<h2 class="pub-head">International journal articles</h2>
<div class="publication-card">
<p>J. Hensman, Y. El Allali, H. Almushattat, <span class='highlight-me'>C. de Vente</span>, C.I. Sánchez and C.J. Boon. "Deep learning model for detecting cystoid fluid collections on optical coherence tomography in X-linked retinoschisis patients", <i>Acta Ophthalmologica</i>, 2025.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-hensman2025deep-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a></div>
</div>
<div class="publication-card">
<p>R. Schwartz, A.N. Warwick, A.P. Khawaja, R. Luben, H. Khalid, S. Phatak, M. Jhingan, <span class='highlight-me'>C. de Vente</span>, P. Valmaggia, S. Liakopoulos and others. "Genetic Distinctions Between Reticular Pseudodrusen and Drusen: A Genome-Wide Association Study", <i>American Journal of Ophthalmology</i>, 2025.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-schwartz2025genetic-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, B. van Ginneken, C.B. Hoyng, C.C.W. Klaver and C.I. Sánchez. "Uncertainty-aware multiple-instance learning for reliable classification: Application to optical coherence tomography", <i>Medical Image Analysis</i>, 2024;97:103259.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent24-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://doi.org/10.1016/j.media.2024.103259">DOI</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://www.sciencedirect.com/science/article/pii/S1361841524001841">URL</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, P. Valmaggia, C.B. Hoyng, F.G. Holz, M.M. Islam, C.C. Klaver, C.J. Boon, S. Schmitz-Valckenberg, A. Tufail, M. Saßmannshausen and others. "Generalizable Deep Learning for the Detection of Incomplete and Complete Retinal Pigment Epithelium and Outer Retinal Atrophy: A MACUSTAR Report", <i>Translational Vision Science & Technology</i>, 2024;13(9):11-11.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-de2024generalizable-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, K.A. Vermeer, N. Jaccard, H. Wang, H. Sun, F. Khader, D. Truhn, T. Aimyshev, Y. Zhanibekuly, T. Le, A. Galdran, M.Á. González Ballester, G. Carneiro, D.R. G, H.P. S, D. Puthussery, H. Liu, Z. Yang, S. Kondo, S. Kasai, E. Wang, A. Durvasula, J. Heras, M.Á. Zapata, T. Araújo, G. Aresta, H. Bogunović, M. Arikan, Y.C. Lee, H.B. Cho, Y.H. Choi, A. Qayyum, I. Razzak, B. van Ginneken, H.G. Lemij and C.I. Sánchez. "AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge", <i>IEEE Transactions on Medical Imaging</i>, 2023;43(1):542-557.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent23a-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://doi.org/10.1109/TMI.2023.3313786">DOI</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="http://www.ncbi.nlm.nih.gov/pubmed/37713220/">PMID</a></div>
</div>
<div class="publication-card">
<p>H.G. Lemij, <span class='highlight-me'>C. de Vente</span>, C.I. Sánchez and K.A. Vermeer. "Characteristics of a large, labeled dataset for the training of artificial intelligence for glaucoma screening with fundus photographs", <i>Ophthalmology Science</i>, 2023;100300.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-lemi23-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://doi.org/10.1016/j.xops.2023.100300">DOI</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, L.H. Boulogne, K. Vaidhya Venkadesh, C. Sital, N. Lessmann, C. Jacobs, C.I. Sánchez and B. van Ginneken. "Automated COVID-19 Grading with Convolutional Neural Networks in Computed Tomography Scans: A Systematic Comparison", <i>IEEE Transactions on Artificial Intelligence</i>, 2022;3(2):129-138.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent21-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://doi.org/10.1109/TAI.2021.3115093">DOI</a></div>
</div>
<div class="publication-card">
<p>R. Schwartz, H. Khalid, S. Liakopoulos, Y. Ouyang, <span class='highlight-me'>C. de Vente</span>, C. González-Gonzalo, A.Y. Lee, R. Guymer, E.Y. Chew, C. Egan and others. "A Deep Learning Framework for the Detection and Quantification of Reticular Pseudodrusen and Drusen on Optical Coherence Tomography", <i>Translational Vision Science & Technology</i>, 2022;11(12):3-3.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-schw22-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://doi.org/10.1167/tvst.11.12.3">DOI</a></div>
</div>
<div class="publication-card">
<p>N. Lessmann, C.I. Sánchez, L. Beenen, L.H. Boulogne, M. Brink, E. Calli, J. Charbonnier, T. Dofferhoff, W.M. van Everdingen, P.K. Gerke, B. Geurts, H.A. Gietema, M. Groeneveld, L. van Harten, N. Hendrix, W. Hendrix, H.J. Huisman, I. Isgum, C. Jacobs, R. Kluge, M. Kok, J. Krdzalic, B. Lassen-Schmidt, K. van Leeuwen, J. Meakin, M. Overkamp, T. van Rees Vellinga, E.M. van Rikxoort, R. Samperna, C. Schaefer-Prokop, S. Schalekamp, E.T. Scholten, C. Sital, L. Stöger, J. Teuwen, K. Vaidhya Venkadesh, <span class='highlight-me'>C. de Vente</span>, M. Vermaat, W. Xie, B. de Wilde, M. Prokop and B. van Ginneken. "Automated Assessment of COVID-19 Reporting and Data System and Chest CT Severity Scores in Patients Suspected of Having COVID-19 Using Artificial Intelligence", <i>Radiology</i>, 2021;298(1):E18-E28.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-less20-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://doi.org/10.1148/radiol.2020202439">DOI</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="http://www.ncbi.nlm.nih.gov/pubmed/32729810/">PMID</a></div>
</div>
<div class="publication-card">
<p>Z. Xiong, Q. Xia, Z. Hu, N. Huang, C. Bian, Y. Zheng, S. Vesal, N. Ravikumar, A. Maier, X. Yang, P. Heng, D. Ni, C. Li, Q. Tong, W. Si, E. Puybareau, Y. Khoudli, T. Graud, C. Chen, W. Bai, D. Rueckert, L. Xu, X. Zhuang, X. Luo, S. Jia, M. Sermesant, Y. Liu, K. Wang, D. Borra, A. Masci, C. Corsi, <span class='highlight-me'>C. de Vente</span>, M. Veta, R. Karim, C. Jayachandran Preetha, S. Engelhardt, M. Qiao, Y. Wang, Q. Tao, M. Nuñez-Garcia, O. Camara, N. Savioli, P. Lamata and J. Zhao. "A global benchmark of algorithms for segmenting the left atrium from late gadolinium-enhanced cardiac magnetic resonance imaging", <i>Medical Image Analysis</i>, 2021;67:101832.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-xion21-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="http://www.ncbi.nlm.nih.gov/pubmed/33166776/">PMID</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, P. Vos, M. Hosseinzadeh, J. Pluim and M. Veta. "Deep learning regression for prostate cancer detection and grading in bi-parametric MRI", <i>IEEE Transactions on Biomedical Engineering</i>, 2020;68(2):374-383.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent20a-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://doi.org/10.1109/TBME.2020.2993528">DOI</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="http://www.ncbi.nlm.nih.gov/pubmed/32396068/">PMID</a></div>
</div>
<h2 class="pub-head">Preprints</h2>
<div class="publication-card">
<p>R. Schwartz, A.N. Warwick, A.P. Khawaja, R. Luben, H. Khalid, S. Phatak, M. Jhingan, <span class='highlight-me'>C. de Vente</span>, P. Valmaggia, S. Liakopoulos and others. "Genetic Distinctions Between Reticular Pseudodrusen and Drusen: Insights from a Genome-Wide Association Study", <i>medRxiv:10.1101/2024.09.18.24313862v1</i>, 2024;2024-09.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-schwartz2024genetic-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://medrxiv.org/content/10.1101/2024.09.18.24313862v1/">medRxiv</a></div>
</div>
<h2 class="pub-head">Abstracts</h2>
<div class="publication-card">
<p>R. Schwartz, A. Olvera-Barrios, A.N. Warwick, H. Khalid, S. Phatak, M. Jhingan, <span class='highlight-me'>C. De Vente</span>, P. Valmaggia, C. Sanchez, C.A. Egan and others. "Increasing Stroke Risk Correlated with Higher Reticular Pseudodrusen Counts: Evidence from the UK Biobank", in: <i>Association for Research in Vision and Ophthalmology</i>, 2024.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-schwartz2024increasing-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, A. Tufail, S. Schmitz-Valckenberg, M. Saßmannshausen, C. Hoyng and C.I. Sánchez on behalf of the MACUSTAR consortium. "OCT Super-Resolution for Data Standardization using AI: A MACUSTAR report", in: <i>Association for Research in Vision and Ophthalmology</i>, 2023.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent23c-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://iovs.arvojournals.org/article.aspx?articleid=2789903">URL</a></div>
</div>
<div class="publication-card">
<p>H.G. Lemij, <span class='highlight-me'>C. de Vente</span>, C.I. Sánchez, J. Cuadros, N. Jaccard and K. Vermeer. "Glaucomatous features in fundus photographs of eyes with 'Referable glaucoma' of a large population based labeled data set for training an Artificial Intelligence (AI) algorithm for glaucoma screening", in: <i>Association for Research in Vision and Ophthalmology</i>, 2022.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-lemi22-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://iovs.arvojournals.org/article.aspx?articleid=2782322">URL</a></div>
</div>
<div class="publication-card">
<p>R. Schwartz, H. Khalid, S. Liakopoulos, Y. Ouyang, <span class='highlight-me'>C. de Vente</span>, C.G. Gonzalo, A.Y. Lee, C.A. Egan, C.I. Sánchez and A. Tufail. "A deep learning pipeline for the detection and quantification of drusen and reticular pseudodrusen on optical coherence tomography", in: <i>Association for Research in Vision and Ophthalmology</i>, 2022.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-schw22a-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://iovs.arvojournals.org/article.aspx?articleid=2781366">URL</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, K. Vermeer, N. Jaccard, H.G. Lemij and C.I. Sánchez. "AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge", in: <i>Imaging and Morphometry Association for Glaucoma in Europe</i>, 2022.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent22-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://drive.google.com/file/d/11DfeyNA8I4UVZGkhn_NVIOIGbcEesfCw/view?usp=sharing">URL</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, C. González-Gonzalo, E.F. Thee, M. van Grinsven, C.C. Klaver and C.I. Sánchez. "Making AI Transferable Across OCT Scanners from Different Vendors", in: <i>Association for Research in Vision and Ophthalmology</i>, 2021.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent21a-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://iovs.arvojournals.org/article.aspx?articleid=2775505">URL</a></div>
</div>
<div class="publication-card">
<p>C. González-Gonzalo, E.F. Thee, B. Liefers, <span class='highlight-me'>C. de Vente</span>, C.C. Klaver and C.I. Sánchez. "Hierarchical curriculum learning for robust automated detection of low-prevalence retinal disease features: application to reticular pseudodrusen", in: <i>Association for Research in Vision and Ophthalmology</i>, 2021.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-gonz21-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://iovs.arvojournals.org/article.aspx?articleid=2773295">URL</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, M. van Grinsven, S. De Zanet, A. Mosinska, R. Sznitman, C. Klaver and C.I. Sánchez. "Estimating Uncertainty of Deep Neural Networks for Age-related Macular Degeneration Grading using Optical Coherence Tomography", in: <i>Association for Research in Vision and Ophthalmology</i>, 2020.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent20-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a> <a data-ix="goupbox" target="_blank" class="knop footerknop movewithmouse w-button publication-button" href="https://iovs.arvojournals.org/article.aspx?articleid=2769262">URL</a></div>
</div>
<div class="publication-card">
<p>A. Ardu, B. Liefers, <span class='highlight-me'>C. de Vente</span>, C. González-Gonzalo, C. Klaver and C.I. Sánchez. "Artificial Intelligence for the Classification and Quantification of Reticular Pseudodrusen in Multimodal Retinal Images", in: <i>European Society of Retina Specialists</i>, 2020.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-ardu20-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a></div>
</div>
<h2 class="pub-head">Papers in conference proceedings</h2>
<div class="publication-card">
<p>M.M. Islam, <span class='highlight-me'>C. de Vente</span>, B. Liefers, C. Klaver, E.J. Bekkers and C.I. Sánchez. "Uncertainty-aware retinal layer segmentation in OCT through probabilistic signed distance functions", in: <i>Medical Imaging with Deep Learning</i>, 2024.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-isla24-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a></div>
</div>
<div class="publication-card">
<p><span class='highlight-me'>C. de Vente</span>, M. Veta, O. Razeghi, S. Niederer, J. Pluim, K. Rhode and R. Karim. "Convolutional neural networks for segmentation of the left atrium from gadolinium-enhancement MRI images", in: <i>International Workshop on Statistical Atlases and Computational Models of the Heart</i>, 2018, pages 348-356.</p><div class="publication-button-group"><a data-ix="goupbox" id="publication-modal-vent18-button" class="knop footerknop movewithmouse w-button publication-button">Cite</a></div>
</div>
</div>
<div data-ix="flowinslow" class="column-10 w-clearfix w-col w-col-4"></div>
</div>
</div>
</div>
</div>
</div>
<div id="publication-modal-hensman2025deep" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-hensman2025deep">×</span>
<p>J. Hensman, Y. El Allali, H. Almushattat, C. de Vente, C.I. Sánchez and C.J. Boon. "Deep learning model for detecting cystoid fluid collections on optical coherence tomography in X-linked retinoschisis patients", <i>Acta Ophthalmologica</i>, 2025.</p>
<pre id="publication-modal-hensman2025deep-pre">@article{hensman2025deep,
title={Deep learning model for detecting cystoid fluid collections on optical coherence tomography in X-linked retinoschisis patients},
author={Hensman, Jonathan and El Allali, Yasmine and Almushattat, Hind and de Vente, Coen and S{\'a}nchez, Clara I and Boon, Camiel JF},
journal={Acta Ophthalmologica},
year={2025},
publisher={Wiley Online Library}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-hensman2025deep')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon" alt=""> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-schwartz2025genetic" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-schwartz2025genetic">×</span>
<p>R. Schwartz, A.N. Warwick, A.P. Khawaja, R. Luben, H. Khalid, S. Phatak, M. Jhingan, C. de Vente, P. Valmaggia, S. Liakopoulos and others. "Genetic Distinctions Between Reticular Pseudodrusen and Drusen: A Genome-Wide Association Study", <i>American Journal of Ophthalmology</i>, 2025.</p>
<pre id="publication-modal-schwartz2025genetic-pre">@article{schwartz2025genetic,
title={Genetic Distinctions Between Reticular Pseudodrusen and Drusen: A Genome-Wide Association Study},
author={Schwartz, Roy and Warwick, Alasdair N and Khawaja, Anthony P and Luben, Robert and Khalid, Hagar and Phatak, Sumita and Jhingan, Mahima and de Vente, Coen and Valmaggia, Philippe and Liakopoulos, Sandra and others},
journal={American Journal of Ophthalmology},
year={2025},
publisher={Elsevier}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-schwartz2025genetic')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon" alt=""> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent24" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent24">×</span>
<p>C. de Vente, B. van Ginneken, C.B. Hoyng, C.C.W. Klaver and C.I. Sánchez. "Uncertainty-aware multiple-instance learning for reliable classification: Application to optical coherence tomography", <i>Medical Image Analysis</i>, 2024;97:103259.</p>
<pre id="publication-modal-vent24-pre">@article{Vent24,
title = {Uncertainty-aware multiple-instance learning for reliable classification: Application to optical coherence tomography},
journal ={Medical Image Analysis},
volume = {97},
pages = {103259},
year = {2024},
issn = {1361-8415},
doi = {10.1016/j.media.2024.103259},
url = {https://www.sciencedirect.com/science/article/pii/S1361841524001841},
author = {de Vente, Coen and Bram {van Ginneken} and Carel B. Hoyng and Caroline C.W. Klaver and Clara I. Sánchez},
keywords = {Out-of-distribution detection, Generalizability, Interpretability, Optical coherence tomography},
scholar_id = {2083973390038249415}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent24')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon" alt=""> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-de2024generalizable" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-de2024generalizable">×</span>
<p>C. de Vente, P. Valmaggia, C.B. Hoyng, F.G. Holz, M.M. Islam, C.C. Klaver, C.J. Boon, S. Schmitz-Valckenberg, A. Tufail, M. Saßmannshausen and others. "Generalizable Deep Learning for the Detection of Incomplete and Complete Retinal Pigment Epithelium and Outer Retinal Atrophy: A MACUSTAR Report", <i>Translational Vision Science & Technology</i>, 2024;13(9):11-11.</p>
<pre id="publication-modal-de2024generalizable-pre">@article{de2024generalizable,
title={Generalizable Deep Learning for the Detection of Incomplete and Complete Retinal Pigment Epithelium and Outer Retinal Atrophy: A MACUSTAR Report},
author={de Vente, Coen and Valmaggia, Philippe and Hoyng, Carel B and Holz, Frank G and Islam, Mohammad M and Klaver, Caroline CW and Boon, Camiel JF and Schmitz-Valckenberg, Steffen and Tufail, Adnan and Sa{\ss}mannshausen, Marlene and others},
journal={Translational Vision Science & Technology},
volume={13},
number={9},
pages={11--11},
year={2024},
publisher={The Association for Research in Vision and Ophthalmology},
scholar_id={11005501908408658331}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-de2024generalizable')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon" alt=""> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent23a" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent23a">×</span>
<p>C. de Vente, K.A. Vermeer, N. Jaccard, H. Wang, H. Sun, F. Khader, D. Truhn, T. Aimyshev, Y. Zhanibekuly, T. Le, A. Galdran, M.Á. González Ballester, G. Carneiro, D.R. G, H.P. S, D. Puthussery, H. Liu, Z. Yang, S. Kondo, S. Kasai, E. Wang, A. Durvasula, J. Heras, M.Á. Zapata, T. Araújo, G. Aresta, H. Bogunović, M. Arikan, Y.C. Lee, H.B. Cho, Y.H. Choi, A. Qayyum, I. Razzak, B. van Ginneken, H.G. Lemij and C.I. Sánchez. "AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge", <i>IEEE Transactions on Medical Imaging</i>, 2023;43(1):542-557.</p>
<pre id="publication-modal-vent23a-pre">@article{Vent23a,
title={AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge},
author={de Vente, Coen and Vermeer, Koenraad A. and Jaccard, Nicolas and Wang, He and Sun, Hongyi and Khader, Firas and Truhn, Daniel and Aimyshev, Temirgali and Zhanibekuly, Yerkebulan and Le, Tien-Dung and Galdran, Adrian and González Ballester, Miguel Ángel and Carneiro, Gustavo and G, Devika R and S, Hrishikesh P and Puthussery, Densen and Liu, Hong and Yang, Zekang and Kondo, Satoshi and Kasai, Satoshi and Wang, Edward and Durvasula, Ashritha and Heras, Jónathan and Zapata, Miguel Ángel and Araújo, Teresa and Aresta, Guilherme and Bogunović, Hrvoje and Arikan, Mustafa and Lee, Yeong Chan and Cho, Hyun Bin and Choi, Yoon Ho and Qayyum, Abdul and Razzak, Imran and van Ginneken, Bram and Lemij, Hans G. and Sánchez, Clara I.},
journal={IEEE Transactions on Medical Imaging},
year={2023},
scholar_id={12607038868340347612,10290591934597328501,16965053761187170060,12629941101464488854},
publisher={IEEE},
volume={43},
number={1},
pages={542-557},
doi={10.1109/TMI.2023.3313786},
pmid={37713220}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent23a')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-lemi23" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-lemi23">×</span>
<p>H.G. Lemij, C. de Vente, C.I. Sánchez and K.A. Vermeer. "Characteristics of a large, labeled dataset for the training of artificial intelligence for glaucoma screening with fundus photographs", <i>Ophthalmology Science</i>, 2023;100300.</p>
<pre id="publication-modal-lemi23-pre">@article{Lemi23,
title={Characteristics of a large, labeled dataset for the training of artificial intelligence for glaucoma screening with fundus photographs},
author={Lemij, Hans G and de Vente, Coen and S{\'a}nchez, Clara I and Vermeer, Koen A},
journal={Ophthalmology Science},
pages={100300},
year={2023},
publisher={Elsevier},
doi={10.1016/j.xops.2023.100300},
scholar_id={2038978167705297420}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-lemi23')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent21" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent21">×</span>
<p>C. de Vente, L.H. Boulogne, K. Vaidhya Venkadesh, C. Sital, N. Lessmann, C. Jacobs, C.I. Sánchez and B. van Ginneken. "Automated COVID-19 Grading with Convolutional Neural Networks in Computed Tomography Scans: A Systematic Comparison", <i>IEEE Transactions on Artificial Intelligence</i>, 2022;3(2):129-138.</p>
<pre id="publication-modal-vent21-pre">@article{Vent21,
author = {Coen de Vente and Luuk H. Boulogne and Kiran Vaidhya Venkadesh and Cheryl Sital and Nikolas Lessmann and Colin Jacobs and Clara I. S\'{a}nchez and Bram van Ginneken},
title = {Automated COVID-19 Grading with Convolutional Neural Networks in Computed Tomography Scans: A Systematic Comparison},
journal ={IEEE Transactions on Artificial Intelligence},
year={2022},
volume={3},
number={2},
pages={129-138},
doi={10.1109/TAI.2021.3115093},
scholar_id = {6795267621829470688,3751302600748129411,11993509375043963622}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent21')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-schw22" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-schw22">×</span>
<p>R. Schwartz, H. Khalid, S. Liakopoulos, Y. Ouyang, C. de Vente, C. González-Gonzalo, A.Y. Lee, R. Guymer, E.Y. Chew, C. Egan and others. "A Deep Learning Framework for the Detection and Quantification of Reticular Pseudodrusen and Drusen on Optical Coherence Tomography", <i>Translational Vision Science & Technology</i>, 2022;11(12):3-3.</p>
<pre id="publication-modal-schw22-pre">@article{Schw22,
title={A Deep Learning Framework for the Detection and Quantification of Reticular Pseudodrusen and Drusen on Optical Coherence Tomography},
author={Schwartz, Roy and Khalid, Hagar and Liakopoulos, Sandra and Ouyang, Yanling and de Vente, Coen and Gonz{\'a}lez-Gonzalo, Cristina and Lee, Aaron Y and Guymer, Robyn and Chew, Emily Y and Egan, Catherine and others},
journal={Translational Vision Science & Technology},
volume={11},
number={12},
pages={3--3},
year={2022},
doi={10.1167/tvst.11.12.3},
publisher={The Association for Research in Vision and Ophthalmology},
scholar_id={15646198235825211737,5612937199194471457}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-schw22')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-less20" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-less20">×</span>
<p>N. Lessmann, C.I. Sánchez, L. Beenen, L.H. Boulogne, M. Brink, E. Calli, J. Charbonnier, T. Dofferhoff, W.M. van Everdingen, P.K. Gerke, B. Geurts, H.A. Gietema, M. Groeneveld, L. van Harten, N. Hendrix, W. Hendrix, H.J. Huisman, I. Isgum, C. Jacobs, R. Kluge, M. Kok, J. Krdzalic, B. Lassen-Schmidt, K. van Leeuwen, J. Meakin, M. Overkamp, T. van Rees Vellinga, E.M. van Rikxoort, R. Samperna, C. Schaefer-Prokop, S. Schalekamp, E.T. Scholten, C. Sital, L. Stöger, J. Teuwen, K. Vaidhya Venkadesh, C. de Vente, M. Vermaat, W. Xie, B. de Wilde, M. Prokop and B. van Ginneken. "Automated Assessment of COVID-19 Reporting and Data System and Chest CT Severity Scores in Patients Suspected of Having COVID-19 Using Artificial Intelligence", <i>Radiology</i>, 2021;298(1):E18-E28.</p>
<pre id="publication-modal-less20-pre">@article{Less20,
author = {Lessmann, Nikolas and S\'{a}nchez, Clara I. and Beenen, Ludo and Boulogne, Luuk H. and Brink, Monique and Calli, Erdi and Charbonnier, Jean-Paul and Dofferhoff, Ton and van Everdingen, Wouter M. and Gerke, Paul K. and Geurts, Bram and Gietema, Hester A. and Groeneveld, Miriam and van Harten, Louis and Hendrix, Nils and Hendrix, Ward and Huisman, Henkjan J. and Isgum, Ivana and Jacobs, Colin and Kluge, Ruben and Kok, Michel and Krdzalic, Jasenko and Lassen-Schmidt, Bianca and van Leeuwen, Kicky and Meakin, James and Overkamp, Mike and van Rees Vellinga, Tjalco and van Rikxoort, Eva M. and Samperna, Riccardo and Schaefer-Prokop, Cornelia and Schalekamp, Steven and Scholten, Ernst Th. and Sital, Cheryl and St\"{o}ger, Lauran and Teuwen, Jonas and Vaidhya Venkadesh, Kiran and de Vente, Coen and Vermaat, Marieke and Xie, Weiyi and de Wilde, Bram and Prokop, Mathias and van Ginneken, Bram},
title = {Automated Assessment of {COVID}-19 Reporting and Data System and Chest {CT} Severity Scores in Patients Suspected of Having {COVID}-19 Using Artificial Intelligence},
journal ={Radiology},
year = {2021},
volume = {298},
number = {1},
pages = {E18--E28},
doi = {10.1148/radiol.2020202439},
pmid = {32729810},
algorithm = {https://grand-challenge.org/algorithms/corads-ai/},
scholar_id = {13623399645587834950,2370993400684076548}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-less20')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-xion21" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-xion21">×</span>
<p>Z. Xiong, Q. Xia, Z. Hu, N. Huang, C. Bian, Y. Zheng, S. Vesal, N. Ravikumar, A. Maier, X. Yang, P. Heng, D. Ni, C. Li, Q. Tong, W. Si, E. Puybareau, Y. Khoudli, T. Graud, C. Chen, W. Bai, D. Rueckert, L. Xu, X. Zhuang, X. Luo, S. Jia, M. Sermesant, Y. Liu, K. Wang, D. Borra, A. Masci, C. Corsi, C. de Vente, M. Veta, R. Karim, C. Jayachandran Preetha, S. Engelhardt, M. Qiao, Y. Wang, Q. Tao, M. Nuñez-Garcia, O. Camara, N. Savioli, P. Lamata and J. Zhao. "A global benchmark of algorithms for segmenting the left atrium from late gadolinium-enhanced cardiac magnetic resonance imaging", <i>Medical Image Analysis</i>, 2021;67:101832.</p>
<pre id="publication-modal-xion21-pre">@article{Xion21,
title={A global benchmark of algorithms for segmenting the left atrium from late gadolinium-enhanced cardiac magnetic resonance imaging},
author={Xiong, Zhaohan and Xia, Qing and Hu, Zhiqiang and Huang, Ning and Bian, Cheng and Zheng, Yefeng and Vesal, Sulaiman and Ravikumar, Nishant and Maier, Andreas and Yang, Xin and Heng, Pheng-Ann and Ni, Dong and Li, Caizi and Tong, Qianqian and Si, Weixin and Puybareau, Elodie and Khoudli, Younes and Graud, Thierry and Chen, Chen and Bai, Wenjia and Rueckert, Daniel and Xu, Lingchao and Zhuang, Xiahai and Luo, Xinzhe and Jia, Shuman and Sermesant, Maxime and Liu, Yashu and Wang, Kuanquan and Borra, Davide and Masci, Alessandro and Corsi, Cristiana and de Vente, Coen and Veta, Mitko and Karim, Rashed and Jayachandran Preetha, Chandrakanth and Engelhardt, Sandy and Qiao, Menyun and Wang, Yuanyuan and Tao, Qian and Nuñez-Garcia, Marta and Camara, Oscar and Savioli, Nicolo and Lamata, Pablo and Zhao, Jichao},
journal={Medical Image Analysis},
volume={67},
pages={101832},
year={2021},
publisher={Elsevier},
pmid={33166776},
scholar_id={298995361484429794}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-xion21')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent20a" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent20a">×</span>
<p>C. de Vente, P. Vos, M. Hosseinzadeh, J. Pluim and M. Veta. "Deep learning regression for prostate cancer detection and grading in bi-parametric MRI", <i>IEEE Transactions on Biomedical Engineering</i>, 2020;68(2):374-383.</p>
<pre id="publication-modal-vent20a-pre">@article{Vent20a,
title={Deep learning regression for prostate cancer detection and grading in bi-parametric MRI},
author={de Vente, Coen and Vos, Pieter and Hosseinzadeh, Matin and Pluim, Josien and Veta, Mitko},
journal={IEEE Transactions on Biomedical Engineering},
volume={68},
number={2},
pages={374--383},
year={2020},
publisher={IEEE},
pmid={32396068},
doi={10.1109/TBME.2020.2993528},
scholar_id={403188362461805794}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent20a')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-schwartz2024genetic" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-schwartz2024genetic">×</span>
<p>R. Schwartz, A.N. Warwick, A.P. Khawaja, R. Luben, H. Khalid, S. Phatak, M. Jhingan, C. de Vente, P. Valmaggia, S. Liakopoulos and others. "Genetic Distinctions Between Reticular Pseudodrusen and Drusen: Insights from a Genome-Wide Association Study", <i>medRxiv:10.1101/2024.09.18.24313862v1</i>, 2024;2024-09.</p>
<pre id="publication-modal-schwartz2024genetic-pre">@Preprint{schwartz2024genetic,
title={Genetic Distinctions Between Reticular Pseudodrusen and Drusen: Insights from a Genome-Wide Association Study},
author={Schwartz, Roy and Warwick, Alasdair N and Khawaja, Anthony P and Luben, Robert and Khalid, Hagar and Phatak, Sumita and Jhingan, Mahima and de Vente, Coen and Valmaggia, Philippe and Liakopoulos, Sandra and others},
journal={medRxiv:10.1101/2024.09.18.24313862v1},
pages={2024--09},
year={2024},
publisher={Cold Spring Harbor Laboratory Press}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-schwartz2024genetic')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-schwartz2024increasing" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-schwartz2024increasing">×</span>
<p>R. Schwartz, A. Olvera-Barrios, A.N. Warwick, H. Khalid, S. Phatak, M. Jhingan, C. De Vente, P. Valmaggia, C. Sanchez, C.A. Egan and others. "Increasing Stroke Risk Correlated with Higher Reticular Pseudodrusen Counts: Evidence from the UK Biobank", in: <i>Association for Research in Vision and Ophthalmology</i>, 2024.</p>
<pre id="publication-modal-schwartz2024increasing-pre">@conference{schwartz2024increasing,
title={Increasing Stroke Risk Correlated with Higher Reticular Pseudodrusen Counts: Evidence from the UK Biobank},
author={Schwartz, Roy and Olvera-Barrios, Abraham and Warwick, Alasdair N and Khalid, Hagar and Phatak, Sumita and Jhingan, Mahima and De Vente, Coen and Valmaggia, Philippe and Sanchez, Clarisa and Egan, Catherine A and others},
booktitle={Association for Research in Vision and Ophthalmology},
volume={65},
number={7},
pages={1344--1344},
year={2024},
publisher={The Association for Research in Vision and Ophthalmology},
scholar_id={3790408646115732743}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-schwartz2024increasing')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent23c" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent23c">×</span>
<p>C. de Vente, A. Tufail, S. Schmitz-Valckenberg, M. Saßmannshausen, C. Hoyng and C.I. Sánchez on behalf of the MACUSTAR consortium. "OCT Super-Resolution for Data Standardization using AI: A MACUSTAR report", in: <i>Association for Research in Vision and Ophthalmology</i>, 2023.</p>
<pre id="publication-modal-vent23c-pre">@conference{Vent23c,
author = {de Vente, Coen and Tufail, A. and Schmitz-Valckenberg, S. and Saßmannshausen, M. and Hoyng, C. and S{\'a}nchez on behalf of the MACUSTAR consortium, Clara I},
title = {OCT Super-Resolution for Data Standardization using AI: A MACUSTAR report},
Methods: The MACUSTAR cohort, a European multicentre study, was used as a training set with 743 OCTs from 181 patients and validation set with 26 OCTs from 26 patients (n=3 no AMD, n=2 early AMD, n=18 intermediate AMD, n=3 late AMD). All scans were Heidelberg Spectralis OCTs with 241 B-scans. We trained a 3D diffusion model to generate high-resolution OCTs, which was used during evaluation to produce OCTs with 241 B-scans from OCTs with 120 B-scans. The performance was calculated using the mean squared error (MSE) on OCT volume-level between the generated B-scans and the original B-scans.
Results: The MSE between the generated B-scans from the low-resolution OCTs and the original B-scans from the high-resolution OCTs was 0.006 ± 0.004 (mean ± SD). Fig. 1 shows visual examples of the generated OCTs compared to the original B-scans in the validation set.
Conclusions: We showed the feasibility of the proposed approach to generate super-resolution OCTs, which is one of the required steps to standardize high-quality OCTs within multicenter studies. In extensions of this approach, coherence between the OCT and other modalities, such as en face imaging and other metadata, could be introduced, allowing the AI model to make better informed generative decisions.
},
year = {2023},
booktitle={Association for Research in Vision and Ophthalmology},
journal={Investigative Ophthalmology & Visual Science},
volume={64},
number={8},
pages={313--313},
publisher={The Association for Research in Vision and Ophthalmology},
url={https://iovs.arvojournals.org/article.aspx?articleid=2789903}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent23c')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-lemi22" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-lemi22">×</span>
<p>H.G. Lemij, C. de Vente, C.I. Sánchez, J. Cuadros, N. Jaccard and K. Vermeer. "Glaucomatous features in fundus photographs of eyes with 'Referable glaucoma' of a large population based labeled data set for training an Artificial Intelligence (AI) algorithm for glaucoma screening", in: <i>Association for Research in Vision and Ophthalmology</i>, 2022.</p>
<pre id="publication-modal-lemi22-pre">@conference{Lemi22,
title={Glaucomatous features in fundus photographs of eyes with 'Referable glaucoma' of a large population based labeled data set for training an Artificial Intelligence (AI) algorithm for glaucoma screening},
author={Lemij, Hans G and de Vente, Coen and S{\'a}nchez, Clara I and Cuadros, Jorge and Jaccard, Nicolas and Vermeer, Koen},
booktitle={Association for Research in Vision and Ophthalmology},
volume={63},
number={7},
url={https://iovs.arvojournals.org/article.aspx?articleid=2782322},
pages={2041--A0482},
year={2022},
publisher={The Association for Research in Vision and Ophthalmology},
scholar_id={14483770842850181670}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-lemi22')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-schw22a" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-schw22a">×</span>
<p>R. Schwartz, H. Khalid, S. Liakopoulos, Y. Ouyang, C. de Vente, C.G. Gonzalo, A.Y. Lee, C.A. Egan, C.I. Sánchez and A. Tufail. "A deep learning pipeline for the detection and quantification of drusen and reticular pseudodrusen on optical coherence tomography", in: <i>Association for Research in Vision and Ophthalmology</i>, 2022.</p>
<pre id="publication-modal-schw22a-pre">@conference{Schw22a,
title={A deep learning pipeline for the detection and quantification of drusen and reticular pseudodrusen on optical coherence tomography},
author={Schwartz, Roy and Khalid, Hagar and Liakopoulos, Sandra and Ouyang, Yanling and de Vente, Coen and Gonzalo, Cristina Gonz{\'a}lez and Lee, Aaron Y and Egan, Catherine A and S{\'a}nchez, Clara I and Tufail, Adnan},
booktitle={Association for Research in Vision and Ophthalmology},
url={https://iovs.arvojournals.org/article.aspx?articleid=2781366},
volume={63},
number={7},
pages={3856--3856},
year={2022},
publisher={The Association for Research in Vision and Ophthalmology},
scholar_id={6469598480820735470}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-schw22a')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent22" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent22">×</span>
<p>C. de Vente, K. Vermeer, N. Jaccard, H.G. Lemij and C.I. Sánchez. "AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge", in: <i>Imaging and Morphometry Association for Glaucoma in Europe</i>, 2022.</p>
<pre id="publication-modal-vent22-pre">@conference{Vent22,
author = {de Vente, Coen and Vermeer, Koen and Jaccard, Nicolas and Lemij, Hans G and S{\'a}nchez, Clara I},
booktitle ={Imaging and Morphometry Association for Glaucoma in Europe},
url = {https://drive.google.com/file/d/11DfeyNA8I4UVZGkhn_NVIOIGbcEesfCw/view?usp=sharing},
title = {AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge},
Methods: 112,732 CFPs from 60,071 subjects from a population screening program for diabetic retinopathy, obtained from EyePACS, California, USA, were manually labeled by 20 carefully selected and continuously monitored ophthalmologists and optometrists. They each labeled a portion of the full set of images as “referable glaucoma” (RG), “no referable glaucoma” (NRG) or “ungradable” (U). Each CFP was graded by 2 randomly selected graders; if their labels matched, it was considered the final label. In case of disagreement, the CFP was graded by a glaucoma specialist; his label was the final label. We split the data into a development set of 101,442 CFPs and a test set of 11,290 CFPs. The challenge task was to classify CFPs as RG or NRG, while additionally providing a decision on whether images were ungradable. To encourage the development of methodologies with inherent robustness mechanisms, we only included CFPs labeled as U in the test set and not in the development set. Challenge participants submitted their solutions as Docker1 containers to our online evaluation platform2. We ran their submitted algorithms on our test set, which is not publicly available. Subsequently, we assessed glaucoma screening performance using the partial area under the receiver operator characteristic curve (pAUC) (90-100% specificity) and sensitivity at 95% specificity (S). To measure robustness, we calculated Cohen's kappa score (κU) and the area under the receiver operator characteristic curve (AUCU), using the decisions generated by the algorithms on image ungradability.
Results: The challenge is currently running and we are still accepting submissions at the time of writing. Up to now, 289 users have joined the challenge, 208 persons have requested access to the dataset, 26 teams have been formed on the challenge platform, and 13 submissions from 7 unique participants have been successfully submitted to the last preliminary test set, which contains 10% of the test data. The best pAUC, S, κU and AUCU on this preliminary test set were 89.1%, 83.8%, 44.5% and 91.5%, respectively. The means and standard deviations for these metrics over all submissions were 82.2% ± 9.6%, 72.0% ± 19.4%, 20.6% ± 14.9%, 76.8% ± 11.6%, respectively.
Conclusions: We present a challenge based on real-world data for glaucoma screening by CFP. The initial results are promising, as the performances are high and the preliminary sensitivity at 95% specificity exceeds our target of 80%. The final winners and their solutions will be presented at the 19th IMAGE meeting.
},
year = {2022}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent22')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent21a" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent21a">×</span>
<p>C. de Vente, C. González-Gonzalo, E.F. Thee, M. van Grinsven, C.C. Klaver and C.I. Sánchez. "Making AI Transferable Across OCT Scanners from Different Vendors", in: <i>Association for Research in Vision and Ophthalmology</i>, 2021.</p>
<pre id="publication-modal-vent21a-pre">@conference{Vent21a,
author = {de Vente, Coen and Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and van Grinsven, Mark and Klaver, Caroline C.W. and S\'{a}nchez, Clara I.},
booktitle ={Association for Research in Vision and Ophthalmology},
url = {https://iovs.arvojournals.org/article.aspx?articleid=2775505},
title = {Making AI Transferable Across OCT Scanners from Different Vendors},
Methods: 2,598 and 680 Heidelberg Spectralis OCT scans from the European Genetic Database were used for development and testing, respectively. We tested transferability with 339 AMD-enriched Topcon OCTs from the Rotterdam Study. AMD severity classification was determined manually in accordance with the Cologne Image Reading Center and Laboratory and Rotterdam Classification, respectively. Classifications were harmonized for the evaluation of the DNNs. The proposed DNN considers each B-scan separately using a 2D ResNet-18, and internally combines the intermediate outputs related to each B-scan using a multiple instance learning approach. Even though the proposed DNN provides both B-scan level and OCT-volume level decisions, the architecture is trained end-to-end using only full volume gradings. This specific architecture makes our method robust to the variability of scanning protocols across vendors, as it is invariant to B-scan spacing. We compare this approach to a baseline that classifies the full OCT scan directly using a 3D ResNet-18.
Results: The quadratic weighted kappa (QWK) for the baseline method dropped from 0.852 on the Heidelberg Spectralis dataset to 0.523 on the Topcon dataset. This QWK drop was smaller (p = 0.001) for our approach, which dropped from 0.849 to 0.717. The difference in area under the Receiver Operating Characteristic (AUC) drop was also smaller (p < 0.001) for our approach (0.969 to 0.906, -6.5%) than for the baseline method (0.971 to 0.806, -17.0%).
Conclusions: We present a DNN for AMD classification on OCT scans that transfers well to scans from vendors that were not used for development. This alleviates the need for retraining on data from these scanner types, which is an expensive process in terms of data acquisition, model development, and human annotation time. Furthermore, this increases the applicability of AI for OCT classification in broader scopes than the settings in which they were developed.},
year = {2021},
scholar_id = {504386056410585983}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent21a')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-gonz21" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-gonz21">×</span>
<p>C. González-Gonzalo, E.F. Thee, B. Liefers, C. de Vente, C.C. Klaver and C.I. Sánchez. "Hierarchical curriculum learning for robust automated detection of low-prevalence retinal disease features: application to reticular pseudodrusen", in: <i>Association for Research in Vision and Ophthalmology</i>, 2021.</p>
<pre id="publication-modal-gonz21-pre">@conference{Gonz21,
author = {Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and Liefers, Bart and de Vente, Coen and Klaver, Caroline C.W. and S\'{a}nchez, Clara I.},
booktitle ={Association for Research in Vision and Ophthalmology},
url = {https://iovs.arvojournals.org/article.aspx?articleid=2773295},
title = {Hierarchical curriculum learning for robust automated detection of low-prevalence retinal disease features: application to reticular pseudodrusen},
Methods: Color fundus images (CFI) from the AREDS dataset were used for DNN development (106,994 CFI) and testing (27,066 CFI). An external test set (RS1-6) was generated with 2,790 CFI from the Rotterdam Study. In both datasets CFI were graded from generic to specific features. This allows to establish a hierarchy of binary classification tasks with decreasing prevalence: presence of AMD findings (AREDS prevalence: 88%; RS1-6: 77%), drusen (85%; 73%), large drusen (40%; 24%), RPD (1%; 4%). We created a hierarchical curriculum and developed a DNN (HC-DNN) that learned each task sequentially. We computed its performance for RPD detection in both test sets and compared it to a baseline DNN (B-DNN) that learned to detect RPD from scratch disregarding hierarchical information. We studied their robustness across datasets, while reducing the size of data available for development (same prevalences)
Results: Area under the receiver operating characteristic curve (AUC) was used to measure RPD detection performance. When large development data were available, there was no significant difference between DNNs (100% data, HC-DNN: 0.96 (95% CI, 0.94-0.97) in AREDS, 0.82 (0.78-0.86) in RS1-6; B-DNN: 0.95 (0.94-0.96) in AREDS, 0.83 (0.79-0.87) in RS1-6). However, HC-DNN achieved better performance and robustness across datasets when development data were highly reduced (<50% data, p-values<0.05) (1% data, HC-DNN: 0.63 (0.60-0.66) in AREDS, 0.76 (0.72-0.80) in RS1-6; B-DNN: 0.53 (0.49-0.56) in AREDS, 0.48 (0.42-0.53) in RS1-6).
Conclusions: Hierarchical curriculum learning allows for knowledge transfer from general, higher-prevalence features and becomes beneficial for the detection of low-prevalence retinal features, such as RPD, in scarce data settings. Moreover, exploiting hierarchical information improves DNN robustness across datasets.},
year = {2021},
scholar_id = {8274882984481634472}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-gonz21')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-vent20" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-vent20">×</span>
<p>C. de Vente, M. van Grinsven, S. De Zanet, A. Mosinska, R. Sznitman, C. Klaver and C.I. Sánchez. "Estimating Uncertainty of Deep Neural Networks for Age-related Macular Degeneration Grading using Optical Coherence Tomography", in: <i>Association for Research in Vision and Ophthalmology</i>, 2020.</p>
<pre id="publication-modal-vent20-pre">@conference{Vent20,
author = {de Vente, Coen and van Grinsven, Mark and De Zanet, Sandro and Mosinska, Agata and Sznitman, Raphael and Klaver, Caroline and S\'{a}nchez, Clara I.},
booktitle ={Association for Research in Vision and Ophthalmology},
title = {Estimating Uncertainty of Deep Neural Networks for Age-related Macular Degeneration Grading using Optical Coherence Tomography},
url={https://iovs.arvojournals.org/article.aspx?articleid=2769262},
Methods: 1,264 OCT volumes from 633 patients from the European Genetic Database (EUGENDA) were graded as one of five stages of AMD (No AMD, Early AMD, Intermediate AMD, Advanced AMD: GA, and Advanced AMD: CNV). Ten different 3D DenseNet-121 models that take a full OCT volume as input were used to predict the corresponding AMD stage. These networks were all trained on the same dataset. However, each of these networks were initialized differently. The class with the maximum average softmax output of these models was used as the final prediction. The confidence measure was the normalized average softmax output for that class.
Results: The algorithm achieved an area under the Receiver Operating Characteristic of 0.9785 and a quadratic-weighted kappa score of 0.8935. The mean uncertainty, calculated as 1 - the mean confidence score, for incorrect predictions was 1.9 times as high as the mean uncertainty for correct predictions. When only using the probability output of a single network, this ratio was 1.4. Another measure for uncertainty estimation performance is the Expected Calibration Error (ECE), where a lower value is better. When comparing the method to the probability output of a single network, the ECE improved from 0.0971 to 0.0324. Figure 1 shows examples of both confident and unconfident predictions.
Conclusions: We present a method for improving uncertainty estimation for AMD grading in OCT, by combining the output of multiple individually trained CNNs. This increased reliability of system confidences can contribute to building trust in CNNs for retinal disease screening. Furthermore, this technique is a first step towards selective prediction in retinal disease screening, where only cases with high uncertainty predictions need to be referred for expert evaluation.},
year = {2020},
month = {6},
scholar_id = {13938730969248371423}
}</pre>
<div class="publication-button-group">
<a data-ix="goupbox" onclick="copy_text('publication-modal-vent20')"
class="knop footerknop movewithmouse w-button publication-button publication-button-black">
<img src="images/copy.png" class="copy-icon"> Copy
</a>
</div>
</div>
</div>
<div id="publication-modal-ardu20" class="modal">
<div class="modal-content">
<span class="close" id="close-publication-modal-ardu20">×</span>
<p>A. Ardu, B. Liefers, C. de Vente, C. González-Gonzalo, C. Klaver and C.I. Sánchez. "Artificial Intelligence for the Classification and Quantification of Reticular Pseudodrusen in Multimodal Retinal Images", in: <i>European Society of Retina Specialists</i>, 2020.</p>
<pre id="publication-modal-ardu20-pre">@Conference{Ardu20,
author = {Ardu, Alessandro and Liefers, Bart and de Vente, Coen and Gonz\'{a}lez-Gonzalo, Cristina and Klaver, Caroline and S\'{a}nchez, Clara I.},
booktitle ={European Society of Retina Specialists},
title = {Artificial Intelligence for the Classification and Quantification of Reticular Pseudodrusen in Multimodal Retinal Images},
abstract = {Reticular pseudodrusen (RPD) are retinal lesions highly correlated with the risk of developing end-stage age-related macular degeneration (AMD) and, therefore, relevant biomarkers for understanding the progression of AMD. Due to the subtle features characterizing RPD, multiple imaging modalities are often necessary to confirm the presence and extension of RPD, considerably increasing the workload of the expert graders. We propose a deep neural network (DNN) architecture that classifies and quantifies RPD using multimodal retinal images.
Setting:
A cross-sectional study that compares the performance of three expert graders with a DNN trained for identifying and quantifying RPD. Conducted on retinal images drawn from the Rotterdam Study, a population-based cohort, in three modalities: color fundus photographs (CFP), fundus autofluorescence images (FAF) and near-infrared reflectance images (NIR).
Methods:
Multimodal images of 278 eyes of 230 patients were retrieved from the Rotterdam Study database. Of those, 72 eyes showed presence of RPD, 108 had soft distinct/indistinct drusen, and 98 had no signs of drusen as confirmed by the Rotterdam Study graders. Delineations of the areas affected with RPD were made in consensus by two human experts using CFP and NIR images simultaneously and were used as reference standard (RS) for RPD area quantification. The data was randomly divided, patient-wise, in training (243) and test (35) sets for model development and evaluation. A DNN was developed for RPD classification and quantification. The proposed DNN is based on an encoder-decoder architecture. The model jointly inputs a set of co-registered retinal image modalities (CFP, NIR, FAF) and outputs a heatmap image containing, per pixel, the likelihood of RPD presence. The 99th percentile of the values contained in this heatmap measures the likelihood of RPD presence. Three independent graders manually delineated RPD in all eyes of the test set based on the CFP and NIR and their performance was compared with the DNN in the tasks of RPD classification and quantification.
Results:
The proposed DNN obtained an area under the receiver operating characteristic curve (AUROC) with 95% confidence interval (CI) of 0.939[0.818-1.0], a sensitivity (SE) of 0.928 and specificity (SP) of 0.809 for the detection of RPD in multimodal imaging. For RPD quantification, the DNN achieved a mean Dice coefficient (DSC) of 0.632+-0.261 and an intra-class correlation (ICC) of 0.676[0.294-0.999]. Comparably, for RPD classification, grader 1 obtained SE/SP pairs of 1.0/0.785, grader 2 of 1.0/0.5 and grader 3 of 1.0/0.785. For RPD quantification, the graders obtained mean DSC of 0.619+-0.196, 0.573+-0.170 and 0.697+-0.157, respectively, and an ICC of 0.721[0.340-0.999], 0.597[0.288-0.999], 0.751[0.294-0.999], respectively. Of the DNN's three false negatives, none of them was correctly classified by the three graders. The model correctly classified RPD in three of the six eyes where graders disagreed and in the only eye where none of the graders found RPD. Overall, 65.1% of the area indicated as RPD by the reference was delineated by at least one grader and only 26.5% of the total was graded as RPD by all experts. The DNN only missed 23.2% of the areas that all three graders identified correctly.
Conclusions:
The proposed DNN showed promising capacities in the tasks of classifying and quantifying RPD lesions on multimodal retinal images. The results show that the model is able to correctly classify and quantify RPD on eyes where lesions are difficult to spot. The probabilistic output of the model allows for the classification of RPD at different levels of confidence and indicates what retinal areas are most likely affected. This is in line with the manual assessment done by the graders. To this point, the model is developed to classify and quantify RPD only on CFP, FAF and NIR. However, introducing other imaging modalities, such as OCT, might help diminish ambiguities in the classification and quantification of this abnormality. Therefore, a future direction for improving the proposed method is to include OCT scans as an additional input to the model. Automatic classification and quantification of RPD using deep learning on multimodal images will enable the automatic and accurate analysis of increasingly large amounts of data for clinical studies and will facilitate AMD screening in the elderly by decreasing the workload of the expert graders.
Financial Disclosure:
None},
month = {9},
year = {2020}
}</pre>
<!-- Copy-to-clipboard control for the Ardu20 BibTeX entry. -->
<div class="publication-button-group">
  <a data-ix="goupbox" onclick="copy_text('publication-modal-ardu20')"
    class="knop footerknop movewithmouse w-button publication-button publication-button-black">
    <!-- Decorative icon: adjacent "Copy" text carries the meaning, so alt="" -->
    <img src="images/copy.png" class="copy-icon" alt=""> Copy
  </a>
</div>
</div>
</div>
<!-- Modal with citation and BibTeX for Islam et al., MIDL 2024. -->
<div id="publication-modal-isla24" class="modal">
  <div class="modal-content">
    <span class="close" id="close-publication-modal-isla24">×</span>
    <p>M.M. Islam, C. de Vente, B. Liefers, C. Klaver, E.J. Bekkers and C.I. Sánchez. "Uncertainty-aware retinal layer segmentation in OCT through probabilistic signed distance functions", in: <i>Medical Imaging with Deep Learning</i>, 2024.</p>
    <!-- Pre content is whitespace-sensitive (copied verbatim by copy_text); keep it unindented. -->
    <pre id="publication-modal-isla24-pre">@inproceedings{Isla24,
title={Uncertainty-aware retinal layer segmentation in OCT through probabilistic signed distance functions},
author={Islam, Mohammad Mohaiminul and de Vente, Coen and Liefers, Bart and Klaver, Caroline and Bekkers, Erik J and S{\'a}nchez, Clara I},
booktitle={Medical Imaging with Deep Learning},
year={2024}
}</pre>
    <div class="publication-button-group">
      <a data-ix="goupbox" onclick="copy_text('publication-modal-isla24')"
        class="knop footerknop movewithmouse w-button publication-button publication-button-black">
        <!-- Decorative icon: adjacent "Copy" text carries the meaning, so alt="" -->
        <img src="images/copy.png" class="copy-icon" alt=""> Copy
      </a>
    </div>
  </div>
</div>
<!-- Modal with citation and BibTeX for de Vente et al., STACOM 2018. -->
<div id="publication-modal-vent18" class="modal">
  <div class="modal-content">
    <span class="close" id="close-publication-modal-vent18">×</span>
    <p>C. de Vente, M. Veta, O. Razeghi, S. Niederer, J. Pluim, K. Rhode and R. Karim. "Convolutional neural networks for segmentation of the left atrium from gadolinium-enhancement MRI images", in: <i>International Workshop on Statistical Atlases and Computational Models of the Heart</i>, 2018, pages 348-356.</p>
    <!-- Pre content is whitespace-sensitive (copied verbatim by copy_text); keep it unindented. -->
    <pre id="publication-modal-vent18-pre">@inproceedings{Vent18,
title={Convolutional neural networks for segmentation of the left atrium from gadolinium-enhancement MRI images},
author={de Vente, Coen and Veta, Mitko and Razeghi, Orod and Niederer, Steven and Pluim, Josien and Rhode, Kawal and Karim, Rashed},
booktitle={International Workshop on Statistical Atlases and Computational Models of the Heart},
pages={348--356},
year={2018},
organization={Springer},
scholar_id={441491962980777638}
}</pre>
    <div class="publication-button-group">
      <a data-ix="goupbox" onclick="copy_text('publication-modal-vent18')"
        class="knop footerknop movewithmouse w-button publication-button publication-button-black">
        <!-- Decorative icon: adjacent "Copy" text carries the meaning, so alt="" -->
        <img src="images/copy.png" class="copy-icon" alt=""> Copy
      </a>
    </div>
  </div>
</div>
<!-- Contact call-to-action banner shown above the footer.
     NOTE(review): this <h1> may duplicate the page's main <h1> (not visible in
     this chunk) — if so, consider demoting to <h2> while keeping class="heading-7". -->
<div class="div-block-17">
<div data-ix="flowinslow" class="div-block-34">
<h1 class="heading-7">Want to get in contact?</h1><a data-ix="goupbox" href="contact.html" class="knop footerknop movewithmouse w-button">Drop me a line</a></div>
</div>
<!-- Site footer: sitemap, contact details, and external profile links. -->
<div class="section-5">
  <div class="row-3 w-row">
    <div class="column colrightarr w-col w-col-4">
      <div class="div-block-9">
        <div class="text-block-8">sitemap</div>
      </div>
      <div class="div-block-9"><a href="index.html" class="link">home</a></div>
      <div class="div-block-9"><a href="projects.html" class="link">posts</a></div>
      <div class="div-block-9"><a href="publications.html" class="link">publications</a></div>
      <div class="div-block-9"><a href="about.html" class="link">about</a></div>
      <div class="div-block-9"><a href="contact.html" class="link">contact</a></div>
    </div>
    <div class="colrightarr w-col w-col-4">
      <div class="div-block-9">
        <div class="text-block-8">contact</div>
      </div>
      <div class="div-block-9"><a href="mailto:hello@coendevente.com" class="link">hello@coendevente.com</a></div>
      <div class="div-block-9"><a href="contact.html" class="link">contact form</a></div>
      <div class="div-block-9"><a href="https://calendar.google.com/calendar/appointments/AcZssZ1dSioNVM88OG4Ta6X_cBKh6_3TFOWJ9hHaHHc=" class="link">schedule a meeting with me</a></div>
    </div>
    <div class="colrightarr w-col w-col-4 norightbar">
      <div class="div-block-9">
        <div class="text-block-8">links</div>
      </div>
      <!-- External links open in a new tab; rel="noopener noreferrer" prevents the
           opened page from reaching back through window.opener. Icons are decorative
           (link text carries the meaning), hence alt="". -->
      <!-- <div class="div-block-9 langetekst"> -->
      <div class="div-block-9">
        <a target="_blank" rel="noopener noreferrer" href="https://scholar.google.com/citations?user=AqL8A60AAAAJ" class="link">
          <img class="link-icon" src="images/links/scholar.svg" alt=""> Google Scholar
        </a>
      </div>
      <div class="div-block-9">
        <a target="_blank" rel="noopener noreferrer" href="https://www.linkedin.com/in/coendevente/" class="link">
          <img class="link-icon" src="images/links/linkedin.png" alt=""> LinkedIn
        </a>
      </div>
      <div class="div-block-9">
        <a target="_blank" rel="noopener noreferrer" href="https://qurai.amsterdam/researcher/coen_de_vente/" class="link">
          <img class="link-icon" src="images/links/qurai.png" alt=""> QurAI
        </a>
      </div>
      <div class="div-block-9">
        <a target="_blank" rel="noopener noreferrer" href="https://www.diagnijmegen.nl/people/coen-de-vente/" class="link">
          <img class="link-icon" src="images/links/radboudumc.png" alt=""> DIAG Nijmegen
        </a>
      </div>
      <div class="div-block-9">
        <a target="_blank" rel="noopener noreferrer" href="https://github.com/coendevente" class="link">
          <img class="link-icon" src="images/links/github.svg" alt=""> GitHub
        </a>
      </div>
      <!-- <div class="div-block-9">
      <a target="_blank" href="https://github.com/coendevente/build-coendevente.com" class="link">
      <img class="link-icon" src="images/links/github.svg" /> coendevente.com
      </a>
      </div> -->
      <!-- </div> -->
    </div>
  </div>
</div>
<script src="https://code.jquery.com/jquery-3.3.1.min.js" type="text/javascript" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous"></script>
<script src="js/webflow.js" type="text/javascript"></script>
<!--[if lte IE 9]><script src="https://cdnjs.cloudflare.com/ajax/libs/placeholders/3.0.2/placeholders.min.js"></script><![endif]-->
<style>
/* Hide the Webflow branding badge injected by webflow.js. */
.w-webflow-badge {
display: none !important;
}
/* Animate the nav bar, open menu, and nav links between the transparent
   and "colored" states toggled by refresh() below. */
.w-nav,
.w--nav-menu-open,
.w-nav-link {
transition: all .5s;
}
/* "colored" = opaque white nav with a drop shadow (applied once the page
   is scrolled, on narrow viewports, or on light pages). */
.w-nav.colored,
.w--nav-menu-open.colored {
background: #FFF;
box-shadow: 0 -10px 30px #333;
}
.w-nav-link.colored {
color: #333 !important;
}
.w-icon-nav-menu.colored {
color: #333;
}
/* Keep thumbnail backgrounds anchored under the overlaid content. */
.thumbimgbg {
position: relative;
top: 0;
z-index: 0;
}
/* Fade the template front card (used by the thumbnail hover effect). */
.templatefront {
transition: opacity .3s;
}
</style>
<script>
lazyload();
// Toggle the nav bar between its transparent and "colored" (opaque white)
// styles. The colored style is used when the page is scrolled past `thresh`,
// on narrow viewports (<= 991px), or on pages that flag a light background
// via the .wittebalk / .naastmockup sentinel elements.
// NOTE(review): locals were previously implicit globals; declared with `var`
// here — confirm no out-of-view script reads e.g. `navHeight` globally.
function refresh() {
  var thresh = 0; // scroll offset (px) past which the nav is colored
  var fromTop = $(window).scrollTop();
  var navHeight = $(".w-nav").height() + $(".w-nav-menu").height();
  // Page flags: .wittebalk holds "1" on pages with a white top bar;
  // .naastmockup colored rgb(51,51,51) marks pages with always-dark nav text.
  var wittebalk = $(".wittebalk").html() === "1";
  var alwaysDarkText = $(".naastmockup").css("color") === "rgb(51, 51, 51)";
  if (thresh < fromTop || $(window).width() <= 991 || wittebalk || alwaysDarkText) {
    $(".w-nav").addClass("colored");
    $(".w-nav-link").addClass("colored");
    $(".blacklogo").show();
    $(".whitelogo").hide();
    $(".w-icon-nav-menu").addClass("colored");
    $(".w--nav-menu-open").addClass("colored");
  } else {
    $(".w-nav").removeClass("colored");
    $(".w-nav-link").removeClass("colored");
    $(".blacklogo").hide();
    $(".whitelogo").show();
    $(".w-icon-nav-menu").removeClass("colored");
    $(".w--nav-menu-open").removeClass("colored");
  }
}
// Re-evaluate the nav colouring whenever page state can change:
// on load, click, scroll, and resize.
$(document).ready(function() {
  refresh();
});
$(document).on("click", refresh);
$(window).on("scroll", refresh);
$(window).on("resize", refresh);
// 3D hover effect: tilt and shift .movewithmouse elements toward the cursor.
// Rotation/translation limits scale with the element's width so small buttons
// move subtly and large cards move more.
// NOTE(review): locals were previously implicit globals; declared with `var`.
$(".movewithmouse").mousemove(function(e) {
  var cursorX = e.clientX;
  var cursorY = e.clientY;
  var divW = $(this).outerWidth();
  var divH = $(this).outerHeight();
  // Maximum rotation (deg) and translation (px), derived from element width.
  var maxXdeg = divW / 50;
  var maxYdeg = maxXdeg;
  var maxZdeg = -(divW / 400);
  var maxXpx = divW / 50;
  var maxYpx = divW / 50;
  // Element centre in viewport coordinates.
  var divCenterX = $(this).offset().left - $(window).scrollLeft() + (divW / 2);
  var divCenterY = $(this).offset().top - $(window).scrollTop() + (divH / 2);
  // Cursor offset from the centre, normalised against the half-extents.
  var xDist = cursorX - divCenterX;
  var yDist = cursorY - divCenterY;
  var xDeg = -maxXdeg * (xDist / (divW / 2));
  var yDeg = maxYdeg * (yDist / (divH / 2));
  var zDeg = maxZdeg * (xDist * yDist / (divW * divH / 4));
  var xPx = maxXpx * (xDist / (divW / 2));
  var yPx = maxYpx * (yDist / (divH / 2));
  var cssText = "scale(1.04)";
  cssText += " rotateY(" + xDeg + "deg) rotateX(" + yDeg + "deg) rotateZ(" + zDeg + "deg)";
  cssText += " translateX(" + xPx + "px) translateY(" + yPx + "px)";
  $(this).css("transform", cssText);
});
$(".templatethumb").hover(function() {
thumbW = $(this).width();
thumbH = $(this).height();