-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathindex.html
More file actions
561 lines (479 loc) · 43.9 KB
/
index.html
File metadata and controls
561 lines (479 loc) · 43.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <!-- Filled previously-empty description/author for SEO; obsolete X-UA-Compatible meta removed. -->
  <meta name="description" content="Atlas Analytics Lab — Deep Learning and Computational Pathology Lab at Concordia University, Montréal.">
  <meta name="author" content="Atlas Analytics Lab">
  <title>Atlas Analytics Lab</title>
  <!-- Bootstrap Core CSS (redundant type="text/css" removed throughout — it is the default) -->
  <link rel="stylesheet" href="css/bootstrap.min.css">
  <!-- Custom Fonts — upgraded http:// to https:// to avoid mixed-content blocking when the site is served over HTTPS -->
  <link href="https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800" rel="stylesheet">
  <link href="https://fonts.googleapis.com/css?family=Merriweather:400,300,300italic,400italic,700,700italic,900,900italic" rel="stylesheet">
  <link rel="stylesheet" href="font-awesome/css/font-awesome.min.css">
  <!-- Plugin CSS -->
  <link rel="stylesheet" href="css/animate.min.css">
  <!-- Custom CSS -->
  <link rel="stylesheet" href="css/creative.css">
  <!-- Favicons and touch icons -->
  <link rel="apple-touch-icon" sizes="57x57" href="icons/apple-icon-57x57.png">
  <link rel="apple-touch-icon" sizes="60x60" href="icons/apple-icon-60x60.png">
  <link rel="apple-touch-icon" sizes="72x72" href="icons/apple-icon-72x72.png">
  <link rel="apple-touch-icon" sizes="76x76" href="icons/apple-icon-76x76.png">
  <link rel="apple-touch-icon" sizes="114x114" href="icons/apple-icon-114x114.png">
  <link rel="apple-touch-icon" sizes="120x120" href="icons/apple-icon-120x120.png">
  <link rel="apple-touch-icon" sizes="144x144" href="icons/apple-icon-144x144.png">
  <link rel="apple-touch-icon" sizes="152x152" href="icons/apple-icon-152x152.png">
  <link rel="apple-touch-icon" sizes="180x180" href="icons/apple-icon-180x180.png">
  <link rel="icon" type="image/png" sizes="192x192" href="icons/android-icon-192x192.png">
  <link rel="icon" type="image/png" sizes="32x32" href="icons/favicon-32x32.png">
  <link rel="icon" type="image/png" sizes="96x96" href="icons/favicon-96x96.png">
  <link rel="icon" type="image/png" sizes="16x16" href="icons/favicon-16x16.png">
  <link rel="manifest" href="/manifest.json">
  <meta name="msapplication-TileColor" content="#ffffff">
  <meta name="msapplication-TileImage" content="icons/ms-icon-144x144.png">
  <meta name="theme-color" content="#ffffff">
  <!-- Google Fonts -->
  <link href="https://fonts.googleapis.com/css?family=Open+Sans:300,300i,400,400i,600,600i,700,700i|Raleway:300,300i,400,400i,500,500i,600,600i,700,700i|Poppins:300,300i,400,400i,500,500i,600,600i,700,700i" rel="stylesheet">
  <!-- Vendor CSS Files -->
  <link href="assets/vendor/icofont/icofont.min.css" rel="stylesheet">
  <link href="assets/vendor/remixicon/remixicon.css" rel="stylesheet">
  <link href="assets/vendor/owl.carousel/assets/owl.carousel.min.css" rel="stylesheet">
  <link href="assets/vendor/boxicons/css/boxicons.min.css" rel="stylesheet">
  <link href="assets/vendor/venobox/venobox.css" rel="stylesheet">
</head>
<body id="page-top">
<!-- Google Analytics.
     NOTE(review): this <script> was originally placed between </head> and <body>,
     which is invalid HTML; it now lives inside <body>. Also, analytics.js with a
     UA-* property stopped processing data in July 2023 — migrate to GA4 (gtag.js). -->
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-86672135-1', 'auto');
ga('send', 'pageview');
</script>
<!-- Fixed-top Bootstrap 3 navbar with page-scroll anchor links. -->
<nav id="mainNav" class="navbar navbar-default navbar-fixed-top">
  <div class="container-fluid">
    <!-- Brand and toggle get grouped for better mobile display -->
    <div class="navbar-header">
      <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#bs-example-navbar-collapse-1" aria-expanded="false">
        <span class="sr-only">Toggle navigation</span>
        <span class="icon-bar"></span>
        <span class="icon-bar"></span>
        <span class="icon-bar"></span>
      </button>
      <!-- alt added: the logo image previously had none (accessibility requirement). -->
      <a class="navbar-logo page-scroll" href="#page-top"><img src="../images/logo_white_circle.png" width="50" alt="Atlas Analytics Lab logo"></a>
    </div>
    <!-- Collect the nav links, forms, and other content for toggling -->
    <div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
      <ul class="nav navbar-nav navbar-right">
        <li>
          <a class="page-scroll" href="#about">About</a>
        </li>
        <li>
          <a class="page-scroll" href="#members">Our team</a>
        </li>
        <li>
          <a class="page-scroll" href="#publications">Publications</a>
        </li>
        <li>
          <a class="page-scroll" href="#teaching">Teaching</a>
        </li>
        <li>
          <a class="page-scroll" href="#news">News</a>
        </li>
        <li>
          <a class="page-scroll" href="#joinus">Join us!</a>
        </li>
        <li>
          <a class="page-scroll" href="#contact">Contact</a>
        </li>
      </ul>
    </div>
    <!-- /.navbar-collapse -->
  </div>
  <!-- /.container-fluid -->
</nav>
<!-- Hero banner. The literal square brackets around the lab name looked like a
     leftover template placeholder and were removed. -->
<header>
  <div class="header-content">
    <div class="header-content-inner">
      <h1>Atlas Analytics Lab</h1>
      <hr>
      <p>Deep Learning &amp; Computational Pathology Lab</p>
      <hr>
      <p>Computer Science and Software Engineering (CSSE)</p>
      <p>Concordia University, Montréal, QC, Canada</p>
      <a href="#about" class="btn btn-primary btn-xl page-scroll">Find Out More</a>
    </div>
  </div>
</header>
<!-- About section. Fixes: invalid "<br> ... </br>" constructs replaced with <p>
     paragraphs (br is a void element and has no end tag), and two stray unmatched
     </div> closers before </section> removed. -->
<section class="bg-primary" id="about">
  <div class="container">
    <div class="row">
      <div class="col-lg-8 col-lg-offset-2 text-center">
        <h2>Our purpose</h2>
        <hr class="light">
      </div>
    </div>
    <div class="par_left">
      <p>Our research purpose in Atlas Analytics Lab is to address the theoretical foundations of deep learning and computer vision algorithms applied in computational pathology. Our goal is to design efficient AI solutions in clinical pathology to improve patient outcomes in cancer diagnosis and treatments. The problems we are currently addressing are:</p>
      <ul>
        <li>Foundational modeling of histopathology tissues for efficient representation learning;</li>
        <li>Downstream tasking for cancer diagnosis;</li>
        <li>Cancer analysis and biomarker discovery;</li>
        <li>Multimodal image and natural language processing for post-analytical applications.</li>
      </ul>
      <p>Our research in deep learning to address the above applications encompasses:</p>
      <ul>
        <li>Self-supervised/Weakly-supervised representation learning (e.g. contrastive learning, multi-instance learning, diffusion generative modeling);</li>
        <li>Deep metric designs and learning;</li>
        <li>Efficient architecture designs of image encoder (e.g. CNN, ViT);</li>
        <li>Efficient and explainable deep neural training.</li>
      </ul>
    </div>
  </div>
</section>
<!-- Team section. Fixes: invalid <p><h3>…</h3></p> nesting (headings cannot live
     inside <p>) replaced with bare <h3>; invalid "</br></br>" end tags replaced
     with <br><br> (rendering-equivalent); attribute quoting normalized; bio
     typos/grammar fixed.
     NOTE(review): the deprecated hspace attribute is kept to preserve layout —
     consider moving the 5px horizontal margin into CSS. -->
<section id="members">
  <div class="container">
    <div class="row">
      <div class="col-lg-12 text-center">
        <h2 class="section-heading">Our team</h2>
        <hr class="primary">
      </div>
    </div>
    <div class="par_left">
      <h3>Director</h3>
      <a href="mailto:mahdi.hosseini@concordia.ca"><img class="img-circle" src="../images/people/mahdi_1.jpg" hspace="5" width="120" alt="Mahdi Hosseini" title="Mahdi Hosseini"></a><br><br>
      <p><span class="bold_font">Mahdi S. Hosseini</span> is an assistant professor of computer science in department of Computer Science and Software Engineering (CSSE) at Concordia University and a faculty member of Applied AI Institute at Concordia. He received his PhD from The Edward S. Rogers Sr. Department of Electrical and Computer Engineering (ECE) at the University of Toronto (UofT) in 2016 from Multimedia Lab under the supervision of Professor Konstantinos N. Plataniotis. He continued as a postdoctoral research fellow at UofT in collaboration with Huron Digital Pathology Inc at Waterloo Ontario. During his postdoctoral study at UofT, he received two fellowships of the MITACS-Elevate Award and NSERC research funding. Dr. Hosseini continues his collaboration with Huron Digital Pathology in the capacity of senior research scientist for transformational changes of digital pathology solutions in clinical healthcare systems.</p>
      <p>Dr. Hosseini’s research is primarily advanced in foundational developments of deep learning and computer vision algorithms which are efficiently designed for computational pathology applications. He is currently supervising several graduate students on related topics and his vision is to develop, in collaboration with hospitals and pathologists, meaningful computer aided diagnosis systems as assistive tools in clinical pathology for cancer diagnosis and treatment. He has published more than 30 papers and two patent applications in related fields. Dr. Hosseini’s reviewing services cover well-known venues in CVF foundation, ML Conferences and IEEE SPS. Dr. Hosseini is a selected Area Chair (AC) for CVPR2024, CVPR2023, NeurIPS2023, and CVPR2022.</p>
      <h3>Graduate students</h3>
      <a href="mailto:amirhossein.mohammadi@mail.concordia.ca"><img class="img-circle" src="../images/people/Amirhossein.jpg" hspace="5" width="120" alt="Amirhossein Mohammadi" title="Amirhossein Mohammadi"></a><br><br>
      <p><span class="bold_font">Amirhossein Mohammadi (PhD Candidate)</span> specializes in Computer Vision, tackling complex challenges in specialized domain images. His research includes automatic grayscale image colorization during his Bachelor's degree and developing innovative image-based encoding techniques for genomic sequence classification during his Master's at Sharif University of Technology.</p>
      <a href="mailto:portal.yang@mail.utoronto.ca"><img class="img-circle" src="../images/people/zhiyuan.JPG" hspace="5" width="120" alt="Zhiyuan Yang" title="Zhiyuan Yang"></a><br><br>
      <p><span class="bold_font">Zhiyuan Yang (PhD Candidate)</span> earned his Bachelor's in Computer Science at the University of Toronto from 2016 to 2021, and his Master's at Western University from 2021 to 2023 under Prof. Charles Ling. He specializes in applying deep learning to computational pathology and contributed to the AI4Path project, resulting in a paper currently under review by Nature Scientific Reports.</p>
      <a href="mailto:sinamaghsoudlou@sbmu.ac.ir"><img class="img-circle" src="../images/people/sina.jpg" hspace="5" width="120" alt="Sina Maghsoudlou" title="Sina Maghsoudlou"></a><br><br>
      <p><span class="bold_font">Sina Maghsoudlou (Graduate Diploma)</span> is a medical doctor with a strong interest in computer science. He aspires to merge his medical expertise with technology to create a meaningful impact and is actively pursuing an academic career in this field.</p>
      <a href="mailto:damien.martins-gomes@ipsa.fr"><img class="img-circle" src="../images/people/damien.jpg" hspace="5" width="120" alt="Damien Martins Gomes" title="Damien Martins Gomes"></a><br><br>
      <p><span class="bold_font">Damien Martins Gomes (MSc Candidate)</span> joined a research master's program at Concordia University, specializing in Deep Learning and Computer Vision. His focus will be studying optimizers for training deep learning models, particularly second-order optimizers. He is passionate about advancing the understanding and improvement of model optimization in the field of Deep Learning.</p>
      <a href="https://anasiri.github.io/"><img class="img-circle" src="../images/people/ali.png" hspace="5" width="120" alt="Ali Nasiri Sarvi" title="Ali Nasiri Sarvi"></a><br><br>
      <p><span class="bold_font">Ali Nasiri Sarvi (MSc Candidate)</span> received his Bachelor's degree in Computer Engineering from the Ferdowsi University of Mashhad, Iran. Before joining Atlas Analytics Lab, he had around three years of experience working on autonomous vehicle safety, physics-based sensor simulation using deep learning, and utilizing generative models for Virtual Try-On. His main research interests are generative models and continual learning.</p>
      <a href="https://www.linkedin.com/in/cassandre-notton-69ba6a131/"><img class="img-circle" src="../images/people/cassandre.jpg" hspace="5" width="120" alt="Cassandre Notton" title="Cassandre Notton"></a><br><br>
      <p><span class="bold_font">Cassandre Notton (MSc Candidate)</span> is pursuing her studies at Concordia University in the Master of Applied Computer Science, holding an engineering background (IMT Atlantique, France). After an internship on error potential evaluation via BCI for hands-free control, she is now a student of the Atlas Analytics Lab, where she works at the crossroads of engineering and health.</p>
      <a href="mailto:tomas.pereira@mail.concordia.ca"><img class="img-circle" src="../images/people/tomas.jpg" hspace="5" width="120" alt="Tomas Pereira" title="Tomas Pereira"></a><br><br>
      <p><span class="bold_font">Tomas Pereira (MSc Candidate)</span>, an undergraduate alumnus of Concordia University, brings expertise in deep learning for NLP and Computer Vision. Joining Atlas Analytics Lab in 2023, he is currently engaged in research on representation learning of histological tissues and multi-modal deep learning.</p>
      <a href="https://www.linkedin.com/in/denisha-thakkar/"><img class="img-circle" src="../images/people/denisha.png" hspace="5" width="120" alt="Denisha Thakkar" title="Denisha Thakkar"></a><br><br>
      <p><span class="bold_font">Denisha Thakkar (MSc Candidate)</span> is specializing in data science, with a deep passion for data and a strong focus on Deep Learning and Computer Vision. Her primary interest lies in leveraging the power of generative models and computer vision to develop innovative solutions for various medical purposes.</p>
      <h3>Undergraduate students</h3>
      <a href="mailto:ahmad.islah@mail.utoronto.ca"><img class="img-circle" src="../images/people/ahmad.PNG" hspace="5" width="120" alt="Ahmad Islah" title="Ahmad Islah"></a><br><br>
      <p><span class="bold_font">Ahmad Islah</span> is a rising fourth-year student at the University of Toronto, studying Computer Science. He is interested in theoretical and applied machine learning, particularly at the intersection of Computer Vision and Computational Pathology, and works on developing self-supervised representation learning models for cancer diagnosis as part of the UofT Multimedia Lab and Atlas Analytics Lab at Concordia University.</p>
      <a href="mailto:kaii.li@mail.utoronto.ca"><img class="img-circle" src="../images/people/kai.jpg" hspace="5" width="120" alt="Kai Li" title="Kai Li"></a><br><br>
      <p><span class="bold_font">Kai Li</span> is an incoming third year Engineering Science student majoring in Machine Intelligence. His research interests include computer vision algorithms, particularly those applied in computational pathology and medical imaging. He is also interested in explainable AI and their applications in these black-box models, aiming to integrate efficient machine learning models into clinical workflows.</p>
      <a href="mailto:arielle.zhang@mail.utoronto.ca"><img class="img-circle" src="../images/people/arielle.JPG" hspace="5" width="120" alt="Arielle Zhang" title="Arielle Zhang"></a><br><br>
      <!-- NOTE(review): the bio text previously read "Arielle Yang" while the email
           address and image alt/title both say "Zhang" — surname normalized to
           Zhang; confirm with the lab. -->
      <p><span class="bold_font">Arielle Zhang</span> is an undergraduate student at the University of Toronto, in the Division of Engineering Science. She is also a volunteer researcher at the UofT Multimedia Lab and the Atlas Analytics Lab at Concordia University. In May 2023, she started her internship at Noah’s Ark Lab, Huawei.</p>
    </div>
  </div>
</section>
<!-- Publications section opener (the section and container are closed after the
     last lecture-box). Fix: heading was invalidly nested as <p><h3>…</h3></p>. -->
<section id="publications">
  <div class="container">
    <div class="row">
      <div class="col-lg-12 text-center">
        <h2 class="section-heading">Publications</h2>
        <hr class="primary">
      </div>
    </div>
    <h3>Major Publications</h3>
<!-- Collapsible publication card. Fixes: "Computaional" typo in the link text,
     "Bejnordib" -> "Bejnordi" (stray trailing b in the author's surname),
     authors list ended with a comma, duplicated "fa" icon class, invalid </br>,
     empty style="" attribute removed. -->
<div class="lecture-box">
  <button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">Computational Pathology: A Survey Review and The Way Forward</span></button>
  <div class="lecture-content">
    <div class="padding-div">
      <p class="text-muted" style="font-size: 12px">Medical Image Analysis | 2023</p>
      <p><strong>Authors: </strong>Mahdi S. Hosseini, Babak Ehteshami Bejnordi, Vincent Quoc-Huy Trinh, Danial Hassan, Xingwen Li, Taehyo Kim, Haochen Zhang, Theodore Wu, Kajanan Chinniah, Sina Maghsoudlou, Ryan Zhang, Stephen Yang, Jiadai Zhu, Lyndon Chan, Samir Khaki, Andrei Buin, Fatemeh Chaji, Ala Salehi, Alejandra Zambrano Luna, Bich Ngoc Nguyen, Dimitris Samaras, Konstantinos N. Plataniotis.</p>
      <p><strong>Abstract: </strong>Computational Pathology (CoPath) is an interdisciplinary science that augments developments of computational approaches to analyze and model medical histopathology images. The main objective for CoPath is to develop infrastructure and workflows of digital diagnostics as an assistive CAD system for clinical pathology facilitating transformational changes in the diagnosis and treatment of cancer diseases. With evergrowing developments in deep learning and computer vision algorithms, and the ease of the data flow from digital pathology, currently CoPath is witnessing a paradigm shift. Despite the sheer volume of engineering and scientific works being introduced for cancer image analysis, there is still a considerable gap of adopting and integrating these algorithms in clinical practice. This raises a significant question regarding the direction and trends that are undertaken in CoPath. In this article we provide a comprehensive review of more than 700 papers to address the challenges faced in problem design all-the-way to the application and implementation viewpoints. We have catalogued each paper into a model-card by examining the key works and challenges faced to layout the current landscape in CoPath. We hope this helps the community to locate relevant works and facilitate understanding of the field's future directions. In a nutshell, we oversee the CoPath developments in cycle of stages which are required to be cohesively linked together to address the challenges associated with such multidisciplinary science. We overview this cycle from different perspectives of data-centric, model-centric, and application-centric problems. We finally sketch remaining challenges and provide directions for future technical developments and clinical integration of CoPath.</p>
      <h4>Paper:</h4>
      <div class="lecture-document-list">
        <a href="https://arxiv.org/abs/2304.05482"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">Computational Pathology: A Survey Review and The Way Forward</span></a>
      </div>
    </div>
  </div>
</div>
<!-- Collapsible publication card. Fixes: "Mutlilabel" typo inside the abstract,
     "(NeurIPS)| 2023" spacing, duplicated "fa" icon class, invalid </br>,
     empty style="" removed, hard-wrapped abstract joined. -->
<div class="lecture-box">
  <button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">End-to-End Supervised Multilabel Contrastive Learning</span></button>
  <div class="lecture-content">
    <div class="padding-div">
      <p class="text-muted" style="font-size: 12px">Neural Information Processing Systems (NeurIPS) | 2023</p>
      <p><strong>Authors: </strong>Ahmad Sajedi, Samir Khaki, Konstantinos N. Plataniotis, Mahdi S. Hosseini</p>
      <p><strong>Abstract: </strong>Multilabel representation learning is recognized as a challenging problem that can be associated with either label dependencies between object categories or data-related issues such as the inherent imbalance of positive/negative samples. Recent advances address these challenges from model- and data-centric viewpoints. In model-centric, the label correlation is obtained by an external model designs (e.g., graph CNN) to incorporate an inductive bias for training. However, they fail to design an end-to-end training framework, leading to high computational complexity. On the contrary, in data-centric, the realistic nature of the dataset is considered for improving the classification while ignoring the label dependencies. In this paper, we propose a new end-to-end training framework -- dubbed KMCL (Kernel-based Multilabel Contrastive Learning) -- to address the shortcomings of both model- and data-centric designs. The KMCL first transforms the embedded features into a mixture of exponential kernels in Gaussian RKHS. It is then followed by encoding an objective loss that is comprised of (a) reconstruction loss to reconstruct kernel representation, (b) asymmetric classification loss to address the inherent imbalance problem, and (c) contrastive loss to capture label correlation. The KMCL models the uncertainty of the feature encoder while maintaining a low computational footprint. Extensive experiments are conducted on image classification tasks to showcase the consistent improvements of KMCL over the SOTA methods.</p>
      <h4>GitHub Repository:</h4>
      <div class="lecture-document-list">
        <a href="https://github.com/mahdihosseini/KMCL"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">KMCL PyTorch implementation</span></a>
      </div>
      <h4>Paper:</h4>
      <div class="lecture-document-list">
        <a href="https://arxiv.org/abs/2307.03967"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">End-to-End Supervised Multilabel Contrastive Learning</span></a>
      </div>
    </div>
  </div>
</div>
<!-- Collapsible publication card. Fixes: PDF copy-paste hyphenation artifacts in
     the abstract ("con- siderable", "aug- menting", "ac- curacy", "archi-
     tecture", broken "2 ."), missing final period, title capitalization
     ("darts" -> "DARTS" to match the paper), duplicated "fa" class, invalid
     </br>, empty style="" removed. -->
<div class="lecture-box">
  <button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">Pseudo-Inverted Bottleneck Convolution for DARTS Search Space</span></button>
  <div class="lecture-content">
    <div class="padding-div">
      <p class="text-muted" style="font-size: 12px">Accepted in ICASSP | 2023</p>
      <p><strong>Authors: </strong>Arash Ahmadian, Yue Fei, Louis S.P. Liu, Konstantinos N. Plataniotis, and Mahdi S. Hosseini</p>
      <p><strong>Abstract: </strong>Differentiable Architecture Search (DARTS) has attracted considerable attention as a gradient-based Neural Architecture Search (NAS) method. Since the introduction of DARTS, there has been little work done on adapting the action space based on state-of-art architecture design principles for CNNs. In this work, we aim to address this gap by incrementally augmenting the DARTS search space with micro-design changes inspired by ConvNeXt and studying the trade-off between accuracy, evaluation layer count, and computational cost. To this end, we introduce the Pseudo-Inverted Bottleneck conv block intending to reduce the computational footprint of the inverted bottleneck block proposed in ConvNeXt. Our proposed architecture is much less sensitive to evaluation layer count and outperforms a DARTS network with similar size significantly, at layer counts as small as 2. Furthermore, with less layers, not only does it achieve higher accuracy with lower GMACs and parameter count, GradCAM comparisons show that our network is able to better detect distinctive features of target objects compared to DARTS.</p>
      <h4>Paper:</h4>
      <div class="lecture-document-list">
        <a href="https://www.dropbox.com/s/5zod4a3jgae06ze/ConvSearch_ICASSP2023.pdf?dl=0"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">Pseudo-Inverted Bottleneck Convolution for DARTS Search Space</span></a>
      </div>
    </div>
  </div>
</div>
<!-- Collapsible publication card. Fixes: "(CVPR)| 2022" spacing, "improve in
     generalization performance" grammar, missing final period, duplicated "fa"
     class, invalid </br>, empty style="" removed. -->
<div class="lecture-box">
  <button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">Exploiting Explainable Metrics for Augmented SGD</span></button>
  <div class="lecture-content">
    <div class="padding-div">
      <p class="text-muted" style="font-size: 12px">In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) | 2022, June 19-24, New Orleans, Louisiana</p>
      <p><strong>Authors: </strong>Mahdi S. Hosseini, Mathieu Tuli and Konstantinos N. Plataniotis.</p>
      <p><strong>Abstract: </strong>Explaining the generalization characteristics of deep learning is an emerging topic in advanced machine learning. There are several unanswered questions about how learning under stochastic optimization really works and why certain strategies are better than others. In this paper, we address the following question: can we probe intermediate layers of a deep neural network to identify and quantify the learning quality of each layer? With this question in mind, we propose new explainability metrics that measure the redundant information in a network's layers using a low-rank factorization framework and quantify a complexity measure that is highly correlated with the generalization performance of a given optimizer, network, and dataset. We subsequently exploit these metrics to augment the Stochastic Gradient Descent (SGD) optimizer by adaptively adjusting the learning rate in each layer to improve generalization performance. Our augmented SGD -- dubbed RMSGD -- introduces minimal computational overhead compared to SOTA methods and outperforms them by exhibiting strong generalization characteristics across application, architecture, and dataset.</p>
      <h4>Paper:</h4>
      <div class="lecture-document-list">
        <a href="https://openaccess.thecvf.com/content/CVPR2022/html/Hosseini_Exploiting_Explainable_Metrics_for_Augmented_SGD_CVPR_2022_paper.html"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">Exploiting Explainable Metrics for Augmented SGD</span></a>
      </div>
    </div>
  </div>
</div>
<!-- Collapsible publication card. Fixes: authors paragraph was never closed
     before the abstract paragraph, "state-of-theart" line-break artifact,
     missing final period, duplicated "fa" class, invalid </br>, empty style=""
     removed. Repo URL ended in "autoHyperL" — trailing L dropped as a likely
     typo for "autoHyper"; verify the link resolves.
     NOTE(review): "Tunning" matches the linked paper's own title; confirm
     before "correcting" it to "Tuning". -->
<div class="lecture-box">
  <button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">Towards Robust and Automatic Hyper-Parameter Tunning</span></button>
  <div class="lecture-content">
    <div class="padding-div">
      <p class="text-muted" style="font-size: 12px">Optimization for Machine Learning at NeurIPS2021</p>
      <p><strong>Authors: </strong>Mathieu Tuli, Mahdi S. Hosseini, and Konstantinos N. Plataniotis.</p>
      <p><strong>Abstract: </strong>The task of hyper-parameter optimization (HPO) is burdened with heavy computational costs due to the intractability of optimizing both a model’s weights and its hyper-parameters simultaneously. In this work, we introduce a new class of HPO method and explore how the low-rank factorization of the convolutional weights of intermediate layers of a convolutional neural network can be used to define an analytical response surface [2] for optimizing hyper-parameters, using only training data. We quantify how this surface behaves as a surrogate to model performance and can be solved using a trust-region search algorithm, which we call autoHyper. The algorithm outperforms state-of-the-art such as Bayesian Optimization and generalizes across model, optimizer, and dataset selection.</p>
      <h4>GitHub Repository:</h4>
      <div class="lecture-document-list">
        <a href="https://github.com/MathieuTuli/autoHyper"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">autoHyper implementation</span></a>
      </div>
      <h4>Paper:</h4>
      <div class="lecture-document-list">
        <a href="https://opt-ml.org/papers/2021/paper17.pdf"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">Towards Robust and Automatic Hyper-Parameter Tunning</span></a>
      </div>
    </div>
  </div>
</div>
<!-- Collapsible publication card. Fixes: a leftover LaTeX comment ("%Since
     pathologist time is expensive, dataset curation is intrinsically
     difficult.") was pasted into the abstract — in the LaTeX source that
     sentence was commented out, so it is removed here; authors paragraph was
     never closed; duplicated "fa" class; invalid </br>; empty style="" removed. -->
<div class="lecture-box">
  <button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">HistoKT: Cross Knowledge Transfer in Computational Pathology</span></button>
  <div class="lecture-content">
    <div class="padding-div">
      <p class="text-muted" style="font-size: 12px">Accepted in International Conference on Acoustics, Speech and Signal Processing | ICASSP 2022</p>
      <p><strong>Authors: </strong>Ryan Zhang*, Jiadai Zhu*, Stephen Yang*, Mahdi S. Hosseini*, Angelo Genovese, Lina Chen, Corwyn Rowsell, Savvas Damaskinos, Konstantinos N. Plataniotis. (*: equal contribution)</p>
      <p><strong>Abstract: </strong>The lack of well-annotated datasets in computational pathology (CPath) obstructs the application of deep learning techniques for classifying medical images. Many CPath workflows involve transferring learned knowledge between various image domains through transfer learning. Currently, most transfer learning research follows a model-centric approach, tuning network parameters to improve transfer results over few datasets. In this paper, we take a data-centric approach to the transfer learning problem and examine the existence of generalizable knowledge between histopathological datasets. First, we create a standardization workflow for aggregating existing histopathological data. We then measure inter-domain knowledge by training ResNet18 models across multiple histopathological datasets, and cross-transferring between them to determine the quantity and quality of innate shared knowledge. Additionally, we use weight distillation to share knowledge between models without additional training. We find that hard to learn, multi-class datasets benefit most from pretraining, and a two stage learning framework incorporating a large source domain such as ImageNet allows for better utilization of smaller datasets. Furthermore, we find that weight distillation enables models trained on purely histopathological features to outperform models using external natural image data.</p>
      <h4>Paper:</h4>
      <div class="lecture-document-list">
        <a href="https://arxiv.org/abs/2201.11246"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">HistoKT: Cross Knowledge Transfer in Computational Pathology</span></a>
      </div>
    </div>
  </div>
</div>
</div>
</section>
<section id="teaching">
<div class="container">
<div class="row">
<div class="col-lg-12 text-center">
<h2 class="section-heading">Lectures</h2>
<hr class="primary">
</div>
</div>
<div class="lecture-box" style="">
<button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">COMP 6721: Applied Artificial Intelligence (Graduate level)</span></button>
<div class="lecture-content">
<div class="padding-div">
<p class="text-muted" style="font-size: 12px">Fall 2023 | Lecture & Lab & Project</p>
<p>The course covers heuristic and adversarial searches for concrete applications. It then discusses automated reasoning, advanced knowledge representation and dealing with uncertainly for Artificial Intelligence applications. Finally, it introduces autoencoders, recurrent neural networks and sequence to sequence models. A project is required.</p>
</div>
</div>
</div>
<div class="lecture-box" style="margin-bottom: 40px;">
<button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">COMP 6321: Machine Learning (Graduate level)</span></button>
<div class="lecture-content">
<div class="padding-div">
<p class="text-muted" style="font-size: 12px">Fall 2023 | Lecture & Lab & Project</p>
<p>Introduction to the fundamentals of machine learning. Linear models: linear and polynomial regression, overfitting, model selection, logistic regression, naive Bayes. Non-linear models: decision trees, instance-based learning, boosting, neural networks. Support vector machines and kernels. Computational learning theory. Experimental methodology, sources of error. Structured models: graphical models, deep belief networks. Unsupervised learning: k-means, mixture models, density estimation, expectation maximization, principal component analysis, eigenmaps and other dimensionality reduction methods. Learning in dynamical systems: hidden Markov models and other types of temporal/sequence models. Reinforcement learning. Survey of machine learning and its applications. A project is required.</p>
</div>
</div>
</div>
<div class="lecture-box" style="margin-bottom: 40px;">
<button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title">COMP 432: Machine Learning (Undergraduate level)</span></button>
<div class="lecture-content">
<div class="padding-div">
<p class="text-muted" style="font-size: 12px">Fall 2023 | Lecture & Lab & Project</p>
<p>Introduction to the fundamentals of machine learning.</p>
</div>
</div>
</div>
</div>
</section>
<div class="divider"></div>
<section class="bg-dark" id="news">
<div class="container text-center">
<div class="call-to-action">
<h2>Conferences and talks</h2>
<a href="https://www.youtube.com/watch?v=S3s66Q0JpEY" class="btn btn-default btn-xl wow" target="_blank" rel="noopener noreferrer">PERFORM Concordia</a>
<a href="https://www.youtube.com/watch?v=4BsuLhNYD2M" class="btn btn-default btn-xl wow" target="_blank" rel="noopener noreferrer">Tissue Image Analytics (TIA) Centre, Warwick</a>
</div>
</div>
</section>
<div class="divider"></div>
<section id="joinus">
<div class="container">
<div class="row">
<div class="col-lg-12 text-center">
<h2 class="section-heading">Join us!</h2>
<hr class="primary">
</div>
</div>
<div class="lecture-box">
<button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title"> PhD positions </span></button>
<div class="lecture-content">
<div class="padding-div">
<p class="text-muted" style="font-size: 12px">Friday, October 13th | 2023</p>
<p>We are hiring PhD students!</p>
<h4>Job description:</h4>
<div class="lecture-document-list">
<a href="https://drive.google.com/file/d/1y1J_P7N8A5VUe-WmpyT5qA7Zxvd5stB0/view?usp=sharing"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">PhD Position tailored for French citizens</span></a>
</div>
</div>
</div>
</div>
<div class="lecture-box">
<button type="button" class="lecture-collapsible"><span class="fa fa-2x fa-university text-primary"></span><span class="lecture-title"> MSc positions </span></button>
<div class="lecture-content">
<div class="padding-div">
<p class="text-muted" style="font-size: 12px">Friday, October 13th | 2023</p>
<p>We are hiring MSc students!</p>
<h4>Job description:</h4>
<div class="lecture-document-list">
<a href="https://drive.google.com/file/d/1QxLab9_ry-evJmdGMoFINRKGRjt-385j/view?usp=sharing"><i class="fa fa-file-text-o text-primary"></i><span class="lecture-document">MSc Position tailored for French citizens</span></a>
</div>
</div>
</div>
</div>
</div>
</section>
<section id="contact">
<div class="container">
<div class="row">
<div class="col-lg-8 col-lg-offset-2 text-center">
<h2 class="section-heading">Contact us!</h2>
<hr class="primary">
<p class="running-text" style="text-align:justify">
If you have any questions or recommendations for the website, the materials, the research or the publications, feel free to contact us!
</p>
</div>
<div class="col-lg-4 col-lg-offset-2 text-center">
<i class="fa fa-map-marker fa-3x wow bounceIn"></i>
<p><a href="https://goo.gl/maps/wxmcexx2qXrd1xrj6">ER Building, 9th floor, 2155 Guy Street, H3H 2L5 Montreal, Canada</a></p>
</div>
<div class="col-lg-4 text-center">
<i class="fa fa-envelope-o fa-3x wow bounceIn" data-wow-delay=".1s"></i>
<p><a href="mailto:mahdi.hosseini@concordia.ca">mahdi.hosseini@concordia.ca</a></p>
</div>
<div class="col-lg-8 col-lg-offset-2 text-center">
<hr class="primary">
<p>
Credits: this website has been inspired by <a href="https://uvadlc.github.io">UVA Deep Learning Course</a>
</p>
</div>
</div>
</div>
</section>
<!-- jQuery -->
<script src="js/jquery.js"></script>
<!-- Bootstrap Core JavaScript -->
<script src="js/bootstrap.min.js"></script>
<!-- Plugin JavaScript -->
<script src="js/jquery.easing.min.js"></script>
<script src="js/jquery.fittext.js"></script>
<script src="js/wow.min.js"></script>
<!-- Custom Theme JavaScript -->
<script src="js/creative.js"></script>
<script src="js/collapsible.js"></script>
<!-- Vendor JS Files -->
<!-- NOTE(review): jQuery and jquery.easing are also loaded above from js/ (duplicate loads); consolidate to a single copy so plugins all bind to the same jQuery instance -->
<script src="assets/vendor/jquery/jquery.min.js"></script>
<script src="assets/vendor/jquery.easing/jquery.easing.min.js"></script>
<script src="assets/vendor/php-email-form/validate.js"></script>
<script src="assets/vendor/waypoints/jquery.waypoints.min.js"></script>
<script src="assets/vendor/counterup/counterup.min.js"></script>
<script src="assets/vendor/owl.carousel/owl.carousel.min.js"></script>
<script src="assets/vendor/isotope-layout/isotope.pkgd.min.js"></script>
<script src="assets/vendor/venobox/venobox.min.js"></script>
<script src="assets/vendor/typed.js/typed.min.js"></script>
<!-- Template Main JS File -->
<script src="assets/js/main.js"></script>
</body>
</html>