Publications
My publications are also listed on HAL and Google Scholar.

Viswanath Sivakumar, Jeffrey Seely, Alan Du, Sean R. Bittner, Adam Berenzweig, Anuoluwapo Bolarinwa, Alexandre Gramfort, and Michael I. Mandel. emg2qwerty: A Large Dataset with Baselines for Touch Typing using Surface Electromyography. arXiv:2410.20081, 2024. https://arxiv.org/abs/2410.20081
CTRL-labs at Reality Labs, David Sussillo, Patrick Kaifosh, and Thomas Reardon. A generic noninvasive neuromotor interface for human-computer interaction. bioRxiv 2024.02.23.581779, 2024. https://doi.org/10.1101/2024.02.23.581779
Théo Gnassounou, Antoine Collas, Rémi Flamary, Karim Lounici, and Alexandre Gramfort. Multi-Source and Test-Time Domain Adaptation on Multivariate Signals using Spatio-Temporal Monge Alignment. arXiv:2407.14303, 2024. https://arxiv.org/abs/2407.14303
Yanis Lalou, Théo Gnassounou, Antoine Collas, Antoine de Mathelin, Oleksii Kachaiev, Ambroise Odonnat, Alexandre Gramfort, Thomas Moreau, and Rémi Flamary. SKADA-Bench: Benchmarking Unsupervised Domain Adaptation Methods with Realistic Validation. arXiv:2407.11676, 2024. https://arxiv.org/abs/2407.11676
Apolline Mellot, Antoine Collas, Sylvain Chevallier, Alexandre Gramfort, and Denis A. Engemann. Geodesic Optimization for Predictive Shift Adaptation on EEG data. arXiv:2407.03878, 2024. https://arxiv.org/abs/2407.03878
Hubert Banville, Maurice Abou Jaoude, Sean U. N. Wood, Chris Aimone, Sebastian C. Holst, Alexandre Gramfort, and Denis-Alexander Engemann. Do try this at home: Age prediction from sleep and meditation with large-scale low-cost mobile EEG. Imaging Neuroscience, 2:1–15, 2024. https://doi.org/10.1162/imag_a_00189
Julia Linhart, Gabriel Victorino Cardoso, Alexandre Gramfort, Sylvain Le Corff, and Pedro L. C. Rodrigues. Diffusion posterior sampling for simulation-based inference in tall data settings. arXiv:2404.07593, 2024. https://arxiv.org/abs/2404.07593
Russell A. Poldrack, Christopher J. Markiewicz, Stefan Appelhoff, et al. (including Alexandre Gramfort). The past, present, and future of the brain imaging data structure (BIDS). Imaging Neuroscience, 2:1–19, 2024. https://doi.org/10.1162/imag_a_00103
Apolline Mellot, Antoine Collas, Sylvain Chevallier, Denis Engemann, and Alexandre Gramfort. Physics-informed and Unsupervised Riemannian Domain Adaptation for Machine Learning on Heterogeneous EEG Datasets. arXiv:2403.15415, 2024. https://arxiv.org/abs/2403.15415
Nicholas Tolley, Pedro L. C. Rodrigues, Alexandre Gramfort, and Stephanie R. Jones. Methods and considerations for estimating parameters in biophysically detailed neural models with simulation based inference. PLOS Computational Biology, 20(2):1–29, 2024. https://doi.org/10.1371/journal.pcbi.1011108
Théo Gnassounou, Rémi Flamary, and Alexandre Gramfort. Convolution Monge Mapping Normalization for learning on sleep data. In Advances in Neural Information Processing Systems (NeurIPS), 36:10457–10476, 2023. https://proceedings.neurips.cc/paper_files/paper/2023/file/21718991f6acf19a42376b5c7a8668c5-Paper-Conference.pdf
Julia Linhart, Alexandre Gramfort, and Pedro Rodrigues. L-C2ST: Local Diagnostics for Posterior Approximations in Simulation-Based Inference. In Advances in Neural Information Processing Systems (NeurIPS), 36:56384–56410, 2023. https://proceedings.neurips.cc/paper_files/paper/2023/file/b0313c2f4501a81d0e0d4a1e8fbf4995-Paper-Conference.pdf
Antoine Collas, Rémi Flamary, and Alexandre Gramfort. Weakly supervised covariance matrices alignment through Stiefel matrices estimation for MEG applications. arXiv:2402.03345, 2024. https://arxiv.org/abs/2402.03345
Quentin Klopfenstein, Quentin Bertrand, Alexandre Gramfort, Joseph Salmon, and Samuel Vaiter. Local linear convergence of proximal coordinate descent algorithm. Optimization Letters, 18(1):135–154, 2024. https://doi.org/10.1007/s11590-023-01976-z
Apolline Mellot, Antoine Collas, Pedro L. C. Rodrigues, Denis Engemann, and Alexandre Gramfort. Harmonizing and aligning M/EEG datasets with covariance-based techniques to enhance predictive regression modeling. Imaging Neuroscience, 1:1–23, 2023. https://doi.org/10.1162/imag_a_00040
David Sabbagh, Jérôme Cartailler, Cyril Touchard, Jona Joachim, Alexandre Mebazaa, Fabrice Vallée, Étienne Gayat, Alexandre Gramfort, and Denis A. Engemann. Repurposing electroencephalogram monitoring of general anaesthesia for building biomarkers of brain ageing: an exploratory study. BJA Open, 7:100145, 2023. https://doi.org/10.1016/j.bjao.2023.100145
Guillaume Staerman, Cédric Allain, Alexandre Gramfort, and Thomas Moreau. FaDIn: Fast Discretized Inference for Hawkes Processes with General Parametric Kernels. In Proceedings of the 40th International Conference on Machine Learning (ICML), PMLR 202:32575–32597, 2023. https://proceedings.mlr.press/v202/staerman23a.html
Charlotte Caucheteux, Alexandre Gramfort, and Jean-Rémi King. Hierarchical organization of language predictions in the brain. Nature Human Behaviour, 7(3):308–309, 2023. https://doi.org/10.1038/s41562-023-01534-8
Charlotte Caucheteux, Alexandre Gramfort, and Jean-Rémi King. Evidence of a predictive coding hierarchy in the human brain listening to speech. Nature Human Behaviour, 7(3):430–441, 2023. https://doi.org/10.1038/s41562-022-01516-2
Lindsey Power, Cédric Allain, Thomas Moreau, Alexandre Gramfort, and Timothy Bardouille. Using convolutional dictionary learning to detect task-related neuromagnetic transients and ageing trends in a large open-access dataset. NeuroImage, 267:119809, 2023. https://doi.org/10.1016/j.neuroimage.2022.119809
Omar Chehab, Alexandre Gramfort, and Aapo Hyvärinen. Optimizing the Noise in Self-Supervised Learning: from Importance Sampling to Noise-Contrastive Estimation. arXiv:2301.09696, 2023. https://arxiv.org/abs/2301.09696
Juliette Millet, Charlotte Caucheteux, Pierre Orhan, Yves Boubenec, Alexandre Gramfort, Ewan Dunbar, Christophe Pallier, and Jean-Rémi King. Toward a realistic model of speech processing in the brain with self-supervised learning. In Advances in Neural Information Processing Systems (NeurIPS), 35:33428–33443, 2022. https://proceedings.neurips.cc/paper_files/paper/2022/file/d81ecfc8fb18e833a3fa0a35d92532b8-Paper-Conference.pdf
Cédric Rommel, Thomas Moreau, and Alexandre Gramfort. Deep invariant networks with differentiable augmentation layers. In Advances in Neural Information Processing Systems (NeurIPS), 35:35672–35683, 2022. https://proceedings.neurips.cc/paper_files/paper/2022/file/e7d019329e662fe4685be505befca3bb-Paper-Conference.pdf
Cédric Rommel, Joseph Paillard, Thomas Moreau, and Alexandre Gramfort. Data augmentation for learning predictive models on EEG: a systematic comparison. Journal of Neural Engineering, 19(6):066020, 2022. https://dx.doi.org/10.1088/1741-2552/aca220
Thomas Moreau, Mathurin Massias, Alexandre Gramfort, Pierre Ablin, Pierre-Antoine Bannier, Benjamin Charlier, Mathieu Dagréou, Tom Dupré la Tour, Ghislain Durif, Cassio F. Dantas, Quentin Klopfenstein, Johan Larsson, En Lai, Tanguy Lefort, Benoit Malézieux, Badr Moufad, Binh T. Nguyen, Alain Rakotomamonjy, Zaccharie Ramzi, Joseph Salmon, and Samuel Vaiter. Benchopt: Reproducible, efficient and collaborative optimization benchmarks. arXiv:2206.13424, 2022. https://arxiv.org/abs/2206.13424
David Sabbagh, Jérôme Cartailler, Cyril Touchard, Jona Joachim, Alexandre Mebazaa, Fabrice Vallée, Étienne Gayat, Alexandre Gramfort, and Denis A. Engemann. Repurposing EEG monitoring of general anaesthesia for building biomarkers of brain ageing: An exploratory study. medRxiv 2022.05.05.22274610, 2022. https://doi.org/10.1101/2022.05.05.22274610
Hicham Janati, Marco Cuturi, and Alexandre Gramfort. Averaging Spatio-temporal Signals using Optimal Transport and Soft Alignments. arXiv:2203.05813, 2022. https://arxiv.org/abs/2203.05813
Omar Chehab, Alexandre Gramfort, and Aapo Hyvärinen. The Optimal Noise in Noise-Contrastive Learning Is Not What You Think. In The 38th Conference on Uncertainty in Artificial Intelligence (UAI), 2022. https://openreview.net/forum?id=SEef8wIj5lc
Hubert Banville, Sean U. N. Wood, Chris Aimone, Denis-Alexander Engemann, and Alexandre Gramfort. Robust learning from corrupted EEG with dynamic spatial filtering. NeuroImage, 251:118994, 2022. https://doi.org/10.1016/j.neuroimage.2022.118994
Alexander P. Rockhill, Eric Larson, Brittany Stedelin, Alessandra Mantovani, Ahmed M. Raslan, Alexandre Gramfort, and Nicole C. Swann. Intracranial Electrode Location and Analysis in MNE-Python. Journal of Open Source Software, 7(70):3897, 2022. https://doi.org/10.21105/joss.03897
Denis A. Engemann, Apolline Mellot, Richard Höchenberger, Hubert Banville, David Sabbagh, Lukas Gemein, Tonio Ball, and Alexandre Gramfort. A reusable benchmark of brain-age prediction from M/EEG resting-state signals. NeuroImage, 262:119521, 2022. https://doi.org/10.1016/j.neuroimage.2022.119521
Hugo Richard, Pierre Ablin, Bertrand Thirion, Alexandre Gramfort, and Aapo Hyvärinen. Shared Independent Component Analysis for Multi-Subject Neuroimaging. In Advances in Neural Information Processing Systems 34 (NeurIPS), 2021. https://arxiv.org/abs/2110.13502
Hugo Richard, Pierre Ablin, Aapo Hyvärinen, Alexandre Gramfort, and Bertrand Thirion. Adaptive Multi-View ICA: Estimation of noise levels for optimal inference. arXiv:2102.10964, 2021. https://arxiv.org/abs/2102.10964
Charlotte Caucheteux, Alexandre Gramfort, and Jean-Rémi King. Model-based analysis of brain activity reveals the hierarchy of language in 305 subjects. arXiv:2110.06078, 2021. https://arxiv.org/abs/2110.06078
Quentin Bertrand, Quentin Klopfenstein, Mathurin Massias, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. Implicit differentiation for fast hyperparameter selection in non-smooth convex learning. arXiv:2105.01637, 2021. https://arxiv.org/abs/2105.01637
Hubert Banville, Omar Chehab, Aapo Hyvärinen, Denis-Alexander Engemann, and Alexandre Gramfort. Uncovering the structure of clinical EEG signals with self-supervised learning. Journal of Neural Engineering, 18(4):046020, 2021. https://doi.org/10.1088/1741-2552/abca18
Omar Chehab, Alexandre Defossez, Jean-Christophe Loiseau, Alexandre Gramfort, and Jean-Rémi King. Deep Recurrent Encoder: A scalable end-to-end network to model brain signals. arXiv:2103.02339, 2021. https://arxiv.org/abs/2103.02339
Charlotte Caucheteux, Alexandre Gramfort, and Jean-Rémi King. GPT-2's activations predict the degree of semantic comprehension in the human brain. bioRxiv 2021.04.20.440622, 2021. https://doi.org/10.1101/2021.04.20.440622
Pierre Ablin, Jean-François Cardoso, and Alexandre Gramfort. Spectral Independent Component Analysis with noise modeling for M/EEG source separation. Journal of Neuroscience Methods, 356:109144, 2021. https://doi.org/10.1016/j.jneumeth.2021.109144
Rémi Flamary, Nicolas Courty, Alexandre Gramfort, Mokhtar Z. Alaya, Aurélie Boisbunon, Stanislas Chambon, Laetitia Chapel, Adrien Corenflos, Kilian Fatras, Nemo Fournier, Léo Gautheron, Nathalie T. H. Gayraud, Hicham Janati, Alain Rakotomamonjy, Ievgen Redko, Antoine Rolet, Antony Schutz, Vivien Seguy, Danica J. Sutherland, Romain Tavenard, Alexander Tong, and Titouan Vayer. POT: Python Optimal Transport. Journal of Machine Learning Research, 22(78):1–8, 2021. http://jmlr.org/papers/v22/20-451.html
Ronan Perry, Gavin Mischler, Richard Guo, Theodore Lee, Alexander Chang, Arman Koul, Cameron Franz, Hugo Richard, Iain Carmichael, Pierre Ablin, Alexandre Gramfort, and Joshua T. Vogelstein. mvlearn: Multiview Machine Learning in Python. Journal of Machine Learning Research, 22(109):1–7, 2021. http://jmlr.org/papers/v22/20-1370.html
Cédric Rommel, Thomas Moreau, Joseph Paillard, and Alexandre Gramfort. CADDA: Class-wise Automatic Differentiable Data Augmentation for EEG Signals. In International Conference on Learning Representations (ICLR), 2022. https://openreview.net/forum?id=6IYp-35L-xJ
Pedro L. C. Rodrigues, Thomas Moreau, Gilles Louppe, and Alexandre Gramfort. HNPE: Leveraging Global Parameters for Neural Posterior Estimation. In Advances in Neural Information Processing Systems 34 (NeurIPS), 2021. https://arxiv.org/abs/2102.06477
Maëliss Jallais, Pedro L. C. Rodrigues, Alexandre Gramfort, and Demian Wassermann. Diffusion MRI-Based Cytoarchitecture Measurements in Brain Gray Matter using Likelihood-Free Inference. In ISMRM 2021 - Annual Meeting of the International Society for Magnetic Resonance in Medicine, Vancouver / Virtual, Canada, 2021. https://hal.inria.fr/hal-03156307
Pierre-Antoine Bannier, Quentin Bertrand, Joseph Salmon, and Alexandre Gramfort. Electromagnetic neural source imaging under sparsity constraints with SURE-based hyperparameter tuning. In Medical Imaging meets NeurIPS 2021, Sydney, Australia, 2021. https://hal.archives-ouvertes.fr/hal-03418092
Charlotte Caucheteux, Alexandre Gramfort, and Jean-Rémi King. Disentangling syntax and semantics in the brain with deep networks. In Proceedings of the 38th International Conference on Machine Learning (ICML), PMLR 139:1336–1348, 2021. https://proceedings.mlr.press/v139/caucheteux21a.html
Maëliss Jallais, Pedro Rodrigues, Alexandre Gramfort, and Demian Wassermann. Inverting brain grey matter models with likelihood-free inference: a tool for trustable cytoarchitecture measurements. arXiv:2111.08693, 2021. https://arxiv.org/abs/2111.08693
Maëliss Jallais, Pedro Luiz Coelho Rodrigues, Alexandre Gramfort, and Demian Wassermann. Cytoarchitecture Measurements in Brain Gray Matter using Likelihood-Free Inference. In IPMI 2021, Rønne, Denmark, 2021. https://hal.inria.fr/hal-03090959
Pedro L. C. Rodrigues and Alexandre Gramfort. Learning summary features of time series for likelihood free inference. arXiv:2012.02807, 2020. https://arxiv.org/abs/2012.02807
Quentin Klopfenstein, Quentin Bertrand, Alexandre Gramfort, Joseph Salmon, and Samuel Vaiter. Model identification and local linear convergence of coordinate descent. arXiv:2010.11825, 2020. https://arxiv.org/abs/2010.11825
Hugo Richard, Luigi Gresele, Aapo Hyvärinen, Bertrand Thirion, Alexandre Gramfort, and Pierre Ablin. Modeling Shared Responses in Neuroimaging Studies through MultiView ICA. In Advances in Neural Information Processing Systems 33 (NeurIPS), 2020. https://arxiv.org/abs/2006.06635
Jérôme-Alexis Chevalier, Joseph Salmon, Alexandre Gramfort, and Bertrand Thirion. Statistical control for spatio-temporal MEG/EEG source imaging with desparsified multi-task Lasso. In Advances in Neural Information Processing Systems 33 (NeurIPS), 2020. https://arxiv.org/abs/2009.14310
Hicham Janati, Marco Cuturi, and Alexandre Gramfort. Spatio-temporal alignments: Optimal transport through space and time. In International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR 108:1695–1704, 2020. http://proceedings.mlr.press/v108/janati20a.html
Hicham Janati, Marco Cuturi, and Alexandre Gramfort. Debiased Sinkhorn Barycenters. In Proceedings of ICML 2020. https://proceedings.icml.cc/static/paper_files/icml/2020/1584-Paper.pdf
Quentin Bertrand, Quentin Klopfenstein, Mathieu Blondel, Samuel Vaiter, Alexandre Gramfort, and Joseph Salmon. Implicit differentiation of Lasso-type models for hyperparameter optimization. In Proceedings of ICML 2020. https://arxiv.org/abs/2002.08943
Mathurin Massias, Quentin Bertrand, Alexandre Gramfort, and Joseph Salmon. Support recovery and sup-norm convergence rates for sparse pivotal estimation. In International Conference on Artificial Intelligence and Statistics (AISTATS), PMLR, pages 2655–2665, 2020. http://proceedings.mlr.press/v108/massias20a/massias20a.pdf
Denis A. Engemann, Oleh Kozynets, David Sabbagh, Guillaume Lemaître, Gaël Varoquaux, Franziskus Liem, and Alexandre Gramfort. Combining magnetoencephalography with magnetic resonance imaging enhances learning of surrogate-biomarkers. eLife, 9:e54055, 2020. https://doi.org/10.7554/eLife.54055
David Sabbagh, Pierre Ablin, Gaël Varoquaux, Alexandre Gramfort, and Denis A. Engemann. Predictive regression modeling with MEG/EEG: from source power to signals and cognitive states. NeuroImage, 222:116893, 2020. https://doi.org/10.1016/j.neuroimage.2020.116893
Hicham Janati, Thomas Bazeille, Bertrand Thirion, Marco Cuturi, and Alexandre Gramfort. Multi-subject MEG/EEG source imaging with sparse multi-task regression. NeuroImage, 220:116847, 2020. https://doi.org/10.1016/j.neuroimage.2020.116847
Thomas Moreau and Alexandre Gramfort. DiCoDiLe: Distributed Convolutional Dictionary Learning. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020. https://doi.org/10.1109/TPAMI.2020.3039215
Amit Jaiswal, Jukka Nenonen, Matti Stenroos, Alexandre Gramfort, Sarang S. Dalal, Britta U. Westner, Vladimir Litvak, John C. Mosher, Jan-Mathijs Schoffelen, Caroline Witton, Robert Oostenveld, and Lauri Parkkonen. Comparison of beamformer implementations for MEG source localization. NeuroImage, 116797, 2020. https://doi.org/10.1016/j.neuroimage.2020.116797
@article{appelhoff_mne-bids:2019, author = {Appelhoff, Stefan and Sanderson, Matthew and Brooks, Teon and Vliet, Marijn van and Quentin, Romain and Holdgraf, Chris and Chaumon, Maximilien and Mikulan, Ezequiel and Tavabi, Kambiz and H{\"o}chenberger, Richard and Welke, Dominik and Brunner, Clemens and Rockhill, Alexander and Larson, Eric and Gramfort, Alexandre and Jas, Mainak}, comment = {[Code]}, doi = {10.21105/joss.01896}, issn = {2475-9066}, journal = {Journal of Open Source Software}, language = {en}, month = {December}, number = {44}, pages = {1896}, shorttitle = {{MNE}-{BIDS}}, title = {{MNE}-{BIDS}: {Organizing} electrophysiological data into the {BIDS} format and facilitating their analysis}, url = {https://joss.theoj.org/papers/10.21105/joss.01896}, urldate = {2019-12-19}, volume = {4}, year = {2019} }
@inproceedings{sabbagh-etal:2019, author = {Sabbagh, David and Ablin, Pierre and Varoquaux, Gael and Gramfort, Alexandre and Engemann, Denis A.}, booktitle = {Advances in Neural Information Processing Systems 32}, comment = {[Code]}, editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch{\' e}-Buc and E. Fox and R. Garnett}, pages = {7321--7332}, publisher = {Curran Associates, Inc.}, title = {Manifold-regression to predict from MEG/EEG brain signals without source modeling}, url = {http://papers.nips.cc/paper/8952-manifold-regression-to-predict-from-megeeg-brain-signals-without-source-modeling.pdf}, year = {2019} }
@inproceedings{banville-etal:19, author = {Banville, Hubert and Albuquerque, Isabela and Hyv{\"a}rinen, Aapo and Moffat, Graeme and Engemann, Denis-Alexander and Gramfort, Alexandre}, booktitle = {Proc. Machine Learning for Signal Processing (MLSP)}, pdf = {https://arxiv.org/pdf/1911.05419.pdf}, publisher = {IEEE SigPort}, title = {Self-supervised representation learning from electroencephalography signals}, url = {https://arxiv.org/abs/1911.05419}, year = {2019} }
@inproceedings{ablin:hal-02140383, author = {Ablin, Pierre and Moreau, Thomas and Massias, Mathurin and Gramfort, Alexandre}, booktitle = {Advances in Neural Information Processing Systems 32}, editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch{\' e}-Buc and E. Fox and R. Garnett}, pages = {13100--13110}, publisher = {Curran Associates, Inc.}, title = {Learning step sizes for unfolded sparse coding}, url = {http://papers.nips.cc/paper/9469-learning-step-sizes-for-unfolded-sparse-coding.pdf}, year = {2019} }
@article{massias:hal-02263500, author = {Massias, Mathurin and Vaiter, Samuel and Gramfort, Alexandre and Salmon, Joseph}, comment = {[Code]}, hal_id = {hal-02263500}, hal_version = {v1}, journal = {Journal of Machine Learning Research}, month = {August}, pdf = {https://hal.archives-ouvertes.fr/hal-02263500/file/main.pdf}, title = {{Dual Extrapolation for Sparse Generalized Linear Models}}, url = {https://hal.archives-ouvertes.fr/hal-02263500}, year = {2019} }
@inproceedings{bertrand-etal:19, author = {Bertrand, Quentin and Massias, Mathurin and Gramfort, Alexandre and Salmon, Joseph}, booktitle = {Advances in Neural Information Processing Systems 32}, editor = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch{\' e}-Buc and E. Fox and R. Garnett}, pages = {3961--3972}, publisher = {Curran Associates, Inc.}, title = {Handling correlated and repeated measurements with the smoothed multivariate square-root Lasso}, url = {http://papers.nips.cc/paper/8651-handling-correlated-and-repeated-measurements-with-the-smoothed-multivariate-square-root-lasso.pdf}, year = {2019} }
@inproceedings{janati-etal:2019b, address = {Cham}, author = {Janati, H. and Bazeille, T. and Thirion, B. and Cuturi, M. and Gramfort, A.}, booktitle = {Information Processing in Medical Imaging}, editor = {Chung, Albert C. S. and Gee, James C. and Yushkevich, Paul A. and Bao, Siqi}, isbn = {978-3-030-20351-1}, pages = {743--754}, pdf = {https://arxiv.org/pdf/1902.04812.pdf}, publisher = {Springer International Publishing}, title = {Group Level MEG/EEG Source Imaging via Optimal Transport: Minimum Wasserstein Estimates}, year = {2019} }
@article{roy-etal:2019, abstract = {Context. Electroencephalography (EEG) is a complex signal and can require several years of training, as well as advanced signal processing and feature extraction methodologies to be correctly interpreted. Recently, deep learning (DL) has shown great promise in helping make sense of EEG signals due to its capacity to learn good feature representations from raw data. Whether DL truly presents advantages as compared to more traditional EEG processing approaches, however, remains an open question. Objective. In this work, we review 154 papers that apply DL to EEG, published between January 2010 and July 2018, and spanning different application domains such as epilepsy, sleep, brain–computer interfacing, and cognitive and affective monitoring. We extract trends and highlight interesting approaches from this large body of literature in order to inform future research and formulate recommendations. Methods. Major databases spanning the fields of science and engineering were queried to identify relevant studies published in scientific journals, conferences, and electronic preprint repositories. Various data items were extracted for each study pertaining to (1) the data, (2) the preprocessing methodology, (3) the DL design choices, (4) the results, and (5) the reproducibility of the experiments. These items were then analyzed one by one to uncover trends. Results. Our analysis reveals that the amount of EEG data used across studies varies from less than ten minutes to thousands of hours, while the number of samples seen during training by a network varies from a few dozens to several millions, depending on how epochs are extracted. Interestingly, we saw that more than half the studies used publicly available data and that there has also been a clear shift from intra-subject to inter-subject approaches over the last few years. About of the studies used convolutional neural networks (CNNs), while used recurrent neural networks (RNNs), most often with a total of 3–10 layers. Moreover, almost one-half of the studies trained their models on raw or preprocessed EEG time series. Finally, the median gain in accuracy of DL approaches over traditional baselines was across all relevant studies. More importantly, however, we noticed studies often suffer from poor reproducibility: a majority of papers would be hard or impossible to reproduce given the unavailability of their data and code. Significance. To help the community progress and share work more effectively, we provide a list of recommendations for future studies and emphasize the need for more reproducible research. We also make our summary table of DL and EEG papers available and invite authors of published work to contribute to it directly. A planned follow-up to this work will be an online public benchmarking portal listing reproducible results.}, author = {Roy, Yannick and Banville, Hubert and Albuquerque, Isabela and Gramfort, Alexandre and Falk, Tiago H and Faubert, Jocelyn}, doi = {10.1088/1741-2552/ab260c}, journal = {Journal of Neural Engineering}, month = {aug}, number = {5}, pages = {051001}, pdf = {https://arxiv.org/pdf/1901.05498.pdf}, publisher = {{IOP} Publishing}, title = {Deep learning-based electroencephalography analysis: a systematic review}, volume = {16}, year = {2019} }
@article{Kernbach12295, author = {Kernbach, Julius M. and Yeo, B. T. Thomas and Smallwood, Jonathan and Margulies, Daniel S. and Thiebaut de Schotten, Michel and Walter, Henrik and Sabuncu, Mert R. and Holmes, Avram J. and Gramfort, Alexandre and Varoquaux, Ga{\"e}l and Thirion, Bertrand and Bzdok, Danilo}, doi = {10.1073/pnas.1804876115}, journal = {Proceedings of the National Academy of Sciences (PNAS)}, number = {48}, pages = {12295--12300}, pdf = {https://www.pnas.org/content/115/48/12295.full.pdf}, publisher = {National Academy of Sciences}, title = {Subspecialization within default mode nodes characterized in 10,000 UK Biobank participants}, url = {https://www.pnas.org/content/115/48/12295}, volume = {115}, year = {2018} }
@inproceedings{ablin-etal:18c, author = {Ablin, Pierre and Cardoso, Jean-Fran{\c c}ois and Gramfort, Alexandre}, booktitle = {European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN)}, comment = {[Code]}, month = {April}, pdf = {https://hal.archives-ouvertes.fr/hal-01936887/file/main.pdf}, title = {{Beyond Pham's algorithm for joint diagonalization}}, url = {https://hal.archives-ouvertes.fr/hal-01936887}, year = {2019} }
@article{chambon-etal:19, author = {Chambon, S. and Thorey, V. and Arnal, P.J. and Mignot, E. and Gramfort, A.}, comment = {[Code]}, doi = {https://doi.org/10.1016/j.jneumeth.2019.03.017}, issn = {0165-0270}, journal = {Journal of Neuroscience Methods}, keywords = {Deep learning, Machine learning, EEG, Event detection, Sleep}, pages = {64 - 78}, title = {{DOSED: A deep learning approach to detect multiple sleep micro-events in EEG signal}}, url = {http://www.sciencedirect.com/science/article/pii/S0165027019301013}, volume = {321}, year = {2019} }
@inproceedings{ablin-etal:18b, author = {Ablin, P. and Fagot, D. and Wendt, H. and Gramfort, A. and Fevotte, C.}, booktitle = {ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)}, comment = {[Code]}, doi = {10.1109/ICASSP.2019.8683291}, issn = {2379-190X}, keywords = {Nonnegative matrix factorization (NMF);transform learning;source separation;non-convex optimization;manifolds;audio signal processing}, month = {May}, number = {}, pages = {700-704}, pdf = {https://arxiv.org/pdf/1811.02225.pdf}, title = {{A Quasi-Newton algorithm on the orthogonal manifold for NMF with transform learning}}, url = {https://arxiv.org/abs/1811.02225}, volume = {}, year = {2019} }
@inproceedings{chambon-etal:18b, author = {Chambon, S. and Thorey, V. and Arnal, P. J. and Mignot, E. and Gramfort, A.}, booktitle = {2018 IEEE 28th International Workshop on Machine Learning for Signal Processing (MLSP)}, comment = {[Code]}, doi = {10.1109/MLSP.2018.8517067}, issn = {1551-2541}, keywords = {Sleep;Electroencephalography;Prediction algorithms;Feature extraction;Tensile stress;Kernel;Event detection;Deep learning;EEG;event detection;sleep;EEG-patterns;time series}, month = {Sept}, number = {}, pages = {1-6}, pdf = {https://arxiv.org/pdf/1807.05981}, title = {A Deep Learning Architecture to Detect Events in EEG Signals During Sleep}, volume = {}, year = {2018} }
@article{Grabot-etal:19, abstract = {Precise timing makes the difference between harmony and cacophony, but how the brain achieves precision during timing is unknown. In this study, human participants (7 females, 5 males) generated a time interval while being recorded with magnetoencephalography. Building on the proposal that the coupling of neural oscillations provides a temporal code for information processing in the brain, we tested whether the strength of oscillatory coupling was sensitive to self-generated temporal precision. On a per individual basis, we show the presence of alpha{\textendash}beta phase{\textendash}amplitude coupling whose strength was associated with the temporal precision of self-generated time intervals, not with their absolute duration. Our results provide evidence that active oscillatory coupling engages α oscillations in maintaining the precision of an endogenous temporal motor goal encoded in β power; the when of self-timed actions. We propose that oscillatory coupling indexes the variance of neuronal computations, which translates into the precision of an individual{\textquoteright}s behavioral performance.SIGNIFICANCE STATEMENT Which neural mechanisms enable precise volitional timing in the brain is unknown, yet accurate and precise timing is essential in every realm of life. In this study, we build on the hypothesis that neural oscillations, and their coupling across time scales, are essential for the coding and for the transmission of information in the brain. We show the presence of alpha{\textendash}beta phase{\textendash}amplitude coupling (α{\textendash}β PAC) whose strength was associated with the temporal precision of self-generated time intervals, not with their absolute duration. α{\textendash}β PAC indexes the temporal precision with which information is represented in an individual{\textquoteright}s brain. Our results link large-scale neuronal variability on the one hand, and individuals{\textquoteright} timing precision, on the other.}, author = {Grabot, Laetitia and Kononowicz, Tadeusz W. and Dupr{\'e} la Tour, Tom and Gramfort, Alexandre and Doy{\`e}re, Val{\'e}rie and van Wassenhove, Virginie}, doi = {10.1523/JNEUROSCI.2473-18.2018}, eprint = {http://www.jneurosci.org/content/39/17/3277.full.pdf}, issn = {0270-6474}, journal = {Journal of Neuroscience}, number = {17}, pages = {3277--3291}, publisher = {Society for Neuroscience}, title = {The Strength of Alpha-Beta Oscillatory Coupling Predicts Motor Timing Precision}, url = {http://www.jneurosci.org/content/39/17/3277}, volume = {39}, year = {2019} }
@inproceedings{Ablin-etal:19a, address = {}, author = {Ablin, Pierre and Gramfort, Alexandre and Cardoso, Jean-Fran\c{c}ois and Bach, Francis}, booktitle = {AISTATS}, comment = {[Code]}, editor = {Chaudhuri, Kamalika and Sugiyama, Masashi}, month = {16--18 Apr}, pages = {1564--1573}, pdf = {http://proceedings.mlr.press/v89/ablin19a/ablin19a.pdf}, publisher = {PMLR}, series = {Proceedings of Machine Learning Research}, title = {Stochastic algorithms with descent guarantees for ICA}, url = {http://proceedings.mlr.press/v89/ablin19a.html}, volume = {89}, year = {2019} }
@inproceedings{dupre-etal:18, author = {Dupr{\' e} la Tour, Tom and Moreau, Thomas and Jas, Mainak and Gramfort, Alexandre}, booktitle = {Advances in Neural Information Processing Systems 31}, comment = {[Code]}, editor = {S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. Cesa-Bianchi and R. Garnett}, pages = {3292--3302}, publisher = {Curran Associates, Inc.}, title = {Multivariate Convolutional Sparse Coding for Electromagnetic Brain Signals}, url = {http://papers.nips.cc/paper/7590-multivariate-convolutional-sparse-coding-for-electromagnetic-brain-signals.pdf}, year = {2018} }
@inproceedings{janati-etal:19, address = {}, author = {Janati, Hicham and Cuturi, Marco and Gramfort, Alexandre}, booktitle = {AISTATS}, editor = {Chaudhuri, Kamalika and Sugiyama, Masashi}, month = {16--18 Apr}, pages = {1407--1416}, pdf = {http://proceedings.mlr.press/v89/janati19a/janati19a.pdf}, publisher = {PMLR}, series = {Proceedings of Machine Learning Research}, title = {Wasserstein regularization for sparse multi-task regression}, url = {http://proceedings.mlr.press/v89/janati19a.html}, volume = {89}, year = {2019} }
@article{jas-etal:18, author = {Jas, Mainak and Larson, Eric and Engemann, Denis A. and Lepp{\"a}kangas, Jaakko and Taulu, Samu and H{\"a}m{\"a}l{\"a}inen, Matti and Gramfort, Alexandre}, comment = {[Code]}, doi = {10.3389/fnins.2018.00530}, issn = {1662-453X}, journal = {Frontiers in Neuroscience}, pages = {530}, title = {A Reproducible {MEG/EEG} Group Study With the MNE Software: Recommendations, Quality Assessments, and Good Practices}, url = {https://www.frontiersin.org/article/10.3389/fnins.2018.00530}, volume = {12}, year = {2018} }
@inproceedings{Massias_Gramfort_Salmon18, author = {Massias, M. and Gramfort, A. and Salmon, J.}, booktitle = {Proceedings of the 35th International Conference on Machine Learning}, comment = {[Code]}, pages = {3321--3330}, pdf = {https://arxiv.org/pdf/1802.07481}, title = {Celer: a Fast Solver for the Lasso with Dual Extrapolation}, url = {https://arxiv.org/abs/1802.07481}, volume = {80}, year = {2018} }
@article{ablin-etal:2017, author = {Ablin, Pierre and Cardoso, Jean-Francois and Gramfort, Alexandre}, comment = {[Code]}, doi = {10.1109/TSP.2018.2844203}, issn = {1053-587X}, journal = {IEEE Transactions on Signal Processing}, keywords = {Approximation algorithms;Brain modeling;Data models;Electronic mail;Neuroscience;Signal processing algorithms;Tensile stress;Blind source separation;Independent Component Analysis;maximum likelihood estimation;preconditioning;quasi-Newton methods;second order methods}, month = {}, number = {15}, pages = {4040-4049}, pdf = {https://hal.inria.fr/hal-01552340/file/quasi-newton-methods%20%286%29.pdf}, title = {Faster independent component analysis by preconditioning with Hessian approximations}, volume = {66}, year = {2018} }
@article{bekhti-etal:17, author = {Bekhti, Yousra and Lucka, Felix and Salmon, Joseph and Gramfort, Alexandre}, comment = {[Code]}, journal = {Inverse Problems}, pdf = {https://arxiv.org/pdf/1710.08747}, title = {A hierarchical Bayesian perspective on majorization-minimization for non-convex sparse regression: application to M/EEG source imaging}, url = {http://iopscience.iop.org/article/10.1088/1361-6420/aac9b3/meta}, year = {2018} }
@inproceedings{chambon-etal:18, address = {Singapore}, author = {Chambon, Stanislas and Galtier, Mathieu N. and Gramfort, Alexandre}, booktitle = {{Pattern Recognition in Neuroimaging}}, hal_id = {hal-01814190}, hal_version = {v1}, keywords = {EEG ; sleep stage classification ; domain adaptation ; neural network ; optimal transport}, month = {June}, pdf = {https://hal.archives-ouvertes.fr/hal-01814190/file/main.pdf}, title = {{Domain adaptation with optimal transport improves EEG sleep stage classifiers}}, url = {https://hal.archives-ouvertes.fr/hal-01814190}, year = {2018} }
@inproceedings{ablin-etal:2018b, address = {Cham}, author = {Ablin, Pierre and Cardoso, Jean-Fran{\c{c}}ois and Gramfort, Alexandre}, booktitle = {Latent Variable Analysis and Signal Separation (LVA-ICA)}, comment = {[Code]}, editor = {Deville, Yannick and Gannot, Sharon and Mason, Russell and Plumbley, Mark D. and Ward, Dominic}, isbn = {978-3-319-93764-9}, pages = {151--160}, pdf = {https://hal.inria.fr/hal-01822602/document}, publisher = {Springer International Publishing}, title = {Accelerating Likelihood Optimization for ICA on Real Signals}, year = {2018} }
@inproceedings{ablin-etal:2018a, address = {Calgary, Canada}, author = {Ablin, Pierre and Cardoso, Jean-Francois and Gramfort, Alexandre}, booktitle = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, comment = {[Code]}, month = {April}, pdf = {https://arxiv.org/pdf/1711.10873}, title = {{Faster ICA under orthogonal constraint}}, year = {2018} }
@inproceedings{duprelatour-etal:18, address = {Calgary, Canada}, author = {Dupr{\'e} la Tour, Tom and Grenier, Yves and Gramfort, Alexandre}, booktitle = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, hal_id = {hal-01696786}, hal_version = {v1}, keywords = {cross-frequency coupling ; non-linear autoregressive models ; spectrum estimation ; electrophysiology}, month = {April}, pdf = {https://hal.archives-ouvertes.fr/hal-01696786/file/duprelatour2018icassp.pdf}, title = {{Driver estimation in non-linear autoregressive models}}, url = {https://hal.archives-ouvertes.fr/hal-01696786}, year = {2018} }
@inproceedings{schiratti-etal:2018a, address = {Calgary, Canada}, author = {Schiratti, Jean-Baptiste and Le Douget, Jean-Eudes and Le Van Quyen, Michel and Essid, Slim and Gramfort, Alexandre}, booktitle = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, comment = {[Code]}, month = {April}, pdf = {https://hal.archives-ouvertes.fr/hal-01724272/document}, title = {{An ensemble learning approach to detect epileptic seizures from long intracranial EEG recordings}}, year = {2018} }
@article{duprelatour-etal:2017b, abstract = {Author summary Neural oscillations synchronize information across brain areas at various anatomical and temporal scales. Of particular relevance, slow fluctuations of brain activity have been shown to affect high frequency neural activity, by regulating the excitability level of neural populations. Such cross-frequency-coupling can take several forms. In the most frequently observed type, the power of high frequency activity is time-locked to a specific phase of slow frequency oscillations, yielding phase-amplitude-coupling (PAC). Even when readily observed in neural recordings, such non-linear coupling is particularly challenging to formally characterize. Typically, neuroscientists use band-pass filtering and Hilbert transforms with ad-hoc correlations. Here, we explicitly address current limitations and propose an alternative probabilistic signal modeling approach, for which statistical inference is fast and well-posed. To statistically model PAC, we propose to use non-linear auto-regressive models which estimate the spectral modulation of a signal conditionally to a driving signal. This conditional spectral analysis enables easy model selection and clear hypothesis-testing by using the likelihood of a given model. We demonstrate the advantage of the model-based approach on three datasets acquired in rats and in humans. We further provide novel neuroscientific insights on previously reported PAC phenomena, capturing two mechanisms in PAC: influence of amplitude and directionality estimation.}, author = {Dupr{\'e} la Tour, Tom and Tallot, Lucille and Grabot, Laetitia and Doyere, Valerie and van Wassenhove, Virginie and Grenier, Yves and Gramfort, Alexandre}, comment = {[Code]}, doi = {10.1371/journal.pcbi.1005893}, journal = {PLOS Computational Biology}, month = {12}, number = {12}, pages = {1-32}, pdf = {http://journals.plos.org/ploscompbiol/article/file?id=10.1371/journal.pcbi.1005893&type=printable}, publisher = {Public Library of Science}, title = {Non-linear auto-regressive models for cross-frequency coupling in neural time series}, url = {https://doi.org/10.1371/journal.pcbi.1005893}, volume = {13}, year = {2017} }
@inproceedings{schulz:hal-01633096, address = {Long Beach, United States}, author = {Schulz, Marc-Andre and Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Thirion, Bertrand and Bzdok, Danilo}, booktitle = {{Neural Information Processing Systems, Machine Learning in Health Workshop}}, hal_id = {hal-01633096}, hal_version = {v1}, month = {December}, title = {{Label scarcity in biomedicine: Data-rich latent factor discovery enhances phenotype prediction}}, url = {https://hal.archives-ouvertes.fr/hal-01633096}, year = {2017} }
@article{ndiaye-etal:17b, author = {Ndiaye, Eugene and Fercoq, Olivier and Gramfort, Alexandre and Salmon, Joseph}, journal = {Journal of Machine Learning Research}, number = {128}, pages = {1-33}, pdf = {http://jmlr.org/papers/volume18/16-577/16-577.pdf}, title = {Gap Safe Screening Rules for Sparsity Enforcing Penalties}, url = {http://jmlr.org/papers/v18/16-577.html}, volume = {18}, year = {2017} }
@article{ndiaye-etal:17, abstract = {In high dimensional settings, sparse structures are crucial for efficiency, both in term of memory, computation and performance. It is customary to consider ℓ 1 penalty to enforce sparsity in such scenarios. Sparsity enforcing methods, the Lasso being a canonical example, are popular candidates to address high dimension. For efficiency, they rely on tuning a parameter trading data fitting versus sparsity. For the Lasso theory to hold this tuning parameter should be proportional to the noise level, yet the latter is often unknown in practice. A possible remedy is to jointly optimize over the regression parameter as well as over the noise level. This has been considered under several names in the literature: Scaled-Lasso, Square-root Lasso, Concomitant Lasso estimation for instance, and could be of interest for uncertainty quantification. In this work, after illustrating numerical difficulties for the Concomitant Lasso formulation, we propose a modification we coined Smoothed Concomitant Lasso, aimed at increasing numerical stability. We propose an efficient and accurate solver leading to a computational cost no more expensive than the one for the Lasso. We leverage on standard ingredients behind the success of fast Lasso solvers: a coordinate descent algorithm, combined with safe screening rules to achieve speed efficiency, by eliminating early irrelevant features.}, author = {Ndiaye, Eug{\`e}ne and Fercoq, Olivier and Gramfort, Alexandre and Lecl{\`e}re, Vincent and Salmon, Joseph}, journal = {Journal of Physics: Conference Series}, number = {1}, pages = {012006}, pdf = {https://arxiv.org/pdf/1606.02702}, title = {Efficient Smoothed Concomitant Lasso Estimation for High Dimensional Regression}, url = {http://stacks.iop.org/1742-6596/904/i=1/a=012006}, volume = {904}, year = {2017} }
@inproceedings{bekhti-etal:17a, author = {Bekhti, Yousra and Badeau, Roland and Gramfort, Alexandre}, booktitle = {2017 25th European Signal Processing Conference (EUSIPCO)}, doi = {10.23919/EUSIPCO.2017.8081206}, issn = {}, keywords = {Bayes methods;brain;convex programming;electroencephalography;inverse problems;iterative methods;maximum likelihood estimation;medical image processing;regression analysis;Bayesian inference;brain activations;brain imaging;faster algorithms;high-dimensional sparse synthesis models;hyperparameter estimation;inverse problem;nonconvex penalty;posteriori regression;recurrent problem;sparse regression models;Bayes methods;Brain modeling;Estimation;Europe;Inverse problems;Sensors;Signal processing}, month = {Aug}, number = {}, pages = {246-250}, pdf = {https://hal.archives-ouvertes.fr/hal-01531238/document}, title = {Hyperparameter estimation in maximum a posteriori regression using group sparsity with an application to brain imaging}, volume = {}, year = {2017} }
@article{NisoGalan-etal:18, author = {Niso, Guiomar and Gorgolewski, Krzysztof J. and Bock, Elizabeth and Brooks, Teon L. and Flandin, Guillaume and Gramfort, Alexandre and Henson, Richard N. and Jas, Mainak and Litvak, Vladimir and T. Moreau, Jeremy and Oostenveld, Robert and Schoffelen, Jan-Mathijs and Tadel, Francois and Wexler, Joseph and Baillet, Sylvain}, day = {19}, journal = {Scientific Data}, month = {06}, pdf = {https://www.biorxiv.org/content/early/2017/08/08/172684.full.pdf}, title = {MEG-BIDS, the brain imaging data structure extended to magnetoencephalography}, url = {http://dx.doi.org/10.1038/sdata.2018.110}, volume = {5}, year = {2018} }
@article{le-etal:2018, author = {Le, Laetitia Minh Ma{\" i} and K{\'e}gl, Bal{\'a}zs and Gramfort, Alexandre and Marini, Camille and Nguyen, David and Cherti, Mehdi and Tfaili, Sana and Tfayli, Ali and Baillet-Guffroy, Arlette and Prognon, Patrice and Chaminade, Pierre and Caudron, Eric}, doi = {https://doi.org/10.1016/j.talanta.2018.02.109}, issn = {0039-9140}, journal = {Talanta}, keywords = {Machine learning, Chemometrics, Raman spectroscopy, Monoclonal antibody, Classification analysis, Regression analysis}, pages = {260 - 265}, title = {Optimization of classification and regression analysis of four monoclonal antibodies from Raman spectra using collaborative machine learning approach}, url = {http://www.sciencedirect.com/science/article/pii/S0039914018302273}, volume = {184}, year = {2018} }
@article{jas-etal:17b, author = {Jas, Mainak and Engemann, Denis A. and Bekhti, Yousra and Raimondo, Federico and Gramfort, Alexandre}, comment = {[Code]}, doi = {https://doi.org/10.1016/j.neuroimage.2017.06.030}, issn = {1053-8119}, journal = {NeuroImage}, pages = {417 - 429}, pdf = {https://arxiv.org/pdf/1612.08194.pdf}, title = {Autoreject: Automated artifact rejection for MEG and EEG data}, url = {http://www.sciencedirect.com/science/article/pii/S1053811917305013}, volume = {159}, year = {2017} }
@article{chambon-etal:17, author = {Chambon, Stanislas and Galtier, Mathieu and Arnal, Pierrick and Wainrib, Gilles and Gramfort, Alexandre}, doi = {10.1109/TNSRE.2018.2813138}, issn = {1534-4320}, journal = {IEEE Transactions on Neural Systems and Rehabilitation Engineering}, month = {April}, number = {4}, pages = {758-769}, pdf = {https://arxiv.org/pdf/1707.03321}, title = {A Deep Learning Architecture for Temporal Sleep Stage Classification Using Multivariate and Multimodal Time Series}, volume = {26}, year = {2018} }
@inproceedings{massias-etal:2017, author = {Massias, Mathurin and Fercoq, Olivier and Gramfort, Alexandre and Salmon, Joseph}, booktitle = {AISTATS}, pages = {998--1007}, pdf = {http://proceedings.mlr.press/v84/massias18a/massias18a.pdf}, series = {Proceedings of Machine Learning Research}, title = {Generalized Concomitant Multi-Task Lasso for Sparse Multimodal Regression}, volume = {84}, year = {2018} }
@inproceedings{jas-etal:2017, author = {Jas, Mainak and Dupr{\' e} la Tour, Tom and Simsekli, Umut and Gramfort, Alexandre}, booktitle = {Advances in Neural Information Processing Systems (NIPS) 30}, comment = {[Code]}, editor = {I. Guyon and U. V. Luxburg and S. Bengio and H. Wallach and R. Fergus and S. Vishwanathan and R. Garnett}, pages = {1099--1108}, pdf = {http://papers.nips.cc/paper/6710-learning-the-morphology-of-brain-signals-using-alpha-stable-convolutional-sparse-coding.pdf}, publisher = {Curran Associates, Inc.}, title = {Learning the Morphology of Brain Signals Using Alpha-Stable Convolutional Sparse Coding}, year = {2017} }
@inproceedings{thomas-etal:2017, abstract = {Extreme regions in the feature space are of particular concern for anomaly detection: anomalies are likely to be located in the tails, whereas data scarcity in such regions makes it difficult to distinguish between large normal instances and anomalies. This paper presents an unsupervised algorithm for anomaly detection in extreme regions. We propose a Minimum Volume set (MV-set) approach relying on multivariate extreme value theory. This framework includes a canonical pre-processing step, which addresses the issue of output sensitivity to standardization choices. The resulting data representation on the sphere highlights the dependence structure of the extremal observations. Anomaly detection is then cast as a MV-set estimation problem on the sphere, where volume is measured by the spherical measure and mass refers to the angular measure. An anomaly then corresponds to an unusual observation given that one of its variables is large. A preliminary rate bound analysis is carried out for the learning method we introduce and its computational advantages are discussed and illustrated by numerical experiments.}, address = {Fort Lauderdale, FL, USA}, author = {Thomas, Albert and Clemencon, St{\'e}phan and Gramfort, Alexandre and Sabourin, Anne}, booktitle = {AISTATS}, editor = {Aarti Singh and Jerry Zhu}, month = {20--22 Apr}, pages = {1011--1019}, pdf = {http://proceedings.mlr.press/v54/thomas17a/thomas17a.pdf}, publisher = {PMLR}, series = {Proceedings of Machine Learning Research}, title = {{Anomaly Detection in Extreme Regions via Empirical MV-sets on the Sphere}}, url = {http://proceedings.mlr.press/v54/thomas17a.html}, volume = {54}, year = {2017} }
@article{pedregosa-etal:17, author = {Pedregosa, Fabian and Bach, Francis and Gramfort, Alexandre}, journal = {Journal of Machine Learning Research}, pdf = {http://jmlr.org/papers/volume18/15-495/15-495.pdf}, publisher = {MIT Press}, title = {On the Consistency of Ordinal Regression Methods}, year = {2017} }
@inproceedings{dupre-etal:2017, address = {New Orleans, USA}, author = {Dupr{\'e} la Tour, Tom and Grenier, Yves and Gramfort, Alexandre}, booktitle = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, month = {February}, pdf = {https://hal.archives-ouvertes.fr/hal-01448603/document}, title = {{Parametric estimation of spectrum driven by an exogenous signal}}, url = {https://hal.archives-ouvertes.fr/hal-01448603/}, year = {2017} }
@inproceedings{montoya-etal:2017, address = {Grenoble, France}, author = {Montoya-Martinez, Jair and Cardoso, Jean-Fran{\c c}ois and Gramfort, Alexandre}, booktitle = {International Conference on Latent Variable Analysis, Independent Component Analysis LVA-ICA}, month = {February}, pdf = {https://hal.archives-ouvertes.fr/hal-01451432/document}, title = {{Caveats with stochastic gradient and maximum likelihood based ICA for EEG}}, url = {https://hal.archives-ouvertes.fr/hal-01451432/}, year = {2017} }
@inproceedings{ndiaye-etal:16b, author = {Ndiaye, Eug{\`e}ne and Fercoq, Olivier and Gramfort, Alexandre and Salmon, J.}, booktitle = {Proc. NIPS 2016}, pdf = {http://arxiv.org/pdf/1602.06225v1.pdf}, title = {{GAP} Safe Screening Rules for {Sparse-Group Lasso}}, year = {2016} }
@inproceedings{jas-etal:16, address = {Trento, Italy}, author = {Jas, Mainak and Engemann, Denis and Raimondo, Federico and Bekhti, Yousra and Gramfort, Alexandre}, booktitle = {{6th International Workshop on Pattern Recognition in Neuroimaging (PRNI)}}, comment = {[Code]}, hal_id = {hal-01313458}, hal_version = {v1}, keywords = {magnetoencephalography ; electroencephalography ; preprocessing ; artifact rejection ; automation ; machine learning}, month = {June}, pdf = {https://hal.archives-ouvertes.fr/hal-01313458/file/automated-rejection-repair.pdf}, title = {{Automated rejection and repair of bad trials in MEG/EEG}}, url = {https://hal.archives-ouvertes.fr/hal-01313458}, year = {2016} }
@inproceedings{bekhti-etal:16, address = {Trento, Italy}, author = {Bekhti, Yousra and Strohmeier, Daniel and Jas, Mainak and Badeau, Roland and Gramfort, Alexandre}, booktitle = {{6th International Workshop on Pattern Recognition in Neuroimaging (PRNI)}}, doi = {10.1109/PRNI.2016.7552337}, hal_id = {hal-01313567}, hal_version = {v2}, keywords = { Inverse problem ; MEEG ; iterative reweighted optimization algorithm ; multi-scale dictionary ; Gabor transform.}, month = {June}, pdf = {https://hal.archives-ouvertes.fr/hal-01313567/file/PRNI16_multiscale.pdf}, title = {{M/EEG source localization with multi-scale time-frequency dictionaries}}, url = {https://hal.archives-ouvertes.fr/hal-01313567}, year = {2016} }
@article{eickenberg-etal:16, abstract = {Abstract Convolutional networks used for computer vision represent candidate models for the computations performed in mammalian visual systems. We use them as a detailed model of human brain activity during the viewing of natural images by constructing predictive models based on their different layers and \{BOLD\} fMRI activations. Analyzing the predictive performance across layers yields characteristic fingerprints for each visual brain region: early visual areas are better described by lower level convolutional net layers and later visual areas by higher level net layers, exhibiting a progression across ventral and dorsal streams. Our predictive model generalizes beyond brain responses to natural images. We illustrate this on two experiments, namely retinotopy and face-place oppositions, by synthesizing brain activity and performing classical brain mapping upon it. The synthesis recovers the activations observed in the corresponding fMRI studies, showing that this deep encoding model captures representations of brain function that are universal across experimental paradigms. }, author = {Eickenberg, Michael and Gramfort, Alexandre and Varoquaux, Gael and Thirion, Bertrand}, doi = {http://dx.doi.org/10.1016/j.neuroimage.2016.10.001}, issn = {1053-8119}, journal = {NeuroImage}, pages = {184 - 194}, pdf = {https://hal.inria.fr/hal-01389809/file/neuroimage.pdf}, title = {Seeing it all: Convolutional network layers map the function of the human visual system }, url = {http://www.sciencedirect.com/science/article/pii/S1053811916305481}, volume = {152}, year = {2017} }
@article{strohmeier-etal:16, author = {Strohmeier, Daniel and Bekhti, Yousra and Haueisen, Jens and Gramfort, Alexandre}, doi = {10.1109/TMI.2016.2553445}, issn = {0278-0062}, journal = {IEEE Transactions on Medical Imaging}, month = {Oct}, number = {10}, pages = {2218-2228}, pdf = {https://arxiv.org/pdf/1607.08458.pdf}, title = {The Iterative Reweighted Mixed-Norm Estimate for Spatio-Temporal MEG/EEG Source Reconstruction}, url = {http://ieeexplore.ieee.org/document/7452415/}, volume = {35}, year = {2016} }
@unpublished{laby:hal-01167391, author = {Laby, Romain and Gramfort, Alexandre and Roueff, Fran{\c c}ois and Enderli, Cyrille and Larroque, Alain}, hal_id = {hal-01167391}, hal_version = {v1}, month = {June}, pdf = {https://hal-institut-mines-telecom.archives-ouvertes.fr/hal-01167391/file/version_hal.pdf}, title = {{Sparse pairwise Markov model learning for anomaly detection in heterogeneous data}}, url = {https://hal-institut-mines-telecom.archives-ouvertes.fr/hal-01167391}, year = {2015} }
@inproceedings{ndiaye-etal:15, author = {Ndiaye, Eug{\`e}ne and Fercoq, Olivier and Gramfort, Alexandre and Salmon, Joseph}, booktitle = {Proc. NIPS 2015}, month = {Dec}, title = {GAP Safe screening rules for sparse multi-task and multi-class models}, url = {http://arxiv.org/abs/1506.03736}, year = {2015} }
@inproceedings{thomas-etal:2015, author = {Thomas, Albert and Feuillard, Vincent and Gramfort, Alexandre}, booktitle = {Proc. IEEE DSAA 2015}, month = {Oct}, pdf = {https://arxiv.org/pdf/1508.07535}, title = {{Calibration of One-Class SVM for MV set estimation}}, year = {2015} }
@inproceedings{lemagoarou:hal-01156478, address = {Nice, France}, author = {Le Magoarou, Luc and Gribonval, R{\'e}mi and Gramfort, Alexandre}, booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)}, hal_id = {hal-01156478}, hal_version = {v1}, keywords = {Inverse problems ; Brain source localization ; Fast algorithms ; Deconvolution ; Matrix factorization}, month = {August}, pdf = {https://hal.archives-ouvertes.fr/hal-01156478/file/EUSIPCO_current.pdf}, title = {{FA$\mu$ST: speeding up linear transforms for tractable inverse problems}}, url = {https://hal.archives-ouvertes.fr/hal-01156478}, year = {2015} }
@inproceedings{kowalski-etal:15, author = {Kowalski, Matthieu and Gramfort, Alexandre}, booktitle = {2015 23rd European Signal Processing Conference (EUSIPCO)}, doi = {10.1109/EUSIPCO.2015.7362682}, month = {Aug}, pages = {1741-1745}, pdf = {https://hal.archives-ouvertes.fr/hal-01199635/file/KG_eusipco.pdf}, title = {Inverse problems with time-frequency dictionaries and non-white Gaussian noise}, year = {2015} }
@article{pedregosa-etal:15, abstract = {Abstract Despite the common usage of a canonical, data-independent, hemodynamic response function (HRF), it is known that the shape of the \{HRF\} varies across brain regions and subjects. This suggests that a data-driven estimation of this function could lead to more statistical power when modeling \{BOLD\} fMRI data. However, unconstrained estimation of the \{HRF\} can yield highly unstable results when the number of free parameters is large. We develop a method for the joint estimation of activation and \{HRF\} by means of a rank constraint, forcing the estimated \{HRF\} to be equal across events or experimental conditions, yet permitting it to differ across voxels. Model estimation leads to an optimization problem that we propose to solve with an efficient quasi-Newton method, exploiting fast gradient computations. This model, called \{GLM\} with Rank-1 constraint (R1-GLM), can be extended to the setting of \{GLM\} with separate designs which has been shown to improve decoding accuracy in brain activity decoding experiments. We compare 10 different \{HRF\} modeling methods in terms of encoding and decoding scores on two different datasets. Our results show that the R1-GLM model outperforms competing methods in both encoding and decoding settings, positioning it as an attractive method both from the points of view of accuracy and computational efficiency. }, author = {Pedregosa, Fabian and Eickenberg, Michael and Ciuciu, Philippe and Thirion, Bertrand and Gramfort, Alexandre}, comment = {[Code]}, doi = {http://dx.doi.org/10.1016/j.neuroimage.2014.09.060}, issn = {1053-8119}, journal = {NeuroImage }, keywords = {Functional MRI (fMRI), Hemodynamic response function (HRF), Machine learning, Optimization, BOLD, Finite impulse response (FIR), Decoding, Encoding}, note = {}, number = {0}, pages = {209 - 220}, title = {Data-driven HRF estimation for encoding and decoding models}, url = {http://www.sciencedirect.com/science/article/pii/S1053811914008027}, volume = {104}, year = {2015} }
@inproceedings{fercoq-etal:15, author = {Fercoq, Olivier and Gramfort, Alexandre and Salmon, Joseph}, booktitle = {Proc. ICML 2015}, month = {July}, title = {Mind the duality gap: safer rules for the Lasso}, url = {http://arxiv.org/abs/1505.03410}, year = {2015} }
@inproceedings{gramfort-etal:15, author = {Gramfort, Alexandre and Peyr{\'e}, Gabriel and Cuturi, Marco}, booktitle = {Proc. IPMI 2015}, month = {July}, title = {Fast Optimal Transport Averaging of Neuroimaging Data}, url = {http://arxiv.org/abs/1503.08596}, year = {2015} }
@article{engemann-etal:15, abstract = {Abstract Magnetoencephalography and electroencephalography (M/EEG) measure non-invasively the weak electromagnetic fields induced by post-synaptic neural currents. The estimation of the spatial covariance of the signals recorded on M/EEG sensors is a building block of modern data analysis pipelines. Such covariance estimates are used in brain–computer interfaces (BCI) systems, in nearly all source localization methods for spatial whitening as well as for data covariance estimation in beamformers. The rationale for such models is that the signals can be modeled by a zero mean Gaussian distribution. While maximizing the Gaussian likelihood seems natural, it leads to a covariance estimate known as empirical covariance (EC). It turns out that the \{EC\} is a poor estimate of the true covariance when the number of samples is small. To address this issue the estimation needs to be regularized. The most common approach downweights off-diagonal coefficients, while more advanced regularization methods are based on shrinkage techniques or generative models with low rank assumptions: probabilistic \{PCA\} (PPCA) and factor analysis (FA). Using cross-validation all of these models can be tuned and compared based on Gaussian likelihood computed on unseen data. We investigated these models on simulations, one electroencephalography (EEG) dataset as well as magnetoencephalography (MEG) datasets from the most common \{MEG\} systems. First, our results demonstrate that different models can be the best, depending on the number of samples, heterogeneity of sensor types and noise properties. Second, we show that the models tuned by cross-validation are superior to models with hand-selected regularization. Hence, we propose an automated solution to the often overlooked problem of covariance estimation of M/EEG signals. The relevance of the procedure is demonstrated here for spatial whitening and source localization of \{MEG\} signals. }, author = {Engemann, D. A. and Gramfort, A.}, doi = {http://dx.doi.org/10.1016/j.neuroimage.2014.12.040}, issn = {1053-8119}, journal = {NeuroImage }, keywords = {Electroencephalography (EEG), Magnetoencephalography (MEG), Neuroimaging, Covariance estimation, Model selection, Statistical learning}, note = {}, number = {0}, pages = {328 - 342}, title = {Automated model selection in covariance estimation and spatial whitening of MEG and EEG signals}, url = {http://www.sciencedirect.com/science/article/pii/S1053811914010325}, volume = {108}, year = {2015} }
@article{moussallam-etal:14, author = {Moussallam, M. and Gramfort, A. and Daudet, L. and Richard, G.}, doi = {10.1109/LSP.2014.2334231}, issn = {1070-9908}, journal = {IEEE Signal Processing Letters}, keywords = {Approximation methods;Dictionaries;Noise;Noise level;Noise reduction;Sensors;Signal processing algorithms}, month = {Nov}, number = {11}, pages = {1341-1345}, title = {Blind Denoising with Random Greedy Pursuits}, url = {http://arxiv.org/pdf/1312.5444.pdf}, volume = {21}, year = {2014} }
@inproceedings{strohmeier-etal:14, abstract = {{MEG/EEG source imaging allows for the non-invasive analysis of brain activity with high temporal and good spatial resolution. As the bioelectromagnetic inverse problem is ill-posed, a priori information is required to find a unique source estimate. For the analysis of evoked brain activity, spatial sparsity of the neuronal activation can be assumed. Due to the convexity, l1-norm based constraints are often used for this, which however lead to source estimates biased in amplitude and often suboptimal in terms of source selection. As an alternative, non-convex regularization functionals such as lp-quasinorms with 0 < p < 1 can be used. In this work, we present a MEG/EEG inverse solver based on a l2,0.5-quasinorm penalty promoting spatial sparsity as well as temporal stationarity of the brain activity. For solving the resulting non-convex optimization problem, we propose the iterative reweighted Mixed Norm Estimate, which is based on reweighted convex optimization and combines a block coordinate descent scheme and an active set strategy to solve each surrogate problem efficiently. We provide empirical evidence based on simulations and analysis of MEG data that the proposed method outperforms the standard Mixed Norm Estimate in terms of active source identification and amplitude bias.}}, address = {T{\"u}bingen, Germany}, author = {Strohmeier, Daniel and Haueisen, Jens and Gramfort, Alexandre}, booktitle = {{Pattern Recognition in Neuroimaging, 2014 International Workshop on}}, doi = {10.1109/PRNI.2014.6858545}, keywords = {MEG; EEG; bioelectromagnetic inverse problem; structured sparsity; iterative reweighted optimization algorithm}, language = {English}, month = {July}, pages = {1-4}, pdf = {http://hal.archives-ouvertes.fr/hal-01044748/PDF/strohmeier\_prni2014.pdf}, publisher = {IEEE}, title = {{Improved MEG/EEG source localization with reweighted mixed-norms}}, url = {http://hal.archives-ouvertes.fr/hal-01044748}, year = {2014} }
@inproceedings{bekhti-etal:14, abstract = {{Magnetoencephalography (MEG) can map brain activity by recording the electromagnetic fields generated by the electrical currents in the brain during a perceptual or cognitive task. This technique offers a very high temporal resolution that allows noninvasive brain exploration at a millisecond (ms) time scale. Decoding, a.k.a. brain reading, consists in predicting from neuroimaging data the subject's behavior and/or the parameters of the perceived stimuli. This is facilitated by the use of supervised learning techniques. In this work we consider the problem of decoding a target variable with ordered values. This target reflects the use of a parametric experimental design in which a parameter of the stimulus is continuously modulated during the experiment. The decoding step is performed by a Ridge regression. The evaluation metric, given the ordinal nature of the target is performed by a ranking metric. On a visual paradigm consisting of random dot kinematograms with 7 coherence levels recorded on 36 subjects we show that one can predict the perceptual thresholds of the subjects from the MEG data. Results are obtained in sensor space and for source estimates in relevant regions of interests (MT, pSTS, mSTS, VLPFC).}}, author = {Bekhti, Yousra and Zilber, Nicolas and Pedregosa, Fabian and Ciuciu, Philippe and van Wassenhove, Virginie and Gramfort, Alexandre}, booktitle = {Pattern Recognition in Neuroimaging, 2014 International Workshop on}, doi = {10.1109/PRNI.2014.6858510}, keywords = {functional brain imaging, statistical learning, ordinal regression, magnetoencephalography}, month = {June}, pages = {1-4}, pdf = {http://hal.archives-ouvertes.fr/hal-01032909/PDF/PID3197363.pdf}, title = {{Decoding perceptual thresholds from MEG/EEG}}, url = {http://hal.archives-ouvertes.fr/hal-01032909}, year = {2014} }
@inproceedings{dohmatob:hal-00991743, abstract = {{Learning predictive models from brain imaging data, as in decoding cognitive states from fMRI (functional Magnetic Resonance Imaging), is typically an ill-posed problem as it entails estimating many more parameters than available sample points. This estimation problem thus requires regularization. Total variation regularization, combined with sparse models, has been shown to yield good predictive performance, as well as stable and interpretable maps. However, the corresponding optimization problem is very challenging: it is non-smooth, non-separable and heavily ill-conditioned. For the penalty to fully exercise its structuring effect on the maps, this optimization problem must be solved to a good tolerance resulting in a computational challenge. Here we explore a wide variety of solvers and exhibit their convergence properties on fMRI data. We introduce a variant of smooth solvers and show that it is a promising approach in these settings. Our findings show that care must be taken in solving TV-l1 estimation in brain imaging and highlight the successful strategies.}}, author = {Dohmatob, Elvis Dopgima and Gramfort, Alexandre and Thirion, Bertrand and Varoquaux, Gael}, booktitle = {Pattern Recognition in Neuroimaging, 2014 International Workshop on}, doi = {10.1109/PRNI.2014.6858516}, keywords = {Brain;Convergence;Imaging;Logistics;Optimization;Predictive models;TV;Total Variation;classification;fMRI;non-smooth convex optimization;regression;sparse models}, month = {June}, pages = {1-4}, pdf = {http://hal.inria.fr/hal-00991743/PDF/PRNI2014\_TVl1.pdf}, title = {{Benchmarking solvers for TV-l1 least-squares and logistic regression in brain imaging}}, url = {http://hal.inria.fr/hal-00991743}, year = {2014} }
@article{sitt-etal:14, abstract = {In recent years, numerous electrophysiological signatures of consciousness have been proposed. Here, we perform a systematic analysis of these electroencephalography markers by quantifying their efficiency in differentiating patients in a vegetative state from those in a minimally conscious or conscious state. Capitalizing on a review of previous experiments and current theories, we identify a series of measures that can be organized into four dimensions: (i) event-related potentials versus ongoing electroencephalography activity; (ii) local dynamics versus inter-electrode information exchange; (iii) spectral patterns versus information complexity; and (iv) average versus fluctuations over the recording session. We analysed a large set of 181 high-density electroencephalography recordings acquired in a 30 minutes protocol. We show that low-frequency power, electroencephalography complexity, and information exchange constitute the most reliable signatures of the conscious state. When combined, these measures synergize to allow an automatic classification of patients’ state of consciousness.}, author = {Sitt, Jacobo Diego and King, Jean-Remi and El Karoui, Imen and Rohaut, Benjamin and Faugeras, Frederic and Gramfort, Alexandre and Cohen, Laurent and Sigman, Mariano and Dehaene, Stanislas and Naccache, Lionel}, doi = {10.1093/brain/awu141}, eprint = {http://brain.oxfordjournals.org/content/early/2014/06/16/brain.awu141.full.pdf+html}, journal = {Brain}, title = {Large scale screening of neural signatures of consciousness in patients in a vegetative or minimally conscious state}, url = {http://brain.oxfordjournals.org/content/early/2014/06/16/brain.awu141.abstract}, year = {2014} }
@article{Kosem2014, author = {K{\"o}sem, Anne and Gramfort, Alexandre and van Wassenhove, Virginie}, doi = {10.1016/j.neuroimage.2014.02.010}, issn = {1053-8119}, journal = {NeuroImage}, keywords = {MEG, Oscillatory entrainment, Temporal order, Simultaneity, Internal clock}, pages = {274-284}, title = {Encoding of event timing in the phase of neural oscillations}, url = {http://www.sciencedirect.com/science/article/pii/S1053811914001013}, volume = {92}, year = {2014} }
@article{abraham-etal:14, abstract = {Statistical machine learning methods are increasingly used for neuroimaging data analysis. Their main virtue is their ability to model high-dimensional datasets, e.g. multivariate analysis of activation images or resting-state time series. Supervised learning is typically used in decoding or encoding settings to relate brain images to behavioral or clinical observations, while unsupervised learning can uncover hidden structures in sets of images (e.g. resting state functional MRI) or find sub-populations in large cohorts. By considering different functional neuroimaging applications, we illustrate how scikit-learn, a Python machine learning library, can be used to perform some key analysis steps. Scikit-learn contains a very large set of statistical learning algorithms, both supervised and unsupervised, and its application to neuroimaging data provides a versatile tool to study the brain.}, author = {Abraham, Alexandre and Pedregosa, Fabian and Eickenberg, Michael and Gervais, Philippe and Mueller, Andreas and Kossaifi, Jean and Gramfort, Alexandre and Thirion, Bertrand and Varoquaux, Gael}, doi = {10.3389/fninf.2014.00014}, issn = {1662-5196}, journal = {Frontiers in Neuroinformatics}, number = {14}, title = {Machine Learning for Neuroimaging with Scikit-Learn}, url = {http://www.frontiersin.org/neuroinformatics/10.3389/fninf.2014.00014/abstract}, volume = {8}, year = {2014} }
@article{king-etal:14, abstract = {The brain response to auditory novelty comprises two main EEG components: an early mismatch negativity and a late P300. Whereas the former has been proposed to reflect a prediction error, the latter is often associated with working memory updating. Interestingly, these two proposals predict fundamentally different dynamics: prediction errors are thought to propagate serially through several distinct brain areas, while working memory supposes that activity is sustained over time within a stable set of brain areas. Here we test this temporal dissociation by showing how the generalization of brain activity patterns across time can characterize the dynamics of the underlying neural processes. This method is applied to magnetoencephalography (MEG) recordings acquired from healthy participants who were presented with two types of auditory novelty. Following our predictions, the results show that the mismatch evoked by a local novelty leads to the sequential recruitment of distinct and short-lived patterns of brain activity. In sharp contrast, the global novelty evoked by an unexpected sequence of five sounds elicits a sustained state of brain activity that lasts for several hundreds of milliseconds. The present results highlight how MEG combined with multivariate pattern analyses can characterize the dynamics of human cortical processes.}, author = {King, Jean-R{\'e}mi and Gramfort, Alexandre and Schurger, Aaron and Naccache, Lionel and Dehaene, Stanislas}, doi = {10.1371/journal.pone.0085791}, journal = {PLoS ONE}, month = {01}, number = {1}, pages = {e85791}, publisher = {Public Library of Science}, title = {Two Distinct Dynamic Modes Subtend the Detection of Unexpected Sounds}, url = {http://dx.doi.org/10.1371%2Fjournal.pone.0085791}, volume = {9}, year = {2014} }
@article{gramfort-etal:2014b, author = {Gramfort, Alexandre and Luessi, Martin and Larson, Eric and Engemann, Denis A. and Strohmeier, Daniel and Brodbeck, Christian and Parkkonen, Lauri and H{\"a}m{\"a}l{\"a}inen, Matti S.}, doi = {10.1016/j.neuroimage.2013.10.027}, issn = {1053-8119}, journal = {NeuroImage}, pages = {446-460}, title = {MNE software for processing MEG and EEG data}, url = {http://www.sciencedirect.com/science/article/pii/S1053811913010501}, volume = {86}, year = {2014} }
@article{gramfort-etal:2014a, author = {Gramfort, Alexandre and Poupon, Cyril and Descoteaux, Maxime}, doi = {10.1016/j.media.2013.08.006}, issn = {1361-8415}, journal = {Medical Image Analysis}, keywords = {Diffusion-weighted imaging, Diffusion Spectrum Imaging (DSI), Sparse coding, Denoising, Undersampling}, number = {1}, pages = {36-49}, pdf = {http://hal.inria.fr/docs/00/86/73/72/PDF/mia_paper.pdf}, title = {Denoising and fast diffusion imaging with physically constrained sparse dictionary learning}, url = {http://www.sciencedirect.com/science/article/pii/S1361841513001205}, volume = {18}, year = {2014} }
@article{gramfort-etal:2013c, abstract = {Magnetoencephalography and electroencephalography (M/EEG) measure the weak electromagnetic signals generated by neuronal activity in the brain. Using these signals to characterize and locate neural activation in the brain is a challenge that requires expertise in physics, signal processing, statistics, and numerical methods. As part of the MNE software suite, MNE-Python is an open-source software package that addresses this challenge by providing state-of-the-art algorithms implemented in Python that cover multiple methods of data preprocessing, source localization, statistical analysis, and estimation of functional connectivity between distributed brain regions. All algorithms and utility functions are implemented in a consistent manner with well-documented interfaces, enabling users to create M/EEG data analysis pipelines by writing Python scripts. Moreover, MNE-Python is tightly integrated with the core Python libraries for scientific computation (Numpy, Scipy) and visualization (matplotlib and Mayavi), as well as the greater neuroimaging ecosystem in Python via the Nibabel package. The code is provided under the new BSD license allowing code reuse, even in commercial products. Although MNE-Python has only been under heavy development for a couple of years, it has rapidly evolved with expanded analysis capabilities and pedagogical tutorials because multiple labs have collaborated during code development to help share best practices. MNE-Python also gives easy access to preprocessed datasets, helping users to get started quickly and facilitating reproducibility of methods by other researchers. Full documentation, including dozens of examples, is available at http://martinos.org/mne.}, author = {Gramfort, Alexandre and Luessi, Martin and Larson, Eric and Engemann, Denis A and Strohmeier, Daniel and Brodbeck, Christian and Goj, Roman and Jas, Mainak and Brooks, Teon and Parkkonen, Lauri and H{\"a}m{\"a}l{\"a}inen, Matti}, doi = {10.3389/fnins.2013.00267}, issn = {1662-453X}, journal = {Frontiers in Neuroscience}, number = {267}, title = {MEG and EEG data analysis with MNE-Python}, url = {http://www.frontiersin.org/brain_imaging_methods/10.3389/fnins.2013.00267/abstract}, volume = {7}, year = {2013} }
@inproceedings{buitinck-etal:2013, abstract = {{Scikit-learn is an increasingly popular machine learning library. Written in Python, it is designed to be simple and efficient, accessible to non-experts, and reusable in various contexts. In this paper, we present and discuss our design choices for the application programming interface (API) of the project. In particular, we describe the simple and elegant interface shared by all learning and processing units in the library and then discuss its advantages in terms of composition and reusability. The paper also comments on implementation details specific to the Python ecosystem and analyzes obstacles faced by users and developers of the library.}}, address = {Prague, Czech Republic}, author = {Buitinck, Lars and Louppe, Gilles and Blondel, Mathieu and Pedregosa, Fabian and Mueller, Andreas and Grisel, Olivier and Niculae, Vlad and Prettenhofer, Peter and Gramfort, Alexandre and Grobler, Jaques and Layton, Robert and Vanderplas, Jake and Joly, Arnaud and Holt, Brian and Varoquaux, Ga{\"e}l}, booktitle = {{European Conference on Machine Learning and Principles and Practices of Knowledge Discovery in Databases}}, keywords = {Machine learning, language design, software engineering, Python}, month = {July}, pdf = {http://hal.inria.fr/hal-00856511/PDF/paper.pdf}, title = {{API design for machine learning software: experiences from the scikit-learn project}}, url = {http://hal.inria.fr/hal-00856511}, year = {2013} }
@article{lau-etal:2013, author = {Lau, E. F. and Gramfort, A. and H{\"a}m{\"a}l{\"a}inen, M. S. and Kuperberg, G.R.}, doi = {10.1523/JNEUROSCI.1018-13.2013}, journal = {Journal of Neuroscience}, month = {Oct}, number = {43}, pages = {17174-17181}, title = {Automatic Semantic Facilitation in Anterior Temporal Cortex Revealed through Multimodal Neuroimaging}, url = {http://www.ncbi.nlm.nih.gov/pubmed/24155321}, volume = {33}, year = {2013} }
@article{king-etal:2013, author = {King, J.R. and Faugeras, F. and Gramfort, A. and Schurger, A. and El Karoui, I. and Sitt, J. and Rohaut, B. and Wacongne, C. and Labyt, E. and Bekinschtein, T. and Cohen, L. and Naccache, L. and Dehaene, S.}, doi = {10.1016/j.neuroimage.2013.07.013}, issn = {1053-8119}, journal = {NeuroImage}, title = {Single-trial decoding of auditory novelty responses facilitates the detection of residual consciousness}, url = {http://www.sciencedirect.com/science/article/pii/S1053811913007684}, year = {2013} }
@inproceedings{pedregosa-etal:2013, abstract = {{Extracting activation patterns from functional Magnetic Resonance Images (fMRI) datasets remains challenging in rapid-event designs due to the inherent delay of blood oxygen level-dependent (BOLD) signal. The general linear model (GLM) allows one to estimate the activation from a design matrix and a fixed hemodynamic response function (HRF). However, the HRF is known to vary substantially between subjects and brain regions. In this paper, we propose a model for jointly estimating the hemodynamic response function (HRF) and the activation patterns via a low-rank representation of task effects. This model is based on the linearity assumption behind the GLM and can be computed using standard gradient-based solvers. We use the activation patterns computed by our model as input data for encoding and decoding studies and report performance improvement in both settings.}}, address = {Philadelphia, United States}, author = {Pedregosa, Fabian and Eickenberg, Michael and Thirion, Bertrand and Gramfort, Alexandre}, booktitle = {{3rd International Workshop on Pattern Recognition in NeuroImaging}}, keywords = {fMRI; hemodynamic; HRF; GLM; BOLD; encoding; decoding}, month = {May}, pdf = {http://hal.inria.fr/hal-00821946/PDF/paper.pdf}, title = {{HRF estimation improves sensitivity of fMRI encoding and decoding models}}, url = {http://hal.inria.fr/hal-00821946}, year = {2013} }
@inproceedings{eickenberg-etal:2013, abstract = {{Second layer scattering descriptors are known to provide good classification performance on natural quasi-stationary processes such as visual textures due to their sensitivity to higher order moments and continuity with respect to small deformations. In a functional Magnetic Resonance Imaging (fMRI) experiment we present visual textures to subjects and evaluate the predictive power of these descriptors with respect to the predictive power of simple contour energy - the first scattering layer. We are able to conclude not only that invariant second layer scattering coefficients better encode voxel activity, but also that well predicted voxels need not necessarily lie in known retinotopic regions.}}, address = {Philadelphia, United States}, affiliation = {PARIETAL - INRIA Saclay - Ile de France , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , SIERRA - INRIA Paris - Rocquencourt , Laboratoire Traitement et Communication de l'Information [Paris] - LTCI , IFR49 - Neurospin - CEA}, author = {Eickenberg, Michael and Pedregosa, Fabian and Senoussi, Mehdi and Gramfort, Alexandre and Thirion, Bertrand}, booktitle = {{3rd International Workshop on Pattern Recognition in NeuroImaging}}, month = {June}, pdf = {http://hal.inria.fr/hal-00834928/PDF/prni\_2013.pdf}, title = {{Second order scattering descriptors predict fMRI activity due to visual textures}}, url = {http://hal.inria.fr/hal-00834928}, year = {2013} }
@inproceedings{gramfort-etal:2013a, abstract = {{Decoding, i.e. predicting stimulus related quantities from functional brain images, is a powerful tool to demonstrate differences between brain activity across conditions. However, unlike standard brain mapping, it offers no guarantees on the localization of this information. Here, we consider decoding as a statistical estimation problem and show that injecting a spatial segmentation prior leads to unmatched performance in recovering predictive regions. Specifically, we use L1 penalization to set voxels to zero and Total-Variation (TV) penalization to segment regions. Our contribution is two-fold. On the one hand, we show via extensive experiments that, amongst a large selection of decoding and brain-mapping strategies, TV+L1 leads to best region recovery. On the other hand, we consider implementation issues related to this estimator. To tackle efficiently this joint prediction-segmentation problem we introduce a fast optimization algorithm based on a primal-dual approach. We also tackle automatic setting of hyper-parameters and fast computation of image operation on the irregular masks that arise in brain imaging.}}, address = {Philadelphia, United States}, author = {Gramfort, Alexandre and Thirion, Bertrand and Varoquaux, Ga{\"e}l}, booktitle = {{Pattern Recognition in Neuroimaging (PRNI)}}, keywords = {fMRI; supervised learning; total-variation; sparse; decoding; primal-dual optimization; support recovery}, month = {June}, pdf = {http://hal.inria.fr/hal-00839984/PDF/paper.pdf}, publisher = {IEEE}, title = {{Identifying predictive regions from fMRI with TV-L1 prior}}, url = {http://hal.inria.fr/hal-00839984}, year = {2013} }
@inproceedings{damon-etal:13, address = {Vancouver, Canada}, author = {Damon, C. and Liutkus, A. and Gramfort, A. and Essid, S.}, booktitle = {International Conference on Acoustics, Speech, and Signal Processing (ICASSP)}, keywords = {denoising, Electroencephalography (EEG), NMF}, title = {Non-negative matrix factorization for single-channel {EEG} artifact rejection}, url = {http://biblio.telecom-paristech.fr/cgi-bin/download.cgi?id=13264}, year = {2013} }
@inproceedings{hitziger-etal:2013, abstract = {{Dictionary Learning has proven to be a powerful tool for many image processing tasks, where atoms are typically defined on small image patches. As a drawback, the dictionary only encodes basic structures. In addition, this approach treats patches of different locations in one single set, which means a loss of information when features are well-aligned across signals. This is the case, for instance, in multi-trial magneto- or electroencephalography (M/EEG). Learning the dictionary on the entire signals could make use of the alignment and reveal higher-level features. In this case, however, small misalignments or phase variations of features would not be compensated for. In this paper, we propose an extension to the common dictionary learning framework to overcome these limitations by allowing atoms to adapt their position across signals. The method is validated on simulated and real neuroelectric data.}}, address = {Scottsdale, AZ, United States}, author = {Hitziger, Sebastian and Clerc, Maureen and Gramfort, Alexandre and Saillet, Sandrine and B{\'e}nar, Christian and Papadopoulo, Th{\'e}odore}, booktitle = {{ICLR - 1st International Conference on Learning Representations - 2013}}, month = {January}, organization = {Yoshua Bengio, Yann Lecun}, title = {{Jitter-Adaptive Dictionary Learning - Application to Multi-Trial Neuroelectric Signals}}, url = {http://hal.inria.fr/hal-00837987}, year = {2013} }
@inproceedings{zaremba-etal:2013, abstract = {{Magneto- and electroencephalography (M/EEG) measure the electromagnetic signals produced by brain activity. In order to address the issue of limited signal-to-noise ratio (SNR) with raw data, acquisitions consist of multiple repetitions of the same experiment. An important challenge arising from such data is the variability of brain activations over the repetitions. It hinders statistical analysis such as prediction performance in a supervised learning setup. One such confounding variability is the time offset of the peak of the activation, which varies across repetitions. We propose to address this misalignment issue by explicitly modeling time shifts of different brain responses in a classification setup. To this end, we use the latent support vector machine (LSVM) formulation, where the latent shifts are inferred while learning the classifier parameters. The inferred shifts are further used to improve the SNR of the M/EEG data, and to infer the chronometry and the sequence of activations across the brain regions that are involved in the experimental task. Results are validated on a long term memory retrieval task, showing significant improvement using the proposed latent discriminative method.}}, address = {Asilomar, United States}, author = {Zaremba, Wojciech and Kumar, M. Pawan and Gramfort, Alexandre and Blaschko, Matthew}, booktitle = {{International Conference on Information Processing in Medical Imaging 2013}}, month = {March}, pdf = {http://hal.inria.fr/hal-00803981/PDF/ipmi2013.pdf}, title = {{Learning from M/EEG data with variable brain activation delays}}, url = {http://hal.inria.fr/hal-00803981}, year = {2013} }
@article{gramfort-etal:2013, author = {Gramfort, A. and Strohmeier, D. and Haueisen, J. and H{\"a}m{\"a}l{\"a}inen, M.S. and Kowalski, M.}, doi = {10.1016/j.neuroimage.2012.12.051}, issn = {1053-8119}, journal = {NeuroImage}, keywords = {Inverse problem, Magnetoencephalography (MEG), Electroencephalography (EEG), Sparse structured priors, Convex optimization, Time-frequency, Algorithms}, pages = {410-422}, title = {Time-frequency mixed-norm estimates: Sparse M/EEG imaging with non-stationary source activations}, url = {http://www.sciencedirect.com/science/article/pii/S1053811912012372}, volume = {70}, year = {2013} }
@article{khan-etal:2013, author = {Khan, Sheraz and Gramfort, Alexandre and Shetty, Nandita R. and Kitzbichler, Manfred G. and Ganesan, Santosh and Moran, Joseph M. and Lee, Su Mei and Gabrieli, John D. E. and Tager-Flusberg, Helen B. and Joseph, Robert M. and Herbert, Martha R. and H{\"a}m{\"a}l{\"a}inen, Matti S. and Kenet, Tal}, doi = {10.1073/pnas.1214533110}, eprint = {http://www.pnas.org/content/early/2013/01/09/1214533110.full.pdf+html}, journal = {Proceedings of the National Academy of Sciences (PNAS)}, title = {Local and long-range functional connectivity is reduced in concert in autism spectrum disorders}, url = {http://www.pnas.org/content/early/2013/01/09/1214533110.abstract}, year = {2013} }
@inproceedings{gramfort:hal-00723897, abstract = {{Diffusion spectrum imaging (DSI) from multiple diffusion-weighted images (DWI) allows one to image the complex geometry of water diffusion in biological tissue. To capture the structure of DSI data, we propose to use sparse coding constrained by physical properties of the signal, namely symmetry and positivity, to learn a dictionary of diffusion profiles. Given this estimated model of the signal, we can extract better estimates of the signal from noisy measurements and also speed up acquisition by reducing the number of acquired DWI while giving access to high resolution DSI data. The method learns jointly for all the acquired DWI and scales to full brain data. Working with two sets of 515 DWI images acquired on two different subjects we show that using just half of the data (258 DWI) we can better predict the other 257 DWI than the classic symmetry procedure. The observation holds even if the diffusion profiles are estimated on a different subject dataset from an undersampled q-space of 40 measurements.}}, address = {Nice, France}, affiliation = {PARIETAL - INRIA Saclay - Ile de France , Athinoula A. Martinos Center for Biomedical Imaging , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , Sherbrooke Connectivity Imaging Laboratory - SCIL}, audience = {internationale }, author = {Gramfort, Alexandre and Poupon, Cyril and Descoteaux, Maxime}, booktitle = {{MICCAI}}, hal_id = {hal-00723897}, keywords = {diffusion MRI, dictionary learning, diffusion spectral imaging DSI, sparse, brain}, language = {English}, month = {October}, publisher = {Springer}, title = {{Sparse DSI: Learning DSI structure for denoising and fast imaging}}, url = {http://hal.inria.fr/hal-00723897}, year = {2012} }
@article{varoquaux-etal:2012b, abstract = {{Correlations in the signal observed via functional Magnetic Resonance Imaging (fMRI), are expected to reveal the interactions in the underlying neural populations through hemodynamic response. In particular, they highlight distributed set of mutually correlated regions that correspond to brain networks related to different cognitive functions. Yet graph-theoretical studies of neural connections give a different picture: that of a highly integrated system with small-world properties: local clustering but with short pathways across the complete structure. We examine the conditional independence properties of the fMRI signal, i.e. its Markov structure, to find realistic assumptions on the connectivity structure that are required to explain the observed functional connectivity. In particular we seek a decomposition of the Markov structure into segregated functional networks using decomposable graphs: a set of strongly-connected and partially overlapping cliques. We introduce a new method to efficiently extract such cliques on a large, strongly-connected graph. We compare methods learning different graph structures from functional connectivity by testing the goodness of fit of the model they learn on new data. We find that summarizing the structure as strongly-connected networks can give a good description only for very large and overlapping networks. These results highlight that Markov models are good tools to identify the structure of brain connectivity from fMRI signals, but for this purpose they must reflect the small-world properties of the underlying neural systems.}}, affiliation = {Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , PARIETAL - INRIA Saclay - Ile de France , Neuroimagerie cognitive}, audience = {internationale }, author = {Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Poline, Jean Baptiste and Thirion, Bertrand}, doi = {10.1016/j.jphysparis.2012.01.001 }, hal_id = {hal-00665340}, journal = {Journal of Physiology - Paris}, keywords = {fMRI, brain networks; small-world; functional connectivity; Markov models; decomposable graphs}, language = {Anglais}, month = {January}, pages = {epub ahead of print}, pdf = {http://hal.inria.fr/hal-00665340/PDF/paper.pdf}, publisher = {Elsevier}, title = {{Markov models for fMRI correlation structure: is brain functional connectivity small world, or decomposable into networks?}}, url = {http://hal.inria.fr/hal-00665340}, year = {2012} }
@inproceedings{varoquaux-etal:2012, abstract = {{Functional neuroimaging can measure the brain's response to an external stimulus. It is used to perform brain mapping: identifying from these observations the brain regions involved. This problem can be cast into a linear supervised learning task where the neuroimaging data are used as predictors for the stimulus. Brain mapping is then seen as a support recovery problem. On functional MRI (fMRI) data, this problem is particularly challenging as i) the number of samples is small due to limited acquisition time and ii) the variables are strongly correlated. We propose to overcome these difficulties using sparse regression models over new variables obtained by clustering of the original variables. The use of randomization techniques, e.g. bootstrap samples, and clustering of the variables improves the recovery properties of sparse methods. We demonstrate the benefit of our approach on an extensive simulation study as well as two fMRI datasets.}}, address = {Edinburgh, United Kingdom}, affiliation = {Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , PARIETAL - INRIA Saclay - Ile de France}, audience = {internationale }, author = {Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Thirion, Bertrand}, booktitle = {{International Conference on Machine Learning}}, editor = {Langford, John and Pineau, Joelle}, hal_id = {hal-00705192}, keywords = {Sparse recovery ; correlated design ; clustering ; randomization ; brain imaging ; fMRI}, language = {English}, month = {June}, organization = {Andrew McCallum}, pdf = {http://hal.inria.fr/hal-00705192/PDF/paper.pdf}, title = {{Small-sample brain mapping: sparse recovery on spatially correlated designs with randomization and clustering}}, url = {http://hal.inria.fr/hal-00705192}, year = {2012} }
@inproceedings{gramfort-etal:2012b, abstract = {{Word reading involves multiple cognitive processes. To infer which word is being visualized, the brain first processes the visual percept, deciphers the letters, bigrams, and activates different words based on context or prior expectation like word frequency. In this contribution, we use supervised machine learning techniques to decode the first step of this processing stream using functional Magnetic Resonance Images (fMRI). We build a decoder that predicts the visual percept formed by four letter words, allowing us to identify words that were not present in the training data. To do so, we cast the learning problem as multiple classification problems after describing words with multiple binary attributes. This work goes beyond the identification or reconstruction of single letters or simple geometrical shapes and addresses a challenging estimation problem, that is the prediction of multiple variables from a single observation, hence facing the problem of learning multiple predictors from correlated inputs.}}, address = {Londres, Royaume-Uni}, affiliation = {PARIETAL - INRIA Saclay - Ile de France , Athinoula A. Martinos Center for Biomedical Imaging , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , Neuroimagerie cognitive}, audience = {internationale }, author = {Gramfort, Alexandre and Pallier, Christophe and Varoquaux, Ga{\"e}l and Thirion, Bertrand}, booktitle = {{Pattern Recognition in NeuroImaging (PRNI), 2012 International Workshop on}}, doi = {10.1109/PRNI.2012.20 }, hal_id = {hal-00730768}, keywords = {fMRI, classification, machine learning, brain reading, decoding}, language = {Anglais}, month = {July}, pages = {13-16}, pdf = {http://hal.inria.fr/hal-00730768/PDF/paper.pdf}, title = {{Decoding Visual Percepts Induced by Word Reading with fMRI}}, url = {http://hal.inria.fr/hal-00730768}, year = {2012} }
@inproceedings{pedregosa-etal:2012b, abstract = {{Inferring the functional specificity of brain regions from functional Magnetic Resonance Images (fMRI) data is a challenging statistical problem. While the General Linear Model (GLM) remains the standard approach for brain mapping, supervised learning techniques (a.k.a. decoding) have proven to be useful to capture multivariate statistical effects distributed across voxels and brain regions. Up to now, much effort has been made to improve decoding by incorporating prior knowledge in the form of a particular regularization term. In this paper we demonstrate that further improvement can be made by accounting for non-linearities using a ranking approach rather than the commonly used least-square regression. Through simulation, we compare the recovery properties of our approach to linear models commonly used in fMRI based decoding. We demonstrate the superiority of ranking with a real fMRI dataset.}}, address = {London, United Kingdom}, affiliation = {SIERRA - INRIA Paris - Rocquencourt , PARIETAL - INRIA Saclay - Ile de France , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , Service NEUROSPIN - NEUROSPIN}, audience = {internationale }, author = {Pedregosa, Fabian and Gramfort, Alexandre and Varoquaux, Ga{\"e}l and Thirion, Bertrand and Pallier, Christophe and Cauvet, Elodie}, booktitle = {{PRNI 2012 : 2nd International Workshop on Pattern Recognition in NeuroImaging}}, hal_id = {hal-00717954}, keywords = {fMRI, supervised learning, decoding, ranking}, language = {English}, month = {July}, pdf = {http://hal.inria.fr/hal-00717954/PDF/paper.pdf}, title = {{Improved brain pattern recovery through ranking approaches}}, url = {http://hal.inria.fr/hal-00717954}, year = {2012} }
@inproceedings{eickenberg-etal:2012, address = {London, United Kingdom}, author = {Eickenberg, Michael and Gramfort, Alexandre and Thirion, Bertrand}, booktitle = {{International Workshop on Pattern Recognition in NeuroImaging}}, keywords = {scattering transform ; fMRI encoding models ; visual cortex}, month = {July}, pdf = {https://hal.inria.fr/hal-00704528/file/example_paper.pdf}, title = {{Multilayer Scattering Image Analysis Fits fMRI Activity in Visual Areas}}, url = {https://hal.inria.fr/hal-00704528}, year = {2012} }
@inproceedings{pedregosa-etal:2012a, abstract = {{Medical images can be used to predict a clinical score coding for the severity of a disease, a pain level or the complexity of a cognitive task. In all these cases, the predicted variable has a natural order. While a standard classifier discards this information, we would like to take it into account in order to improve prediction performance. A standard linear regression does model such information; however, the linearity assumption is likely not to be satisfied when predicting from pixel intensities in an image. In this paper we address these modeling challenges with a supervised learning procedure where the model aims to order or rank images. We use a linear model for its robustness in high dimension and its possible interpretation. We show on simulations and two fMRI datasets that this approach is able to predict the correct ordering on pairs of images, yielding higher prediction accuracy than standard regression and multiclass classification techniques.}}, address = {Nice, France}, affiliation = {SIERRA - INRIA Paris - Rocquencourt , PARIETAL - INRIA Saclay - Ile de France , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , Service NEUROSPIN - NEUROSPIN}, audience = {internationale }, author = {Pedregosa, Fabian and Gramfort, Alexandre and Varoquaux, Ga{\"e}l and Cauvet, Elodie and Pallier, Christophe and Thirion, Bertrand}, booktitle = {{MLMI 2012 - 3rd International Workshop on Machine Learning in Medical Imaging}}, hal_id = {hal-00717990}, keywords = {fMRI, supervised learning, decoding, ranking}, language = {English}, month = {July}, organization = {INRIA}, pdf = {http://hal.inria.fr/hal-00717990/PDF/paper.pdf}, title = {{Learning to rank from medical imaging data}}, url = {http://hal.inria.fr/hal-00717990}, year = {2012} }
@article{jenatton-etal:2012, abstract = {{Inverse inference, or "brain reading", is a recent paradigm for analyzing functional magnetic resonance imaging (fMRI) data, based on pattern recognition and statistical learning. By predicting some cognitive variables related to brain activation maps, this approach aims at decoding brain activity. Inverse inference takes into account the multivariate information between voxels and is currently the only way to assess how precisely some cognitive information is encoded by the activity of neural populations within the whole brain. However, it relies on a prediction function that is plagued by the curse of dimensionality, since there are far more features than samples, i.e., more voxels than fMRI volumes. To address this problem, different methods have been proposed, such as, among others, univariate feature selection, feature agglomeration and regularization techniques. In this paper, we consider a sparse hierarchical structured regularization. Specifically, the penalization we use is constructed from a tree that is obtained by spatially-constrained agglomerative clustering. This approach encodes the spatial structure of the data at different scales into the regularization, which makes the overall prediction procedure more robust to inter-subject variability. The regularization used induces the selection of spatially coherent predictive brain regions simultaneously at different scales. We test our algorithm on real data acquired to study the mental representation of objects, and we show that the proposed algorithm not only delineates meaningful brain regions but yields as well better prediction accuracy than reference methods.}}, affiliation = {Laboratoire d'informatique de l'{\'e}cole normale sup{\'e}rieure - LIENS , SIERRA - INRIA Paris - Rocquencourt , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , PARIETAL - INRIA Saclay - Ile de France , Neuroimagerie cognitive}, audience = {internationale }, author = {Jenatton, Rodolphe and Gramfort, Alexandre and Michel, Vincent and Obozinski, Guillaume and Eger, Evelyn and Bach, Francis and Thirion, Bertrand}, doi = {10.1137/110832380 }, hal_id = {inria-00589785}, journal = {SIAM Journal on Imaging Sciences}, keywords = {brain reading; structured sparsity; convex optimization; sparse hierarchical models; inter-subject validation; proximal methods}, language = {Anglais}, month = {July}, number = {3 }, pages = {835-856}, pdf = {http://hal.inria.fr/inria-00589785/PDF/sparse\_hierarchical\_fmri\_mining\_hal.pdf}, publisher = {SIAM}, title = {{Multi-scale Mining of fMRI data with Hierarchical Structured Sparsity}}, url = {http://hal.inria.fr/inria-00589785}, volume = {5}, year = {2012} }
@article{gramfort-etal:2012a, affiliation = {PARIETAL - INRIA Saclay - Ile de France, LNAO CEA Neurospin, Laboratoire des signaux et systemes (L2S) , Athinoula A. Martinos Center for Biomedical Imaging}, author = {Gramfort, Alexandre and Kowalski, Matthieu and H{\"a}m{\"a}l{\"a}inen, Matti}, doi = {10.1088/0031-9155/57/7/1937 }, hal_id = {hal-00690774}, journal = {Physics in Medicine and Biology}, language = {Anglais}, month = {March}, number = {7 }, pages = {1937-1961}, publisher = {IOP Science}, title = {{Mixed-norm estimates for the M/EEG inverse problem using accelerated gradient methods.}}, url = {http://hal.inria.fr/hal-00690774}, volume = {57}, year = {2012} }
@inproceedings{gramfort-etal:2011d, address = {Granada, Espagne}, affiliation = {PARIETAL - INRIA Saclay - Ile de France , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO}, audience = {internationale }, author = {Gramfort, Alexandre and Varoquaux, Ga{\"e}l and Thirion, Bertrand}, booktitle = {{NIPS 2011 MLINI Workshop}}, hal_id = {hal-00704875}, keywords = {neuroimaging; sparse; recovery; feature identification; statistics}, language = {Anglais}, month = {December}, pdf = {http://hal.inria.fr/hal-00704875/PDF/paper.pdf}, title = {{Beyond brain reading: randomized sparsity and clustering to simultaneously predict and identify}}, url = {http://hal.inria.fr/hal-00704875}, year = {2011} }
@article{pedregosa-etal:2011, abstract = {{Scikit-learn is a Python module integrating a wide range of state-of-the-art machine learning algorithms for medium-scale supervised and unsupervised problems. This package focuses on bringing machine learning to non-specialists using a general-purpose high-level language. Emphasis is put on ease of use, performance, documentation, and API consistency. It has minimal dependencies and is distributed under the simplified BSD license, encouraging its use in both academic and commercial settings. Source code, binaries, and documentation can be downloaded from http://scikit-learn.sourceforge.net.}}, affiliation = {PARIETAL - INRIA Saclay - Ile de France , Laboratoire de Neuroimagerie Assist{\'e}e par Ordinateur - LNAO , Nuxeo , Kobe University , Bauhaus-Universit{\"a}t Weimar , Google Inc , Laboratoire de M{\'e}canique et Ing{\'e}nieries - LAMI , University of Washington , Department of Mechanical and Industrial Engineering [UMass] , Enthought Inc , TOTAL}, audience = {internationale }, author = {Pedregosa, Fabian and Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and Vanderplas, Jake and Passos, Alexandre and Cournapeau, David and Brucher, Matthieu and Perrot, Matthieu and Duchesnay, Edouard}, hal_id = {hal-00650905}, journal = {Journal of Machine Learning Research}, keywords = {Python; supervised learning; unsupervised learning; model selection}, language = {Anglais}, month = {October}, pdf = {http://hal.inria.fr/hal-00650905/PDF/pedregosa11a.pdf}, publisher = {MIT Press}, title = {{Scikit-learn: Machine Learning in Python}}, url = {http://hal.inria.fr/hal-00650905}, year = {2011} }
@article{gramfort-etal:2011e, abstract = {{To recover the sources giving rise to electro- and magnetoencephalography in individual measurements, realistic physiological modeling is required, and accurate numerical solutions must be computed. We present OpenMEEG, which solves the electromagnetic forward problem in the quasistatic regime, for head models with piecewise constant conductivity. The core of OpenMEEG consists of the symmetric Boundary Element Method, which is based on an extended Green Representation theorem. OpenMEEG is able to provide lead fields for four different electromagnetic forward problems: Electroencephalography (EEG), Magnetoencephalography (MEG), Electrical Impedance Tomography (EIT), and intracranial electric potentials (IPs). OpenMEEG is open source and multiplatform. It can be used from Python and Matlab in conjunction with toolboxes that solve the inverse problem; its integration within FieldTrip is operational since release 2.0.}}, author = {Gramfort, Alexandre and Papadopoulo, Th{\'e}odore and Olivi, Emmanuel and Clerc, Maureen}, doi = {10.1155/2011/923703}, journal = {Comput Intell Neurosci}, pages = {923703}, publisher = {Hindawi Publishing Corporation}, title = {{Forward Field Computation with OpenMEEG.}}, url = {http://hal.inria.fr/inria-00584205}, volume = {2011}, year = {2011} }
@inproceedings{gramfort-etal:2011c, affiliation = {INRIA, Parietal team, Saclay, France}, author = {Gramfort, Alexandre and Strohmeier, Daniel and Haueisen, Jens and H{\"a}m{\"a}l{\"a}inen, Matti and Kowalski, Matthieu}, booktitle = {Information Processing in Medical Imaging}, doi = {10.1007/978-3-642-22092-0_49}, editor = {Sz{\'e}kely, G{\'a}bor and Hahn, Horst}, pages = {600-611}, publisher = {Springer Berlin / Heidelberg}, series = {Lecture Notes in Computer Science}, title = {Functional Brain Imaging with M/EEG Using Structured Sparsity in Time-Frequency Dictionaries}, url = {http://dx.doi.org/10.1007/978-3-642-22092-0_49}, volume = {6801}, year = {2011} }
@article{michel-etal:2011b, author = {Michel, V. and Gramfort, A. and Varoquaux, G. and Eger, E. and Thirion, B.}, doi = {10.1109/TMI.2011.2113378}, issn = {0278-0062}, journal = {Medical Imaging, IEEE Transactions on}, keywords = {brain mapping; fMRI; multivariate pattern analysis; predictive diagnosis; total variation regularization; image classification}, month = {july}, number = {7}, pages = {1328-1340}, title = {Total Variation Regularization for fMRI-Based Prediction of Behavior}, url = {http://hal.inria.fr/inria-00563468/en/}, volume = {30}, year = {2011} }
@inproceedings{varoquaux-etal:2011, address = {Kaufbeuren, Allemagne}, affiliation = {Laboratoire de Neuroimagerie Assist\'ee par Ordinateur - LNAO - CEA : DSV/I2BM/NEUROSPIN - PARIETAL - INRIA Saclay - Ile de France - INRIA - Neuroimagerie cognitive - INSERM : U992 - Universit\'e Paris Sud - Paris XI - CEA : DSV/I2BM/NEUROSPIN}, author = {Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Pedregosa, Fabian and Michel, Vincent and Thirion, Bertrand}, booktitle = {{Information Processing in Medical Imaging}}, hal_id = {inria-00588898}, keywords = {sparse models, atlas, resting state, segmentation}, month = {July}, organization = {Gabor Szekely, Horst Hahn}, pdf = {http://hal.inria.fr/inria-00588898/PDF/paper.pdf}, title = {{Multi-subject dictionary learning to segment an atlas of brain spontaneous activity}}, url = {http://hal.inria.fr/inria-00588898/en/}, year = {2011} }
@article{michel-etal:2011, affiliation = {Laboratoire de Neuroimagerie Assist\'ee par Ordinateur - LNAO - CEA : DSV/I2BM/NEUROSPIN - PARIETAL - INRIA Saclay - Ile de France - INRIA - Neuroimagerie cognitive - INSERM : U992 - Universit\'e Paris Sud - Paris XI - CEA : DSV/I2BM/NEUROSPIN - SELECT - INRIA Saclay - Ile de France - INRIA - Universit\'e Paris Sud - Paris XI - CNRS : UMR - Laboratoire de Math\'ematiques d'Orsay - LM-Orsay - CNRS : UMR8628 - Universit\'e Paris Sud - Paris XI}, author = {Michel, Vincent and Gramfort, Alexandre and Varoquaux, Ga{\"e}l and Eger, Evelyn and Keribin, Christine and Thirion, Bertrand}, doi = {10.1016/j.patcog.2011.04.006 }, hal_id = {inria-00589201}, journal = {Pattern Recognition}, keywords = {fMRI; brain reading; prediction; hierarchical clustering; dimension reduction; multi-scale analysis; feature agglomeration}, month = {April}, pages = {epub ahead of print}, pdf = {http://hal.inria.fr/inria-00589201/PDF/supervised\_clustering\_vm\_review.pdf}, publisher = {elsevier}, title = {{A supervised clustering approach for fMRI-based inference of brain states}}, url = {http://hal.inria.fr/inria-00589201/en/}, year = {2011} }
@techreport{jenatton-etal:2011, affiliation = {Laboratoire d'informatique de l'\'ecole normale sup\'erieure - LIENS - CNRS : UMR8548 - Ecole Normale Sup\'erieure de Paris - ENS Paris - SIERRA - INRIA Paris - Rocquencourt - INRIA : PARIS - ROCQUENCOURT - Ecole Normale Sup\'erieure de Paris - ENS Paris - CNRS : UMR8548 - Laboratoire de Neuroimagerie Assist\'ee par Ordinateur - LNAO - CEA : DSV/I2BM/NEUROSPIN - PARIETAL - INRIA Saclay - Ile de France - INRIA - Neuroimagerie cognitive - INSERM : U992 - Universit\'e Paris Sud - Paris XI - CEA : DSV/I2BM/NEUROSPIN}, author = {Jenatton, Rodolphe and Gramfort, Alexandre and Michel, Vincent and Obozinski, Guillaume and Eger, Evelyn and Bach, Francis and Thirion, Bertrand}, hal_id = {inria-00589785}, keywords = {brain reading; structured sparsity; convex optimization; sparse hierarchical models; inter-subject validation; proximal methods}, month = {May}, pages = {16}, pdf = {http://hal.inria.fr/inria-00589785/PDF/sparse\_hierarchical\_fmri\_mining\_HAL.pdf}, title = {{Multi-scale Mining of fMRI data with Hierarchical Structured Sparsity}}, type = {Rapport de recherche}, url = {http://hal.inria.fr/inria-00589785/en/}, year = {2011} }
@article{gramfort-etal:2011b, author = {Gramfort, Alexandre and Papadopoulo, Theodore and Baillet, Sylvain and Clerc, Maureen}, doi = {10.1016/j.neuroimage.2010.09.087}, issn = {1053-8119}, journal = {NeuroImage}, keywords = {Functional brain imaging, Tracking, Graph Cuts Optimization, Magnetoencephalography (MEG), Electroencephalography (EEG)}, month = {Feb}, number = {3}, pages = {1930-1941}, title = {Tracking cortical activity from M/EEG using graph-cuts with spatiotemporal constraints}, url = {http://www.sciencedirect.com/science/article/B6WNP-5161PBP-4/2/a788648433443badba4516b1d451b049}, volume = {54}, year = {2011} }
@article{cottereau-etal:2011a, author = {Cottereau, Benoit and Lorenceau, Jean and Gramfort, Alexandre and Clerc, Maureen and Thirion, Bertrand and Baillet, Sylvain}, doi = {10.1016/j.neuroimage.2010.10.004}, issn = {1053-8119}, journal = {NeuroImage}, keywords = {Vision, Retinotopy, Magnetoencephalography (MEG), Steady-State Visual Evoked Response (SSVER), Source Imaging}, number = {3}, pages = {1919-1929}, title = {Phase delays within visual cortex shape the response to steady-state visual stimulation}, url = {http://www.sciencedirect.com/science/article/B6WNP-516M75G-2/2/69bf20b0381f2b20b6e6b6409d67abe0}, volume = {54}, year = {2011} }
@article{gramfort-etal:10b, abstract = {BACKGROUND: Interpreting and controlling bioelectromagnetic phenomena require realistic physiological models and accurate numerical solvers. A semi-realistic model often used in practice is the piecewise constant conductivity model, for which only the interfaces have to be meshed. This simplified model makes it possible to use Boundary Element Methods. Unfortunately, most Boundary Element solutions are confronted with accuracy issues when the conductivity ratio between neighboring tissues is high, as for instance the scalp/skull conductivity ratio in electro-encephalography. To overcome this difficulty, we proposed a new method called the symmetric BEM, which is implemented in the OpenMEEG software. The aim of this paper is to present OpenMEEG, both from the theoretical and the practical point of view, and to compare its performances with other competing software packages. METHODS: We have run a benchmark study in the field of electro- and magneto-encephalography, in order to compare the accuracy of OpenMEEG with other freely distributed forward solvers. We considered spherical models, for which analytical solutions exist, and we designed randomized meshes to assess the variability of the accuracy. Two measures were used to characterize the accuracy: the Relative Difference Measure and the Magnitude ratio. The comparisons were run, either with a constant number of mesh nodes, or a constant number of unknowns across methods. Computing times were also compared. RESULTS: We observed more pronounced differences in accuracy in electroencephalography than in magnetoencephalography. The methods could be classified in three categories: the linear collocation methods, that run very fast but with low accuracy, the linear collocation methods with isolated skull approach for which the accuracy is improved, and OpenMEEG that clearly outperforms the others. As far as speed is concerned, OpenMEEG is on par with the other methods for a constant number of unknowns, and is hence faster for a prescribed accuracy level. CONCLUSIONS: This study clearly shows that OpenMEEG represents the state of the art for forward computations. Moreover, our software development strategies have made it handy to use and to integrate with other packages. The bioelectromagnetic research community should therefore be able to benefit from OpenMEEG with a limited development effort.}, author = {Gramfort, Alexandre and Papadopoulo, Theodore and Olivi, Emmanuel and Clerc, Maureen}, doi = {10.1186/1475-925X-9-45}, issn = {1475-925X}, journal = {BioMedical Engineering OnLine}, keywords = {Boundary Element Method, Electromagnetics, quasistatic regime, Electroencephalography, Magnetoencephalography, Forward modeling, opensource software}, number = {1}, pages = {45}, pubmedid = {20819204}, title = {OpenMEEG: opensource software for quasistatic bioelectromagnetics}, url = {http://www.biomedical-engineering-online.com/content/9/1/45}, volume = {9}, year = {2010} }
@inproceedings{varoquaux-etal:2010, author = {Varoquaux, Gael and Gramfort, Alexandre and Poline, Jean-Baptiste and Thirion, Bertrand}, booktitle = {NIPS}, editor = {Lafferty, John D. and Williams, Christopher K. I. and Shawe-Taylor, John and Zemel, Richard S. and Culotta, Aron}, pages = {2334-2342}, publisher = {Curran Associates, Inc.}, title = {Brain covariance selection: better individual functional connectivity models using population prior.}, url = {http://books.nips.cc/papers/files/nips23/NIPS2010_1054.pdf}, year = {2010} }
@unpublished{kowalski-gramfort:2010, affiliation = {Laboratoire des signaux et syst{\`e}mes (L2S) - UMR8506 CNRS - SUPELEC - Univ Paris-Sud - PARIETAL - INRIA Saclay - Ile de France - INRIA }, author = {Kowalski, Matthieu and Gramfort, Alexandre}, keywords = {normes mixtes ; probl{\`e}me inverse ; op{\'e}rateurs de proximit{\'e} ; {\'e}lectroenc{\'e}phalographie ; magn{\'e}toenc{\'e}phalographie}, title = {A priori par normes mixtes pour les probl{\`e}mes inverses: Application {\`a} la localisation de sources en M/EEG}, url = {http://hal.archives-ouvertes.fr/hal-00473970/en/}, year = {2010} }
@inproceedings{gramfort-etal:10c, affiliation = {PARIETAL - INRIA Saclay - Ile de France - INRIA}, author = {Gramfort, Alexandre}, booktitle = {Biomag: International Conference on Biomagnetism}, doi = {10.3389/conf.fnins.2010.06.00111}, keywords = {MEG, EEG, Inverse Problem, Sparse prior, IRLS}, note = {Winning Paper of the Young Investigator Award}, title = {Multi-condition M/EEG inverse modeling with sparsity assumptions: how to estimate what is common and what is specific in multiple experimental conditions}, url = {http://hal.archives-ouvertes.fr/inria-00468592/en/}, year = {2010} }
@inproceedings{gramfort-etal:10d, affiliation = {PARIETAL - INRIA Saclay - Ile de France - INRIA}, author = {Gramfort, Alexandre and Papadopoulo, Th\'eodore and Olivi, Emmanuel and Clerc, Maureen}, booktitle = {Biomag: International Conference on Biomagnetism}, doi = {10.3389/conf.fnins.2010.06.00065}, keywords = {MEG, EEG, Forward Problem, BEM}, title = {An empirical evaluation of free BEM solvers for accurate M/EEG forward modeling}, url = {files/poster_openmeeg_biomag_2010.pdf}, year = {2010} }
@article{gramfort-etal:10, author = {Gramfort, A. and Keriven, R. and Clerc, M.}, doi = {10.1109/TBME.2009.2037139}, issn = {0018-9294}, journal = {Biomedical Engineering, IEEE Transactions on}, keywords = {Electroencephalography, Electroencephalography (EEG), Estimation, Evoked potentials, Graph Cuts Optimization, Graph Laplacian, Latency estimation, Magnetoencephalography (MEG), Manifold learning, Principal component analysis, Single-trial analysis, Time series analysis}, month = {may}, number = {5}, pages = {1051-1061}, title = {Graph-Based Variability Estimation in Single-Trial Event-Related Neural Responses}, url = {http://www.ncbi.nlm.nih.gov/pubmed/20142163}, volume = {57}, year = {2010} }
@inproceedings{kowalski-gramfort:09, author = {Kowalski, Matthieu and Gramfort, Alexandre}, booktitle = {GRETSI}, keywords = {Magnetoencephalographie, Electroencephalographie, Probl{\`e}me inverse, Elitist-Lasso, Operateurs de proximit{\'e}}, month = {sept}, title = {{A priori par normes mixtes pour les probl{\`e}mes inverses: Application {\`a} la localisation de sources en M/EEG}}, url = {http://hal.archives-ouvertes.fr/hal-00424039/}, year = {2009} }
@inproceedings{cottereau-laurenceau-etal:09, author = {Cottereau, B. and Lorenceau, J. and Gramfort, A. and Clerc, M. and Baillet, S.}, booktitle = {Human Brain Mapping}, keywords = {MEG, retinotopy, chronometry, human vision}, month = {jun}, title = {Fine chronometric mapping of human visual areas}, year = {2009} }
@inproceedings{gramfort-kowalski:09, author = {Gramfort, Alexandre and Kowalski, Matthieu}, booktitle = {IEEE International Symposium on Biomedical Imaging}, keywords = {Magnetoencephalography, Electroencephalography, Inverse problem, Elitist-Lasso, Proximal iterations}, month = {jun}, title = {Improving M/EEG source localization with an inter-condition sparse prior}, url = {http://hal.archives-ouvertes.fr/hal-00424029/}, year = {2009} }
@inproceedings{gramfort-papadopoulo-etal:08, author = {Gramfort, Alexandre and Papadopoulo, Th{\'e}odore and Cottereau, Benoit and Baillet, Sylvain and Clerc, Maureen}, booktitle = {Biomag: International Conference on Biomagnetism}, keywords = {MEG, somatosensory, graph-cuts, tracking}, month = {aug}, title = {Tracking cortical activity with spatio-temporal constraints using graph-cuts}, url = {http://hal.inria.fr/inria-00336887/fr/}, year = {2008} }
@inproceedings{cottereau-gramfort-etal:08, author = {Cottereau, Benoit and Gramfort, Alexandre and Lorenceau, Jean and Thirion, Bertrand and Clerc, Maureen and Baillet, Sylvain}, booktitle = {Human Brain Mapping}, keywords = {MEG, retinotopy}, month = {jun}, title = {Fast retinotopic mapping of visual fields using MEG}, year = {2008} }
@inproceedings{gramfort-cottereau-etal:07, author = {Gramfort, Alexandre and Cottereau, Benoit and Clerc, Maureen and Thirion, Bertrand and Baillet, Sylvain}, booktitle = {EMBC 2007: IEEE, Engineering in Medicine and Biology Society}, keywords = {Retinotopy, MEG, fMRI, Visual Cortex}, month = {aug}, pages = {4945-4948}, title = {Challenging the estimation of cortical activity from MEG with simulated fMRI-constrained retinotopic maps}, url = {ftp://ftp-sop.inria.fr/odyssee/Publications/2007/gramfort-cottereau-etal:07.pdf}, year = {2007} }
@inproceedings{gramfort-clerc:07, author = {Gramfort, Alexandre and Clerc, Maureen}, booktitle = {NFSI 2007: Symposium on Noninvasive Functional Source Imaging}, keywords = {EEG, Event-related potentials, Laplacian eigenmaps, P300, dimensionality reduction}, month = {oct}, pages = {169-172}, title = {Low dimensional representations of {MEG/EEG} data using Laplacian eigenmaps}, url = {ftp://ftp-sop.inria.fr/odyssee/Publications/2007/gramfort-clerc:07.pdf}, year = {2007} }
@inproceedings{clerc-gramfort-etal:07, author = {Clerc, Maureen and Gramfort, Alexandre and Landreau, Perrine and Papadopoulo, Theodore}, booktitle = {Proceedings of Neuromath}, keywords = {EEG, MEG, forward modeling, boundary element method}, title = {MEG and EEG processing with OpenMEEG}, year = {2007} }
@inproceedings{cottereau-laurenceau-etal:07, author = {Cottereau, Benoit and Lorenceau, Jean and Gramfort, Alexandre and Thirion, Bertrand and Clerc, Maureen and Baillet, Sylvain}, booktitle = {Proceedings of Neuromath}, keywords = {MEG, retinotopy, human vision}, title = {Fast Retinotopic Mapping of Visual Fields using MEG}, year = {2007} }