Essec\Faculty\Model\Profile {#2216
#_id: "B00806953"
#_source: array:40 [
"bid" => "B00806953"
"academId" => "31454"
"slug" => "lecue-guillaume"
"fullName" => "Guillaume LECUE"
"lastName" => "LECUE"
"firstName" => "Guillaume"
"title" => array:2 [
"fr" => "Professeur"
"en" => "Professor"
]
"email" => "b00806953@essec.edu"
"status" => "ACTIF"
"campus" => "Campus de Cergy"
"departments" => []
"phone" => "01 34 43 36 02"
"sites" => []
"facNumber" => "31454"
"externalCvUrl" => "https://faculty.essec.edu/en/cv/lecue-guillaume/pdf"
"googleScholarUrl" => "https://scholar.google.com/citations?user=kng-DfIAAAAJ&hl=fr"
"facOrcId" => "https://orcid.org/0000-0002-6391-8746"
"career" => array:2 [
0 => Essec\Faculty\Model\CareerItem {#2219
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2022-09-01"
"endDate" => null
"isInternalPosition" => true
"type" => array:2 [
"fr" => "Positions académiques principales"
"en" => "Full-time academic appointments"
]
"label" => array:2 [
"fr" => "Professeur"
"en" => "Professor"
]
"institution" => array:2 [
"fr" => "ESSEC Business School"
"en" => "ESSEC Business School"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
1 => Essec\Faculty\Model\CareerItem {#2222
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2022-09-01"
"endDate" => "2025-08-31"
"isInternalPosition" => true
"type" => array:2 [
"en" => "Other Academic Appointments"
"fr" => "Autres positions académiques"
]
"label" => array:2 [
"fr" => "Directeur académique du BSc AIDAMS"
"en" => "Academic director of BSc AIDAMS"
]
"institution" => array:2 [
"fr" => "ESSEC Business School"
"en" => "ESSEC Business School"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"diplomes" => array:3 [
0 => Essec\Faculty\Model\Diplome {#2218
#_index: null
#_id: null
#_source: array:6 [
"diplome" => "DIPLOMA"
"type" => array:2 [
"fr" => "Diplômes"
"en" => "Diplomas"
]
"year" => "2012"
"label" => array:2 [
"en" => "Doctorate, Other, Mathematics"
"fr" => "Doctorat, Autre, Mathématiques"
]
"institution" => array:2 [
"fr" => "Université Paris Est Créteil"
"en" => "Université Paris Est Créteil"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
1 => Essec\Faculty\Model\Diplome {#2220
#_index: null
#_id: null
#_source: array:6 [
"diplome" => "DIPLOMA"
"type" => array:2 [
"fr" => "Diplômes"
"en" => "Diplomas"
]
"year" => "2007"
"label" => array:2 [
"en" => "Doctorate, Mathematics"
"fr" => "Doctorat, Mathématiques"
]
"institution" => array:2 [
"fr" => "Université Pierre et Marie Curie (UPMC)"
"en" => "Université Pierre et Marie Curie (UPMC)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
2 => Essec\Faculty\Model\Diplome {#2217
#_index: null
#_id: null
#_source: array:6 [
"diplome" => "DIPLOMA"
"type" => array:2 [
"fr" => "Diplômes"
"en" => "Diplomas"
]
"year" => "2004"
"label" => array:2 [
"en" => "Master of Engineering, Mathematics"
"fr" => "Ecole d'ingénieur, Mathématiques"
]
"institution" => array:2 [
"fr" => "École Normale Supérieure de Rennes"
"en" => "École Normale Supérieure de Rennes"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"bio" => array:2 [
"fr" => null
"en" => """
<p>Guillaume Lecué graduated from Ecole Normale Supérieure de Cachan, France, and received the M.Sc. degree in applied mathematics from Université Paris XI - Orsay, France, in 2005. He received the Ph.D. degree in statistics from Université Paris VI - Jussieu, France, in 2007. In 2008, he went to the Mathematical Sciences Institute in Canberra, Australia, and later to the Technion, Haifa, Israel, as a postdoc. He completed his habilitation in 2008 at the Laboratoire d'analyse et mathématiques appliquées, Université Paris-Est Marne-la-vallée, France.</p>\n
\n
<p>He is currently a full professor at ESSEC. His research interests are in the areas of learning theory, empirical process theory, high-dimensional phenomena and deep learning. He taught at Ecole Polytechnique from 2012 to 2015 and at ENSAE (the French national school of statistics and economic administration) from 2015 to 2022.</p>\n
\n
<p>Dr. Lecué received the "Mark Fulk award" for the best student paper at the 2006 Conference on Learning Theory, COLT06, Pittsburgh, PA and the "Prix de la chancellerie des Universités de Paris" for the best Ph.D. thesis in mathematics and its applications defended in Paris in 2007.</p>
"""
]
"department" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"site" => array:2 [
"fr" => "https://guillaume-lecue.faculty.essec.edu"
"en" => "https://guillaume-lecue.faculty.essec.edu"
]
"industrrySectors" => array:2 [
"fr" => "Banques - Internet et E-commerce de détail - Médias et divertissement - Services publics de gaz et d'électricité"
"en" => "Banks - Internet and E-Commerce Retail - Media & Entertainment - Electric & Gas Utilities"
]
"researchFields" => array:2 [
"fr" => "Analyse des données statistiques - Economie de la santé - Mathématiques - Sciences de la décision - Théorie des probabilités et statistiques"
"en" => "Statistical Data Analysis - Health Economics - Mathematics - Decision Sciences - Probability Theory & Mathematical Statistics"
]
"teachingFields" => array:2 [
"fr" => "Théorie des probabilités et statistiques - Mathématiques - Marketing et analyses des données"
"en" => "Probability Theory & Mathematical Statistics - Mathematics - Marketing and Data Analytics"
]
"distinctions" => []
"teaching" => []
"otherActivities" => array:2 [
0 => Essec\Faculty\Model\ExtraActivity {#2221
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-04-01"
"endDate" => null
"year" => null
"uuid" => "103"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'un comité de lecture"
"en" => "Editorial Board Membership"
]
"label" => array:2 [
"fr" => "Rédacteur adjoint - ALEA"
"en" => "Associate Editor - ALEA"
]
"institution" => array:2 [
"fr" => null
"en" => null
]
"country" => array:2 [
"fr" => null
"en" => null
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
1 => Essec\Faculty\Model\ExtraActivity {#2215
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-01-01"
"endDate" => null
"year" => null
"uuid" => "103"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'un comité de lecture"
"en" => "Editorial Board Membership"
]
"label" => array:2 [
"fr" => "Associate Editor - Annals of Statistics"
"en" => "Associate Editor - Annals of Statistics"
]
"institution" => array:2 [
"fr" => null
"en" => null
]
"country" => array:2 [
"fr" => null
"en" => null
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"theses" => []
"indexedAt" => "2024-11-21T13:21:24.000Z"
"contributions" => array:13 [
0 => Essec\Faculty\Model\Contribution {#2224
#_index: "academ_contributions"
#_id: "13865"
#_source: array:18 [
"id" => "13865"
"slug" => "estimation-bounds-and-sharp-oracle-inequalities-of-regularized-procedures-with-lipschitz-loss-functions"
"yearMonth" => "2019-08"
"year" => "2019"
"title" => "Estimation bounds and sharp oracle inequalities of regularized procedures with Lipschitz loss functions"
"description" => "ALQUIER, P., COTTET, V. et LECUE, G. (2019). Estimation bounds and sharp oracle inequalities of regularized procedures with Lipschitz loss functions. <i>Annals of Statistics</i>, 47(4), pp. 2117-2144."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
2 => array:1 [
"name" => "COTTET Vincent"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1214/18-AOS1742"
"publicationInfo" => array:3 [
"pages" => "2117-2144"
"volume" => "47"
"number" => "4"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
Many classification and regression problems are solved in practice by regularized empirical risk minimizers (RERM). The risk is measured\n
via a loss function. The quadratic loss function is the most popular function for\n
regression. It has been extensively studied (cf. [23, 31] among others). Still many other loss functions are popular among practitioners and are indeed extremely useful in specific situations
"""
"en" => """
Many classification and regression problems are solved in practice by regularized empirical risk minimizers (RERM). The risk is measured\n
via a loss function. The quadratic loss function is the most popular function for\n
regression. It has been extensively studied (cf. [23, 31] among others). Still many other loss functions are popular among practitioners and are indeed extremely useful in specific situations
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
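The abstract above concerns regularized empirical risk minimization with a Lipschitz loss and a norm penalty. A minimal sketch of one such estimator, assuming scikit-learn and toy data (not the paper's code): the logistic loss is 1-Lipschitz, and the ℓ1 penalty is a norm regularizer of the kind analyzed.

```python
# Sketch: regularized ERM with a Lipschitz loss (logistic) and an l1 penalty.
# Illustrative only -- one instance of the RERM estimators analyzed in the paper.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n, p, s = 200, 50, 5                      # samples, dimension, sparsity
beta = np.zeros(p); beta[:s] = 1.0        # s-sparse target vector
X = rng.standard_normal((n, p))
y = (X @ beta + 0.1 * rng.standard_normal(n) > 0).astype(int)

# Logistic loss is 1-Lipschitz; the penalty ||.||_1 promotes sparsity.
clf = LogisticRegression(penalty="l1", solver="liblinear", C=1.0).fit(X, y)
print("nonzero coefficients:", np.flatnonzero(clf.coef_))
```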
1 => Essec\Faculty\Model\Contribution {#2226
#_index: "academ_contributions"
#_id: "13872"
#_source: array:18 [
"id" => "13872"
"slug" => "robust-sub-gaussian-estimation-of-a-mean-vector-in-nearly-linear-time"
"yearMonth" => "2022-03"
"year" => "2022"
"title" => "Robust sub-Gaussian estimation of a mean vector in nearly linear time"
"description" => "DEPERSIN, J. et LECUE, G. (2022). Robust sub-Gaussian estimation of a mean vector in nearly linear time. <i>Annals of Statistics</i>, 50(1), pp. 511-536."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "DEPERSIN Jules"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-30 11:41:59"
"publicationUrl" => "https://projecteuclid.org/journals/annals-of-statistics/volume-50/issue-1/Robust-sub-Gaussian-estimation-of-a-mean-vector-in-nearly/10.1214/21-AOS2118.short"
"publicationInfo" => array:3 [
"pages" => "511-536"
"volume" => "50"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We construct an algorithm for estimating the mean of a heavy-tailed random variable when given an adversarial corrupted sample of N independent observations. The only assumption we make on the distribution of the noncorrupted (or informative) data is the existence of a covariance matrix Σ, unknown to the statistician."
"en" => "We construct an algorithm for estimating the mean of a heavy-tailed random variable when given an adversarial corrupted sample of N independent observations. The only assumption we make on the distribution of the noncorrupted (or informative) data is the existence of a covariance matrix Σ, unknown to the statistician."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
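The estimator above achieves sub-Gaussian rates in nearly linear time; its actual construction is more involved than what fits here. As an illustration of the underlying bucketing idea only, here is plain coordinatewise median-of-means on synthetic heavy-tailed, partially corrupted data (toy code, not the paper's algorithm):

```python
# Sketch of the median-of-means bucketing idea behind robust mean estimation.
# The paper's nearly-linear-time estimator is more refined; this is the plain
# coordinatewise median-of-means, shown only to illustrate the principle.
import numpy as np

def mom_mean(X: np.ndarray, K: int) -> np.ndarray:
    """Split the n observations into K blocks, average within each block,
    then take the coordinatewise median of the K block means."""
    n = X.shape[0]
    idx = np.random.permutation(n)[: (n // K) * K]    # drop the remainder
    blocks = X[idx].reshape(K, n // K, -1)
    return np.median(blocks.mean(axis=1), axis=0)

rng = np.random.default_rng(1)
X = rng.standard_t(df=2.5, size=(1000, 10))           # heavy-tailed sample
X[:20] += 50.0                                        # adversarial corruption
print(mom_mean(X, K=50))                              # close to the true mean 0
```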
2 => Essec\Faculty\Model\Contribution {#2228
#_index: "academ_contributions"
#_id: "13873"
#_source: array:18 [
"id" => "13873"
"slug" => "robust-machine-learning-by-median-of-means-theory-and-practice"
"yearMonth" => "2020-04"
"year" => "2020"
"title" => "Robust machine learning by median-of-means: Theory and practice"
"description" => "LECUE, G. et LERASLE, M. (2020). Robust machine learning by median-of-means: Theory and practice. <i>Annals of Statistics</i>, 48(2)."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "LERASLE Matthieu"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-17 01:00:46"
"publicationUrl" => "https://projecteuclid.org/journals/annals-of-statistics/volume-48/issue-2/Robust-machine-learning-by-median-of-means--Theory-and/10.1214/19-AOS1828.short"
"publicationInfo" => array:3 [
"pages" => ""
"volume" => "48"
"number" => "2"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Median-of-means (MOM) based procedures have been recently introduced in learning theory (Lugosi and Mendelson (2019); Lecué and Lerasle (2017)). These estimators outperform classical least-squares estimators when data are heavy-tailed and/or are corrupted. None of these procedures can be implemented, which is the major issue of current MOM procedures"
"en" => "Median-of-means (MOM) based procedures have been recently introduced in learning theory (Lugosi and Mendelson (2019); Lecué and Lerasle (2017)). These estimators outperform classical least-squares estimators when data are heavy-tailed and/or are corrupted. None of these procedures can be implemented, which is the major issue of current MOM procedures"
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
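The MOM procedures above replace empirical means of losses by medians of block-wise means. A minimal sketch of that device, assuming synthetic data: comparing candidate predictors by their MOM risk estimates, which is insensitive to a few grossly corrupted observations.

```python
# Sketch: comparing predictors by median-of-means risk estimates, the basic
# device behind the implementable MOM procedures discussed above (illustrative).
import numpy as np

def mom_risk(residuals: np.ndarray, K: int) -> float:
    """Median over K blocks of the within-block mean squared residual."""
    n = (len(residuals) // K) * K
    blocks = residuals[:n].reshape(K, -1)
    return float(np.median((blocks ** 2).mean(axis=1)))

rng = np.random.default_rng(2)
x = rng.standard_normal(500)
y = 2.0 * x + rng.standard_t(df=2.1, size=500)        # heavy-tailed noise
y[:10] = 1e6                                          # a few gross outliers

for slope in (1.0, 2.0, 3.0):                         # candidate predictors
    print(slope, mom_risk(y - slope * x, K=25))       # MOM prefers slope 2
```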
3 => Essec\Faculty\Model\Contribution {#2225
#_index: "academ_contributions"
#_id: "13874"
#_source: array:18 [
"id" => "13874"
"slug" => "learning-from-moms-principles-le-cams-approach"
"yearMonth" => "2019-11"
"year" => "2019"
"title" => "Learning from MOM’s principles: Le Cam’s approach"
"description" => "LECUE, G. et LERASLE, M. (2019). Learning from MOM’s principles: Le Cam’s approach. <i>Stochastic Processes and their Applications</i>, 129(11), pp. 4385-4410."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "LERASLE Matthieu"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1016/j.spa.2018.11.024"
"publicationInfo" => array:3 [
"pages" => "4385-4410"
"volume" => "129"
"number" => "11"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "New robust estimators are introduced, derived from median-of-means principle and Le Cam’s aggregation of tests. Minimax sparse rates of convergence are obtained with exponential probability, under weak moment’s assumptions and possible contamination of the dataset."
"en" => "New robust estimators are introduced, derived from median-of-means principle and Le Cam’s aggregation of tests. Minimax sparse rates of convergence are obtained with exponential probability, under weak moment’s assumptions and possible contamination of the dataset."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
4 => Essec\Faculty\Model\Contribution {#2229
#_index: "academ_contributions"
#_id: "13929"
#_source: array:18 [
"id" => "13929"
"slug" => "regularization-and-the-small-ball-method-i-sparse-recovery"
"yearMonth" => "2018-04"
"year" => "2018"
"title" => "Regularization and the small-ball method I: Sparse recovery"
"description" => "LECUE, G. et MENDELSON, S. (2018). Regularization and the small-ball method I: Sparse recovery. <i>Annals of Statistics</i>, 46(2), pp. 611-641."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "MENDELSON Shahar"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-28 10:48:21"
"publicationUrl" => "https://projecteuclid.org/journals/annals-of-statistics/volume-46/issue-2/Regularization-and-the-small-ball-method-I-Sparse-recovery/10.1214/17-AOS1562.full"
"publicationInfo" => array:3 [
"pages" => "611-641"
"volume" => "46"
"number" => "2"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
Our approach gives a common framework that may be used in the analysis of learning problems and regularization problems alike. In particular, it sheds some light on the role various notions of sparsity have in regularization and on their connection with the size of subdifferentials of \n
Ψ\n
in a neighborhood of the true minimizer.\n
As “proof of concept” we extend the known estimates for the LASSO, SLOPE and trace norm regularization.
"""
"en" => """
Our approach gives a common framework that may be used in the analysis of learning problems and regularization problems alike. In particular, it sheds some light on the role various notions of sparsity have in regularization and on their connection with the size of subdifferentials of \n
Ψ\n
in a neighborhood of the true minimizer.\n
As “proof of concept” we extend the known estimates for the LASSO, SLOPE and trace norm regularization.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
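The abstract above extends estimates for norm-regularized least squares such as the LASSO. For context, a toy numpy sketch of the LASSO solved by proximal gradient descent (ISTA); the solver is standard and is not taken from the paper.

```python
# Sketch: LASSO via proximal gradient descent (ISTA). Illustrative toy code;
# the paper analyzes such norm-regularized estimators, not this solver.
import numpy as np

def ista(X, y, lam, steps=500):
    """Minimize (1/2n)||y - Xb||_2^2 + lam * ||b||_1 by soft-thresholding."""
    n, p = X.shape
    L = np.linalg.norm(X, 2) ** 2 / n                 # Lipschitz const. of the gradient
    b = np.zeros(p)
    for _ in range(steps):
        g = X.T @ (X @ b - y) / n                     # gradient of the smooth part
        z = b - g / L
        b = np.sign(z) * np.maximum(np.abs(z) - lam / L, 0.0)  # prox of (lam/L)||.||_1
    return b

rng = np.random.default_rng(3)
n, p, s = 100, 200, 5
beta = np.zeros(p); beta[:s] = 3.0                    # s-sparse target
X = rng.standard_normal((n, p))
y = X @ beta + 0.5 * rng.standard_normal(n)
print(np.flatnonzero(np.abs(ista(X, y, lam=0.2)) > 1e-6))
```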
5 => Essec\Faculty\Model\Contribution {#2223
#_index: "academ_contributions"
#_id: "13930"
#_source: array:18 [
"id" => "13930"
"slug" => "a-mom-based-ensemble-method-for-robustness-subsampling-and-hyperparameter-tuning"
"yearMonth" => "2021-03"
"year" => "2021"
"title" => "A MOM-based ensemble method for robustness, subsampling and hyperparameter tuning"
"description" => "KWON, J., LECUE, G. et LERASLE, M. (2021). A MOM-based ensemble method for robustness, subsampling and hyperparameter tuning. <i>The Electronic Journal of Statistics</i>, 15(1), pp. 1202-1207."
"authors" => array:3 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "KWON Joon"
]
2 => array:1 [
"name" => "LERASLE Matthieu"
]
]
"ouvrage" => ""
"keywords" => array:2 [
0 => "heavy-tailed"
1 => "robustness"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1214/21-EJS1814"
"publicationInfo" => array:3 [
"pages" => "1202-1207"
"volume" => "15"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Hyperparameter tuning and model selection are important steps in machine learning. Unfortunately, classical hyperparameter calibration and model selection procedures are sensitive to outliers and heavy-tailed data. In this work, we construct a selection procedure which can be seen as a robust alternative to cross-validation and is based on a median-of-means principle. Using this procedure, we also build an ensemble method which, trained with algorithms and corrupted heavy-tailed data, selects an algorithm, trains it with a large uncorrupted subsample and automatically tunes its hyperparameters. In particular, the approach can transform any procedure into a robust to outliers and to heavy-tailed data procedure while tuning automatically its hyperparameters."
"en" => "Hyperparameter tuning and model selection are important steps in machine learning. Unfortunately, classical hyperparameter calibration and model selection procedures are sensitive to outliers and heavy-tailed data. In this work, we construct a selection procedure which can be seen as a robust alternative to cross-validation and is based on a median-of-means principle. Using this procedure, we also build an ensemble method which, trained with algorithms and corrupted heavy-tailed data, selects an algorithm, trains it with a large uncorrupted subsample and automatically tunes its hyperparameters. In particular, the approach can transform any procedure into a robust to outliers and to heavy-tailed data procedure while tuning automatically its hyperparameters."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
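A minimal sketch of the selection device described above, assuming scikit-learn and synthetic data: score each hyperparameter by the median, rather than the mean, of block-wise validation losses. This is only the basic principle, not the paper's full ensemble method.

```python
# Sketch of the median-of-means alternative to cross-validation described
# above: pick the hyperparameter minimizing the median of block-wise
# validation losses. Illustrative; not the paper's full procedure.
import numpy as np
from sklearn.linear_model import Ridge

def mom_score(model, X, y, K):
    """Median over K blocks of the within-block mean squared error."""
    idx = np.array_split(np.random.permutation(len(y)), K)
    return np.median([((y[i] - model.predict(X[i])) ** 2).mean() for i in idx])

rng = np.random.default_rng(4)
X = rng.standard_normal((400, 20))
y = X[:, 0] + rng.standard_t(df=2.2, size=400)        # heavy-tailed noise
X_tr, y_tr, X_va, y_va = X[:200], y[:200], X[200:], y[200:]

alphas = [0.01, 0.1, 1.0, 10.0]                       # candidate hyperparameters
scores = [mom_score(Ridge(alpha=a).fit(X_tr, y_tr), X_va, y_va, K=10)
          for a in alphas]
print("selected alpha:", alphas[int(np.argmin(scores))])
```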
6 => Essec\Faculty\Model\Contribution {#2227
#_index: "academ_contributions"
#_id: "13928"
#_source: array:18 [
"id" => "13928"
"slug" => "slope-meets-lasso-improved-oracle-bounds-and-optimality"
"yearMonth" => "2018-12"
"year" => "2018"
"title" => "Slope Meets Lasso: Improved Oracle Bounds and Optimality"
"description" => "BELLEC, P.C., LECUE, G. et TSYBAKOV, A.B. (2018). Slope Meets Lasso: Improved Oracle Bounds and Optimality. <i>Annals of Statistics</i>, 46(6B), pp. 3603-3642."
"authors" => array:3 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "BELLEC Pierre C."
]
2 => array:1 [
"name" => "TSYBAKOV Alexandre B."
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-28 10:48:10"
"publicationUrl" => "https://www.jstor.org/stable/26542913"
"publicationInfo" => array:3 [
"pages" => "3603-3642"
"volume" => "46"
"number" => "6B"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We show that two polynomial time methods, a Lasso estimator with adaptively chosen tuning parameter and a Slope estimator, adaptively achieve the exact minimax prediction and ℓ2 estimation rate (s/n)log(p/s) in high-dimensional linear regression on the class of s-sparse target vectors in Rp. This is done under the Restricted Eigenvalue (RE) condition for the Lasso and under a slightly more constraining assumption on the design for the Slope. The main results have the form of sharp oracle inequalities accounting for the model misspecification error. The minimax optimal bounds are also obtained for the ℓq estimation errors with 1≤q≤2 when the model is well-specified. The results are non-asymptotic, and hold both in probability and in expectation. The assumptions that we impose on the design are satisfied with high probability for a large class of random matrices with independent and possibly anisotropically distributed rows. We give a comparative analysis of conditions, under which oracle bounds for the Lasso and Slope estimators can be obtained. In particular, we show that several known conditions, such as the RE condition and the sparse eigenvalue condition are equivalent if the ℓ2-norms of regressors are uniformly bounded."
"en" => "We show that two polynomial time methods, a Lasso estimator with adaptively chosen tuning parameter and a Slope estimator, adaptively achieve the exact minimax prediction and ℓ2 estimation rate (s/n)log(p/s) in high-dimensional linear regression on the class of s-sparse target vectors in Rp. This is done under the Restricted Eigenvalue (RE) condition for the Lasso and under a slightly more constraining assumption on the design for the Slope. The main results have the form of sharp oracle inequalities accounting for the model misspecification error. The minimax optimal bounds are also obtained for the ℓq estimation errors with 1≤q≤2 when the model is well-specified. The results are non-asymptotic, and hold both in probability and in expectation. The assumptions that we impose on the design are satisfied with high probability for a large class of random matrices with independent and possibly anisotropically distributed rows. We give a comparative analysis of conditions, under which oracle bounds for the Lasso and Slope estimators can be obtained. In particular, we show that several known conditions, such as the RE condition and the sparse eigenvalue condition are equivalent if the ℓ2-norms of regressors are uniformly bounded."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
7 => Essec\Faculty\Model\Contribution {#2230
#_index: "academ_contributions"
#_id: "13931"
#_source: array:18 [
"id" => "13931"
"slug" => "optimal-robust-mean-and-location-estimation-via-convex-programs-with-respect-to-any-pseudo-norms"
"yearMonth" => "2022-08"
"year" => "2022"
"title" => "Optimal robust mean and location estimation via convex programs with respect to any pseudo-norms"
"description" => "DEPERSIN, J. et LECUE, G. (2022). Optimal robust mean and location estimation via convex programs with respect to any pseudo-norms. <i>Probability Theory and Related Fields</i>, 183(3-4), pp. 997-1025."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "DEPERSIN Jules"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-29 01:00:41"
"publicationUrl" => "https://link.springer.com/article/10.1007/s00440-022-01127-y"
"publicationInfo" => array:3 [
"pages" => "997-1025"
"volume" => "183"
"number" => "3-4"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
8 => Essec\Faculty\Model\Contribution {#2231
#_index: "academ_contributions"
#_id: "13932"
#_source: array:18 [
"id" => "13932"
"slug" => "learning-with-semi-definite-programming-statistical-bounds-based-on-fixed-point-analysis-and-excess-risk-curvature"
"yearMonth" => "2021-09"
"year" => "2021"
"title" => "Learning with semi-definite programming: statistical bounds based on fixed point analysis and excess risk curvature"
"description" => "CHRÉTIEN, S., CUCURINGU, M., LECUE, G. et NEIRAC, L. (2021). Learning with semi-definite programming: statistical bounds based on fixed point analysis and excess risk curvature. <i>Journal of Machine Learning Research</i>, 22(230), pp. 1-64."
"authors" => array:4 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "CHRÉTIEN Stéphane"
]
2 => array:1 [
"name" => "CUCURINGU Mihai"
]
3 => array:1 [
"name" => "NEIRAC Lucie"
]
]
"ouvrage" => ""
"keywords" => array:4 [
0 => "Semi-Definite Programming"
1 => "Statistical Learning"
2 => "Group Synchronization"
3 => "Signed Clustering"
]
"updatedAt" => "2023-04-18 14:32:57"
"publicationUrl" => "http://jmlr.org/papers/v22/21-0021.html"
"publicationInfo" => array:3 [
"pages" => "1-64"
"volume" => "22"
"number" => "230"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "Many statistical learning problems have recently been shown to be amenable to Semi-Definite Programming (SDP), with community detection and clustering in Gaussian mixture models as the most striking instances Javanmard et al. (2016). Given the growing range of applications of SDP-based techniques to machine learning problems, and the rapid progress in the design of efficient algorithms for solving SDPs, an intriguing question is to understand how the recent advances from empirical process theory and Statistical Learning Theory can be leveraged for providing a precise statistical analysis of SDP estimators. In the present paper, we borrow cutting edge techniques and concepts from the Learning Theory literature, such as fixed point equations and excess risk curvature arguments, which yield general estimation and prediction results for a wide class of SDP estimators. From this perspective, we revisit some classical results in community detection from Guédon and Vershynin (2016) and Fei and Chen (2019), and we obtain statistical guarantees for SDP estimators used in signed clustering, angular group synchronization (for both multiplicative and additive models) and MAX-CUT. Our theoretical findings are complemented by numerical experiments for each of the three problems considered, showcasing the competitiveness of the SDP estimators."
"en" => "Many statistical learning problems have recently been shown to be amenable to Semi-Definite Programming (SDP), with community detection and clustering in Gaussian mixture models as the most striking instances Javanmard et al. (2016). Given the growing range of applications of SDP-based techniques to machine learning problems, and the rapid progress in the design of efficient algorithms for solving SDPs, an intriguing question is to understand how the recent advances from empirical process theory and Statistical Learning Theory can be leveraged for providing a precise statistical analysis of SDP estimators. In the present paper, we borrow cutting edge techniques and concepts from the Learning Theory literature, such as fixed point equations and excess risk curvature arguments, which yield general estimation and prediction results for a wide class of SDP estimators. From this perspective, we revisit some classical results in community detection from Guédon and Vershynin (2016) and Fei and Chen (2019), and we obtain statistical guarantees for SDP estimators used in signed clustering, angular group synchronization (for both multiplicative and additive models) and MAX-CUT. Our theoretical findings are complemented by numerical experiments for each of the three problems considered, showcasing the competitiveness of the SDP estimators."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
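Among the SDP estimators covered above is MAX-CUT. As an illustration only, here is the standard semidefinite relaxation of MAX-CUT written with cvxpy (an assumed dependency; toy instance, not the paper's experiments):

```python
# Sketch: the standard MAX-CUT semidefinite relaxation, one of the SDP
# estimators studied above. Assumes cvxpy is installed; illustrative only.
import cvxpy as cp
import numpy as np

rng = np.random.default_rng(5)
n = 12
A = (rng.random((n, n)) < 0.4).astype(float)          # random graph
A = np.triu(A, 1); A = A + A.T                        # symmetric adjacency
L = np.diag(A.sum(axis=1)) - A                        # graph Laplacian

X = cp.Variable((n, n), PSD=True)                     # relaxation of x x^T
prob = cp.Problem(cp.Maximize(cp.trace(L @ X) / 4),   # cut value is x^T L x / 4
                  [cp.diag(X) == 1])
prob.solve()
print("SDP upper bound on the max cut:", prob.value)
```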
9 => Essec\Faculty\Model\Contribution {#2232
#_index: "academ_contributions"
#_id: "13934"
#_source: array:18 [
"id" => "13934"
"slug" => "robust-high-dimensional-learning-for-lipschitz-and-convex-losses"
"yearMonth" => "2021-11"
"year" => "2021"
"title" => "Robust high dimensional learning for Lipschitz and convex losses"
"description" => "CHINOT, G., LECUE, G. et LERASLE, M. (2021). Robust high dimensional learning for Lipschitz and convex losses. <i>Journal of Machine Learning Research</i>, (233), pp. 1-47."
"authors" => array:3 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "CHINOT Geoffrey"
]
2 => array:1 [
"name" => "LERASLE Matthieu"
]
]
"ouvrage" => ""
"keywords" => array:8 [
0 => "Robust Learning"
1 => "Lipschtiz and convex loss functions"
2 => "sparsity bounds"
3 => "Rademacher complexity bounds"
4 => "LASSO"
5 => "SLOPE"
6 => "Group LASSO"
7 => "Total Variation"
]
"updatedAt" => "2023-04-18 14:34:16"
"publicationUrl" => "http://jmlr.org/papers/v21/19-585.html"
"publicationInfo" => array:3 [
"pages" => "1-47"
"volume" => ""
"number" => "233"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "We establish risk bounds for Regularized Empirical Risk Minimizers (RERM) when the loss is Lipschitz and convex and the regularization function is a norm. In a first part, we obtain these results in the i.i.d. setup under subgaussian assumptions on the design. In a second part, a more general framework where the design might have heavier tails and data may be corrupted by outliers both in the design and the response variables is considered. In this situation, RERM performs poorly in general. We analyse an alternative procedure based on median-of-means principles and called “minmax MOM”. We show optimal subgaussian deviation rates for these estimators in the relaxed setting. The main results are meta-theorems allowing a wide-range of applications to various problems in learning theory. To show a non-exhaustive sample of these potential applications, it is applied to classification problems with logistic loss functions regularized by LASSO and SLOPE, to regression problems with Huber loss regularized by Group LASSO and Total Variation. Another advantage of the minmax MOM formulation is that it suggests a systematic way to slightly modify descent based algorithms used in high-dimensional statistics to make them robust to outliers. We illustrate this principle in a Simulations section where a “ minmax MOM” version of classical proximal descent algorithms are turned into robust to outliers algorithms."
"en" => "We establish risk bounds for Regularized Empirical Risk Minimizers (RERM) when the loss is Lipschitz and convex and the regularization function is a norm. In a first part, we obtain these results in the i.i.d. setup under subgaussian assumptions on the design. In a second part, a more general framework where the design might have heavier tails and data may be corrupted by outliers both in the design and the response variables is considered. In this situation, RERM performs poorly in general. We analyse an alternative procedure based on median-of-means principles and called “minmax MOM”. We show optimal subgaussian deviation rates for these estimators in the relaxed setting. The main results are meta-theorems allowing a wide-range of applications to various problems in learning theory. To show a non-exhaustive sample of these potential applications, it is applied to classification problems with logistic loss functions regularized by LASSO and SLOPE, to regression problems with Huber loss regularized by Group LASSO and Total Variation. Another advantage of the minmax MOM formulation is that it suggests a systematic way to slightly modify descent based algorithms used in high-dimensional statistics to make them robust to outliers. We illustrate this principle in a Simulations section where a “ minmax MOM” version of classical proximal descent algorithms are turned into robust to outliers algorithms."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
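The last point of the abstract above, turning descent algorithms into outlier-robust ones, can be sketched as follows: at each iteration, compute the gradient on the block of data whose loss is the median across blocks. A toy numpy version under assumed synthetic data (illustrative, not the authors' implementation):

```python
# Sketch of the robustification principle described above: replace the full
# gradient in a descent step by the gradient computed on the median block
# (a "minmax MOM"-style version of gradient descent). Illustrative only.
import numpy as np

def mom_gradient_step(b, X, y, K, lr=0.1):
    """One least-squares gradient step using the block whose loss is the median."""
    idx = np.array_split(np.random.permutation(len(y)), K)
    losses = [((y[i] - X[i] @ b) ** 2).mean() for i in idx]
    med = idx[int(np.argsort(losses)[len(losses) // 2])]   # median-loss block
    grad = -2 * X[med].T @ (y[med] - X[med] @ b) / len(med)
    return b - lr * grad

rng = np.random.default_rng(6)
X = rng.standard_normal((600, 5))
beta = np.arange(1.0, 6.0)
y = X @ beta + rng.standard_normal(600)
y[:10] = 1e5                                          # gross outliers
b = np.zeros(5)
for _ in range(300):
    b = mom_gradient_step(b, X, y, K=30)
print(np.round(b, 2))                                 # close to [1 2 3 4 5]
```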
10 => Essec\Faculty\Model\Contribution {#2233
#_index: "academ_contributions"
#_id: "14049"
#_source: array:18 [
"id" => "14049"
"slug" => "on-the-robustness-to-adversarial-corruption-and-to-heavy-tailed-data-of-the-stahel-donoho-median-of-means"
"yearMonth" => "2023-06"
"year" => "2023"
"title" => "On the robustness to adversarial corruption and to heavy-tailed data of the Stahel–Donoho median of means"
"description" => "DEPERSIN, J. et LECUE, G. (2023). On the robustness to adversarial corruption and to heavy-tailed data of the Stahel–Donoho median of means. <i>Information and Inference: A Journal of the IMA</i>, 12(2), pp. 814-850."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "DEPERSIN Jules"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-06-13 17:02:28"
"publicationUrl" => "https://doi.org/10.1093/imaiai/iaac026"
"publicationInfo" => array:3 [
"pages" => "814-850"
"volume" => "12"
"number" => "2"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
We consider median of means (MOM) versions of the Stahel–Donoho outlyingness (SDO) [ 23, 66] and of the Median Absolute Deviation (MAD) [ 30] functions to construct subgaussian estimators of a mean vector under adversarial contamination and heavy-tailed data. We develop a single analysis of the MOM version of the SDO which covers all cases ranging from the Gaussian case to the L2\n
case. It is based on isomorphic and almost isometric properties of the MOM versions of SDO and MAD. This analysis also covers cases where the mean does not even exist but a location parameter does; in those cases we still recover the same subgaussian rates and the same price for adversarial contamination even though there is not even a first moment.
"""
"en" => """
We consider median of means (MOM) versions of the Stahel–Donoho outlyingness (SDO) [ 23, 66] and of the Median Absolute Deviation (MAD) [ 30] functions to construct subgaussian estimators of a mean vector under adversarial contamination and heavy-tailed data. We develop a single analysis of the MOM version of the SDO which covers all cases ranging from the Gaussian case to the L2\n
case. It is based on isomorphic and almost isometric properties of the MOM versions of SDO and MAD. This analysis also covers cases where the mean does not even exist but a location parameter does; in those cases we still recover the same subgaussian rates and the same price for adversarial contamination even though there is not even a first moment.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
11 => Essec\Faculty\Model\Contribution {#2234
#_index: "academ_contributions"
#_id: "14362"
#_source: array:18 [
"id" => "14362"
"slug" => "a-geometrical-viewpoint-on-the-benign-overfitting-property-of-the-minimum-l2-norm-interpolant-estimator"
"yearMonth" => "2023-09"
"year" => "2023"
"title" => "A Geometrical Viewpoint on the Benign Overfitting Property of the Minimum l2-norm Interpolant Estimator."
"description" => "LECUE, G. et SHANG, Z. (2023). A Geometrical Viewpoint on the Benign Overfitting Property of the Minimum l2-norm Interpolant Estimator. Dans: 2023 Mini-Workshop: Interpolation and Over-parameterization in Statistics and Machine Learning. Oberwolfach."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "SHANG Zong"
]
]
"ouvrage" => "2023 Mini-Workshop: Interpolation and Over-parameterization in Statistics and Machine Learning"
"keywords" => []
"updatedAt" => "2024-03-20 16:06:01"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
12 => Essec\Faculty\Model\Contribution {#2235
#_index: "academ_contributions"
#_id: "14572"
#_source: array:18 [
"id" => "14572"
"slug" => "learning-with-a-linear-loss-function-excess-risk-and-estimation-bounds-for-erm-minmax-mom-and-their-regularized-versions-applications-to-robustness-in-sparse-pca"
"yearMonth" => "2023-12"
"year" => "2023"
"title" => "Learning with a linear loss function. Excess risk and estimation bounds for ERM, minmax MOM and their regularized versions. Applications to robustness in sparse PCA."
"description" => "LECUE, G. et NEIRAC, L. (2023). Learning with a linear loss function. Excess risk and estimation bounds for ERM, minmax MOM and their regularized versions. Applications to robustness in sparse PCA. Dans: 2024 Meeting in Mathematical Statistics Conference, CIRM. Marseille."
"authors" => array:2 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "NEIRAC L"
]
]
"ouvrage" => "2024 Meeting in Mathematical Statistics Conference, CIRM"
"keywords" => []
"updatedAt" => "2023-10-27 01:01:26"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T13:21:44.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 6.654894
+"parent": null
}
]
"avatar" => "https://faculty.essec.edu/wp-content/uploads/avatars/B00806953.jpg"
"contributionCounts" => 13
"personalLinks" => array:2 [
0 => "<a href="https://orcid.org/0000-0002-6391-8746" target="_blank">ORCID</a>"
1 => "<a href="https://scholar.google.com/citations?user=kng-DfIAAAAJ&hl=fr" target="_blank">Google scholar</a>"
]
"docTitle" => "Guillaume LECUE"
"docSubtitle" => "Professor"
"docDescription" => "Department: Information Systems, Data Analytics and Operations<br>Campus de Cergy"
"docType" => "cv"
"docPreview" => "<img src="https://faculty.essec.edu/wp-content/uploads/avatars/B00806953.jpg"><span><span>Guillaume LECUE</span><span>B00806953</span></span>"
"academ_cv_info" => ""
]
#_index: "academ_cv"
+lang: "en"
+"_type": "_doc"
+"_score": 5.0369525
+"parent": null
}