Essec\Faculty\Model\Contribution {#2233
#_index: "academ_contributions"
#_id: "13934"
#_source: array:26 [
"id" => "13934"
"slug" => "robust-high-dimensional-learning-for-lipschitz-and-convex-losses"
"yearMonth" => "2021-11"
"year" => "2021"
"title" => "Robust high dimensional learning for Lipschitz and convex losses"
"description" => "CHINOT, G., LECUE, G. et LERASLE, M. (2021). Robust high dimensional learning for Lipschitz and convex losses. <i>Journal of Machine Learning Research</i>, (233), pp. 1-47."
"authors" => array:3 [
0 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
1 => array:1 [
"name" => "CHINOT Geoffrey"
]
2 => array:1 [
"name" => "LERASLE Matthieu"
]
]
"ouvrage" => ""
"keywords" => array:8 [
0 => "Robust Learning"
1 => "Lipschtiz and convex loss functions"
2 => "sparsity bounds"
3 => "Rademacher complexity bounds"
4 => "LASSO"
5 => "SLOPE"
6 => "Group LASSO"
7 => "Total Variation"
]
"updatedAt" => "2023-04-18 14:34:16"
"publicationUrl" => "http://jmlr.org/papers/v21/19-585.html"
"publicationInfo" => array:3 [
"pages" => "1-47"
"volume" => ""
"number" => "233"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "We establish risk bounds for Regularized Empirical Risk Minimizers (RERM) when the loss is Lipschitz and convex and the regularization function is a norm. In a first part, we obtain these results in the i.i.d. setup under subgaussian assumptions on the design. In a second part, a more general framework where the design might have heavier tails and data may be corrupted by outliers both in the design and the response variables is considered. In this situation, RERM performs poorly in general. We analyse an alternative procedure based on median-of-means principles and called “minmax MOM”. We show optimal subgaussian deviation rates for these estimators in the relaxed setting. The main results are meta-theorems allowing a wide-range of applications to various problems in learning theory. To show a non-exhaustive sample of these potential applications, it is applied to classification problems with logistic loss functions regularized by LASSO and SLOPE, to regression problems with Huber loss regularized by Group LASSO and Total Variation. Another advantage of the minmax MOM formulation is that it suggests a systematic way to slightly modify descent based algorithms used in high-dimensional statistics to make them robust to outliers. We illustrate this principle in a Simulations section where a “ minmax MOM” version of classical proximal descent algorithms are turned into robust to outliers algorithms."
"en" => "We establish risk bounds for Regularized Empirical Risk Minimizers (RERM) when the loss is Lipschitz and convex and the regularization function is a norm. In a first part, we obtain these results in the i.i.d. setup under subgaussian assumptions on the design. In a second part, a more general framework where the design might have heavier tails and data may be corrupted by outliers both in the design and the response variables is considered. In this situation, RERM performs poorly in general. We analyse an alternative procedure based on median-of-means principles and called “minmax MOM”. We show optimal subgaussian deviation rates for these estimators in the relaxed setting. The main results are meta-theorems allowing a wide-range of applications to various problems in learning theory. To show a non-exhaustive sample of these potential applications, it is applied to classification problems with logistic loss functions regularized by LASSO and SLOPE, to regression problems with Huber loss regularized by Group LASSO and Total Variation. Another advantage of the minmax MOM formulation is that it suggests a systematic way to slightly modify descent based algorithms used in high-dimensional statistics to make them robust to outliers. We illustrate this principle in a Simulations section where a “ minmax MOM” version of classical proximal descent algorithms are turned into robust to outliers algorithms."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-12-22T08:21:46.000Z"
"docTitle" => "Robust high dimensional learning for Lipschitz and convex losses"
"docSurtitle" => "Articles"
"authorNames" => "<a href="/cv/lecue-guillaume">LECUE Guillaume</a>, CHINOT Geoffrey, LERASLE Matthieu"
"docDescription" => "<span class="document-property-authors">LECUE Guillaume, CHINOT Geoffrey, LERASLE Matthieu</span><br><span class="document-property-authors_fields">Systèmes d'Information, Data Analytics et Opérations</span> | <span class="document-property-year">2021</span>"
"keywordList" => "<a href="#">Robust Learning</a>, <a href="#">Lipschtiz and convex loss functions</a>, <a href="#">sparsity bounds</a>, <a href="#">Rademacher complexity bounds</a>, <a href="#">LASSO</a>, <a href="#">SLOPE</a>, <a href="#">Group LASSO</a>, <a href="#">Total Variation</a>"
"docPreview" => "<b>Robust high dimensional learning for Lipschitz and convex losses</b><br><span>2021-11 | Articles </span>"
"docType" => "research"
"publicationLink" => "<a href="http://jmlr.org/papers/v21/19-585.html" target="_blank">Robust high dimensional learning for Lipschitz and convex losses</a>"
]
+lang: "fr"
+"_type": "_doc"
+"_score": 8.725183
+"parent": null
}
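
The abstract above describes how minmax MOM principles can be grafted onto descent-based algorithms to make them robust to outliers: split the sample into blocks, evaluate the empirical loss on each block, and perform the gradient/proximal step using only the block achieving the median loss. The sketch below is only an illustration of that general idea under assumed choices (l1-regularized logistic loss, NumPy, and the hypothetical helper names soft_threshold and mom_proximal_gradient); it is not the authors' implementation from the paper.

import numpy as np

def soft_threshold(x, t):
    # Proximal operator of t * ||.||_1 (the LASSO regularizer).
    return np.sign(x) * np.maximum(np.abs(x) - t, 0.0)

def mom_proximal_gradient(X, y, n_blocks=10, lam=0.1, step=0.01, n_iter=500, seed=None):
    # Illustrative MOM-ified proximal gradient descent for l1-regularized
    # logistic regression. At each iteration the data are randomly split into
    # n_blocks blocks; the step uses only the block whose empirical loss is
    # the median, which is the usual way MOM ideas are injected into descent
    # algorithms to gain robustness to outliers.
    rng = np.random.default_rng(seed)
    n, d = X.shape
    w = np.zeros(d)
    for _ in range(n_iter):
        blocks = np.array_split(rng.permutation(n), n_blocks)
        # Logistic loss of the current iterate on each block.
        margins = y * (X @ w)
        losses = [np.logaddexp(0.0, -margins[b]).mean() for b in blocks]
        median_block = blocks[np.argsort(losses)[len(losses) // 2]]
        Xb, yb = X[median_block], y[median_block]
        # Gradient of the logistic loss computed on the median block only.
        p = 1.0 / (1.0 + np.exp(yb * (Xb @ w)))
        grad = -(Xb * (yb * p)[:, None]).mean(axis=0)
        # Proximal (soft-thresholding) step for the l1 penalty.
        w = soft_threshold(w - step * grad, step * lam)
    return w

# Toy usage on synthetic data with labels in {-1, +1}:
#   rng = np.random.default_rng(0)
#   X = rng.standard_normal((500, 50))
#   y = np.sign(X[:, 0] - X[:, 1] + 0.1 * rng.standard_normal(500))
#   w_hat = mom_proximal_gradient(X, y, n_blocks=10, lam=0.05)

Using the median block rather than the full sample is what makes a single corrupted observation unable to dominate the update, at the cost of a noisier gradient estimate; the number of blocks trades off robustness against statistical efficiency.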