Essec\Faculty\Model\Profile {#2216
#_id: "B00809923"
#_source: array:40 [
"bid" => "B00809923"
"academId" => "32281"
"slug" => "alquier-pierre"
"fullName" => "Pierre ALQUIER"
"lastName" => "ALQUIER"
"firstName" => "Pierre"
"title" => array:2 [
"fr" => "Professeur"
"en" => "Professor"
]
"email" => "b00809923@essec.edu"
"status" => "ACTIF"
"campus" => "Campus de Singapour"
"departments" => []
"phone" => ""
"sites" => []
"facNumber" => "32281"
"externalCvUrl" => "https://faculty.essec.edu/en/cv/alquier-pierre/pdf"
"googleScholarUrl" => "https://scholar.google.com/citations?user=ngkCAJYAAAAJ"
"facOrcId" => "https://orcid.org/0000-0003-4249-7337"
"career" => array:6 [
0 => Essec\Faculty\Model\CareerItem {#2255
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2023-01-02"
"endDate" => null
"isInternalPosition" => true
"type" => array:2 [
"fr" => "Positions académiques principales"
"en" => "Full-time academic appointments"
]
"label" => array:2 [
"fr" => "Professeur"
"en" => "Professor"
]
"institution" => array:2 [
"fr" => "ESSEC Business School"
"en" => "ESSEC Business School"
]
"country" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
1 => Essec\Faculty\Model\CareerItem {#2256
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2019-08-01"
"endDate" => "2022-12-31"
"isInternalPosition" => true
"type" => array:2 [
"en" => "Other appointments"
"fr" => "Autres positions"
]
"label" => array:2 [
"fr" => "Chargé de recherches"
"en" => "Research scientist"
]
"institution" => array:2 [
"fr" => "RIKEN"
"en" => "RIKEN"
]
"country" => array:2 [
"fr" => "Japon"
"en" => "Japan"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
2 => Essec\Faculty\Model\CareerItem {#2257
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2014-09-15"
"endDate" => "2019-07-31"
"isInternalPosition" => true
"type" => array:2 [
"en" => "Other Academic Appointments"
"fr" => "Autres positions académiques"
]
"label" => array:2 [
"fr" => "Professeur"
"en" => "Professor"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
3 => Essec\Faculty\Model\CareerItem {#2258
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2012-09-01"
"endDate" => "2014-08-31"
"isInternalPosition" => true
"type" => array:2 [
"en" => "Other Academic Appointments"
"fr" => "Autres positions académiques"
]
"label" => array:2 [
"fr" => "Lecturer"
"en" => "Lecturer"
]
"institution" => array:2 [
"fr" => "University College of Dublin"
"en" => "University College of Dublin"
]
"country" => array:2 [
"fr" => "Irlande"
"en" => "Ireland"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
4 => Essec\Faculty\Model\CareerItem {#2259
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2007-09-01"
"endDate" => "2012-08-31"
"isInternalPosition" => true
"type" => array:2 [
"en" => "Other Academic Appointments"
"fr" => "Autres positions académiques"
]
"label" => array:2 [
"fr" => "Maître de Conférences"
"en" => "Assistant Professor"
]
"institution" => array:2 [
"fr" => "Université Paris Diderot (Paris VII)"
"en" => "Université Paris Diderot (Paris VII)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
5 => Essec\Faculty\Model\CareerItem {#2260
#_index: null
#_id: null
#_source: array:7 [
"startDate" => "2006-09-01"
"endDate" => "2007-08-31"
"isInternalPosition" => true
"type" => array:2 [
"en" => "Other Academic Appointments"
"fr" => "Autres positions académiques"
]
"label" => array:2 [
"fr" => "A.T.E.R."
"en" => "Teaching and Research Assistant"
]
"institution" => array:2 [
"fr" => "Université Paris-Dauphine, PSL"
"en" => "Université Paris-Dauphine, PSL"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"diplomes" => array:4 [
0 => Essec\Faculty\Model\Diplome {#2218
#_index: null
#_id: null
#_source: array:6 [
"diplome" => "DIPLOMA"
"type" => array:2 [
"fr" => "Diplômes"
"en" => "Diplomas"
]
"year" => "2013"
"label" => array:2 [
"en" => "Habilitation à diriger des recherches"
"fr" => "Habilitation à diriger des recherches"
]
"institution" => array:2 [
"fr" => "Université Pierre et Marie Curie (UPMC)"
"en" => "Université Pierre et Marie Curie (UPMC)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
1 => Essec\Faculty\Model\Diplome {#2220
#_index: null
#_id: null
#_source: array:6 [
"diplome" => "DIPLOMA"
"type" => array:2 [
"fr" => "Diplômes"
"en" => "Diplomas"
]
"year" => "2006"
"label" => array:2 [
"en" => "PhD (mathematical statistics)"
"fr" => "PhD (mathematical statistics)"
]
"institution" => array:2 [
"fr" => "Université Pierre et Marie Curie (UPMC)"
"en" => "Université Pierre et Marie Curie (UPMC)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
2 => Essec\Faculty\Model\Diplome {#2217
#_index: null
#_id: null
#_source: array:6 [
"diplome" => "DIPLOMA"
"type" => array:2 [
"fr" => "Diplômes"
"en" => "Diplomas"
]
"year" => "2003"
"label" => array:2 [
"en" => "MSc in Probability Theory and Statistics"
"fr" => "MSc in Probability Theory and Statistics"
]
"institution" => array:2 [
"fr" => "Université Pierre et Marie Curie (UPMC)"
"en" => "Université Pierre et Marie Curie (UPMC)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
3 => Essec\Faculty\Model\Diplome {#2221
#_index: null
#_id: null
#_source: array:6 [
"diplome" => "DIPLOMA"
"type" => array:2 [
"fr" => "Diplômes"
"en" => "Diplomas"
]
"year" => "2003"
"label" => array:2 [
"en" => "Diplôme de statisticien-économiste"
"fr" => "Diplôme de statisticien-économiste"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"bio" => array:2 [
"fr" => null
"en" => "<p>Professor (Full), ESSEC Business School</p>"
]
"department" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"site" => array:2 [
"fr" => "https://pierrealquier.github.io/"
"en" => "http://www.linkedin.com/in/pierre-alquier-601453159/"
]
"industrrySectors" => array:2 [
"fr" => null
"en" => null
]
"researchFields" => array:2 [
"fr" => "Analyse des données statistiques - Théorie des probabilités et statistiques"
"en" => "Statistical Data Analysis - Probability Theory & Mathematical Statistics"
]
"teachingFields" => array:2 [
"fr" => "Analyse des données statistiques - Théorie des probabilités et statistiques"
"en" => "Statistical Data Analysis - Probability Theory & Mathematical Statistics"
]
"distinctions" => array:1 [
0 => Essec\Faculty\Model\Distinction {#2261
#_index: null
#_id: null
#_source: array:6 [
"date" => "2019-11-17"
"label" => array:2 [
"fr" => "Best Paper Award"
"en" => "Best Paper Award"
]
"type" => array:2 [
"fr" => "Prix"
"en" => "Awards"
]
"tri" => " 1 "
"institution" => array:2 [
"fr" => "Asian Conference on Machine Learning"
"en" => "Asian Conference on Machine Learning"
]
"country" => array:2 [
"fr" => "Japon"
"en" => "Japan"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"teaching" => array:5 [
0 => Essec\Faculty\Model\TeachingItem {#2250
#_index: null
#_id: null
#_source: array:7 [
"startDate" => null
"endDate" => "2023"
"program" => null
"label" => array:2 [
"fr" => """
Quelques contributions à la statistique des modèles dynamiques\n
aléatoires
"""
"en" => """
Quelques contributions à la statistique des modèles dynamiques\n
aléatoires
"""
]
"type" => array:2 [
"fr" => "Co-directeur de thèse"
"en" => "Thesis co-director"
]
"institution" => array:2 [
"fr" => "Université Paris X Nanterre"
"en" => "Université Paris X Nanterre"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
}
1 => Essec\Faculty\Model\TeachingItem {#2251
#_index: null
#_id: null
#_source: array:7 [
"startDate" => null
"endDate" => "2020"
"program" => null
"label" => array:2 [
"fr" => "Contributions à l’étude théorique de l’inférence variationelle et `a la robustesse"
"en" => "Contributions à l’étude théorique de l’inférence variationelle et `a la robustesse"
]
"type" => array:2 [
"fr" => "Directeur de thèse"
"en" => "Thesis director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
}
2 => Essec\Faculty\Model\TeachingItem {#2252
#_index: null
#_id: null
#_source: array:7 [
"startDate" => null
"endDate" => "2019"
"program" => null
"label" => array:2 [
"fr" => "Analyse de données volumineuses dans le domaine du transport"
"en" => "Analyse de données volumineuses dans le domaine du transport"
]
"type" => array:2 [
"fr" => "Directeur de thèse"
"en" => "Thesis director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
}
3 => Essec\Faculty\Model\TeachingItem {#2253
#_index: null
#_id: null
#_source: array:7 [
"startDate" => null
"endDate" => "2017"
"program" => null
"label" => array:2 [
"fr" => "Étude théorique de quelques procédures statistiques pour le traitement de données complexes"
"en" => "Étude théorique de quelques procédures statistiques pour le traitement de données complexes"
]
"type" => array:2 [
"fr" => "Co-directeur de thèse"
"en" => "Thesis co-director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
}
4 => Essec\Faculty\Model\TeachingItem {#2254
#_index: null
#_id: null
#_source: array:7 [
"startDate" => null
"endDate" => "2017"
"program" => null
"label" => array:2 [
"fr" => "Estimation PAC-Bayésienne de matrices de faible rang"
"en" => "Estimation PAC-Bayésienne de matrices de faible rang"
]
"type" => array:2 [
"fr" => "Directeur de thèse"
"en" => "Thesis director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
}
]
"otherActivities" => array:30 [
0 => Essec\Faculty\Model\ExtraActivity {#2215
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2020-01-01"
"endDate" => null
"year" => null
"uuid" => "102"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Co-direction d'une revue - Co-rédacteur en chef"
"en" => "Senior or Associate Editor"
]
"label" => array:2 [
"fr" => "Action Editor: Journal of Machine Learning Research"
"en" => "Action Editor: Journal of Machine Learning Research"
]
"institution" => array:2 [
"fr" => null
"en" => null
]
"country" => array:2 [
"fr" => null
"en" => null
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
1 => Essec\Faculty\Model\ExtraActivity {#2219
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-01-01"
"endDate" => "2024-10-31"
"year" => null
"uuid" => "102"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Co-direction d'une revue - Co-rédacteur en chef"
"en" => "Senior or Associate Editor"
]
"label" => array:2 [
"fr" => "Action Editor: Transactions in Machine Learning Research"
"en" => "Action Editor: Transactions in Machine Learning Research"
]
"institution" => array:2 [
"fr" => null
"en" => null
]
"country" => array:2 [
"fr" => null
"en" => null
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
2 => Essec\Faculty\Model\ExtraActivity {#2222
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2020-01-01"
"endDate" => "2022-12-31"
"year" => null
"uuid" => "103"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'un comité de lecture"
"en" => "Editorial Board Membership"
]
"label" => array:2 [
"fr" => "Topic Advisory Panel: Entropy"
"en" => "Topic Advisory Panel: Entropy"
]
"institution" => array:2 [
"fr" => null
"en" => null
]
"country" => array:2 [
"fr" => null
"en" => null
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
3 => Essec\Faculty\Model\ExtraActivity {#2223
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-06-11"
"endDate" => "2024-06-11"
"year" => null
"uuid" => "201"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Organisation d'une conférence ou d'un séminaire"
"en" => "Organization of a conference or a seminar"
]
"label" => array:2 [
"fr" => "CNRS@CREATE -- ESSEC APAC workshop"
"en" => "CNRS@CREATE -- ESSEC APAC workshop"
]
"institution" => array:2 [
"fr" => "ESSEC Business School"
"en" => "ESSEC Business School"
]
"country" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
4 => Essec\Faculty\Model\ExtraActivity {#2224
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-06-10"
"endDate" => "2024-06-11"
"year" => null
"uuid" => "201"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Organisation d'une conférence ou d'un séminaire"
"en" => "Organization of a conference or a seminar"
]
"label" => array:2 [
"fr" => "Approximate Inference in Theory and Practice Conference"
"en" => "Approximate Inference in Theory and Practice Conference"
]
"institution" => array:2 [
"fr" => "Institut Henri Poincaré"
"en" => "Institut Henri Poincaré"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
5 => Essec\Faculty\Model\ExtraActivity {#2225
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-06-30"
"endDate" => "2024-07-03"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "COLT 2024: senior PC member"
"en" => "COLT 2024: senior PC member"
]
"institution" => array:2 [
"fr" => "Association for Computational Learning (ACL)"
"en" => "Association for Computational Learning (ACL)"
]
"country" => array:2 [
"fr" => "Canada"
"en" => "Canada"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
6 => Essec\Faculty\Model\ExtraActivity {#2226
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-02-25"
"endDate" => "2024-02-28"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ALT 2024: senior PC member"
"en" => "ALT 2024: senior PC member"
]
"institution" => array:2 [
"fr" => "Association for Algorithmic Learning Theory (AALT)"
"en" => "Association for Algorithmic Learning Theory (AALT)"
]
"country" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
7 => Essec\Faculty\Model\ExtraActivity {#2227
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2023-07-12"
"endDate" => "2023-07-15"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "COLT 2023: senior PC member"
"en" => "COLT 2023: senior PC member"
]
"institution" => array:2 [
"fr" => "Association for Computational Learning (ACL)"
"en" => "Association for Computational Learning (ACL)"
]
"country" => array:2 [
"fr" => "Inde"
"en" => "India"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
8 => Essec\Faculty\Model\ExtraActivity {#2228
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2023-02-20"
"endDate" => "2023-02-23"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ALT 2023: senior PC member"
"en" => "ALT 2023: senior PC member"
]
"institution" => array:2 [
"fr" => "Association for Algorithmic Learning Theory (AALT)"
"en" => "Association for Algorithmic Learning Theory (AALT)"
]
"country" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
9 => Essec\Faculty\Model\ExtraActivity {#2229
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-08-27"
"endDate" => "2024-08-30"
"year" => null
"uuid" => "201"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Organisation d'une conférence ou d'un séminaire"
"en" => "Organization of a conference or a seminar"
]
"label" => array:2 [
"fr" => "The 13th Workshop on High Dimensional Data Analysis (HDDA-XIII)"
"en" => "The 13th Workshop on High Dimensional Data Analysis (HDDA-XIII)"
]
"institution" => array:2 [
"fr" => "ESSEC Business School"
"en" => "ESSEC Business School"
]
"country" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
10 => Essec\Faculty\Model\ExtraActivity {#2230
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-02-01"
"endDate" => null
"year" => null
"uuid" => "299"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Autre activité académique"
"en" => "Other academic activity"
]
"label" => array:2 [
"fr" => "arXiv moderator"
"en" => "arXiv moderator"
]
"institution" => array:2 [
"fr" => "arXiv"
"en" => "arXiv"
]
"country" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
11 => Essec\Faculty\Model\ExtraActivity {#2231
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2024-07-08"
"endDate" => "2024-08-02"
"year" => null
"uuid" => "201"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Organisation d'une conférence ou d'un séminaire"
"en" => "Organization of a conference or a seminar"
]
"label" => array:2 [
"fr" => "Interpretable Inference via Principled BNP Approaches in Biomedical Research and Beyond"
"en" => "Interpretable Inference via Principled BNP Approaches in Biomedical Research and Beyond"
]
"institution" => array:2 [
"fr" => "National University of Singapore"
"en" => "National University of Singapore"
]
"country" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
12 => Essec\Faculty\Model\ExtraActivity {#2232
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2023-11-11"
"endDate" => "2023-11-14"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ACML 2023: PC chair"
"en" => "ACML 2023: PC chair"
]
"institution" => array:2 [
"fr" => "Asian Conference on Machine Learning"
"en" => "Asian Conference on Machine Learning"
]
"country" => array:2 [
"fr" => "Turquie"
"en" => "Turkey"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
13 => Essec\Faculty\Model\ExtraActivity {#2233
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2014-01-01"
"endDate" => null
"year" => null
"uuid" => "204"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'une association académique"
"en" => "Member of an academic association"
]
"label" => array:2 [
"fr" => "Member"
"en" => "Member"
]
"institution" => array:2 [
"fr" => "IMS - Bernoulli Society"
"en" => "IMS - Bernoulli Society"
]
"country" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
14 => Essec\Faculty\Model\ExtraActivity {#2234
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-01-01"
"endDate" => null
"year" => null
"uuid" => "204"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'une association académique"
"en" => "Member of an academic association"
]
"label" => array:2 [
"fr" => "Member"
"en" => "Member"
]
"institution" => array:2 [
"fr" => "Société Française de Statistique (SFdS)"
"en" => "Société Française de Statistique (SFdS)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
15 => Essec\Faculty\Model\ExtraActivity {#2235
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-01-01"
"endDate" => null
"year" => null
"uuid" => "204"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'une association académique"
"en" => "Member of an academic association"
]
"label" => array:2 [
"fr" => "Member"
"en" => "Member"
]
"institution" => array:2 [
"fr" => "Société de Mathématiques Appliquées et Industrielles (SMAI)"
"en" => "Société de Mathématiques Appliquées et Industrielles (SMAI)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
16 => Essec\Faculty\Model\ExtraActivity {#2236
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-01-01"
"endDate" => null
"year" => null
"uuid" => "204"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'une association académique"
"en" => "Member of an academic association"
]
"label" => array:2 [
"fr" => "Member"
"en" => "Member"
]
"institution" => array:2 [
"fr" => "Société Mathématique de France (SMF)"
"en" => "Société Mathématique de France (SMF)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
17 => Essec\Faculty\Model\ExtraActivity {#2237
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-01-01"
"endDate" => null
"year" => null
"uuid" => "204"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Membre d'une association académique"
"en" => "Member of an academic association"
]
"label" => array:2 [
"fr" => "Member"
"en" => "Member"
]
"institution" => array:2 [
"fr" => "European Mathematical Society (EMS)"
"en" => "European Mathematical Society (EMS)"
]
"country" => array:2 [
"fr" => null
"en" => null
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
18 => Essec\Faculty\Model\ExtraActivity {#2238
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2025-07-01"
"endDate" => "2025-07-31"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ALT 2025: PC chair"
"en" => "ALT 2025: PC chair"
]
"institution" => array:2 [
"fr" => "Association for Algorithmic Learning Theory (AALT)"
"en" => "Association for Algorithmic Learning Theory (AALT)"
]
"country" => array:2 [
"fr" => "Italie"
"en" => "Italy"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
19 => Essec\Faculty\Model\ExtraActivity {#2239
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2016-05-09"
"endDate" => "2016-05-11"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "AISTATS 2016: publication chair"
"en" => "AISTATS 2016: publication chair"
]
"institution" => array:2 [
"fr" => "The Society for AI and Statistics"
"en" => "The Society for AI and Statistics"
]
"country" => array:2 [
"fr" => "Espagne"
"en" => "Spain"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
20 => Essec\Faculty\Model\ExtraActivity {#2240
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2020-02-08"
"endDate" => "2020-02-11"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ALT 2020: PC chair"
"en" => "ALT 2020: PC chair"
]
"institution" => array:2 [
"fr" => "Association for Algorithmic Learning Theory (AALT)"
"en" => "Association for Algorithmic Learning Theory (AALT)"
]
"country" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
21 => Essec\Faculty\Model\ExtraActivity {#2241
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2021-04-07"
"endDate" => "2021-04-09"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ITISE 2021: PC chair"
"en" => "ITISE 2021: PC chair"
]
"institution" => array:2 [
"fr" => "Universidad de Granada"
"en" => "Universidad de Granada"
]
"country" => array:2 [
"fr" => "Espagne"
"en" => "Spain"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
22 => Essec\Faculty\Model\ExtraActivity {#2242
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2021-12-07"
"endDate" => "2021-12-10"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "NeurIPS 2021: AC (area chair)"
"en" => "NeurIPS 2021: AC (area chair)"
]
"institution" => array:2 [
"fr" => "Neural Information Processing Systems foundation"
"en" => "Neural Information Processing Systems foundation"
]
"country" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
23 => Essec\Faculty\Model\ExtraActivity {#2243
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-03-28"
"endDate" => "2022-03-30"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "AISTATS 2022: AC (area chair)"
"en" => "AISTATS 2022: AC (area chair)"
]
"institution" => array:2 [
"fr" => "The Society for AI and Statistics"
"en" => "The Society for AI and Statistics"
]
"country" => array:2 [
"fr" => null
"en" => null
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
24 => Essec\Faculty\Model\ExtraActivity {#2244
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-07-02"
"endDate" => "2022-07-05"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "COLT 2022: PC chair"
"en" => "COLT 2022: PC chair"
]
"institution" => array:2 [
"fr" => "Association for Computational Learning (ACL)"
"en" => "Association for Computational Learning (ACL)"
]
"country" => array:2 [
"fr" => "Royaume-Uni"
"en" => "United Kingdom"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
25 => Essec\Faculty\Model\ExtraActivity {#2245
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2022-11-29"
"endDate" => "2022-12-01"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "NeurIPS 2022: AC (area chair)"
"en" => "NeurIPS 2022: AC (area chair)"
]
"institution" => array:2 [
"fr" => "Neural Information Processing Systems foundation"
"en" => "Neural Information Processing Systems foundation"
]
"country" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
26 => Essec\Faculty\Model\ExtraActivity {#2246
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2018-05-28"
"endDate" => "2018-06-01"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "JDS 2018: membre du comité scientifique"
"en" => "JDS 2018: scientific committee member"
]
"institution" => array:2 [
"fr" => "Société Française de Statistique (SFdS)"
"en" => "Société Française de Statistique (SFdS)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
27 => Essec\Faculty\Model\ExtraActivity {#2247
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2025-04-24"
"endDate" => "2025-04-28"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ICLR 2025: Workshop Chair"
"en" => "ICLR 2025: Workshop Chair"
]
"institution" => array:2 [
"fr" => "International Conference on Learning Representations (ICLR)"
"en" => "International Conference on Learning Representations (ICLR)"
]
"country" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
28 => Essec\Faculty\Model\ExtraActivity {#2248
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2025-06-16"
"endDate" => "2025-06-20"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "Bayes Comp 2025: Member of the Scientific program committee"
"en" => "Bayes Comp 2025: Member of the Scientific program committee"
]
"institution" => array:2 [
"fr" => "National University of Singapore"
"en" => "National University of Singapore"
]
"country" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
29 => Essec\Faculty\Model\ExtraActivity {#2249
#_index: null
#_id: null
#_source: array:9 [
"startDate" => "2025-07-15"
"endDate" => "2025-07-17"
"year" => null
"uuid" => "R1_101"
"type" => array:2 [
"fr" => "Activités de recherche"
"en" => "Research activities"
]
"subType" => array:2 [
"fr" => "Participation au comité scientifique d'une conférence ou reviewer pour une conférence"
"en" => "Participation in scientific commissions or reviewer for a conference"
]
"label" => array:2 [
"fr" => "ICML 2025: Senior Area Chair"
"en" => "ICML 2025: Senior Area Chair"
]
"institution" => array:2 [
"fr" => "ICML"
"en" => "ICML"
]
"country" => array:2 [
"fr" => "Canada"
"en" => "Canada"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"theses" => array:5 [
0 => Essec\Faculty\Model\These {#2262
#_index: null
#_id: null
#_source: array:9 [
"year" => "2019"
"startDate" => null
"endDate" => "2019"
"student" => "CAREL L."
"firstJob" => "Machine learning scientist, Expedia group"
"label" => array:2 [
"fr" => "Analyse de données volumineuses dans le domaine du transport"
"en" => "Big data analysis in the field of transportation"
]
"role" => array:2 [
"fr" => "Directeur de thèse"
"en" => "Thesis director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
1 => Essec\Faculty\Model\These {#2263
#_index: null
#_id: null
#_source: array:9 [
"year" => "2020"
"startDate" => null
"endDate" => "2020"
"student" => "CHÉRIEF-ABDELLATIF B.-E."
"firstJob" => "Post-doctoral researcher, University of Oxford"
"label" => array:2 [
"fr" => "Contributions à l’étude théorique de l’inférence variationelle et `a la robustesse"
"en" => "Contributions to the theoretical study of variational inference and robustness"
]
"role" => array:2 [
"fr" => "Directeur de thèse"
"en" => "Thesis director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
2 => Essec\Faculty\Model\These {#2264
#_index: null
#_id: null
#_source: array:9 [
"year" => "2017"
"startDate" => null
"endDate" => "2017"
"student" => "COTTET V."
"firstJob" => "Senior statistician, INSEE"
"label" => array:2 [
"fr" => "Étude théorique de quelques procédures statistiques pour le traitement de données complexes"
"en" => "Theoretical study of some statistical procedures applied to complex data"
]
"role" => array:2 [
"fr" => "Co-directeur de thèse"
"en" => "Thesis co-director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
3 => Essec\Faculty\Model\These {#2265
#_index: null
#_id: null
#_source: array:9 [
"year" => "2017"
"startDate" => null
"endDate" => "2017"
"student" => "MAI T. T."
"firstJob" => "Post-doctoral researcher, University of Oslo"
"label" => array:2 [
"fr" => "Estimation PAC-Bayésienne de matrices de faible rang"
"en" => "PAC-Bayesian estimation of low-rank matrices"
]
"role" => array:2 [
"fr" => "Directeur de thèse"
"en" => "Thesis director"
]
"institution" => array:2 [
"fr" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
"en" => "L'École nationale de la statistique et de l'administration économique (ENSAE)"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
4 => Essec\Faculty\Model\These {#2266
#_index: null
#_id: null
#_source: array:9 [
"year" => "2023"
"startDate" => null
"endDate" => "2023"
"student" => "ROSIER A."
"firstJob" => "Enseignant-chercheur à l'école d'ingénieurs ESME"
"label" => array:2 [
"fr" => """
Quelques contributions à la statistique des modèles dynamiques\n
aléatoires
"""
"en" => "Contributions to statistics of stochastic processes"
]
"role" => array:2 [
"fr" => "Co-directeur de thèse"
"en" => "Thesis co-director"
]
"institution" => array:2 [
"fr" => "Université Paris X Nanterre"
"en" => "Université Paris X Nanterre"
]
"country" => array:2 [
"fr" => "France"
"en" => "France"
]
]
+lang: "en"
+"parent": Essec\Faculty\Model\Profile {#2216}
}
]
"indexedAt" => "2024-11-21T07:21:22.000Z"
"contributions" => array:76 [
0 => Essec\Faculty\Model\Contribution {#2268
#_index: "academ_contributions"
#_id: "13863"
#_source: array:18 [
"id" => "13863"
"slug" => "chatgpt"
"yearMonth" => "2023-03"
"year" => "2023"
"title" => "ChatGPT"
"description" => "ALQUIER, P. 2023. <i>ChatGPT</i>. Mars."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-21 09:57:42"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Interviews : Emission radio - TV - presse écrite"
"en" => "Interviews: radio - TV - press"
]
"support_type" => array:2 [
"fr" => "Presse"
"en" => "Press"
]
"countries" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
1 => Essec\Faculty\Model\Contribution {#2270
#_index: "academ_contributions"
#_id: "13883"
#_source: array:18 [
"id" => "13883"
"slug" => "inverse-problems-and-high-dimensional-estimation"
"yearMonth" => "2011-06"
"year" => "2011"
"title" => "Inverse Problems and High-Dimensional Estimation"
"description" => "ALQUIER, P., GAUTIER, E. et STOLTZ, G. [Eds] (2011). <i>Inverse Problems and High-Dimensional Estimation</i>. Springer Berlin Heidelberg."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "GAUTIER Eric"
]
2 => array:1 [
"name" => "STOLTZ Gilles"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 13:44:09"
"publicationUrl" => "https://link.springer.com/book/10.1007/978-3-642-19989-9"
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Direction d'ouvrage"
"en" => "Book editor"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
2 => Essec\Faculty\Model\Contribution {#2272
#_index: "academ_contributions"
#_id: "13888"
#_source: array:18 [
"id" => "13888"
"slug" => "generative-ai-friend-or-foe"
"yearMonth" => "2023-03"
"year" => "2023"
"title" => "Generative AI: Friend or Foe?"
"description" => "ALQUIER, P. 2023. <i>Generative AI: Friend or Foe?</i> Mars."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://www.theedgesingapore.com/digitaledge/focus/generative-ai-friend-or-foe"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Interviews : Emission radio - TV - presse écrite"
"en" => "Interviews: radio - TV - press"
]
"support_type" => array:2 [
"fr" => "Presse"
"en" => "Press"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Comments with Nurdianah Md Nur"
"en" => "Comments with Nurdianah Md Nur"
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
3 => Essec\Faculty\Model\Contribution {#2269
#_index: "academ_contributions"
#_id: "14355"
#_source: array:18 [
"id" => "14355"
"slug" => "rates-of-convergence-in-bayesian-meta-learning"
"yearMonth" => "2024-01"
"year" => "2024"
"title" => "Rates of Convergence in Bayesian Meta-learning"
"description" => "ALQUIER, P., RIOU, C. et CHÉRIEF-ABDELLATIF, B.E. (2024). Rates of Convergence in Bayesian Meta-learning. Dans: 2024 IMS Asia-Pacific Rim Meeting. Melbourne."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "RIOU Charles"
]
2 => array:1 [
"name" => "CHÉRIEF-ABDELLATIF Badr-Eddine"
]
]
"ouvrage" => "2024 IMS Asia-Pacific Rim Meeting"
"keywords" => []
"updatedAt" => "2023-09-27 01:00:43"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
4 => Essec\Faculty\Model\Contribution {#2273
#_index: "academ_contributions"
#_id: "14378"
#_source: array:18 [
"id" => "14378"
"slug" => "rates-of-convergence-in-bayesian-meta-learning"
"yearMonth" => "2023-08"
"year" => "2023"
"title" => "Rates of convergence in Bayesian meta-learning"
"description" => "ALQUIER, P., RIOU, C. et CHÉRIEF-ABDELLATIF, B.E. (2023). Rates of convergence in Bayesian meta-learning. Dans: 6th International Conference on Econometrics and Statistics 2023. Tokyo."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "RIOU Charles"
]
2 => array:1 [
"name" => "CHÉRIEF-ABDELLATIF Badr-Eddine"
]
]
"ouvrage" => "6th International Conference on Econometrics and Statistics 2023"
"keywords" => []
"updatedAt" => "2023-09-27 01:00:43"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
5 => Essec\Faculty\Model\Contribution {#2267
#_index: "academ_contributions"
#_id: "14429"
#_source: array:18 [
"id" => "14429"
"slug" => "fast-rates-in-meta-learning-with-pac-bayes-bounds"
"yearMonth" => "2023-04"
"year" => "2023"
"title" => "Fast Rates in Meta-Learning with PAC-Bayes Bounds"
"description" => "ALQUIER, P. et CHÉRIEF-ABDELLATIF, B.E. (2023). Fast Rates in Meta-Learning with PAC-Bayes Bounds. Dans: 12th Workshop on High Dimensional Data Analysis 2023. Rabat."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "CHÉRIEF-ABDELLATIF Badr-Eddine"
]
]
"ouvrage" => "12th Workshop on High Dimensional Data Analysis 2023"
"keywords" => []
"updatedAt" => "2023-09-27 01:00:43"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
6 => Essec\Faculty\Model\Contribution {#2271
#_index: "academ_contributions"
#_id: "14433"
#_source: array:18 [
"id" => "14433"
"slug" => "getting-organised-for-ai"
"yearMonth" => "2023-09"
"year" => "2023"
"title" => "Getting organised for AI"
"description" => "ALQUIER, P. 2023. <i>Getting organised for AI</i>. Septembre."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://www.theedgesingapore.com/digitaledge/cxos/getting-organised-ai"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Interviews : Emission radio - TV - presse écrite"
"en" => "Interviews: radio - TV - press"
]
"support_type" => array:2 [
"fr" => "Presse"
"en" => "Press"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => null
"en" => null
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
7 => Essec\Faculty\Model\Contribution {#2274
#_index: "academ_contributions"
#_id: "14602"
#_source: array:18 [
"id" => "14602"
"slug" => "a-i-an-enabler-not-a-solution"
"yearMonth" => "2023-10"
"year" => "2023"
"title" => "A.I. -- An Enabler, Not A Solution"
"description" => "ALQUIER, P. 2023. <i>A.I. -- An Enabler, Not A Solution</i>. Octobre."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://storm-asia.com/wedwebchat-ai-enabler-not-a-solution/"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Interviews : Emission radio - TV - presse écrite"
"en" => "Interviews: radio - TV - press"
]
"support_type" => array:2 [
"fr" => "Presse"
"en" => "Press"
]
"countries" => array:2 [
"fr" => "Singapour"
"en" => "Singapore"
]
"abstract" => array:2 [
"fr" => null
"en" => null
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
8 => Essec\Faculty\Model\Contribution {#2275
#_index: "academ_contributions"
#_id: "14732"
#_source: array:18 [
"id" => "14732"
"slug" => "concentration-of-variational-approximations"
"yearMonth" => "2023-04"
"year" => "2023"
"title" => "Concentration of variational approximations"
"description" => "ALQUIER, P. (2023). Concentration of variational approximations. Dans: Department of Statistics and Data Science Seminar. Singapore."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "Department of Statistics and Data Science Seminar"
"keywords" => []
"updatedAt" => "2024-03-07 17:01:07"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Présentations dans un séminaire de recherche"
"en" => "Presentations at a Faculty research seminar"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
9 => Essec\Faculty\Model\Contribution {#2276
#_index: "academ_contributions"
#_id: "14733"
#_source: array:18 [
"id" => "14733"
"slug" => "robust-estimation-with-mmd"
"yearMonth" => "2023-11"
"year" => "2023"
"title" => "Robust estimation with MMD"
"description" => "ALQUIER, P. (2023). Robust estimation with MMD. Dans: UCD School of Mathematics and Statistics -- Statistics Seminar. Dublin."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "UCD School of Mathematics and Statistics -- Statistics Seminar"
"keywords" => []
"updatedAt" => "2024-03-07 17:01:51"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Présentations dans un séminaire de recherche"
"en" => "Presentations at a Faculty research seminar"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
10 => Essec\Faculty\Model\Contribution {#2277
#_index: "academ_contributions"
#_id: "14734"
#_source: array:18 [
"id" => "14734"
"slug" => "robust-estimation-and-regression-with-mmd"
"yearMonth" => "2023-12"
"year" => "2023"
"title" => "Robust estimation and regression with MMD"
"description" => "ALQUIER, P. (2023). Robust estimation and regression with MMD. Dans: Séminaire de Probabilités et Statistiques d'Orsay. Orsay."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "Séminaire de Probabilités et Statistiques d'Orsay"
"keywords" => []
"updatedAt" => "2024-03-07 17:03:46"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Présentations dans un séminaire de recherche"
"en" => "Presentations at a Faculty research seminar"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
11 => Essec\Faculty\Model\Contribution {#2278
#_index: "academ_contributions"
#_id: "14735"
#_source: array:18 [
"id" => "14735"
"slug" => "rates-of-convergence-in-bayesian-meta-learning"
"yearMonth" => "2023-12"
"year" => "2023"
"title" => "Rates of convergence in Bayesian meta-learning"
"description" => "ALQUIER, P. (2023). Rates of convergence in Bayesian meta-learning. Dans: UCL Statistical Science Seminars. London."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "UCL Statistical Science Seminars"
"keywords" => []
"updatedAt" => "2024-03-07 17:03:04"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Présentations dans un séminaire de recherche"
"en" => "Presentations at a Faculty research seminar"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
12 => Essec\Faculty\Model\Contribution {#2279
#_index: "academ_contributions"
#_id: "14736"
#_source: array:18 [
"id" => "14736"
"slug" => "robust-estimation-and-regression-with-mmd"
"yearMonth" => "2023-12"
"year" => "2023"
"title" => "Robust estimation and regression with MMD"
"description" => "ALQUIER, P. (2023). Robust estimation and regression with MMD. Dans: Séminaire de Statistique du Laboratoire "Probabilités, Statistiques et Modélisation". Paris."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "Séminaire de Statistique du Laboratoire "Probabilités, Statistiques et Modélisation""
"keywords" => []
"updatedAt" => "2024-03-07 17:02:35"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Présentations dans un séminaire de recherche"
"en" => "Presentations at a Faculty research seminar"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
13 => Essec\Faculty\Model\Contribution {#2280
#_index: "academ_contributions"
#_id: "14796"
#_source: array:18 [
"id" => "14796"
"slug" => "pac-bayes-bounds-understanding-the-generalization-of-bayesian-learning-algorithms"
"yearMonth" => "2024-06"
"year" => "2024"
"title" => "PAC-Bayes bounds: understanding the generalization of Bayesian learning algorithms"
"description" => "ALQUIER, P. (2024). PAC-Bayes bounds: understanding the generalization of Bayesian learning algorithms. Dans: CNRS - ESSEC APAC Workshop. Singapore."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "CNRS - ESSEC APAC Workshop"
"keywords" => []
"updatedAt" => "2024-06-11 07:15:22"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
14 => Essec\Faculty\Model\Contribution {#2281
#_index: "academ_contributions"
#_id: "14797"
#_source: array:18 [
"id" => "14797"
"slug" => "pac-bayesian-bounds-for-offline-contextual-bandits"
"yearMonth" => "2024-05"
"year" => "2024"
"title" => "PAC-Bayesian Bounds for Offline Contextual Bandits"
"description" => "ALQUIER, P. (2024). PAC-Bayesian Bounds for Offline Contextual Bandits. Dans: Mini-Workshop on Learning Theory & Methodology at NTU. Singapore."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "Mini-Workshop on Learning Theory & Methodology at NTU"
"keywords" => []
"updatedAt" => "2024-05-28 10:00:31"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
15 => Essec\Faculty\Model\Contribution {#2282
#_index: "academ_contributions"
#_id: "14838"
#_source: array:18 [
"id" => "14838"
"slug" => "learning-with-pac-bayes-bounds"
"yearMonth" => "2024-06"
"year" => "2024"
"title" => "Learning with PAC-Bayes bounds"
"description" => "ALQUIER, P. (2024). Learning with PAC-Bayes bounds. Dans: Third RIKEN AIP & A*STAR-CFAR Joint Workshop on Machine Learning and Artificial Intelligence. Singapore."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "Third RIKEN AIP & A*STAR-CFAR Joint Workshop on Machine Learning and Artificial Intelligence"
"keywords" => []
"updatedAt" => "2024-06-24 13:46:19"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
16 => Essec\Faculty\Model\Contribution {#2283
#_index: "academ_contributions"
#_id: "13865"
#_source: array:18 [
"id" => "13865"
"slug" => "estimation-bounds-and-sharp-oracle-inequalities-of-regularized-procedures-with-lipschitz-loss-functions"
"yearMonth" => "2019-08"
"year" => "2019"
"title" => "Estimation bounds and sharp oracle inequalities of regularized procedures with Lipschitz loss functions"
"description" => "ALQUIER, P., COTTET, V. et LECUE, G. (2019). Estimation bounds and sharp oracle inequalities of regularized procedures with Lipschitz loss functions. <i>Annals of Statistics</i>, 47(4), pp. 2117-2144."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:3 [
"name" => "LECUE Guillaume"
"bid" => "B00806953"
"slug" => "lecue-guillaume"
]
2 => array:1 [
"name" => "COTTET Vincent"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1214/18-AOS1742"
"publicationInfo" => array:3 [
"pages" => "2117-2144"
"volume" => "47"
"number" => "4"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
Many classification and regression problems are solved in practice by regularized empirical risk minimizers (RERM). The risk is measured\n
via a loss function. The quadratic loss function is the most popular function for\n
regression. It has been extensively studied (cf. [23, 31] among others). Still many other loss functions are popular among practitioners and are indeed extremely useful in specific situations
"""
"en" => """
Many classification and regression problems are solved in practice by regularized empirical risk minimizers (RERM). The risk is measured\n
via a loss function. The quadratic loss function is the most popular function for\n
regression. It has been extensively studied (cf. [23, 31] among others). Still many other loss functions are popular among practitioners and are indeed extremely useful in specific situations
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
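The entry above, on regularized procedures with Lipschitz loss functions, concerns regularized empirical risk minimization (RERM). As a hedged illustration only, with generic notation not taken from the paper, the estimators covered by this type of result take the form

\[
\hat{f} \in \arg\min_{f \in F}\; \frac{1}{n}\sum_{i=1}^{n} \ell\big(f(X_i), Y_i\big) + \lambda\, \|f\|,
\]

where the loss \ell is Lipschitz in its first argument (for instance the hinge loss \ell(u, y) = \max(0, 1 - yu) or the quantile loss) and \|\cdot\| is a regularization norm, e.g. the \ell_1 norm in high-dimensional settings.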
17 => Essec\Faculty\Model\Contribution {#2284
#_index: "academ_contributions"
#_id: "13866"
#_source: array:18 [
"id" => "13866"
"slug" => "consistency-of-variational-bayes-inference-for-estimation-and-model-selection-in-mixtures"
"yearMonth" => "2018-09"
"year" => "2018"
"title" => "Consistency of variational Bayes inference for estimation and model selection in mixtures"
"description" => "CHERIEF-ABDELLATIF, B.E. et ALQUIER, P. (2018). Consistency of variational Bayes inference for estimation and model selection in mixtures. <i>The Electronic Journal of Statistics</i>, 12(2), pp. 2995-3035."
"authors" => array:2 [
0 => array:2 [
"name" => "CHERIEF-ABDELLATIF Badr-Eddine"
"bid" => "B00810114"
]
1 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:4 [
0 => "Mixture models"
1 => "frequentist evaluation of Bayesian methods"
2 => "variational approximations"
3 => "model selection"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1214/18-EJS1475"
"publicationInfo" => array:3 [
"pages" => "2995-3035"
"volume" => "12"
"number" => "2"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Mixture models are widely used in Bayesian statistics and machine learning, in particular in computational biology, natural language processing and many other fields. Variational inference, a technique for approximating intractable posteriors thanks to optimization algorithms, is extremely popular in practice when dealing with complex models such as mixtures. The contribution of this paper is two-fold. First, we study the concentration of variational approximations of posteriors, which is still an open problem for general mixtures, and we derive consistency and rates of convergence. We also tackle the problem of model selection for the number of components: we study the approach already used in practice, which consists in maximizing a numerical criterion (the Evidence Lower Bound). We prove that this strategy indeed leads to strong oracle inequalities. We illustrate our theoretical results by applications to Gaussian and multinomial mixtures."
"en" => "Mixture models are widely used in Bayesian statistics and machine learning, in particular in computational biology, natural language processing and many other fields. Variational inference, a technique for approximating intractable posteriors thanks to optimization algorithms, is extremely popular in practice when dealing with complex models such as mixtures. The contribution of this paper is two-fold. First, we study the concentration of variational approximations of posteriors, which is still an open problem for general mixtures, and we derive consistency and rates of convergence. We also tackle the problem of model selection for the number of components: we study the approach already used in practice, which consists in maximizing a numerical criterion (the Evidence Lower Bound). We prove that this strategy indeed leads to strong oracle inequalities. We illustrate our theoretical results by applications to Gaussian and multinomial mixtures."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
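The entry above, on variational Bayes inference for mixtures, selects the number of components by maximizing the Evidence Lower Bound (ELBO). As a hedged reminder in standard notation, not specific to the paper: for data X, a prior π on the parameter θ and a variational family F,

\[
\mathrm{ELBO}(q) = \mathbb{E}_{\theta \sim q}\big[\log p(X \mid \theta)\big] - \mathrm{KL}\big(q \,\|\, \pi\big), \qquad \hat{q} = \arg\max_{q \in F} \mathrm{ELBO}(q),
\]

and model selection keeps the number of components whose maximized ELBO is largest.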
18 => Essec\Faculty\Model\Contribution {#2285
#_index: "academ_contributions"
#_id: "13864"
#_source: array:18 [
"id" => "13864"
"slug" => "concentration-of-tempered-posteriors-and-of-their-variational-approximations"
"yearMonth" => "2020-06"
"year" => "2020"
"title" => "Concentration of tempered posteriors and of their variational approximations"
"description" => "ALQUIER, P. et RIDGWAY, J. (2020). Concentration of tempered posteriors and of their variational approximations. <i>Annals of Statistics</i>, 48(3), pp. 1475-1497."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "RIDGWAY James"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-17 01:00:45"
"publicationUrl" => "https://doi.org/10.1214/19-AOS1855"
"publicationInfo" => array:3 [
"pages" => "1475-1497"
"volume" => "48"
"number" => "3"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
While Bayesian methods are extremely popular in statistics and machine\n
learning, their application to massive data sets is often challenging, when\n
possible at all. The classical MCMC algorithms are prohibitively slow when\n
both the model dimension and the sample size are large. Variational Bayesian\n
methods aim at approximating the posterior by a distribution in a tractable\n
family F. Thus, MCMC are replaced by an optimization algorithm which is\n
orders of magnitude faster. VB methods have been applied in such computationally demanding applications as collaborative filtering, image and video\n
processing or NLP to name a few. However, despite nice results in practice,\n
the theoretical properties of these approximations are not known. We propose\n
a general oracle inequality that relates the quality of the VB approximation to\n
the prior π and to the structure of F. We provide a simple condition that allows to derive rates of convergence from this oracle inequality. We apply our\n
theory to various examples. First, we show that for parametric models with\n
log-Lipschitz likelihood, Gaussian VB leads to efficient algorithms and consistent estimators. We then study a high-dimensional example: matrix completion, and a nonparametric example: density estimation.
"""
"en" => """
While Bayesian methods are extremely popular in statistics and machine\n
learning, their application to massive data sets is often challenging, when\n
possible at all. The classical MCMC algorithms are prohibitively slow when\n
both the model dimension and the sample size are large. Variational Bayesian\n
methods aim at approximating the posterior by a distribution in a tractable\n
family F. Thus, MCMC are replaced by an optimization algorithm which is\n
orders of magnitude faster. VB methods have been applied in such computationally demanding applications as collaborative filtering, image and video\n
processing or NLP to name a few. However, despite nice results in practice,\n
the theoretical properties of these approximations are not known. We propose\n
a general oracle inequality that relates the quality of the VB approximation to\n
the prior π and to the structure of F. We provide a simple condition that allows to derive rates of convergence from this oracle inequality. We apply our\n
theory to various examples. First, we show that for parametric models with\n
log-Lipschitz likelihood, Gaussian VB leads to efficient algorithms and consistent estimators. We then study a high-dimensional example: matrix completion, and a nonparametric example: density estimation.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
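The entry above studies tempered posteriors and their variational approximations. As a hedged sketch in standard notation (α is a generic tempering parameter, not a value from the paper), the tempered posterior raises the likelihood to a power α ∈ (0, 1),

\[
\pi_{n,\alpha}(\theta \mid X_{1:n}) \;\propto\; \big[p(X_{1:n} \mid \theta)\big]^{\alpha}\, \pi(\theta),
\]

and its variational approximation is the member of the tractable family F that minimizes \mathrm{KL}(q \,\|\, \pi_{n,\alpha}), equivalently that maximizes the corresponding tempered ELBO.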
19 => Essec\Faculty\Model\Contribution {#2286
#_index: "academ_contributions"
#_id: "13867"
#_source: array:18 [
"id" => "13867"
"slug" => "simpler-pac-bayesian-bounds-for-hostile-data"
"yearMonth" => "2018-05"
"year" => "2018"
"title" => "Simpler PAC-Bayesian bounds for hostile data"
"description" => "ALQUIER, P. et GUEDJ, B. (2018). Simpler PAC-Bayesian bounds for hostile data. <i>Machine Learning</i>, 107(5), pp. 887-902."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "GUEDJ Benjamin"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://link.springer.com/article/10.1007/s10994-017-5690-0"
"publicationInfo" => array:3 [
"pages" => "887-902"
"volume" => "107"
"number" => "5"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => """
PAC-Bayesian learning bounds are of the utmost interest to the learning community. Their role is to connect the generalization ability of an aggregation distribution ρ\n
to its empirical risk and to its Kullback-Leibler divergence with respect to some prior distribution π\n
. Unfortunately, most of the available bounds typically rely on heavy assumptions such as boundedness and independence of the observations. This paper aims at relaxing these constraints and provides PAC-Bayesian learning bounds that hold for dependent, heavy-tailed observations (hereafter referred to as hostile data). In these bounds the Kullack-Leibler divergence is replaced with a general version of Csiszár’s f-divergence. We prove a general PAC-Bayesian bound, and show how to use it in various hostile settings.
"""
"en" => """
PAC-Bayesian learning bounds are of the utmost interest to the learning community. Their role is to connect the generalization ability of an aggregation distribution ρ\n
to its empirical risk and to its Kullback-Leibler divergence with respect to some prior distribution π\n
. Unfortunately, most of the available bounds typically rely on heavy assumptions such as boundedness and independence of the observations. This paper aims at relaxing these constraints and provides PAC-Bayesian learning bounds that hold for dependent, heavy-tailed observations (hereafter referred to as hostile data). In these bounds the Kullack-Leibler divergence is replaced with a general version of Csiszár’s f-divergence. We prove a general PAC-Bayesian bound, and show how to use it in various hostile settings.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
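The entry above replaces the Kullback-Leibler term of classical PAC-Bayes bounds by an f-divergence. For orientation only, a classical McAllester-type bound (bounded loss in [0, 1], i.i.d. observations; exact constants vary across versions) states that, with probability at least 1 - δ, simultaneously over all aggregation distributions ρ,

\[
\mathbb{E}_{\theta \sim \rho}\, R(\theta) \;\le\; \mathbb{E}_{\theta \sim \rho}\, r_n(\theta) + \sqrt{\frac{\mathrm{KL}(\rho \,\|\, \pi) + \log\frac{2\sqrt{n}}{\delta}}{2n}},
\]

where R is the risk and r_n the empirical risk over n observations. The bounds of the paper keep this structure but replace \mathrm{KL}(\rho \,\|\, \pi) with a Csiszár f-divergence, which is what allows dependent, heavy-tailed (hostile) data.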
20 => Essec\Faculty\Model\Contribution {#2287
#_index: "academ_contributions"
#_id: "13868"
#_source: array:18 [
"id" => "13868"
"slug" => "deviation-inequalities-for-stochastic-approximation-by-averaging"
"yearMonth" => "2022-10"
"year" => "2022"
"title" => "Deviation inequalities for stochastic approximation by averaging"
"description" => "FAN, X., ALQUIER, P. et DOUKHAN, P. (2022). Deviation inequalities for stochastic approximation by averaging. <i>Stochastic Processes and their Applications</i>, 152, pp. 452-485."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "FAN Xiequan"
]
2 => array:1 [
"name" => "DOUKHAN Paul"
]
]
"ouvrage" => ""
"keywords" => array:5 [
0 => "Deviation inequalities"
1 => "Martingales"
2 => "Iterated random functions"
3 => "Stochastic approximation by averaging"
4 => "Empirical risk minimization"
]
"updatedAt" => "2023-07-10 16:36:07"
"publicationUrl" => "https://doi.org/10.1016/j.spa.2022.07.002"
"publicationInfo" => array:3 [
"pages" => "452-485"
"volume" => "152"
"number" => ""
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We introduce a class of Markov chains that includes models of stochastic approximation by averaging and non-averaging. Using a martingale approximation method, we establish various deviation inequalities for separately Lipschitz functions of such a chain, with different moment conditions on some dominating random variables of martingale differences. Finally, we apply these inequalities to stochastic approximation by averaging and empirical risk minimization."
"en" => "We introduce a class of Markov chains that includes models of stochastic approximation by averaging and non-averaging. Using a martingale approximation method, we establish various deviation inequalities for separately Lipschitz functions of such a chain, with different moment conditions on some dominating random variables of martingale differences. Finally, we apply these inequalities to stochastic approximation by averaging and empirical risk minimization."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
21 => Essec\Faculty\Model\Contribution {#2288
#_index: "academ_contributions"
#_id: "13869"
#_source: array:18 [
"id" => "13869"
"slug" => "understanding-the-population-structure-correction-regression"
"yearMonth" => "2022-07"
"year" => "2022"
"title" => "Understanding the Population Structure Correction Regression"
"description" => "MAI, T.T. et ALQUIER, P. (2022). Understanding the Population Structure Correction Regression. Dans: <i>4th International Conference on Statistics: Theory and Applications (ICSTA'22)</i>. Prague: Avestia Publishing."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MAI The Tien"
]
]
"ouvrage" => "4th International Conference on Statistics: Theory and Applications (ICSTA'22)"
"keywords" => array:5 [
0 => "GWAS"
1 => "population structure correction"
2 => "linear regression"
3 => "bias"
4 => "variance"
]
"updatedAt" => "2023-03-21 09:20:51"
"publicationUrl" => "https://doi.org/10.11159/icsta22.114"
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Although genome-wide association studies (GWAS) on complex traits have achieved great successes, the current leading GWAS approaches simply perform to test each genotype-phenotype association separately for each genetic variant. Curiously, the statistical property for using these approaches is not known when a joint model for the whole genetic variants is considered. Here we advance in GWAS in understanding the statistical properties of the “population structure correction” (PSC) approach, a standard univariate approach in GWAS. We further propose and analyse a correction to the PSC approach, termed as “corrected population correction” (CPC). Together with the theoretical results, numerical simulations show that CPC is always comparable or better than PSC, with a dramatic improvement in some special cases. -"
"en" => "Although genome-wide association studies (GWAS) on complex traits have achieved great successes, the current leading GWAS approaches simply perform to test each genotype-phenotype association separately for each genetic variant. Curiously, the statistical property for using these approaches is not known when a joint model for the whole genetic variants is considered. Here we advance in GWAS in understanding the statistical properties of the “population structure correction” (PSC) approach, a standard univariate approach in GWAS. We further propose and analyse a correction to the PSC approach, termed as “corrected population correction” (CPC). Together with the theoretical results, numerical simulations show that CPC is always comparable or better than PSC, with a dramatic improvement in some special cases. -"
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
22 => Essec\Faculty\Model\Contribution {#2289
#_index: "academ_contributions"
#_id: "13870"
#_source: array:18 [
"id" => "13870"
"slug" => "tight-risk-bound-for-high-dimensional-time-series-completion"
"yearMonth" => "2022-03"
"year" => "2022"
"title" => "Tight risk bound for high dimensional time series completion"
"description" => "ALQUIER, P., MARIE, N. et ROSIER, A. (2022). Tight risk bound for high dimensional time series completion. <i>The Electronic Journal of Statistics</i>, 16(1), pp. 3001-3035."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MARIE Nicolas"
]
2 => array:1 [
"name" => "ROSIER Amélie"
]
]
"ouvrage" => ""
"keywords" => array:6 [
0 => "Matrix completion"
1 => "multivariate time series analysis"
2 => "matrix factorization"
3 => "high-dimensional time series"
4 => "concentration inequalities"
5 => "mixing"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1214/22-EJS2015"
"publicationInfo" => array:3 [
"pages" => "3001-3035"
"volume" => "16"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Initially designed for independent datas, low-rank matrix completion was successfully applied in many domains to the reconstruction of partially observed high-dimensional time series. However, there is a lack of theory to support the application of these methods to dependent datas. In this paper, we propose a general model for multivariate, partially observed time series. We show that the least-square method with a rank penalty leads to reconstruction error of the same order as for independent datas. Moreover, when the time series has some additional properties such as periodicity or smoothness, the rate can actually be faster than in the independent case."
"en" => "Initially designed for independent datas, low-rank matrix completion was successfully applied in many domains to the reconstruction of partially observed high-dimensional time series. However, there is a lack of theory to support the application of these methods to dependent datas. In this paper, we propose a general model for multivariate, partially observed time series. We show that the least-square method with a rank penalty leads to reconstruction error of the same order as for independent datas. Moreover, when the time series has some additional properties such as periodicity or smoothness, the rate can actually be faster than in the independent case."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
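The entry above analyses the least-squares method with a rank penalty for partially observed high-dimensional time series. As a hedged sketch with assumed notation (Ω, λ and M are generic symbols, not taken from the paper): writing X_{i,t} for the observed entries of the series-by-time matrix and Ω for the set of observed (i, t) pairs, such an estimator reads

\[
\hat{M} \;\in\; \arg\min_{M}\; \sum_{(i,t) \in \Omega} \big(X_{i,t} - M_{i,t}\big)^2 + \lambda\, \mathrm{rank}(M),
\]

and the result of the paper is that its reconstruction error under dependence matches the rates known in the independent case, with faster rates under periodicity or smoothness.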
23 => Essec\Faculty\Model\Contribution {#2290
#_index: "academ_contributions"
#_id: "13871"
#_source: array:18 [
"id" => "13871"
"slug" => "finite-sample-properties-of-parametric-mmd-estimation-robustness-to-misspecification-and-dependence"
"yearMonth" => "2022-03"
"year" => "2022"
"title" => "Finite sample properties of parametric MMD estimation: Robustness to misspecification and dependence"
"description" => "CHERIEF-ABDELLATIF, B.E. et ALQUIER, P. (2022). Finite sample properties of parametric MMD estimation: Robustness to misspecification and dependence. <i>Bernoulli: A Journal of Mathematical Statistics and Probability</i>, 28(1), pp. 181-213."
"authors" => array:2 [
0 => array:2 [
"name" => "CHERIEF-ABDELLATIF Badr-Eddine"
"bid" => "B00810114"
]
1 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-07-10 16:42:20"
"publicationUrl" => "https://doi.org/10.3150/21-BEJ1338"
"publicationInfo" => array:3 [
"pages" => "181-213"
"volume" => "28"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Many works in statistics aim at designing a universal estimation procedure, that is, an estimator that would converge to the best approximation of the (unknown) data generating distribution in a model, without any assumption on this distribution. This question is of major interest, in particular because the universality property leads to the robustness of the estimator. In this paper, we tackle the problem of universal estimation using a minimum distance estimator presented in (Briol et al. (2019)) based on the Maximum Mean Discrepancy. We show that the estimator is robust to both dependence and to the presence of outliers in the dataset. Finally, we provide a theoretical study of the stochastic gradient descent algorithm used to compute the estimator, and we support our findings with numerical simulations."
"en" => "Many works in statistics aim at designing a universal estimation procedure, that is, an estimator that would converge to the best approximation of the (unknown) data generating distribution in a model, without any assumption on this distribution. This question is of major interest, in particular because the universality property leads to the robustness of the estimator. In this paper, we tackle the problem of universal estimation using a minimum distance estimator presented in (Briol et al. (2019)) based on the Maximum Mean Discrepancy. We show that the estimator is robust to both dependence and to the presence of outliers in the dataset. Finally, we provide a theoretical study of the stochastic gradient descent algorithm used to compute the estimator, and we support our findings with numerical simulations."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
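The entry above studies a minimum distance estimator based on the Maximum Mean Discrepancy (MMD), computed by stochastic gradient descent. The Python sketch below is only a minimal illustration of that idea under simplifying assumptions (Gaussian kernel, one-dimensional Gaussian location model, plain reparameterized gradient steps); every name in it is invented for illustration and it is not the estimator or code of the paper.

    import numpy as np

    def gaussian_kernel(u, v, gamma=1.0):
        # k(u, v) = exp(-(u - v)^2 / (2 gamma^2)), evaluated pairwise for 1-D arrays
        d = u[:, None] - v[None, :]
        return np.exp(-d ** 2 / (2.0 * gamma ** 2))

    def mmd_fit_location(y, n_steps=500, lr=0.5, m=200, gamma=1.0, seed=0):
        # Hypothetical sketch: fit theta in the model N(theta, 1) by gradient
        # descent on an MMD^2 criterion between model samples and the data y.
        rng = np.random.default_rng(seed)
        theta = float(np.median(y))            # robust starting point
        for _ in range(n_steps):
            eps = rng.standard_normal(m)       # model samples are theta + eps
            x = theta + eps
            k = gaussian_kernel(x, y, gamma)   # cross-kernel k(x_i, y_j)
            # Gradient of -2 * mean_{i,j} k(theta + eps_i, y_j); the model-model
            # term of MMD^2 does not depend on theta for a location family.
            grad = 2.0 * np.mean((x[:, None] - y[None, :]) / gamma ** 2 * k)
            theta -= lr * grad
        return theta

    if __name__ == "__main__":
        rng = np.random.default_rng(1)
        # 95% clean observations around 3, 5% gross outliers at 50
        y = np.concatenate([rng.normal(3.0, 1.0, 190), np.full(10, 50.0)])
        print("sample mean :", round(float(y.mean()), 2))             # pulled by the outliers
        print("MMD estimate:", round(float(mmd_fit_location(y)), 2))  # stays near 3

The only purpose of this sketch is to make the robustness claim of the abstract concrete: the outliers barely move the MMD-based estimate, whereas they visibly shift the sample mean.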
24 => Essec\Faculty\Model\Contribution {#2291
#_index: "academ_contributions"
#_id: "13879"
#_source: array:18 [
"id" => "13879"
"slug" => "meta-strategy-for-learning-tuning-parameters-with-guarantees"
"yearMonth" => "2021-09"
"year" => "2021"
"title" => "Meta-Strategy for Learning Tuning Parameters with Guarantees"
"description" => "MEUNIER, D. et ALQUIER, P. (2021). Meta-Strategy for Learning Tuning Parameters with Guarantees. <i>Entropy</i>, 23(10)."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MEUNIER Dimitri"
]
]
"ouvrage" => ""
"keywords" => array:7 [
0 => "meta-learning"
1 => "hyperparameters"
2 => "priors"
3 => "online learning"
4 => "Bayesian inference"
5 => "online optimization"
6 => "gradient descent"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.3390/e23101257"
"publicationInfo" => array:3 [
"pages" => null
"volume" => "23"
"number" => "10"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "Suisse"
"en" => "Switzerland"
]
"abstract" => array:2 [
"fr" => "Online learning methods, similar to the online gradient algorithm (OGA) and exponentially weighted aggregation (EWA), often depend on tuning parameters that are difficult to set in practice. We consider an online meta-learning scenario, and we propose a meta-strategy to learn these parameters from past tasks. Our strategy is based on the minimization of a regret bound. It allows us to learn the initialization and the step size in OGA with guarantees. It also allows us to learn the prior or the learning rate in EWA. We provide a regret analysis of the strategy. It allows to identify settings where meta-learning indeed improves on learning each task in isolation."
"en" => "Online learning methods, similar to the online gradient algorithm (OGA) and exponentially weighted aggregation (EWA), often depend on tuning parameters that are difficult to set in practice. We consider an online meta-learning scenario, and we propose a meta-strategy to learn these parameters from past tasks. Our strategy is based on the minimization of a regret bound. It allows us to learn the initialization and the step size in OGA with guarantees. It also allows us to learn the prior or the learning rate in EWA. We provide a regret analysis of the strategy. It allows to identify settings where meta-learning indeed improves on learning each task in isolation."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
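The entry above is about meta-learning the tuning parameters of online learners. As a hedged reminder of what those parameters are (standard notation; this is not the paper's meta-strategy itself): the online gradient algorithm iterates

\[
\theta_{t+1} = \theta_t - \eta\, \nabla \ell_t(\theta_t),
\]

so its tuning parameters are the initialization θ_1 and the step size η, while exponentially weighted aggregation updates a distribution

\[
\rho_{t+1}(d\theta) \;\propto\; \pi(d\theta)\, \exp\Big(-\eta \sum_{s \le t} \ell_s(\theta)\Big),
\]

whose tuning parameters are the prior π and the learning rate η. The meta-strategy of the paper learns these quantities across past tasks by minimizing a regret bound.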
25 => Essec\Faculty\Model\Contribution {#2292
#_index: "academ_contributions"
#_id: "13880"
#_source: array:18 [
"id" => "13880"
"slug" => "non-exponentially-weighted-aggregation-regret-bounds-for-unbounded-loss-functions"
"yearMonth" => "2021-07"
"year" => "2021"
"title" => "Non-exponentially Weighted Aggregation: Regret Bounds for Unbounded Loss Functions"
"description" => "ALQUIER, P. (2021). Non-exponentially Weighted Aggregation: Regret Bounds for Unbounded Loss Functions. Dans: <i>38th International Conference on Machine Learning (ICML'21)</i>. Proceedings of Machine Learning Research."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "38th International Conference on Machine Learning (ICML'21)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://proceedings.mlr.press/v139/alquier21a.html"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "Royaume-Uni"
"en" => "United Kingdom"
]
"abstract" => array:2 [
"fr" => "We tackle the problem of online optimization with a general, possibly unbounded, loss function. It is well known that when the loss is bounded, the exponentially weighted aggregation strategy (EWA) leads to a regret in √T after T steps. In this paper, we study a generalized aggregation strategy, where the weights no longer depend exponentially on the losses. Our strategy is based on Follow The Regularized Leader (FTRL): we minimize the expected losses plus a regularizer, that is here a ϕ-divergence. When the regularizer is the Kullback-Leibler divergence, we obtain EWA as a special case. Using alternative divergences enables unbounded losses, at the cost of a worst regret bound in some cases."
"en" => "We tackle the problem of online optimization with a general, possibly unbounded, loss function. It is well known that when the loss is bounded, the exponentially weighted aggregation strategy (EWA) leads to a regret in √T after T steps. In this paper, we study a generalized aggregation strategy, where the weights no longer depend exponentially on the losses. Our strategy is based on Follow The Regularized Leader (FTRL): we minimize the expected losses plus a regularizer, that is here a ϕ-divergence. When the regularizer is the Kullback-Leibler divergence, we obtain EWA as a special case. Using alternative divergences enables unbounded losses, at the cost of a worst regret bound in some cases."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
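The entry above generalizes EWA through Follow The Regularized Leader (FTRL) with a ϕ-divergence regularizer. As a hedged sketch in generic notation (η and D_ϕ below are written schematically, not copied from the paper), the update at round t+1 is

\[
\rho_{t+1} \;\in\; \arg\min_{\rho}\; \mathbb{E}_{\theta \sim \rho}\Big[\sum_{s=1}^{t} \ell_s(\theta)\Big] + \frac{1}{\eta}\, D_{\phi}\big(\rho \,\|\, \pi\big).
\]

Taking ϕ(x) = x log x, i.e. the Kullback-Leibler divergence, gives back the exponential weights of EWA, while other ϕ-divergences yield non-exponential weights that tolerate unbounded losses, at the price of a weaker regret bound in some cases.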
26 => Essec\Faculty\Model\Contribution {#2293
#_index: "academ_contributions"
#_id: "13881"
#_source: array:18 [
"id" => "13881"
"slug" => "a-theoretical-analysis-of-catastrophic-forgetting-through-the-ntk-overlap-matrix"
"yearMonth" => "2021-04"
"year" => "2021"
"title" => "A Theoretical Analysis of Catastrophic Forgetting through the NTK Overlap Matrix"
"description" => "DOAN, T., ABBANA BENNANI, M., MAZOURE, B., RABUSSEAU, G. et ALQUIER, P. (2021). A Theoretical Analysis of Catastrophic Forgetting through the NTK Overlap Matrix. Dans: <i>24th International Conference on Artificial Intelligence and Statistics (AIStat'21)</i>. Proceedings of Machine Learning Research."
"authors" => array:5 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "DOAN Tang"
]
2 => array:1 [
"name" => "ABBANA BENNANI Mehdi"
]
3 => array:1 [
"name" => "MAZOURE Bogdan"
]
4 => array:1 [
"name" => "RABUSSEAU Guillaume"
]
]
"ouvrage" => "24th International Conference on Artificial Intelligence and Statistics (AIStat'21)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://proceedings.mlr.press/v130/doan21a.html"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "Royaume-Uni"
"en" => "United Kingdom"
]
"abstract" => array:2 [
"fr" => "Continual learning (CL) is a setting in which an agent has to learn from an incoming stream of data during its entire lifetime. Although major advances have been made in the field, one recurring problem which remains unsolved is that of Catastrophic Forgetting (CF). While the issue has been extensively studied empirically, little attention has been paid from a theoretical angle. In this paper, we show that the impact of CF increases as two tasks increasingly align. We introduce a measure of task similarity called the NTK overlap matrix which is at the core of CF. We analyze common projected gradient algorithms and demonstrate how they mitigate forgetting. Then, we propose a variant of Orthogonal Gradient Descent (OGD) which leverages structure of the data through Principal Component Analysis (PCA). Experiments support our theoretical findings and show how our method can help reduce CF on classical CL datasets."
"en" => "Continual learning (CL) is a setting in which an agent has to learn from an incoming stream of data during its entire lifetime. Although major advances have been made in the field, one recurring problem which remains unsolved is that of Catastrophic Forgetting (CF). While the issue has been extensively studied empirically, little attention has been paid from a theoretical angle. In this paper, we show that the impact of CF increases as two tasks increasingly align. We introduce a measure of task similarity called the NTK overlap matrix which is at the core of CF. We analyze common projected gradient algorithms and demonstrate how they mitigate forgetting. Then, we propose a variant of Orthogonal Gradient Descent (OGD) which leverages structure of the data through Principal Component Analysis (PCA). Experiments support our theoretical findings and show how our method can help reduce CF on classical CL datasets."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
27 => Essec\Faculty\Model\Contribution {#2294
#_index: "academ_contributions"
#_id: "13882"
#_source: array:18 [
"id" => "13882"
"slug" => "simultaneous-dimension-reduction-and-clustering-via-the-nmf-em-algorithm"
"yearMonth" => "2021-03"
"year" => "2021"
"title" => "Simultaneous dimension reduction and clustering via the NMF-EM algorithm"
"description" => "CAREL, L. et ALQUIER, P. (2021). Simultaneous dimension reduction and clustering via the NMF-EM algorithm. <i>Advances in Data Analysis and Classification</i>, 15(1), pp. 231-260."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "CAREL Léna"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 09:28:55"
"publicationUrl" => "https://link.springer.com/article/10.1007/s11634-020-00398-4"
"publicationInfo" => array:3 [
"pages" => "231-260"
"volume" => "15"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Mixture models are among the most popular tools for clustering. However, when the dimension and the number of clusters is large, the estimation of the clusters become challenging, as well as their interpretation. Restriction on the parameters can be used to reduce the dimension. An example is given by mixture of factor analyzers for Gaussian mixtures. The extension of MFA to non-Gaussian mixtures is not straightforward. We propose a new constraint for parameters in non-Gaussian mixture model: the K components parameters are combinations of elements from a small dictionary, say H elements, with H≪K. Including a nonnegative matrix factorization (NMF) in the EM algorithm allows us to simultaneously estimate the dictionary and the parameters of the mixture. We propose the acronym NMF-EM for this algorithm, implemented in the R package nmfem. This original approach is motivated by passengers clustering from ticketing data: we apply NMF-EM to data from two Transdev public transport networks. In this case, the words are easily interpreted as typical slots in a timetable."
"en" => "Mixture models are among the most popular tools for clustering. However, when the dimension and the number of clusters is large, the estimation of the clusters become challenging, as well as their interpretation. Restriction on the parameters can be used to reduce the dimension. An example is given by mixture of factor analyzers for Gaussian mixtures. The extension of MFA to non-Gaussian mixtures is not straightforward. We propose a new constraint for parameters in non-Gaussian mixture model: the K components parameters are combinations of elements from a small dictionary, say H elements, with H≪K. Including a nonnegative matrix factorization (NMF) in the EM algorithm allows us to simultaneously estimate the dictionary and the parameters of the mixture. We propose the acronym NMF-EM for this algorithm, implemented in the R package nmfem. This original approach is motivated by passengers clustering from ticketing data: we apply NMF-EM to data from two Transdev public transport networks. In this case, the words are easily interpreted as typical slots in a timetable."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
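The entry above constrains the component parameters of a mixture to be combinations of a small dictionary. As a hedged way to write that constraint (matrix notation assumed, not taken from the paper): stacking the parameters of the K components as the columns of a d × K matrix Θ, the NMF-EM constraint is

\[
\Theta = \Lambda\, W, \qquad \Lambda \in \mathbb{R}_{\ge 0}^{d \times H}, \quad W \in \mathbb{R}_{\ge 0}^{H \times K}, \quad H \ll K,
\]

so that each component is a nonnegative combination of H dictionary elements ("words"), and the EM iterations include a nonnegative matrix factorization step that updates Λ and W together with the rest of the mixture parameters.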
28 => Essec\Faculty\Model\Contribution {#2295
#_index: "academ_contributions"
#_id: "13884"
#_source: array:18 [
"id" => "13884"
"slug" => "approximate-bayesian-inference"
"yearMonth" => "2022-05"
"year" => "2022"
"title" => "Approximate Bayesian Inference"
"description" => "ALQUIER, P. [Ed] (2022). <i>Approximate Bayesian Inference</i>. MDPI."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:11 [
0 => "Bayesian statistics"
1 => "machine learning"
2 => "variational approximations"
3 => "PAC-Bayes"
4 => "expectation-propagation"
5 => "Markov chain Monte Carlo"
6 => "Langevin Monte Carlo"
7 => "sequential Monte Carlo"
8 => "Laplace approximations"
9 => "approximate Bayesian computation"
10 => "Gibbs posterior"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://www.mdpi.com/books/book/5544"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Direction d'ouvrage"
"en" => "Book editor"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
This book is a reprint of the Special Issue Approximate Bayesian Inference that was published in the open access journal Entropy (ISSN 1099-4300) (available at: https://www.mdpi.com/journal/entropy/special_issues/approx_Bayes_inference).\n
The objective of this Special Issue is to provide the latest advances in approximate Monte Carlo methods and in approximations of the posterior: design of efficient algorithms, study of the statistical properties of these algorithms, and challenging applications.
"""
"en" => """
This book is a reprint of the Special Issue Approximate Bayesian Inference that was published in the open access journal Entropy (ISSN 1099-4300) (available at: https://www.mdpi.com/journal/entropy/special_issues/approx_Bayes_inference).\n
The objective of this Special Issue is to provide the latest advances in approximate Monte Carlo methods and in approximations of the posterior: design of efficient algorithms, study of the statistical properties of these algorithms, and challenging applications.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
29 => Essec\Faculty\Model\Contribution {#2296
#_index: "academ_contributions"
#_id: "13885"
#_source: array:18 [
"id" => "13885"
"slug" => "approximate-bayesian-inference"
"yearMonth" => "2020-11"
"year" => "2020"
"title" => "Approximate Bayesian Inference"
"description" => "ALQUIER, P. (2020). Approximate Bayesian Inference. <i>Entropy</i>, 22(11), pp. 1272."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:11 [
0 => "Bayesian statistics"
1 => "machine learning"
2 => "variational approximations"
3 => "PAC-Bayes"
4 => "expectation-propagation"
5 => "Markov chain Monte Carlo"
6 => "Langevin Monte Carlo"
7 => "sequential Monte Carlo"
8 => "Laplace approximations"
9 => "approximate Bayesian computation"
10 => "Gibbs posterior"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://www.mdpi.com/1099-4300/22/11/1272"
"publicationInfo" => array:3 [
"pages" => "1272"
"volume" => "22"
"number" => "11"
]
"type" => array:2 [
"fr" => "Préfaces / Introductions de revue"
"en" => "Prefaces of a journal"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "Suisse"
"en" => "Switzerland"
]
"abstract" => array:2 [
"fr" => "This is the Editorial article summarizing the scope of the Special Issue: Approximate Bayesian Inference."
"en" => "This is the Editorial article summarizing the scope of the Special Issue: Approximate Bayesian Inference."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
30 => Essec\Faculty\Model\Contribution {#2297
#_index: "academ_contributions"
#_id: "13887"
#_source: array:18 [
"id" => "13887"
"slug" => "high-dimensional-var-with-low-rank-transition"
"yearMonth" => "2020-07"
"year" => "2020"
"title" => "High-dimensional VAR with low-rank transition"
"description" => "ALQUIER, P., BERTIN, K., DOUKHAN, P. et GARNIER, R. (2020). High-dimensional VAR with low-rank transition. <i>Statistics and Computing</i>, 30(4), pp. 1139-1153."
"authors" => array:4 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "BERTIN Karine"
]
2 => array:1 [
"name" => "DOUKHAN Paul"
]
3 => array:1 [
"name" => "GARNIER Rémy"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 09:36:35"
"publicationUrl" => "https://link.springer.com/article/10.1007/s11222-020-09929-7"
"publicationInfo" => array:3 [
"pages" => "1139-1153"
"volume" => "30"
"number" => "4"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We propose a vector auto-regressive model with a low-rank constraint on the transition matrix. This model is well suited to predict high-dimensional series that are highly correlated, or that are driven by a small number of hidden factors. While our model has formal similarities with factor models, its structure is more a way to reduce the dimension in order to improve the predictions, rather than a way to define interpretable factors. We provide an estimator for the transition matrix in a very general setting and study its performances in terms of prediction and adaptation to the unknown rank. Our method obtains good result on simulated data, in particular when the rank of the underlying process is small. On macroeconomic data from Giannone et al. (Rev Econ Stat 97(2):436–451, 2015), our method is competitive with state-of-the-art methods in small dimension and even improves on them in high dimension."
"en" => "We propose a vector auto-regressive model with a low-rank constraint on the transition matrix. This model is well suited to predict high-dimensional series that are highly correlated, or that are driven by a small number of hidden factors. While our model has formal similarities with factor models, its structure is more a way to reduce the dimension in order to improve the predictions, rather than a way to define interpretable factors. We provide an estimator for the transition matrix in a very general setting and study its performances in terms of prediction and adaptation to the unknown rank. Our method obtains good result on simulated data, in particular when the rank of the underlying process is small. On macroeconomic data from Giannone et al. (Rev Econ Stat 97(2):436–451, 2015), our method is competitive with state-of-the-art methods in small dimension and even improves on them in high dimension."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
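The entry above puts a low-rank constraint on the transition matrix of a vector autoregression. As a hedged sketch in standard notation (the penalized form below is one common way to encode the constraint, not necessarily the exact estimator of the paper), the model for a d-dimensional series is

\[
X_t = A\, X_{t-1} + \varepsilon_t, \qquad \mathrm{rank}(A) = r \ll d,
\]

and A can be estimated, for instance, by least squares under a rank constraint or with a nuclear-norm penalty,

\[
\hat{A} \;\in\; \arg\min_{A}\; \sum_{t=2}^{T} \big\|X_t - A X_{t-1}\big\|_2^2 + \lambda\, \|A\|_{*}.
\]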
31 => Essec\Faculty\Model\Contribution {#2298
#_index: "academ_contributions"
#_id: "13889"
#_source: array:18 [
"id" => "13889"
"slug" => "mmd-bayes-robust-bayesian-estimation-via-maximum-mean-discrepancy"
"yearMonth" => "2020-01"
"year" => "2020"
"title" => "MMD-Bayes: Robust Bayesian Estimation via Maximum Mean Discrepancy"
"description" => "CHERIEF-ABDELLATIF, B.E. et ALQUIER, P. (2020). MMD-Bayes: Robust Bayesian Estimation via Maximum Mean Discrepancy. Dans: <i>2nd Symposium on Advances in Approximate Bayesian Inference (AABI'19)</i>. Proceedings of Machine Learning Research."
"authors" => array:2 [
0 => array:2 [
"name" => "CHERIEF-ABDELLATIF Badr-Eddine"
"bid" => "B00810114"
]
1 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "2nd Symposium on Advances in Approximate Bayesian Inference (AABI'19)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://proceedings.mlr.press/v118/cherief-abdellatif20a.html"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "Royaume-Uni"
"en" => "United Kingdom"
]
"abstract" => array:2 [
"fr" => "In some misspecified settings, the posterior distribution in Bayesian statistics may lead to inconsistent estimates. To fix this issue, it has been suggested to replace the likelihood by a pseudo-likelihood, that is the exponential of a loss function enjoying suitable robustness properties. In this paper, we build a pseudo-likelihood based on the Maximum Mean Discrepancy, dened via an embedding of probability distributions into a reproducing kernel Hilbert space. We show that this MMD-Bayes posterior is consistent and robust to model misspecication. As the posterior obtained in this way might be intractable, we also prove that reasonable variational approximations of this posterior enjoy the same properties. We provide details on a stochastic gradient algorithm to compute these variational approximations. Numerical simulations indeed suggest that our estimator is more robust to misspecication than the ones based on the likelihood."
"en" => "In some misspecified settings, the posterior distribution in Bayesian statistics may lead to inconsistent estimates. To fix this issue, it has been suggested to replace the likelihood by a pseudo-likelihood, that is the exponential of a loss function enjoying suitable robustness properties. In this paper, we build a pseudo-likelihood based on the Maximum Mean Discrepancy, dened via an embedding of probability distributions into a reproducing kernel Hilbert space. We show that this MMD-Bayes posterior is consistent and robust to model misspecication. As the posterior obtained in this way might be intractable, we also prove that reasonable variational approximations of this posterior enjoy the same properties. We provide details on a stochastic gradient algorithm to compute these variational approximations. Numerical simulations indeed suggest that our estimator is more robust to misspecication than the ones based on the likelihood."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
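The entry above builds a pseudo-likelihood from the Maximum Mean Discrepancy. As a hedged sketch in generic notation (β is written here as an unspecified temperature, not a value from the paper), the MMD-Bayes pseudo-posterior replaces the likelihood by the exponential of a loss,

\[
\tilde{\pi}_{\beta}(\theta \mid X_{1:n}) \;\propto\; \exp\!\Big(-\beta\, \widehat{\mathrm{MMD}}^2\big(P_\theta,\, \hat{P}_n\big)\Big)\, \pi(\theta),
\]

where \hat{P}_n is the empirical distribution of the data and the MMD is computed through a kernel embedding into a reproducing kernel Hilbert space; the paper studies this pseudo-posterior and its variational approximations.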
32 => Essec\Faculty\Model\Contribution {#2299
#_index: "academ_contributions"
#_id: "13890"
#_source: array:18 [
"id" => "13890"
"slug" => "matrix-factorization-for-multivariate-time-series-analysis"
"yearMonth" => "2019-11"
"year" => "2019"
"title" => "Matrix factorization for multivariate time series analysis"
"description" => "ALQUIER, P. et MARIE, N. (2019). Matrix factorization for multivariate time series analysis. <i>The Electronic Journal of Statistics</i>, 13(2), pp. 4346-4366."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MARIE Nicolas"
]
]
"ouvrage" => ""
"keywords" => array:4 [
0 => "Multivariate Time Series Analysis"
1 => "matrix Factorization"
2 => "random Matrices"
3 => "non-parametric Regression"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://projecteuclid.org/journals/electronic-journal-of-statistics/volume-13/issue-2/Matrix-factorization-for-multivariate-time-series-analysis/10.1214/19-EJS1630.full"
"publicationInfo" => array:3 [
"pages" => "4346-4366"
"volume" => "13"
"number" => "2"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Matrix factorization is a powerful data analysis tool. It has been used in multivariate time series analysis, leading to the decomposition of the series in a small set of latent factors. However, little is known on the statistical performances of matrix factorization for time series. In this paper, we extend the results known for matrix estimation in the i.i.d setting to time series. Moreover, we prove that when the series exhibit some additional structure like periodicity or smoothness, it is possible to improve on the classical rates of convergence."
"en" => "Matrix factorization is a powerful data analysis tool. It has been used in multivariate time series analysis, leading to the decomposition of the series in a small set of latent factors. However, little is known on the statistical performances of matrix factorization for time series. In this paper, we extend the results known for matrix estimation in the i.i.d setting to time series. Moreover, we prove that when the series exhibit some additional structure like periodicity or smoothness, it is possible to improve on the classical rates of convergence."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
33 => Essec\Faculty\Model\Contribution {#2300
#_index: "academ_contributions"
#_id: "13891"
#_source: array:18 [
"id" => "13891"
"slug" => "a-generalization-bound-for-online-variational-inference"
"yearMonth" => "2019-11"
"year" => "2019"
"title" => "A Generalization Bound for Online Variational Inference"
"description" => "CHERIEF-ABDELLATIF, B.E., ALQUIER, P. et KHAN, M.E. (2019). A Generalization Bound for Online Variational Inference. Dans: <i>11th Asian Conference on Machine Learning (ACML'19)</i>. Proceedings of Machine Learning Research."
"authors" => array:3 [
0 => array:2 [
"name" => "CHERIEF-ABDELLATIF Badr-Eddine"
"bid" => "B00810114"
]
1 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
2 => array:1 [
"name" => "KHAN Mohammad Emtiyaz"
]
]
"ouvrage" => "11th Asian Conference on Machine Learning (ACML'19)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://proceedings.mlr.press/v101/cherief-abdellatif19a.html"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "Royaume-Uni"
"en" => "United Kingdom"
]
"abstract" => array:2 [
"fr" => "Bayesian inference provides an attractive online-learning framework to analyze sequential data, and offers generalization guarantees which hold even with model mismatch and adversaries. Unfortunately, exact Bayesian inference is rarely feasible in practice and approximation methods are usually employed, but do such methods preserve the generalization properties of Bayesian inference? In this paper, we show that this is indeed the case for some variational inference (VI) algorithms. We consider a few existing online, tempered VI algorithms, as well as a new algorithm, and derive their generalization bounds. Our theoretical result relies on the convexity of the variational objective, but we argue that the result should hold more generally and present empirical evidence in support of this. Our work in this paper presents theoretical justifications in favor of online algorithms relying on approximate Bayesian methods."
"en" => "Bayesian inference provides an attractive online-learning framework to analyze sequential data, and offers generalization guarantees which hold even with model mismatch and adversaries. Unfortunately, exact Bayesian inference is rarely feasible in practice and approximation methods are usually employed, but do such methods preserve the generalization properties of Bayesian inference? In this paper, we show that this is indeed the case for some variational inference (VI) algorithms. We consider a few existing online, tempered VI algorithms, as well as a new algorithm, and derive their generalization bounds. Our theoretical result relies on the convexity of the variational objective, but we argue that the result should hold more generally and present empirical evidence in support of this. Our work in this paper presents theoretical justifications in favor of online algorithms relying on approximate Bayesian methods."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
34 => Essec\Faculty\Model\Contribution {#2301
#_index: "academ_contributions"
#_id: "13892"
#_source: array:18 [
"id" => "13892"
"slug" => "exponential-inequalities-for-nonstationary-markov-chains"
"yearMonth" => "2019-06"
"year" => "2019"
"title" => "Exponential inequalities for nonstationary Markov chains"
"description" => "ALQUIER, P., DOUKHAN, P. et FAN, X. (2019). Exponential inequalities for nonstationary Markov chains. <i>Dependence Modeling</i>, 7(1), pp. 150-168."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "DOUKHAN Paul"
]
2 => array:1 [
"name" => "FAN Xiequan"
]
]
"ouvrage" => ""
"keywords" => array:7 [
0 => "Nonstationary Markov chains"
1 => "Martingales"
2 => "Exponential inequalities"
3 => "Time series forecasting"
4 => "Sta-tistical learning theory"
5 => "Oracle inequalities"
6 => "Model selection"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://www.degruyter.com/document/doi/10.1515/demo-2019-0007/html?lang=en"
"publicationInfo" => array:3 [
"pages" => "150-168"
"volume" => "7"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Exponential inequalities are main tools in machine learning theory. To prove exponential inequalities for non i.i.d random variables allows to extend many learning techniques to these variables. Indeed, much work has been done both on inequalities and learning theory for time series, in the past 15 years. How-ever, for the non-independent case, almost all the results concern stationary time series. This excludes many important applications: for example any series with a periodic behaviour is nonstationary. In this paper, we extend the basic tools of [19] to nonstationary Markov chains. As an application, we provide a Bernstein-type inequality, and we deduce risk bounds for the prediction of periodic autoregressive processes with an unknown period"
"en" => "Exponential inequalities are main tools in machine learning theory. To prove exponential inequalities for non i.i.d random variables allows to extend many learning techniques to these variables. Indeed, much work has been done both on inequalities and learning theory for time series, in the past 15 years. How-ever, for the non-independent case, almost all the results concern stationary time series. This excludes many important applications: for example any series with a periodic behaviour is nonstationary. In this paper, we extend the basic tools of [19] to nonstationary Markov chains. As an application, we provide a Bernstein-type inequality, and we deduce risk bounds for the prediction of periodic autoregressive processes with an unknown period"
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
35 => Essec\Faculty\Model\Contribution {#2302
#_index: "academ_contributions"
#_id: "13893"
#_source: array:18 [
"id" => "13893"
"slug" => "informed-sub-sampling-mcmc-approximate-bayesian-inference-for-large-datasets"
"yearMonth" => "2019-05"
"year" => "2019"
"title" => "Informed sub-sampling MCMC: approximate Bayesian inference for large datasets"
"description" => "MAIRE, F., FRIEL, N. et ALQUIER, P. (2019). Informed sub-sampling MCMC: approximate Bayesian inference for large datasets. <i>Statistics and Computing</i>, 29(3), pp. 449-482."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MAIRE Florian"
]
2 => array:1 [
"name" => "FRIEL Nial"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 09:45:27"
"publicationUrl" => "https://link.springer.com/article/10.1007/s11222-018-9817-3"
"publicationInfo" => array:3 [
"pages" => "449-482"
"volume" => "29"
"number" => "3"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "This paper introduces a framework for speeding up Bayesian inference conducted in presence of large datasets. We design a Markov chain whose transition kernel uses an unknown fraction of fixed size of the available data that is randomly refreshed throughout the algorithm. Inspired by the Approximate Bayesian Computation literature, the subsampling process is guided by the fidelity to the observed data, as measured by summary statistics. The resulting algorithm, Informed Sub-Sampling MCMC, is a generic and flexible approach which, contrary to existing scalable methodologies, preserves the simplicity of the Metropolis–Hastings algorithm. Even though exactness is lost, i.e. the chain distribution approximates the posterior, we study and quantify theoretically this bias and show on a diverse set of examples that it yields excellent performances when the computational budget is limited. If available and cheap to compute, we show that setting the summary statistics as the maximum likelihood estimator is supported by theoretical arguments."
"en" => "This paper introduces a framework for speeding up Bayesian inference conducted in presence of large datasets. We design a Markov chain whose transition kernel uses an unknown fraction of fixed size of the available data that is randomly refreshed throughout the algorithm. Inspired by the Approximate Bayesian Computation literature, the subsampling process is guided by the fidelity to the observed data, as measured by summary statistics. The resulting algorithm, Informed Sub-Sampling MCMC, is a generic and flexible approach which, contrary to existing scalable methodologies, preserves the simplicity of the Metropolis–Hastings algorithm. Even though exactness is lost, i.e. the chain distribution approximates the posterior, we study and quantify theoretically this bias and show on a diverse set of examples that it yields excellent performances when the computational budget is limited. If available and cheap to compute, we show that setting the summary statistics as the maximum likelihood estimator is supported by theoretical arguments."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
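A heavily simplified sketch (assuming numpy; all names, dataset, subsample size and tuning constants are illustrative) of the general flavour described in the abstract above: a Metropolis–Hastings chain whose acceptance step uses a rescaled log-likelihood computed on a fixed-size subsample that is refreshed along the run. The actual Informed Sub-Sampling MCMC algorithm additionally weights subsamples by their fidelity to summary statistics of the full dataset, which is not reproduced here.

    import numpy as np

    rng = np.random.default_rng(1)
    data = rng.normal(loc=2.0, scale=1.0, size=10_000)   # large synthetic dataset
    n, m = len(data), 500                                 # full size and subsample size

    def sub_loglik(theta, subsample):
        # Gaussian log-likelihood estimated on the subsample, rescaled to the full data size
        return (n / len(subsample)) * np.sum(-0.5 * (subsample - theta) ** 2)

    theta, chain = 0.0, []
    sub = rng.choice(data, size=m, replace=False)
    for t in range(5_000):
        if t % 50 == 0:                                   # periodically refresh the subsample
            sub = rng.choice(data, size=m, replace=False)
        prop = theta + 0.05 * rng.standard_normal()
        log_alpha = sub_loglik(prop, sub) - sub_loglik(theta, sub)   # flat prior assumed
        if np.log(rng.uniform()) < log_alpha:
            theta = prop
        chain.append(theta)

    print("approximate posterior mean:", np.mean(chain[1000:]))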
36 => Essec\Faculty\Model\Contribution {#2303
#_index: "academ_contributions"
#_id: "13894"
#_source: array:18 [
"id" => "13894"
"slug" => "1-bit-matrix-completion-pac-bayesian-analysis-of-a-variational-approximation"
"yearMonth" => "2018-03"
"year" => "2018"
"title" => "1-Bit matrix completion: PAC-Bayesian analysis of a variational approximation"
"description" => "COTTET, V. et ALQUIER, P. (2018). 1-Bit matrix completion: PAC-Bayesian analysis of a variational approximation. <i>Machine Learning</i>, 107(3), pp. 579-603."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "COTTET Vincent"
]
]
"ouvrage" => ""
"keywords" => array:1 [
0 => "Bayesian analysis"
]
"updatedAt" => "2023-07-10 17:29:05"
"publicationUrl" => "https://link.springer.com/article/10.1007/s10994-017-5667-z"
"publicationInfo" => array:3 [
"pages" => "579-603"
"volume" => "107"
"number" => "3"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "We focus on the completion of a (possibly) low-rank matrix with binary entries, the so-called 1-bit matrix completion problem. Our approach relies on tools from machine learning theory: empirical risk minimization and its convex relaxations. We propose an algorithm to compute a variational approximation of the pseudo-posterior. Thanks to the convex relaxation, the corresponding minimization problem is bi-convex, and thus the method works well in practice. We study the performance of this variational approximation through PAC-Bayesian learning bounds. Contrary to previous works that focused on upper bounds on the estimation error of M with various matrix norms, we are able to derive from this analysis a PAC bound on the prediction error of our algorithm. We focus essentially on convex relaxation through the hinge loss, for which we present a complete analysis, a complete simulation study and a test on the MovieLens data set. We also discuss a variational approximation to deal with the logistic loss."
"en" => "We focus on the completion of a (possibly) low-rank matrix with binary entries, the so-called 1-bit matrix completion problem. Our approach relies on tools from machine learning theory: empirical risk minimization and its convex relaxations. We propose an algorithm to compute a variational approximation of the pseudo-posterior. Thanks to the convex relaxation, the corresponding minimization problem is bi-convex, and thus the method works well in practice. We study the performance of this variational approximation through PAC-Bayesian learning bounds. Contrary to previous works that focused on upper bounds on the estimation error of M with various matrix norms, we are able to derive from this analysis a PAC bound on the prediction error of our algorithm. We focus essentially on convex relaxation through the hinge loss, for which we present a complete analysis, a complete simulation study and a test on the MovieLens data set. We also discuss a variational approximation to deal with the logistic loss."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
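A toy sketch (assuming numpy; the synthetic data, rank, step sizes and number of iterations are made up for illustration) of the hinge-loss, low-rank fitting idea mentioned in the abstract above, via simultaneous subgradient steps on the two factors. The paper's actual method is a variational approximation of a pseudo-posterior with PAC-Bayesian guarantees, which is not reproduced here.

    import numpy as np

    rng = np.random.default_rng(2)
    d1, d2, k = 30, 20, 2
    M_true = np.sign(rng.standard_normal((d1, k)) @ rng.standard_normal((k, d2)))  # +/-1 matrix
    mask = rng.uniform(size=(d1, d2)) < 0.5                                         # observed entries

    U = 0.1 * rng.standard_normal((d1, k))
    V = 0.1 * rng.standard_normal((d2, k))
    lr, lam = 0.05, 0.01
    for _ in range(500):
        margin = M_true * (U @ V.T)
        active = (margin < 1) & mask            # observed entries where the hinge loss is active
        G = -(M_true * active)                  # subgradient of the hinge loss w.r.t. U V^T
        U, V = U - lr * (G @ V + lam * U), V - lr * (G.T @ U + lam * V)

    acc = np.mean(np.sign(U @ V.T)[~mask] == M_true[~mask])
    print("sign accuracy on unobserved entries:", acc)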
37 => Essec\Faculty\Model\Contribution {#2304
#_index: "academ_contributions"
#_id: "13895"
#_source: array:18 [
"id" => "13895"
"slug" => "pseudo-bayesian-quantum-tomography-with-rank-adaptation"
"yearMonth" => "2017-05"
"year" => "2017"
"title" => "Pseudo-Bayesian quantum tomography with rank-adaptation"
"description" => "MAI, T.T. et ALQUIER, P. (2017). Pseudo-Bayesian quantum tomography with rank-adaptation. <i>Journal of Statistical Planning and Inference</i>, 184, pp. 62-76."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MAI The Tien"
]
]
"ouvrage" => ""
"keywords" => array:5 [
0 => "Quantum statistics"
1 => "Bayesian statistics"
2 => "PAC-Bayesian bounds"
3 => "Oracle inequalities"
4 => "MCMC"
]
"updatedAt" => "2023-03-22 09:51:22"
"publicationUrl" => "https://doi.org/10.1016/j.jspi.2016.11.003"
"publicationInfo" => array:3 [
"pages" => "62-76"
"volume" => "184"
"number" => ""
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Quantum state tomography, an important task in quantum information processing, aims at reconstructing a state from prepared measurement data. Bayesian methods are recognized to be one of the good and reliable choices in estimating quantum states (Blume-Kohout, 2010). Several numerical works showed that Bayesian estimations are comparable to, and even better than other methods in the problem of 1-qubit state recovery. However, the problem of choosing prior distribution in the general case of n qubits is not straightforward. More importantly, the statistical performance of Bayesian type estimators has not been studied from a theoretical perspective yet. In this paper, we propose a novel prior for quantum states (density matrices), and we define pseudo-Bayesian estimators of the density matrix. Then, using PAC-Bayesian theorems (Catoni, 2007), we derive rates of convergence for the posterior mean. The numerical performance of these estimators is tested on simulated and real datasets."
"en" => "Quantum state tomography, an important task in quantum information processing, aims at reconstructing a state from prepared measurement data. Bayesian methods are recognized to be one of the good and reliable choices in estimating quantum states (Blume-Kohout, 2010). Several numerical works showed that Bayesian estimations are comparable to, and even better than other methods in the problem of 1-qubit state recovery. However, the problem of choosing prior distribution in the general case of n qubits is not straightforward. More importantly, the statistical performance of Bayesian type estimators has not been studied from a theoretical perspective yet. In this paper, we propose a novel prior for quantum states (density matrices), and we define pseudo-Bayesian estimators of the density matrix. Then, using PAC-Bayesian theorems (Catoni, 2007), we derive rates of convergence for the posterior mean. The numerical performance of these estimators is tested on simulated and real datasets."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
38 => Essec\Faculty\Model\Contribution {#2305
#_index: "academ_contributions"
#_id: "13896"
#_source: array:18 [
"id" => "13896"
"slug" => "regret-bounds-for-lifelong-learning"
"yearMonth" => "2017-04"
"year" => "2017"
"title" => "Regret Bounds for Lifelong Learning"
"description" => "ALQUIER, P., MAI, T.T. et PONTIL, M. (2017). Regret Bounds for Lifelong Learning. Dans: <i>20th International Conference on Artificial Intelligence and Statistics (AIStat'17)</i>. Proceedings of Machine Learning Research."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MAI The Tien"
]
2 => array:1 [
"name" => "PONTIL Massimiliano"
]
]
"ouvrage" => "20th International Conference on Artificial Intelligence and Statistics (AIStat'17)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://proceedings.mlr.press/v54/alquier17a.html"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "Royaume-Uni"
"en" => "United Kingdom"
]
"abstract" => array:2 [
"fr" => "We consider the problem of transfer learning in an online setting. Different tasks are presented sequentially and processed by a within-task algorithm. We propose a lifelong learning strategy which refines the underlying data representation used by the within-task algorithm, thereby transferring information from one task to the next. We show that when the within-task algorithm comes with some regret bound, our strategy inherits this good property. Our bounds are in expectation for a general loss function, and uniform for a convex loss. We discuss applications to dictionary learning and finite set of predictors."
"en" => "We consider the problem of transfer learning in an online setting. Different tasks are presented sequentially and processed by a within-task algorithm. We propose a lifelong learning strategy which refines the underlying data representation used by the within-task algorithm, thereby transferring information from one task to the next. We show that when the within-task algorithm comes with some regret bound, our strategy inherits this good property. Our bounds are in expectation for a general loss function, and uniform for a convex loss. We discuss applications to dictionary learning and finite set of predictors."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
39 => Essec\Faculty\Model\Contribution {#2306
#_index: "academ_contributions"
#_id: "13897"
#_source: array:18 [
"id" => "13897"
"slug" => "non-negative-matrix-factorization-as-a-pre-processing-tool-for-travelers-temporal-profiles-clustering"
"yearMonth" => "2017-04"
"year" => "2017"
"title" => "Non-negative Matrix Factorization as a Pre-processing tool for Travelers Temporal Profiles Clustering"
"description" => "CAREL, L. et ALQUIER, P. (2017). Non-negative Matrix Factorization as a Pre-processing tool for Travelers Temporal Profiles Clustering. Dans: <i>25th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'17)</i>. i6doc.com."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "CAREL Léna"
]
]
"ouvrage" => "25th European Symposium on Artificial Neural Networks, Computational Intelligence and Machine Learning (ESANN'17)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "Belgique"
"en" => "Belgium"
]
"abstract" => array:2 [
"fr" => null
"en" => null
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
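No abstract is available for this entry, so the following is only a generic sketch (assuming numpy; the dimensions, update counts and the final argmax clustering are illustrative and not from the paper) of non-negative matrix factorization used as a pre-processing step before clustering, which is what the title describes: the rows of the factor W give low-dimensional non-negative profiles that can then be clustered.

    import numpy as np

    rng = np.random.default_rng(7)
    # toy "temporal profiles": rows = travelers, columns = hours of the week
    X = np.abs(rng.standard_normal((100, 168)))
    k = 4
    W = np.abs(rng.standard_normal((100, k)))
    H = np.abs(rng.standard_normal((k, 168)))
    for _ in range(200):                         # Lee–Seung multiplicative updates (Frobenius NMF)
        H *= (W.T @ X) / (W.T @ W @ H + 1e-9)
        W *= (X @ H.T) / (W @ H @ H.T + 1e-9)

    labels = W.argmax(axis=1)                    # crude clustering from the factor loadings
    print("cluster sizes:", np.bincount(labels, minlength=k))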
40 => Essec\Faculty\Model\Contribution {#2307
#_index: "academ_contributions"
#_id: "13898"
#_source: array:18 [
"id" => "13898"
"slug" => "an-oracle-inequality-for-quasi-bayesian-nonnegative-matrix-factorization"
"yearMonth" => "2017-01"
"year" => "2017"
"title" => "An oracle inequality for quasi-Bayesian nonnegative matrix factorization"
"description" => "ALQUIER, P. et GUEDJ, B. (2017). An oracle inequality for quasi-Bayesian nonnegative matrix factorization. <i>Mathematical Methods of Statistics</i>, 26(1), pp. 55-67."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "GUEDJ B."
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 09:53:05"
"publicationUrl" => "https://link.springer.com/article/10.3103/S1066530717010045"
"publicationInfo" => array:3 [
"pages" => "55-67"
"volume" => "26"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "The aim of this paper is to provide some theoretical understanding of quasi-Bayesian aggregation methods of nonnegative matrix factorization. We derive an oracle inequality for an aggregated estimator. This result holds for a very general class of prior distributions and shows how the prior affects the rate of convergence."
"en" => "The aim of this paper is to provide some theoretical understanding of quasi-Bayesian aggregation methods of nonnegative matrix factorization. We derive an oracle inequality for an aggregated estimator. This result holds for a very general class of prior distributions and shows how the prior affects the rate of convergence."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
41 => Essec\Faculty\Model\Contribution {#2308
#_index: "academ_contributions"
#_id: "13899"
#_source: array:18 [
"id" => "13899"
"slug" => "on-the-properties-of-variational-approximations-of-gibbs-posteriors"
"yearMonth" => "2016-12"
"year" => "2016"
"title" => "On the Properties of Variational Approximations of Gibbs Posteriors"
"description" => "ALQUIER, P., RIDGWAY, J. et CHOPIN, N. (2016). On the Properties of Variational Approximations of Gibbs Posteriors. <i>Journal of Machine Learning Research</i>, 17(239), pp. 1-41."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "RIDGWAY James"
]
2 => array:1 [
"name" => "CHOPIN Nicolas"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://www.jmlr.org/papers/v17/15-290.html"
"publicationInfo" => array:3 [
"pages" => "1-41"
"volume" => "17"
"number" => "239"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "The PAC-Bayesian approach is a powerful set of techniques to derive non-asymptotic risk bounds for random estimators. The corresponding optimal distribution of estimators, usually called the Gibbs posterior, is unfortunately often intractable. One may sample from it using Markov chain Monte Carlo, but this is usually too slow for big datasets. We consider instead variational approximations of the Gibbs posterior, which are fast to compute. We undertake a general study of the properties of such approximations. Our main finding is that such a variational approximation has often the same rate of convergence as the original PAC-Bayesian procedure it approximates. In addition, we show that, when the risk function is convex, a variational approximation can be obtained in polynomial time using a convex solver. We give finite sample oracle inequalities for the corresponding estimator. We specialize our results to several learning tasks (classification, ranking, matrix completion), discuss how to implement a variational approximation in each case, and illustrate the good properties of said approximation on real datasets."
"en" => "The PAC-Bayesian approach is a powerful set of techniques to derive non-asymptotic risk bounds for random estimators. The corresponding optimal distribution of estimators, usually called the Gibbs posterior, is unfortunately often intractable. One may sample from it using Markov chain Monte Carlo, but this is usually too slow for big datasets. We consider instead variational approximations of the Gibbs posterior, which are fast to compute. We undertake a general study of the properties of such approximations. Our main finding is that such a variational approximation has often the same rate of convergence as the original PAC-Bayesian procedure it approximates. In addition, we show that, when the risk function is convex, a variational approximation can be obtained in polynomial time using a convex solver. We give finite sample oracle inequalities for the corresponding estimator. We specialize our results to several learning tasks (classification, ranking, matrix completion), discuss how to implement a variational approximation in each case, and illustrate the good properties of said approximation on real datasets."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
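A small worked example (assuming numpy; the grid, loss and temperature are illustrative choices) of the Gibbs posterior mentioned in the abstract above. On a finite grid of candidate parameters it can be computed exactly as prior weights reweighted by exp(-lambda * n * empirical risk); the paper is concerned with variational approximations for the cases where such exact computation is intractable.

    import numpy as np

    rng = np.random.default_rng(3)
    X = rng.normal(1.5, 1.0, size=200)                      # data
    grid = np.linspace(-3, 3, 121)                          # finite set of candidate means
    prior = np.full(grid.size, 1.0 / grid.size)             # uniform prior on the grid
    lam = 1.0                                               # inverse temperature

    risk = np.array([np.mean((X - m) ** 2) for m in grid])  # empirical risk of each candidate
    logw = np.log(prior) - lam * len(X) * risk              # Gibbs posterior: prior * exp(-lam * n * r_n)
    logw -= logw.max()                                      # stabilize before exponentiating
    gibbs = np.exp(logw) / np.exp(logw).sum()

    print("Gibbs posterior mean of the parameter:", np.sum(gibbs * grid))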
42 => Essec\Faculty\Model\Contribution {#2309
#_index: "academ_contributions"
#_id: "13900"
#_source: array:18 [
"id" => "13900"
"slug" => "noisy-monte-carlo-convergence-of-markov-chains-with-approximate-transition-kernels"
"yearMonth" => "2016-01"
"year" => "2016"
"title" => "Noisy Monte Carlo: convergence of Markov chains with approximate transition kernels"
"description" => "ALQUIER, P., FRIEL, N., EVERITT, R. et BOLAND, A. (2016). Noisy Monte Carlo: convergence of Markov chains with approximate transition kernels. <i>Statistics and Computing</i>, 26(1-2), pp. 29-47."
"authors" => array:4 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "FRIEL N."
]
2 => array:1 [
"name" => "EVERITT R."
]
3 => array:1 [
"name" => "BOLAND A."
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 09:58:38"
"publicationUrl" => "https://link.springer.com/article/10.1007/s11222-014-9521-x"
"publicationInfo" => array:3 [
"pages" => "29-47"
"volume" => "26"
"number" => "1-2"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Monte Carlo algorithms often aim to draw from a distribution π by simulating a Markov chain with transition kernel P such that π is invariant under P. However, there are many situations for which it is impractical or impossible to draw from the transition kernel P. For instance, this is the case with massive datasets, where is it prohibitively expensive to calculate the likelihood and is also the case for intractable likelihood models arising from, for example, Gibbs random fields, such as those found in spatial statistics and network analysis."
"en" => "Monte Carlo algorithms often aim to draw from a distribution π by simulating a Markov chain with transition kernel P such that π is invariant under P. However, there are many situations for which it is impractical or impossible to draw from the transition kernel P. For instance, this is the case with massive datasets, where is it prohibitively expensive to calculate the likelihood and is also the case for intractable likelihood models arising from, for example, Gibbs random fields, such as those found in spatial statistics and network analysis."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
43 => Essec\Faculty\Model\Contribution {#2310
#_index: "academ_contributions"
#_id: "13901"
#_source: array:18 [
"id" => "13901"
"slug" => "a-bayesian-approach-for-noisy-matrix-completion-optimal-rate-under-general-sampling-distribution"
"yearMonth" => "2015-04"
"year" => "2015"
"title" => "A Bayesian approach for noisy matrix completion: Optimal rate under general sampling distribution"
"description" => "MAI, T.T. et ALQUIER, P. (2015). A Bayesian approach for noisy matrix completion: Optimal rate under general sampling distribution. <i>The Electronic Journal of Statistics</i>, 9(1), pp. 823-841."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MAI The Tien"
]
]
"ouvrage" => ""
"keywords" => array:6 [
0 => "Matrix completion"
1 => "Bayesian Analysis"
2 => "PACBayesian bounds"
3 => "oracle inequality"
4 => "low-rank matrix"
5 => "Gibbs sampler"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://projecteuclid.org/journals/electronic-journal-of-statistics/volume-9/issue-1/A-Bayesian-approach-for-noisy-matrix-completion--Optimal-rate/10.1214/15-EJS1020.full"
"publicationInfo" => array:3 [
"pages" => "823-841"
"volume" => "9"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Bayesian methods for low-rank matrix completion with noise have been shown to be very efficient computationally [3, 18, 19, 24, 28]. While the behaviour of penalized minimization methods is well understood both from the theoretical and computational points of view (see [7, 9, 16, 23] among others) in this problem, the theoretical optimality of Bayesian estimators have not been explored yet. In this paper, we propose a Bayesian estimator for matrix completion under general sampling distribution. We also provide an oracle inequality for this estimator. This inequality proves that, whatever the rank of the matrix to be estimated, our estimator reaches the minimax-optimal rate of convergence (up to a logarithmic factor). We end the paper with a short simulation study."
"en" => "Bayesian methods for low-rank matrix completion with noise have been shown to be very efficient computationally [3, 18, 19, 24, 28]. While the behaviour of penalized minimization methods is well understood both from the theoretical and computational points of view (see [7, 9, 16, 23] among others) in this problem, the theoretical optimality of Bayesian estimators have not been explored yet. In this paper, we propose a Bayesian estimator for matrix completion under general sampling distribution. We also provide an oracle inequality for this estimator. This inequality proves that, whatever the rank of the matrix to be estimated, our estimator reaches the minimax-optimal rate of convergence (up to a logarithmic factor). We end the paper with a short simulation study."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
44 => Essec\Faculty\Model\Contribution {#2311
#_index: "academ_contributions"
#_id: "13902"
#_source: array:18 [
"id" => "13902"
"slug" => "pac-bayesian-auc-classification-and-scoring"
"yearMonth" => "2014-12"
"year" => "2014"
"title" => "PAC-Bayesian AUC Classification and Scoring"
"description" => "RIDGWAY, J., ALQUIER, P., CHOPIN, N. et LIANG, F. (2014). PAC-Bayesian AUC Classification and Scoring. Dans: <i>28th Conference on Neural Information Processing Systems (NIPS'14)</i>. Curran Associates, Inc."
"authors" => array:4 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "RIDGWAY James"
]
2 => array:1 [
"name" => "CHOPIN Nicolas"
]
3 => array:1 [
"name" => "LIANG Feng"
]
]
"ouvrage" => "28th Conference on Neural Information Processing Systems (NIPS'14)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://papers.nips.cc/paper/5604-pac-bayesian-auc-classification-and-scoring"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "We develop a scoring and classification procedure based on the PAC-Bayesian approach and the AUC (Area Under Curve) criterion. We focus initially on the class of linear score functions. We derive PAC-Bayesian non-asymptotic bounds for two types of prior for the score parameters: a Gaussian prior, and a spike-and-slab prior; the latter makes it possible to perform feature selection. One important advantage of our approach is that it is amenable to powerful Bayesian computational tools. We derive in particular a Sequential Monte Carlo algorithm, as an efficient method which may be used as a gold standard, and an Expectation-Propagation algorithm, as a much faster but approximate method. We also extend our method to a class of non-linear score functions, essentially leading to a nonparametric procedure, by considering a Gaussian process prior."
"en" => "We develop a scoring and classification procedure based on the PAC-Bayesian approach and the AUC (Area Under Curve) criterion. We focus initially on the class of linear score functions. We derive PAC-Bayesian non-asymptotic bounds for two types of prior for the score parameters: a Gaussian prior, and a spike-and-slab prior; the latter makes it possible to perform feature selection. One important advantage of our approach is that it is amenable to powerful Bayesian computational tools. We derive in particular a Sequential Monte Carlo algorithm, as an efficient method which may be used as a gold standard, and an Expectation-Propagation algorithm, as a much faster but approximate method. We also extend our method to a class of non-linear score functions, essentially leading to a nonparametric procedure, by considering a Gaussian process prior."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
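A short sketch (assuming numpy; the synthetic data and the score are illustrative) of the AUC criterion used for linear score functions in the abstract above: the empirical AUC of a score is the fraction of (positive, negative) pairs that the score ranks correctly. The PAC-Bayesian bounds and the Sequential Monte Carlo / Expectation-Propagation computations of the paper are not reproduced here.

    import numpy as np

    rng = np.random.default_rng(8)
    n, d = 300, 5
    theta = rng.standard_normal(d)
    X = rng.standard_normal((n, d))
    y = (X @ theta + 0.5 * rng.standard_normal(n) > 0).astype(int)

    def empirical_auc(score, labels):
        pos, neg = score[labels == 1], score[labels == 0]
        # fraction of (positive, negative) pairs ranked correctly by the score
        return np.mean(pos[:, None] > neg[None, :])

    print("AUC of the linear score x -> <theta, x>:", empirical_auc(X @ theta, y))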
45 => Essec\Faculty\Model\Contribution {#2312
#_index: "academ_contributions"
#_id: "13903"
#_source: array:18 [
"id" => "13903"
"slug" => "prediction-of-time-series-by-statistical-learning-general-losses-and-fast-rates"
"yearMonth" => "2013-12"
"year" => "2013"
"title" => "Prediction of time series by statistical learning: general losses and fast rates"
"description" => "ALQUIER, P., LI, X. et WINTENBERGER, O. (2013). Prediction of time series by statistical learning: general losses and fast rates. <i>Dependence Modeling</i>, 1, pp. 65-93."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "LI Xiang"
]
2 => array:1 [
"name" => "WINTENBERGER Olivier"
]
]
"ouvrage" => ""
"keywords" => array:8 [
0 => "Statistical learning theory"
1 => "time series forecasting"
2 => "PAC-Bayesian bounds"
3 => "weak dependence"
4 => "mixing"
5 => "oracle inequalities"
6 => "fast rates"
7 => "GDP forecasting"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://www.degruyter.com/document/doi/10.2478/demo-2013-0004/html"
"publicationInfo" => array:3 [
"pages" => "65-93"
"volume" => "1"
"number" => null
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We establish rates of convergences in statistical learning for time series forecasting. Using the PAC-Bayesian approach, slow rates of convergence √d/n for the Gibbs estimator under the absolute loss were given in a previous work, where n is the sample size and d the dimension of the set of predictors. Under the same weak dependence conditions, we extend this result to any convex Lipschitz loss function. We also identify a condition on the parameter space that ensures similar rates for the classical penalized ERM procedure. We apply this method for quantile forecasting of the French GDP. Under additional conditions on the loss functions (satisfied by the quadratic loss function) and for uniformly mixing processes, we prove that the Gibbs estimator actually achieves fast rates of convergence d/n. We discuss the optimality of these different rates pointing out references to lower bounds when they are available. In particular, these results bring a generalization the results of on sparse regression estimation to some auto regression."
"en" => "We establish rates of convergences in statistical learning for time series forecasting. Using the PAC-Bayesian approach, slow rates of convergence √d/n for the Gibbs estimator under the absolute loss were given in a previous work, where n is the sample size and d the dimension of the set of predictors. Under the same weak dependence conditions, we extend this result to any convex Lipschitz loss function. We also identify a condition on the parameter space that ensures similar rates for the classical penalized ERM procedure. We apply this method for quantile forecasting of the French GDP. Under additional conditions on the loss functions (satisfied by the quadratic loss function) and for uniformly mixing processes, we prove that the Gibbs estimator actually achieves fast rates of convergence d/n. We discuss the optimality of these different rates pointing out references to lower bounds when they are available. In particular, these results bring a generalization the results of on sparse regression estimation to some auto regression."
]
"authors_fields" => array:2 [
"fr" => "Management"
"en" => "Management"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
46 => Essec\Faculty\Model\Contribution {#2313
#_index: "academ_contributions"
#_id: "13904"
#_source: array:18 [
"id" => "13904"
"slug" => "bayesian-methods-for-low-rank-matrix-estimation-short-survey-and-theoretical-study"
"yearMonth" => "2013-10"
"year" => "2013"
"title" => "Bayesian Methods for Low-Rank Matrix Estimation: Short Survey and Theoretical Study"
"description" => "ALQUIER, P. (2013). Bayesian Methods for Low-Rank Matrix Estimation: Short Survey and Theoretical Study. Dans: <i>24th International Conference on Algorithmic Learning Theory (ALT'13)</i>. Singapore: Springer Berlin Heidelberg, pp. 309-323."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "24th International Conference on Algorithmic Learning Theory (ALT'13)"
"keywords" => []
"updatedAt" => "2023-03-22 13:58:49"
"publicationUrl" => "https://link.springer.com/chapter/10.1007/978-3-642-40935-6_22"
"publicationInfo" => array:3 [
"pages" => "309-323"
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "he problem of low-rank matrix estimation recently received a lot of attention due to challenging applications. A lot of work has been done on rank-penalized methods and convex relaxation, both on the theoretical and applied sides. However, only a few papers considered Bayesian estimation. In this paper, we review the different type of priors considered on matrices to favour low-rank. We also prove that the obtained Bayesian estimators, under suitable assumptions, enjoys the same optimality properties as the ones based on penalization."
"en" => "he problem of low-rank matrix estimation recently received a lot of attention due to challenging applications. A lot of work has been done on rank-penalized methods and convex relaxation, both on the theoretical and applied sides. However, only a few papers considered Bayesian estimation. In this paper, we review the different type of priors considered on matrices to favour low-rank. We also prove that the obtained Bayesian estimators, under suitable assumptions, enjoys the same optimality properties as the ones based on penalization."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
47 => Essec\Faculty\Model\Contribution {#2314
#_index: "academ_contributions"
#_id: "13905"
#_source: array:18 [
"id" => "13905"
"slug" => "rank-penalized-estimation-of-a-quantum-system"
"yearMonth" => "2013-09"
"year" => "2013"
"title" => "Rank-penalized estimation of a quantum system"
"description" => "ALQUIER, P., BUTUCEA, C., HEBIRI, M., MEZIANI, K. et MORIMAE, T. (2013). Rank-penalized estimation of a quantum system. <i>Physical Review A</i>, 88(3)."
"authors" => array:5 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "BUTUCEA Cristina"
]
2 => array:1 [
"name" => "HEBIRI Mohamed"
]
3 => array:1 [
"name" => "MEZIANI Katia"
]
4 => array:1 [
"name" => "MORIMAE Tomoyuki"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 10:14:40"
"publicationUrl" => "https://journals.aps.org/pra/abstract/10.1103/PhysRevA.88.032113"
"publicationInfo" => array:3 [
"pages" => ""
"volume" => "88"
"number" => "3"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We introduce a method to reconstruct the density matrix ρ of a system of n qubits and estimate its rank d from data obtained by quantum-state-tomography measurements repeated m times. The procedure consists of minimizing the risk of a linear estimator ˆρ of ρ penalized by a given rank (from 1 to 2n), where ˆρ is previously obtained by the moment method. We obtain simultaneously an estimator of the rank and the resulting density matrix associated to this rank. We establish an upper bound for the error of the penalized estimator, evaluated with the Frobenius norm, which is of order dn(4/3)n/m and consistent for the estimator of the rank. The proposed methodology is computationally efficient and is illustrated with some example states and real experimental data sets."
"en" => "We introduce a method to reconstruct the density matrix ρ of a system of n qubits and estimate its rank d from data obtained by quantum-state-tomography measurements repeated m times. The procedure consists of minimizing the risk of a linear estimator ˆρ of ρ penalized by a given rank (from 1 to 2n), where ˆρ is previously obtained by the moment method. We obtain simultaneously an estimator of the rank and the resulting density matrix associated to this rank. We establish an upper bound for the error of the penalized estimator, evaluated with the Frobenius norm, which is of order dn(4/3)n/m and consistent for the estimator of the rank. The proposed methodology is computationally efficient and is illustrated with some example states and real experimental data sets."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
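A generic sketch (assuming numpy; the matrix, noise level and penalty constant are made up, and the quantum-measurement side of the paper is not modelled) of the rank-penalized principle described in the abstract above: among SVD truncations of a preliminary estimator, pick the rank minimizing the squared Frobenius error plus a penalty proportional to the rank.

    import numpy as np

    rng = np.random.default_rng(5)
    n_dim = 16                                    # ambient dimension (illustrative stand-in)
    true_rank = 3
    A = rng.standard_normal((n_dim, true_rank))
    Y = A @ A.T / true_rank + 0.05 * rng.standard_normal((n_dim, n_dim))   # noisy preliminary estimate

    U, S, Vt = np.linalg.svd(Y)
    pen = 0.5                                     # penalty per unit of rank (illustrative)
    # squared Frobenius error of the best rank-d truncation is the sum of the discarded singular values squared
    crit = [np.sum(S[d:] ** 2) + pen * d for d in range(1, n_dim + 1)]
    d_hat = 1 + int(np.argmin(crit))
    Y_hat = (U[:, :d_hat] * S[:d_hat]) @ Vt[:d_hat, :]

    print("selected rank:", d_hat)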
48 => Essec\Faculty\Model\Contribution {#2315
#_index: "academ_contributions"
#_id: "13906"
#_source: array:18 [
"id" => "13906"
"slug" => "adaptive-estimation-of-the-density-matrix-in-quantum-homodyne-tomography-with-noisy-data"
"yearMonth" => "2013-06"
"year" => "2013"
"title" => "Adaptive estimation of the density matrix in quantum homodyne tomography with noisy data"
"description" => "ALQUIER, P., MEZIANI, K. et PEYRÉ, G. (2013). Adaptive estimation of the density matrix in quantum homodyne tomography with noisy data. <i>Inverse Problems</i>, 29(7), pp. 075017."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "MEZIANI K"
]
2 => array:1 [
"name" => "PEYRÉ G"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 10:16:12"
"publicationUrl" => "https://iopscience.iop.org/article/10.1088/0266-5611/29/7/075017/meta"
"publicationInfo" => array:3 [
"pages" => "075017"
"volume" => "29"
"number" => "7"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
49 => Essec\Faculty\Model\Contribution {#2316
#_index: "academ_contributions"
#_id: "13907"
#_source: array:18 [
"id" => "13907"
"slug" => "sparse-single-index-model"
"yearMonth" => "2013-01"
"year" => "2013"
"title" => "Sparse Single-Index Model"
"description" => "ALQUIER, P. et BIAU, G. (2013). Sparse Single-Index Model. <i>Journal of Machine Learning Research</i>, 14, pp. 243-280."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "BIAU Gérard"
]
]
"ouvrage" => ""
"keywords" => array:6 [
0 => "single-index model"
1 => "sparsity"
2 => "regression estimation"
3 => "PAC-Bayesian"
4 => "oracle inequality"
5 => "reversible jump Markov chain Monte Carlo method"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://jmlr.csail.mit.edu/papers/v14/alquier13a.html"
"publicationInfo" => array:3 [
"pages" => "243-280"
"volume" => "14"
"number" => null
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "We consider the single-index model estimation problem from a sparsity perspective using a PAC-Bayesian approach. On the theoretical side, we offer a sharp oracle inequality, which is more powerful than the best known oracle inequalities for other common procedures of single-index recovery. The proposed method is implemented by means of the reversible jump Markov chain Monte Carlo technique and its performance is compared with that of standard procedures."
"en" => "We consider the single-index model estimation problem from a sparsity perspective using a PAC-Bayesian approach. On the theoretical side, we offer a sharp oracle inequality, which is more powerful than the best known oracle inequalities for other common procedures of single-index recovery. The proposed method is implemented by means of the reversible jump Markov chain Monte Carlo technique and its performance is compared with that of standard procedures."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
50 => Essec\Faculty\Model\Contribution {#2317
#_index: "academ_contributions"
#_id: "13908"
#_source: array:18 [
"id" => "13908"
"slug" => "pac-bayesian-estimation-and-prediction-in-sparse-additive-models"
"yearMonth" => "2013-01"
"year" => "2013"
"title" => "PAC-Bayesian estimation and prediction in sparse additive models"
"description" => "GUEDJ, B. et ALQUIER, P. (2013). PAC-Bayesian estimation and prediction in sparse additive models. <i>The Electronic Journal of Statistics</i>, 7, pp. 264-291."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "GUEDJ Benjamin"
]
]
"ouvrage" => ""
"keywords" => array:7 [
0 => "Additive models"
1 => "MCMC"
2 => "Oracle inequality"
3 => "PAC-Bayesian bounds"
4 => "Regression estimation"
5 => "Sparsity"
6 => "stochastic search"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://projecteuclid.org/journals/electronic-journal-of-statistics/volume-7/issue-none/PAC-Bayesian-estimation-and-prediction-in-sparse-additive-models/10.1214/13-EJS771.full"
"publicationInfo" => array:3 [
"pages" => "264-291"
"volume" => "7"
"number" => null
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "The present paper is about estimation and prediction in high-dimensional additive models under a sparsity assumption (p ≫ n paradigm). A PAC-Bayesian strategy is investigated, delivering oracle inequalities in probability. The implementation is performed through recent outcomes in high-dimensional MCMC algorithms, and the performance of our method is assessed on simulated data."
"en" => "The present paper is about estimation and prediction in high-dimensional additive models under a sparsity assumption (p ≫ n paradigm). A PAC-Bayesian strategy is investigated, delivering oracle inequalities in probability. The implementation is performed through recent outcomes in high-dimensional MCMC algorithms, and the performance of our method is assessed on simulated data."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
51 => Essec\Faculty\Model\Contribution {#2318
#_index: "academ_contributions"
#_id: "13909"
#_source: array:18 [
"id" => "13909"
"slug" => "prediction-of-quantiles-by-statistical-learning-and-application-to-gdp-forecasting"
"yearMonth" => "2012-10"
"year" => "2012"
"title" => "Prediction of Quantiles by Statistical Learning and Application to GDP Forecasting"
"description" => "ALQUIER, P. et LI, X. (2012). Prediction of Quantiles by Statistical Learning and Application to GDP Forecasting. Dans: <i>15th International Conference on Discovery Science (DS'12)</i>. Lyon: Springer Berlin Heidelberg, pp. 22-36."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "LI Xiang"
]
]
"ouvrage" => "15th International Conference on Discovery Science (DS'12)"
"keywords" => []
"updatedAt" => "2023-03-22 13:57:13"
"publicationUrl" => "https://link.springer.com/chapter/10.1007/978-3-642-33492-4_5"
"publicationInfo" => array:3 [
"pages" => "22-36"
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "n this paper, we tackle the problem of prediction and confidence intervals for time series using a statistical learning approach and quantile loss functions. In a first time, we show that the Gibbs estimator is able to predict as well as the best predictor in a given family for a wide set of loss functions. In particular, using the quantile loss function of [1], this allows to build confidence intervals. We apply these results to the problem of prediction and confidence regions for the French Gross Domestic Product (GDP) growth, with promising results."
"en" => "n this paper, we tackle the problem of prediction and confidence intervals for time series using a statistical learning approach and quantile loss functions. In a first time, we show that the Gibbs estimator is able to predict as well as the best predictor in a given family for a wide set of loss functions. In particular, using the quantile loss function of [1], this allows to build confidence intervals. We apply these results to the problem of prediction and confidence regions for the French Gross Domestic Product (GDP) growth, with promising results."
]
"authors_fields" => array:2 [
"fr" => "Management"
"en" => "Management"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
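A small numerical illustration (assuming numpy; the data and the search grid are synthetic) of the quantile loss underlying the abstract above: minimizing the "pinball" loss over a constant predictor recovers the empirical quantile, which is what makes this loss suitable for building confidence intervals.

    import numpy as np

    def pinball(u, tau):
        # quantile ("pinball") loss at level tau
        return np.where(u >= 0, tau * u, (tau - 1) * u)

    rng = np.random.default_rng(4)
    y = rng.standard_normal(1000)
    tau = 0.9
    grid = np.linspace(-3, 3, 601)
    risk = np.array([pinball(y - q, tau).mean() for q in grid])
    q_hat = grid[risk.argmin()]

    print("pinball minimizer:", q_hat, " empirical 0.9-quantile:", np.quantile(y, tau))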
52 => Essec\Faculty\Model\Contribution {#2319
#_index: "academ_contributions"
#_id: "13910"
#_source: array:18 [
"id" => "13910"
"slug" => "model-selection-for-weakly-dependent-time-series-forecasting"
"yearMonth" => "2012-08"
"year" => "2012"
"title" => "Model selection for weakly dependent time series forecasting"
"description" => "ALQUIER, P. et WINTENBERGER, O. (2012). Model selection for weakly dependent time series forecasting. <i>Bernoulli: A Journal of Mathematical Statistics and Probability</i>, 18(3), pp. 883-913."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "WINTENBERGER Olivier"
]
]
"ouvrage" => ""
"keywords" => array:8 [
0 => "adaptative inference"
1 => "aggregation of estimators"
2 => "autoregression estimation"
3 => "model selection"
4 => "randomized estimators"
5 => "statistical learning"
6 => "time series prediction"
7 => "weak dependence"
]
"updatedAt" => "2023-03-22 10:26:35"
"publicationUrl" => "https://projecteuclid.org/journals/bernoulli/volume-18/issue-3/Model-selection-for-weakly-dependent-time-series-forecasting/10.3150/11-BEJ359.full"
"publicationInfo" => array:3 [
"pages" => "883-913"
"volume" => "18"
"number" => "3"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Observing a stationary time series, we propose a two-steps procedure for the prediction of its next value. The first step follows machine learning theory paradigm and consists in determining a set of possible predictors as randomized estimators in (possibly numerous) different predictive models. The second step follows the model selection paradigm and consists in choosing one predictor with good properties among all the predictors of the first step. We study our procedure for two different types of observations: causal Bernoulli shifts and bounded weakly dependent processes. In both cases, we give oracle inequalities: the risk of the chosen predictor is close to the best prediction risk in all predictive models that we consider. We apply our procedure for predictive models as linear predictors, neural networks predictors and nonparametric autoregressive predictors."
"en" => "Observing a stationary time series, we propose a two-steps procedure for the prediction of its next value. The first step follows machine learning theory paradigm and consists in determining a set of possible predictors as randomized estimators in (possibly numerous) different predictive models. The second step follows the model selection paradigm and consists in choosing one predictor with good properties among all the predictors of the first step. We study our procedure for two different types of observations: causal Bernoulli shifts and bounded weakly dependent processes. In both cases, we give oracle inequalities: the risk of the chosen predictor is close to the best prediction risk in all predictive models that we consider. We apply our procedure for predictive models as linear predictors, neural networks predictors and nonparametric autoregressive predictors."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
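A much simplified stand-in (assuming numpy; the AR(2) toy series, the candidate orders and the hold-out split are illustrative, and least-squares hold-out selection replaces the paper's randomized estimators and oracle inequalities) for the two-step idea in the abstract above: build several candidate predictors, then select one by its empirical prediction risk.

    import numpy as np

    rng = np.random.default_rng(6)
    T = 500
    x = np.zeros(T)
    for t in range(2, T):                                 # toy AR(2) series
        x[t] = 0.5 * x[t - 1] - 0.3 * x[t - 2] + rng.standard_normal()

    def lagged(series, p):
        # design matrix of p lags and the aligned one-step-ahead targets
        X = np.column_stack([series[p - j: len(series) - j] for j in range(1, p + 1)])
        return X, series[p:]

    train, valid = x[:350], x[350:]
    best_p, best_err = None, np.inf
    for p in range(1, 6):                                 # candidate autoregressive orders
        Xtr, ytr = lagged(train, p)
        coef = np.linalg.lstsq(Xtr, ytr, rcond=None)[0]
        Xva, yva = lagged(valid, p)
        err = np.mean((yva - Xva @ coef) ** 2)            # one-step-ahead hold-out risk
        if err < best_err:
            best_p, best_err = p, err

    print("selected autoregressive order:", best_p)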
53 => Essec\Faculty\Model\Contribution {#2320
#_index: "academ_contributions"
#_id: "13911"
#_source: array:18 [
"id" => "13911"
"slug" => "transductive-versions-of-the-lasso-and-the-dantzig-selector"
"yearMonth" => "2012-09"
"year" => "2012"
"title" => "Transductive versions of the LASSO and the Dantzig Selector"
"description" => "ALQUIER, P. et HEBIRI, M. (2012). Transductive versions of the LASSO and the Dantzig Selector. <i>Journal of Statistical Planning and Inference</i>, 142(9), pp. 2485-2500."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "HEBIRI Mohamed"
]
]
"ouvrage" => ""
"keywords" => array:6 [
0 => "High-dimensional data"
1 => "LASSO"
2 => "Sparsity"
3 => "High-dimensional regression estimation"
4 => "Variable selection"
5 => "Transduction"
]
"updatedAt" => "2023-03-22 10:24:37"
"publicationUrl" => "https://www.sciencedirect.com/science/article/pii/S037837581200136X"
"publicationInfo" => array:3 [
"pages" => "2485-2500"
"volume" => "142"
"number" => "9"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Transductive methods are useful in prediction problems when the training dataset is composed of a large number of unlabeled observations and a smaller number of labeled observations. In this paper, we propose an approach for developing transductive prediction procedures that are able to take advantage of the sparsity in the high dimensional linear regression. More precisely, we define transductive versions of the LASSO (Tibshirani, 1996) and the Dantzig Selector (Candès and Tao, 2007). These procedures combine labeled and unlabeled observations of the training dataset to produce a prediction for the unlabeled observations. We propose an experimental study of the transductive estimators that shows that they improve the LASSO and Dantzig Selector in many situations, and particularly in high dimensional problems when the predictors are correlated. We then provide non-asymptotic theoretical guarantees for these estimation methods. Interestingly, our theoretical results show that the Transductive LASSO and Dantzig Selector satisfy sparsity inequalities under weaker assumptions than those required for the “original” LASSO."
"en" => "Transductive methods are useful in prediction problems when the training dataset is composed of a large number of unlabeled observations and a smaller number of labeled observations. In this paper, we propose an approach for developing transductive prediction procedures that are able to take advantage of the sparsity in the high dimensional linear regression. More precisely, we define transductive versions of the LASSO (Tibshirani, 1996) and the Dantzig Selector (Candès and Tao, 2007). These procedures combine labeled and unlabeled observations of the training dataset to produce a prediction for the unlabeled observations. We propose an experimental study of the transductive estimators that shows that they improve the LASSO and Dantzig Selector in many situations, and particularly in high dimensional problems when the predictors are correlated. We then provide non-asymptotic theoretical guarantees for these estimation methods. Interestingly, our theoretical results show that the Transductive LASSO and Dantzig Selector satisfy sparsity inequalities under weaker assumptions than those required for the “original” LASSO."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
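To make the baseline concrete, here is a minimal sketch, using scikit-learn's off-the-shelf Lasso with assumed sample sizes and an arbitrary alpha, of fitting the plain LASSO on the labeled part of a dataset and predicting the unlabeled part. The transductive estimators studied in the paper modify this baseline by also exploiting the unlabeled design, which is not reproduced here.

# Plain-LASSO baseline on labeled data, predictions on unlabeled data.
# Sizes, noise level and alpha are illustrative assumptions.
import numpy as np
from sklearn.linear_model import Lasso

rng = np.random.default_rng(0)
n_labeled, n_unlabeled, p = 50, 200, 100
beta = np.zeros(p)
beta[:5] = 1.0                                        # sparse ground truth
X = rng.normal(size=(n_labeled + n_unlabeled, p))
y_labeled = X[:n_labeled] @ beta + 0.1 * rng.normal(size=n_labeled)

model = Lasso(alpha=0.1).fit(X[:n_labeled], y_labeled)
pred_unlabeled = model.predict(X[n_labeled:])         # predictions for unlabeled rows
print(pred_unlabeled[:5])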
54 => Essec\Faculty\Model\Contribution {#2321
#_index: "academ_contributions"
#_id: "13912"
#_source: array:18 [
"id" => "13912"
"slug" => "generalization-of-constraints-for-high-dimensional-regression-problems"
"yearMonth" => "2011-12"
"year" => "2011"
"title" => "Generalization of constraints for high dimensional regression problems"
"description" => "ALQUIER, P. et HEBIRI, M. (2011). Generalization of constraints for high dimensional regression problems. <i>Statistics & Probability Letters</i>, 81(12), pp. 1760-1765."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "HEBIRI Mohamed"
]
]
"ouvrage" => ""
"keywords" => array:5 [
0 => "High-dimensional data"
1 => "LASSO"
2 => "Restricted eigenvalue assumption"
3 => "Sparsity"
4 => "Variable selection"
]
"updatedAt" => "2023-03-22 12:07:29"
"publicationUrl" => "https://www.sciencedirect.com/science/article/pii/S0167715211002422"
"publicationInfo" => array:3 [
"pages" => "1760-1765"
"volume" => "81"
"number" => "12"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
55 => Essec\Faculty\Model\Contribution {#2322
#_index: "academ_contributions"
#_id: "13913"
#_source: array:18 [
"id" => "13913"
"slug" => "sparsity-considerations-for-dependent-variables"
"yearMonth" => "2011-08"
"year" => "2011"
"title" => "Sparsity considerations for dependent variables"
"description" => "ALQUIER, P. et DOUKHAN, P. (2011). Sparsity considerations for dependent variables. <i>The Electronic Journal of Statistics</i>, 5, pp. 750-774."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "DOUKHAN Paul"
]
]
"ouvrage" => ""
"keywords" => array:8 [
0 => "Density estimation"
1 => "deviation of empirical mean"
2 => "Estimation in high dimension"
3 => "Lasso"
4 => "Penalization"
5 => "Regression estimation"
6 => "Sparsity"
7 => "Weak dependence"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://projecteuclid.org/journals/electronic-journal-of-statistics/volume-5/issue-none/Sparsity-considerations-for-dependent-variables/10.1214/11-EJS626.full"
"publicationInfo" => array:3 [
"pages" => "750-774"
"volume" => "5"
"number" => null
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "The aim of this paper is to provide a comprehensive introduction for the study of ℓ1-penalized estimators in the context of dependent observations. We define a general ℓ1-penalized estimator for solving problems of stochastic optimization. This estimator turns out to be the LASSO [Tib96] in the regression estimation setting. Powerful theoretical guarantees on the statistical performances of the LASSO were provided in recent papers, however, they usually only deal with the iid case. Here, we study this estimator under various dependence assumptions."
"en" => "The aim of this paper is to provide a comprehensive introduction for the study of ℓ1-penalized estimators in the context of dependent observations. We define a general ℓ1-penalized estimator for solving problems of stochastic optimization. This estimator turns out to be the LASSO [Tib96] in the regression estimation setting. Powerful theoretical guarantees on the statistical performances of the LASSO were provided in recent papers, however, they usually only deal with the iid case. Here, we study this estimator under various dependence assumptions."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
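For reference, the ℓ1-penalized estimator discussed in the entry above reduces, in the regression setting, to the textbook LASSO criterion (λ > 0 is a tuning parameter):
\[
  \hat\beta \;\in\; \arg\min_{\beta \in \mathbb{R}^p} \Big\{ \tfrac{1}{n}\sum_{i=1}^n \big(Y_i - X_i^\top \beta\big)^2 \;+\; \lambda \|\beta\|_1 \Big\}.
\]
The paper's contribution is the analysis of this type of criterion under dependence rather than i.i.d. sampling.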
56 => Essec\Faculty\Model\Contribution {#2323
#_index: "academ_contributions"
#_id: "13914"
#_source: array:18 [
"id" => "13914"
"slug" => "pac-bayesian-bounds-for-sparse-regression-estimation-with-exponential-weights"
"yearMonth" => "2011-03"
"year" => "2011"
"title" => "PAC-Bayesian bounds for sparse regression estimation with exponential weights"
"description" => "ALQUIER, P. et LOUNICI, K. (2011). PAC-Bayesian bounds for sparse regression estimation with exponential weights. <i>The Electronic Journal of Statistics</i>, 5, pp. 127-145."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "LOUNICI Karim"
]
]
"ouvrage" => ""
"keywords" => array:4 [
0 => "Exponential weights"
1 => "high-dimensional regression"
2 => "PAC-Bayesian inequalities"
3 => "Sparsity oracle inequality"
]
"updatedAt" => "2023-03-22 12:55:44"
"publicationUrl" => "https://projecteuclid.org/journals/electronic-journal-of-statistics/volume-5/issue-none/PAC-Bayesian-bounds-for-sparse-regression-estimation-with-exponential-weights/10.1214/11-EJS601.full"
"publicationInfo" => array:3 [
"pages" => "127-145"
"volume" => "5"
"number" => ""
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We consider the sparse regression model where the number of parameters p is larger than the sample size n. The difficulty when considering high-dimensional problems is to propose estimators achieving a good compromise between statistical and computational performances. The Lasso is solution of a convex minimization problem, hence computable for large value of p. However stringent conditions on the design are required to establish fast rates of convergence for this estimator. Dalalyan and Tsybakov proposed an exponential weights procedure achieving a good compromise between the statistical and computational aspects. This estimator can be computed for reasonably large p and satisfies a sparsity oracle inequality in expectation for the empirical excess risk only under mild assumptions on the design."
"en" => "We consider the sparse regression model where the number of parameters p is larger than the sample size n. The difficulty when considering high-dimensional problems is to propose estimators achieving a good compromise between statistical and computational performances. The Lasso is solution of a convex minimization problem, hence computable for large value of p. However stringent conditions on the design are required to establish fast rates of convergence for this estimator. Dalalyan and Tsybakov proposed an exponential weights procedure achieving a good compromise between the statistical and computational aspects. This estimator can be computed for reasonably large p and satisfies a sparsity oracle inequality in expectation for the empirical excess risk only under mild assumptions on the design."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
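As a toy illustration of the exponential-weights principle mentioned above, the sketch below aggregates a finite set of candidate predictors with weights proportional to exp(-eta × empirical risk). It is only a generic illustration with an arbitrary temperature eta; the paper's estimator instead uses a sparsity-inducing prior over regression coefficients.

# Toy exponential-weights aggregation over a finite set of predictors.
import numpy as np

def exponential_weights(preds, y, eta=1.0):
    # preds: array of shape (M, n), predictions of M candidates at n points
    # y: array of shape (n,), observed values
    risks = np.mean((preds - y) ** 2, axis=1)    # empirical quadratic risks
    w = np.exp(-eta * (risks - risks.min()))     # shift for numerical stability
    w /= w.sum()
    aggregated = preds.T @ w                     # exponentially weighted prediction
    return w, aggregated

# tiny usage example with three constant predictors
y = np.array([1.0, 1.2, 0.8])
preds = np.vstack([np.full(3, 0.0), np.full(3, 1.0), np.full(3, 2.0)])
w, agg = exponential_weights(preds, y, eta=5.0)
print(w, agg)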
57 => Essec\Faculty\Model\Contribution {#2324
#_index: "academ_contributions"
#_id: "13915"
#_source: array:18 [
"id" => "13915"
"slug" => "an-algorithm-for-iterative-selection-of-blocks-of-features"
"yearMonth" => "2010-10"
"year" => "2010"
"title" => "An Algorithm for Iterative Selection of Blocks of Features"
"description" => "ALQUIER, P. (2010). An Algorithm for Iterative Selection of Blocks of Features. Dans: <i>21st International Conference on Algorithmic Learning Theory (ALT'10)</i>. Caberra: Springer Berlin Heidelberg, pp. 35-49."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "21st International Conference on Algorithmic Learning Theory (ALT'10)"
"keywords" => array:5 [
0 => "Feature Selection"
1 => "Sparsity"
2 => "Linear Regression"
3 => "Grouped Variables"
4 => "ArrayCGH"
]
"updatedAt" => "2023-03-22 13:10:17"
"publicationUrl" => "https://link.springer.com/chapter/10.1007/978-3-642-16108-7_7"
"publicationInfo" => array:3 [
"pages" => "35-49"
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We focus on the problem of linear regression estimation in high dimension, when the parameter β is ”sparse” (most of its coordinates are 0) and ”blocky” (β i and β i + 1 are likely to be equal). Recently, some authors defined estimators taking into account this information, such as the Fused-LASSO [19] or the S-LASSO among others. However, there are no theoretical results about the obtained estimators in the general design matrix case. Here, we propose an alternative point of view, based on the Iterative Feature Selection method. We propose an iterative algorithm that takes into account the fact that β is sparse and blocky, with no prior knowledge on the position of the blocks. Moreover, we give a theoretical result that ensures that every step of our algorithm actually improves the statistical performance of the obtained estimator. We provide some simulations, where our method outperforms LASSO-type methods in the cases where the parameter is sparse and blocky. Moreover, we give an application to real data (CGH arrays), that shows that our estimator can be used on large datasets."
"en" => "We focus on the problem of linear regression estimation in high dimension, when the parameter β is ”sparse” (most of its coordinates are 0) and ”blocky” (β i and β i + 1 are likely to be equal). Recently, some authors defined estimators taking into account this information, such as the Fused-LASSO [19] or the S-LASSO among others. However, there are no theoretical results about the obtained estimators in the general design matrix case. Here, we propose an alternative point of view, based on the Iterative Feature Selection method. We propose an iterative algorithm that takes into account the fact that β is sparse and blocky, with no prior knowledge on the position of the blocks. Moreover, we give a theoretical result that ensures that every step of our algorithm actually improves the statistical performance of the obtained estimator. We provide some simulations, where our method outperforms LASSO-type methods in the cases where the parameter is sparse and blocky. Moreover, we give an application to real data (CGH arrays), that shows that our estimator can be used on large datasets."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
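For context on the "sparse and blocky" structure discussed above, the Fused-LASSO cited in the abstract (a related method, not the paper's own iterative algorithm) penalizes both the coefficients and their successive differences:
\[
  \hat\beta \;\in\; \arg\min_{\beta} \Big\{ \sum_{i=1}^n \big(Y_i - X_i^\top \beta\big)^2 \;+\; \lambda_1 \sum_{j=1}^p |\beta_j| \;+\; \lambda_2 \sum_{j=2}^p |\beta_j - \beta_{j-1}| \Big\},
\]
with tuning parameters λ1, λ2 > 0; the first penalty encourages sparsity, the second encourages equal neighboring coefficients (blocks).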
58 => Essec\Faculty\Model\Contribution {#2325
#_index: "academ_contributions"
#_id: "13916"
#_source: array:18 [
"id" => "13916"
"slug" => "pac-bayesian-bounds-for-randomized-empirical-risk-minimizers"
"yearMonth" => "2008-12"
"year" => "2008"
"title" => "PAC-Bayesian bounds for randomized empirical risk minimizers"
"description" => "ALQUIER, P. (2008). PAC-Bayesian bounds for randomized empirical risk minimizers. <i>Mathematical Methods of Statistics</i>, 17(4), pp. 279-304."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-22 12:57:14"
"publicationUrl" => "https://link.springer.com/article/10.3103/S1066530708040017"
"publicationInfo" => array:3 [
"pages" => "279-304"
"volume" => "17"
"number" => "4"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "The aim of this paper is to generalize the PAC-Bayesian theorems proved by Catoni [6, 8] in the classification setting to more general problems of statistical inference. We show how to control the deviations of the risk of randomized estimators. A particular attention is paid to randomized estimators drawn in a small neighborhood of classical estimators, whose study leads to control of the risk of the latter. These results allow us to bound the risk of very general estimation procedures, as well as to perform model selection."
"en" => "The aim of this paper is to generalize the PAC-Bayesian theorems proved by Catoni [6, 8] in the classification setting to more general problems of statistical inference. We show how to control the deviations of the risk of randomized estimators. A particular attention is paid to randomized estimators drawn in a small neighborhood of classical estimators, whose study leads to control of the risk of the latter. These results allow us to bound the risk of very general estimation procedures, as well as to perform model selection."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
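The PAC-Bayesian bounds referred to above control the risk of randomized estimators. As a generic illustration only, and not the specific result of the paper, a classical bound of this family for a loss bounded in [0, 1] states that, with probability at least 1 - δ, simultaneously for every posterior ρ over parameters,
\[
  \mathbb{E}_{\theta \sim \rho}\, R(\theta) \;\le\; \mathbb{E}_{\theta \sim \rho}\, r_n(\theta) \;+\; \sqrt{\frac{\mathrm{KL}(\rho \,\|\, \pi) + \log\frac{2\sqrt{n}}{\delta}}{2n}},
\]
where r_n is the empirical risk, π a prior fixed before seeing the data, and KL the Kullback-Leibler divergence.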
59 => Essec\Faculty\Model\Contribution {#2326
#_index: "academ_contributions"
#_id: "13917"
#_source: array:18 [
"id" => "13917"
"slug" => "lasso-iterative-feature-selection-and-the-correlation-selector-oracle-inequalities-and-numerical-performances"
"yearMonth" => "2008-11"
"year" => "2008"
"title" => "LASSO, Iterative Feature Selection and the Correlation Selector: Oracle inequalities and numerical performances"
"description" => "ALQUIER, P. (2008). LASSO, Iterative Feature Selection and the Correlation Selector: Oracle inequalities and numerical performances. <i>The Electronic Journal of Statistics</i>, 2, pp. 1129-1152."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:5 [
0 => "Confidence regions"
1 => "Lasso"
2 => "Regression estimation"
3 => "shrinkage and thresholding methods"
4 => "Statistical learning"
]
"updatedAt" => "2023-03-22 12:59:39"
"publicationUrl" => "https://projecteuclid.org/journals/electronic-journal-of-statistics/volume-2/issue-none/LASSO-Iterative-Feature-Selection-and-the-Correlation-Selector--Oracle/10.1214/08-EJS288.full"
"publicationInfo" => array:3 [
"pages" => "1129-1152"
"volume" => "2"
"number" => ""
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We propose a general family of algorithms for regression estimation with quadratic loss, on the basis of geometrical considerations. These algorithms are able to select relevant functions into a large dictionary. We prove that a lot of methods that have already been studied for this task (LASSO, Dantzig selector, Iterative Feature Selection, among others) belong to our family, and exhibit another particular member of this family that we call Correlation Selector in this paper. Using general properties of our family of algorithm we prove oracle inequalities for IFS, for the LASSO and for the Correlation Selector, and compare numerical performances of these estimators on a toy example."
"en" => "We propose a general family of algorithms for regression estimation with quadratic loss, on the basis of geometrical considerations. These algorithms are able to select relevant functions into a large dictionary. We prove that a lot of methods that have already been studied for this task (LASSO, Dantzig selector, Iterative Feature Selection, among others) belong to our family, and exhibit another particular member of this family that we call Correlation Selector in this paper. Using general properties of our family of algorithm we prove oracle inequalities for IFS, for the LASSO and for the Correlation Selector, and compare numerical performances of these estimators on a toy example."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
60 => Essec\Faculty\Model\Contribution {#2327
#_index: "academ_contributions"
#_id: "13918"
#_source: array:18 [
"id" => "13918"
"slug" => "density-estimation-with-quadratic-loss-a-confidence-intervals-method"
"yearMonth" => "2008-07"
"year" => "2008"
"title" => "Density estimation with quadratic loss: a confidence intervals method"
"description" => "ALQUIER, P. (2008). Density estimation with quadratic loss: a confidence intervals method. <i>ESAIM: Probability and Statistics</i>, 12, pp. 438-463."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:5 [
0 => "Density estimation"
1 => "support vector machines"
2 => "kernel algorithms"
3 => "thresholding methods"
4 => "wavelets"
]
"updatedAt" => "2023-03-22 13:04:08"
"publicationUrl" => "https://doi.org/10.1051/ps:2007050"
"publicationInfo" => array:3 [
"pages" => "438-463"
"volume" => "12"
"number" => ""
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We propose a feature selection method for density estimation with quadratic loss. This method relies on the study of unidimensional approximation models and on the definition of confidence regions for the density thanks to these models. It is quite general and includes cases of interest like detection of relevant wavelets coefficients or selection of support vectors in SVM. In the general case, we prove that every selected feature actually improves the performance of the estimator. In the case where features are defined by wavelets, we prove that this method is adaptative near minimax (up to a log term) in some Besov spaces. We end the paper by simulations indicating that it must be possible to extend the adaptation result to other features"
"en" => "We propose a feature selection method for density estimation with quadratic loss. This method relies on the study of unidimensional approximation models and on the definition of confidence regions for the density thanks to these models. It is quite general and includes cases of interest like detection of relevant wavelets coefficients or selection of support vectors in SVM. In the general case, we prove that every selected feature actually improves the performance of the estimator. In the case where features are defined by wavelets, we prove that this method is adaptative near minimax (up to a log term) in some Besov spaces. We end the paper by simulations indicating that it must be possible to extend the adaptation result to other features"
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
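The quadratic loss used in the entry above is the integrated squared error between the estimated and the true density:
\[
  \mathcal{L}(\hat f, f) \;=\; \int \big(\hat f(x) - f(x)\big)^2 \, dx ,
\]
and the confidence-region construction of the paper is designed so that each selected feature (wavelet coefficient or support vector) provably improves the estimator with respect to this loss.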
61 => Essec\Faculty\Model\Contribution {#2328
#_index: "academ_contributions"
#_id: "13919"
#_source: array:18 [
"id" => "13919"
"slug" => "iterative-feature-selection-in-least-square-regression-estimation"
"yearMonth" => "2008-02"
"year" => "2008"
"title" => "Iterative feature selection in least square regression estimation"
"description" => "ALQUIER, P. (2008). Iterative feature selection in least square regression estimation. <i>Annales de l Institut Henri Poincare-Probabilites et Statistiques</i>, 44(1), pp. 47-88."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:5 [
0 => "Confidence regions"
1 => "Regression estimation"
2 => "Statistical learning"
3 => "Support vector machines"
4 => "Thresholding methods"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://projecteuclid.org/journals/annales-de-linstitut-henri-poincare-probabilites-et-statistiques/volume-44/issue-1/Iterative-feature-selection-in-least-square-regression-estimation/10.1214/07-AIHP106.full"
"publicationInfo" => array:3 [
"pages" => "47-88"
"volume" => "44"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Cette article présente un nouvel algorithme d’estimation de régression, dans les contextes inductifs et transductifs. L’estimateur est défini par une combinaison linéaire de fonctions choisies dans un dictionnaire donné. Les coefficients de cette combinaison sont calculés par des projections successives sur des ensembles simples. Ces ensembles sont définis comme des régions de confiance données par une inégalité de déviation (ou inégalité PAC). On démontre en particulier que chaque projection au cours de l’algorithme améliore effectivement l’estimateur obtenu. On donne tout d’abord les résultats dans le contexte inductif, où l’algorithme nécessite la connaissance de la distribution du design, puis dans le contexte transductif, plus naturel ici puisque l’algorithme s’applique sans la connaissance de cette distribution. On établit finalement un lien avec les inégalités d’oracle, permettant de montrer que notre estimateur atteint les vitesses optimales dans les espaces de Sobolev et de Besov."
"en" => "This paper presents a new algorithm to perform regression estimation, in both the inductive and transductive setting. The estimator is defined as a linear combination of functions in a given dictionary. Coefficients of the combinations are computed sequentially using projection on some simple sets. These sets are defined as confidence regions provided by a deviation (PAC) inequality on an estimator in one-dimensional models. We prove that every projection the algorithm actually improves the performance of the estimator. We give all the estimators and results at first in the inductive case, where the algorithm requires the knowledge of the distribution of the design, and then in the transductive case, which seems a more natural application for this algorithm as we do not need particular information on the distribution of the design in this case. We finally show a connection with oracle inequalities, making us able to prove that the estimator reaches minimax rates of convergence in Sobolev and Besov spaces."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
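Schematically, the sequential updates described above are projections onto confidence regions: starting from some β^(0), the algorithm iterates
\[
  \beta^{(k+1)} \;=\; \Pi_{C_{j_k}}\big(\beta^{(k)}\big),
\]
where Π_C denotes projection onto a convex confidence region C and j_k indexes the one-dimensional model used at step k. The definition of the regions C_j and the selection rule for j_k are specified in the paper; the display above is only a generic sketch of a projection-type update.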
62 => Essec\Faculty\Model\Contribution {#2329
#_index: "academ_contributions"
#_id: "13920"
#_source: array:18 [
"id" => "13920"
"slug" => "constributions-to-statistical-learning-in-sparse-models"
"yearMonth" => "2013-12"
"year" => "2013"
"title" => "Constributions to Statistical Learning in Sparse Models"
"description" => "ALQUIER, P. (2013). Constributions to Statistical Learning in Sparse Models. Paris: France."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "http://tel.archives-ouvertes.fr/tel-00915505"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "HDR"
"en" => "HDR"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "The aim of this habilitation thesis is to give an overview of my works on high-dimensional statistics and statistical learning, under various sparsity assumptions. In a first part, I will describe the major challenges of high-dimensional statistics in the context of the generic linear regression model. After a brief review of existing results, I will present the theoretical study of aggregated estimators that was done in (Alquier & Lounici 2011). The second part essentially aims at providing extensions of the various theories presented in the first part to the estimation of time series models (Alquier & Doukhan 2011, Alquier & Wintenberger 2013, Alquier & Li 2012, Alquier, Wintenberger & Li 2012). Finally, the third part presents various extensions to nonparametric models, or to specific applications such as quantum statistics (Alquier & Biau 2013, Guedj & Alquier 2013, Alquier, Meziani & Peyré 2013, Alquier, Butucea, Hebiri, Meziani & Morimae 2013, Alquier 2013, Alquier 2008). In each section, we provide explicitely the estimators used and, as much as possible, optimal oracle inequalities satisfied by these estimators."
"en" => "The aim of this habilitation thesis is to give an overview of my works on high-dimensional statistics and statistical learning, under various sparsity assumptions. In a first part, I will describe the major challenges of high-dimensional statistics in the context of the generic linear regression model. After a brief review of existing results, I will present the theoretical study of aggregated estimators that was done in (Alquier & Lounici 2011). The second part essentially aims at providing extensions of the various theories presented in the first part to the estimation of time series models (Alquier & Doukhan 2011, Alquier & Wintenberger 2013, Alquier & Li 2012, Alquier, Wintenberger & Li 2012). Finally, the third part presents various extensions to nonparametric models, or to specific applications such as quantum statistics (Alquier & Biau 2013, Guedj & Alquier 2013, Alquier, Meziani & Peyré 2013, Alquier, Butucea, Hebiri, Meziani & Morimae 2013, Alquier 2013, Alquier 2008). In each section, we provide explicitely the estimators used and, as much as possible, optimal oracle inequalities satisfied by these estimators."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
63 => Essec\Faculty\Model\Contribution {#2330
#_index: "academ_contributions"
#_id: "13976"
#_source: array:18 [
"id" => "13976"
"slug" => "estimation-of-copulas-via-maximum-mean-discrepancy"
"yearMonth" => "2023-07"
"year" => "2023"
"title" => "Estimation of Copulas via Maximum Mean Discrepancy"
"description" => "ALQUIER, P., CHERIEF-ABDELLATIF, B.E., DERUMIGNY, A. et FERMANIAN, J.D. (2023). Estimation of Copulas via Maximum Mean Discrepancy. <i>Journal of the American Statistical Association</i>, 118(543), pp. 1997-2012."
"authors" => array:4 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:2 [
"name" => "CHERIEF-ABDELLATIF Badr-Eddine"
"bid" => "B00810114"
]
2 => array:1 [
"name" => "DERUMIGNY Alexis"
]
3 => array:1 [
"name" => "FERMANIAN Jean-David"
]
]
"ouvrage" => ""
"keywords" => array:4 [
0 => "Algorithms semiparametric inference"
1 => "Copula"
2 => "Kernel methods and RKHS"
3 => "Robust procedures"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1080/01621459.2021.2024836"
"publicationInfo" => array:3 [
"pages" => "1997-2012"
"volume" => "118"
"number" => "543"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "This article deals with robust inference for parametric copula models. Estimation using canonical maximum likelihood might be unstable, especially in the presence of outliers. We propose to use a procedure based on the maximum mean discrepancy (MMD) principle. We derive nonasymptotic oracle inequalities, consistency and asymptotic normality of this new estimator. In particular, the oracle inequality holds without any assumption on the copula family, and can be applied in the presence of outliers or under misspecification."
"en" => "This article deals with robust inference for parametric copula models. Estimation using canonical maximum likelihood might be unstable, especially in the presence of outliers. We propose to use a procedure based on the maximum mean discrepancy (MMD) principle. We derive nonasymptotic oracle inequalities, consistency and asymptotic normality of this new estimator. In particular, the oracle inequality holds without any assumption on the copula family, and can be applied in the presence of outliers or under misspecification."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
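For readers unfamiliar with the maximum mean discrepancy, here is a minimal sketch of the (biased, V-statistic) squared MMD between two samples with a Gaussian kernel. It only illustrates the MMD criterion itself, not the copula estimation procedure of the paper, and the bandwidth is an arbitrary assumption.

# Squared MMD (biased V-statistic) between two samples, Gaussian kernel.
import numpy as np

def gaussian_kernel(x, y, bandwidth=1.0):
    # pairwise squared distances between rows of x (n, d) and y (m, d)
    d2 = np.sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
    return np.exp(-d2 / (2.0 * bandwidth ** 2))

def mmd2(x, y, bandwidth=1.0):
    kxx = gaussian_kernel(x, x, bandwidth).mean()
    kyy = gaussian_kernel(y, y, bandwidth).mean()
    kxy = gaussian_kernel(x, y, bandwidth).mean()
    return kxx + kyy - 2.0 * kxy

rng = np.random.default_rng(0)
x = rng.normal(size=(200, 2))
y = rng.normal(loc=0.5, size=(300, 2))
print(mmd2(x, y))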
64 => Essec\Faculty\Model\Contribution {#2331
#_index: "academ_contributions"
#_id: "13980"
#_source: array:18 [
"id" => "13980"
"slug" => "universal-robust-regression-via-maximum-mean-discrepancy"
"yearMonth" => "2024-02"
"year" => "2024"
"title" => "Universal Robust Regression via Maximum Mean Discrepancy"
"description" => "ALQUIER, P. et GERBER, M. (2024). Universal Robust Regression via Maximum Mean Discrepancy. <i>Biometrika</i>, 111(1), pp. 71-92."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "GERBER Mathieu"
]
]
"ouvrage" => ""
"keywords" => array:3 [
0 => "Maximum mean discrepancy"
1 => "Minimum distance estimator"
2 => "Robust regression"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1093/biomet/asad031"
"publicationInfo" => array:3 [
"pages" => "71-92"
"volume" => "111"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Many modern datasets are collected automatically and are thus easily contaminated by outliers. This has led to a renewed interest in robust estimation, including new notions of robustness such as robustness to adversarial contamination of the data. However, most robust estimation methods are designed for a specific model. Notably, many methods were proposed recently to obtain robust estimators in linear models, or generalized linear models, and a few were developed for very specific settings, for example beta regression or sample selection models. In this paper we develop a new approach for robust estimation in arbitrary regression models, based on maximum mean discrepancy minimization. We build two estimators which are both proven to be robust to Huber-type contamination. We obtain a non-asymptotic error bound for them and show that it is also robust to adversarial contamination, but this estimator is computationally more expensive to use in practice than the other one. As a by-product of our theoretical analysis of the proposed estimators we derive new results on kernel conditional mean embedding of distributions which are of independent interest."
"en" => "Many modern datasets are collected automatically and are thus easily contaminated by outliers. This has led to a renewed interest in robust estimation, including new notions of robustness such as robustness to adversarial contamination of the data. However, most robust estimation methods are designed for a specific model. Notably, many methods were proposed recently to obtain robust estimators in linear models, or generalized linear models, and a few were developed for very specific settings, for example beta regression or sample selection models. In this paper we develop a new approach for robust estimation in arbitrary regression models, based on maximum mean discrepancy minimization. We build two estimators which are both proven to be robust to Huber-type contamination. We obtain a non-asymptotic error bound for them and show that it is also robust to adversarial contamination, but this estimator is computationally more expensive to use in practice than the other one. As a by-product of our theoretical analysis of the proposed estimators we derive new results on kernel conditional mean embedding of distributions which are of independent interest."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
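In its generic minimum-distance form (the regression setting of the paper involves conditional distributions and is more elaborate), MMD-based estimation selects the parameter whose model distribution is closest to the empirical distribution of the data:
\[
  \hat\theta \;\in\; \arg\min_{\theta \in \Theta} \; \mathrm{MMD}\big(P_\theta, \hat P_n\big),
\]
where \hat P_n is the empirical distribution of the observations and MMD is the kernel maximum mean discrepancy.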
65 => Essec\Faculty\Model\Contribution {#2332
#_index: "academ_contributions"
#_id: "14098"
#_source: array:18 [
"id" => "14098"
"slug" => "pac-bayesian-offline-contextual-bandits-with-guarantees"
"yearMonth" => "2023-07"
"year" => "2023"
"title" => "PAC-Bayesian Offline Contextual Bandits With Guarantees"
"description" => "SAKHI, O., ALQUIER, P. et CHOPIN, N. (2023). PAC-Bayesian Offline Contextual Bandits With Guarantees. Dans: <i>40th International Conference on Machine Learning (ICML)</i>. Hawaii: Proceedings of Machine Learning Research, pp. 29777-29799."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "SAKHI Otmane"
]
2 => array:1 [
"name" => "CHOPIN Nicolas"
]
]
"ouvrage" => "40th International Conference on Machine Learning (ICML)"
"keywords" => []
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://proceedings.mlr.press/v202/sakhi23a.html"
"publicationInfo" => array:3 [
"pages" => "29777-29799"
"volume" => "202"
"number" => null
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "Royaume-Uni"
"en" => "United Kingdom"
]
"abstract" => array:2 [
"fr" => null
"en" => null
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
66 => Essec\Faculty\Model\Contribution {#2333
#_index: "academ_contributions"
#_id: "14619"
#_source: array:18 [
"id" => "14619"
"slug" => "robust-estimation-and-regression-with-mmd"
"yearMonth" => "2024-01"
"year" => "2024"
"title" => "Robust estimation and regression with MMD"
"description" => "ALQUIER, P. et GERBER, M. (2024). Robust estimation and regression with MMD. Dans: The Mathematics of Data: Workshop on Optimization and Discrete Structures. Singapore."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "GERBER Mathieu"
]
]
"ouvrage" => "The Mathematics of Data: Workshop on Optimization and Discrete Structures"
"keywords" => array:2 [
0 => "Maximum Mean Discrepancy"
1 => "robustness"
]
"updatedAt" => "2024-01-04 11:15:43"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Invité dans une conférence académique (Keynote speaker)"
"en" => "Invited speaker at an academic conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "Maximum Mean Discrepancy (MMD, based on suitable kernels) leads to estimation procedures that are consistent without any assumption on the model nor on the data-generating process. This leads to strong robustness properties in practice, and this method was already used in complex models with promising results: estimation of SDE coefficients, ccopulas, data compression, generative models in AI."
"en" => "Maximum Mean Discrepancy (MMD, based on suitable kernels) leads to estimation procedures that are consistent without any assumption on the model nor on the data-generating process. This leads to strong robustness properties in practice, and this method was already used in complex models with promising results: estimation of SDE coefficients, ccopulas, data compression, generative models in AI."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
67 => Essec\Faculty\Model\Contribution {#2334
#_index: "academ_contributions"
#_id: "14643"
#_source: array:18 [
"id" => "14643"
"slug" => "user-friendly-introduction-to-pac-bayes-bounds"
"yearMonth" => "2024-01"
"year" => "2024"
"title" => "User-friendly Introduction to PAC-Bayes Bounds"
"description" => "ALQUIER, P. (2024). <i>User-friendly Introduction to PAC-Bayes Bounds</i>. Boston - Delft: now publishers."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:9 [
0 => "Deep learning"
1 => "Model choice"
2 => "Online learning"
3 => "Variational inference"
4 => "Bayesian learning"
5 => "Classification and prediction"
6 => "Statistical learning theory"
7 => "Learning and statistical methods"
8 => "Information theory and statistics"
]
"updatedAt" => "2024-05-27 17:51:39"
"publicationUrl" => "https://www.nowpublishers.com/article/Details/MAL-100"
"publicationInfo" => array:3 [
"pages" => ""
"volume" => "17(2)"
"number" => ""
]
"type" => array:2 [
"fr" => "Livres"
"en" => "Books"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
Probably approximately correct (PAC) bounds have been an intensive field of research over the last two decades. Hundreds of papers have been published and much progress has been made, resulting in PAC-Bayes bounds becoming an important technique in machine learning.\n
The proliferation of research has made the field for a newcomer somewhat daunting. In this tutorial, the author guides the reader through the topic’s complexity and large body of publications. Covering both empirical and oracle PAC-bounds, this book serves as a primer for students and researchers who want to get to grips quickly with the subject. It provides a friendly introduction that illuminates the basic theory and points to the most important publications to gain deeper understanding of any particular aspect.
"""
"en" => """
Probably approximately correct (PAC) bounds have been an intensive field of research over the last two decades. Hundreds of papers have been published and much progress has been made, resulting in PAC-Bayes bounds becoming an important technique in machine learning.\n
\n
The proliferation of research has made the field for a newcomer somewhat daunting. In this tutorial, the author guides the reader through the topic’s complexity and large body of publications. Covering both empirical and oracle PAC-bounds, this book serves as a primer for students and researchers who want to get to grips quickly with the subject. It provides a friendly introduction that illuminates the basic theory and points to the most important publications to gain deeper understanding of any particular aspect.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
68 => Essec\Faculty\Model\Contribution {#2335
#_index: "academ_contributions"
#_id: "14700"
#_source: array:18 [
"id" => "14700"
"slug" => "dimension-free-bounds-for-sums-of-dependend-matrices-and-operators-with-heavy-tailed-distribution"
"yearMonth" => "2024-02"
"year" => "2024"
"title" => "Dimension-free bounds for sums of dependend matrices and operators with heavy-tailed distribution"
"description" => "NAKAKITA, S., ALQUIER, P. et IMAIZUMI, M. (2024). Dimension-free bounds for sums of dependend matrices and operators with heavy-tailed distribution. <i>The Electronic Journal of Statistics</i>, 18(1), pp. 1130-1159."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "NAKAKITA Shogo"
]
2 => array:1 [
"name" => "IMAIZUMI Masaaki"
]
]
"ouvrage" => ""
"keywords" => array:4 [
0 => "Dependent process"
1 => "heavy-tailed distribution"
2 => "high-dimension"
3 => "random matrix"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://doi.org/10.1214/24-EJS2224"
"publicationInfo" => array:3 [
"pages" => "1130-1159"
"volume" => "18"
"number" => "1"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "We study the deviation inequality for a sum of high-dimensional random matrices and operators with dependence and arbitrary heavy tails. There is an increase in the importance of the problem of estimating high-dimensional matrices, and dependence and heavy-tail properties of data are among the most critical topics currently. In this paper, we derive a dimension-free upper bound on the deviation, that is, the bound does not depend explicitly on the dimension of matrices, but depends on their effective rank. Our result is a generalization of several existing studies on the deviation of the sum of matrices. Our proof is based on two techniques: (i) a variational approximation of the dual of moment generating functions, and (ii) robustification through truncation of eigenvalues of matrices. We show that our results are applicable to several problems such as covariance matrix estimation, hidden Markov models, and overparameterized linear regression models."
"en" => "We study the deviation inequality for a sum of high-dimensional random matrices and operators with dependence and arbitrary heavy tails. There is an increase in the importance of the problem of estimating high-dimensional matrices, and dependence and heavy-tail properties of data are among the most critical topics currently. In this paper, we derive a dimension-free upper bound on the deviation, that is, the bound does not depend explicitly on the dimension of matrices, but depends on their effective rank. Our result is a generalization of several existing studies on the deviation of the sum of matrices. Our proof is based on two techniques: (i) a variational approximation of the dual of moment generating functions, and (ii) robustification through truncation of eigenvalues of matrices. We show that our results are applicable to several problems such as covariance matrix estimation, hidden Markov models, and overparameterized linear regression models."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
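The effective rank mentioned in the abstract above is, in its standard form (the paper may use a closely related variant), the ratio
\[
  \mathrm{r}(\Sigma) \;=\; \frac{\operatorname{tr}(\Sigma)}{\|\Sigma\|_{\mathrm{op}}},
\]
which can be much smaller than the ambient dimension when the spectrum of Σ decays quickly; this is what allows the bounds to be dimension-free.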
69 => Essec\Faculty\Model\Contribution {#2336
#_index: "academ_contributions"
#_id: "14731"
#_source: array:18 [
"id" => "14731"
"slug" => "introduction-to-pac-bayes-bounds"
"yearMonth" => "2024-03"
"year" => "2024"
"title" => "Introduction to PAC-Bayes bounds"
"description" => "ALQUIER, P. (2024). Introduction to PAC-Bayes bounds. Dans: Machine Learning Summer School in Okinawa 2024. Okinawa."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "Machine Learning Summer School in Okinawa 2024"
"keywords" => array:9 [
0 => "Deep learning"
1 => "Model choice"
2 => "Online learning"
3 => "Variational inference"
4 => "Bayesian learning"
5 => "Classification and prediction"
6 => "Statistical learning theory"
7 => "Learning and statistical methods"
8 => "Information theory and statistics"
]
"updatedAt" => "2024-03-06 17:38:38"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Invité dans une conférence académique (Keynote speaker)"
"en" => "Invited speaker at an academic conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
Probably approximately correct (PAC) bounds have been an intensive field of research over the last two decades. Hundreds of papers have been published and much progress has been made resulting in PAC-Bayes bounds becoming an important technique in machine learning.\n
\n
The proliferation of research has made the field for a newcomer somewhat daunting. In this tutorial, the author guides the reader through the topic’s complexity and large body of publications. Covering both empirical and oracle PAC-bounds, this book serves as a primer for students and researchers who want to get to grips quickly with the subject. It provides a friendly introduction that illuminates the basic theory and points to the most important publications to gain deeper understanding of any particular aspect.
"""
"en" => """
Probably approximately correct (PAC) bounds have been an intensive field of research over the last two decades. Hundreds of papers have been published and much progress has been made resulting in PAC-Bayes bounds becoming an important technique in machine learning.\n
\n
The proliferation of research has made the field for a newcomer somewhat daunting. In this tutorial, the author guides the reader through the topic’s complexity and large body of publications. Covering both empirical and oracle PAC-bounds, this book serves as a primer for students and researchers who want to get to grips quickly with the subject. It provides a friendly introduction that illuminates the basic theory and points to the most important publications to gain deeper understanding of any particular aspect.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
70 => Essec\Faculty\Model\Contribution {#2337
#_index: "academ_contributions"
#_id: "14737"
#_source: array:18 [
"id" => "14737"
"slug" => "optimistic-estimation-of-convergence-in-markov-chains-with-the-average-mixing-time"
"yearMonth" => "2024-07"
"year" => "2024"
"title" => "Optimistic Estimation of Convergence in Markov Chains with the Average Mixing Time"
"description" => "WOLFER, G. et ALQUIER, P. (2024). Optimistic Estimation of Convergence in Markov Chains with the Average Mixing Time. Dans: International Conference on Scientific Computation and Differential Equations. Singapore."
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "WOLFER Geoffrey"
]
]
"ouvrage" => "International Conference on Scientific Computation and Differential Equations"
"keywords" => array:1 [
0 => "Markov chains"
]
"updatedAt" => "2024-07-15 12:32:43"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Communications dans une conférence"
"en" => "Presentations at an Academic or Professional conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "The convergence rate of a Markov chain to its stationary distribution is typically assessed using the concept of total variation mixing time. However, this worst-case measure often yields pessimistic estimates and is challenging to infer from observations. In this paper, we advocate for the use of the average-mixing time as a more optimistic and demonstrably easier-to-estimate alternative. We further illustrate its applicability across a range of settings, from two-point to countable spaces, and discuss some practical implications."
"en" => "The convergence rate of a Markov chain to its stationary distribution is typically assessed using the concept of total variation mixing time. However, this worst-case measure often yields pessimistic estimates and is challenging to infer from observations. In this paper, we advocate for the use of the average-mixing time as a more optimistic and demonstrably easier-to-estimate alternative. We further illustrate its applicability across a range of settings, from two-point to countable spaces, and discuss some practical implications."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
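For contrast with the average mixing time advocated above, here is a minimal sketch of the classical worst-case quantity it is compared against: the total-variation distance to stationarity of a small, purely illustrative two-state chain as a function of the number of steps. This is only the standard computation, not the paper's average-mixing-time estimator.

# Worst-case total-variation distance to stationarity for a two-state chain.
import numpy as np

P = np.array([[0.9, 0.1],
              [0.2, 0.8]])            # illustrative transition matrix (assumed)
pi = np.array([2/3, 1/3])             # its stationary distribution (pi @ P == pi)

Pt = np.eye(2)
for t in range(1, 21):
    Pt = Pt @ P
    # maximum over starting states of 0.5 * ||P^t(x, .) - pi||_1
    tv = 0.5 * np.max(np.abs(Pt - pi).sum(axis=1))
    print(t, round(tv, 6))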
71 => Essec\Faculty\Model\Contribution {#2338
#_index: "academ_contributions"
#_id: "14738"
#_source: array:18 [
"id" => "14738"
"slug" => "pac-bayesian-offline-contextual-bandits-with-guarantees"
"yearMonth" => "2024-07"
"year" => "2024"
"title" => "PAC-Bayesian Offline Contextual Bandits With Guarantees"
"description" => "SAKHI, O., ALQUIER, P. et CHOPIN, N. (2024). PAC-Bayesian Offline Contextual Bandits With Guarantees. Dans: Closing Workshop of the ISBA Programme on Interpretable Inference via Principled BNP Approaches in Biomedical Research and Beyond. Singapore."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "SAKHI Otmane"
]
2 => array:1 [
"name" => "CHOPIN Nicolas"
]
]
"ouvrage" => "Closing Workshop of the ISBA Programme on Interpretable Inference via Principled BNP Approaches in Biomedical Research and Beyond"
"keywords" => []
"updatedAt" => "2024-07-31 12:31:43"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Invité dans une conférence académique (Keynote speaker)"
"en" => "Invited speaker at an academic conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => "This work introduces a new principled approach for off-policy learning in contextual bandits. Unlike previous work, our approach does not derive learning principles from intractable or loose bounds. We analyse the problem through the PAC-Bayesian lens, interpreting policies as mixtures of decision rules. This allows us to propose novel generalization bounds and provide tractable algorithms to optimize them. We prove that the derived bounds are tighter than their competitors, and can be optimized directly to confidently improve upon the logging policy offline. Our approach learns policies with guarantees, uses all available data and does not require tuning additional hyperparameters on held-out sets. We demonstrate through extensive experiments the effectiveness of our approach in providing performance guarantees in practical scenarios."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
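Off-policy learning of the kind described above typically starts from an importance-weighted estimate of a policy's risk. As a generic illustration only (not necessarily the exact objective optimized in this work), the inverse-propensity-scoring estimator of the risk of a policy π from data logged under a policy π0 is
\[
  \hat R(\pi) \;=\; \frac{1}{n} \sum_{i=1}^n \frac{\pi(a_i \mid x_i)}{\pi_0(a_i \mid x_i)}\, c_i ,
\]
where (x_i, a_i, c_i) are the logged contexts, actions and costs; PAC-Bayesian generalization bounds then control the gap between such empirical quantities and the true risk of (mixtures of) policies.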
72 => Essec\Faculty\Model\Contribution {#2339
#_index: "academ_contributions"
#_id: "14795"
#_source: array:18 [
"id" => "14795"
"slug" => "deepfakes-a-comms-persons-friend-or-foe"
"yearMonth" => "2024-05"
"year" => "2024"
"title" => "Deepfakes: a comms person's friend or foe?"
"description" => "ALQUIER, P. 2024. <i>Deepfakes: a comms person's friend or foe?</i> Mai."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => ""
"keywords" => array:4 [
0 => "Deepfakes"
1 => "AI"
2 => "corporate communication"
3 => "public relations"
]
"updatedAt" => "2024-10-31 13:51:19"
"publicationUrl" => "https://www.mynewsdesk.com/sg/hong-bao-media/videos/119588"
"publicationInfo" => array:3 [
"pages" => null
"volume" => null
"number" => null
]
"type" => array:2 [
"fr" => "Interviews : Emission radio - TV - presse écrite"
"en" => "Interviews: radio - TV - press"
]
"support_type" => array:2 [
"fr" => "Presse"
"en" => "Press"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
There is a dark side to the exciting Generative AI trend: deepfakes. But whether deepfakes are used for good or for ill is still up to the person creating them. So, what scope is there for communications professionals to use deepfakes? Appropriately? What would constitute an inappropriate use? Join us to hear from our expert speakers.\n
In this discussion, Mark Laudi and Pierre Alquier lay bare how easy it is to create a deepfake and what the limitations are, and they give participants the chance to create one using a video of themselves. They also share how to detect them.
"""
"en" => """
There is a dark side to the exciting Generative AI trend: deepfakes. But whether deepfakes are used for good or for ill is still up to the person creating them. So, what scope is there for communications professionals to use deepfakes? Appropriately? What would constitute an inappropriate use? Join us to hear from our expert speakers.\n
In this discussion, Mark Laudi and Pierre Alquier lay bare how easy it is to create a deepfake and what the limitations are, and they give participants the chance to create one using a video of themselves. They also share how to detect them.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
73 => Essec\Faculty\Model\Contribution {#2340
#_index: "academ_contributions"
#_id: "14839"
#_source: array:18 [
"id" => "14839"
"slug" => "pac-bayesian-bounds-with-applications-to-deep-learning-and-offline-contextual-bandits"
"yearMonth" => "2024-08"
"year" => "2024"
"title" => "PAC-Bayesian Bounds, with applications to Deep Learning and Offline Contextual Bandits"
"description" => "ALQUIER, P. (2024). PAC-Bayesian Bounds, with applications to Deep Learning and Offline Contextual Bandits. Dans: International Conference on Mathematical Theory of Deep Learning, Chinese Academy of Science. Beijing."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "International Conference on Mathematical Theory of Deep Learning, Chinese Academy of Science"
"keywords" => []
"updatedAt" => "2024-08-26 16:23:11"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Invité dans une conférence académique (Keynote speaker)"
"en" => "Invited speaker at an academic conference"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => ""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
74 => Essec\Faculty\Model\Contribution {#2341
#_index: "academ_contributions"
#_id: "14980"
#_source: array:18 [
"id" => "14980"
"slug" => "pac-bayes-bounds-understanding-the-generalization-of-bayesian-learning-algorithms"
"yearMonth" => "2024-10"
"year" => "2024"
"title" => "PAC-Bayes bounds: understanding the generalization of Bayesian learning algorithms."
"description" => "ALQUIER, P. (2024). PAC-Bayes bounds: understanding the generalization of Bayesian learning algorithms. Dans: Stochastics Seminar, Department of Mathematics, NUS. Singapore."
"authors" => array:1 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
]
"ouvrage" => "Stochastics Seminar, Department of Mathematics, NUS"
"keywords" => array:1 [
0 => "Machine learning, neural networks, information theory."
]
"updatedAt" => "2024-10-14 10:15:49"
"publicationUrl" => null
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Présentations dans un séminaire de recherche"
"en" => "Presentations at a Faculty research seminar"
]
"support_type" => array:2 [
"fr" => null
"en" => null
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => ""
"en" => """
The PAC-Bayesian theory provides tools to understand the accuracy of Bayes-inspired algorithms that learn probability distributions on parameters. This theory was initially developed by McAllester about 20 years ago, and applied successfully\n
to various machine learning algorithms in various problems. Recently, it led to tight generalization bounds for deep neural networks, a task that could not be achieved by standard "worst-case" generalization bounds such as Vapnik-Chervonenkis bounds. In this talk, I will provide a brief introduction to PAC-Bayes bounds, and explain the core ideas of the theory. I will also provide an overview of the recent research directions. In particular, I will highlight the application of PAC-Bayes bounds to derive minimax-optimal rates of convergence in classification and in regression, and the connection to mutual-information bounds.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
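As a companion to the talk abstract above, here is a self-contained numerical illustration of a McAllester-type PAC-Bayes bound on a toy problem: a finite class of threshold classifiers, a uniform prior, and an exponential-weights (Gibbs) posterior. The data, the class of thresholds, and the temperature `beta` are illustrative choices of mine; only the final bound formula follows the standard McAllester form mentioned in the abstract.

```python
import numpy as np

rng = np.random.default_rng(1)

# Toy binary classification data and a finite set of threshold classifiers.
n = 2000
X = rng.uniform(-1, 1, size=n)
y = (X > 0.1).astype(int)
y ^= (rng.uniform(size=n) < 0.1).astype(int)      # 10% label noise

thresholds = np.linspace(-1, 1, 41)               # finite hypothesis class H
emp_risk = np.array([np.mean((X > t).astype(int) != y) for t in thresholds])

# Prior: uniform over H.  Posterior: exponential weights on the empirical risk.
prior = np.full(len(thresholds), 1.0 / len(thresholds))
beta = 50.0
post = np.exp(-beta * emp_risk)
post /= post.sum()

kl = float(np.sum(post * np.log(post / prior)))   # KL(posterior || prior)
gibbs_emp_risk = float(post @ emp_risk)           # empirical risk of the Gibbs classifier

# McAllester's bound: with probability >= 1 - delta over the sample, the expected risk of
# the Gibbs classifier is at most the empirical risk plus this complexity term.
delta = 0.05
penalty = np.sqrt((kl + np.log(2 * np.sqrt(n) / delta)) / (2 * n))
print(f"Gibbs empirical risk: {gibbs_emp_risk:.3f}")
print(f"KL(post || prior):    {kl:.3f}")
print(f"PAC-Bayes bound:      {gibbs_emp_risk + penalty:.3f}")
```

On a typical run the complexity term is modest, so the bound stays close to the empirical risk of the Gibbs classifier, in contrast with worst-case bounds over the whole class.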
75 => Essec\Faculty\Model\Contribution {#2342
#_index: "academ_contributions"
#_id: "15169"
#_source: array:18 [
"id" => "15169"
"slug" => "concentration-and-robustness-of-discrepancy-based-abc-via-rademacher-complexity"
"yearMonth" => "2025-09"
"year" => "2025"
"title" => "Concentration and robustness of discrepancy–based ABC via Rademacher complexity"
"description" => "LEGRAMANTI, S., ALQUIER, P. et DURANTE, D. (2025). Concentration and robustness of discrepancy–based ABC via Rademacher complexity. <i>Annals of Statistics</i>."
"authors" => array:3 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "LEGRAMANTI Sirio"
]
2 => array:1 [
"name" => "DURANTE Daniele"
]
]
"ouvrage" => ""
"keywords" => array:5 [
0 => "Approximate Bayesian computation"
1 => "Integral probability semimetrics"
2 => "Maximum mean discrepancy"
3 => "Rademacher complexity"
4 => "Wasserstein distance"
]
"updatedAt" => "2024-09-30 13:49:32"
"publicationUrl" => "https://www.e-publications.org/ims/submission/AOS/user/submissionFile/64475?confirm=ade31112"
"publicationInfo" => array:3 [
"pages" => ""
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => "There has been an increasing interest on summary-free versions of approximate Bayesian computation (ABC), which replace distances among summaries with discrepancies between the empirical distributions of the observed data and the synthetic samples generated under the proposed parameter values. The success of these solutions has motivated theoretical studies on the limiting properties of the induced posteriors. However, current results (i) are often tailored to a specific discrepancy, (ii) require, either explicitly or implicitly, regularity conditions on the data generating process and the assumed statistical model, and (iii) yield bounds depending on sequences of control functions that are not made explicit. As such, there is the lack of a theoretical framework that (i) is unified, (ii) facilitates the derivation of limiting properties that hold uniformly, and (iii) relies on verifiable assumptions that provide concentration bounds clarifying which factors govern the limiting behavior of the ABC posterior. We address this gap via a novel theoretical framework that introduces the concept of Rademacher complexity in the analysis of the limiting properties for discrepancy-based ABC posteriors. This yields a unified theory that relies on constructive arguments and provides more informative asymptotic results and uniform concentration bounds, even in settings not covered by current studies. These advancements are obtained by relating the properties of summary-free ABC posteriors to the behavior of the Rademacher complexity associated with the chosen discrepancy within the family of integral probability semimetrics. This family extends summary-based ABC, and includes the Wasserstein distance and maximum mean discrepancy (MMD), among others. As clarified through a focus on the MMD case and via illustrative simulations, this perspective yields an improved understanding of summary-free ABC."
"en" => "There has been an increasing interest on summary-free versions of approximate Bayesian computation (ABC), which replace distances among summaries with discrepancies between the empirical distributions of the observed data and the synthetic samples generated under the proposed parameter values. The success of these solutions has motivated theoretical studies on the limiting properties of the induced posteriors. However, current results (i) are often tailored to a specific discrepancy, (ii) require, either explicitly or implicitly, regularity conditions on the data generating process and the assumed statistical model, and (iii) yield bounds depending on sequences of control functions that are not made explicit. As such, there is the lack of a theoretical framework that (i) is unified, (ii) facilitates the derivation of limiting properties that hold uniformly, and (iii) relies on verifiable assumptions that provide concentration bounds clarifying which factors govern the limiting behavior of the ABC posterior. We address this gap via a novel theoretical framework that introduces the concept of Rademacher complexity in the analysis of the limiting properties for discrepancy-based ABC posteriors. This yields a unified theory that relies on constructive arguments and provides more informative asymptotic results and uniform concentration bounds, even in settings not covered by current studies. These advancements are obtained by relating the properties of summary-free ABC posteriors to the behavior of the Rademacher complexity associated with the chosen discrepancy within the family of integral probability semimetrics. This family extends summary-based ABC, and includes the Wasserstein distance and maximum mean discrepancy (MMD), among others. As clarified through a focus on the MMD case and via illustrative simulations, this perspective yields an improved understanding of summary-free ABC."
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2024-11-21T07:21:52.000Z"
]
+lang: "en"
+"_type": "_doc"
+"_score": 4.794264
+"parent": null
}
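The abstract above studies discrepancy-based (summary-free) ABC, with the MMD as a leading example. Below is a minimal rejection-ABC sketch using a Gaussian-kernel MMD on a toy location model with a few gross outliers, which hints at the robustness angle; the model, kernel bandwidth, tolerance, and prior are all illustrative assumptions of mine, not quantities from the paper.

```python
import numpy as np

rng = np.random.default_rng(2)

def gauss_kernel(a, b, bandwidth=1.0):
    """Gaussian kernel matrix between two 1-D samples."""
    d2 = (a[:, None] - b[None, :]) ** 2
    return np.exp(-d2 / (2 * bandwidth ** 2))

# Observed data: N(theta_true, 1) with 5% gross outliers.
n, theta_true = 200, 2.0
x_obs = rng.normal(theta_true, 1.0, size=n)
x_obs[:10] = 20.0

kxx = gauss_kernel(x_obs, x_obs).mean()  # reused for every proposal

def mmd2(x_sim):
    """Squared MMD (V-statistic) between the observed and simulated samples."""
    return kxx + gauss_kernel(x_sim, x_sim).mean() - 2 * gauss_kernel(x_obs, x_sim).mean()

# Summary-free rejection ABC: draw theta from the prior, simulate, keep small-MMD draws.
n_prop, tol = 500, 0.05
proposals = rng.uniform(-5, 10, size=n_prop)  # flat prior on the location parameter
accepted = np.array([th for th in proposals if mmd2(rng.normal(th, 1.0, size=n)) < tol])

print(f"accepted {len(accepted)} / {n_prop} proposals")
print(f"ABC posterior mean: {accepted.mean():.2f}  (true location = {theta_true})")
```

Because the MMD downweights isolated outliers, the accepted draws concentrate near the true location despite the contamination, which is the kind of behaviour the paper's concentration and robustness results formalize.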
]
"avatar" => "https://faculty.essec.edu/wp-content/uploads/avatars/B00809923.jpg"
"contributionCounts" => 76
"personalLinks" => array:2 [
0 => "<a href="https://orcid.org/0000-0003-4249-7337" target="_blank">ORCID</a>"
1 => "<a href="https://scholar.google.com/citations?user=ngkCAJYAAAAJ" target="_blank">Google scholar</a>"
]
"docTitle" => "Pierre ALQUIER"
"docSubtitle" => "Professor"
"docDescription" => "Department: Information Systems, Data Analytics and Operations<br>Campus de Singapour"
"docType" => "cv"
"docPreview" => "<img src="https://faculty.essec.edu/wp-content/uploads/avatars/B00809923.jpg"><span><span>Pierre ALQUIER</span><span>B00809923</span></span>"
"academ_cv_info" => ""
]
#_index: "academ_cv"
+lang: "en"
+"_type": "_doc"
+"_score": 5.0369525
+"parent": null
}