Essec\Faculty\Model\Contribution {#2233 ▼
#_index: "academ_contributions"
#_id: "13864"
#_source: array:26 [
"id" => "13864"
"slug" => "13864-concentration-of-tempered-posteriors-and-of-their-variational-approximations"
"yearMonth" => "2020-06"
"year" => "2020"
"title" => "Concentration of tempered posteriors and of their variational approximations"
"description" => "ALQUIER, P. et RIDGWAY, J. (2020). Concentration of tempered posteriors and of their variational approximations. <i>Annals of Statistics</i>, 48(3), pp. 1475-1497.
ALQUIER, P. et RIDGWAY, J. (2020). Concentration of tempered posteriors and of their variational app
"
"authors" => array:2 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "RIDGWAY James"
]
]
"ouvrage" => ""
"keywords" => []
"updatedAt" => "2023-03-17 01:00:45"
"publicationUrl" => "https://doi.org/10.1214/19-AOS1855"
"publicationInfo" => array:3 [
"pages" => "1475-1497"
"volume" => "48"
"number" => "3"
]
"type" => array:2 [
"fr" => "Articles"
"en" => "Journal articles"
]
"support_type" => array:2 [
"fr" => "Revue scientifique"
"en" => "Scientific journal"
]
"countries" => array:2 [
"fr" => null
"en" => null
]
"abstract" => array:2 [
"fr" => """
While Bayesian methods are extremely popular in statistics and machine\n
learning, their application to massive data sets is often challenging, when\n
possible at all. The classical MCMC algorithms are prohibitively slow when\n
both the model dimension and the sample size are large. Variational Bayesian\n
methods aim at approximating the posterior by a distribution in a tractable\n
family F. Thus, MCMC are replaced by an optimization algorithm which is\n
orders of magnitude faster. VB methods have been applied in such computationally demanding applications as collaborative filtering, image and video\n
processing or NLP, to name a few. However, despite nice results in practice,\n
the theoretical properties of these approximations are not known. We propose\n
a general oracle inequality that relates the quality of the VB approximation to\n
the prior π and to the structure of F. We provide a simple condition that allows one to derive rates of convergence from this oracle inequality. We apply our\n
theory to various examples. First, we show that for parametric models with\n
log-Lipschitz likelihood, Gaussian VB leads to efficient algorithms and consistent estimators. We then study a high-dimensional example: matrix completion, and a nonparametric example: density estimation.
"""
"en" => """
While Bayesian methods are extremely popular in statistics and machine\n
learning, their application to massive data sets is often challenging, when\n
possible at all. The classical MCMC algorithms are prohibitively slow when\n
both the model dimension and the sample size are large. Variational Bayesian\n
methods aim at approximating the posterior by a distribution in a tractable\n
family F. Thus, MCMC are replaced by an optimization algorithm which is\n
orders of magnitude faster. VB methods have been applied in such computationally demanding applications as collaborative filtering, image and video\n
processing or NLP, to name a few. However, despite nice results in practice,\n
the theoretical properties of these approximations are not known. We propose\n
a general oracle inequality that relates the quality of the VB approximation to\n
the prior π and to the structure of F. We provide a simple condition that allows one to derive rates of convergence from this oracle inequality. We apply our\n
theory to various examples. First, we show that for parametric models with\n
log-Lipschitz likelihood, Gaussian VB leads to efficient algorithms and consistent estimators. We then study a high-dimensional example: matrix completion, and a nonparametric example: density estimation.
"""
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2025-04-02T19:21:40.000Z"
"docTitle" => "Concentration of tempered posteriors and of their variational approximations"
"docSurtitle" => "Articles"
"authorNames" => "<a href="/cv/alquier-pierre">ALQUIER Pierre</a>, RIDGWAY James"
"docDescription" => "<span class="document-property-authors">ALQUIER Pierre, RIDGWAY James</span><br><span class="document-property-authors_fields">Systèmes d'Information, Data Analytics et Opérations</span> | <span class="document-property-year">2020</span>
<span class="document-property-authors">ALQUIER Pierre, RIDGWAY James</span><br><span class="documen
"
"keywordList" => ""
"docPreview" => "<b>Concentration of tempered posteriors and of their variational approximations</b><br><span>2020-06 | Articles </span>
<b>Concentration of tempered posteriors and of their variational approximations</b><br><span>2020-06
"
"docType" => "research"
"publicationLink" => "<a href="https://doi.org/10.1214/19-AOS1855" target="_blank">Concentration of tempered posteriors and of their variational approximations</a>
<a href="https://doi.org/10.1214/19-AOS1855" target="_blank">Concentration of tempered posteriors an
"
]
+lang: "fr"
+"_type": "_doc"
+"_score": 8.539033
+"parent": null
}
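
For context, a minimal sketch of the two objects the abstract refers to, with notation assumed from the cited paper (ALQUIER & RIDGWAY, 2020) rather than from this record: the tempered posterior raises the likelihood L_n to a power alpha in (0, 1) before normalizing,

    % tempered posterior: likelihood tempered by \alpha \in (0,1)
    \pi_{n,\alpha}(d\theta) \propto L_n(\theta)^{\alpha}\, \pi(d\theta),

and the variational approximation is the Kullback-Leibler projection of this tempered posterior onto the tractable family \mathcal{F},

    % VB approximation: KL projection onto the family \mathcal{F}
    \tilde{\pi}_{n,\alpha} = \operatorname*{arg\,min}_{\rho \in \mathcal{F}} \mathrm{KL}\left(\rho \,\middle\|\, \pi_{n,\alpha}\right).

The oracle inequality announced in the abstract bounds the quality of \tilde{\pi}_{n,\alpha} in terms of the prior \pi and the structure of \mathcal{F}, which is exactly the dependence described above.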