Essec\Faculty\Model\Contribution {#2233
#_index: "academ_contributions"
#_id: "15212"
#_source: array:26 [
"id" => "15212"
"slug" => "15212-logarithmic-smoothing-for-pessimistic-off-policy-evaluation-selection-and-learning"
"yearMonth" => "2024-12"
"year" => "2024"
"title" => "Logarithmic Smoothing for Pessimistic Off-Policy Evaluation, Selection and Learning"
"description" => "SAKHI, O., AOUALI, I., ALQUIER, P. et CHOPIN, N. (2024). Logarithmic Smoothing for Pessimistic Off-Policy Evaluation, Selection and Learning. Dans: <i>38th Conference on Neural Information Processing Systems (NeurIPS'24)</i>. Vancouver: Curran Associates, Inc, pp. 80706-80755.
SAKHI, O., AOUALI, I., ALQUIER, P. et CHOPIN, N. (2024). Logarithmic Smoothing for Pessimistic Off-P
"
"authors" => array:4 [
0 => array:3 [
"name" => "ALQUIER Pierre"
"bid" => "B00809923"
"slug" => "alquier-pierre"
]
1 => array:1 [
"name" => "SAKHI Otmane"
]
2 => array:1 [
"name" => "AOUALI Imad"
]
3 => array:1 [
"name" => "CHOPIN Nicolas"
]
]
"ouvrage" => "38th Conference on Neural Information Processing Systems (NeurIPS'24)"
"keywords" => []
"updatedAt" => "2025-04-02 09:13:33"
"publicationUrl" => "https://arxiv.org/abs/2405.14335"
"publicationInfo" => array:3 [
"pages" => "80706-80755"
"volume" => ""
"number" => ""
]
"type" => array:2 [
"fr" => "Actes d'une conférence"
"en" => "Conference Proceedings"
]
"support_type" => array:2 [
"fr" => "Editeur"
"en" => "Publisher"
]
"countries" => array:2 [
"fr" => "États-Unis"
"en" => "United States of America"
]
"abstract" => array:2 [
"fr" => "This work investigates the offline formulation of the contextual bandit problem, where the goal is to leverage past interactions collected under a behavior policy to evaluate, select, and learn new, potentially better-performing, policies. Motivated by critical applications, we move beyond point estimators. Instead, we adopt the principle of pessimism where we construct upper bounds that assess a policy's worst-case performance, enabling us to confidently select and learn improved policies. Precisely, we introduce novel, fully empirical concentration bounds for a broad class of importance weighting risk estimators. These bounds are general enough to cover most existing estimators and pave the way for the development of new ones. In particular, our pursuit of the tightest bound within this class motivates a novel estimator (LS), that logarithmically smooths large importance weights. The bound for LS is provably tighter than all its competitors, and naturally results in improved policy selection and learning strategies. Extensive policy evaluation, selection, and learning experiments highlight the versatility and favorable performance of LS.
This work investigates the offline formulation of the contextual bandit problem, where the goal is t
"
"en" => "This work investigates the offline formulation of the contextual bandit problem, where the goal is to leverage past interactions collected under a behavior policy to evaluate, select, and learn new, potentially better-performing, policies. Motivated by critical applications, we move beyond point estimators. Instead, we adopt the principle of pessimism where we construct upper bounds that assess a policy's worst-case performance, enabling us to confidently select and learn improved policies. Precisely, we introduce novel, fully empirical concentration bounds for a broad class of importance weighting risk estimators. These bounds are general enough to cover most existing estimators and pave the way for the development of new ones. In particular, our pursuit of the tightest bound within this class motivates a novel estimator (LS), that logarithmically smooths large importance weights. The bound for LS is provably tighter than all its competitors, and naturally results in improved policy selection and learning strategies. Extensive policy evaluation, selection, and learning experiments highlight the versatility and favorable performance of LS.
This work investigates the offline formulation of the contextual bandit problem, where the goal is t
"
]
"authors_fields" => array:2 [
"fr" => "Systèmes d'Information, Data Analytics et Opérations"
"en" => "Information Systems, Data Analytics and Operations"
]
"indexedAt" => "2025-04-02T13:21:49.000Z"
"docTitle" => "Logarithmic Smoothing for Pessimistic Off-Policy Evaluation, Selection and Learning"
"docSurtitle" => "Actes d'une conférence"
"authorNames" => "<a href="/cv/alquier-pierre">ALQUIER Pierre</a>, SAKHI Otmane, AOUALI Imad, CHOPIN Nicolas"
"docDescription" => "<span class="document-property-authors">ALQUIER Pierre, SAKHI Otmane, AOUALI Imad, CHOPIN Nicolas</span><br><span class="document-property-authors_fields">Systèmes d'Information, Data Analytics et Opérations</span> | <span class="document-property-year">2024</span>
<span class="document-property-authors">ALQUIER Pierre, SAKHI Otmane, AOUALI Imad, CHOPIN Nicolas</s
"
"keywordList" => ""
"docPreview" => "<b>Logarithmic Smoothing for Pessimistic Off-Policy Evaluation, Selection and Learning</b><br><span>2024-12 | Actes d'une conférence </span>
<b>Logarithmic Smoothing for Pessimistic Off-Policy Evaluation, Selection and Learning</b><br><span>
"
"docType" => "research"
"publicationLink" => "<a href="https://arxiv.org/abs/2405.14335" target="_blank">Logarithmic Smoothing for Pessimistic Off-Policy Evaluation, Selection and Learning</a>
<a href="https://arxiv.org/abs/2405.14335" target="_blank">Logarithmic Smoothing for Pessimistic Off
"
]
+lang: "fr"
+"_type": "_doc"
+"_score": 8.742511
+"parent": null
}
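
The abstract in this record sketches the key idea: raw importance-weighted rewards have heavy tails that make point estimates unreliable, so the authors smooth large importance weights logarithmically before averaging. As a rough illustration only, not the paper's exact LS estimator (whose definition and concentration bounds are given at https://arxiv.org/abs/2405.14335), the Python sketch below contrasts vanilla inverse-propensity scoring with a hypothetical smoothed variant. The transform log(1 + lam*x)/lam, the choice of lam, and the synthetic logged data are all assumptions made for illustration.

import numpy as np

def ips_estimate(rewards, target_probs, behavior_probs):
    """Vanilla inverse-propensity scoring: average of w_i * r_i."""
    w = target_probs / behavior_probs  # importance weights
    return np.mean(w * rewards)

def smoothed_estimate(rewards, target_probs, behavior_probs, lam=0.1):
    """Hypothetical logarithmically smoothed variant (illustration only):
    log(1 + lam*x)/lam is concave, tends to x as lam -> 0, and damps
    large weighted rewards, trading variance for a pessimistic bias."""
    w = target_probs / behavior_probs
    return np.mean(np.log1p(lam * w * rewards) / lam)

# Synthetic logged-bandit data (assumed for the example).
rng = np.random.default_rng(0)
n = 10_000
behavior_probs = rng.uniform(0.05, 1.0, size=n)  # logged propensities
target_probs = rng.uniform(0.0, 1.0, size=n)     # target policy probabilities
rewards = rng.binomial(1, 0.3, size=n).astype(float)

print("IPS     :", ips_estimate(rewards, target_probs, behavior_probs))
print("Smoothed:", smoothed_estimate(rewards, target_probs, behavior_probs))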