Publications of Alessandro Arlotto
%% Papers Published
@article{fds338563,
Author = {Arlotto, A and Xie, X},
Title = {Logarithmic regret in the dynamic and stochastic knapsack
problem with equal rewards},
Journal = {Stochastic Systems},
Volume = {10},
Number = {2},
Pages = {170--191},
Year = {2020},
Month = jun,
Abstract = {We study a dynamic and stochastic knapsack problem in which
a decision maker is sequentially presented with items
arriving according to a Bernoulli process over n discrete
time periods. Items have equal rewards and independent
weights that are drawn from a known nonnegative continuous
distribution F. The decision maker seeks to maximize the
expected total reward of the items that the decision maker
includes in the knapsack while satisfying a capacity
constraint and while making terminal decisions as soon as
each item weight is revealed. Under mild regularity
conditions on the weight distribution F, we prove that the
regret---the expected difference between the performance of
the best sequential algorithm and that of a prophet who sees
all of the weights before making any decision---is, at most,
logarithmic in n. Our proof is constructive. We devise a
reoptimized heuristic that achieves this regret
bound.},
Doi = {10.1287/stsy.2019.0055},
Key = {fds338563}
}
@article{fds330135,
Author    = {Arlotto, A and Frazelle, AE and Wei, Y},
Title     = {Strategic open routing in service networks},
Journal   = {Management Science},
Publisher = {INFORMS},
Year      = {2018},
Key       = {fds330135}
}
@article{fds330137,
Author = {Arlotto, A and Gurvich, I},
Title = {Uniformly bounded regret in the multi-secretary
problem},
Year = {2017},
Month = oct,
Abstract = {In the secretary problem of Cayley (1875) and Moser (1956),
$n$ non-negative, independent, random variables with common
distribution are sequentially presented to a decision maker
who decides when to stop and collect the most recent
realization. The goal is to maximize the expected value of
the collected element. In the $k$-choice variant, the
decision maker is allowed to make $k \leq n$ selections to
maximize the expected total value of the selected elements.
Assuming that the values are drawn from a known distribution
with finite support, we prove that the best regret---the
expected gap between the optimal online policy and its
offline counterpart in which all $n$ values are made visible
at time $0$---is uniformly bounded in the number of
candidates $n$ and the budget $k$. Our proof is
constructive: we develop an adaptive Budget-Ratio policy
that achieves this performance. The policy selects or skips
values depending on where the ratio of the residual budget
to the remaining time stands relative to multiple thresholds
that correspond to middle points of the distribution. We
also prove that being adaptive is crucial: in general, the
minimal regret among non-adaptive policies grows like the
square root of $n$. The difference is the value of
adaptiveness.},
Key = {fds330137}
}
@article{fds322098,
Author = {Arlotto, A and Steele, JM},
Title = {A central limit theorem for temporally nonhomogenous {Markov}
chains with applications to dynamic programming},
Journal = {Mathematics of Operations Research},
Volume = {41},
Number = {4},
Pages = {1448--1468},
Publisher = {Institute for Operations Research and the Management
Sciences (INFORMS)},
Year = {2016},
Month = nov,
Abstract = {{\copyright} 2016 INFORMS. We prove a central limit theorem for a
class of additive processes that arise naturally in the
theory of finite horizon Markov decision problems. The main
theorem generalizes a classic result of Dobrushin for
temporally nonhomogeneous Markov chains, and the principal
innovation is that here the summands are permitted to depend
on both the current state and a bounded number of future
states of the chain. We show through several examples that
this added flexibility gives one a direct path to asymptotic
normality of the optimal total reward of finite horizon
Markov decision problems. The same examples also explain why
such results are not easily obtained by alternative
Markovian techniques such as enlargement of the state
space.},
Doi = {10.1287/moor.2016.0784},
Key = {fds322098}
}
@article{fds322099,
Author = {Arlotto, A and Mossel, E and Steele, JM},
Title = {Quickest online selection of an increasing subsequence of
specified size},
Journal = {Random Structures \& Algorithms},
Volume = {49},
Number = {2},
Pages = {235--252},
Publisher = {Wiley},
Year = {2016},
Month = sep,
Abstract = {Given a sequence of independent random variables with a
common continuous distribution, we consider the online
decision problem where one seeks to minimize the expected
value of the time that is needed to complete the selection
of a monotone increasing subsequence of a prespecified
length n. This problem is dual to some online decision
problems that have been considered earlier, and this dual
problem has some notable advantages. In particular, the
recursions and equations of optimality lead with relative
ease to asymptotic formulas for mean and variance of the
minimal selection time. {\copyright} 2016 Wiley Periodicals, Inc.
Random Struct. Alg., 49, 235--252, 2016.},
Doi = {10.1002/rsa.20634},
Key = {fds322099}
}
@article{fds330138,
Author = {Arlotto, A and Steele, JM},
Title = {{Beardwood}--{Halton}--{Hammersley} theorem for stationary
ergodic sequences: a counterexample},
Journal = {The Annals of Applied Probability},
Volume = {26},
Number = {4},
Pages = {2141--2168},
Publisher = {Institute of Mathematical Statistics},
Year = {2016},
Month = aug,
Doi = {10.1214/15-AAP1142},
Key = {fds330138}
}
@article{fds319303,
Author = {Arlotto, A and Nguyen, VV and Steele, JM},
Title = {Optimal online selection of a monotone subsequence: A
central limit theorem},
Journal = {Stochastic Processes and Their Applications},
Volume = {125},
Number = {9},
Pages = {3596--3622},
Publisher = {Elsevier BV},
Year = {2015},
Month = jul,
Abstract = {Consider a sequence of n independent random variables with a
common continuous distribution F, and consider the task of
choosing an increasing subsequence where the observations
are revealed sequentially and where an observation must be
accepted or rejected when it is first revealed. There is a
unique selection policy $\pi_n^*$ that is optimal in the sense
that it maximizes the expected value of $L_n(\pi_n^*)$,
the number of selected observations. We investigate the
distribution of $L_n(\pi_n^*)$; in particular, we
obtain a central limit theorem for $L_n(\pi_n^*)$ and
a detailed understanding of its mean and variance for large
n. Our results and methods are complementary to the work of
Bruss and Delbaen (2004) where an analogous central limit
theorem is found for monotone increasing selections from a
finite sequence with cardinality N where N is a Poisson
random variable that is independent of the
sequence.},
Doi = {10.1016/j.spa.2015.03.009},
Key = {fds319303}
}
@article{fds319304,
Author = {Arlotto, A and Gans, N and Steele, JM},
Title = {{Markov} decision problems where means bound
variances},
Journal = {Operations Research},
Volume = {62},
Number = {4},
Pages = {864--875},
Publisher = {Institute for Operations Research and the Management
Sciences (INFORMS)},
Year = {2014},
Month = aug,
Doi = {10.1287/opre.2014.1281},
Key = {fds319304}
}
@article{fds330139,
Author = {Arlotto, A and Steele, JM},
Title = {Optimal Online Selection of an Alternating Subsequence: A
Central Limit Theorem},
Journal = {Advances in Applied Probability},
Volume = {46},
Number = {2},
Pages = {536--559},
Publisher = {Cambridge University Press (CUP)},
Year = {2014},
Month = jun,
Abstract = {We analyze the optimal policy for the sequential
selection of an alternating subsequence from a sequence
of $n$ independent observations from
a continuous distribution $F$, and
we prove a central limit theorem for the number of
selections made by that policy. The proof exploits the
backward recursion of dynamic programming and assembles a
detailed understanding of the associated value functions and
selection rules.},
Doi = {10.1239/aap/1401369706},
Key = {fds330139}
}
@article{fds319305,
Author = {Arlotto, A and Chick, SE and Gans, N},
Title = {Optimal hiring and retention policies for heterogeneous
workers who learn},
Journal = {Management Science},
Volume = {60},
Number = {1},
Pages = {110--129},
Publisher = {Institute for Operations Research and the Management
Sciences (INFORMS)},
Year = {2014},
Month = jan,
Doi = {10.1287/mnsc.2013.1754},
Key = {fds319305}
}
@article{fds319306,
Author = {Arlotto, A and Chen, RW and Shepp, LA and Steele,
JM},
Title = {Online Selection of Alternating Subsequences from a Random
Sample},
Journal = {Journal of Applied Probability},
Volume = {48},
Number = {4},
Pages = {1114--1132},
Publisher = {Cambridge University Press (CUP)},
Year = {2011},
Month = dec,
Abstract = {We consider sequential selection of an alternating
subsequence from a sequence of independent, identically
distributed, continuous random variables, and we determine
the exact asymptotic behavior of an optimal sequentially
selected subsequence. Moreover, we find (in a sense we make
precise) that a person who is constrained to make sequential
selections does only about 12 percent worse than a person
who can make selections with full knowledge of the random
sequence.},
Doi = {10.1239/jap/1324046022},
Key = {fds319306}
}
@article{fds319307,
Author = {Arlotto, A and Steele, JM},
Title = {Optimal sequential selection of a unimodal subsequence of a
random sequence},
Journal = {Combinatorics, Probability and Computing},
Volume = {20},
Number = {6},
Pages = {799--814},
Publisher = {Cambridge University Press (CUP)},
Year = {2011},
Month = nov,
Abstract = {We consider the problem of selecting sequentially a unimodal
subsequence from a sequence of independent identically
distributed random variables, and we find that a person
doing optimal sequential selection does so within a factor
of the square root of two as well as a prophet who knows all
of the random observations in advance of any selections. Our
analysis applies in fact to selections of subsequences that
have d+1 monotone blocks, and, by including the case d=0,
our analysis also covers monotone subsequences. {\copyright} 2011
Cambridge University Press.},
Doi = {10.1017/S0963548311000411},
Key = {fds319307}
}
@inproceedings{fds319308,
Author = {Arlotto, A and Gans, N and Chick, S},
Title = {Optimal employee retention when inferring unknown learning
curves},
Booktitle = {Proceedings of the Winter Simulation Conference},
Pages = {1178--1188},
Publisher = {IEEE},
Editor = {Johansson, B and Jain, S and Montoya-Torres, J and Hugan, J and Y{\"u}cesan, E},
Year = {2010},
Abstract = {This paper formulates an employer's hiring and retention
decisions as an infinite-armed bandit problem and
characterizes the structure of optimal hiring and retention
policies. We develop approximations that allow us to
explicitly calculate these policies and to evaluate their
benefit. The solution involves a balance of two types of
learning: the learning that reflects the improvement in
performance of employees as they gain experience, and the
Bayesian learning of employers as they infer properties of
employees' abilities to inform the decision of whether to
retain or replace employees. Numerical experiments with
Monte Carlo simulation suggest that the gains to active
screening and monitoring of employees can be substantial.
{\copyright} 2010 IEEE.},
Doi = {10.1109/WSC.2010.5679074},
Key = {fds319308}
}
@article{fds319309,
Author = {Arlotto, A and Scarsini, M},
Title = {{Hessian} orders and multinormal distributions},
Journal = {Journal of Multivariate Analysis},
Volume = {100},
Number = {10},
Pages = {2324--2330},
Publisher = {Elsevier BV},
Year = {2009},
Month = nov,
Abstract = {Several well known integral stochastic orders (like the
convex order, the supermodular order, etc.) can be defined
in terms of the Hessian matrix of a class of functions. Here
we consider a generic Hessian order, i.e., an integral
stochastic order defined through a convex cone H of Hessian
matrices, and we prove that if two random vectors are
ordered by the Hessian order, then their means are equal and
the difference of their covariance matrices belongs to the
dual of H. Then we show that the same conditions are also
sufficient for multinormal random vectors. We study several
particular cases of this general result. {\copyright} 2009 Elsevier
Inc. All rights reserved.},
Doi = {10.1016/j.jmva.2009.03.009},
Key = {fds319309}
}