Economics
Arts & Sciences
Duke University

 HOME > Arts & Sciences > Economics    Search Help Login pdf version printable version 
Webpage

Economics : Publications since January 2018

List all publications in the database.    :chronological  alphabetical  combined listing:
%% Abdulkadiroglu, Atila   
@article{fds325223,
   Author = {Abdulkadiro{\u{g}}lu, A and Pathak, PA and Walters,
              CR},
   Title = {Free to choose: Can school choice reduce student
              achievement?},
   Journal = {American Economic Journal: Applied Economics},
   Volume = {10},
   Number = {1},
   Pages = {175--206},
   Publisher = {American Economic Association},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1257/app.20160634},
   Abstract = {A central argument for school choice is that parents can
              choose schools wisely. This principle may underlie why
              lottery-based school evaluations have almost always reported
              positive or zero achievement effects. This paper reports on
              a striking counterexample to these results. We use
              randomized lotteries to evaluate the Louisiana Scholarship
              Program, a voucher plan that provides public funds for
              disadvantaged students to attend private schools. LSP
              participation lowers math scores by 0.4 standard deviations
              and also reduces achievement in reading, science, and social
              studies. These effects may be due in part to selection of
              low-quality private schools into the program.},
   Doi = {10.1257/app.20160634},
   Key = {fds325223}
}


%% Ambrus, Attila   
@article{fds343589,
   Author = {Ambrus, A and Greiner, B and Zednik, A},
   Title = {The Effects of a `None of the Above' Ballot Paper Option
              on Voting Behavior and Election Outcomes},
   Journal = {Economic Research Initiatives at Duke (ERID) Working
              Paper},
   Number = {277},
   Year = {2019},
   Month = {March},
   Key = {fds343589}
}

@article{fds326998,
   Author = {Ambrus, A and Kolb, A},
   Title = {On Defining Ex Ante Payoffs in Games with Diffuse
              Prior},
   Journal = {Economic Research Initiatives at Duke (ERID) Working
              Paper},
   Number = {246},
   Year = {2018},
   Month = {March},
   Key = {fds326998}
}

@article{fds325427,
   Author = {Ambrus, A and Chaney, E and Salitskiy, I},
   Title = {Pirates of the {Mediterranean}: An empirical investigation of
              bargaining with asymmetric information},
   Journal = {Quantitative Economics},
   Volume = {9},
   Number = {1},
   Pages = {217--246},
   Publisher = {The Econometric Society},
   Year = {2018},
   Month = {March},
   url = {http://dx.doi.org/10.3982/QE655},
   Abstract = {Copyright {\copyright} 2018 The Authors. We investigate the effect of
              delay on prices in bargaining situations using a data set
              containing thousands of captives ransomed from Barbary
              pirates between 1575 and 1692. Plausibly exogenous variation
              in the delay in ransoming provides evidence that negotiating
              delays decreased the size of ransom payments, and that much
              of the effect stems from the signalling value of strategic
              delay, in accordance with theoretical predictions. We also
              structurally estimate a version of the screening type
              bargaining model, adjusted to our context, and find that the
              model fits both the observed prices and acceptance
              probabilities well.},
   Doi = {10.3982/QE655},
   Key = {fds325427}
}

@article{fds320208,
   Author = {Ambrus, A and Baranovskyi, V and Kolb, A},
   Title = {A Delegation-Based Theory of Expertise},
   Journal = {Economic Research Initiatives at Duke (ERID) Working
              Paper},
   Number = {193},
   Pages = {45 pages},
   Year = {2018},
   Month = {February},
   Abstract = {We investigate competition in a delegation framework, with a
              coarsely informed principal. Two imperfectly informed and
              biased experts simultaneously propose action choices. A
              principal with a diffuse prior, and only being able to
              ordinally compare the two proposals, has to choose one of
              them. The selected expert might receive a bonus payment. We
              show that having a second expert benefits the principal,
              even if the two experts have the same biases and the bonus
              of the winner is zero. In contrast with other models of
              expertise, in our setting the principal prefers experts with
              equal rather than opposite biases. Increasing the bonus
              brings experts closer to truthful reporting, but this only
              benefits the principal up to a threshold level, with further
              increases in the bonus strictly decreasing her payoffs. A
              methodological contribution of our paper is characterizing
              restrictions on the set of strategies which allows a formal
              generalization of ex ante expected payoffs to games with
              diffuse prior.},
   Key = {fds320208}
}

@article{fds320209,
   Author = {Ambrus, A and Baranovskyi, V and Kolb, A},
   Title = {Supplementary Appendix to 'A Delegation-Based Theory of
              Expertise'},
   Journal = {Economic Research Initiatives at Duke (ERID) Working
              Paper},
   Number = {194},
   Pages = {16 pages},
   Year = {2018},
   Month = {February},
   Abstract = {This supplement provides welfare results not contained in
              the main text and a proof of Lemma A.1. For small bonuses, a
              mixed equilibrium exists if and only if a downward
              equilibrium exists; if so, it is unique. For large bonuses,
              we find a unique candidate for mixed equilibrium and show
              that mixed and upward equilibria cannot co-exist. Also, we
              give an example for equal biases, where this candidate is
              indeed a mixed equilibrium. However, when biases are
              different enough and the bonus is high, a mixed equilibrium
              does not exist. Though a general analytical comparison is
              infeasible, we show that mixed equilibria are inferior to
              upward equilibrium or simple delegation in various special
              cases.},
   Key = {fds320209}
}


%% Ananat, Elizabeth O.   
@article{fds339724,
   Author = {Ananat, E and Shihe, F and Ross, SL},
   Title = {Race-specific urban wage premia and the black-white wage
              gap},
   Journal = {Journal of Urban Economics},
   Volume = {108},
   Pages = {141--153},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1016/j.jue.2018.11.002},
   Abstract = {{\copyright} 2018 Elsevier Inc. We establish a novel empirical fact
              about the black-white wage gap: looking both across and
              within metropolitan areas, increasing city size or
              employment density is associated with a larger black-white
              wage gap. The estimated effects represent between 9 and 18%
              of recent estimates of the black-white wage gap. Using a
              variety of techniques, we demonstrate that our within-city
              relationship is unlikely to be driven by racial differences
              in unobserved ability. Finally, we present evidence
              suggestive of a role for race-specific networks in
              explaining these differences in the black-white wage
              gap.},
   Doi = {10.1016/j.jue.2018.11.002},
   Key = {fds339724}
}


%% Ariely, Dan   
@article{fds342495,
   Author = {Akba{\c{s}}, M and Ariely, D and Yuksel, S},
   Title = {When is inequality fair? An experiment on the effect of
              procedural justice and agency},
   Journal = {Journal of Economic Behavior and Organization},
   Volume = {161},
   Pages = {114--127},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.jebo.2019.02.014},
   Abstract = {{\copyright} 2019 Elsevier B.V. We investigate how the perceived
              fairness of an income distribution depends on the beliefs
              about the process that generates the inequality.
              Specifically, we examine how two crucial features of this
              process affect fairness views: (1) Procedural justice -
              equal treatment of all; (2) Agency -- one's ability to
              determine his/her income. We do this in a lab experiment by
              differentially varying subjects' ability to influence
              their earnings. Comparison of ex-post redistribution
              decisions of total earnings under different conditions
              indicate both agency and procedural justice to matter for
              fairness. Highlighting the importance of agency, we observe
              lower redistribution of unequal earnings resulting from risk
              when risk is chosen freely. Highlighting the importance of
              procedural justice, we find introduction of inequality of
              opportunity to significantly increase redistribution.
              Despite this increase, under inequality of opportunity, the
              share of subjects redistributing none remain close to the
              share of subjects redistributing fully revealing an
              underlying heterogeneity in the population about how
              fairness views should account for inequality of
              opportunity.},
   Doi = {10.1016/j.jebo.2019.02.014},
   Key = {fds342495}
}

@article{fds341526,
   Author = {Garcia-Rada, X and Anik, L and Ariely, D},
   Title = {Consuming together (versus separately) makes the heart grow
              fonder},
   Journal = {Marketing Letters},
   Volume = {30},
   Number = {1},
   Pages = {27--43},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1007/s11002-019-09479-7},
   Abstract = {{\copyright} 2019, Springer Science+Business Media, LLC, part of
              Springer Nature. Across three studies, we investigate how
              consumers in romantic relationships make decisions when
              choosing an item to share with their partner. We show that
              consumers will forgo their preferred alternative for an
              option that is more aligned with the preferences of their
              partner when consuming the same item together vs.
              separately. We theorize and show that when consuming
              together (vs. separately), consumers' purchase motivation
              shifts from being utilitarian (e.g., satisfying one's
              hunger) to hedonic (e.g., having an enjoyable evening).
              Consequently, when consuming together (vs. separately),
              consumers weigh more highly their partner's affective
              reactions to the item and overall experience---leading them
              to pick a less preferred option in an effort to please their
              partner. In sum, we provide a framework that contributes
              novel insight into the trade-offs consumers make between
              their preferences and the preferences of
              others.},
   Doi = {10.1007/s11002-019-09479-7},
   Key = {fds341526}
}

@article{fds342496,
   Author = {Ariely, D and Garcia-Rada, X and G{\"o}dker, K and Hornuf, L and Mann,
              H},
   Title = {The impact of two different economic systems on
              dishonesty},
   Journal = {European Journal of Political Economy},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.ejpoleco.2019.02.010},
   Abstract = {{\copyright} 2019 The Authors Using an artefactual field experiment,
              this paper tests the long-term implications of living in a
              specific economic system on individual dishonesty. By
              comparing cheating behaviour across individuals from the
              former socialist East of Germany with those of the
              capitalist West of Germany, we examine behavioural
              differences within a single country. We find long-term
              implications of living in a specific economic system for
              individual dishonesty when social interactions are possible:
              participants with an East German background cheated
              significantly more on an abstract die-rolling task than
              those with a West German background, but only when exposed
              to the enduring system of former West Germany. Moreover, our
              results indicate that the longer individuals had experienced
              socialist East Germany, the more likely they were to cheat
              on the behavioural task.},
   Doi = {10.1016/j.ejpoleco.2019.02.010},
   Key = {fds342496}
}

@article{fds341346,
   Author = {Yang, H and Carmon, Z and Ariely, D and Norton, MI},
   Title = {The Feeling of Not Knowing It All},
   Journal = {Journal of Consumer Psychology},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1002/jcpy.1089},
   Abstract = {{\copyright} 2019 Society for Consumer Psychology How do consumers
              assess their mastery of knowledge they have learned? We
              explore this question by investigating a common knowledge
              consumption situation: encountering opportunities for
              further learning. We argue and show that such opportunities
              can trigger a feeling-of-not-knowing-it-all (FONKIA), which
              lowers consumers' confidence in their mastery of the
              knowledge they already possess. Specifically, listing
              optional follow-up readings at the conclusion of a course
              lowered students' confidence in their mastery of the
              course material they had already learned (Study 1).
              Encountering an optional learning opportunity increased the
              FONKIA, which mediated the decreased confidence (Studies 2
              and 3). We also document two moderators consistent with our
              conceptualization. First, participants primed with mastery
              (vs. instrumental) motivation were more negatively impacted
              when they encountered optional learning opportunities.
              Second, the more related the optional opportunities were to
              the target topic, the lower participants' confidence in
              their mastery of what they had already learned. We conclude
              by discussing the implications of these findings, such as
              encouraging further learning or harming teaching
              evaluations.},
   Doi = {10.1002/jcpy.1089},
   Key = {fds341346}
}

@article{fds339755,
   Author = {Turner, MC and O'Brien, JD and Kahn, RM and Mantyh, CR and Migaly, J and Ariely, D},
   Title = {Impact of Disgust on Intentions to Undergo Colorectal
              Surgery.},
   Journal = {Diseases of the Colon \& Rectum},
   Volume = {61},
   Number = {12},
   Pages = {1386--1392},
   Year = {2018},
   Month = {December},
   url = {http://dx.doi.org/10.1097/DCR.0000000000001254},
   Abstract = {BACKGROUND: Surgeons present patients with complex
              information at the perioperative appointment. Emotions
              likely play a role in surgical decision-making, and disgust
              is an emotion of revulsion at a stimulus that can lead to
              avoidance. OBJECTIVE: The purpose of this study was to
              determine the impact of disgust on intention to undergo
              surgical resection for colorectal cancer and recall of
              perioperative instructions. DESIGN: This was a
              cross-sectional observational study conducted online using
              hypothetical scenarios with nonpatient subjects. SETTINGS:
              The study was conducted using Amazon's Mechanical Turk.
              PATIENTS: Survey respondents were living in the United
              States. MAIN OUTCOME MEASURES: Surgery intention and recall
              of perioperative instructions were measured. RESULTS: A
              total of 319 participants met the inclusion criteria.
              Participants in the experimental condition, who were
              provided with detailed information and pictures about stoma
              care, had significantly lower surgery intentions (mean
              $\pm$ SD, 4.60 $\pm$ 1.15) compared with the control condition
              with no stoma prompt (mean $\pm$ SD, 5.14 $\pm$ 0.91; p =
              0.05) and significantly lower recall for preoperative
              instructions (mean $\pm$ SD, 13.75 $\pm$ 2.38) compared with
              the control condition (mean $\pm$ SD, 14.36 $\pm$ 2.19; p =
              0.03). Those within the experimental conditions also
              reported significantly higher state levels of disgust (mean
              $\pm$ SD, 4.08 $\pm$ 1.74) compared with a control condition
              (mean $\pm$ SD, 2.35 $\pm$ 1.38; p $<$ 0.001). State-level
              disgust was found to fully mediate the relationship between
              condition and recall (b = -0.31) and to partially mediate
              the effect of condition on surgery intentions (b = 0.17).
              LIMITATIONS: It is unknown whether these results will
              replicate with patients and the impact of competing emotions
              in clinical settings. CONCLUSIONS: Intentions to undergo
              colorectal surgery and recall of preoperative instructions
              are diminished in patients who experience disgust when
              presented with stoma information. Surgeons and care teams
              must account for this as they perform perioperative
              counseling to minimize interference with recall of important
              perioperative information. See Video Abstract at
              http://links.lww.com/DCR/A776.},
   Doi = {10.1097/DCR.0000000000001254},
   Key = {fds339755}
}

@article{fds335813,
   Author = {O'Brien, JD and Kahn, RM and Zenko, Z and Fernandez, JR and Ariely,
              D},
   Title = {Na{\"\i}ve models of dietary splurges: Beliefs about caloric
              compensation and weight change following non-habitual
              overconsumption.},
   Journal = {Appetite},
   Volume = {128},
   Pages = {321--332},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.appet.2018.06.016},
   Abstract = {The mechanisms that lead to overeating and the consumption
              of tempting, unhealthy foods have been studied extensively,
              but the compensatory actions taken afterwards have not. Here
              we describe the na{\"\i}ve models individuals hold around
              dietary splurges (single bouts of overeating) and associated
              weight changes. Across six online experiments, we found
              that, following a hypothetical dietary splurge, participants
              did not plan to adequately adjust calorie consumption to
              account for the additional calories consumed (Studies 1 and
              2), and this pattern was worse following hypothetical
              splurges characterized by a large amount of food consumed in
              a single bout (Study 3). Participants expected weight
              changes to happen faster than they do in reality (Study 4)
              and they expected that weight gained from a dietary splurge
              would disappear on its own without explicit compensation
              attempts through diet or exercise (Study 5). Similarly,
              participants expected that when compensation attempts were
              made through calorie restriction, the rate of weight loss
              would be faster following a dietary splurge compared to
              normal eating (Study 6). This research contributes novel
              data demonstrating an important mechanism that likely
              contributes to weight gain and failed weight loss
              attempts.},
   Doi = {10.1016/j.appet.2018.06.016},
   Key = {fds335813}
}

@article{fds332890,
   Author = {Mazar, N and Mochon, D and Ariely, D},
   Title = {If You Are Going to Pay Within the Next 24 Hours, Press 1:
              Automatic Planning Prompt Reduces Credit Card
              Delinquency},
   Journal = {Journal of Consumer Psychology},
   Volume = {28},
   Number = {3},
   Pages = {466--476},
   Publisher = {WILEY},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1002/jcpy.1031},
   Abstract = {{\copyright} 2018 The Authors. Journal of Consumer Psychology
              published by Wiley Periodicals, Inc. on behalf of Society
              for Consumer Psychology. People often form intentions but
              fail to follow through on them. Mounting evidence suggests
              that such intention-action gaps can be narrowed with prompts
              to make concrete plans about when, where, and how to act to
              achieve the intention. In this paper, we pushed the notion
              of plan-concreteness to test the efficacy of a prompt under
              a minimalist automated calling setting, where respondents
              were only prompted to indicate a narrower duration within
              which they intent to act. In a field experiment, this
              planning prompt significantly helped people to pay their
              past dues and get out of debt delinquency. These results
              suggest that minimalist automatic planning prompts are a
              scalable, cost-effective intervention.},
   Doi = {10.1002/jcpy.1031},
   Key = {fds332890}
}

@article{fds332055,
   Author = {LeBlanc, TW and Bloom, N and Wolf, SP and Lowman, SG and Pollak, KI and Steinhauser, KE and Ariely, D and Tulsky, JA},
   Title = {Triadic treatment decision-making in advanced cancer: a
              pilot study of the roles and perceptions of patients,
              caregivers, and oncologists.},
   Journal = {Supportive Care in Cancer},
   Volume = {26},
   Number = {4},
   Pages = {1197--1205},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1007/s00520-017-3942-y},
   Abstract = {PURPOSE: The research on cancer treatment decision-making
              focuses on dyads; the full "triad" of patients, oncologists,
              and caregivers remains largely unstudied. We investigated
              how all members of this triad perceive and experience
              decisions related to treatment for advanced cancer. METHODS:
              At an academic cancer center, we enrolled adult patients
              with advanced gastrointestinal or hematological
              malignancies, their caregivers, and their oncologists. Triad
              members completed a semi-structured qualitative interview
              and a survey measuring decisional conflict and perceived
              influence of the other triad members on treatment decisions.
              RESULTS: Seventeen patients, 14 caregivers, and 10
              oncologists completed the study. Patients and caregivers
              reported little decisional regret and voiced high
              satisfaction with their decisions, but levels of decisional
              conflict were high. We found sizeable disagreement among
              triad members' perceptions and preferences. For example,
              patients and oncologists disagreed about the caregiver's
              influence on the decision 56% of the time. In addition, many
              patients and caregivers preferred to defer to their
              oncologist about treatment decisions, felt like no true
              decision existed, and disagreed with their oncologist about
              how many treatment options had been presented. CONCLUSIONS:
              Patients, caregivers, and oncologists have discordant
              perceptions of the cancer treatment decision-making process,
              and bring different preferences about how they want to make
              decisions. These data suggest that oncologists should assess
              patients' and caregivers' decisional preferences, explicitly
              signal that a decision needs to be made whenever approaching
              an important crossroads in treatment and ensure that
              patients and caregivers understand the full range of
              presented options.},
   Doi = {10.1007/s00520-017-3942-y},
   Key = {fds332055}
}

@article{fds335814,
   Author = {Hahn, E and Ariely, D and Tannock, I and Fyles, A and Corn,
              BW},
   Title = {Slogans and donor pages of cancer centres: do they convey
              discordant messages?},
   Journal = {The Lancet Oncology},
   Volume = {19},
   Number = {4},
   Pages = {447--448},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1016/s1470-2045(18)30203-1},
   Doi = {10.1016/s1470-2045(18)30203-1},
   Key = {fds335814}
}

@article{fds333286,
   Author = {Amar, M and Ariely, D and Carmon, Z and Yang, H},
   Title = {How Counterfeits Infect Genuine Products: The Role of Moral
              Disgust},
   Journal = {Journal of Consumer Psychology},
   Volume = {28},
   Number = {2},
   Pages = {329--343},
   Publisher = {WILEY},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1002/jcpy.1036},
   Abstract = {{\copyright} 2018 The Authors. Journal of Consumer Psychology
              published by Wiley Periodicals, Inc. on behalf of Society
              for Consumer Psychology. We argue that moral disgust toward
              counterfeiting can degrade both the efficacy of products
              perceived to be counterfeits and that of genuine products
              resembling them. Five studies support our propositions and
              highlight the infectious nature of counterfeiting:
              Perceiving a product as a counterfeit made disgust more
              mentally accessible, and led participants to disinfect the
              item more and reduce how long they remained in physical
              contact with it (Study 1). Participants who perceived a
              mouse as a counterfeit, performed less well in a computer
              game using the mouse and expressed greater moral disgust,
              which mediated lowered performance (Study 2). Exposure to a
              supposedly counterfeit fountain pen in an unrelated prior
              task infected participants' performance using a genuine
              ballpoint pen resembling the ``counterfeit;'' individual
              differences in moral attitudes moderated the effect (Study
              3). Exposure to a supposedly counterfeit mouse infected
              performance with a genuine mouse of the same brand; moral
              disgust mediated this effect (Study 4). Finally, moral
              disgust mediated lowered efficacy of a supposed counterfeit
              and that of a genuine item resembling the ``counterfeit''
              (Study 5).},
   Doi = {10.1002/jcpy.1036},
   Key = {fds333286}
}

@article{fds332183,
   Author = {Ariely, D and Gneezy, U and Haruvy, E},
   Title = {Social Norms and the Price of Zero},
   Journal = {Journal of Consumer Psychology},
   Volume = {28},
   Number = {2},
   Pages = {180--191},
   Publisher = {WILEY},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1002/jcpy.1018},
   Abstract = {{\copyright} 2017 Society for Consumer Psychology The standard
              economic model assumes that demand is weakly decreasing in
              price. While empirical evidence shows that this is true for
              most price levels, it might not hold for the price of zero,
              where social norms are not entirely compatible with the
              self-maximizing economic agent. A set of experiments shows
              that switching from a low price to a price of zero has two
              effects on behavior: First, in accordance with the economic
              theory, more people demand the product. Second, whereas in
              the low price case some individuals demand high quantities
              of the product, in the zero price case most people take only
              one unit of the product. As a result, lowering the price to
              zero may lead to a net decrease in the total amount demanded
              in the market. We further show that polite priming results
              in higher demand than ethical priming in both zero price and
              1{\textcent} conditions.},
   Doi = {10.1002/jcpy.1018},
   Key = {fds332183}
}


%% Bansal, Ravi   
@article{fds339265,
   Author = {Ai, H and Bansal, R},
   Title = {Risk Preferences and the Macroeconomic Announcement
              Premium},
   Journal = {Econometrica},
   Volume = {86},
   Number = {4},
   Pages = {1383--1430},
   Publisher = {The Econometric Society},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.3982/ECTA14607},
   Abstract = {{\copyright} 2018 The Econometric Society This paper develops a
              revealed preference theory for the equity premium around
              macroeconomic announcements. Stock returns realized around
              pre-scheduled macroeconomic announcements, such as the
              employment report and the FOMC statements, account for 55%
              of the market equity premium. We provide a characterization
              theorem for the set of intertemporal preferences that
              generates a nonnegative announcement premium. Our theory
              establishes that the announcement premium identifies a
              significant deviation from time-separable expected utility
              and provides asset-market-based evidence for a large class
              of non-expected utility models. We also provide conditions
              under which asset prices may rise prior to some
              macroeconomic announcements and exhibit a pre-announcement
              drift.},
   Doi = {10.3982/ECTA14607},
   Key = {fds339265}
}


%% Barseghyan, Levon   
@article{fds337405,
   Author = {Barseghyan, L and Molinari, F and O'Donoghue, T and Teitelbaum,
              JC},
   Title = {Estimating risk preferences in the field},
   Journal = {Journal of Economic Literature},
   Volume = {56},
   Number = {2},
   Pages = {501--564},
   Publisher = {American Economic Association},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1257/jel.20161148},
   Abstract = {{\copyright} 2018 American Economic Association. All rights reserved.
              We survey the literature on estimating risk preferences
              using field data. We concentrate our attention on studies in
              which risk preferences are the focal object and estimating
              their structure is the core enterprise. We review a number
              of models of risk preferences-including both expected
              utility (EU) theory and non-EU models-that have been
              estimated using field data, and we highlight issues related
              to identification and estimation of such models using field
              data. We then survey the literature, giving separate
              treatment to research that uses individual-level data (e.g.,
              property-insurance data) and research that uses aggregate
              data (e.g., betting-market data). We conclude by discussing
              directions for future research.},
   Doi = {10.1257/jel.20161148},
   Key = {fds337405}
}


%% Bayer, Patrick   
@article{fds339569,
   Author = {Bayer, P and Charles, KK},
   Title = {Divergent paths: A new perspective on earnings differences
             between black and white men since 1940},
   Journal = {The Quarterly Journal of Economics},
   Volume = {133},
   Number = {3},
   Pages = {1459--1501},
   Publisher = {Oxford University Press (OUP)},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1093/QJE/QJY003},
   Abstract = {© The Author(s) 2018. Published by Oxford University Press
             on behalf of President and Fellows of Harvard College. All
             rights reserved. We present new evidence on the evolution of
             black-white earnings differences among all men, including
             both workers and nonworkers. We study two measures: (i) the
             level earnings gap—the racial earnings difference at a given
             quantile; and (ii) the earnings rank gap—the difference
             between a black man's percentile in the black earnings
             distribution and the position he would hold in the white
             earnings distribution. After narrowing from 1940 to
             the mid-1970s, the median black-white level earnings gap has
             since grown as large as it was in 1950. At the same time,
             the median black man's relative position in the earnings
             distribution has remained essentially constant since 1940,
             so that the improvement then worsening of median relative
             earnings have come mainly from the stretching and narrowing
             of the overall earnings distribution. Black men at higher
             percentiles have experienced significant advances in
             relative earnings since 1940, due mainly to strong
             positional gains among those with college educations. Large
             relative schooling gains by blacks at the median and below
             have been more than counteracted by rising return to skill
             in the labor market, which has increasingly penalized
             remaining racial differences in schooling at the bottom of
             the distribution.},
   Doi = {10.1093/QJE/QJY003},
   Key = {fds339569}
}


%% Becker, Charles M.   
@article{fds343393,
   Author = {Becker, C and Rickert, T},
   Title = {Zoned out? The determinants of manufactured housing rents:
             Evidence from North Carolina},
   Journal = {Journal of Housing Economics},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jhe.2019.03.003},
   Abstract = {© 2019 Elsevier Inc. This paper explores determinants of
             manufactured housing park (MHP) plot rents in North Carolina,
             with particular focus on the distinction among high-growth
             urban parks and small town/rural parks, and on the possible
             role played by zoning restrictiveness. Little is known about
             how MHP rents are determined, even though it is estimated
             that more than 10 million Americans live in MHPs. We
             implement a hedonic model and an instrumental variables
             approach to examine the relationship between MHP rents and
             local housing markets, land use restrictions, and other
             factors. We find that, contrary to expectations, zoning is
             strongly negatively associated with park rents in periurban
             and rural parks, but appears as a positive driver in
             high-growth cities. We then extend this model to an
             out-of-sample prediction for MHP rents in
             Texas.},
   Doi = {10.1016/j.jhe.2019.03.003},
   Key = {fds343393}
}

@article{fds338034,
   Author = {Werner, C and Edling, C and Becker, C and Kim, E and Kleinbach, R and Sartbay, FE and Teachout, W},
   Title = {Bride kidnapping in post-Soviet Eurasia: a roundtable
             discussion},
   Journal = {Central Asian Survey},
   Volume = {37},
   Number = {4},
   Pages = {582--601},
   Publisher = {Informa UK Limited},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1080/02634937.2018.1511519},
   Abstract = {© 2018, © 2018 Southseries Inc. Throughout Eurasia, bride
             kidnapping continues to be a fairly common way to get
             married. The practice is becoming increasingly
             controversial. Some local actors argue the practice is a
             cultural tradition, while others question its acceptability,
             particularly when a woman is forced to marry against her
             will. Many scholars, journalists and non-governmental
             organization workers view non-consensual variations of bride
             kidnapping as a form of gender-based violence. In October
             2016, an interdisciplinary group of scholars gathered at the
             annual Central Eurasia Studies Society conference to assess
             existing scholarship on bride kidnapping in post-Soviet
             Eurasia. Using an innovative format, this paper offers an
             edited transcript of that roundtable discussion. The
             roundtable format provides readers an opportunity to see a
             diverse range of perspectives and opinions in response to
             several questions about bride kidnapping. This paper
             provides a thorough introduction to key issues surrounding
             bride kidnapping and offers suggestions for areas that need
             further exploration.},
   Doi = {10.1080/02634937.2018.1511519},
   Key = {fds338034}
}

@article{fds338033,
   Author = {Olofsgård, A and Wachtel, P and Becker, CM},
   Title = {The economics of transition literature},
   Journal = {Economics of Transition},
   Volume = {26},
   Number = {4},
   Pages = {827--840},
   Publisher = {WILEY},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1111/ecot.12196},
   Abstract = {© 2018 The Authors Economics of Transition © 2018 The
             European Bank for Reconstruction and Development Published
             by Blackwell Publishing Ltd This article is based on a panel
             discussion on the contribution of the economics of
             transition literature to the broader understanding of
             economic and social development. All panel participants have
             been working in the field for decades and made important
             contributions to this literature. The transition experience
             was a social experiment on a scale not seen before, and many
             lessons were learned that travel beyond the specific region.
             Important contributions in areas such as political economy,
             contract theory, and the sequencing and complementarity of
             reforms were discussed. It was concluded that there is
             little reason at this point to consider economics of
             transition and development economics as separate subfields
             as they share the same intellectual objective, and
             complement each other in our understanding of the
             development process.},
   Doi = {10.1111/ecot.12196},
   Key = {fds338033}
}

@article{fds320570,
   Author = {Ye, VY and Becker, CM},
   Title = {The Z-axis: Elevation gradient effects in Urban
             America},
   Journal = {Regional Science and Urban Economics},
   Volume = {70},
   Number = {217},
   Pages = {312--329},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.regsciurbeco.2017.10.002},
   Abstract = {© 2017 Elsevier B.V. This paper presents an in-depth
             analysis of hilliness effects in American urban communities.
             Using data from seventeen cities, we establish robust
             relationships between topography and density, income and
             housing value gradients. We find that high-income households
             display strong preference not only for higher altitude but
             also for unevenness, leading to spatial income
             stratification at both the city and tract-level. We analyze
             potential causes of this propensity: micro-climate, crime,
             congestion, view effects, and use of public transit. We
             conclude that multi-dimensional spatial methods are crucial
             to investigations of cities with substantial unevenness.
             Moreover, redistributive social and economic policies must
             struggle with a fundamental, topographical dimension to
             inequality.},
   Doi = {10.1016/j.regsciurbeco.2017.10.002},
   Key = {fds320570}
}


%% Bianchi, Francesco   
@article{fds339231,
   Author = {Bianchi, F and Melosi, L},
   Title = {The dire effects of the lack of monetary and fiscal
             coordination},
   Journal = {Journal of Monetary Economics},
   Volume = {104},
   Pages = {1--22},
   Publisher = {Elsevier BV},
   Year = {2019},
   Month = {June},
   url = {http://dx.doi.org/10.1016/j.jmoneco.2018.09.001},
   Abstract = {© 2018 Elsevier B.V. If the government's willingness to
             stabilize debt is waning, while the central bank is adamant
             about keeping inflation low, the economy enters a vicious
             spiral of higher inflation, monetary tightening, recession,
             and further debt accumulation. The mere possibility of this
             conflict represents a drag on the economy. A commitment to
             inflate away the debt accumulated during a large recession
             leads to welfare improvements and lower uncertainty by
             separating long-run fiscal sustainability from the short-run
             fiscal stimulus. This strategy can be used to avoid the zero
             lower bound. As a technical contribution, we explain how to
             build shock-specific policy rules.},
   Doi = {10.1016/j.jmoneco.2018.09.001},
   Key = {fds339231}
}

@article{fds320576,
   Author = {Bianchi, F and Kung, H and Morales, G},
   Title = {Growth, slowdowns, and recoveries},
   Journal = {Journal of Monetary Economics},
   Volume = {101},
   Number = {184},
   Pages = {47--63},
   Publisher = {Elsevier BV},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jmoneco.2018.07.001},
   Abstract = {© 2018 Elsevier B.V. We construct and estimate an
             endogenous growth model with debt and equity financing
             frictions to understand the relation between business cycle
             fluctuations and long-term growth. The presence of spillover
             effects from R\&D imply an endogenous relation between
             productivity growth and the state of the economy. A large
             contractionary shock to equity financing in the 2001
             recession led to a persistent growth slowdown that was more
             severe than in the 2008 recession. Equity (debt) financing
             shocks are more important for explaining R\&D (physical)
             investment. Therefore, these two financing shocks affect the
             economy over different horizons.},
   Doi = {10.1016/j.jmoneco.2018.07.001},
   Key = {fds320576}
}

% NOTE(review): entry has no Volume/Number/Pages -- presumably the article was
% in press at export time (online-first DOI present); verify and fill in once
% the journal assigns them.
@article{fds342816,
   Author = {Bianchi, F},
   Title = {The Great Depression and the Great Recession: A view from
             financial markets},
   Journal = {Journal of Monetary Economics},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jmoneco.2019.03.010},
   Abstract = {© 2019 Elsevier B.V. Similarities between the Great
             Depression and the Great Recession are documented with
             respect to the behavior of financial markets. A Great
             Depression regime is identified by using a Markov-switching
             VAR. The probability of this regime has remained close to
             zero for many decades, but spiked for a short period during
             the most recent financial crisis, the Great Recession. The
             Great Depression regime implies a collapse of the stock
             market, with small-growth stocks outperforming small-value
             stocks. A model with financial frictions and uncertainty
             about policy makers’ intervention suggests that policy
             intervention during the Great Recession might have avoided a
             second Great Depression. A multi-country analysis shows that
             the Great Depression and Great Recession were not like any
             other financial crises.},
   Doi = {10.1016/j.jmoneco.2019.03.010},
   Key = {fds342816}
}

@article{fds320574,
   Author = {Bianchi, F and Melosi, L},
   Title = {Constrained Discretion and Central Bank Transparency},
   Journal = {The Review of Economics and Statistics},
   Volume = {100},
   Number = {1},
   Pages = {187--202},
   Publisher = {MIT Press - Journals},
   Year = {2018},
   Month = {March},
   url = {http://dx.doi.org/10.1162/rest_a_00659},
   Abstract = {We develop and estimate a general equilibrium model to
             quantitatively assess the effects and welfare implications
             of central bank transparency. Monetary policy can deviate
             from active inflation stabilization and agents conduct
             Bayesian learning about the nature of these deviations.
             Under constrained discretion, only short deviations occur,
             agents' uncertainty about the macroeconomy remains
             contained, and welfare is high. However, if a deviation
             persists, uncertainty accelerates and welfare declines.
             Announcing the future policy course raises uncertainty in
             the short run by revealing that active inflation
             stabilization will be temporarily abandoned. However, this
             announcement reduces policy uncertainty and anchors
             inflationary beliefs at the end of the policy. For the U.S.
             enhancing transparency is found to increase
             welfare.},
   Doi = {10.1162/rest_a_00659},
   Key = {fds320574}
}

@article{fds339885,
   Author = {Bianchi, F and Melosi, L},
   Title = {Constrained Discretion and Central Bank Transparency},
   Journal = {Review of Economics and Statistics},
   Volume = {100},
   Number = {1},
   Pages = {187--202},
   Year = {2018},
   Month = {March},
   url = {http://dx.doi.org/10.1162/rest_a_00659},
   Abstract = {We develop and estimate a general equilibrium model to
             assess the effects and welfare implications of central bank
             transparency. Monetary policy can deviate from active
             inflation stabilization, and agents conduct Bayesian
             learning about the nature of these deviations. Under
             constrained discretion, only short deviations occur,
             agents’ uncertainty about the macroeconomy remains
             contained, and welfare is high. However, if a deviation
             persists, uncertainty eventually accelerates and welfare
             declines. Announcing that inflation stabilization will be
             temporarily abandoned raises uncertainty. However, these
             announcements lower policy uncertainty and curb inflationary
             beliefs at the end of the policy. For the United States,
             enhancing transparency raises welfare.},
   Doi = {10.1162/rest_a_00659},
   internal-note = {NOTE(review): apparent duplicate of entry fds320574 (same
             article, volume, number, and pages); journal name also spelled
             inconsistently between the two -- consider merging.},
   Key = {fds339885}
}

% NOTE(review): no Journal/Volume/Pages -- this looks like an unpublished
% working paper; @article normally requires a Journal field, so the entry may
% render incompletely. Verify and consider @unpublished or a Type/Note field.
@article{fds333702,
   Author = {Bianchi, F and Lettau, M and Ludvigson, SC},
   Title = {Monetary Policy and Asset Valuation},
   Year = {2018},
   Month = {January},
   Key = {fds333702}
}


%% Bollerslev, Tim   
% NOTE(review): entry has no Volume/Number/Pages -- presumably in press at
% export time (online-first DOI present); verify and fill in once assigned.
@article{fds342812,
   Author = {Bollerslev, T and Meddahi, N and Nyawa, S},
   Title = {High-dimensional multivariate realized volatility
             estimation},
   Journal = {Journal of Econometrics},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jeconom.2019.04.023},
   Abstract = {© 2019 Elsevier B.V. We provide a new factor-based
             estimator of the realized covolatility matrix, applicable in
             situations when the number of assets is large and the
             high-frequency data are contaminated with microstructure
             noises. Our estimator relies on the assumption of a factor
             structure for the noise component, separate from the latent
             systematic risk factors that characterize the
             cross-sectional variation in the frictionless returns. The
             new estimator provides theoretically more efficient and
             finite-sample more accurate estimates of large-scale
             integrated covolatility and correlation matrices than other
             recently developed realized estimation procedures. These
             theoretical and simulation-based findings are further
             corroborated by an empirical application related to
             portfolio allocation and risk minimization involving several
             hundred individual stocks.},
   Doi = {10.1016/j.jeconom.2019.04.023},
   Key = {fds342812}
}

@article{fds339232,
   Author = {Bollerslev, T and Li, J and Xue, Y},
   Title = {Volume, volatility, and public news announcements},
   Journal = {Review of Economic Studies},
   Volume = {85},
   Number = {4},
   Pages = {2005--2041},
   Publisher = {Oxford University Press (OUP)},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1093/restud/rdy003},
   Abstract = {© The Author(s) 2017. Published by Oxford University Press
             on behalf of The Review of Economic Studies Limited. We
             provide new empirical evidence for the way in which
             financial markets process information. Our results rely
             critically on high-frequency intraday price and volume data
             for the S\&P 500 equity portfolio and U.S. Treasury bonds,
             along with new econometric techniques, for making inference
             on the relationship between trading intensity and spot
             volatility around public news announcements. Consistent with
             the predictions derived from a theoretical model in which
             investors agree to disagree, our estimates for the intraday
             volume-volatility elasticity around important news
             announcements are systematically below unity. Our elasticity
             estimates also decrease significantly with measures of
             disagreements in beliefs, economic uncertainty, and
             textual-based sentiment, further highlighting the key role
             played by differences-of-opinion.},
   Doi = {10.1093/restud/rdy003},
   Key = {fds339232}
}

% NOTE(review): no Journal/Volume/Pages -- looks like an unpublished working
% paper; @article normally requires a Journal field. Verify entry type.
@article{fds339888,
   Author = {Bollerslev, T and Patton, AJ and Quaedvlieg, R},
   Title = {Multivariate Leverage Effects and Realized Semicovariance
             GARCH Models},
   Year = {2018},
   Month = {April},
   Key = {fds339888}
}

% NOTE(review): no Journal/Volume/Pages -- looks like an unpublished working
% paper; @article normally requires a Journal field. Verify entry type.
@article{fds335427,
   Author = {Bollerslev, T and Hood, B and Huss, J and Pedersen,
             LH},
   Title = {Risk Everywhere: Modeling and Managing Volatility},
   Year = {2018},
   Month = {February},
   Key = {fds335427}
}

@article{fds339889,
   Author = {Bollerslev, T and Patton, AJ and Quaedvlieg, R},
   Title = {Modeling and forecasting (un)reliable realized covariances
             for more reliable financial decisions},
   Volume = {207},
   Number = {1},
   Pages = {71--91},
   Year = {2018},
   Abstract = {We propose a new framework for modeling and forecasting
             common financial risks based on (un)reliable realized
             covariance measures constructed from high-frequency intraday
             data. Our new approach explicitly incorporates the effect of
             measurement errors and time-varying attenuation biases into
             the covariance forecasts, by allowing the ex-ante
             predictions to respond more (less) aggressively to changes
             in the ex-post realized covariance measures when they are
             more (less) reliable. Applying the new procedures in the
             construction of minimum variance and minimum tracking error
             portfolios results in reduced turnover and statistically
             superior positions compared to existing procedures.
             Translating these statistical improvements into economic
             gains, we find that under empirically realistic assumptions
             a risk-averse investor would be willing to pay up to 170
             basis points per year to shift to using the new class of
             forecasting models.},
   internal-note = {NOTE(review): Journal field is missing even though
             Volume/Number/Pages are present -- an export defect; verify the
             venue and add the Journal field.},
   Key = {fds339889}
}


%% Boyd, Gale A.   
@article{fds342502,
   Author = {Boyd, GA and Lee, JM},
   Title = {Measuring plant level energy efficiency and technical change
             in the U.S. metal-based durable manufacturing sector using
             stochastic frontier analysis},
   Journal = {Energy Economics},
   Volume = {81},
   Pages = {159--174},
   Year = {2019},
   Month = {June},
   url = {http://dx.doi.org/10.1016/j.eneco.2019.03.021},
   Abstract = {© 2019 This study analyzes the electric and fuel energy
             efficiency for five different metal-based durable
             manufacturing industries in the United States over the time
             period 1987–2012, at the 3 digit North American Industry
             Classification System (NAICS) level. Using confidential
             plant-level data on energy use and production from the
             quinquennial U.S. Economic Census, a stochastic frontier
             regression analysis (SFA) is applied in six repeated cross
             sections for each five year census. The SFA controls for
             energy prices and climate-driven energy demand (heating
             degree days HDD and cooling degree days CDD) due to
             differences in plant level locations, as well as 6-digit
             NAICS industry effects. Own energy price elasticities range
             from −0.7 to −1.0, with electricity tending to have
             slightly higher elasticity than fuel. Mean efficiency
             estimates (100\% = best practice level) range from a low of
             33\% (fuel, NAICS 334 - Computer and Electronic Products) to
             86\% (electricity, NAICS 332 - Fabricated Metal Products).
             Electric efficiency is consistently better than fuel
             efficiency for all NAICS. Assuming that all plants in the
             least efficient quartile of the efficiency distribution
             achieve a median level of performance, we compute the
             decline in total energy use to be 21\%. A Malmquist index is
             used to decompose the aggregate change in energy performance
             into indices of efficiency and frontier (best practice)
             change. Modest improvements in aggregate energy performance
             are mostly change in best practice, but failure to keep up
             with the frontier retards aggregate improvement. Given that
             the best practice frontier has shifted, we also find that
             firms entering the industry are statistically more
             efficient, i.e. closer to the frontier; about 0.6\% for
             electricity and 1.7\% for fuels on average.},
   Doi = {10.1016/j.eneco.2019.03.021},
   Key = {fds342502}
}


%% Bugni, Federico   
@article{fds336353,
   Author = {Bugni, FA and Canay, IA and Shaikh, AM},
   Title = {Inference Under Covariate-Adaptive Randomization},
   Journal = {Journal of the American Statistical Association},
   Volume = {113},
   Number = {524},
   Pages = {1784--1796},
   Publisher = {Informa UK Limited},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1080/01621459.2017.1375934},
   Abstract = {© 2018, © 2018 American Statistical Association. This
             article studies inference for the average treatment effect
             in randomized controlled trials with covariate-adaptive
             randomization. Here, by covariate-adaptive randomization, we
             mean randomization schemes that first stratify according to
             baseline covariates and then assign treatment status so as
             to achieve “balance” within each stratum. Our main
             requirement is that the randomization scheme assigns
             treatment status within each stratum so that the fraction of
             units being assigned to treatment within each stratum has a
             well behaved distribution centered around a proportion π as
             the sample size tends to infinity. Such schemes include, for
             example, Efron’s biased-coin design and stratified block
             randomization. When testing the null hypothesis that the
             average treatment effect equals a prespecified value in such
             settings, we first show the usual two-sample t-test is
             conservative in the sense that it has limiting rejection
             probability under the null hypothesis no greater than and
             typically strictly less than the nominal level. We show,
             however, that a simple adjustment to the usual standard
             error of the two-sample t-test leads to a test that is exact
             in the sense that its limiting rejection probability under
             the null hypothesis equals the nominal level. Next, we
             consider the usual t-test (on the coefficient on treatment
             assignment) in a linear regression of outcomes on treatment
             assignment and indicators for each of the strata. We show
             that this test is exact for the important special case of
             randomization schemes with π=1/2, but is otherwise
             conservative. We again provide a simple adjustment to the
             standard errors that yields an exact test more generally.
             Finally, we study the behavior of a modified version of a
             permutation test, which we refer to as the
             covariate-adaptive permutation test, that only permutes
             treatment status for units within the same stratum. When
             applied to the usual two-sample t-statistic, we show that
             this test is exact for randomization schemes with π=1/2 and
             that additionally achieve what we refer to as “strong
             balance.” For randomization schemes with π≠1/2, this
             test may have limiting rejection probability under the null
             hypothesis strictly greater than the nominal level. When
             applied to a suitably adjusted version of the two-sample
             t-statistic, however, we show that this test is exact for
             all randomization schemes that achieve “strong balance,”
             including those with π≠1/2. A simulation study confirms
             the practical relevance of our theoretical results. We
             conclude with recommendations for empirical practice and an
             empirical illustration. Supplementary materials for this
             article are available online.},
   Doi = {10.1080/01621459.2017.1375934},
   Key = {fds336353}
}


%% Burnside, A. Craig   
% NOTE(review): no Volume/Number/Pages/DOI -- presumably forthcoming at export
% time; verify and complete the entry once published.
@article{fds324943,
   Author = {Burnside, AC and Graveline, JJ},
   Title = {On the Asset Market View of Exchange Rates},
   Journal = {Review of Financial Studies},
   Year = {2019},
   Month = {January},
   Key = {fds324943}
}


%% Caldwell, Bruce J.   
@article{fds342815,
   Author = {Caldwell, B},
   Title = {Keynes and {Hayek}},
   Journal = {History of Political Economy},
   Volume = {51},
   Number = {1},
   Pages = {89--94},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1215/00182702-7289288},
   Doi = {10.1215/00182702-7289288},
   Key = {fds342815}
}


%% Clotfelter, Charles T.   
@article{fds333290,
   Author = {Clotfelter, CT and Hemelt, SW and Ladd, HF},
   Title = {Multifaceted Aid for Low-Income Students and College
             Outcomes: Evidence from {North Carolina}},
   Pages = {278--303},
   Publisher = {WILEY},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1111/ecin.12486},
   Abstract = {© 2017 Western Economic Association International We study
             the evolution of a campus-based aid program for low-income
             students that began with grant-heavy financial aid and later
             added a suite of nonfinancial supports. We find little to no
             evidence that program eligibility during the early years
             (2004–2006), in which students received additional
             institutional grant aid and few nonfinancial supports,
             improved postsecondary progress, performance, or completion.
             In contrast, program-eligible students in more recent
             cohorts (2007–2010), when the program supplemented
             grant-heavy aid with an array of nonfinancial supports, were
             more likely to meet credit accumulation benchmarks toward
             timely graduation and earned higher grade point averages
             than their barely ineligible counterparts. (JEL I21, I23,
             I24, J08).},
   internal-note = {NOTE(review): Journal field is missing even though Pages
             and Publisher are present; DOI prefix 10.1111/ecin suggests
             Economic Inquiry -- verify and add the Journal field.},
   Doi = {10.1111/ecin.12486},
   Key = {fds333290}
}


%% Collard-Wexler, Allan   
@article{fds342471,
   Author = {Asker, J and Collard-Wexler, A and De Loecker,
             J},
   Title = {(Mis)Allocation, Market Power, and Global Oil
             Extraction},
   Journal = {American Economic Review},
   Volume = {109},
   Number = {4},
   Pages = {1568--1615},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1257/aer.20171438},
   Abstract = {© 2019 American Economic Association. All Rights Reserved.
             We propose an approach to measuring the misallocation of
             production in a market that compares actual industry cost
             curves to undistorted (counterfactual) supply curves. As
             compared to traditional, TFPR- based, misallocation
             measures, this approach leverages cost data, such that
             results are readily mapped to welfare metrics. As an
             application, we analyze global crude oil extraction and
             quantify the extent of misallocation therein, together with
             the proportion attributable to market power. From 1970 to
             2014, we find substantial misallocation, in the order of
             US\$744 billion, 14.1 percent to 21.9 percent of which is
             attributable to market power.},
   Doi = {10.1257/aer.20171438},
   Key = {fds342471}
}

@article{fds325892,
   Author = {Collard-Wexler, A and Gowrisankaran, G and Lee,
             RS},
   Title = {“Nash-in-Nash” Bargaining: A Microfoundation for Applied
             Work},
   Pages = {163--195},
   Publisher = {University of Chicago Press},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1086/700729},
   Abstract = {© 2019 by The University of Chicago. All rights reserved. A
             “Nash equilibrium in Nash bargains” has become a
             workhorse bargaining model in applied analyses of bilateral
             oligopoly. This paper proposes a noncooperative foundation
             for “Nash-in-Nash” bargaining that extends
             Rubinstein’s alternating offers model to multiple upstream
             and downstream firms. We provide conditions on firms’
             marginal contributions under which there exists, for
             sufficiently short time between offers, an equilibrium with
             agreement among all firms at prices arbitrarily close to
             Nash-in-Nash prices, that is, each pair’s Nash bargaining
             solution given agreement by all other pairs. Conditioning on
             equilibria without delayed agreement, limiting prices are
             unique. Unconditionally, they are unique under stronger
             assumptions.},
   internal-note = {NOTE(review): Journal field is missing even though Pages
             and Publisher are present; University of Chicago Press DOI
             (10.1086) -- verify the venue and add the Journal
             field.},
   Doi = {10.1086/700729},
   Key = {fds325892}
}


%% Conitzer, Vincent   
@article{fds333306,
   Author = {Conitzer, V},
   Title = {A Puzzle about Further Facts},
   Journal = {Erkenntnis},
   Volume = {84},
   Number = {3},
   Pages = {727--739},
   Publisher = {Springer Nature},
   Year = {2019},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s10670-018-9979-6},
   Abstract = {© 2018, The Author(s). In metaphysics, there are a number
             of distinct but related questions about the existence of
             “further facts”—facts that are contingent relative to
             the physical structure of the universe. These include
             further facts about qualia, personal identity, and time. In
             this article I provide a sequence of examples involving
             computer simulations, ranging from one in which the
             protagonist can clearly conclude such further facts exist to
             one that describes our own condition. This raises the
             question of where along the sequence (if at all) the
             protagonist stops being able to soundly conclude that
             further facts exist.},
   Doi = {10.1007/s10670-018-9979-6},
   Key = {fds333306}
}

@article{fds341328,
   Author = {Kramer, MF and Schaich Borg and J and Conitzer, V and Sinnott-Armstrong,
             W},
   Title = {When Do People Want AI to Make Decisions?},
   Journal = {AIES 2018 Proceedings of the 2018 AAAI/ACM Conference on AI,
             Ethics, and Society},
   Pages = {204--209},
   Year = {2018},
   Month = {December},
   ISBN = {9781450360128},
   url = {http://dx.doi.org/10.1145/3278721.3278752},
   Abstract = {© 2018 ACM. AI systems are now or will soon be
             sophisticated enough to make consequential decisions.
             Although this technology has flourished, we also need public
             appraisals of AI systems playing these more important roles.
             This article reports surveys of preferences for and against
             AI systems making decisions in various domains as well as
             experiments that intervene on these preferences. We find
             that these preferences are contingent on subjects' previous
             exposure to computer systems making these kinds of
             decisions, and some interventions designed to mimic previous
             exposure successfully encourage subjects to be more
             hospitable to computer systems making these weighty
             decisions.},
   Doi = {10.1145/3278721.3278752},
   Key = {fds341328}
}

@article{fds335334,
   Author = {Ueda, S and Iwasaki, A and Conitzer, V and Ohta, N and Sakurai, Y and Yokoo, M},
   Title = {Coalition structure generation in cooperative games with
             compact representations},
   Journal = {Autonomous Agents and Multi-Agent Systems},
   Volume = {32},
   Number = {4},
   Pages = {503--533},
   Publisher = {Springer Nature},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1007/s10458-018-9386-z},
   Abstract = {© 2018, The Author(s). This paper presents a new way of
             formalizing the coalition structure generation problem (CSG)
             so that we can apply constraint optimization techniques to
             it. Forming effective coalitions is a major research
             challenge in AI and multi-agent systems. CSG involves
             partitioning a set of agents into coalitions to maximize
             social surplus. Traditionally, the input of the CSG problem
             is a black-box function called a characteristic function,
             which takes a coalition as input and returns the value of
             the coalition. As a result, applying constraint optimization
             techniques to this problem has been infeasible. However,
             characteristic functions that appear in practice often can
             be represented concisely by a set of rules, rather than
             treating the function as a black box. Then we can solve the
             CSG problem more efficiently by directly applying constraint
             optimization techniques to this compact representation. We
             present new formalizations of the CSG problem by utilizing
             recently developed compact representation schemes for
             characteristic functions. We first characterize the
             complexity of CSG under these representation schemes. In
             this context, the complexity is driven more by the number of
             rules than by the number of agents. As an initial step
             toward developing efficient constraint optimization
             algorithms for solving the CSG problem, we also develop
             mixed integer programming formulations and show that an
             off-the-shelf optimization package can perform reasonably
             well.},
   Doi = {10.1007/s10458-018-9386-z},
   Key = {fds335334}
}

@article{fds337141,
   Author = {Freeman, R and Conitzer, V and Zahedi, SM and Lee,
             BC},
   Title = {Dynamic proportional sharing: A game-theoretic
             approach},
   Journal = {SIGMETRICS 2018 Abstracts of the 2018 ACM International
             Conference on Measurement and Modeling of Computer
             Systems},
   Pages = {33--35},
   Publisher = {ACM Press},
   Year = {2018},
   Month = {June},
   ISBN = {9781450358460},
   url = {http://dx.doi.org/10.1145/3219617.3219631},
   Abstract = {© 2018 Copyright held by the owner/author(s). Sharing
             computational resources amortizes cost and improves
             utilization and efficiency. When agents pool their
             resources, each becomes entitled to a portion of the shared
             pool. Static allocations in each round can guarantee
             entitlements and are strategy-proof, but efficiency suffers
             because allocations do not reflect variations in agents’
             demands for resources across rounds. Dynamic allocation
             mechanisms assign resources to agents across multiple rounds
             while guaranteeing agents their entitlements. Designing
             dynamic mechanisms is challenging, however, when agents are
             strategic and can benefit by misreporting their demands for
             resources. In this paper, we show that dynamic allocation
             mechanisms based on max-min fail to guarantee entitlements,
             strategy-proofness or both. We propose the flexible lending
             (FL) mechanism and show that it satisfies strategy-proofness
             and guarantees at least half of the utility from static
             allocations while providing an asymptotic efficiency
             guarantee. Our simulations with real and synthetic data show
             that the performance of the flexible lending mechanism is
             comparable to that of state-of-the-art mechanisms, providing
             agents with at least 0.98x, and on average 15x, of their
             utility from static allocations. Finally, we propose the
             T-period mechanism and prove that it satisfies
             strategy-proofness and guarantees entitlements for T =
             2.},
   Doi = {10.1145/3219617.3219631},
   Key = {fds337141}
}

@article{fds325596,
  author  = {Kolb, A and Conitzer, V},
  title   = {Crying about a Strategic Wolf},
  journal = {Kelley School of Business Research Paper},
  number  = {16},
  year    = {2018},
  month   = {March},
  key     = {fds325596}
}

@article{fds332974,
   Author = {Conitzer, V},
   Title = {Technical perspective: Designing algorithms and the fairness
             criteria they should satisfy},
   Journal = {Communications of the ACM},
   Volume = {61},
   Number = {2},
   Pages = {92},
   Year = {2018},
   Month = {February},
   url = {http://dx.doi.org/10.1145/3166066},
   Doi = {10.1145/3166066},
   Key = {fds332974}
}

@article{fds339563,
   Author = {De Weerdt and M and Albert, M and Conitzer, V and Van Der Linden,
             K},
   Title = {Complexity of scheduling charging in the smart
             grid},
   Journal = {IJCAI International Joint Conference on Artificial
             Intelligence},
   Volume = {2018-July},
   Pages = {4736--4742},
   Year = {2018},
   Month = {January},
   ISBN = {9780999241127},
   Abstract = {© 2018 International Joint Conferences on Artificial
             Intelligence. All rights reserved. The problem of optimally
             scheduling the charging demand of electric vehicles within
             the constraints of the electricity infrastructure is called
             the charge scheduling problem. The models of the charging
             speed, horizon, and charging demand determine the
             computational complexity of the charge scheduling problem.
             We show that for about 20 variants the problem is either in
             P or weakly NP-hard and dynamic programs exist to compute
             optimal solutions. About 10 other variants of the problem
             are strongly NP-hard, presenting a potentially significant
             obstacle to their use in practical situations of scale. An
             experimental study establishes up to what parameter values
             the dynamic programs can determine optimal solutions in a
             couple of minutes.},
   Key = {fds339563}
}

@article{fds341330,
   Author = {Deng, Y and Conitzer, V},
   Title = {Disarmament games with resources},
   Journal = {32nd AAAI Conference on Artificial Intelligence, AAAI
             2018},
   Pages = {981--988},
   Year = {2018},
   Month = {January},
   ISBN = {9781577358008},
   Abstract = {Copyright © 2018, Association for the Advancement of
             Artificial Intelligence (www.aaai.org). All rights reserved.
             A paper by Deng and Conitzer in AAAI'17 introduces
             disarmament games, in which players alternatingly commit not
             to play certain pure strategies. However, in practice
             disarmament usually does not consist in removing a strategy,
             but rather in removing a resource (and doing so rules out
             all the strategies in which that resource is used
             simultaneously). In this paper, we introduce a model of
             disarmament games in which resources, rather than
             strategies, are removed. We prove NP-completeness of several
             formulations of the problem of achieving desirable outcomes
             via disarmament. We then study the case where resources can
             be fractionally removed, and prove a result analogous to the
             folk theorem that all desirable outcomes can be achieved. We
             show that we can approximately achieve any desirable outcome
             in a polynomial number of rounds, though determining whether
             a given outcome can be obtained in a given number of rounds
             remains NP-complete.},
   Key = {fds341330}
}

@article{fds341329,
   Author = {Freedman, R and Dickerson, JP and Borg, JS and Sinnott-Armstrong, W and Conitzer, V},
   Title = {Adapting a kidney exchange algorithm to align with human
             values},
   Journal = {32nd AAAI Conference on Artificial Intelligence, AAAI
             2018},
   Pages = {1636--1643},
   Year = {2018},
   Month = {January},
   ISBN = {9781577358008},
   Abstract = {Copyright © 2018, Association for the Advancement of
             Artificial Intelligence (www.aaai.org). All rights reserved.
             The efficient allocation of limited resources is a classical
             problem in economics and computer science. In kidney
             exchanges, a central market maker allocates living kidney
             donors to patients in need of an organ. Patients and donors
             in kidney exchanges are prioritized using ad-hoc weights
             decided on by committee and then fed into an allocation
             algorithm that determines who get what-and who does not. In
             this paper, we provide an end-to-end methodology for
             estimating weights of individual participant profiles in a
             kidney exchange. We first elicit from human subjects a list
             of patient attributes they consider acceptable for the
             purpose of prioritizing patients (e.g., medical
             characteristics, lifestyle choices, and so on). Then, we ask
             subjects comparison queries between patient profiles and
             estimate weights in a principled way from their responses.
             We show how to use these weights in kidney exchange market
             clearing algorithms. We then evaluate the impact of the
             weights in simulations and find that the precise numerical
             values of the weights we computed matter little, other than
             the ordering of profiles that they imply. However, compared
             to not prioritizing patients at all, there is a significant
             effect, with certain classes of patients being
             (de)prioritized based on the human-elicited value
             judgments.},
   Key = {fds341329}
}

@article{fds339285,
   Author = {De Weerdt and MM and Conitzer, V and Albert, M and Van Der Linden,
             K},
   Title = {Complexity of scheduling charging in the smart
             grid},
   Journal = {Proceedings of the International Joint Conference on
             Autonomous Agents and Multiagent Systems,
             AAMAS},
   Volume = {3},
   Pages = {1924--1926},
   Year = {2018},
   Month = {January},
   ISBN = {9781510868083},
   Abstract = {© 2018 International Foundation for Autonomous Agents and
             Multiagent Systems (www.ifaamas.org). All rights reserved.
             The problem of optimally scheduling the charging demand of
             electric vehicles within the constraints of the electricity
             infrastructure is called the charge scheduling problem. The
             models of the charging speed, horizon, and charging demand
             determine the computational complexity of the charge
             scheduling problem. For about 20 variants the problem is
             either in P or weakly NP-hard and dynamic programs exist to
             compute optimal solutions. About 10 other variants of the
             problem are strongly NP-hard, presenting a potentially
             significant obstacle to their use in practical situations of
             scale.},
   Key = {fds339285}
}


%% Connolly, Michelle P.   
@article{fds341976,
  author = {Sá, N and Connolly, MP},
  title  = {An Economic Model of Tiered Spectrum Access},
  year   = {2018},
  month  = {September},
  key    = {fds341976}
}

@article{fds341977,
   Author = {Connolly, M and Lim, E and Mitchell, F and Trivedi,
             A},
   Title = {The 2016 {FCC} Broadcast Incentive Auction},
   Year = {2018},
   Month = {March},
   Key = {fds341977}
}

@article{fds341978,
   Author = {Connolly, MP and Sá, N and Zaman, A and Roark, C and Trivedi,
             A},
   Title = {The Evolution of U.S. Spectrum Values Over
             Time},
   Year = {2018},
   Month = {February},
   internal-note = {NOTE(review): appears to duplicate fds341979 (same
             authors, title, and date); fds341979 additionally carries
             the working-paper series and number -- confirm and
             deduplicate},
   Key = {fds341978}
}

@article{fds341979,
   Author = {Connolly, MP and Sá, N and Zaman, A and Roark, C and Trivedi,
             A},
   Title = {The Evolution of {U.S.} Spectrum Values Over
             Time},
   Journal = {Economic Research Initiatives at Duke (Erid) Working
             Paper},
   Number = {247},
   Year = {2018},
   Month = {February},
   Key = {fds341979}
}


%% Cook, Philip J.   
@article{fds343563,
  author   = {Cook, PJ and Pollack, HA and White, K},
  title    = {The Last Link: from Gun Acquisition to Criminal Use.},
  journal  = {Journal of Urban Health : Bulletin of the New York Academy
              of Medicine},
  year     = {2019},
  month    = {May},
  url      = {http://dx.doi.org/10.1007/s11524-019-00358-0},
  abstract = {Guns that are used in crime and recovered by the police
              typically have changed hands often since first retail sale
              and are quite old. While there is an extensive literature on
              "time to crime" for guns, defined as the elapsed time from
              first retail sale to known use in a crime, there is little
              information available on the duration of the "last link"-the
              elapsed time from the transaction that actually provided the
              offender with the gun in question. In this article, we use
              data from the new Chicago Inmate Survey (CIS) to estimate
              the duration of the last link. The median is just 2 months.
              Many of the gun-involved respondents to the CIS (42%) did
              not have any gun 6 months prior to their arrest for the
              current crime. The CIS respondents were almost all barred
              from purchasing a gun from a gun store because of their
              prior criminal record-as a result, their guns were obtained
              by illegal transactions with friends, relatives, and the
              underground market. We conclude that more effective
              enforcement of the laws governing gun transactions may have
              a quick and pervasive effect on gun use in
              crime.},
  doi      = {10.1007/s11524-019-00358-0},
  key      = {fds343563}
}

@article{fds335163,
   Author = {Cook, PJ and Ludwig, J},
   Title = {The social costs of gun ownership: a reply to Hayo,
             Neumeier, and Westphal},
   Journal = {Empirical Economics},
   Volume = {56},
   Number = {1},
   Pages = {13--22},
   Publisher = {Springer Nature},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1007/s00181-018-1497-5},
   Abstract = {© 2018, Springer-Verlag GmbH Germany, part of Springer
             Nature. We respond to the new article by Hayo, Neumeier, and
             Westphal (HNW), which is a critique of our 2006 article. The
             principal contribution of that article was to use a greatly
             improved proxy for gun prevalence to estimate the effect of
             gun prevalence on homicide rates. While the best available,
             our proxy, the ratio of firearms suicides to total suicides
             in a jurisdiction (FSS), is subject to measurement error
             which limits its use to larger jurisdictions that have
             enough suicides to stabilize the ratio. In this response, we
             report estimates for four different specifications and two
             data sets, the 200-county data and the data for the 50
             states. We develop the claim that measurement error in FSS
             helps explain the observed pattern of results. Adopting the
             assumption that FSS follows a binomial process with a number
             of trials equal to the number of suicides, we characterize
             the relationship between measurement error and size of the
             jurisdiction, and thereby justify our conclusion that
             restricting the estimation to large jurisdictions reduces
             measurement error in FSS and hence the attenuation bias in
             the key coefficient estimate. We conclude that for the
             county-level data, the measurement error in FSS is of
             greater concern than using a specification that is flexible
             with respect to population. HNW focus on the latter but at
             the cost of increasing the effects of the former. We then
             demonstrate that the state-level data provide a robust case
             that more guns lead to more homicides.},
   Doi = {10.1007/s00181-018-1497-5},
   Key = {fds335163}
}

@article{fds343387,
   Author = {Cook, PJ and Ludwig, J},
   Title = {Response to Counterpoint: Violence Itself Is a Root Cause of
             Violence},
   Journal = {Journal of Policy Analysis and Management},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1002/pam.22142},
   Doi = {10.1002/pam.22142},
   Key = {fds343387}
}

@article{fds343386,
   Author = {Cook, PJ and Ludwig, J},
   Title = {Understanding Gun Violence: Public Health vs. Public
             Policy},
   Journal = {Journal of Policy Analysis and Management},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1002/pam.22141},
   Doi = {10.1002/pam.22141},
   Key = {fds343386}
}

@article{fds339874,
   Author = {Cook, PJ},
   Title = {Expanding the Public Health Approach to Gun Violence
             Prevention.},
   Journal = {Annals of Internal Medicine},
   Volume = {169},
   Number = {10},
   Pages = {723--724},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.7326/m18-2846},
   Doi = {10.7326/m18-2846},
   Key = {fds339874}
}

@article{fds337404,
   Author = {Braga, AA and Cook, PJ},
   Title = {The Association of Firearm Caliber With Likelihood of Death
             From Gunshot Injury in Criminal Assaults.},
   Journal = {JAMA Network Open},
   Volume = {1},
   Number = {3},
   Pages = {e180833},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1001/jamanetworkopen.2018.0833},
   Abstract = {Importance:A foundational issue in firearms policy has been
             whether the type of weapon used in an assault affects the
             likelihood of death. Objective:To determine whether the
             likelihood of death from gunshot wounds inflicted in
             criminal assaults is associated with the power of the
             assailant's firearm as indicated by its caliber. Design,
             Setting, and Participants:Cross-sectional study with
             multivariate analysis of data on shooting cases extracted by
             the authors from police investigation files for assaults
             that took place in Boston, Massachusetts, between January 1,
             2010, and December 31, 2014. These data were analyzed
             between October 1, 2017, and February 18, 2018. In all cases
             the victim sustained 1 or more gunshot wounds in
             circumstances that the Boston Police Department deemed
             criminal. The working sample included all 221 gun homicides
             and a stratified random sample of 300 nonfatal cases drawn
             from the 1012 that occurred during the 5-year period. Seven
             nonfatal cases were omitted because they had been
             misclassified. Exposures:The primary source of variation was
             the caliber of the firearm used to shoot the victim. Main
             Outcomes and Measures:Whether the victim died from the
             gunshot wound(s). Results:The final sample of 511 gunshot
             victims and survivors (n = 220 fatal; n = 291
             nonfatal) was predominantly male (n = 470 [92.2%]),
             black (n = 413 [80.8%]) or Hispanic (n = 69
             [13.5%]), and young (mean [SD] age, 26.8 [9.4] years).
             Police investigations determined firearm caliber in 184
             nonfatal cases (63.2%) and 183 fatal cases (83.2%). These
             367 cases were divided into 3 groups by caliber: small (.22,
             .25, and .32), medium (.38, .380, and 9 mm), or large (.357
             magnum, .40, .44 magnum, .45, 10 mm, and 7.62 × 39 mm).
             Firearm caliber had no systematic association with the
             number of wounds, the location of wounds, circumstances of
             the assault, or victim characteristics, as demonstrated by
             χ2 tests of each cluster of variables and by a
             comprehensive multinomial logit analysis. A logit analysis
             of the likelihood of death found that compared with
             small-caliber cases, medium caliber had an odds ratio of
             2.25 (95% CI, 1.37-3.70; P = .001) and large caliber had
             an odds ratio of 4.54 (95% CI, 2.37-8.70; P < .001).
             Based on a simulation using the logit equation, replacing
             the medium- and large-caliber guns with small-caliber guns
             would have reduced gun homicides by 39.5%. Conclusions and
             Relevance:Firearms caliber was associated with the
             likelihood of death from gunshot wounds in criminal assault.
             Shootings with larger-caliber handguns were more deadly but
             no more sustained or accurate than shootings with
             smaller-caliber handguns. This conclusion is of direct
             relevance to the design of gun policy.},
   Doi = {10.1001/jamanetworkopen.2018.0833},
   Key = {fds337404}
}

@article{fds335164,
   Author = {Smucker, S and Kerber, RE and Cook, PJ},
   Title = {Suicide and Additional Homicides Associated with Intimate
             Partner Homicide: North Carolina 2004-2013.},
   Journal = {Journal of Urban Health : Bulletin of the New York Academy
             of Medicine},
   Volume = {95},
   Number = {3},
   Pages = {337--343},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s11524-018-0252-8},
   Abstract = {Intimate partner homicide (IPH) is a critical public health
             and safety issue in the USA. In this study, we determine the
             prevalence and correlates of perpetrator suicide and
             additional homicides following intimate partner homicide
             (IPH) in a large, diverse state with high quality data. We
             extract IPHs from the North Carolina Violent Death Reporting
             System for 2004-2013 and identify suicides and other
             homicides that were part of the same incidents. We analyze
             the likelihood (in odds ratio form) of perpetrator suicide
             and additional homicides using logistic regression analysis.
             Almost all IPH-suicide cases were by men with guns (86.6%).
             Almost one-half of IPHs committed by men with guns ended
             with suicide. Male-perpetrated IPH incidents averaged 1.58
             deaths if a gun was used, and 1.14 deaths otherwise. It is
             well-known that gun access increases the chance that a
             violent domestic relationship will end in death. The current
             findings demonstrate that gun IPH is often coupled with
             additional killings. As suicidal batterers will not be
             deterred from IPH by threat of punishment, the results
             underline the importance of preemption by limiting
             batterers' access to guns.},
   Doi = {10.1007/s11524-018-0252-8},
   Key = {fds335164}
}

@article{fds333796,
   Author = {Cook, PJ},
   Title = {Gun Theft and Crime.},
   Journal = {Journal of Urban Health : Bulletin of the New York Academy
             of Medicine},
   Volume = {95},
   Number = {3},
   Pages = {305--312},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s11524-018-0253-7},
   Abstract = {Some law enforcement officials and other observers have
             asserted that theft is the primary source of guns to crime.
             In fact, the role of theft in supplying the guns used in
             robbery, assault, and murder is unknown, and current
             evidence provides little guidance about whether an effective
             program to reduce gun theft would reduce gun violence. The
             current article analyzes publicly available national data on
             gun theft together with a unique data set for Chicago. The
             results tend to support a conclusion that stolen guns play
             only a minor role in crime. First, publicly available data
             are used to calculate that thefts are only about 1% of all
             gun transactions nationwide. Second, an analysis of original
             data from Chicago demonstrates that less than 3% of crime
             guns recovered by the police have been reported stolen to
             the Chicago Police Department (CPD). If a gun is reported
             stolen, there is a 20% chance that it will be recovered,
             usually in conjunction with an arrest for illegal carrying.
             Less than half of those picked up with a stolen gun have a
             criminal record that includes violent offenses. Third,
             results from surveys of convicted criminals, both nationally
             and in Chicago, suggest that it is rare for respondents to
             have stolen the gun used in their most recent crime. The
             data on which these results are based have various
             shortcomings. A research agenda is proposed that would
             provide more certainty about the role of
             theft.},
   Doi = {10.1007/s11524-018-0253-7},
   Key = {fds333796}
}

@article{fds335165,
   Author = {Cook, PJ},
   Title = {Challenge of Firearms Control in a Free Society},
   Journal = {Criminology & Public Policy},
   Volume = {17},
   Number = {2},
   Pages = {437--451},
   Year = {2018},
   Month = {May},
   url = {http://dx.doi.org/10.1111/1745-9133.12359},
   Abstract = {© 2018 American Society of Criminology Chapter 10 of The
             Challenge of Crime in a Free Society, titled “Control of
             Firearms,” is a brief but strong statement in support of
             regulating gun transactions, possession, and carrying, with
             several specific recommendations, including the adoption of
             universal gun registration and permit-to-purchase
             requirements. The U.S. President's Commission on Law
             Enforcement and Administration of Justice, when writing the
             chapter, had no systematic research to draw on. Since its
             publication in 1967, the field of gun violence has become an
             active area of research, and much has been learned. But the
             nation has become far more polarized politically during the
             last 50 years, and gun policy has become a rigidly partisan
             issue. A new commission would have great difficulty reaching
             consensus, although there may be common ground on regulating
             guns vis-à-vis mental illness and domestic
             violence.},
   Doi = {10.1111/1745-9133.12359},
   Key = {fds335165}
}

@article{fds335166,
  author = {Cook, PJ and Kang, S},
  title  = {The School-Entry-Age Rule Affects Redshirting Patterns and
            Resulting Disparities in Achievement},
  year   = {2018},
  month  = {April},
  key    = {fds335166}
}

@book{fds339875,
   Author = {Cook, PJ},
   Title = {Foreword},
   Pages = {xiii--xiv},
   Publisher = {Routledge},
   Year = {2018},
   Month = {January},
   ISBN = {9780813397801},
   url = {http://dx.doi.org/10.4324/9780429501265},
   Doi = {10.4324/9780429501265},
   Key = {fds339875}
}

@article{fds333797,
   Author = {Cook, PJ},
   Title = {Gun markets},
   Journal = {Annual Review of Criminology},
   Volume = {1},
   Number = {1},
   Pages = {379--400},
   Publisher = {Annual Reviews},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1146/annurev-criminol-032317-092149},
   Abstract = {Copyright © 2018 by Annual Reviews. All rights reserved.
             The systematic study of how available weapons influence the
             rates, patterns, and outcomes of criminal violence is new,
             but it is now a well-established and fast-growing subfield
             in criminology, legal studies, public health, and economics.
             This review focuses on the transactions that arm dangerous
             offenders, noting that if those transactions could be
             effectively curtailed it would have an immediate and
             profound effect on gun violence and homicide rates. Guns are
             legal commodities, but violent offenders typically obtain
             their guns by illegal means. Our knowledge of these
             transactions comes primarily from trace data on guns
             recovered by the police and from occasional surveys of
             gun-involved offenders. Because most guns used in crime are
             sourced from the stock of guns in private hands (rather than
             a purchase from a licensed dealer), the local prevalence of
             gun ownership appears to influence the transaction costs and
             the proportions of robberies and assaults committed with
             guns rather than knives or other weapons. Nonetheless,
             regulations that govern licensed dealers have been linked to
             trafficking patterns and in some cases to the use of guns in
             crime.},
   Doi = {10.1146/annurev-criminol-032317-092149},
   Key = {fds333797}
}

% NOTE(review): page range normalized to en-dash. Entry is typed 'book' yet
% carries Volume/Pages and no Publisher -- it reads like a journal article
% (possibly Law and Contemporary Problems, vol. 81); verify and retype.
@book{fds333581,
   Author = {Krawiec, K and Cook, P},
   Title = {If We Allow Football Players and Boxers to Be Paid for
             Entertaining the Public, Why Don't We Allow Kidney Donors to
             Be Paid for Saving Lives?},
   Volume = {81},
   Pages = {9--35},
   Year = {2018},
   Key = {fds333581}
}

% NOTE(review): page range normalized to en-dash.
@article{fds335167,
   Author = {Krawiec, K and Cook, P},
   Title = {If We Pay Football Players, Why Not Kidney
             Donors?},
   Journal = {Regulation},
   Volume = {41},
   Pages = {12--17},
   Publisher = {Cato Institute},
   Year = {2018},
   Abstract = {Ethicists who oppose compensating kidney donors claim they
             do so because kidney donation is risky for the donor’s
             health, donors may not appreciate the risks and may be
             cognitively biased in other ways, and donors may come from
             disadvantaged groups and thus could be exploited. However,
             few ethical qualms are raised about professional football
             players, who face much greater health risks than kidney
             donors, have much less counseling and screening concerning
             that risk, and who often come from racial and economic
             groups deemed disadvantaged. It thus seems that either
             ethicists—and the law—should ban both professional
             football and compensated organ donation, allow both, or
             allow compensated organ donation but prohibit professional
             football. The fact that we choose none of those options
             raises questions about the wisdom of the compensation
             ban},
   Key = {fds335167}
}


%% Darity, William A.   
% NOTE(review): page range normalized to en-dash.
@article{fds337033,
   Author = {Craigie, T-A and Myers, SL and Darity, WA},
   Title = {Racial Differences in the Effect of Marriageable Males on
             Female Family Headship.},
   Journal = {Journal of Demographic Economics},
   Volume = {84},
   Number = {3},
   Pages = {231--256},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1017/dem.2018.3},
   Abstract = {Female family headship has strong implications for endemic
             poverty in the United States. Consequently, it is imperative
             to explore the chief factors that contribute to this
             problem. Departing from prior literature that places
             significant weight on welfare-incentive effects, our study
             highlights the role of male marriageability in explaining
             the prevalence of never-married female family headship for
             blacks and whites. Specifically, we examine racial
             differences in the effect of male marriageability on
             never-married female headship from 1980 to 2010. By
             exploiting data from IPUMS-USA (N = 4,958,722) and exogenous
             variation from state-level sentencing reforms, the study
             finds that the decline in the relative supply of
             marriageable males significantly increases the incidence of
             never-married female family headship for blacks but not for
             whites.},
   Doi = {10.1017/dem.2018.3},
   Key = {fds337033}
}

% NOTE(review): page range normalized to en-dash.
@article{fds337034,
   Author = {McMillian, MM and Fuller, S and Hill, Z and Duch, K and Darity,
             WA},
   Title = {Can Class-Based Substitute for Race-Based Student Assignment
             Plans? Evidence From Wake County, North Carolina},
   Journal = {Urban Education},
   Volume = {53},
   Number = {7},
   Pages = {843--874},
   Publisher = {SAGE Publications},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1177/0042085915613554},
   Abstract = {© The Author(s) 2015. This study uses a North Carolina
             administrative data set to analyze racial segregation and
             student achievement in Wake County during race-based and
             income-based school assignment plans. We find a modest
             increase in the level of racial segregation in Wake schools
             during the income-based plan, but compared with other large
             districts in the state, Wake County remained relatively
             desegregated. We also find a small increase in reading and
             math test scores and a narrowing of the Black-White test
             score gap. Our analysis indicates that the improvement in
             math scores may be partially due to school composition
             changes attributable to the income-based assignment
             plan.},
   Doi = {10.1177/0042085915613554},
   Key = {fds337034}
}

% NOTE(review): first author was split by a stray ' and ' ("De La Cruz-Viesca
% and M"); restored as a braced compound surname. Page range normalized.
% "Coman-Don, A" is possibly a garbling of "Comandon, A" -- verify before
% correcting the name.
@article{fds339727,
   Author = {{De La Cruz-Viesca}, M and Ong, PM and Coman-Don, A and Darity, WA and Hamilton, D},
   Title = {Fifty years after the Kerner Commission report: Place,
             housing, and racial wealth inequality in Los
             Angeles},
   Journal = {Rsf: the Russell Sage Foundation Journal of the Social
             Sciences},
   Volume = {4},
   Number = {6},
   Pages = {160--184},
   Publisher = {Russell Sage Foundation},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.7758/rsf.2018.4.6.08},
   Abstract = {© 2018 Russell Sage Foundation. All Rights Reserved. Fifty
             years after the national Kerner Commission report on urban
             unrest and fifty-three years after California's McCone
             Commission report on the 1965 Watts riots, substantial
             racial disparity in education, housing, employment, and
             wealth is still pervasive in Los Angeles. Neither report
             mentions wealth inequality as a cause for concern, however.
             This article examines one key dimension of racial wealth
             inequality through the lens of home ownership, particularly
             in South Los Angeles, where the 1965 Watts riots took place.
             It also analyzes the state's role in housing development in
             codifying and expanding practices of racial and class
             segregation that has led to the production and reproduction
             of racial inequality in South Los Angeles compared with Los
             Angeles County.},
   Doi = {10.7758/rsf.2018.4.6.08},
   Key = {fds339727}
}

% NOTE(review): page range normalized to en-dash; garbled word "forma tion"
% in the abstract repaired to "formation".
@article{fds339728,
   Author = {Bentley-Edwards, KL and Edwards, MC and Spence, CN and Darity, WA and Hamilton, D and Perez, J},
   Title = {How does it feel to be a problem? The missing Kerner
             commission report},
   Journal = {Rsf: the Russell Sage Foundation Journal of the Social
             Sciences},
   Volume = {4},
   Number = {6},
   Pages = {20--40},
   Publisher = {Russell Sage Foundation},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.7758/rsf.2018.4.6.02},
   Abstract = {© 2018 Russell Sage Foundation. All Rights Reserved. Using
             an intersectional lens of race and gender, this article
             offers a critique of the Kerner Commission report and fills
             the gap of the missing analysis of white rage and of black
             women. A protracted history of white race riots resulted in
             the loss of black lives, black-owned property, and
             constitutional rights. However, only black riots, marked by
             the loss of white-owned property but few white lives, was
             the issue that prompted the formation of a national
             commission to investigate the events. Then and now, the
             privileging of white property rights over black life and
             liberty explains why black revolts result in presidential
             commissions, but white terror campaigns have never led to
             any comparable study.},
   Doi = {10.7758/rsf.2018.4.6.02},
   Key = {fds339728}
}

% NOTE(review): page range normalized to en-dash.
@article{fds337709,
   Author = {Broady, KE and Todd, CL and Darity, WA},
   Title = {Passing and the Costs and Benefits of Appropriating
             Blackness},
   Journal = {The Review of Black Political Economy},
   Volume = {45},
   Number = {2},
   Pages = {104--122},
   Publisher = {SAGE Publications},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1177/0034644618789182},
   Abstract = {© The Author(s) 2018. The socioeconomic position of Blacks
             in America cannot be fully contextualized without
             considering the marginalization of their racialized social
             identities as minorities who have historically combated
             subjugation and oppression with respect to income,
             employment, homeownership, education, and political
             representation. It is not difficult to understand why the
             historical reference to “passing” primarily has been
             associated with Blacks who were able to—and many who
             did—claim to be White to secure the social, educational,
             political, and economic benefits that were reserved for
             Whites. Therefore, the majority of passing narratives have
             focused on Black to White passing. This article departs from
             the tradition in the literature by considering appropriation
             of various aspects of Black culture and White to Black
             passing. We evaluate the socioeconomic costs and benefits of
             being Black and inequalities in citizenship status between
             Blacks and Whites. Furthermore, we examine the socioeconomic
             and political capital of Blackness versus Whiteness in an
             attempt to explore the rationality of passing for
             Black.},
   Doi = {10.1177/0034644618789182},
   Key = {fds337709}
}

% NOTE(review): page range normalized to en-dash.
@article{fds340886,
   Author = {Diette, TM and Goldsmith, AH and Hamilton, D and Darity,
             W},
   Title = {Adult happiness and prior traumatic victimization in and out
             of the household},
   Journal = {Review of Economics of the Household},
   Volume = {16},
   Number = {2},
   Pages = {275--295},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s11150-016-9334-0},
   Abstract = {© 2016, Springer Science+Business Media New York. A large
             share of the American population suffers from traumatic
             experiences early in life. Many adults are also victims of
             trauma. Using data drawn from the National Comorbidity
             Survey–Replication, we examine the link between
             self-reported happiness, a broad gauge of subjective
             well-being, and four types of traumatic victimization that
             may occur at various points in the life cycle. In
             particular, we consider the association between home
             violence, sexual assault, community violence, and stalking
             and subsequent victims’ adult happiness. For females and
             males, we find that each of these traumas significantly
             reduces self-reported happiness later in the life course,
             and for both women and men, the estimated impact of home
             violence is greatest. Furthermore, we find that the adverse
             effects of trauma on happiness are comparable to the impact
             of critical socioeconomic developments on happiness.
             Moreover, we find that experiencing more than one type of
             these four traumas has a greater negative impact on
             subsequent happiness than experiencing only one type. Our
             findings are robust to the inclusion of a wide range of
             controls, and the influence of trauma on subsequent
             happiness is independent of personal and family
             characteristics. Since happiness and mental health are
             closely related, our work suggests that traumatic
             victimization undermines overall health and well-being in
             the U.S.},
   Doi = {10.1007/s11150-016-9334-0},
   Key = {fds340886}
}

% NOTE(review): page range normalized to en-dash.
@article{fds335168,
   Author = {Darity, W and Hamilton, D},
   Title = {The Federal Job Guarantee},
   Journal = {Intereconomics},
   Volume = {53},
   Number = {3},
   Pages = {179--180},
   Publisher = {Springer Nature},
   Year = {2018},
   Month = {May},
   url = {http://dx.doi.org/10.1007/s10272-018-0744-5},
   Doi = {10.1007/s10272-018-0744-5},
   Key = {fds335168}
}

% NOTE(review): page range normalized to en-dash.
@article{fds339729,
   Author = {Paul, M and Darity, W and Hamilton, D and Zaw, K},
   Title = {A path to ending poverty by way of ending unemployment: A
             federal job guarantee},
   Journal = {Rsf: the Russell Sage Foundation Journal of the Social
             Sciences},
   Volume = {4},
   Number = {3},
   Pages = {44--63},
   Publisher = {Russell Sage Foundation},
   Year = {2018},
   Month = {February},
   url = {http://dx.doi.org/10.7758/rsf.2018.4.3.03},
   Abstract = {© 2018 Russell Sage Foundation. Poverty in the United
             States, one of the world's most wealthy and prosperous
             nations, is persistently high. Despite a complex array of
             social insurance programs in place, 43.1 million people
             remain in poverty. Because unemployment is a strong
             predictor of poverty, we propose a permanent federal job
             guarantee for all Americans. The program would provide
             full-time employment for any American over eighteen,
             offering at least nonpoverty wages plus benefits. Such a
             program will constitute a direct route to producing full
             employment by eradicating involuntary unemployment. It also
             will substantially increase worker bargaining power by
             removing the employer threat of unemployment. To make the
             case that the federal job guarantee is viable, this paper
             includes responses to five common criticisms lodged against
             programs of this type.},
   Doi = {10.7758/rsf.2018.4.3.03},
   Key = {fds339729}
}


%% Dix-Carneiro, Rafael   
% NOTE(review): page range normalized to en-dash.
@article{fds335429,
   Author = {Dix-Carneiro, R and Kovak, BK},
   Title = {Margins of labor market adjustment to trade},
   Journal = {Journal of International Economics},
   Volume = {117},
   Pages = {125--142},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.jinteco.2019.01.005},
   Abstract = {© 2019 Elsevier B.V. We use both longitudinal
             administrative data and cross-sectional household survey
             data to study the margins of labor market adjustment
             following Brazil's early 1990s trade liberalization. We
             document how workers and regional labor markets adjust to
             trade-induced changes in local labor demand, examining
             various adjustment margins, including earnings and wage
             changes; interregional migration; shifts between tradable
             and nontradable employment; and shifts between formal
             employment, informal employment, and non-employment. Our
             results provide insight into the regional labor market
             effects of trade, and have important implications for
             policies that address informal employment and that assist
             trade-displaced workers.},
   Doi = {10.1016/j.jinteco.2019.01.005},
   Key = {fds335429}
}

% NOTE(review): page range normalized to en-dash.
@article{fds325540,
   Author = {Dix-Carneiro, R and Soares, RR and Ulyssea, G},
   Title = {Economic shocks and crime: Evidence from the Brazilian trade
             liberalization},
   Journal = {American Economic Journal: Applied Economics},
   Volume = {10},
   Number = {4},
   Pages = {158--195},
   Publisher = {American Economic Association},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1257/app.20170080},
   Abstract = {© 2018 American Economic Association. This paper studies
             the effect of changes in economic conditions on crime. We
             exploit the 1990s trade liberalization in Brazil as a
             natural experiment generating exogenous shocks to local
             economies. We document that regions exposed to larger tariff
             reductions experienced a temporary increase in crime
             following liberalization. Next, we investigate through what
             channels the trade-induced economic shocks may have affected
             crime. We show that the shocks had significant effects on
             potential determinants of crime, such as labor market
             conditions, public goods provision, and income inequality.
             We propose a novel framework exploiting the distinct dynamic
             responses of these variables to obtain bounds on the effect
             of labor market conditions on crime. Our results indicate
             that this channel accounts for 75 to 93 percent of the
             effect of the trade-induced shocks on crime.},
   Doi = {10.1257/app.20170080},
   Key = {fds325540}
}


%% Frakes, Michael D   
% NOTE(review): page range normalized to en-dash.
@article{fds339693,
   Author = {Frakes, M and Wasserman, M},
   Title = {Irrational Ignorance at the Patent Office},
   Journal = {Vanderbilt Law Review},
   Volume = {72},
   Number = {3},
   Pages = {975--1030},
   Year = {2019},
   Key = {fds339693}
}

% NOTE(review): page range normalized to en-dash.
@article{fds335994,
   Author = {Elsamadicy, AA and Sergesketter, AR and Frakes, MD and Lad,
             SP},
   Title = {Review of Neurosurgery Medical Professional Liability Claims
             in the United States.},
   Journal = {Neurosurgery},
   Volume = {83},
   Number = {5},
   Pages = {997--1006},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1093/neuros/nyx565},
   Abstract = {BACKGROUND: Due to disparaging costs and rates of
             malpractice claims in neurosurgery, there has been
             significant interest in identifying high-risk specialties,
             types of malpractice claims, and characteristics of
             claim-prone physicians. OBJECTIVE: To characterize the
             malpractice claims against neurosurgeons. METHODS: This was
             a comprehensive analysis of all malpractice liability claims
             involving a neurosurgeon as the primary defendant, conducted
             using the Physician Insurers Association of America Data
             Sharing Project from January 1, 2003 and December 31, 2012.
             RESULTS: From 2003 to 2012, 2131 closed malpractice claims
             were filed against a neurosurgeon. The total amount of
             indemnity paid collective between 1998 to 2002, 2003 to
             2007, and 2008 to 2012 was $109 614 935, $140 031 875, and
             $122 577 230, respectively. Of all the neurosurgery claims,
             the most prevalent chief medical factor was improper
             performance (42.1%, $124 943 933), presenting medical
             condition was intervertebral disc disorder (20.6%, $54 223
             206), and operative procedure performed involved the spinal
             cord and/or spinal canal (21.0%, $62 614 995). Eighty-five
             (22.91%) of the total neurosurgery claims resulted in
             patient death, resulting in $32 067 759 paid. Improper
             performance of the actual procedure was the most prevalent
             and highest total paid cause for patient death ($9 584 519).
             CONCLUSION: From 2003 to 2012, we found that neurosurgery
             malpractice claims rank among one of the most costly and
             prevalent, with the average indemnities paid annually and
             the overall economic burden increasing. Diagnoses and
             procedures involving the spine, along with improper
             performance, were the most prevalent malpractice claims
             against neurosurgeons. Continued medical malpractice reform
             is essential to correct the overall health care cost
             burdens, and ultimately improve patient safety.},
   Doi = {10.1093/neuros/nyx565},
   Key = {fds335994}
}

% NOTE(review): retyped from 'misc' to 'incollection' -- the record has
% Booktitle and Publisher, i.e. a chapter in an edited volume; 'misc'
% ignores Booktitle in standard styles.
@incollection{fds333544,
   Author = {Frakes, M and Wasserman, M},
   Title = {Do Patent Lawsuits Target Invalid Patents},
   Booktitle = {Selection and Decision in the Judicial Process Around the
             World: Empirical Inquiries},
   Publisher = {Cambridge University Press},
   Year = {2018},
   Key = {fds333544}
}


%% Frankenberg, Elizabeth   
% NOTE(review): page range normalized to en-dash.
@article{fds335174,
   Author = {Thomas, D and Seeman, T and Potter, A and Hu, P and Crimmins, E and Herningtyas, EH and Sumantri, C and Frankenberg,
             E},
   Title = {HPLC-based Measurement of Glycated Hemoglobin using Dried
             Blood Spots Collected under Adverse Field
             Conditions.},
   Journal = {Biodemography and Social Biology},
   Volume = {64},
   Number = {1},
   Pages = {43--62},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1080/19485565.2018.1451300},
   Abstract = {Glycated hemoglobin (HbA1c) measured using high-performance
             liquid chromatography (HPLC) assays with venous blood and
             dried blood spots (DBS) are compared for 143 paired samples
             collected in Aceh, Indonesia. Relative to gold-standard
             venous-blood values, DBS-based values reported by the HPLC
             are systematically upward biased for HbA1c<8% and the
             fraction diabetic (HbA1c ≥ 6.5%) is overstated almost
             five-fold. Inspection of chromatograms from DBS assays
             indicates the % glycosylated calculated by the HPLC excludes
             part of the hemoglobin A which is misidentified as a
             hemoglobin variant. Taking this into account, unbiased
             DBS-based values are computed using data from the
             machine-generated chromatograms. When the DBS are collected
             in a clinic-like setting, under controlled
             humidity/temperature conditions, the recalculated values are
             almost identical to venous-based values. When DBS are
             collected under field conditions, the recalculated values
             are unbiased, but only about half the HbA1c values are
             measured reliably, calling into question the validity of the
             other half. The results suggest that collection conditions,
             particularly humidity, affect the quality of the DBS-based
             measures. Cross-validating DBS-based HbA1c values with
             venous samples collected under exactly the same
             environmental conditions is a prudent investment in
             population-based studies.},
   Doi = {10.1080/19485565.2018.1451300},
   Key = {fds335174}
}


%% Fullenkamp, Connel   
% NOTE(review): page range normalized to en-dash.
@article{fds338470,
   Author = {Chami, R and Ernst, E and Fullenkamp, C and Oeking,
             A},
   Title = {Is there a remittance trap?},
   Journal = {Finance and Development},
   Volume = {55},
   Number = {3},
   Pages = {44--47},
   Year = {2018},
   Month = {September},
   Key = {fds338470}
}


%% Grabowski, Henry G.   
% NOTE(review): author names converted to unambiguous 'Last, First' form
% (stray double space removed); Doi stored as a bare DOI without resolver
% prefix; page range normalized. 'Winter' is not a standard month macro and
% vol. 3(1) of this journal suggests 2017 -- verify Year/Month.
@article{fds332940,
   Author = {Grabowski, H. G. and Brain, C. and Taub, A. and Guha,
             R.},
   Title = {Pharmaceutical Patent Challenges: Company Strategies and
             Litigation Outcomes},
   Journal = {American Journal of Health Economics},
   Volume = {3},
   Number = {1},
   Pages = {33--59},
   Year = {2018},
   Month = {Winter},
   url = {https://www.mitpressjournals.org/doi/10.1162/AJHE_a_00066},
   Doi = {10.1162/AJHE_a_00066},
   Key = {fds332940}
}

% NOTE(review): author names converted to unambiguous 'Last, First' form.
@misc{fds332943,
   Author = {Grabowski, H. G. and Manning, Richard},
   Title = {Key economic and value considerations in the U.S. market for
             plasma protein therapies},
   Publisher = {Bates White Economic Consulting},
   Year = {2018},
   Month = {February},
   url = {https://drive.google.com/file/d/179-f0zh7y-w7QVC6p1CmO_o-YMr5FFfX/view},
   Key = {fds332943}
}


%% Hoover, Kevin D.   
% NOTE(review): page range normalized to en-dash.
@article{fds342813,
   Author = {Hoover, KD},
   Title = {Keynes and economics},
   Journal = {History of Political Economy},
   Volume = {51},
   Number = {1},
   Pages = {83--88},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1215/00182702-7289276},
   Doi = {10.1215/00182702-7289276},
   Key = {fds342813}
}

% NOTE(review): page range normalized to en-dash; proper name "Craufurd
% Goodwin" capitalized and brace-protected against style recasing.
@article{fds342814,
   Author = {Hoover, KD},
   Title = {{Craufurd Goodwin}: Economist as collector},
   Journal = {History of Political Economy},
   Volume = {51},
   Number = {1},
   Pages = {187--191},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1215/00182702-7289420},
   Doi = {10.1215/00182702-7289420},
   Key = {fds342814}
}

% NOTE(review): brief item in The Economist; no Pages field in the source
% record and the Volume/Number pair is unverified -- confirm against the issue.
@article{fds333200,
   Author = {Hoover, K},
   Title = {Scots are more studious},
   Journal = {Economist},
   Volume = {414},
   Number = {9074},
   Year = {2018},
   Month = {February},
   Key = {fds333200}
}

% NOTE(review): online-first record -- no Volume/Number/Pages assigned yet;
% update once the article is paginated. Left otherwise untouched.
@article{fds339820,
   Author = {Hoover, KD},
   Title = {First principles, fallibilism, and economics},
   Journal = {Synthese},
   Publisher = {Springer Nature America, Inc},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1007/s11229-018-02021-8},
   Abstract = {© 2018, Springer Nature B.V. In the eyes of its
             practitioners, economics is both a deductive science and an
             empirical science. The starting point of its deductions
             might be thought of as first principles. But what is the
             status of such principles? The tension between
             foundationalism, the idea that there are necessary and
             secure first principles for economic inquiry, and
             fallibilism, the idea that no belief can be certified as
             true beyond the possibility of doubt, is explored. Empirical
             disciplines require some sort of falsifiability. Yet,
             empirical inquiries also require a starting place—if not a
             necessarily true one, at least an indubitable one, that is,
             one that is not actually doubted. Indubitability appears to
             have necessary consequences, undercutting fallibilism, while
             fallibilism threatens confidence in the de facto first
             principles that begin inquiry. This tension is examined in
             three well-known attempts to define economics and its
             method: John Stuart Mill’s economics as the science of
             wealth, Lionel Robbins’s economics as constrained
             optimization; and George Stigler and Becker’s attempt to
             reformulate neoclassical economics to square empiricism with
             Robbins’ deductivism.},
   Doi = {10.1007/s11229-018-02021-8},
   Key = {fds339820}
}


%% Hotz, V. Joseph   
% NOTE(review): page range normalized to en-dash.
@article{fds340659,
   Author = {Wiemers, EE and Seltzer, JA and Schoeni, RF and Hotz, VJ and Bianchi,
             SM},
   Title = {Stepfamily Structure and Transfers Between Generations in
             U.S. Families.},
   Journal = {Demography},
   Volume = {56},
   Number = {1},
   Pages = {229--260},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1007/s13524-018-0740-1},
   Abstract = {Unstable couple relationships and high rates of repartnering
             have increased the share of U.S. families with stepkin. Yet
             data on stepfamily structure are from earlier periods,
             include only coresident stepkin, or cover only older adults.
             In this study, we use new data on family structure and
             transfers in the Panel Study of Income Dynamics (PSID) to
             describe the prevalence and numbers of stepparents and
             stepchildren for adults of all ages and to characterize the
             relationship between having stepkin and transfers of time
             and money between generations, regardless of whether the kin
             live together. We find that having stepparents and
             stepchildren is very common among U.S. households,
             especially younger households. Furthermore, stepkin
             substantially increase the typical household's family size;
             stepparents and stepchildren increase a household's number
             of parents and adult children by nearly 40 % for
             married/cohabiting couples with living parents and children.
             However, having stepkin is associated with fewer transfers,
             particularly time transfers between married women and their
             stepparents and stepchildren. The increase in the number of
             family members due to stepkin is insufficient to compensate
             for the lower likelihood of transfers in stepfamilies. Our
             findings suggest that recent cohorts with more stepkin may
             give less time assistance to adult children and receive less
             time assistance from children in old age than prior
             generations.},
   Doi = {10.1007/s13524-018-0740-1},
   Key = {fds340659}
}

% NOTE(review): unpublished working-paper record -- 'article' type without a
% Journal field triggers a missing-field warning in standard styles; consider
% the 'unpublished' type with a Note, or add venue details when available.
@article{fds341373,
   Author = {Hotz, VJ and Wiemers, E and Rasmussen, J and Maxwell,
             K},
   Title = {The Role of Parental Wealth and Income in Financing
             Children's College Attendance and its Consequences},
   Year = {2018},
   Month = {October},
   Key = {fds341373}
}

% NOTE(review): unpublished working-paper record -- 'article' type without a
% Journal field triggers a missing-field warning in standard styles; consider
% the 'unpublished' type with a Note, or add venue details when available.
@article{fds341374,
   Author = {Ashworth, J and Hotz, VJ and Maurel, A and Ransom,
             T},
   Title = {Changes Across Cohorts in Wage Returns to Schooling and
             Early Work Experiences},
   Year = {2018},
   Month = {May},
   Key = {fds341374}
}


%% Ilut, Cosmin L.   
% NOTE(review): page range normalized to en-dash.
@article{fds324314,
   Author = {Ilut, C and Kehrig, M and Schneider, M},
   Title = {Slow to hire, quick to fire: Employment dynamics with
             asymmetric responses to news},
   Journal = {Journal of Political Economy},
   Volume = {126},
   Number = {5},
   Pages = {2011--2071},
   Publisher = {University of Chicago Press},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1086/699189},
   Abstract = {© 2018 by The University of Chicago. All rights reserved.
             Concave hiring rules imply that firms respond more to bad
             shocks than to good shocks. They provide a unified
             explanation for several seemingly unrelated facts about
             employment growth in macro-and microdata. In particular,
             they generate countercyclical movement in both aggregate
             conditional “macro” volatility and cross-sectional
             “micro” volatility, as well as negative skewness in the
             cross section and in the time series at different levels of
             aggregation. Concave establishment-level responses of
             employment growth to total factor productivity shocks
             estimated from census data induce significant skewness,
             movements in volatility, and amplification of bad aggregate
             shocks.},
   Doi = {10.1086/699189},
   Key = {fds324314}
}


%% Jurado, Kyle   
% NOTE(review): Journal field was missing; inferred from the DOI prefix
% (10.1257/aer.* resolves to the American Economic Review). Number = {228}
% looks erroneous for this journal (vol. 108, no. 7 would be expected) --
% verify against the published issue. Page range normalized to en-dash.
@article{fds320593,
   Author = {Chahrour, R and Jurado, K},
   Title = {News or noise? the missing link},
   Journal = {American Economic Review},
   Number = {228},
   Pages = {1702--1736},
   Publisher = {American Economic Association},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1257/aer.20170792},
   Abstract = {© 2018 American Economic Association. All rights reserved.
             The literature on belief-driven business cycles treats news
             and noise as distinct representations of agents' beliefs. We
             prove they are empirically the same. Our result lets us
             isolate the importance of purely belief-driven fluctuations.
             Using three prominent estimated models, we show that
             existing research understates the importance of pure
             beliefs. We also explain how differences in both economic
             environment and information structure affect the estimated
             importance of pure beliefs.},
   Doi = {10.1257/aer.20170792},
   Key = {fds320593}
}


%% Kehrig, Matthias   
@article{fds325834,
   Author = {Donangelo, A and Gourio, F and Kehrig, M and Palacios,
             M},
   Title = {The Cross-Section of Labor Leverage and Equity
             Returns},
   Journal = {Journal of Financial Economics},
   Pages = {497--518},
   Publisher = {Elsevier BV},
   Year = {2019},
   Month = may,
   url = {http://dx.doi.org/10.1016/j.jfineco.2018.10.016},
   Abstract = {The relative size and inflexibility of labor
             expenses lead to a form of operating leverage, which we call
             labor leverage. We derive a set of conditions for the
             existence of labor leverage even when labor markets are
             frictionless. Our model provides theoretical support for the
             use of firm-level labor share as a measure of labor
             leverage. Using Compustat/CRSP and confidential Census data,
             we provide evidence for the existence and for the economic
             significance of labor leverage: high labor share firms have
             operating profits that are more sensitive to economic shocks
             and have higher expected returns.},
   Doi = {10.1016/j.jfineco.2018.10.016},
   internal-note = {Journal inferred from the DOI prefix (j.jfineco) --
             confirm and add the published volume/issue.},
   Key = {fds325834}
}

@article{fds335430,
   Author = {Kehrig, M},
   Title = {Comment on “Computerizing industries and routinizing jobs:
             Explaining trends in aggregate productivity” by {Sangmin
             Aum}, {Sang Yoon (Tim) Lee} and {Yongseok Shin}},
   Journal = {Journal of Monetary Economics},
   Volume = {97},
   Pages = {22--28},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = aug,
   url = {http://dx.doi.org/10.1016/j.jmoneco.2018.05.004},
   Abstract = {Aum et al. (2018) quantify the impact
             of production complementarities and differential
             productivity growth across occupations and sectors on the
             slowdown of aggregate productivity growth. This note expands
             their work to study substitutability between new computer
             equipment and labor in individual occupations as opposed to
             all occupations combined. Preliminary empirical evidence
             suggests (1) significantly different elasticities of
             substitution between computers and labor across occupations
             and (2) a strong correlation between productivity growth of
             computers and labor in occupations where these two inputs
             are complementary. When they are substitutes, however, their
             productivity growth rates appear uncorrelated. These
             findings have the potential to amplify or weaken the
             magnitude of the aggregate productivity slowdown explained
             by Aum et al. (2018) making their approach a promising
             avenue for future research.},
   Doi = {10.1016/j.jmoneco.2018.05.004},
   Key = {fds335430}
}


%% Kozecke, Lindsey Eldred   
@article{fds337008,
   Author = {Sloan, FA and Robinson, PA and Eldred, LM},
   Title = {Advantageous Selection, Moral Hazard, and Insurer Sorting on
             Risk in the {U.S.} Automobile Insurance Market.},
   Journal = {Journal of Risk and Insurance},
   Volume = {85},
   Number = {2},
   Pages = {545--575},
   Year = {2018},
   Month = jun,
   url = {http://dx.doi.org/10.1111/jori.12170},
   Abstract = {This study quantifies the role of private information in
             automobile insurance policy choice using data on subjective
             beliefs, risk preference, reckless driving, the respondent's
             insurer and insurance policy characteristics merged with
             insurer-specific quality ratings distributed by independent
             organizations. We find a zero correlation between ex post
             accident risk and insurance coverage, reflecting
             advantageous selection in policy choice offset by moral
             hazard. Advantageous selection is partly attributable to
             insurer sorting on consumer attributes known and used by
             insurers. Our analysis of insurer sorting reveals that
             lower-risk drivers on attributes observed by insurers obtain
             coverage from insurers with higher-quality
             ratings.},
   Doi = {10.1111/jori.12170},
   Key = {fds337008}
}

@article{fds337009,
   Author = {Eldred, L and Gifford, EJ},
   Title = {Downstream Effects of Criminal Justice Involvement},
   Journal = {Corrdocs},
   Volume = {21},
   Number = {2},
   Pages = {1},
   Year = {2018},
   Key = {fds337009}
}

@article{fds337010,
   Author = {Robinson, P and Sloan, F and Eldred, L},
   Title = {Advantageous Selection, Moral Hazard, and Insurer Sorting on
             Risk in the {U.S.} Automobile Insurance Market},
   Journal = {Journal of Risk and Insurance},
   Volume = {85},
   Number = {2},
   Pages = {545--575},
   Publisher = {WILEY},
   Year = {2018},
   internal-note = {Apparent duplicate of entry fds337008 (same journal,
             volume, issue, pages, and title) -- consider merging.},
   Key = {fds337010}
}


%% Kramer, Randall   
@article{fds341554,
   Author = {Khandekar, E and Kramer, R and Ali, AS and Al-Mafazy, A-W and Egger, JR and LeGrand, S and Mkali, HR and McKay, M and Ngondi,
             JM},
   Title = {Evaluating Response Time in {Zanzibar}'s Malaria Elimination
             Case-Based Surveillance-Response System.},
   Journal = {The American Journal of Tropical Medicine and
             Hygiene},
   Volume = {100},
   Number = {2},
   Pages = {256--263},
   Year = {2019},
   Month = feb,
   url = {http://dx.doi.org/10.4269/ajtmh.17-0546},
   Abstract = {As countries transition toward malaria elimination, malaria
             programs rely on surveillance-response systems, which are
             often supported by web- and mobile phone-based reporting
             tools. Such surveillance-response systems are interventions
             for elimination, making it important to determine if they
             are operating optimally. A metric to measure this by is
             timeliness. This study used a mixed-methods approach to
             investigate the response time of Zanzibar's malaria
             elimination surveillance-response system, Malaria Case
             Notification (MCN). MCN conducts both passive and reactive
             case detection, supported by a mobile phone-based reporting
             tool called Coconut Surveillance. Using data obtained from
             RTI International and the Zanzibar Malaria Elimination
             Program (ZAMEP), analysis of summary statistics was
             conducted to investigate the association of response time
             with geography, and time series techniques were used to
             investigate trends in response time and its association with
             the number of reported cases. Results indicated that
             response time varied by the district in Zanzibar (0.6-6.05
             days) and that it was not associated with calendar time or
             the number of reported cases. Survey responses and focus
             groups with a cadre of health workers, district malaria
             surveillance officers, shed light on operational challenges
             faced during case investigation, such as incomplete health
             records and transportation issues, which stem from
             deficiencies in aspects of ZAMEP's program management. These
             findings illustrate that timely response for malaria
             elimination depends on effective program management, despite
             the automation of web-based or mobile phone-based tools. For
             surveillance-response systems to work optimally, malaria
             programs should ensure that optimal management practices are
             in place.},
   Doi = {10.4269/ajtmh.17-0546},
   Key = {fds341554}
}

@article{fds332061,
   Author = {Cole, JC and McDonald, JB and Wen, X and Kramer, RA},
   Title = {Marketing Energy Efficiency: Perceived Benefits and Barriers
             to Home Energy Efficiency},
   Journal = {Energy Efficiency},
   Volume = {11},
   Number = {7},
   Pages = {1811--1824},
   Publisher = {Springer Nature America, Inc},
   Year = {2018},
   Month = oct,
   url = {http://dx.doi.org/10.1007/s12053-018-9614-z},
   Abstract = {Energy efficiency contributes significantly
             to the reduction of greenhouse gas emissions and the
             associated mitigation of climate change. The uptake of
             energy efficiency measures in the residential sector
             requires significant effort on the part of homeowners or
             residents. Past research has revealed that cost savings and
             social interaction motivate energy efficiency behavior. This
             study expands on this research by examining the hypothesis
             that there are regional differences in what motivates
             individuals to implement home energy efficiency upgrades.
             Two surveys (N = 320 and N = 423) examine the perceived
             benefits of and barriers to undertaking home energy
             efficiency improvements in varying geographic regions across
             the USA and test marketing materials that target these
             benefits and barriers. The hypothesis that there are
             regional differences in perceptions of energy efficiency was
             confirmed. Cost savings were found to be the most important
             benefit to individuals across the country. Energy efficiency
             being a good investment is either the second or third most
             important benefit across all regions. Increased comfort is
             the last of the top three most important benefits to those
             in the South and Midwest, while those in the Northeast
             demonstrated interest in the increase in home retail value
             associated with energy efficiency, and those in the West
             found the environmental benefits to be important. High costs
             of energy efficiency improvements were found to be the most
             commonly perceived barrier. Reported likelihood to enroll in
             a home energy efficiency program offered by one’s employer
             was predicted by perceived likelihood that coworkers would
             enroll, income level, and personal opinions about the
             importance of energy efficiency.},
   Doi = {10.1007/s12053-018-9614-z},
   Key = {fds332061}
}


%% Kranton, Rachel   
@article{fds341870,
   Author = {Amasino, DR and Sullivan, NJ and Kranton, RE and Huettel,
             SA},
   Title = {Amount and Time Exert Independent Influences on
             Intertemporal Choice.},
   Journal = {Nature Human Behaviour},
   Volume = {3},
   Number = {4},
   Pages = {383--392},
   Year = {2019},
   Month = apr,
   url = {http://dx.doi.org/10.1038/s41562-019-0537-2},
   Abstract = {Intertemporal choices involve trade-offs between the value
             of rewards and the delay before those rewards are
             experienced. Canonical intertemporal choice models such as
             hyperbolic discounting assume that reward amount and time
             until delivery are integrated within each option prior to
             comparison1,2. An alternative view posits that intertemporal
             choice reflects attribute-wise processes in which amount and
             time attributes are compared separately3-6. Here, we use
             multi-attribute drift diffusion modelling (DDM) to show that
             attribute-wise comparison represents the choice process
             better than option-wise comparison for intertemporal choice
             in a young adult population. We find that, while
             accumulation rates for amount and time information are
             uncorrelated, the difference between those rates predicts
             individual differences in patience. Moreover, patient
             individuals incorporate amount earlier than time into the
             decision process. Using eye tracking, we link these
             modelling results to attention, showing that patience
             results from a rapid, attribute-wise process that
             prioritizes amount over time information. Thus, we find
             converging evidence that distinct evaluation processes for
             amount and time determine intertemporal financial choices.
             Because intertemporal decisions in the lab have been linked
             to failures of patience ranging from insufficient saving to
             addiction7-13, understanding individual differences in the
             choice process is important for developing more effective
             interventions.},
   Doi = {10.1038/s41562-019-0537-2},
   Key = {fds341870}
}

@article{fds343586,
   Author = {Kranton, R},
   Title = {The Devil Is in the Details: Implications of {Samuel
             Bowles}'s {The Moral Economy} for Economics and Policy
             Research},
   Journal = {Journal of Economic Literature},
   Volume = {57},
   Number = {1},
   Pages = {147--160},
   Year = {2019},
   Month = mar,
   url = {http://dx.doi.org/10.1257/jel.20171463},
   Abstract = {All economists should buy and read The Moral Economy by
             Samuel Bowles. The book challenges basic premises of
             economic theory and questions policies based on monetary
             incentives. Incentives not only crowd out intrinsic
             motivations, they erode the ethical and moral codes
             necessary for the workings of markets. Bowles boldly
             suggests that successful policies must combine incentives
             and moral messages, exploiting complementarities between the
             two. This essay argues that to achieve this objective,
             economists must study the local institutions and social
             context and engage untraditional data to uncover the
             interplay of incentives and identity.},
   Doi = {10.1257/jel.20171463},
   Key = {fds343586}
}

@article{fds337046,
   Author = {Bloch, F and Demange, G and Kranton, R},
   Title = {Rumors and Social Networks},
   Journal = {International Economic Review},
   Pages = {421--448},
   Publisher = {WILEY},
   Year = {2018},
   Month = may,
   url = {http://dx.doi.org/10.1111/iere.12275},
   Abstract = {This article studies the
             transmission of rumors in social networks. We consider a
             model with biased and unbiased agents. Biased agents want to
             enforce a specific decision and unbiased agents to match the
             true state. One agent learns the true state and sends a
             message to her neighbors, who decide whether or not to
             transmit it further. We characterize the perfect Bayesian
             equilibria of the game, show that the social network can act
             as a filter, and that biased agents may have an incentive to
             limit their number.},
   Doi = {10.1111/iere.12275},
   internal-note = {Journal inferred from the DOI prefix (10.1111/iere)
             and the copyright line of the exported abstract -- confirm
             and add the published volume/issue.},
   Key = {fds337046}
}


%% Kuran, Timur   
@article{fds343458,
   Author = {Kuran, T},
   Title = {Zakat: {Islam}'s Missed Opportunity to Limit Predatory
             Taxation},
   Journal = {Public Choice},
   Year = {2019},
   Month = jan,
   url = {http://dx.doi.org/10.1007/s11127-019-00663-x},
   Abstract = {One of Islam’s five canonical pillars is
             a predictable, fixed, and mildly progressive tax system
             called zakat. It was meant to finance various causes typical
             of a pre-modern government. Implicit in the entire transfer
             system was personal property rights as well as constraints
             on government—two key elements of a liberal order. Those
             features could have provided the starting point for
             broadening political liberties under a state with explicitly
             restricted functions. Instead, just a few decades after the
             rise of Islam, zakat opened the door to arbitrary political
             rule and material insecurity. A major reason is that the
             Quran does not make explicit the underlying principles of
             governance. It simply outlines the specifics of zakat as
             they related to conditions in seventh-century
             Arabia.},
   Doi = {10.1007/s11127-019-00663-x},
   Key = {fds343458}
}

@article{fds341375,
   Author = {Kuran, T},
   Title = {{Islam} and Economic Performance: Historical and Contemporary
             Links},
   Journal = {Journal of Economic Literature},
   Volume = {56},
   Pages = {1292--1359},
   Year = {2018},
   Month = dec,
   url = {http://dx.doi.org/10.1257/jel.20171243},
   Abstract = {This essay critically evaluates the analytic
             literature concerned with causal connections between Islam
             and economic performance. It focuses on works since 1997,
             when this literature was last surveyed comprehensively.
             Among the findings are the following: Ramadan fasting by
             pregnant women harms prenatal development; Islamic charities
             mainly benefit the middle class; Islam affects educational
             outcomes less through Islamic schooling than through
             structural factors that handicap learning as a whole;
             Islamic finance has a negligible effect on Muslim financial
             behavior; and low generalized trust depresses Muslim trade.
             The last feature reflects the Muslim world's delay in
             transitioning from personal to impersonal exchange. The
             delay resulted from the persistent simplicity of the private
             enterprises formed under Islamic law. Weak property rights
             reinforced the private sector's stagnation by driving
             capital from commerce to rigid waqfs. Waqfs limited economic
             development through their inflexibility and democratization
             by keeping civil society embryonic. Parts of the Muslim
             world conquered by Arab armies are especially undemocratic,
             which suggests that early Islamic institutions were
             particularly critical to the persistence of authoritarian
             patterns of governance. States have contributed to the
             persistence of authoritarianism by treating Islam as an
             instrument of governance. As the world started to
             industrialize, non-Muslim subjects of Muslim-governed states
             pulled ahead of their Muslim neighbors, partly by exercising
             the choice of law they enjoyed under Islamic law in favor of
             a Western legal system.},
   Doi = {10.1257/jel.20171243},
   Key = {fds341375}
}


%% Ladd, Helen F.   
@article{fds340050,
   Author = {Brighouse, H and Ladd, H and Loeb, S and Swift, A},
   Title = {Good Education Policy Making: Data-Informed but
             Values-Driven},
   Journal = {Phi Delta Kappan},
   Volume = {100},
   Number = {4},
   Pages = {36--39},
   Publisher = {SAGE Publications},
   Year = {2018},
   Month = dec,
   url = {http://dx.doi.org/10.1177/0031721718815671},
   Abstract = {In this article,
             based on their book Educational Goods: Values, Evidence and
             Decision Making, Harry Brighouse, Helen Ladd, Susanna Loeb,
             and Adam Swift encourage education decision makers to give
             careful thought to the values that underlie the data they
             collect and use to inform policy. Rather than basing
             decisions entirely on what improves academic achievement,
             the authors call for attention to a wider array of values,
             which they call educational goods. These include the
             capacities to function in the labor market, to participate
             effectively in the democratic process, to make autonomous
             judgments about key life decisions such as occupation or
             religion, to develop healthy interpersonal relationships, to
             seek personal fulfilment, and to treat others with respect
             and dignity. Thinking in terms of these values can broaden
             the conversation about education priorities and bring
             clarity to decisions involving trade-offs and conflicting
             aims.},
   Doi = {10.1177/0031721718815671},
   Key = {fds340050}
}

@incollection{fds341115,
   Author = {Ladd, HF},
   Title = {Self-Governing Schools, Parental Choice, and the Public
             Interest},
   Pages = {235--248},
   Booktitle = {School Choice at the Crossroads: Research
             Perspectives},
   Year = {2018},
   Month = oct,
   ISBN = {9780815380368},
   url = {http://dx.doi.org/10.4324/9781351213318},
   Doi = {10.4324/9781351213318},
   Key = {fds341115}
}

@article{fds335186,
   Author = {Muschkin, CG and Ladd, HF and Dodge, KA and Bai, Y},
   Title = {Gender Differences in the Impact of {North Carolina}’s Early
             Care and Education Initiatives on Student Outcomes in
             Elementary School},
   Journal = {Educational Policy},
   Year = {2018},
   Month = may,
   url = {http://dx.doi.org/10.1177/0895904818773901},
   Abstract = {Based on growing evidence of
             the long-term benefits of enriched early childhood
             experiences, we evaluate the potential for addressing gender
             disparities in elementary school through early care and
             education programs. Specifically, we explore the
             community-wide effects of two statewide initiatives in North
             Carolina on gender differences in academic outcomes in
             Grades 3 to 5, using administrative student data and
             information on variation in program availability across
             counties and over time. We find that although investments in
             early care and education programs produce significant gains
             in math and reading skills on average for all children, boys
             experience larger program-related gains than girls.
             Moreover, the greatest gains among boys emerge for those
             from less advantaged families. In contrast, the large and
             statistically significant reductions in special education
             placements induced by these early childhood program do not
             differ consistently by gender.},
   Doi = {10.1177/0895904818773901},
   internal-note = {Exported Pages field was the DOI article suffix
             duplicated (089590481877390-089590481877390), not page
             numbers; removed. Online-first article -- add volume/issue/
             pages once assigned.},
   Key = {fds335186}
}

@article{fds340577,
   Author = {Ladd, HF and Singleton, JD},
   Title = {The Fiscal Externalities of Charter Schools: Evidence from
             {North Carolina}},
   Journal = {Economic Research Initiatives at Duke ({ERID}) Working
             Paper},
   Number = {261},
   Year = {2018},
   Month = apr,
   Key = {fds340577}
}

@article{fds331038,
   Author = {Heissel, JA and Ladd, HF},
   Title = {School Turnaround in {North Carolina}: A Regression
             Discontinuity Analysis},
   Journal = {Economics of Education Review},
   Volume = {62},
   Pages = {302--320},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = feb,
   url = {http://dx.doi.org/10.1016/j.econedurev.2017.08.001},
   Abstract = {This paper examines the effect of a federally
             supported school turnaround program in North Carolina
             elementary and middle schools. Using a regression
             discontinuity design, we find that the turnaround program
             did not improve, and may have reduced, average school-level
             passing rates in math and reading. One potential contributor
             to that finding appears to be that the program increased the
             concentration of low-income students in treated schools.
             Based on teacher survey data, we find that, as was intended,
             treated schools brought in new principals and increased the
             time teachers devoted to professional development. At the
             same time, the program increased administrative burdens and
             distracted teachers, potentially reducing time available for
             instruction, and increased teacher turnover after the first
             full year of implementation. Overall, we find little
             evidence of success for North Carolina's efforts to turn
             around low-performing schools under its Race to the Top
             grant.},
   Doi = {10.1016/j.econedurev.2017.08.001},
   Key = {fds331038}
}

@article{fds333293,
   Author = {Clotfelter, CT and Hemelt, SW and Ladd, HF},
   Title = {Multifaceted Aid for Low-Income Students and College
             Outcomes: Evidence from {North Carolina}},
   Journal = {Economic Inquiry},
   Pages = {278--303},
   Publisher = {WILEY},
   Year = {2018},
   Month = jan,
   url = {http://dx.doi.org/10.1111/ecin.12486},
   Abstract = {We study
             the evolution of a campus-based aid program for low-income
             students that began with grant-heavy financial aid and later
             added a suite of nonfinancial supports. We find little to no
             evidence that program eligibility during the early years
             (2004–2006), in which students received additional
             institutional grant aid and few nonfinancial supports,
             improved postsecondary progress, performance, or completion.
             In contrast, program-eligible students in more recent
             cohorts (2007–2010), when the program supplemented
             grant-heavy aid with an array of nonfinancial supports, were
             more likely to meet credit accumulation benchmarks toward
             timely graduation and earned higher grade point averages
             than their barely ineligible counterparts. (JEL I21, I23,
             I24, J08).},
   Doi = {10.1111/ecin.12486},
   internal-note = {Journal inferred from the DOI prefix (10.1111/ecin)
             and the Western Economic Association copyright line --
             confirm and add the published volume/issue.},
   Key = {fds333293}
}


%% Lanteri, Andrea   
@article{fds320600,
   Author = {Lanteri, A},
   Title = {The Market for Used Capital: Endogenous Irreversibility and
             Reallocation over the Business Cycle},
   Journal = {American Economic Review},
   Volume = {108},
   Number = {9},
   Pages = {2383--2419},
   Year = {2018},
   Month = sep,
   url = {http://dx.doi.org/10.1257/aer.20160131},
   Abstract = {This paper studies the business-cycle dynamics of secondary
             markets for physical capital and their effects on the
             macroeconomy. In the data, both capital reallocation and the
             price of used capital are procyclical. To rationalize these
             facts, I propose a model with endogenous partial
             irreversibility, where used investment goods are imperfect
             substitutes for new ones because of firm-level capital
             specificity. Equilibrium dynamics in the market for used
             capital induce countercyclical dispersion of marginal
             products of capital, propagate movements in aggregate TFP
             and provide a microfoundation for state-dependent non-convex
             capital adjustment costs.},
   Doi = {10.1257/aer.20160131},
   internal-note = {Apparent duplicate of entry fds337719 (same title,
             venue, pages, and date); DOI/url copied from that entry --
             consider merging.},
   Key = {fds320600}
}

@article{fds337719,
   Author = {Lanteri, A},
   Title = {The Market for Used Capital: Endogenous Irreversibility and
             Reallocation over the Business Cycle},
   Journal = {American Economic Review},
   Volume = {108},
   Number = {9},
   Pages = {2383--2419},
   Publisher = {American Economic Association},
   Year = {2018},
   Month = sep,
   url = {http://dx.doi.org/10.1257/aer.20160131},
   Abstract = {This paper studies the business-cycle dynamics of secondary
             markets for physical capital and their effects on the
             macroeconomy. In the data, both capital reallocation and the
             price of used capital are procyclical. To rationalize these
             facts, I propose a model with endogenous partial
             irreversibility, where used investment goods are imperfect
             substitutes for new ones because of firm-level capital
             specificity. Equilibrium dynamics in the market for used
             capital induce countercyclical dispersion of marginal
             products of capital, propagate movements in aggregate TFP,
             and provide a microfoundation for state-dependent nonconvex
             capital adjustment costs.},
   Doi = {10.1257/aer.20160131},
   internal-note = {Apparent duplicate of entry fds320600 (same title,
             venue, pages, and date); Journal/Volume/Number copied from
             that entry -- consider merging.},
   Key = {fds337719}
}

@article{fds326047,
   Author = {Clymo, A and Lanteri, A},
   Title = {Fiscal Policy with Limited-Time Commitment},
   Year = {2018},
   Month = jun,
   internal-note = {No journal or working-paper series recorded --
             presumably an unpublished working paper; verify and add
             venue details.},
   Key = {fds326047}
}


%% Leventoglu, Bahar   
@article{fds336486,
   Author = {Leventoğlu, B and Metternich, NW},
   Title = {Born Weak, Growing Strong: Anti-Government Protests as a
             Signal of Rebel Strength in the Context of Civil
             Wars},
   Journal = {American Journal of Political Science},
   Volume = {62},
   Number = {3},
   Pages = {581--596},
   Publisher = {WILEY},
   Year = {2018},
   Month = jul,
   url = {http://dx.doi.org/10.1111/ajps.12356},
   Abstract = {All rebel
             organizations start weak, but how do they grow and achieve
             favorable conflict outcomes? We present a theoretical model
             that allows for rebel organizations to gain support beyond
             their “core” and build their bargaining power during
             fighting. We highlight that rebel organizations need to win
             over crucial parts of society to generate the necessary
             support that allows them to attain favorable civil conflict
             outcomes. We find empirical support for the argument that
             low-income individuals who initially fight the government
             (rebel organizations) have to convince middle-class
             individuals to turn out against the government to gain
             government concessions. Empirically, we demonstrate that
             government concessions in the form of peace agreements and
             the onset of negotiations become more likely when protest
             occurs in the context of civil conflicts.},
   Doi = {10.1111/ajps.12356},
   Key = {fds336486}
}


%% Li, Jia   
@article{fds343333,
   Author = {Li, J and Todorov, V and Tauchen, G},
   Title = {Jump Factor Models in Large Cross-Sections},
   Journal = {Quantitative Economics},
   Volume = {10},
   Number = {2},
   Pages = {419--456},
   Year = {2019},
   Month = may,
   url = {http://dx.doi.org/10.3982/QE1060},
   Abstract = {We develop tests for deciding
             whether a large cross-section of asset prices obey an exact
             factor structure at the times of factor jumps. Such jump
             dependence is implied by standard linear factor models. Our
             inference is based on a panel of asset returns with
             asymptotically increasing cross-sectional dimension and
             sampling frequency, and essentially no restriction on the
             relative magnitude of these two dimensions of the panel. The
             test is formed from the high-frequency returns at the times
             when the risk factors are detected to have a jump. The test
             statistic is a cross-sectional average of a measure of
             discrepancy in the estimated jump factor loadings of the
             assets at consecutive jump times. Under the null hypothesis,
             the discrepancy in the factor loadings is due to a
             measurement error, which shrinks with the increase of the
             sampling frequency, while under an alternative of a noisy
             jump factor model this discrepancy contains also
             nonvanishing firm-specific shocks. The limit behavior of the
             test under the null hypothesis is nonstandard and reflects
             the strong-dependence in the cross-section of returns as
             well as their heteroskedasticity which is left unspecified.
             We further develop estimators for assessing the magnitude of
             firm-specific risk in asset prices at the factor jump
             events. Empirical application to S\&P 100 stocks provides
             evidence for exact one-factor structure at times of big
             market-wide jump events.},
   Doi = {10.3982/QE1060},
   Key = {fds343333}
}

@article{fds329370,
   Author = {Li, J and Todorov, V and Tauchen, G and Lin, H},
   Title = {Rank Tests at Jump Events},
   Journal = {Journal of Business \& Economic Statistics},
   Volume = {37},
   Number = {2},
   Pages = {312--321},
   Publisher = {Informa UK Limited},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1080/07350015.2017.1328362},
   Abstract = {We propose a test for the rank of a cross-section of
             processes at a set of jump events. The jump events are
             either specific known times or are random and associated
             with jumps of some process. The test is formed from
             discretely sampled data on a fixed time interval with
             asymptotically shrinking mesh. In the first step, we form
             nonparametric estimates of the jump events via thresholding
             techniques. We then compute the eigenvalues of the outer
             product of the cross-section of increments at the
             identified jump events. The test for rank r is based on the
             asymptotic behavior of the sum of the squared eigenvalues
             excluding the largest r. A simple resampling method is
             proposed for feasible testing. The test is applied to
             financial data spanning the period 2007–2015 at the times
             of stock market jumps. We find support for a one-factor
             model of both industry portfolio and Dow 30 stock returns
             at market jump times. This stands in contrast with earlier
             evidence for higher-dimensional factor structure of stock
             returns during “normal” (nonjump) times. We identify the
             latent factor driving the stocks and portfolios as the size
             of the market jump.},
   Doi = {10.1080/07350015.2017.1328362},
   Key = {fds329370}
}

@article{fds340105,
   Author = {Li, J and Liu, Y and Xiu, D},
   Title = {Efficient estimation of integrated volatility functionals
             via multiscale Jackknife},
   Journal = {The Annals of Statistics},
   Volume = {47},
   Number = {1},
   Pages = {156--176},
   Publisher = {Institute of Mathematical Statistics},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1214/18-AOS1684},
   Abstract = {We propose semiparametrically efficient estimators for
             general integrated volatility functionals of multivariate
             semimartingale processes. A plug-in method that uses
             nonparametric estimates of spot volatilities is known to
             induce high-order biases that need to be corrected to obey
             a central limit theorem. Such bias terms arise from
             boundary effects, the diffusive and jump movements of
             stochastic volatility and the sampling error from the
             nonparametric spot volatility estimation. We propose a
             novel jackknife method for bias correction. The jackknife
             estimator is simply formed as a linear combination of a few
             uncorrected estimators associated with different local
             window sizes used in the estimation of spot volatility. We
             show theoretically that our estimator is asymptotically
             mixed Gaussian, semiparametrically efficient, and more
             robust to the choice of local windows. To facilitate the
             practical use, we introduce a simulation-based estimator of
             the asymptotic variance, so that our inference is
             derivative-free, and hence is convenient to
             implement.},
   Doi = {10.1214/18-AOS1684},
   Key = {fds340105}
}

@article{fds339233,
   Author = {Bollerslev, T and Li, J and Xue, Y},
   Title = {Volume, volatility, and public news announcements},
   Journal = {Review of Economic Studies},
   Volume = {85},
   Number = {4},
   Pages = {2005--2041},
   Publisher = {Oxford University Press (OUP)},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1093/restud/rdy003},
   Abstract = {We provide new empirical evidence for the way in which
             financial markets process information. Our results rely
             critically on high-frequency intraday price and volume data
             for the S\&P 500 equity portfolio and U.S. Treasury bonds,
             along with new econometric techniques, for making inference
             on the relationship between trading intensity and spot
             volatility around public news announcements. Consistent
             with the predictions derived from a theoretical model in
             which investors agree to disagree, our estimates for the
             intraday volume-volatility elasticity around important
             news announcements are systematically below unity. Our
             elasticity estimates also decrease significantly with
             measures of disagreements in beliefs, economic
             uncertainty, and textual-based sentiment, further
             highlighting the key role played by
             differences-of-opinion.},
   Doi = {10.1093/restud/rdy003},
   Key = {fds339233}
}

@article{fds339634,
   Author = {Li, J and Xiu, D},
   Title = {Comment on: Limit of Random Measures associated with the
             increments of a {Brownian} Semimartingale},
   Journal = {Journal of Financial Econometrics},
   Volume = {16},
   Number = {4},
   Pages = {570--582},
   Publisher = {Oxford University Press (OUP)},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1093/jjfinec/nbx034},
   Doi = {10.1093/jjfinec/nbx034},
   Key = {fds339634}
}


%% Marx, Leslie M.   
@article{fds340151,
   Author = {Delacrétaz, D and Loertscher, S and Marx, LM and Wilkening,
             T},
   Title = {Two-sided allocation problems, decomposability, and the
             impossibility of efficient trade},
   Journal = {Journal of Economic Theory},
   Volume = {179},
   Pages = {416--454},
   Publisher = {Elsevier BV},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jet.2018.11.004},
   Abstract = {Previous literature has shown that private information is a
             transaction cost that prevents efficient reallocation in
             two-sided setups with bilateral trade or homogeneous goods.
             We derive conditions under which the impossibility of
             efficient trade extends to rich environments in which
             buyers and sellers have multi-dimensional private types,
             accommodating many-to-many trades and heterogeneous
             objects. If agents can be decomposed into unit
             constituents, the allocation problem can be represented as
             an assignment game and impossibility obtains through a
             generalization of Shapley's (1962) result that buyers and
             sellers are complements. We introduce a general family of
             payoff functions that ensures decomposability and thus
             impossibility.},
   Doi = {10.1016/j.jet.2018.11.004},
   Key = {fds340151}
}


%% Masten, Matthew A   
@article{fds341376,
   Author = {Freyberger, J and Masten, MA},
   Title = {A practical guide to compact infinite dimensional parameter
             spaces},
   Journal = {Econometric Reviews},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1080/07474938.2018.1514025},
   Abstract = {Compactness is a widely used assumption in econometrics. In
             this article, we gather and review general compactness
             results for many commonly used parameter spaces in
             nonparametric estimation, and we provide several new
             results. We consider three kinds of functions: (1)
             functions with bounded domains which satisfy standard norm
             bounds, (2) functions with bounded domains which do not
             satisfy standard norm bounds, and (3) functions with
             unbounded domains. In all three cases, we provide two kinds
             of results, compact embedding and closedness, which
             together allow one to show that parameter spaces defined by
             a (Formula presented.) -norm bound are compact under a norm
             (Formula presented.). We illustrate how the choice of norms
             affects the parameter space, the strength of the
             conclusions, as well as other regularity conditions in two
             common settings: nonparametric mean regression and
             nonparametric instrumental variables
             estimation.},
   Doi = {10.1080/07474938.2018.1514025},
   Key = {fds341376}
}

@article{fds335431,
   Author = {Masten, MA},
   Title = {Random coefficients on endogenous variables in simultaneous
             equations models},
   Journal = {Review of Economic Studies},
   Volume = {85},
   Number = {2},
   Pages = {1193--1250},
   Publisher = {Oxford University Press (OUP)},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1093/restud/rdx047},
   Abstract = {This article considers a classical linear simultaneous
             equations model with random coefficients on the endogenous
             variables. Simultaneous equations models are used to study
             social interactions, strategic interactions between firms,
             and market equilibrium. Random coefficient models allow for
             heterogeneous marginal effects. I show that random
             coefficient seemingly unrelated regression models with
             common regressors are not point identified, which implies
             random coefficient simultaneous equations models are not
             point identified. Important features of these models,
             however, can be identified. For two-equation systems, I give
             two sets of sufficient conditions for point identification
             of the coefficients' marginal distributions conditional on
             exogenous covariates. The first allows for small support
             continuous instruments under tail restrictions on the
             distributions of unobservables which are necessary for point
             identification. The second requires full support
             instruments, but allows for nearly arbitrary distributions
             of unobservables. I discuss how to generalize these results
             to many equation systems, where I focus on linear-in-means
             models with heterogeneous endogenous social interaction
             effects. I give sufficient conditions for point
             identification of the distributions of these endogenous
             social effects. I propose a consistent nonparametric kernel
             estimator for these distributions based on the
             identification arguments. I apply my results to the Add
             Health data to analyse peer effects in education.},
   Doi = {10.1093/restud/rdx047},
   Key = {fds335431}
}

@article{fds335432,
   Author = {Masten, MA and Poirier, A},
   Title = {Identification of Treatment Effects Under Conditional
             Partial Independence},
   Journal = {Econometrica},
   Volume = {86},
   Number = {1},
   Pages = {317--351},
   Publisher = {The Econometric Society},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.3982/ECTA14481},
   Abstract = {Conditional independence of treatment assignment from
             potential outcomes is a commonly used but nonrefutable
             assumption. We derive identified sets for various treatment
             effect parameters under nonparametric deviations from this
             conditional independence assumption. These deviations are
             defined via a conditional treatment assignment probability,
             which makes it straightforward to interpret. Our results can
             be used to assess the robustness of empirical conclusions
             obtained under the baseline conditional independence
             assumption.},
   Doi = {10.3982/ECTA14481},
   Key = {fds335432}
}


%% Maurel, Arnaud P.   
@article{fds342518,
   author = {Ashworth, J and Hotz, VJ and Maurel, A and Ransom, T},
   title  = {Changes Across Cohorts in Wage Returns to Schooling and
             Early Work Experiences},
   year   = {2018},
   month  = {May},
   key    = {fds342518}
}

@article{fds320604,
   Author = {D'Haultfœuille, X and Maurel, A and Zhang, Y},
   Title = {Extremal quantile regressions for selection models and the
             black–white wage gap},
   Journal = {Journal of Econometrics},
   Volume = {203},
   Number = {1},
   Pages = {129--142},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.jeconom.2017.11.004},
   Abstract = {We consider the estimation of a semiparametric sample
             selection model without instrument or large support
             regressor. Identification relies on the independence
             between the covariates and selection, for arbitrarily large
             values of the outcome. We propose a simple estimator based
             on extremal quantile regression and establish its
             asymptotic normality by extending previous results on
             extremal quantile regressions to allow for selection.
             Finally, we apply our method to estimate the black–white
             wage gap among males from the NLSY79 and NLSY97. We find
             that premarket factors such as AFQT and family background
             play a key role in explaining the black–white wage
             gap.},
   Doi = {10.1016/j.jeconom.2017.11.004},
   Key = {fds320604}
}


%% McAdams, David   
@article{fds333814,
   Author = {Hortaçsu, A and McAdams, D},
   Title = {Empirical work on auctions of multiple objects},
   Journal = {Journal of Economic Literature},
   Volume = {56},
   Number = {1},
   Pages = {157--184},
   Publisher = {American Economic Association},
   Year = {2018},
   Month = {March},
   url = {http://dx.doi.org/10.1257/jel.20160961},
   Abstract = {Abundant data has led to new opportunities for empirical
             auctions research in recent years, with much of the newest
             work on auctions of multiple objects, including: (1)
             auctions of ranked objects (such as sponsored search ads),
             (2) auctions of identical objects (such as Treasury bonds),
             and (3) auctions of dissimilar objects (such as FCC
             spectrum licenses). This paper surveys recent developments
             in the empirical analysis of such auctions.},
   Doi = {10.1257/jel.20160961},
   Key = {fds333814}
}


%% Mohanan, Manoj   
@article{fds339217,
   Author = {Kruk, ME and Gage, AD and Arsenault, C and Jordan, K and Leslie, HH and Roder-DeWan, S and Adeyi, O and Barker, P and Daelmans, B and Doubova,
             SV and English, M and Elorrio, EG and Guanais, F and Gureje, O and Hirschhorn, LR and Jiang, L and Kelley, E and Lemango, ET and Liljestrand, J and Malata, A and Marchant, T and Matsoso, MP and Meara,
             JG and Mohanan, M and Ndiaye, Y and Norheim, OF and Reddy, KS and Rowe, AK and Salomon, JA and Thapa, G and Twum-Danso, NAY and Pate,
             M},
   Title = {High-quality health systems in the Sustainable Development
             Goals era: time for a revolution},
   Journal = {The Lancet Global Health},
   Volume = {6},
   Number = {11},
   Pages = {e1196--e1252},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1016/s2214-109x(18)30386-3},
   Doi = {10.1016/s2214-109x(18)30386-3},
   Key = {fds339217}
}

@article{fds337123,
   Author = {Prudhomme O'Meara, W and Menya, D and Laktabai, J and Platt, A and Saran, I and Maffioli, E and Kipkoech, J and Mohanan, M and Turner,
             EL},
   Title = {Improving rational use of {ACTs} through diagnosis-dependent
             subsidies: Evidence from a cluster-randomized controlled
             trial in western {Kenya}},
   Journal = {PLoS Medicine},
   Volume = {15},
   Number = {7},
   Pages = {e1002607},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1371/journal.pmed.1002607},
   Abstract = {BACKGROUND: More than half of artemisinin combination
             therapies (ACTs) consumed globally are dispensed in the
             retail sector, where diagnostic testing is uncommon, leading
             to overconsumption and poor targeting. In many
             malaria-endemic countries, ACTs sold over the counter are
             available at heavily subsidized prices, further contributing
             to their misuse. Inappropriate use of ACTs can have serious
             implications for the spread of drug resistance and leads to
             poor outcomes for nonmalaria patients treated with incorrect
             drugs. We evaluated the public health impact of an
             innovative strategy that targets ACT subsidies to confirmed
             malaria cases by coupling free diagnostic testing with a
             diagnosis-dependent ACT subsidy. METHODS AND FINDINGS: We
             conducted a cluster-randomized controlled trial in 32
             community clusters in western Kenya (population
             approximately 160,000). Eligible clusters had retail outlets
             selling ACTs and existing community health worker (CHW)
             programs and were randomly assigned 1:1 to control and
             intervention arms. In intervention areas, CHWs were
             available in their villages to perform malaria rapid
             diagnostic tests (RDTs) on demand for any individual >1 year
             of age experiencing a malaria-like illness. Malaria
             RDT-positive individuals received a voucher for a discount
             on a quality-assured ACT, redeemable at a participating
             retail medicine outlet. In control areas, CHWs offered a
             standard package of health education, prevention, and
             referral services. We conducted 4 population-based
             surveys-at baseline, 6 months, 12 months, and 18 months-of a
             random sample of households with fever in the last 4 weeks
             to evaluate predefined, individual-level outcomes. The
             primary outcome was uptake of malaria diagnostic testing at
             12 months. The main secondary outcome was rational ACT use,
             defined as the proportion of ACTs used by test-positive
             individuals. Analyses followed the intention-to-treat
             principle using generalized estimating equations (GEEs) to
             account for clustering with prespecified adjustment for
             gender, age, education, and wealth. All descriptive
             statistics and regressions were weighted to account for
             sampling design. Between July 2015 and May 2017, 32,404
             participants were tested for malaria, and 10,870 vouchers
             were issued. A total of 7,416 randomly selected participants
             with recent fever from all 32 clusters were surveyed. The
             majority of recent fevers were in children under 18 years
             (62.9%, n = 4,653). The gender of enrolled participants was
             balanced in children (49.8%, n = 2,318 boys versus 50.2%, n
             = 2,335 girls), but more adult women were enrolled than men
             (78.0%, n = 2,139 versus 22.0%, n = 604). At baseline, 67.6%
             (n = 1,362) of participants took an ACT for their illness,
             and 40.3% (n = 810) of all participants took an ACT
             purchased from a retail outlet. At 12 months, 50.5% (n =
             454) in the intervention arm and 43.4% (n = 389) in the
             control arm had a malaria diagnostic test for their recent
             fever (adjusted risk difference [RD] = 9 percentage points
             [pp]; 95% CI 2-15 pp; p = 0.015; adjusted risk ratio [RR] =
             1.20; 95% CI 1.05-1.38; p = 0.015). By 18 months, the ARR
             had increased to 1.25 (95% CI 1.09-1.44; p = 0.005).
             Rational use of ACTs in the intervention area increased from
             41.7% (n = 279) at baseline to 59.6% (n = 403) and was 40%
             higher in the intervention arm at 18 months (ARR 1.40; 95%
             CI 1.19-1.64; p < 0.001). While intervention effects
             increased between 12 and 18 months, we were not able to
             estimate longer-term impact of the intervention and could
             not independently evaluate the effects of the free testing
             and the voucher on uptake of testing. CONCLUSIONS:
             Diagnosis-dependent ACT subsidies and community-based
             interventions that include the private sector can have an
             important impact on diagnostic testing and population-wide
             rational use of ACTs. Targeting of the ACT subsidy itself to
             those with a positive malaria diagnostic test may also
             improve sustainability and reduce the cost of retail-sector
             ACT subsidies. TRIAL REGISTRATION: ClinicalTrials.gov
             NCT02461628.},
   Doi = {10.1371/journal.pmed.1002607},
   Key = {fds337123}
}

@article{fds339802,
   Author = {Maffioli, EM and Mohanan, M},
   Title = {Touching beliefs: Using touchscreen technology to elicit
             subjective expectations in survey research},
   Journal = {PLoS ONE},
   Volume = {13},
   Number = {11},
   Pages = {e0207484},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1371/journal.pone.0207484},
   Abstract = {When making decisions under uncertainty, individuals may
             form subjective expectations about probabilities of events
             relevant for their choice. Accurate measurement of
             subjective expectations is critical for high-quality data
             needed to analyze individual behavior. This paper reports
             the development and validity of a new method of eliciting
             point subjective expectations in developing countries. We
             developed a touchscreen-based application that combines an
             animated slider along with dynamic images that change
             relative sizes based on the probability indicated by the
             respondent. We compare our method to the more traditional
             approach of using beans as visual aids. First, we find that
             respondents have a sound understanding of basic concepts of
             probability. Second, we test for equality of the
             distributions elicited with the different methods and find
             them highly comparable. Third, we provide evidence that
             respondents report a more favorable opinion about the slider
             method and more willingness to complete long surveys using
             the slider rather than beans. Our findings suggest that the
             slider could be a viable elicitation method for empirical
             researchers who aim to collect data on subjective
             expectations in developing countries.},
   Doi = {10.1371/journal.pone.0207484},
   Key = {fds339802}
}


%% Munger, Michael C.   
@article{fds341734,
   Author = {Munger, MC and Villarreal-Diaz, M},
   Title = {The Road to Crony Capitalism},
   Journal = {Independent Review},
   Volume = {23},
   Number = {3},
   Pages = {331--344},
   Publisher = {Independent Institute},
   Year = {2019},
   Month = {December},
   Key = {fds341734}
}

@article{fds339419,
   Author = {Munger, MC},
   Title = {Tullock and the welfare costs of corruption: there is a
             “political {Coase} Theorem”},
   Journal = {Public Choice},
   Volume = {181},
   Number = {1-2},
   Pages = {83--100},
   Publisher = {Springer Nature America, Inc},
   Year = {2019},
   Month = {October},
   url = {http://dx.doi.org/10.1007/s11127-018-0610-9},
   Abstract = {Gordon Tullock developed an approach to understanding
             dynamic processes of political change and policy outcomes.
             The key insight is the notion that political insiders have
             a comparative advantage—because they face lower
             transaction costs—in manipulating rules. The result is
             that political actors can collect revenues from threatening
             to restrict, or offering to loosen, access to valuable
             permissions, permits, or services. To the extent that the
             ability to pay for such favorable treatment is a
             consequence of private activities that produce greater
             social value, there is a “political Coase theorem”:
             corruption makes bad systems more efficient. But the
             dynamic consequences are extremely negative, because of the
             inability to institute reforms resulting from application
             of Tullock’s “transitional gains trap.”},
   Doi = {10.1007/s11127-018-0610-9},
   Key = {fds339419}
}

@article{fds342604,
   Author = {Munger, MC},
   Title = {Making the Voluntaryist {Venn} Work for Us, Not against
             Us},
   Journal = {Independent Review},
   Volume = {23},
   Number = {4},
   Pages = {503--520},
   Publisher = {Independent Institute},
   Year = {2019},
   Month = {March},
   Key = {fds342604}
}

@article{fds342282,
   author  = {Guzmán, RA and Munger, MC},
   title   = {A Theory of Just Market Exchange},
   journal = {Journal of Value Inquiry},
   year    = {2019},
   month   = {January},
   url     = {http://dx.doi.org/10.1007/s10790-019-09686-5},
   doi     = {10.1007/s10790-019-09686-5},
   key     = {fds342282}
}

@article{fds331467,
   Author = {Munger, MC},
   Title = {On the origins and goals of public choice: Constitutional
             conspiracy?},
   Journal = {Independent Review},
   Volume = {22},
   Number = {3},
   Pages = {359--382},
   Publisher = {Independent Institute},
   Year = {2018},
   Month = {December},
   Key = {fds331467}
}

@article{fds338183,
   Author = {Munger, MC},
   Title = {What Is ``Actually Existing Socialism''?},
   Journal = {Independent Review},
   Volume = {23},
   Number = {2},
   Pages = {297--299},
   Publisher = {Independent Institute},
   Year = {2018},
   Month = {September},
   Key = {fds338183}
}

@article{fds332797,
   Author = {Munger, MC},
   Title = {30 years after the {Nobel}: {James Buchanan}'s political
             philosophy},
   Journal = {The Review of Austrian Economics},
   Volume = {31},
   Number = {2},
   Pages = {151--167},
   Publisher = {Springer Nature},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s11138-018-0418-3},
   Abstract = {There are three main foundations of Public Choice theory:
             methodological individualism, behavioral symmetry, and
             “politics as exchange.” The first two are represented
             in nearly all work that identifies as “Public Choice,”
             but politics as exchange is often forgotten or
             de-emphasized. This paper—adapted from a lecture given on
             the occasion of the 30th year after Buchanan’s Nobel
             Prize—fleshes out Buchanan’s theory of politics as
             exchange, using four notions that are uniquely central to
             his thought: philosophical anarchism, ethical neutrality,
             subjectivism, and the “relatively absolute absolutes.” A
             central tension in Buchanan’s work is identified, in which
             he seems simultaneously to argue both that nearly anything
             agreed to by a group could be enforced within the group as a
             contract, and that there are certain types of rules and
             arrangements, generated by decentralized processes, that
             serve human needs better than state action. It is argued
             that it is a mistake to try to reconcile this tension, and
             that both parts of the argument are important.},
   Doi = {10.1007/s11138-018-0418-3},
   Key = {fds332797}
}

@article{fds343478,
   Author = {Munger, MC},
   Title = {On the contingent vice of corruption},
   Journal = {Social Philosophy and Policy},
   Volume = {35},
   Number = {2},
   Pages = {158--181},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1017/S0265052519000153},
   Abstract = {This essay develops a notion of “functional
             corruption,” adapted from sociology, to note that the harm
             of corruption appears to be contingent. In a system of
             dysfunctional institutions, corruption can improve the
             efficiency and speed of allocative mechanisms of the
             bureaucracy, possibly quite substantially. The problem is
             that this “short run” benefit locks in the long run harm
             of corruption by making institutions much more difficult to
             reform. In particular, a nation with bad institutions but
             without bureaucracy may be much more open to reform than a
             nation with similarly bad institutions but with
             “efficiently corrupt” bureaucrats. The idea of a “long
             run” is developed using the North, Wallis, and Weingast
             conception of open access orders. Corrupt systems are likely
             to be locked into closed access orders indefinitely, even
             though everyone knows there are better institutions
             available.},
   Doi = {10.1017/S0265052519000153},
   Key = {fds343478}
}

@book{fds327639,
   Author = {Munger, MC},
   Title = {Tomorrow 3.0: The Sharing-Middleman Economy},
   Publisher = {Cambridge University Press},
   Address = {Cambridge},
   Year = {2018},
   Key = {fds327639}
}


%% Newell, Richard G.   
@article{fds342154,
   Author = {Newell, RG and Pizer, WA and Raimi, D},
   Title = {U.S. federal government subsidies for clean energy: Design
             choices and implications},
   Journal = {Energy Economics},
   Volume = {80},
   Pages = {831--841},
   Publisher = {Elsevier BV},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.eneco.2019.02.018},
   Abstract = {Subsidies for clean energy deployment have become a major
             component of U.S. federal energy and climate policy. After
             a surge in spending under the American Recovery and
             Reinvestment Act of 2009, they are an even larger component
             but now face increased scrutiny. Given their lasting
             presence, how does one design these subsidies to be as
             cost-effective as possible? Surprisingly, the conceptual
             framework and empirical evidence available to help
             policymakers identify which subsidies generate the most
             “bang for the buck” are limited. To help answer this
             question, we begin with an overview of the justifications
             for, and the arguments against, subsidizing clean energy
             technologies. Next, we briefly describe major subsidies.
             Finally, we summarize key design choices, suggesting an
             increased focus on upfront cash payments for physical
             outcomes such as capacity. This contrasts with the
             considerable focus on tax credits, loan guarantees,
             production, and cost-based subsidies which have been more
             prominent to date.},
   Doi = {10.1016/j.eneco.2019.02.018},
   Key = {fds342154}
}

@article{fds333902,
  author   = {Newell, R. G. and Prest, B. C.},
  title    = {The unconventional oil supply boom: Aggregate price response
    from microdata},
  journal  = {The Energy Journal},
  volume   = {40},
  number   = {3},
  pages    = {1--30},
  publisher = {International Association for Energy Economics (IAEE)},
  year     = {2019},
  month    = jan,
  url      = {http://dx.doi.org/10.5547/01956574.40.3.rnew},
  abstract = {We analyze the price responsiveness of U.S. conventional and
    unconventional oil supply across three key stages of oil production:
    Drilling, completion, and production. Drilling is the most important
    margin, with price elasticities of 1.3 and 1.6 for conventional and
    unconventional drilling respectively. Well productivity declines as
    prices rise, implying smaller net supply elasticities of about 1.1 and
    1.2. Despite similar supply elasticities, the price response of
    unconventional supply is larger in terms of barrels because of much
    higher production per well (∼10x initially). Oil supply simulations
    show a 13-fold larger supply response due to the shale revolution. The
    simulations suggest that a price rise from \$50 to \$80 per barrel
    induces incremental U.S. production of 0.6MM barrels per day in 6
    months, 1.4MM in 1 year, 2.4MM in 2 years, and 4.2MM in 5 years.
    Nonetheless, the response takes much longer than the 30 to 90 days than
    typically associated with the role of 'swing producer'.},
  doi      = {10.5547/01956574.40.3.rnew},
  key      = {fds333902}
}

@article{fds333535,
  author   = {Newell, R. G. and Raimi, D.},
  title    = {The fiscal impacts of increased {U.S.} oil and gas development
    on local governments},
  journal  = {Energy Policy},
  volume   = {117},
  pages    = {14--24},
  publisher = {Elsevier BV},
  year     = {2018},
  month    = jun,
  url      = {http://dx.doi.org/10.1016/j.enpol.2018.02.042},
  abstract = {Increased US oil and gas production has created opportunities
    and challenges for local governments. Through interviews with roughly
    250 local officials, we evaluate the fiscal effects of this development
    in 21 regions across every major US oil and gas producing state during
    “boom” and “bust” periods. Growing oil and gas production has
    increased local government revenues through a variety of mechanisms,
    including property taxes, sales taxes, severance taxes, and more.
    Industry activity has also increased costs and demand for local
    services led by road damage, water and wastewater infrastructure, and a
    range of staff costs including emergency services and law enforcement.
    Despite volatility in revenues and service demands, our interview
    results show that 74\% of local governments have experienced net fiscal
    benefits, 14\% reported roughly neutral effects, and 12\% reported net
    fiscal costs. Local governments in highly rural regions experiencing
    large-scale growth have faced the greatest challenges. To further
    improve future outcomes, local officials can plan for impacts, state
    policymakers can re-examine revenue policies, and operators can pursue
    collaboration with local governments.},
  doi      = {10.1016/j.enpol.2018.02.042},
  key      = {fds333535}
}

@article{fds333908,
  author   = {Bielen, D. A. and Newell, R. G. and Pizer, W. A.},
  title    = {Who did the ethanol tax credit benefit? An event analysis of
    subsidy incidence},
  journal  = {Journal of Public Economics},
  volume   = {161},
  pages    = {1--14},
  publisher = {Elsevier BV},
  year     = {2018},
  month    = may,
  url      = {http://dx.doi.org/10.1016/j.jpubeco.2018.03.005},
  abstract = {At the end of 2011, the Volumetric Ethanol Excise Tax Credit
    (VEETC), which had subsidized the blending of ethanol in gasoline, was
    allowed to expire. During its tenure, the subsidy was the subject of
    intense scrutiny concerning who benefited from its existence. Using
    commodity price data, we estimate the subsidy incidence accruing to
    corn farmers, ethanol producers, gasoline blenders, and gasoline
    consumers around the time of expiration. Our empirical approach
    contributes methodologically to the event studies literature by
    analyzing futures contract prices (as opposed to spot prices) when
    possible. Ultimately, we find compelling evidence that, at the date of
    VEETC expiration, ethanol producers captured about 25¢ of the 45¢
    subsidy per gallon of ethanol blended. We find suggestive, albeit
    inconclusive, evidence that a portion of this benefit (about 5¢ per
    gallon) was passed further upstream from ethanol producers to corn
    farmers. Most of the remainder seems most likely to have been captured
    by the blenders themselves. On the petroleum side, we find no evidence
    that oil refiners captured any part of the subsidy. We also find no
    evidence that the subsidy was passed downstream to gasoline consumers
    in the form of lower gasoline prices.},
  doi      = {10.1016/j.jpubeco.2018.03.005},
  key      = {fds333908}
}

@article{fds330723,
  author   = {Newell, R. G. and Raimi, D.},
  title    = {{US} state and local oil and gas revenue sources and
    uses},
  journal  = {Energy Policy},
  volume   = {112},
  pages    = {12--18},
  publisher = {Elsevier BV},
  year     = {2018},
  month    = jan,
  url      = {http://dx.doi.org/10.1016/j.enpol.2017.10.002},
  abstract = {US state and local governments generate revenues from oil and
    gas production through a variety of mechanisms. In this paper, we
    quantify four leading sources: (1) state taxes levied on the value or
    volume of oil and gas produced; (2) local property taxes levied on the
    value of oil and gas property; (3) oil and gas lease revenues from
    state lands; and (4) oil and gas lease revenues from federal lands. We
    measure these revenues against the total value of oil and gas produced
    in the top 16 oil- and gas-producing states using fiscal year 2013 as a
    benchmark. On average, state and local governments collect roughly
    10\% of oil and gas revenue, ranging from a low of roughly 1\% to a
    high of nearly 40\% (not including income taxes). We also assess the
    use of these revenues, finding that there is substantial variation
    among states. The largest shares of revenue flow to state
    governments’ current expenditures and education, followed by local
    governments. Some states also allocate a portion of oil and gas
    revenues to trust funds endowing future government operations and/or
    education expenditures.},
  doi      = {10.1016/j.enpol.2017.10.002},
  key      = {fds330723}
}


%% Nolan, Zachary   
@article{fds341377,
  author = {McManus, B. and Nevo, A. and Nolan, Z. and Williams, J. W.},
  title  = {Steering Incentives and Bundling Practices in the
    Telecommunications Industry},
  year   = {2018},
  month  = oct,
  key    = {fds341377}
}


%% Pattanayak, Subhrendu K.   
@article{fds343313,
  author   = {Litzow, E. L. and Pattanayak, S. K. and Thinley, T.},
  title    = {Returns to rural electrification: Evidence from
    {Bhutan}},
  journal  = {World Development},
  volume   = {121},
  pages    = {75--96},
  year     = {2019},
  month    = sep,
  url      = {http://dx.doi.org/10.1016/j.worlddev.2019.04.002},
  abstract = {Rural electrification (RE) is a core component of the
    Sustainable Development Goals and a major focal point of the global
    development community. Despite this focus, more than one billion people
    worldwide lack access to electricity, and electrification rates need to
    more than quadruple to meet international goals. We believe that lack
    of progress is partly driven by a know-do gap, a misalignment between
    academic research and the information needs of policy makers. Most
    studies measuring the impacts of electrification focus on precise
    estimation of a few outcomes, specifically health, education and
    productivity impacts. Other important impacts, e.g. environmental, have
    remained largely unstudied. As a consequence, quantifying the full set
    of costs and benefits of expanding electricity access is difficult and
    rarely done. When cost benefit analyses are done, they are often
    incomplete, and conclusions are highly susceptible to unavailable or
    uncertain parameters. We illustrate these arguments in the case of
    Bhutan, where RE rates have expanded rapidly in the past few decades.
    We show that RE via grid extension had positive impacts related to
    fuelwood consumption, education, and employment, but we do not find an
    effect on health. We then use these impact estimates to conduct
    cost-benefit analyses. For the cost-benefit parameters not available
    from our impact evaluation, we transfer reasonable estimates from
    related contexts. To acknowledge the uncertainty induced by this
    process, we conduct Monte Carlo analyses and confirm that, while the
    private NPV calculations are robust to alternative parameter values,
    the social returns are sensitive to estimates of the social cost of
    carbon and costs of grid operation and maintenance. Based on this
    exercise, we highlight research gaps that persist and that preclude 1)
    careful cost-benefit analysis of RE more generally and 2) financial
    investment in the sector.},
  doi      = {10.1016/j.worlddev.2019.04.002},
  key      = {fds343313}
}

@article{fds341467,
  author   = {Tan-Soo, J.-S. and Pattanayak, S. K.},
  title    = {Seeking natural capital projects: Forest fires, haze, and
    early-life exposure in {Indonesia}.},
  journal  = {Proceedings of the National Academy of Sciences of the
    United States of America},
  volume   = {116},
  number   = {12},
  pages    = {5239--5245},
  year     = {2019},
  month    = mar,
  url      = {http://dx.doi.org/10.1073/pnas.1802876116},
  abstract = {Natural capital will be depleted rapidly and excessively if
    the long-term, offsite impacts of depletion are ignored. By examining
    the case of tropical forest burning, we illustrate such myopia: Pursuit
    of short-term economic gains results in air pollution that causes
    long-term, irreversible health impacts. We integrate longitudinal data
    on prenatal exposure to the 1997 Indonesian forest fires with child
    nutritional outcomes and find that mean exposure to air pollution
    during the prenatal stage is associated with a half-SD decrease in
    height-for-age z score at age 17, which is robust to several
    statistical checks. Because adult height is associated with income,
    this implies a loss of 4\% of average monthly wages for approximately
    one million Indonesian workers born during this period. To put these
    human capital losses in the context of policy making, we conduct social
    cost-benefit analyses of oil palm plantations under different scenarios
    for clearing land and controlling fires. We find that clearing for oil
    palm plantations using mechanical methods generates higher social net
    benefits compared with clearing using fires. Oil palm producers,
    however, would be unwilling to bear the higher private costs of
    mechanical clearing. Therefore, we need more effective fire bans, fire
    suppression, and moratoriums on oil palm in Indonesia to protect
    natural and human capital, and increase social welfare.},
  doi      = {10.1073/pnas.1802876116},
  key      = {fds341467}
}

@article{fds326800,
  author   = {Mullan, K. and Sills, E. and Pattanayak, S. K. and
    Caviglia-Harris, J.},
  title    = {Converting Forests to Farms: The Economic Benefits of
    Clearing Forests in Agricultural Settlements in the
    {Amazon}},
  journal  = {Environmental and Resource Economics},
  volume   = {71},
  number   = {2},
  pages    = {427--455},
  publisher = {Springer Nature America, Inc},
  year     = {2018},
  month    = oct,
  url      = {http://dx.doi.org/10.1007/s10640-017-0164-1},
  abstract = {Agricultural expansion into tropical forests is believed to
    bring local economic benefits at the expense of global environmental
    costs. The resulting tension is reflected in Brazilian government
    policy. The national agrarian reform program has settled farm families
    in the Amazon region since the 1970s, with the expectation that they
    will clear forests in order to farm the land. On the other hand, recent
    Brazilian policy initiatives seek to reduce deforestation to mitigate
    climate change. We contribute to the policy debate that surrounds these
    dual goals for the Amazon by estimating the marginal effects of new
    agricultural land on the full income and assets of farm settlers over a
    13-year period from 1996 to 2009. Using micro panel data from agrarian
    settlements where forest was being rapidly cleared, and controlling for
    factors that would otherwise confound the relationship, we estimate the
    effect of converting forest to agriculture on total household income to
    estimate the opportunity cost of conserving forest. Our measure of
    income reflects any re-allocation of resources by utility maximizing
    households and any productivity effects due to loss of forest ecosystem
    services. The estimated effect of new agricultural land on income is
    positive, but small relative to the income per hectare of previously
    cleared land. However, we show that income increases investment in
    physical assets, which raises households’ income generating capacity
    and future accumulation of assets. Thus, while there is only a small
    immediate income gain from clearing more forest, the long-term effects
    on wealth are still substantial. This demonstrates that given the right
    conditions, conversion of forest to agricultural land can be an impetus
    for asset accumulation by smallholders. It also highlights the
    importance of considering the indirect and long-term welfare benefits
    of new agricultural land when assessing the opportunity costs of forest
    conservation.},
  doi      = {10.1007/s10640-017-0164-1},
  key      = {fds326800}
}

@article{fds333028,
  author   = {Rieb, J. T. and Chaplin-Kramer, R. and Daily, G. C. and
    Armsworth, P. R. and Böhning-Gaese, K. and Bonn, A. and Cumming, G. S.
    and Eigenbrod, F. and Grimm, V. and Jackson, B. M. and Marques, A. and
    Pattanayak, S. K. and Pereira, H. M. and Peterson, G. D. and Ricketts,
    T. H. and Robinson, B. E. and Schröter, M. and Schulte, L. A. and
    Seppelt, R. and Turner, M. G. and Bennett, E. M.},
  title    = {Response to {Kabisch} and Colleagues},
  journal  = {BioScience},
  volume   = {68},
  number   = {3},
  pages    = {167--168},
  publisher = {Oxford University Press (OUP)},
  year     = {2018},
  month    = mar,
  url      = {http://dx.doi.org/10.1093/biosci/bix154},
  doi      = {10.1093/biosci/bix154},
  key      = {fds333028}
}

@article{fds333729,
  author  = {Pattanayak, S. K. and Haines, A.},
  title   = {Implementation of policies to protect planetary health -
    Authors' reply.},
  journal = {The Lancet. Planetary Health},
  volume  = {2},
  number  = {2},
  pages   = {e63},
  year    = {2018},
  month   = feb,
  url     = {http://dx.doi.org/10.1016/s2542-5196(18)30007-x},
  doi     = {10.1016/s2542-5196(18)30007-x},
  key     = {fds333729}
}

@article{fds338445,
  author   = {Pattanayak, S. K. and Pakhtigian, E. L. and Litzow, E. L.},
  title    = {Through the looking glass: Environmental health economics in
    low and middle income countries},
  journal  = {Handbook of Environmental Economics},
  volume   = {4},
  pages    = {143--191},
  year     = {2018},
  month    = jan,
  url      = {http://dx.doi.org/10.1016/bs.hesenv.2018.08.004},
  abstract = {Human interactions with the environment can profoundly impact
    many outcomes – health being chief among them. While the nature of
    environmental risks changes across time and space, the burden of
    disease attributable to environmental risk hovers stubbornly around one
    quarter of the total global disease burden. Further, environmental
    risks are particularly damaging to the health of children, but also to
    the elderly and the impoverished in low and middle income countries
    (LMICs). This chapter highlights the ways in which economics provides
    analytical insight about the human–environment relationship and about
    potential ways to prevent diseases. Specifically, we contend that the
    household production framework – which focuses on the beneficiary and
    households – helps us understand when and how households will avert
    environmental risks. While economists have been mostly on the sidelines
    of environmental health research, there is a growing literature from
    LMICs that examines three aspects of reduction in household
    environmental risks: (i) how households value these risk reductions,
    (ii) what factors drive household adoption of environmental health
    technologies, and (iii) what are the impacts of these technologies on
    household health. At the risk of simplification, our review of this
    literature finds relatively low values for environmental risk
    reductions, which is mirrored by limited adoption of environmental
    health technologies and, accordingly, disappointing impact on health.
    Economists have made less progress in linking the literatures on
    valuation, adoption and impacts with each other. We conclude by
    explaining why the next wave of research should focus on these links
    and on multiple risks, environmental disasters, and political economy
    of the supply of interventions.},
  doi      = {10.1016/bs.hesenv.2018.08.004},
  key      = {fds338445}
}

@article{fds338388,
  author   = {Shannon, A. K. and Usmani, F. and Pattanayak, S. K. and
    Jeuland, M.},
  title    = {The Price of Purity: Willingness to Pay for Air and Water
    Purification Technologies in {Rajasthan}, {India}},
  journal  = {Environmental and Resource Economics},
  publisher = {Springer Nature America, Inc},
  year     = {2018},
  month    = jan,
  url      = {http://dx.doi.org/10.1007/s10640-018-0290-4},
  abstract = {Diarrheal illnesses and acute respiratory infections are
    among the top causes for premature death and disability across the
    developing world, and adoption of various technologies for avoiding
    these illnesses remains extremely low. We exploit data from a unique
    contingent valuation experiment to consider whether households in rural
    Rajasthan are unwilling to make investments in “domain-specific”
    environmental health technologies when faced with health risks in
    multiple domains. Results indicate that demand for water-related risk
    reductions is higher on average than demand for air-related risk
    reduction. In addition, households’ private health benefits from
    mitigating diarrheal (respiratory) disease risks are higher (no
    different) when community-level air pollution risks, rather than
    community-level water pollution risks, have previously been mitigated.
    This asymmetric response cannot fully be explained by survey order
    effects or embedding, but rather suggests that that the broader health
    environment and the salience of particular risks may be important in
    households’ decision to adopt environmental health
    technologies.},
  doi      = {10.1007/s10640-018-0290-4},
  key      = {fds338388}
}


%% Patton, Andrew J.   
@article{fds333583,
  author = {Patton, A. J. and Weller, B.},
  title  = {What You See Is Not What You Get: The Costs of Trading
    Market Anomalies},
  number = {255},
  year   = {2019},
  month  = may,
  key    = {fds333583}
}


%% Peretto, Pietro F.   
@article{fds325931,
  author   = {Ferraro, D. and Peretto, P. F.},
  title    = {Commodity Prices and Growth},
  journal  = {The Economic Journal},
  volume   = {128},
  number   = {616},
  pages    = {3242--3265},
  publisher = {Oxford University Press (OUP)},
  year     = {2018},
  month    = dec,
  url      = {http://dx.doi.org/10.1111/ecoj.12559},
  abstract = {In this article, we propose an endogenous growth model of
    commodity-rich economies in which: (i) long-run (steady-state) growth
    is endogenous and yet independent of commodity prices; (ii) commodity
    prices affect short-run growth through transitional dynamics; and
    (iii) the status of net commodity importer/exporter is endogenous. We
    argue that these predictions are consistent with historical evidence
    from the 19th to the 21st century.},
  doi      = {10.1111/ecoj.12559},
  key      = {fds325931}
}

@article{fds335433,
  author   = {Peretto, P. F.},
  title    = {Robust endogenous growth},
  journal  = {European Economic Review},
  volume   = {108},
  pages    = {49--77},
  publisher = {Elsevier BV},
  year     = {2018},
  month    = sep,
  url      = {http://dx.doi.org/10.1016/j.euroecorev.2018.06.007},
  abstract = {This paper studies a generalization of the Schumpeterian
    models with endogenous market structure that allows the overall
    production structure to be more than linear in the growth-driving
    factor and yet generates endogenous growth, defined as steady-state,
    constant, exponential growth of income per capita. This version of
    modern growth theory, therefore, is robust in the sense that its key
    result obtains for a thick set of parameter values instead of, as often
    claimed, for a set of measure zero. The paper, moreover, pays close
    attention to transitional dynamics, showing not only the existence but
    also the global stability of the endogenous-growth steady
    state.},
  doi      = {10.1016/j.euroecorev.2018.06.007},
  key      = {fds335433}
}


%% Pfaff, Alexander   
@article{fds342540,
  author   = {Naidoo, R. and Gerkey, D. and Hole, D. and Pfaff, A. and
    Ellis, A. M. and Golden, C. D. and Herrera, D. and Johnson, K. and
    Mulligan, M. and Ricketts, T. H. and Fisher, B.},
  title    = {Evaluating the impacts of protected areas on human
    well-being across the developing world.},
  journal  = {Science Advances},
  volume   = {5},
  number   = {4},
  pages    = {eaav3006},
  year     = {2019},
  month    = apr,
  url      = {http://dx.doi.org/10.1126/sciadv.aav3006},
  abstract = {Protected areas (PAs) are fundamental for biodiversity
    conservation, yet their impacts on nearby residents are contested. We
    synthesized environmental and socioeconomic conditions of >87,000
    children in >60,000 households situated either near or far from >600
    PAs within 34 developing countries. We used quasi-experimental
    hierarchical regression to isolate the impact of living near a PA on
    several aspects of human well-being. Households near PAs with tourism
    also had higher wealth levels (by 17\%) and a lower likelihood of
    poverty (by 16\%) than similar households living far from PAs. Children
    under 5 years old living near multiple-use PAs with tourism also had
    higher height-for-age scores (by 10\%) and were less likely to be
    stunted (by 13\%) than similar children living far from PAs. For the
    largest and most comprehensive socioeconomic-environmental dataset yet
    assembled, we found no evidence of negative PA impacts and consistent
    statistical evidence to suggest PAs can positively affect human
    well-being.},
  doi      = {10.1126/sciadv.aav3006},
  key      = {fds342540}
}

@article{fds342117,
  author  = {Fisher, B. and Herrera, D. and Adams, D. and Fox, H. E. and
    Gallagher, L. and Gerkey, D. and Gill, D. and Golden, C. D. and Hole,
    D. and Johnson, K. and Mulligan, M. and Myers, S. S. and Naidoo, R. and
    Pfaff, A. and Rasolofoson, R. and Selig, E. R. and Tickner, D. and
    Treuer, T. and Ricketts, T.},
  title   = {Can nature deliver on the sustainable development
    goals?},
  journal = {The Lancet. Planetary Health},
  volume  = {3},
  number  = {3},
  pages   = {e112--e113},
  year    = {2019},
  month   = mar,
  url     = {http://dx.doi.org/10.1016/s2542-5196(18)30281-x},
  doi     = {10.1016/s2542-5196(18)30281-x},
  key     = {fds342117}
}

@article{fds341860,
  author   = {Pfaff, A. and Rodriguez, L. A. and Shapiro-Garza, E.},
  title    = {Collective Local Payments for ecosystem services: New local
    {PES} between groups, sanctions, and prior watershed trust in
    {Mexico}},
  journal  = {Water Resources and Economics},
  year     = {2019},
  month    = jan,
  url      = {http://dx.doi.org/10.1016/j.wre.2019.01.002},
  abstract = {Payments for ecosystem services (PES) programs are now high
    in number, if not always in impact. When groups of users pay groups of
    service providers, establishing PES involves collective action. We
    study the creation of collective PES institutions, and their
    continuation, as group coordination. We use framed lab-in-field
    experiments with hydroservices users and providers within watersheds
    participating in Mexico's Matching Funds program in Veracruz, Yucatan
    and Quintana Roo states. We explore the coordination of contributions
    between downstream users and upstream providers, plus effects of
    different types of sanctions that can affect expectations for both
    users and providers. Both information alone and sanctions raise
    contributions overall, although outcomes varied by site in line with
    our rankings of ‘watershed trust’. For instance, monetary sanctions
    raise contributions in the watershed we ranked high in trust, yet
    initially lowered them for the lowest-trust watershed. This suggests
    that upstream-downstream social capital will be central to new
    collective local PES, while our overall trends suggest social capital
    can be raised by successful coordination over time.},
  doi      = {10.1016/j.wre.2019.01.002},
  key      = {fds341860}
}

@article{fds338021,
  author   = {Panlasigui, S. and Rico-Straffon, J. and Pfaff, A. and
    Swenson, J. and Loucks, C.},
  title    = {Impacts of certification, uncertified concessions, and
    protected areas on forest loss in {Cameroon}, 2000 to
    2013},
  journal  = {Biological Conservation},
  volume   = {227},
  pages    = {160--166},
  publisher = {Elsevier BV},
  year     = {2018},
  month    = nov,
  url      = {http://dx.doi.org/10.1016/j.biocon.2018.09.013},
  abstract = {Deforestation and forest fragmentation are leading drivers of
    biodiversity loss. Protected areas have been the leading conservation
    policy response, yet their scale and scope remain inadequate to meet
    biodiversity conservation targets. Managed forest concessions
    increasingly have been recognized as a complement to protected areas in
    meeting conservation targets. Similarly, programs for voluntary
    third-party certification of concession management aim to create
    incentives for logging companies to manage forests more sustainably.
    Rigorous evidence on the impacts from large-scale certification
    programs is thereby critical, yet detailed field observations are
    limited, temporally and spatially. Remotely-sensed data, in contrast,
    can provide repeated observations over time and at a fine spatial
    scale, albeit with less detail. Using the Global Forest Change dataset,
    we examine annual forest loss in Cameroon during 2000–2013 to assess
    the impact of Forest Stewardship Council certification, as well as
    uncertified logging concessions and national parks. We use panel
    regressions that control for the effects of unobserved factors that
    vary across space or time. We find low forest loss inside the
    boundaries of each management intervention, with <1\% lost over the
    study period. Yet those low levels of loss appear to be influenced more
    by a site's proximity to drivers of deforestation, such as distances to
    population centers or roads, than by national parks, uncertified
    concessions, or certification. The exception is that if a site faces
    high deforestation pressure, uncertified logging concessions appear to
    reduce forest loss. This may reflect private companies’ incentives to
    protect rights to forest use. Such an influence of private logging
    companies could provide a foundation for future impacts from
    certification upon rates of forest loss, at least within areas that are
    facing elevated deforestation pressures.},
  doi      = {10.1016/j.biocon.2018.09.013},
  key      = {fds338021}
}

@article{fds333561,
   Author = {Pfaff, A and Robalino, J and Reis, EJ and Walker, R and Perz, S and Laurance, W and Bohrer, C and Aldrich, S and Arima, E and Caldas, M and Kirby, K},
   Title = {Roads \& {SDGs}, tradeoffs and synergies: Learning from
             {Brazil}’s {Amazon} in distinguishing frontiers},
   Journal = {Economics: The Open-Access, Open-Assessment
             E-Journal},
   Volume = {12},
   Publisher = {ZBW - German National Library of Economics},
   Year = {2018},
   Month = {March},
   url = {http://dx.doi.org/10.5018/economics-ejournal.ja.2018-11},
   Abstract = {© Author(s) 2018. To reduce SDG tradeoffs in infrastructure
             provision, and to inform searches for SDG synergies, the
             authors show that roads’ impacts on Brazilian Amazon
             forests varied significantly across frontiers. Impacts
             varied predictably with prior development – prior roads
             and prior deforestation – and, further, in a pattern that
             suggests a potential synergy for roads between forests and
             urban growth. For multiple periods of roads investments, the
             authors estimate forest impacts for high, medium and low
             prior roads and deforestation. For each setting,
             census-tract observations are numerous. Results confirm
             predictions for this kind of frontier of a pattern not
             consistent with endogeneity, i.e., short-run forest impacts
             of new roads are: small for relatively high prior
             development; larger for medium prior development; and small
             for low prior development (for the latter setting, impacts
             in such isolated areas could rise over time, depending on
             interactions with conservation policies). These Amazonian
             results suggest ‘SDG strategic’ locations for
             infrastructure, an idea the authors note for other frontiers
             while highlighting major differences across frontiers and
             their SDG opportunities.},
   Doi = {10.5018/economics-ejournal.ja.2018-11},
   Key = {fds333561}
}

@article{fds332950,
   Author = {Tesfaw, AT and Pfaff, A and Golden Kroner, RE and Qin, S and Medeiros,
             R and Mascia, MB},
   Title = {Land-use and land-cover change shape the sustainability and
             impacts of protected areas.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {115},
   Number = {9},
   Pages = {2084--2089},
   Year = {2018},
   Month = {February},
   url = {http://dx.doi.org/10.1073/pnas.1716462115},
   Abstract = {Protected areas (PAs) remain the dominant policy to protect
             biodiversity and ecosystem services but have been shown to
             have limited impact when development interests force them to
             locations with lower deforestation pressure. Far less known
             is that such interests also cause widespread tempering,
             reduction, or removal of protection [i.e., PA downgrading,
             downsizing, and degazettement (PADDD)]. We inform responses
             to PADDD by proposing and testing a bargaining explanation
             for PADDD risks and deforestation impacts. We examine recent
             degazettements for hydropower development and rural
             settlements in the state of Rondônia in the Brazilian
             Amazon. Results support two hypotheses: (i) ineffective PAs
             (i.e., those where internal deforestation was similar to
             nearby rates) were more likely to be degazetted and (ii)
             degazettement of ineffective PAs caused limited, if any,
             additional deforestation. We also report on cases in which
             ineffective portions were upgraded. Overall our results
             suggest that enhancing PAs' ecological impacts enhances
             their legal durability.},
   Doi = {10.1073/pnas.1716462115},
   Key = {fds332950}
}

@article{fds335194,
   Author = {Pfaff, A and Velez, MA and Broad, K and Hamoudi, A and Taddei,
             R},
   Title = {Contracts versus trust for transfers of ecosystem services:
             Equity and efficiency in resource allocation and
             environmental provision},
   Journal = {Water Resources and Economics},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {January},
   internal-note = {NOTE(review): no Volume/Number/Pages recorded --
             presumably an online-first record at time of entry; confirm
             final pagination against the published version},
   url = {http://dx.doi.org/10.1016/j.wre.2018.04.001},
   Abstract = {© 2018 Managing natural-resource allocation and
             environmental externalities is a challenge. Institutional
             designs are central when improving water quality for
             downstream users, for instance, and when reallocating water
             quantities including for climate adaptation. Views differ on
             which institutions are best: states; markets; or informal
             institutions. For transfers of ecosystem services, we
             compare informal trust-based institutions to enforced
             contracts, both being institutional types we observe
             commonly in the field. The trust-based institutions lack
             binding promises, thus ecosystem-services suppliers are
             unsure about the compensation they will receive for
             transferring services to users. We employ decision
             experiments given the shortcomings of the alternative
             methods for empirical study of institutions, as well as the
             limits on theoretical prediction about behaviors under
             trust. In our bargaining game that decouples equity and
             efficiency, we find that enforced contracts increased
             efficiency as well as all measures of equity. This informs
             the design of institutions to manage transfers of ecosystem
             services, as equity in surplus sharing is important in of
             itself and in permitting efficient allocation.},
   Doi = {10.1016/j.wre.2018.04.001},
   Key = {fds335194}
}


%% Pizer, Billy   
@article{fds342118,
   Author = {Newell, RG and Pizer, WA and Raimi, D},
   Title = {{U.S.} federal government subsidies for clean energy: Design
             choices and implications},
   Journal = {Energy Economics},
   Volume = {80},
   Pages = {831--841},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.eneco.2019.02.018},
   Abstract = {© 2019 Elsevier B.V. Subsidies for clean energy deployment
             have become a major component of U.S. federal energy and
             climate policy. After a surge in spending under the American
             Recovery and Reinvestment Act of 2009, they are an even
             larger component but now face increased scrutiny. Given
             their lasting presence, how does one design these subsidies
             to be as cost-effective as possible? Surprisingly, the
             conceptual framework and empirical evidence available to
             help policymakers identify which subsidies generate the most
             “bang for the buck” are limited. To help answer this
             question, we begin with an overview of the justifications
             for, and the arguments against, subsidizing clean energy
             technologies. Next, we briefly describe major subsidies.
             Finally, we summarize key design choices, suggesting an
             increased focus on upfront cash payments for physical
             outcomes such as capacity. This contrasts with the
             considerable focus on tax credits, loan guarantees,
             production, and cost-based subsidies which have been more
             prominent to date.},
   Doi = {10.1016/j.eneco.2019.02.018},
   Key = {fds342118}
}

@article{fds336242,
   Author = {Fischer, C and Pizer, WA},
   Title = {Horizontal Equity Effects in Energy Regulation},
   Journal = {Journal of the Association of Environmental and Resource
             Economists},
   Volume = {6},
   Number = {S1},
   Pages = {S209--S237},
   Publisher = {University of Chicago Press},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1086/701192},
   Doi = {10.1086/701192},
   Key = {fds336242}
}

@article{fds342570,
   Author = {Deryugina, T and Fullerton, D and Pizer, WA},
   Title = {An Introduction to Energy Policy Trade-Offs between Economic
             Efficiency and Distributional Equity},
   Journal = {Journal of the Association of Environmental and Resource
             Economists},
   Volume = {6},
   Number = {S1},
   Pages = {S1--S6},
   Publisher = {University of Chicago Press},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1086/701515},
   Doi = {10.1086/701515},
   Key = {fds342570}
}

@article{fds336243,
   Author = {Pizer, WA and Sexton, S},
   Title = {The Distributional Impacts of Energy Taxes},
   Journal = {Review of Environmental Economics and Policy},
   Volume = {13},
   Number = {1},
   Pages = {104--123},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1093/reep/rey021},
   Abstract = {© The Author(s) 2019. Published by Oxford University Press
             on behalf of the Association of Environmental and Resource
             Economists. All rights reserved. For permissions, please
             email: journals.permissions@oup.com. Taxes have long been
             advocated by economists for efficient pollution control,
             particularly in the energy sector. However, these taxes may
             enjoy less political support than standards-based regulation
             at least partly because of the common assumption that they
             place a greater burden on the poor than the rich. This
             article evaluates the validity of that assumption by
             reviewing the literature on the distributional impacts of
             energy taxes and by analyzing energy consumption surveys in
             select countries. The evidence suggests that energy taxes
             need not be as regressive as is often assumed. We find that
             the incidence (i.e., distributional impact) of such taxes
             depends upon the energy commodities that are taxed; the
             physical, social, and climatic characteristics of the
             jurisdictions in which they are implemented; and the use of
             energy tax revenues. We also show that the variation in
             household energy expenditure is greater within income groups
             than across income groups and that such variation is not
             easily reduced.},
   Doi = {10.1093/reep/rey021},
   Key = {fds336243}
}

@article{fds340746,
   Author = {Li, Y and Pizer, WA and Wu, L},
   Title = {Climate change and residential electricity consumption in
             the {Yangtze River Delta}, {China}.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {116},
   Number = {2},
   Pages = {472--477},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1073/pnas.1804667115},
   Abstract = {Estimating the impact of climate change on energy use across
             the globe is essential for analysis of both mitigation and
             adaptation policies. Yet existing empirical estimates are
             concentrated in Western countries, especially the United
             States. We use daily data on household electricity
             consumption to estimate how electricity consumption would
             change in Shanghai in the context of climate change. For
             colder days <7 °C, a 1 °C increase in daily temperature
             reduces electricity consumption by 2.8%. On warm days >25
             °C, a 1 °C increase in daily temperatures leads to a 14.5%
             increase in electricity consumption. As income increases,
             households' weather sensitivity remains the same for hotter
             days in the summer but increases during the winter. We use
             this estimated behavior in conjunction with a collection of
             downscaled global climate models (GCMs) to construct a
             relationship between future annual global mean surface
             temperature (GMST) changes and annual residential
             electricity consumption. We find that annual electricity
             consumption increases by 9.2% per +1 °C in annual GMST. In
             comparison, annual peak electricity use increases by as much
             as 36.1% per +1 °C in annual GMST. Although most accurate
             for Shanghai, our findings could be most credibly extended
             to the urban areas in the Yangtze River Delta, covering
             roughly one-fifth of China's urban population and one-fourth
             of the gross domestic product.},
   Doi = {10.1073/pnas.1804667115},
   Key = {fds340746}
}

@article{fds336240,
   Author = {Bielen, DA and Newell, RG and Pizer, WA},
   Title = {Who did the ethanol tax credit benefit? An event analysis of
             subsidy incidence},
   Journal = {Journal of Public Economics},
   Volume = {161},
   Pages = {1--14},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.jpubeco.2018.03.005},
   Abstract = {© 2018 Elsevier B.V. At the end of 2011, the Volumetric
             Ethanol Excise Tax Credit (VEETC), which had subsidized the
             blending of ethanol in gasoline, was allowed to expire.
             During its tenure, the subsidy was the subject of intense
             scrutiny concerning who benefited from its existence. Using
             commodity price data, we estimate the subsidy incidence
             accruing to corn farmers, ethanol producers, gasoline
             blenders, and gasoline consumers around the time of
             expiration. Our empirical approach contributes
             methodologically to the event studies literature by
             analyzing futures contract prices (as opposed to spot
             prices) when possible. Ultimately, we find compelling
             evidence that, at the date of VEETC expiration, ethanol
             producers captured about 25¢ of the 45¢ subsidy per gallon
             of ethanol blended. We find suggestive, albeit inconclusive,
             evidence that a portion of this benefit (about 5¢ per
             gallon) was passed further upstream from ethanol producers
             to corn farmers. Most of the remainder seems most likely to
             have been captured by the blenders themselves. On the
             petroleum side, we find no evidence that oil refiners
             captured any part of the subsidy. We also find no evidence
             that the subsidy was passed downstream to gasoline consumers
             in the form of lower gasoline prices.},
   Doi = {10.1016/j.jpubeco.2018.03.005},
   Key = {fds336240}
}

@article{fds332833,
   Author = {Iyer, G and Calvin, K and Clarke, L and Edmonds, J and Hultman, N and Hartin, C and McJeon, H and Aldy, J and Pizer, W},
   Title = {Implications of sustainable development considerations for
             comparability across nationally determined
             contributions},
   Journal = {Nature Climate Change},
   Volume = {8},
   Number = {2},
   Pages = {124--129},
   Publisher = {Springer Nature},
   Year = {2018},
   Month = {February},
   url = {http://dx.doi.org/10.1038/s41558-017-0039-z},
   Abstract = {© 2018 The Author (s), under exclusive licence to Macmillan
             Publishers Limited, part of Springer Nature. An important
             component of the Paris Agreement is the assessment of
             comparability across nationally determined contributions
             (NDCs). Indeed, game-theory literature on international
             environmental agreements highlights the need for comparable
             emission-mitigation efforts by countries to avoid
             free-riding 1 . At the same time, there are well-recognized
             links between mitigation and other national priorities,
             including but not limited to the 17 United Nations
             Sustainable Development Goals (SDGs) 2-6, which raises the
             question of how such links might influence comparability
             assessments. Here, using a global integrated assessment
             model 7, we demonstrate that geographical distributions of
             the influence of meeting the domestic mitigation component
             of the NDCs on a subset of the broader SDGs may not align
             with distributions of effort across NDCs obtained from
             conventional emissions-based or cost-based comparability
             metrics 8-11 . This implies that comparability assessments
             would be altered if interactions between mitigation and
             other SDGs were accounted for. Furthermore, we demonstrate
             that the extent to which these distributions differ depends
             on the degree to which mitigation activities directly affect
             broader SDGs domestically and indirectly affect
             international goals, and whether these effects are
             synergistic or antagonistic. Our analysis provides a
             foundation for assessing how comparability across NDCs could
             be better understood in the larger context of
             sustainability.},
   Doi = {10.1038/s41558-017-0039-z},
   Key = {fds332833}
}

@article{fds336241,
   Author = {Pizer, WA and Zhang, X},
   Title = {{China}'s New National Carbon Market},
   Journal = {AEA Papers and Proceedings},
   Volume = {108},
   Pages = {463--467},
   Publisher = {American Economic Association},
   Year = {2018},
   url = {http://dx.doi.org/10.1257/pandp.20181029},
   Abstract = {On December 19, 2017, China announced the official
             start of its national emissions trading system (ETS)
             construction program. When fully implemented, this program
             will more than double the volume of worldwide carbon dioxide
             emissions covered by either tax or tradable permit policy.
             Many of program's design features reflect those of China's
             pilot programs but differ from those of most emissions
             trading programs in the United States and Europe. This paper
             explains the context and design of China's new carbon
             market, discusses implications and possible modifications,
             and suggests topics for further research.},
   Doi = {10.1257/pandp.20181029},
   Key = {fds336241}
}


%% Quaedvlieg, Rogier   
@article{fds338103,
   Author = {Bollerslev, T and Patton, AJ and Quaedvlieg, R},
   Title = {Modeling and forecasting (un)reliable realized covariances
             for more reliable financial decisions},
   Journal = {Journal of Econometrics},
   Volume = {207},
   Number = {1},
   Pages = {71--91},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1016/j.jeconom.2018.05.004},
   Abstract = {© 2018 Elsevier B.V. We propose a new framework for
             modeling and forecasting common financial risks based on
             (un)reliable realized covariance measures constructed from
             high-frequency intraday data. Our new approach explicitly
             incorporates the effect of measurement errors and
             time-varying attenuation biases into the covariance
             forecasts, by allowing the ex-ante predictions to respond
             more (less) aggressively to changes in the ex-post realized
             covariance measures when they are more (less) reliable.
             Applying the new procedures in the construction of minimum
             variance and minimum tracking error portfolios results in
             reduced turnover and statistically superior positions
             compared to existing procedures. Translating these
             statistical improvements into economic gains, we find that
             under empirically realistic assumptions a risk-averse
             investor would be willing to pay up to 170 basis points per
             year to shift to using the new class of forecasting
             models.},
   Doi = {10.1016/j.jeconom.2018.05.004},
   Key = {fds338103}
}

@article{fds338102,
   Author = {Bollerslev, T and Patton, AJ and Quaedvlieg, R},
   Title = {Multivariate Leverage Effects and Realized Semicovariance
             {GARCH} Models},
   Year = {2018},
   Month = {April},
   Key = {fds338102}
}

@article{fds339890,
   Author = {Bollerslev, T and Patton, AJ and Quaedvlieg, R},
   Title = {Modeling and forecasting (un)reliable realized covariances
             for more reliable financial decisions},
   Volume = {207},
   Number = {1},
   Pages = {71--91},
   Year = {2018},
   internal-note = {NOTE(review): appears to duplicate fds338103 (same
             title, volume, number, and pages; journal field missing
             here) -- consider merging the two records},
   Abstract = {We propose a new framework for modeling and forecasting
             common financial risks based on (un)reliable realized
             covariance measures constructed from high-frequency intraday
             data. Our new approach explicitly incorporates the effect of
             measurement errors and time-varying attenuation biases into
             the covariance forecasts, by allowing the ex-ante
             predictions to respond more (less) aggressively to changes
             in the ex-post realized covariance measures when they are
             more (less) reliable. Applying the new procedures in the
             construction of minimum variance and minimum tracking error
             portfolios results in reduced turnover and statistically
             superior positions compared to existing procedures.
             Translating these statistical improvements into economic
             gains, we find that under empirically realistic assumptions
             a risk-averse investor would be willing to pay up to 170
             basis points per year to shift to using the new class of
             forecasting models.},
   Key = {fds339890}
}


%% Rampini, Adriano A.   
@article{fds303018,
   Author = {Rampini, AA and Viswanathan, S},
   Title = {Financing Insurance},
   Year = {2019},
   Month = {May},
   internal-note = {NOTE(review): no journal/volume/pages recorded --
             presumably an unpublished working paper at time of entry;
             confirm publication status},
   Abstract = {Households’ insurance against shocks to income, health and
             other non-discretionary expenditures, and asset values (that
             is, household risk management) is limited, especially for
             poor households. We argue that a trade-off between
             intertemporal financing needs and insurance across states
             explains this basic insurance pattern. In a model with
             limited enforcement, we show that household risk management
             is increasing in household net worth and income, incomplete,
             and precautionary. These results hold in economies with
             income risk, durable goods and collateral constraints, and
             durable goods price risk, under quite general conditions
             and, remarkably, risk aversion is sufficient and prudence is
             not required.},
   Key = {fds303018}
}

@article{fds303012,
   Author = {Rampini, AA and Viswanathan, S and Vuillemey, G},
   Title = {Risk Management in Financial Institutions},
   Year = {2019},
   Month = {March},
   internal-note = {NOTE(review): no journal/volume/pages recorded --
             presumably an unpublished working paper at time of entry;
             confirm publication status},
   Abstract = {We study risk management in financial institutions using
             data on hedging of interest rate risk by U.S. banks and bank
             holding companies. Theory predicts that more financially
             constrained institutions hedge less and that institutions
             whose net worth declines due to adverse shocks reduce
             hedging. We find strong evidence consistent with the theory
             in both the cross-section of institutions and within
             institutions over time. We use shocks to institutions’ net
             worth resulting from loan losses due to drops in house
             prices for identification. Institutions which sustain such
             losses reduce hedging substantially relative to otherwise
             similar institutions. We find no evidence that risk
             shifting, changes in interest rate risk exposures, or
             regulatory capital explain hedging behavior.},
   Key = {fds303012}
}
}

@article{fds303011,
   Author = {Rampini, AA},
   Title = {Financing durable assets},
   Journal = {American Economic Review},
   Volume = {109},
   Number = {2},
   Pages = {664--701},
   Publisher = {American Economic Association},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1257/aer.20170995},
   Abstract = {© 2019 American Economic Association. All rights reserved.
             This paper studies how the durability of assets affects
             financing. We show that more durable assets require larger
             down payments making them harder to finance, because
             durability affects the price of assets and hence the overall
             financing need more than their collateral value. Durability
             affects technology adoption, the choice between new and used
             capital, and the rent versus buy decision. Constrained firms
             invest in less durable assets and buy used assets. More
             durable assets are more likely to be rented. Economies with
             weak legal enforcement invest more in less durable,
             otherwise dominated assets and are net importers of used
             assets.},
   Doi = {10.1257/aer.20170995},
   Key = {fds303011}
}

@article{fds303020,
   Author = {Rampini, AA and Viswanathan, S},
   Title = {Financial Intermediary Capital},
   Journal = {Review of Economic Studies},
   internal-note = {NOTE(review): journal name inferred from the DOI
             prefix 10.1093/restud -- confirm against the published
             version},
   Pages = {413--455},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1093/restud/rdy020},
   Abstract = {© The Author(s) 2018. We propose a dynamic theory of
             financial intermediaries that are better able to
             collateralize claims than households, that is, have a
             collateralization advantage. Intermediaries require capital
             as they have to finance the additional amount that they can
             lend out of their own net worth. The net worth of financial
             intermediaries and the corporate sector are both state
             variables affecting the spread between intermediated and
             direct finance and the dynamics of real economic activity,
             such as investment, and financing. The accumulation of net
             worth of intermediaries is slow relative to that of the
             corporate sector. The model is consistent with key stylized
             facts about macroeconomic downturns associated with a credit
             crunch, namely, their severity, their protractedness, and
             the fact that the severity of the credit crunch itself
             affects the severity and persistence of downturns. The model
             captures the tentative and halting nature of recoveries from
             crises.},
   Doi = {10.1093/restud/rdy020},
   Key = {fds303020}
}


%% Rangel, Marcos A.   
@article{fds340818,
   Author = {Rangel, MA and Shi, Y},
   Title = {Early patterns of skill acquisition and immigrants'
             specialization in {STEM} careers.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {116},
   Number = {2},
   Pages = {484--489},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1073/pnas.1812041116},
   Abstract = {We provide empirical evidence of immigrants' specialization
             in skill acquisition well before entering the US labor
             market. Nationally representative datasets enable studying
             the academic trajectories of immigrant children, with a
             focus on high-school course-taking patterns and college
             major choice. Immigrant children accumulate skills in ways
             that reinforce comparative advantages in nonlanguage
             intensive skills such as mathematics and science, and this
             contributes to their growing numbers in science, technology,
             engineering, and math (STEM) careers. These results are
             compatible with well-established models of skill formation
             that emphasize dynamic complementarities of investments in
             learning.},
   Doi = {10.1073/pnas.1812041116},
   Key = {fds340818}
}


%% Roberts, James W.   
@article{fds325455,
   Author = {Eliason, PJ and Grieco, PLE and McDevitt, RC and Roberts,
             JW},
   Title = {Strategic Patient Discharge: the Case of Long-Term Care
             Hospitals.},
   Pages = {3232--3265},
   Year = {2018},
   Month = {November},
   Abstract = {Medicare's prospective payment system for long-term
             acute-care hospitals (LTCHs) provides modest reimbursements
             at the beginning of a patient's stay before jumping
             discontinuously to a large lump-sum payment after a
             prespecified number of days. We show that LTCHs respond to
             the financial incentives of this system by
             disproportionately discharging patients after they cross the
             large-payment threshold. We find this occurs more often at
             for-profit facilities, facilities acquired by leading LTCH
             chains, and facilities colocated with other hospitals. Using
             a dynamic structural model, we evaluate counterfactual
             payment policies that would provide substantial savings for
             Medicare.},
   Key = {fds325455}
}

@article{fds343587,
   Author = {Li, Y and Mazur, L and Park, Y and Roberts, JW and Sweeting, A and Zhang,
             J},
   Title = {Endogenous and Selective Service Choices after Airline
             Mergers},
   Year = {2018},
   Month = {January},
   internal-note = {NOTE(review): no journal/volume/pages recorded --
             presumably an unpublished working paper at time of entry;
             confirm publication status},
   Key = {fds343587}
}


%% Sadowski, Philipp   
@article{fds340875,
   Author = {Dillenberger, D and Sadowski, P},
   Title = {Stable behavior and generalized partition},
   Journal = {Economic Theory},
   Pages = {1--18},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1007/s00199-018-1122-z},
   Abstract = {© 2018 Springer-Verlag GmbH Germany, part of Springer
             Nature Behavior is stable if the ex ante ranking of two acts
             that differ only on some event I coincides with their ex
             post ranking upon learning I. We identify the largest class
             of information structures for which the behavior of a
             Bayesian expected utility maximizer is stable. We call them
             generalized partitions and characterize the learning
             processes they can accommodate. Often, the information
             structure is not explicitly part of the primitives in the
             model, and so becomes a subjective parameter. We propose a
             way to identify how the individual plans to choose
             contingent on learning an event, and establish that for a
             Bayesian expected utility maximizer, stable
             behavior—formulated in terms of this indirectly observed
             contingent ranking—is a tight characterization of
             subjective learning via a generalized partition.},
   Doi = {10.1007/s00199-018-1122-z},
   Key = {fds340875}
}


%% Sloan, Frank A.   
@article{fds342132,
   Author = {Akushevich, I and Yashkin, A and Kravchenko, J and Fang, F and Arbeev,
             K and Sloan, F and Yashin, AI},
   Title = {A forecasting model of disease prevalence based on the
             McKendrick-von Foerster equation.},
   Journal = {Math Biosci},
   Volume = {311},
   Pages = {31--38},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.mbs.2018.12.017},
   Abstract = {A new model for disease prevalence based on the analytical
             solutions of McKendrick-von Foerster's partial differential
             equations is developed. Derivation of the model and methods
             to cross check obtained results are explicitly demonstrated.
             Obtained equations describe the time evolution of the
             healthy and unhealthy age-structured sub-populations and age
             patterns of disease prevalence. The projection of disease
             prevalence into the future requires estimates of time trends
             of age-specific disease incidence, relative survival
             functions, and prevalence at the initial age and year
             available in the data. The computational scheme for
             parameter estimations using Medicare data, analytical
             properties of the model, application for diabetes
             prevalence, and relationship with partitioning models are
             described and discussed. The model allows natural
             generalization for the case of several diseases as well as
             for modeling time trends in cause-specific mortality
             rates.},
   Doi = {10.1016/j.mbs.2018.12.017},
   Key = {fds342132}
}

@article{fds339571,
   Author = {Wang, Y and Sloan, FA},
   Title = {Present bias and health},
   Journal = {Journal of Risk and Uncertainty},
   Volume = {57},
   Number = {2},
   Pages = {177--198},
   Publisher = {Springer Nature America, Inc},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1007/s11166-018-9289-z},
   Abstract = {This study uses a dynamic discrete choice
             model to examine the degree of present bias and naivete
             about present bias in individuals’ health care decisions.
             Clinical guidelines exist for several common chronic
             diseases. Although the empirical evidence for some
             guidelines is strong, many individuals with these diseases
             do not follow the guidelines. Using persons with diabetes as
             a case study, we find evidence of substantial present bias
             and naivete. Counterfactual simulations indicate the
             importance of present bias and naivete in explaining low
             adherence rates to health care guidelines.},
   Doi = {10.1007/s11166-018-9289-z},
   Key = {fds339571}
}

@article{fds333659,
   Author = {Akushevich, I and Yashkin, AP and Kravchenko, J and Fang, F and Arbeev,
             K and Sloan, F and Yashin, AI},
   Title = {Identifying the causes of the changes in the prevalence
             patterns of diabetes in older U.S. adults: A new trend
             partitioning approach.},
   Journal = {J Diabetes Complications},
   Volume = {32},
   Number = {4},
   Pages = {362--367},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1016/j.jdiacomp.2017.12.014},
   Abstract = {AIMS: To identify how efforts to control the diabetes
             epidemic and the resulting changes in diabetes mellitus,
             type II (T2D) incidence and survival have affected the
             time-trend of T2D prevalence. METHODS: A newly developed
             method of trend decomposition was applied to a 5% sample of
             Medicare administrative claims filed between 1991 and 2012.
             RESULTS: Age-adjusted prevalence of T2D for adults age 65+
             increased at an average annual percentage change of 2.31%
             between 1992 and 2012. Primary contributors to this trend
             were (in order of magnitude): improved survival at all ages,
             increased prevalence of T2D prior to age of Medicare
             eligibility, decreased incidence of T2D after age of
             Medicare eligibility. CONCLUSIONS: Health services supported
             by the Medicare system, coupled with improvements in medical
             technology and T2D awareness efforts provide effective care
             for individuals age 65 and older. However, policy maker
             attention should be shifted to the prevention of T2D in
             younger age groups to control the increase in prevalence
             observed prior to Medicare eligibility.},
   Doi = {10.1016/j.jdiacomp.2017.12.014},
   Key = {fds333659}
}

@article{fds333201,
   Author = {Yashkin, AP and Sloan, F},
   Title = {Adherence to Guidelines for Screening and Medication Use:
             Mortality and Onset of Major Macrovascular Complications in
             Elderly Persons With Diabetes Mellitus.},
   Journal = {Journal of Aging and Health},
   Volume = {30},
   Number = {4},
   Pages = {503--520},
   Publisher = {SAGE Publications},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1177/0898264316684270},
   Abstract = {OBJECTIVE:The objective of this study is to investigate
             relationships between adherence to recommended screening and
             medication use and severe macrovascular complications and
             all-cause mortality among persons aged above 68 years with
             diabetes mellitus (DM). METHOD:Data came from a 5% Medicare
             claims sample of beneficiaries initially diagnosed with DM
             during 2006-2008; follow-up was up to 7 years.
             RESULTS:Adherence to screening guidelines led to reduced
             mortality-hazard ratio (HR) = 0.57, 95% confidence interval
             [CI] = [0.56, 0.58]; congestive heart failure [CHF], HR =
             0.89, CI = [0.87, 0.91]; acute myocardial infarction [AMI],
             HR = 0.90, CI = [0.85, 0.95]; and stroke/transient ischemic
             attack [Stroke/TIA], HR = 0.92, CI = [0.87, 0.97]-during
             follow-up. Recommended medication use led to lower
             mortality: HR = 0.72, CI = [0.70, 0.73]; CHF, HR = 0.67, CI
             = [0.66, 0.69]; AMI, HR = 0.68, CI = [0.65, 0.71]; and
             Stroke/TIA, HR = 0.79, CI = [0.76, 0.83]. DISCUSSION:Elderly
             persons newly diagnosed with diabetes who adhered to
             recommended care experienced reduced risk of mortality and
             severe macrovascular complications.},
   Doi = {10.1177/0898264316684270},
   Key = {fds333201}
}

@article{fds332094,
   Author = {Yashkin, AP and Kravchenko, J and Yashin, AI and Sloan,
             F},
   Title = {Mortality and Macrovascular Risk in Elderly With
             Hypertension and Diabetes: Effect of Intensive Drug
             Therapy.},
   Journal = {Am J Hypertens},
   Volume = {31},
   Number = {2},
   Pages = {220--227},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1093/ajh/hpx151},
   Abstract = {BACKGROUND: This study identifies the effect of intensive
             drug therapy (IDT) in individuals age 65+ with diabetes
             (type 2 diabetes mellitus (T2D)) and hypertension on
             all-cause death, congestive heart failure (CHF),
             hospitalization for myocardial infarction (MI), and stroke
             or transient ischemic attack (TIA). METHODS: Individuals
             from the Medicare 5% dataset with hypertension and T2D
             undergoing IDT for these conditions were propensity score
             matched to a nonintensive drug-therapy group. Hazard ratios
             (HRs) were obtained using the Cox proportional hazard model.
             RESULTS: IDT was associated with increased risk of CHF (HR
             2.32; 95% confidence interval (CI) 2.32-2.38), MI (HR 4.27;
             95% CI 4.05-4.52), and stroke or TIA (HR 1.80; 95% CI
             1.70-1.89) but decreased risk of death (HR 0.95; 95% CI
             0.93-0.97). Risk for CHF (HR 0.73; 95% CI 0.71-0.73), MI (HR
             0.64; 95% CI 0.62-0.67), stroke or TIA (HR 0.82; 95% CI
             0.78-0.86), and death (HR 0.29; 95% CI 0.28-0.29) was
             decreased by adherence to diabetes management guidelines.
             CONCLUSIONS: Use of IDT in a high-risk population delays
             death but not severe macrovascular outcomes. Protective
             effects of IDT in high-risk patients likely outweigh
             polypharmacy-related health concerns.},
   Doi = {10.1093/ajh/hpx151},
   Key = {fds332094}
}

@article{fds335434,
   Author = {Gifford, EJ and Eldred, L and Sloan, F},
   Title = {Expanding the Role of Drug Treatment Courts to Prevent Child
             Maltreatment},
   Journal = {CW360},
   Volume = {2018},
   Number = {Spring},
   Publisher = {School of Social Work University of Minnesota},
   Editor = {LaLiberte, T and Barry, K and Walthour, K},
   Year = {2018},
   Key = {fds335434}
}


%% Smith, Martin D.   
@article{fds342425,
   Author = {Smith, MD},
   Title = {Subsidies, efficiency, and fairness in fisheries
             policy.},
   Journal = {Science (New York, N.Y.)},
   Volume = {364},
   Number = {6435},
   Pages = {34--35},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1126/science.aaw4087},
   Doi = {10.1126/science.aaw4087},
   Key = {fds342425}
}

@article{fds339523,
   Author = {Ferraro, PJ and Sanchirico, JN and Smith, MD},
   Title = {Causal inference in coupled human and natural
             systems.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {116},
   Number = {12},
   Pages = {5311--5318},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1073/pnas.1805563115},
   Abstract = {Coupled human and natural systems (CHANS) are complex,
             dynamic, interconnected systems with feedback across social
             and environmental dimensions. This feedback leads to
             formidable challenges for causal inference. Two significant
             challenges involve assumptions about excludability and the
             absence of interference. These two assumptions have been
             largely unexplored in the CHANS literature, but when either
             is violated, causal inferences from observable data are
             difficult to interpret. To explore their plausibility,
             structural knowledge of the system is requisite, as is an
             explicit recognition that most causal variables in CHANS
             affect a coupled pairing of environmental and human
             elements. In a large CHANS literature that evaluates marine
             protected areas, nearly 200 studies attempt to make causal
             claims, but few address the excludability assumption. To
             examine the relevance of interference in CHANS, we develop a
             stylized simulation of a marine CHANS with shocks that can
             represent policy interventions, ecological disturbances, and
             technological disasters. Human and capital mobility in CHANS
             is both a cause of interference, which biases inferences
             about causal effects, and a moderator of the causal effects
             themselves. No perfect solutions exist for satisfying
             excludability and interference assumptions in CHANS. To
             elucidate causal relationships in CHANS, multiple approaches
             will be needed for a given causal question, with the aim of
             identifying sources of bias in each approach and then
             triangulating on credible inferences. Within CHANS research,
             and sustainability science more generally, the path to
             accumulating an evidence base on causal relationships
             requires skills and knowledge from many disciplines and
             effective academic-practitioner collaborations.},
   Doi = {10.1073/pnas.1805563115},
   Key = {fds339523}
}

@article{fds341926,
   Author = {Birkenbach, AM and Smith, MD and Stefanski, S},
   Title = {Feature-Taking Stock of Catch Shares: Lessons from the Past
             and Directions for the Future},
   Journal = {Review of Environmental Economics and Policy},
   Volume = {13},
   Number = {1},
   Pages = {130--139},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1093/reep/rey016},
   Abstract = {With the widespread
             implementation of catch shares (i.e., rights-based fisheries
             management) at the end of the twentieth century, economists
             have begun to examine empirical evidence about their
             performance. Yet despite documented positive outcomes and
             predicted gains from wider adoption of this approach, catch
             shares face persistent political opposition and criticism in
             the noneconomics literature. The debate surrounding catch
             shares focuses on equity, industry consolidation, nonlocal
             ownership of quotas, employment, and other impacts on
             fishing communities, but the evidence on both sides has been
             largely anecdotal. To inform this debate, it is important
             for economists and other researchers to produce rigorous
             analyses that quantify the effects of catch shares on
             employment, the distribution of economic value in the
             harvest and processing sectors, and other indicators of
             community well-being. We assess catch shares to identify
             research needs and guide policymakers. Using examples from
             the experiences of the United States and Argentina with
             rights-based fisheries, we demonstrate that a key challenge
             for researchers and policymakers is accounting for multiple
             species, globalization of seafood markets, and climate
             change. We urge policymakers to consider these forces and
             their impacts, along with available empirical evidence, when
             evaluating fisheries management options that balance
             efficiency and equity goals.},
   Doi = {10.1093/reep/rey016},
   Key = {fds341926}
}

@article{fds340960,
   Author = {Mullin, M and Smith, MD and McNamara, DE},
   Title = {Paying to save the beach: effects of local finance decisions
             on coastal management},
   Journal = {Climatic Change},
   Volume = {152},
   Number = {2},
   Pages = {275--289},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1007/s10584-018-2191-5},
   Abstract = {As sea level rises and storm frequency and
             severity increase, communities worldwide are investing in
             coastline management projects to maintain beach widths and
             dunes that support recreational amenities and mitigate storm
             risks. These projects are costly, and differences in
             property owners’ returns from maintaining wide beaches
             will influence community-level support for investment in
             shoreline defense. One way to account for these differences
             is by funding the project through a tax instrument that
             imposes the heaviest cost on residents who benefit most from
             beach nourishment. Some communities along the US east coast
             have adopted this approach. We use an agent-based model to
             evaluate how the imposition of project costs affects
             coastline management over the long-term. Charging higher tax
             rates on oceanfront properties reduces desired beach width
             among those owners but increases desired width for owners of
             inland properties. The aggregate impact on beach width
             depends on coastline shape and development patterns that
             determine the balance between these two groups,
             heterogeneity of beach width preferences and climate change
             beliefs, and levels of participation in local politics.
             Overall, requiring property owners who benefit most from
             beach nourishment to bear the highest cost results in wider
             beaches. The result suggests that delineating tax rates to
             account for unequal benefits of local public goods across
             taxpayers could facilitate local investment in climate
             change adaptation.},
   Doi = {10.1007/s10584-018-2191-5},
   Key = {fds340960}
}

@article{fds339522,
   Author = {Asche, F and Garlock, TM and Anderson, JL and Bush, SR and Smith, MD and Anderson, CM and Chu, J and Garrett, KA and Lem, A and Lorenzen, K and Oglend, A and Tveteras, S and Vannuccini, S},
   Title = {Three pillars of sustainability in fisheries.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {115},
   Number = {44},
   Pages = {11221--11225},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1073/pnas.1807677115},
   Abstract = {Sustainability of global fisheries is a growing concern. The
             United Nations has identified three pillars of
             sustainability: economic development, social development,
             and environmental protection. The fisheries literature
             suggests that there are two key trade-offs among these
             pillars of sustainability. First, poor ecological health of
             a fishery reduces economic profits for fishers, and second,
             economic profitability of individual fishers undermines the
             social objectives of fishing communities. Although recent
             research has shown that management can reconcile ecological
             and economic objectives, there are lingering concerns about
             achieving positive social outcomes. We examined trade-offs
             among the three pillars of sustainability by analyzing the
             Fishery Performance Indicators, a unique dataset that scores
             121 distinct fishery systems worldwide on 68 metrics
             categorized by social, economic, or ecological outcomes. For
             each of the 121 fishery systems, we averaged the outcome
             measures to create overall scores for economic, ecological,
             and social performance. We analyzed the scores and found
             that they were positively associated in the full sample. We
             divided the data into subsamples that correspond to
             fisheries management systems with three categories of
             access-open access, access rights, and harvest rights-and
             performed a similar analysis. Our results show that
             economic, social, and ecological objectives are at worst
             independent and are mutually reinforcing in both types of
             managed fisheries. The implication is that rights-based
             management systems should not be rejected on the basis of
             potentially negative social outcomes; instead, social
             considerations should be addressed in the design of these
             systems.},
   Doi = {10.1073/pnas.1807677115},
   Key = {fds339522}
}

@article{fds335997,
   Author = {Abbott, JK and Sanchirico, JN and Smith, MD},
   Title = {Common property resources and the dynamics of
             overexploitation: The case of the {North Pacific} fur seal—a
             42-year legacy},
   Journal = {Marine Resource Economics},
   Volume = {33},
   Number = {3},
   Pages = {209--212},
   Publisher = {University of Chicago Press},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1086/698020},
   Doi = {10.1086/698020},
   Key = {fds335997}
}

@article{fds335998,
   Author = {Asche, F and Smith, MD},
   Title = {Viewpoint: Induced Innovation in Fisheries and
             Aquaculture},
   Journal = {Food Policy},
   Volume = {76},
   Pages = {1--7},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1016/j.foodpol.2018.02.002},
   Abstract = {Some classical economists, most notably
             Malthus, predicted that scarcity would undermine long-term
             human well-being. John Stuart Mill, in contrast, predicted
             that the threat of scarcity creates incentives for
             innovation that help to avoid some of the worst outcomes.
             Popular claims of marine ecologists often apply the
             Malthusian narrative to supplies of seafood, yet global
             supplies have continued to grow. We examine the modern
             seafood industry and evaluate Mill's claims about
             innovation. We argue that the mechanisms that Mill
             discusses–innovation in response to and in anticipation of
             scarcity–account for much of what we see. Scarcities
             induce technological, policy, and market innovations that
             enable seafood supplies to grow, and these innovations can
             build on each other. The challenge for policy makers is to
             avoid knee-jerk responses to Malthusian narratives and craft
             policy responses that encourage innovation while recognizing
             physical limits of ocean resources.},
   Doi = {10.1016/j.foodpol.2018.02.002},
   Key = {fds335998}
}

@article{fds333239,
   Author = {Gopalakrishnan, S and Landry, CE and Smith, MD},
   Title = {Climate change adaptation in coastal environments: Modeling
             challenges for resource and environmental
             economists},
   Journal = {Review of Environmental Economics and Policy},
   Volume = {12},
   Number = {1},
   Pages = {48--68},
   Publisher = {Oxford University Press (OUP)},
   Year = {2018},
   Month = {February},
   url = {http://dx.doi.org/10.1093/reep/rex020},
   Doi = {10.1093/reep/rex020},
   Key = {fds333239}
}


%% Suarez Serrato, Juan Carlos   
@article{fds343256,
   Author = {Chen, Z and Liu, Z and Suárez Serrato, JC and Xu, DY},
   Title = {Notching R&D Investment with Corporate Income Tax Cuts in
             China},
   Year = {2018},
   Month = {June},
   Key = {fds343256}
}


%% Tauchen, George E.   
@article{fds343332,
   Author = {Li, J and Todorov, V and Tauchen, G},
   Title = {Jump factor models in large cross-sections},
   Journal = {Quantitative Economics},
   Volume = {10},
   Number = {2},
   Pages = {419--456},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.3982/QE1060},
   Abstract = {We develop tests for deciding
             whether a large cross-section of asset prices obey an exact
             factor structure at the times of factor jumps. Such jump
             dependence is implied by standard linear factor models. Our
             inference is based on a panel of asset returns with
             asymptotically increasing cross-sectional dimension and
             sampling frequency, and essentially no restriction on the
             relative magnitude of these two dimensions of the panel. The
             test is formed from the high-frequency returns at the times
             when the risk factors are detected to have a jump. The test
             statistic is a cross-sectional average of a measure of
             discrepancy in the estimated jump factor loadings of the
             assets at consecutive jump times. Under the null hypothesis,
             the discrepancy in the factor loadings is due to a
             measurement error, which shrinks with the increase of the
             sampling frequency, while under an alternative of a noisy
             jump factor model this discrepancy contains also
             nonvanishing firm-specific shocks. The limit behavior of the
             test under the null hypothesis is nonstandard and reflects
             the strong-dependence in the cross-section of returns as
             well as their heteroskedasticity which is left unspecified.
             We further develop estimators for assessing the magnitude of
             firm-specific risk in asset prices at the factor jump
             events. Empirical application to S&P 100 stocks provides
             evidence for exact one-factor structure at times of big
             market-wide jump events.},
   Doi = {10.3982/QE1060},
   Key = {fds343332}
}

@article{fds329309,
   Author = {Li, J and Todorov, V and Tauchen, G and Lin, H},
   Title = {Rank Tests at Jump Events},
   Journal = {Journal of Business & Economic Statistics},
   Volume = {37},
   Number = {2},
   Pages = {312--321},
   Publisher = {Informa UK Limited},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1080/07350015.2017.1328362},
   Abstract = {We propose a test for the rank of a cross-section of processes
             at a set of jump events. The jump events are either specific
             known times or are random and associated with jumps of some
             process. The test is formed from discretely sampled data on
             a fixed time interval with asymptotically shrinking mesh. In
             the first step, we form nonparametric estimates of the jump
             events via thresholding techniques. We then compute the
             eigenvalues of the outer product of the cross-section of
             increments at the identified jump events. The test for rank
             r is based on the asymptotic behavior of the sum of the
             squared eigenvalues excluding the largest r. A simple
             resampling method is proposed for feasible testing. The test
             is applied to financial data spanning the period 2007–2015
             at the times of stock market jumps. We find support for a
             one-factor model of both industry portfolio and Dow 30 stock
             returns at market jump times. This stands in contrast with
             earlier evidence for higher-dimensional factor structure of
             stock returns during “normal” (nonjump) times. We
             identify the latent factor driving the stocks and portfolios
             as the size of the market jump.},
   Doi = {10.1080/07350015.2017.1328362},
   Key = {fds329309}
}

@article{fds336355,
   Author = {Gallant, AR and Tauchen, G},
   Title = {Exact Bayesian moment based inference for the distribution
             of the small-time movements of an Itô semimartingale},
   Journal = {Journal of Econometrics},
   Volume = {205},
   Number = {1},
   Pages = {140--155},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1016/j.jeconom.2018.03.008},
   Abstract = {We modify the Gallant and Tauchen
             (1996) efficient method of moments (EMM) method to perform
             exact Bayesian inference, where exact means no reliance on
             asymptotic approximations. We use this modification to
             evaluate the empirical plausibility of recent predictions
             from high frequency financial theory regarding the
             small-time movements of an Itô semimartingale. The theory
             indicates that the probability distribution of the small
             moves should be locally stable around the origin. It makes
             no predictions regarding large rare jumps, which get
             filtered out. Our exact Bayesian procedure imposes support
             conditions on parameters as implied by this theory. The
             empirical application uses S&P Index options extending over
             a wide range of moneyness, including deep out of the money
             puts. The evidence is consistent with a locally stable
             distribution valid over most of the support of the observed
             data while mildly failing in the extreme tails, about which
             the theory makes no prediction. We undertake diagnostic
             checks on all aspects of the procedure. In particular, we
             evaluate the distributional assumptions regarding a
             semi-pivotal statistic, and we test by Monte Carlo that the
             posterior distribution is properly centered with short
             credibility intervals. Taken together, our results suggest a
             more important role than previously thought for pure
             jump-like models with diminished, if not absent, diffusive
             component.},
   Doi = {10.1016/j.jeconom.2018.03.008},
   Key = {fds336355}
}

@article{fds320629,
   Author = {Davies, R and Tauchen, G},
   Title = {Data-driven jump detection thresholds for application in
             jump regressions},
   Journal = {Econometrics},
   Volume = {6},
   Number = {2},
   Pages = {16},
   Publisher = {MDPI AG},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.3390/econometrics6020016},
   Abstract = {This paper develops a method to select the threshold in
             threshold-based jump detection methods. The method is
             motivated by an analysis of threshold-based jump detection
             methods in the context of jump-diffusion models. We show
             that over the range of sampling frequencies a researcher is
             most likely to encounter that the usual in-fill asymptotics
             provide a poor guide for selecting the jump threshold.
             Because of this we develop a sample-based method. Our method
             estimates the number of jumps over a grid of thresholds and
             selects the optimal threshold at what we term the
             ‘take-off’ point in the estimated number of jumps. We
             show that this method consistently estimates the jumps and
             their indices as the sampling interval goes to zero. In
             several Monte Carlo studies we evaluate the performance of
             our method based on its ability to accurately locate jumps
             and its ability to distinguish between true jumps and large
             diffusive moves. In one of these Monte Carlo studies we
             evaluate the performance of our method in a jump regression
             context. Finally, we apply our method in two empirical
             studies. In one we estimate the number of jumps and report
             the jump threshold our method selects for three commonly
             used market indices. In the other empirical application we
             perform a series of jump regressions using our method to
             select the jump threshold.},
   Doi = {10.3390/econometrics6020016},
   Key = {fds320629}
}


%% Thomas, Duncan   
@article{fds336356,
   Author = {Thomas, D and Seeman, T and Potter, A and Hu, P and Crimmins, E and Herningtyas, EH and Sumantri, C and Frankenberg,
             E},
   Title = {{HPLC}-based Measurement of Glycated Hemoglobin using Dried
             Blood Spots Collected under Adverse Field
             Conditions},
   Journal = {Biodemography and Social Biology},
   Volume = {64},
   Number = {1},
   Pages = {43--62},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1080/19485565.2018.1451300},
   Abstract = {Glycated hemoglobin (HbA1c) measured using high-performance
             liquid chromatography (HPLC) assays with venous blood and
             dried blood spots (DBS) are compared for 143 paired samples
             collected in Aceh, Indonesia. Relative to gold-standard
             venous-blood values, DBS-based values reported by the HPLC
             are systematically upward biased for HbA1c<8% and the
             fraction diabetic (HbA1c ≥ 6.5%) is overstated almost
             five-fold. Inspection of chromatograms from DBS assays
             indicates the % glycosylated calculated by the HPLC excludes
             part of the hemoglobin A which is misidentified as a
             hemoglobin variant. Taking this into account, unbiased
             DBS-based values are computed using data from the
             machine-generated chromatograms. When the DBS are collected
             in a clinic-like setting, under controlled
             humidity/temperature conditions, the recalculated values are
             almost identical to venous-based values. When DBS are
             collected under field conditions, the recalculated values
             are unbiased, but only about half the HbA1c values are
             measured reliably, calling into question the validity of the
             other half. The results suggest that collection conditions,
             particularly humidity, affect the quality of the DBS-based
             measures. Cross-validating DBS-based HbA1c values with
             venous samples collected under exactly the same
             environmental conditions is a prudent investment in
             population-based studies.},
   Doi = {10.1080/19485565.2018.1451300},
   Key = {fds336356}
}


%% Timmins, Christopher D.   
@article{fds342133,
   Author = {Freeman, R and Liang, W and Song, R and Timmins, C},
   Title = {Willingness to pay for clean air in {China}},
   Journal = {Journal of Environmental Economics and Management},
   Volume = {94},
   Pages = {188--216},
   Year = {2019},
   Month = {March},
   url = {http://dx.doi.org/10.1016/j.jeem.2019.01.005},
   Abstract = {© 2019 We use a residential sorting model incorporating
             migration disutility to recover the implicit value of clean
             air in China. The model is estimated using China Population
             Census Data along with PM2.5 satellite data. Our study
             provides new evidence on the willingness to pay for air
             quality improvements in developing countries and is the
             first application of an equilibrium sorting model to the
             valuation of non-market amenities in China. We employ two
             instrumental variables based on coal-fired electricity
             generation and wind direction to address the endogeneity of
             local air pollution. Results suggest important differences
             between the residential sorting model and a conventional
             hedonic model, highlighting the role of moving costs and the
             discreteness of the choice set. Our sorting results indicate
             that the economic value of air quality improvement
             associated with a one-unit decline in PM2.5 concentration is
             up to \$8.83 billion for all Chinese households in
             2005.},
   Doi = {10.1016/j.jeem.2019.01.005},
   Key = {fds342133}
}

@article{fds341378,
   Author = {Banzhaf, S and Ma, L and Timmins, C},
   Title = {Environmental Justice: the Economics of Race, Place, and
             Pollution},
   Journal = {The Journal of Economic Perspectives : a Journal of the
             American Economic Association},
   Volume = {33},
   Number = {1},
   Pages = {185--208},
   Year = {2019},
   Month = {January},
   Key = {fds341378}
}

@article{fds340432,
   Author = {Bishop, KC and Timmins, C},
   Title = {Estimating the marginal willingness to pay function without
             instrumental variables},
   Journal = {Journal of Urban Economics},
   Volume = {109},
   Pages = {66--83},
   Publisher = {Elsevier BV},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jue.2018.11.006},
   Abstract = {© 2018 Elsevier Inc. The hedonic model of Rosen (1974) has
             become a workhorse for valuing the characteristics of
             differentiated products despite a number of well-documented
             econometric problems, including a source of endogeneity that
             has proven difficult to overcome. Here we outline a simple,
             likelihood-based estimation approach for recovering the
             marginal willingness-to-pay function that avoids this
             endogeneity problem. Using this framework, we find that
             marginal willingness-to-pay to avoid violent crime increases
             by sixteen cents with each additional incident per 100,000
             residents. Accounting for the slope of the marginal
             willingness-to-pay function has significant impacts on
             welfare analyses.},
   Doi = {10.1016/j.jue.2018.11.006},
   Key = {fds340432}
}

@article{fds335440,
   Author = {Bishop, KC and Timmins, C},
   Title = {Using panel data to easily estimate hedonic demand
             functions},
   Journal = {Journal of the Association of Environmental and Resource
             Economists},
   Volume = {5},
   Number = {3},
   Pages = {517--543},
   Publisher = {University of Chicago Press},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1086/696981},
   Abstract = {© 2018 by The Association of Environmental and Resource
             Economists. The hedonics literature has often asserted that
             if one were able to observe the same individual make
             multiple purchase decisions, one could recover rich
             estimates of preference heterogeneity for a given amenity.
             In particular, in the face of a changing price schedule,
             observing each individual twice is sufficient to recover a
             linear demand function separately for each individual, with
             no additional restrictions. Constructing a rich panel data
             set of buyers, we recover the full distribution of demand
             functions for clean air in the Bay Area of California.
             First, we find that estimating the full demand function,
             rather than simply recovering a local estimate of marginal
             willingness to pay, is important. Second, we find evidence
             of considerable heterogeneity, which is important from a
             policy perspective; our data-driven estimates of the welfare
             effects associated with a nonmarginal change in air quality
             differ substantially from those recovered using the existing
             approaches to welfare estimation.},
   Doi = {10.1086/696981},
   Key = {fds335440}
}


%% Viswanathan, S.   
@article{fds341927,
   Author = {Rampini, AA and Viswanathan, S},
   Title = {Financial Intermediary Capital},
   Journal = {Review of Economic Studies},
   Pages = {413--455},
   Year = {2019},
   Month = {January},
   url = {http://dx.doi.org/10.1093/restud/rdy020},
   Abstract = {© The Author(s) 2018. We propose a dynamic theory of
             financial intermediaries that are better able to
             collateralize claims than households, that is, have a
             collateralization advantage. Intermediaries require capital
             as they have to finance the additional amount that they can
             lend out of their own net worth. The net worth of financial
             intermediaries and the corporate sector are both state
             variables affecting the spread between intermediated and
             direct finance and the dynamics of real economic activity,
             such as investment, and financing. The accumulation of net
             worth of intermediaries is slow relative to that of the
             corporate sector. The model is consistent with key stylized
             facts about macroeconomic downturns associated with a credit
             crunch, namely, their severity, their protractedness, and
             the fact that the severity of the credit crunch itself
             affects the severity and persistence of downturns. The model
             captures the tentative and halting nature of recoveries from
             crises.},
   Doi = {10.1093/restud/rdy020},
   Key = {fds341927}
}


%% Weintraub, E. Roy   
@article{fds342583,
   Author = {Weintraub, ER},
   Title = {{Keynes} as policy adviser},
   Journal = {History of Political Economy},
   Volume = {51},
   Number = {1},
   Pages = {77--81},
   Publisher = {Duke University Press},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1215/00182702-7289264},
   Doi = {10.1215/00182702-7289264},
   Key = {fds342583}
}

@article{fds342582,
   Author = {Weintraub, ER},
   Title = {{Craufurd Goodwin} and {Duke University}, 1955--1970},
   Journal = {History of Political Economy},
   Volume = {51},
   Number = {1},
   Pages = {129--135},
   Publisher = {Duke University Press},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1215/00182702-7289336},
   Doi = {10.1215/00182702-7289336},
   Key = {fds342582}
}

@misc{fds342584,
   Author = {Düppe, T and Weintraub, ER},
   Title = {Contemporary historiography of economics: Editors’
             introduction},
   Volume = {50},
   Pages = {551--553},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1215/00182702-7023470},
   Doi = {10.1215/00182702-7023470},
   Key = {fds342584}
}

@book{fds332327,
   Author = {Düppe, T and Weintraub, ER},
   Title = {Contemporary Historiography of Economics},
   Pages = {248 pages},
   Publisher = {Routledge},
   Editor = {Weintraub, ER and Düppe, T},
   Year = {2018},
   ISBN = {1138049956},
   Abstract = {This book brings together leading contributors to provide,
             for the first time, a methodological overview of the
             historiography of economics.},
   Key = {fds332327}
}

@article{fds332852,
   Author = {Weintraub, ER and Düppe, T},
   Title = {Introduction: Contemporary Historiography of
             Economics},
   Journal = {History of Political Economy},
   Volume = {50},
   Number = {3},
   Pages = {551--553},
   Publisher = {Duke University Press},
   Year = {2018},
   url = {http://dx.doi.org/10.1215/00182702-7023470},
   Doi = {10.1215/00182702-7023470},
   Key = {fds332852}
}


%% Weller, Brian M   
@article{fds338585,
   Author = {Patton, AJ and Weller, B},
   Title = {What You See Is Not What You Get: The Costs of Trading
             Market Anomalies},
   Journal = {Economic Research Initiatives at Duke (Erid) Working
             Paper},
   Number = {255},
   Year = {2019},
   Month = {May},
   Key = {fds338585}
}

@article{fds318189,
   Author = {Weller, B},
   Title = {Measuring Tail Risks at High Frequency},
   Year = {2018},
   Month = {October},
   Abstract = {I develop a new methodology for measuring tail risks
             using the cross section of bid-ask spreads. Market makers
             embed tail risk information into spreads because (1) they
             lose to arbitrageurs when changes to asset values exceed
             the cost of liquidity and (2) underlying price movements
             and potential costs are linear in factor loadings. Using
             this insight, simple cross-sectional regressions relating
             spreads and trading volume to factor betas can recover
             tail risks in real time for priced or non-priced return
             factors. The methodology disentangles financial and
             aggregate market risks during the 2007-2008 Financial
             Crisis; anticipates jump risks associated with Federal
             Open Market Committee announcements; and quantifies a
             sharp, temporary increase in market tail risk before and
             throughout the 2010 Flash Crash. The recovered time series
             of implied market risks also aligns closely with both
             realized market jumps and the VIX.},
   Key = {fds318189}
}

@article{fds318190,
   Author = {Weller, BM},
   Title = {Does algorithmic trading reduce information
             acquisition?},
   Journal = {Review of Financial Studies},
   Volume = {31},
   Number = {6},
   Pages = {2184--2226},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1093/rfs/hhx137},
   Abstract = {© The Author(s) 2018. I demonstrate an important tension
             between acquiring information and incorporating it into
             asset prices. As a salient case, I analyze algorithmic
             trading (AT), which is typically associated with improved
             price efficiency. Using a new measure of the information
             content of prices and a comprehensive panel of 54,879
             stock-quarters of Securities and Exchange Commission (SEC)
             market data, I establish instead that the amount of
             information in prices decreases by 9% to 13% per standard
             deviation of AT activity and up to a month before scheduled
             disclosures. AT thus may reduce price informativeness
             despite its importance for translating available information
             into prices.},
   Doi = {10.1093/rfs/hhx137},
   Key = {fds318190}
}

@article{fds333310,
   Author = {Patton, AJ and Weller, B},
   Title = {What You See Is Not What You Get: The Costs of
             Trading Market Anomalies},
   Journal = {Economic Research Initiatives at Duke (Erid)
             Working Paper},
   Number = {255},
   Year = {2018},
   Month = {March},
   Key = {fds333310}
}


%% Xu, Daniel Yi   
@article{fds338620,
   Author = {Cabral, L and Wang, Z and Xu, DY},
   Title = {Competitors, complementors, parents and places: Explaining
             regional agglomeration in the {U.S.} auto industry},
   Journal = {Review of Economic Dynamics},
   Pages = {1--29},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1016/j.red.2018.01.006},
   Abstract = {© 2018 Elsevier Inc. Taking the early U.S. automobile
             industry as an example, we evaluate four competing
             hypotheses on regional industry agglomeration:
             intra-industry local externalities, inter-industry local
             externalities, employee spinouts, and location fixed
             effects. Our findings suggest that in the automobile case,
             inter-industry local externalities (particularly from the
             carriage and wagon industry) and employee spinouts
             (particularly due to the high spinout rate in Detroit) play
             important roles. The presence of other firms in the same
             industry has a negligible or negative effect. Finally, local
             inputs account for some agglomeration in the short run, but
             the effects are much more profound in the long
             run.},
   Doi = {10.1016/j.red.2018.01.006},
   Key = {fds338620}
}

@article{fds339234,
   Author = {Roberts, MJ and Xu, DY and Fan, X and Zhang, S},
   Title = {The role of firm factors in demand, cost, and export market
             selection for {Chinese} footwear producers},
   Journal = {Review of Economic Studies},
   Volume = {85},
   Number = {4},
   Pages = {2429--2461},
   Publisher = {Oxford University Press (OUP)},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1093/restud/rdx066},
   Abstract = {© The Author(s) 2017. Published by Oxford University Press
             on behalf of The Review of Economic Studies Limited. In this
             article, we use micro data on both trade and production for
             a sample of large Chinese manufacturing firms in the
             footwear industry from 2002 to 2006 to estimate an empirical
             model of export demand, pricing, and market participation by
             destination market. We use the model to construct indexes of
             firm-level demand, marginal cost, and fixed cost. The
             empirical results indicate substantial firm heterogeneity in
             all three dimension with demand being the most dispersed.
             The firm-specific demand and marginal cost components
             account for over 30% of market share variation, 40% of sales
             variation, and over 50% of price variation among exporters.
             The fixed cost index is the primary factor explaining
             differences in the pattern of destination markets across
             firms. The estimates are used to analyse the supply
             reallocation following the removal of the quota on Chinese
             footwear exports to the EU. This led to a rapid
             restructuring of export supply sources on both the intensive
             and extensive margins in favour of firms with high demand
             and low fixed costs indexes, with marginal cost differences
             not being important.},
   Doi = {10.1093/restud/rdx066},
   Key = {fds339234}
}

@article{fds343255,
   Author = {Chen, Z and Liu, Z and Suárez Serrato, JC and Xu,
             DY},
   Title = {Notching {R\&D} Investment with Corporate Income Tax Cuts in
             {China}},
   Year = {2018},
   Month = {June},
   Key = {fds343255}
}

@article{fds331945,
   Author = {Fieler, AC and Eslava, M and Xu, DY},
   Title = {Trade, quality upgrading, and input linkages: Theory and
             evidence from {Colombia}},
   Journal = {American Economic Review},
   Volume = {108},
   Number = {1},
   Pages = {109--146},
   Publisher = {American Economic Association},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1257/aer.20150796},
   Abstract = {A quantitative model brings together theories linking
             international trade to quality, technology, and demand for
             skills. Standard effects of trade on importers and exporters
             are magnified through domestic input linkages. We estimate
             the model with data from Colombian manufacturing firms before
             the 1991 trade liberalization. A counterfactual trade
             liberalization is broadly consistent with postliberalization
             data. It increases skill intensity from 12 to 16 percent,
             while decreasing sales. Imported inputs, estimated to be of
             higher quality, and domestic input linkages are
             quantitatively important. Economies of scale, export
             expansion, and reallocation of production are small and
             cannot explain post-liberalization data.},
   Doi = {10.1257/aer.20150796},
   Key = {fds331945}
}

@article{fds331308,
   Author = {Xu, DY},
   Title = {Comments on “Innovation and product reallocation in the
             great recession”},
   Journal = {Journal of Monetary Economics},
   Volume = {93},
   Pages = {21--23},
   Publisher = {Elsevier BV},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jmoneco.2017.10.002},
   Doi = {10.1016/j.jmoneco.2017.10.002},
   Key = {fds331308}
}


%% Yashkin, Arseniy   
@article{fds343334,
   Author = {Akushevich, I and Kravchenko, J and Yashkin, AP and Fang, F and Yashin,
             AI},
   Title = {Partitioning of time trends in prevalence and mortality of
             lung cancer},
   Journal = {Statistics in Medicine},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.1002/sim.8170},
   Abstract = {BACKGROUND: Time trends of lung cancer prevalence and
             mortality are the result of three competing processes:
             changes in the incidence rate, stage-specific survival, and
             ascertainment at early stages. Improvements in these
             measures act concordantly to improve disease-related
             mortality, but push the prevalence rate in opposite
             directions making a qualitative interpretation difficult.
             The goal of this paper is to evaluate the relative
             contributions of these components to changes in lung cancer
             prevalence and mortality. METHODS: Partitioning of prevalence
             and mortality trends into their components using SEER data
             for 1973-2013. RESULTS: The prevalence of lung cancer
             increases for females and decreases for males. In 1998, the
             former was due to increased incidence (45%-50% of total
             trend), improved survival (40%-45%), and increased
             ascertainment at early stages (10%-15%). In males, a rapidly
             declining incidence rate overpowered the effects of survival
             and ascertainment resulting in an overall decrease in
             prevalence over time. Trends in lung cancer mortality are
             determined by incidence during 1993-2002 with noticeable
             contribution of survival after 2002. CONCLUSION: Lung cancer
             incidence was the main driving force behind trends in
             prevalence and mortality. Improved survival played essential
             role from 2000 onwards. Trends in stage ascertainment played
             a small but adverse role. Our results suggest that further
             improvement in lung cancer mortality can be achieved through
             advances in early stage ascertainment, especially for males,
             and that in spite of success in treatment, adenocarcinoma
             continues to exhibit adverse trends (especially in female
             incidence) and its role among other histology-specific lung
             cancers will increase in the near future.},
   Doi = {10.1002/sim.8170},
   Key = {fds343334}
}

@article{fds342134,
   Author = {Akushevich, I and Yashkin, A and Kravchenko, J and Fang, F and Arbeev,
             K and Sloan, F and Yashin, AI},
   Title = {A forecasting model of disease prevalence based on the
             {McKendrick-von Foerster} equation},
   Journal = {Mathematical Biosciences},
   Volume = {311},
   Pages = {31--38},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.1016/j.mbs.2018.12.017},
   Abstract = {A new model for disease prevalence based on the analytical
             solutions of McKendrick-von Foerster's partial differential
             equations is developed. Derivation of the model and methods
             to cross check obtained results are explicitly demonstrated.
             Obtained equations describe the time evolution of the
             healthy and unhealthy age-structured sub-populations and age
             patterns of disease prevalence. The projection of disease
             prevalence into the future requires estimates of time trends
             of age-specific disease incidence, relative survival
             functions, and prevalence at the initial age and year
             available in the data. The computational scheme for
             parameter estimations using Medicare data, analytical
             properties of the model, application for diabetes
             prevalence, and relationship with partitioning models are
             described and discussed. The model allows natural
             generalization for the case of several diseases as well as
             for modeling time trends in cause-specific mortality
             rates.},
   Doi = {10.1016/j.mbs.2018.12.017},
   Key = {fds342134}
}

@article{fds343590,
   Author = {Nazarian, A and Arbeev, KG and Yashkin, AP and Kulminski,
             AM},
   Title = {Genetic heterogeneity of {Alzheimer's} disease in subjects
             with and without hypertension},
   Journal = {Geroscience},
   Volume = {41},
   Number = {2},
   Pages = {137--154},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1007/s11357-019-00071-5},
   Abstract = {Alzheimer's disease (AD) is a progressive neurodegenerative
             disorder caused by the interplay of multiple genetic and
             non-genetic factors. Hypertension is one of the AD risk
             factors that has been linked to underlying pathological
             changes like senile plaques and neurofibrillary tangles
             formation as well as hippocampal atrophy. In this study, we
             investigated the differences in the genetic architecture of
             AD between hypertensive and non-hypertensive subjects in
             four independent cohorts. Our genome-wide association
             analyses revealed significant associations of 15 novel
             potentially AD-associated polymorphisms (P < 5E-06) that
             were located outside the chromosome 19q13 region and were
             significant either in hypertensive or non-hypertensive
             groups. The closest genes to 14 polymorphisms were not
             associated with AD at P < 5E-06 in previous genome-wide
             association studies (GWAS). Also, four of them were located
             within two chromosomal regions (i.e., 3q13.11 and 17q21.2)
             that were not associated with AD at P < 5E-06 before. In
             addition, 30 genes demonstrated evidence of group-specific
             associations with AD at the false discovery rates (FDR)
             < 0.05 in our gene-based and transcriptome-wide
             association analyses. The chromosomal regions corresponding
             to four genes (i.e., 2p13.1, 9p13.3, 17q12, and 18q21.1)
             were not associated with AD at P < 5E-06 in previous
             GWAS. These genes may serve as a list of prioritized
             candidates for future functional studies. Our
             pathway-enrichment analyses revealed the associations of 11
             non-group-specific and four group-specific pathways with AD
             at FDR < 0.05. These findings provided novel insights
             into the potential genetic heterogeneity of AD among
             subjects with and without hypertension.},
   Doi = {10.1007/s11357-019-00071-5},
   Key = {fds343590}
}

@article{fds335441,
   Author = {Akushevich, I and Kravchenko, J and Yashkin, AP and Yashin,
             AI},
   Title = {Time trends in the prevalence of cancer and non-cancer
             diseases among older {U.S.} adults: {Medicare}-based
             analysis},
   Journal = {Experimental Gerontology},
   Volume = {110},
   Pages = {267--276},
   Year = {2018},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.exger.2018.06.017},
   Abstract = {Longer lifespan is accompanied by a larger number of chronic
             diseases among older adults. Because of a growing proportion
             of older adults in the U.S., this brings the problem of
             age-related morbidity to the forefront as a major
             contributor to rising medical expenditures. We evaluated
             15-year time trends (from 1998 to 2013) in the prevalence of
             48 acute and chronic non-cancer diseases and cancers in
             older U.S. adults aged 65+ and estimated the annual
             percentage changes of these prevalence trends using
             SEER-Medicare and HRS-Medicare data. We found that
             age-adjusted prevalence of cancers of kidney, pancreas, and
             melanoma, as well as diabetes, renal disease, limb fracture,
             depression, anemia, weight deficiency, dementia/Alzheimer's
             disease, drug/medications abuse and several other
             diseases/conditions increased over time. Conversely,
             prevalence of myocardial infarction, heart failure,
             cardiomyopathy, pneumonia/influenza, peptic ulcer, and
             gastrointestinal bleeding, among others, decreased over
             time. There are also diseases whose prevalence did not
             change substantially over time, e.g., a group of fast
             progressing cancers and rheumatoid arthritis. Analysis of
             trends of multiple diseases performed simultaneously within
             one study design with focus on the same time interval and
             the same population for all diseases allowed us to provide
             insight into the epidemiology of these conditions and
             identify the most alarming and/or unexpected trends and
             trade-offs. The obtained results can be used for health
             expenditures planning for growing sector of older adults in
             the U.S.},
   Doi = {10.1016/j.exger.2018.06.017},
   Key = {fds335441}
}

@article{fds330152,
   Author = {Yashin, AI and Fang, F and Kovtun, M and Wu, D and Duan, M and Arbeev, K and Akushevich, I and Kulminski, A and Culminskaya, I and Zhbannikov, I and Yashkin, A and Stallard, E and Ukraintseva, S},
   Title = {Hidden heterogeneity in {Alzheimer's} disease: Insights from
             genetic association studies and other analyses},
   Journal = {Experimental Gerontology},
   Volume = {107},
   Pages = {148--160},
   Year = {2018},
   Month = {July},
   url = {http://dx.doi.org/10.1016/j.exger.2017.10.020},
   Abstract = {Despite evident success in clarifying many important
             features of Alzheimer's disease (AD) the efficient methods
             of its prevention and treatment are not yet available. The
             reasons are likely to be the fact that AD is a
             multifactorial and heterogeneous health disorder with
             multiple alternative pathways of disease development and
             progression. The availability of genetic data on individuals
             participated in longitudinal studies of aging health and
             longevity, as well as on participants of cross-sectional
             case-control studies allow for investigating genetic and
             non-genetic connections with AD and to link the results of
             these analyses with research findings obtained in clinical,
             experimental, and molecular biological studies of this
             health disorder. The objective of this paper is to perform
             GWAS of AD in several study populations and investigate
             possible roles of detected genetic factors in developing AD
             hallmarks and in other health disorders. The data collected
             in the Framingham Heart Study (FHS), Cardiovascular Health
             Study (CHS), Health and Retirement Study (HRS) and Late
             Onset Alzheimer's Disease Family Study (LOADFS) were used in
             these analyses. The logistic regression and Cox's regression
             were used as statistical models in GWAS. The results of
             analyses confirmed strong associations of genetic variants
             from well-known genes APOE, TOMM40, PVRL2 (NECTIN2), and
             APOC1 with AD. Possible roles of these genes in pathological
             mechanisms resulting in development of hallmarks of AD are
             described. Many genes whose connection with AD was detected
             in other studies showed nominally significant associations
             with this health disorder in our study. The evidence on
             genetic connections between AD and vulnerability to
             infection, as well as between AD and other health disorders,
             such as cancer and type 2 diabetes, were investigated. The
             progress in uncovering hidden heterogeneity in AD would be
             substantially facilitated if common mechanisms involved in
             development of AD, its hallmarks, and AD related chronic
             conditions were investigated in their mutual
             connection.},
   Doi = {10.1016/j.exger.2017.10.020},
   Key = {fds330152}
}

@article{fds333664,
   Author = {Akushevich, I and Yashkin, AP and Kravchenko, J and Fang, F and Arbeev,
             K and Sloan, F and Yashin, AI},
   Title = {Identifying the causes of the changes in the prevalence
             patterns of diabetes in older {U.S.} adults: A new trend
             partitioning approach.},
   Journal = {J Diabetes Complications},
   Volume = {32},
   Number = {4},
   Pages = {362--367},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1016/j.jdiacomp.2017.12.014},
   Abstract = {AIMS: To identify how efforts to control the diabetes
             epidemic and the resulting changes in diabetes mellitus,
             type II (T2D) incidence and survival have affected the
             time-trend of T2D prevalence. METHODS: A newly developed
             method of trend decomposition was applied to a 5% sample of
             Medicare administrative claims filed between 1991 and 2012.
             RESULTS: Age-adjusted prevalence of T2D for adults age 65+
             increased at an average annual percentage change of 2.31%
             between 1992 and 2012. Primary contributors to this trend
             were (in order of magnitude): improved survival at all ages,
             increased prevalence of T2D prior to age of Medicare
             eligibility, decreased incidence of T2D after age of
             Medicare eligibility. CONCLUSIONS: Health services supported
             by the Medicare system, coupled with improvements in medical
             technology and T2D awareness efforts provide effective care
             for individuals age 65 and older. However, policy maker
             attention should be shifted to the prevention of T2D in
             younger age groups to control the increase in prevalence
             observed prior to Medicare eligibility.},
   Doi = {10.1016/j.jdiacomp.2017.12.014},
   Key = {fds333664}
}

@article{fds333203,
   Author = {Yashkin, AP and Sloan, F},
   Title = {Adherence to Guidelines for Screening and Medication Use:
             Mortality and Onset of Major Macrovascular Complications in
             Elderly Persons With Diabetes Mellitus.},
   Journal = {Journal of Aging and Health},
   Volume = {30},
   Number = {4},
   Pages = {503--520},
   Publisher = {SAGE Publications},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1177/0898264316684270},
   Abstract = {OBJECTIVE:The objective of this study is to investigate
             relationships between adherence to recommended screening and
             medication use and severe macrovascular complications and
             all-cause mortality among persons aged above 68 years with
             diabetes mellitus (DM). METHOD:Data came from a 5% Medicare
             claims sample of beneficiaries initially diagnosed with DM
             during 2006-2008; follow-up was up to 7 years.
             RESULTS:Adherence to screening guidelines led to reduced
             mortality-hazard ratio (HR) = 0.57, 95% confidence interval
             [CI] = [0.56, 0.58]; congestive heart failure [CHF], HR =
             0.89, CI = [0.87, 0.91]; acute myocardial infarction [AMI],
             HR = 0.90, CI = [0.85, 0.95]; and stroke/transient ischemic
             attack [Stroke/TIA], HR = 0.92, CI = [0.87, 0.97]-during
             follow-up. Recommended medication use led to lower
             mortality: HR = 0.72, CI = [0.70, 0.73]; CHF, HR = 0.67, CI
             = [0.66, 0.69]; AMI, HR = 0.68, CI = [0.65, 0.71]; and
             Stroke/TIA, HR = 0.79, CI = [0.76, 0.83]. DISCUSSION:Elderly
             persons newly diagnosed with diabetes who adhered to
             recommended care experienced reduced risk of mortality and
             severe macrovascular complications.},
   Doi = {10.1177/0898264316684270},
   Key = {fds333203}
}

@article{fds333750,
   Author = {Kulminski, AM and Huang, J and Loika, Y and Arbeev, KG and Bagley, O and Yashkin, A and Duan, M and Culminskaya, I},
   Title = {Strong impact of natural-selection-free heterogeneity in
             genetics of age-related phenotypes.},
   Journal = {Aging},
   Volume = {10},
   Number = {3},
   Pages = {492--514},
   Year = {2018},
   Month = {March},
   url = {http://dx.doi.org/10.18632/aging.101407},
   Abstract = {A conceptual difficulty in genetics of age-related
             phenotypes that make individuals vulnerable to disease in
             post-reproductive life is genetic heterogeneity attributed
             to an undefined role of evolution in establishing their
             molecular mechanisms. Here, we performed univariate and
             pleiotropic genome-wide meta-analyses of 20 age-related
             phenotypes leveraging longitudinal information in a sample
             of 33,431 individuals and dealing with the
             natural-selection-free genetic heterogeneity. We identified
             142 non-proxy single nucleotide polymorphisms (SNPs) with
             phenotype-specific (18 SNPs) and pleiotropic (124 SNPs)
             associations at genome-wide level. Univariate meta-analysis
             identified two novel (11.1%) and replicated 16 SNPs whereas
             pleiotropic meta-analysis identified 115 novel (92.7%) and
             nine replicated SNPs. Pleiotropic associations for most
             novel (93.9%) and all replicated SNPs were strongly impacted
             by the natural-selection-free genetic heterogeneity in its
             unconventional form of antagonistic heterogeneity, implying
             antagonistic directions of genetic effects for directly
             correlated phenotypes. Our results show that the common
             genome-wide approach is well adapted to handle homogeneous
             univariate associations within Mendelian framework whereas
             most associations with age-related phenotypes are more
             complex and well beyond that framework. Dissecting the
             natural-selection-free genetic heterogeneity is critical for
             gaining insights into genetics of age-related phenotypes and
             has substantial and unexplored yet potential for improving
             efficiency of genome-wide analysis.},
   Doi = {10.18632/aging.101407},
   Key = {fds333750}
}

@article{fds332155,
   Author = {Yashkin, AP and Kravchenko, J and Yashin, AI and Sloan,
             F},
   Title = {Mortality and Macrovascular Risk in Elderly With
             Hypertension and Diabetes: Effect of Intensive Drug
             Therapy.},
   Journal = {Am J Hypertens},
   Volume = {31},
   Number = {2},
   Pages = {220--227},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1093/ajh/hpx151},
   Abstract = {BACKGROUND: This study identifies the effect of intensive
             drug therapy (IDT) in individuals age 65+ with diabetes
             (type 2 diabetes mellitus (T2D)) and hypertension on
             all-cause death, congestive heart failure (CHF),
             hospitalization for myocardial infarction (MI), and stroke
             or transient ischemic attack (TIA). METHODS: Individuals
             from the Medicare 5% dataset with hypertension and T2D
             undergoing IDT for these conditions were propensity score
             matched to a nonintensive drug-therapy group. Hazard ratios
             (HRs) were obtained using the Cox proportional hazard model.
             RESULTS: IDT was associated with increased risk of CHF (HR
             2.32; 95% confidence interval (CI) 2.32-2.38), MI (HR 4.27;
             95% CI 4.05-4.52), and stroke or TIA (HR 1.80; 95% CI
             1.70-1.89) but decreased risk of death (HR 0.95; 95% CI
             0.93-0.97). Risk for CHF (HR 0.73; 95% CI 0.71-0.73), MI (HR
             0.64; 95% CI 0.62-0.67), stroke or TIA (HR 0.82; 95% CI
             0.78-0.86), and death (HR 0.29; 95% CI 0.28-0.29) was
             decreased by adherence to diabetes management guidelines.
             CONCLUSIONS: Use of IDT in a high-risk population delays
             death but not severe macrovascular outcomes. Protective
             effects of IDT in high-risk patients likely outweigh
             polypharmacy-related health concerns.},
   Doi = {10.1093/ajh/hpx151},
   Key = {fds332155}
}

@article{fds335442,
   Author = {Akushevich, I and Yashkin, AP and Kravchenko, J and Ukraintseva, S and Stallard, E and Yashin, AI},
   Title = {Time Trends in the Prevalence of Neurocognitive Disorders
             and Cognitive Impairment in the {United States}: The Effects
             of Disease Severity and Improved Ascertainment.},
   Journal = {J Alzheimers Dis},
   Volume = {64},
   Number = {1},
   Pages = {137--148},
   Year = {2018},
   url = {http://dx.doi.org/10.3233/JAD-180060},
   Abstract = {BACKGROUND: Trends in the prevalence of cognitive impairment
             (CI) based on cognitive assessment instruments are often
             inconsistent with those of neurocognitive disorders (ND)
             based on Medicare claims records. OBJECTIVE: We hypothesized
             that improved ascertainment and resulting decrease in
             disease severity at the time of diagnosis are responsible
             for this phenomenon. METHODS: Using Medicare data linked to
             the Health and Retirement Study (1992-2012), we performed a
             joint analysis of trends in CI and ND to test our
             hypothesis. RESULTS: We identified two major contributors to
             the divergent directions in CI and ND trends: reductions in
             disease severity explained more than 60% of the differences
             between CI and ND prevalence over the study period; the
             remaining 40% was explained by a decrease in the fraction of
             undiagnosed individuals. DISCUSSION: Improvements in the
             diagnoses of ND diseases were a major contributor to
             reported trends in ND and CI. Recent forecasts of CI and ND
             trends in the U.S. may be overly pessimistic.},
   Doi = {10.3233/JAD-180060},
   Key = {fds335442}
}


%% Yildirim, Huseyin   
@article{fds343583,
   Author = {Kesten, O and Meyer-ter-Vehn, M and Yildirim, H},
   Title = {Letter from the editors},
   Journal = {Review of Economic Design},
   Volume = {23},
   Number = {1--2},
   Year = {2019},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s10058-019-00222-2},
   Doi = {10.1007/s10058-019-00222-2},
   Key = {fds343583}
}

@article{fds325838,
   Author = {Name-Correa, AJ and Yildirim, H},
   Title = {A capture theory of committees},
   Journal = {Public Choice},
   Volume = {177},
   Number = {1--2},
   Pages = {135--154},
   Publisher = {Springer Nature America, Inc},
   Year = {2018},
   Month = {October},
   url = {http://dx.doi.org/10.1007/s11127-018-0593-6},
   Abstract = {© 2018, Springer Science+Business Media, LLC, part of
             Springer Nature. Why do committees exist? The extant
             literature emphasizes that they pool dispersed information
             across members. In this paper, we argue that they may also
             serve to discourage outside influence or capture by raising
             its cost. As such, committees may contain members who are
             uninformed or who add no new information to the collective
             decision. We show that the optimal committee is larger when
             outsiders have larger stakes in its decision or contribute
             lower-quality proposals, or when its members are more
             corruptible. We also show that keeping committee members
             anonymous and accountable for their votes helps deter
             capture.},
   Doi = {10.1007/s11127-018-0593-6},
   Key = {fds325838}
}

@article{fds342232,
   Author = {Name-Correa, A and Yildirim, H},
   Title = {Biased Experts, Majority Rule, and the Optimal Composition
             of Committee},
   Journal = {Economic Research Initiatives at Duke (Erid) Working
             Paper},
   Number = {268},
   Year = {2018},
   Month = {June},
   Key = {fds342232}
}


Duke University * Arts & Sciences * Economics * Faculty * Research * Staff * Master's * Ph.D. * Reload * Login