Economics Faculty Database, Duke University

Publications of A. Ronald Gallant

%% Journal Articles   
@article{fds266639,
   Author = {AR Gallant},
   Title = {Some arguments against the use of statistical packages in
              teaching statistical methods},
   Pages = {223-225},
   Year = {1973},
   Abstract = {Some points in favor of the use of a simple programming
             language in teaching statistical methods rather than a
             statistical package are presented.},
   Key = {fds266639}
}

@article{fds266640,
   Author = {NPC Chao and JA Cuculo and AR Gallant and TW George},
   Title = {Statistical method for determining the glass transition
              temperature from dilatometric data},
   Journal = {Appl Polym Symp},
   Number = {27},
   Pages = {193-204},
   Year = {1975},
   Abstract = {An objective procedure for estimating the glass transition
              temperature (Tg) from dilatometric data is described. The
              method uses the technique of fitting a segmented linear
              regression model by least squares. The regression model
              may be specified so as to allow a transition of the first
              order in the thermodynamic sense or may be constrained to
              fit a second-order transition. Methods are given for
              finding statistical confidence intervals for the estimated
              glass transition temperature (Tg). Experimental data
              obtained from PET (polyethylene terephthalate) fiber are
              used for illustration; these data indicate a preference for
              the second-order transition model.},
   Key = {fds266640}
}
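
A minimal sketch of the segmented-least-squares idea described in this
abstract, assuming a simple grid search over candidate breakpoints; the
paper's actual fitting and confidence-interval procedures are more
refined, and the data and names below are illustrative.

   import numpy as np

   def fit_segmented(temp, vol):
       """Grid-search the breakpoint Tg minimizing the total SSE of two lines."""
       best_sse, best_tg = np.inf, None
       for tg in np.linspace(temp.min(), temp.max(), 201)[1:-1]:
           sse = 0.0
           for mask in (temp <= tg, temp > tg):
               if mask.sum() < 3:              # need enough points per segment
                   sse = np.inf
                   break
               X = np.column_stack([np.ones(mask.sum()), temp[mask]])
               _, res, *_ = np.linalg.lstsq(X, vol[mask], rcond=None)
               sse += float(res[0]) if res.size else 0.0
           if sse < best_sse:
               best_sse, best_tg = sse, tg
       return best_tg

   # Synthetic dilatometric-style data with a slope change at Tg = 75
   rng = np.random.default_rng(0)
   t = np.linspace(25.0, 125.0, 60)
   v = np.where(t < 75.0, 0.001 * t, 0.075 + 0.004 * (t - 75.0))
   v = v + rng.normal(0.0, 0.002, t.size)
   print("estimated Tg:", fit_segmented(t, v))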

@article{fds266641,
   Author = {AR Gallant},
   Title = {Seemingly unrelated nonlinear regressions},
   Journal = {Journal of Econometrics},
   Volume = {3},
   Number = {1},
   Pages = {35-50},
   Year = {1975},
   ISSN = {0304-4076},
   Abstract = {The article considers the estimation of the parameters of a
             set of nonlinear regression equations when the responses are
             contemporaneously but not serially correlated. Conditions
             are set forth such that the estimator obtained is strongly
             consistent, asymptotically normally distributed, and
             asymptotically more efficient than the single-equation least
             squares estimator. The methods presented allow estimation of
             the parameters subject to nonlinear restrictions across
             equations. The article includes a discussion of methods to
             perform the computations and a Monte Carlo simulation. ©
             1975.},
   Key = {fds266641}
}

@article{fds266642,
   Author = {CR Shumway and PM Maher and MR Baker and WE Souder and AH Rubenstein and AR Gallant},
   Title = {Diffuse decision-making in hierarchical organizations: An
              empirical examination},
   Journal = {Management Science},
   Volume = {21},
   Number = {6},
   Pages = {697-707},
   Year = {1975},
   Abstract = {The applied research resource allocation decision process in
             a complex, hierarchical federal organization is explored in
             this paper. This decision process includes the
             identification of research objectives and the funding of
             projects selected to achieve the objectives. The
             hierarchical, geographical, and temporal diffuseness of
             participation in the decision process is
             described.},
   Key = {fds266642}
}

@article{fds266644,
   Author = {AR Gallant},
   Title = {Three-stage least-squares estimation for a system of
             simultaneous, nonlinear, implicit equations},
   Journal = {Journal of Econometrics},
   Volume = {5},
   Number = {1},
   Pages = {71-88},
   Year = {1977},
   ISSN = {0304-4076},
   Abstract = {The article describes a nonlinear three-stage least-squares
             estimator for the parameters of a system of simultaneous,
             nonlinear, implicit equations; the method allows the
             estimation of these parameters subject to nonlinear
             parametric restrictions across equations. The estimator is
             shown to be strongly consistent, asymptotically normally
             distributed, and more efficient than the nonlinear two-stage
             least-squares estimator. Some practical implications of the
             regularity conditions used to obtain these results are
             discussed from the point of view of one whose interest is in
              applications. Also, computing methods using readily
             available nonlinear regression programs are described. ©
             1977.},
   Key = {fds266644}
}

@article{fds266643,
   Author = {AR Gallant and DW Jorgenson},
   Title = {Statistical inference for a system of simultaneous,
             non-linear, implicit equations in the context of
             instrumental variable estimation},
   Journal = {Journal of Econometrics},
   Volume = {11},
   Number = {2-3},
   Pages = {275-302},
   Year = {1979},
   ISSN = {0304-4076},
   Abstract = {Statistical inference for a system of simultaneous,
             non-linear, implicit equations is discussed. The discussion
             considers inference as an adjunct to two- and three-stage
             least squares estimation rather than in a general setting.
              For both of these cases the non-null asymptotic
              distributions of a test statistic based on the optimization
              criterion and of a test based on the asymptotic
              distribution of the estimator are found, four tests in all.
              It is argued that the tests based
             on the optimization criterion are to be preferred in
             applications. The methods are illustrated by application to
             hypotheses implied by the theory of demand using a translog
             expenditure system and data on personal consumption
             expenditures for durables, non-durables, and energy for the
              period 1947-1971. © 1979.},
   Key = {fds266643}
}

@article{fds266646,
   Author = {AR Gallant and TM Gerig},
   Title = {Computations for constrained linear models},
   Journal = {Journal of Econometrics},
   Volume = {12},
   Number = {1},
   Pages = {59-84},
   Year = {1980},
   ISSN = {0304-4076},
   Abstract = {The article presents an algorithm for linear regression
             computations subject to linear parametric equality
             constraints, linear parametric inequality constraints, or a
             mixture of the two. No rank conditions are imposed on the
             regression specification or the constraint specification.
             The algorithm requires a full Moore-Penrose g-inverse which
             entails extra computational effort relative to other
             orthonormalization type algorithms. In exchange, auxiliary
             statistical information is generated: feasibility of a set
             of constraints may be checked, estimability of a linear
             parametric function may be checked, and bias and variance
             may be decomposed by source. © 1980.},
   Key = {fds266646}
}
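
A minimal sketch, for the equality-constraint case only, of least
squares subject to R beta = r computed with Moore-Penrose
pseudoinverses, in the spirit of the g-inverse computations the
abstract mentions; the paper's algorithm also handles inequality
constraints and produces auxiliary statistical information, none of
which is attempted here.

   import numpy as np

   def constrained_ls(X, y, R, r):
       """Minimize ||y - X @ beta|| subject to R @ beta = r (assumed feasible)."""
       Rp = np.linalg.pinv(R)
       beta0 = Rp @ r                       # a particular solution of R beta = r
       N = np.eye(X.shape[1]) - Rp @ R      # projector onto the null space of R
       z = np.linalg.pinv(X @ N) @ (y - X @ beta0)
       return beta0 + N @ z

   rng = np.random.default_rng(1)
   X = rng.normal(size=(50, 3))
   y = X @ np.array([1.0, 2.0, 3.0]) + rng.normal(size=50)
   R, r = np.array([[1.0, 1.0, 0.0]]), np.array([3.5])   # beta1 + beta2 = 3.5
   b = constrained_ls(X, y, R, r)
   print(b, "constraint residual:", R @ b - r)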

@article{fds266645,
   Author = {AR Gallant},
   Title = {On the bias in flexible functional forms and an essentially
              unbiased form. The Fourier flexible form},
   Journal = {Journal of Econometrics},
   Volume = {15},
   Number = {2},
   Pages = {211-245},
   Year = {1981},
   ISSN = {0304-4076},
   Abstract = {The Fourier flexible form and its derived expenditure system
             are introduced. Subject to smoothness conditions on the
             consumer's true indirect utility function, the consumer's
             true expenditure system must be of the Fourier form over the
             region of interest in an empirical investigation.
             Arbitrarily accurate finite parameter approximations of the
             consumer's true expenditure system are obtained by dropping
             all high-order terms of the Fourier expenditure system past
             an appropriate truncation point. The resulting finite
              parameter system is tractable in empirical studies. The
             reader who is primarily interested in applications need only
             read the second and fifth sections. The remainder of the
             article is concerned with the verification of these claims
             and an investigation of some aspects of the bias in Translog
             specifications. © 1981.},
   Key = {fds266645}
}

@article{fds266648,
   Author = {AR Gallant},
   Title = {Unbiased determination of production technologies},
   Journal = {Journal of Econometrics},
   Volume = {20},
   Number = {2},
   Pages = {285-323},
   Year = {1982},
   ISSN = {0304-4076},
   Abstract = {To determine whether an industry exhibits constant returns
             to scale, whether the production function is homothetic, or
             whether inputs are separable, a common approach is to
             specify a cost function, estimate its parameters using data
             such as prices and quantities of inputs, and then test the
             parametric restrictions corresponding to constant returns, a
             homothetic technology, or separability. Statistically, such
             inferences are valid if the true cost function is a member
             of the parametric class considered, otherwise the inference
             is biased. That is, the true rejection probability is not
             necessarily adequately approximated by the nominal size of
             the statistical test. The use of fixed parameter flexible
             functional forms such as the Translog, the generalized
             Leontief, or the Box-Cox will not alleviate this problem.
             The Fourier flexible form differs fundamentally from other
             flexible forms in that it has a variable number of
             parameters and a known bound, depending on the number of
             parameters, on the error, as measured by the Sobolev norm,
             of approximation to an arbitrary cost function. Thus it is
             possible to construct statistical tests for constant
             returns, a homothetic technology, or separability which are
             asymptotically size α by letting the number of parameters
             of the Fourier flexible form depend on sample size. That is,
             the true rejection probability converges to the nominal size
             of the test as sample size tends to infinity. The rate of
             convergence depends on the smoothness of the true cost
              function; the more times the true cost function is
              differentiable, the faster the convergence. The method is
             illustrated using the data on aggregate U.S. manufacturing
             of Berndt and Wood (1975, 1979) and Berndt and Khaled
             (1979). © 1982.},
   Key = {fds266648}
}

@article{fds266647,
   Author = {V Aguirre-Torres and AR Gallant},
   Title = {The null and non-null asymptotic distribution of the Cox
             test for multivariate nonlinear regression. Alternatives and
             a new distribution-free Cox test},
   Journal = {Journal of Econometrics},
   Volume = {21},
   Number = {1},
   Pages = {5-33},
   Year = {1983},
   ISSN = {0304-4076},
   Abstract = {The asymptotic distribution of the generalized Cox test for
             choosing between two multivariate, nonlinear regression
             models in implicit form is derived. The data is assumed to
             be generated by a model that need not be either the null or
             the non-null model. As the data-generating model is not
             subjected to a Pitman drift the analysis is global, not
             local, and provides a fairly complete qualitative
             description of the power characteristics of the generalized
             Cox test. Some investigations of these characteristics are
             included. A new test statistic is introduced that does not
             require an explicit specification of the error distribution
             of the null model. The idea is to replace an analytical
             computation of the expectation of the Cox difference with a
             bootstrap estimate. The null distribution of this new test
             is derived. © 1983.},
   Key = {fds266647}
}

@article{fds266649,
   Author = {AR Gallant and RW Koenker},
   Title = {Costs and benefits of peak-load pricing of electricity. A
             continuous-time econometric approach},
   Journal = {Journal of Econometrics},
   Volume = {26},
   Number = {1-2},
   Pages = {83-113},
   Year = {1984},
   ISSN = {0304-4076},
   Abstract = {We address the following question of current policy
             interest: Would the efficiency gains from residential
             time-of-use pricing for electricity exceed the metering
             costs necessitated by these more complex rates? A model of
             consumer preferences for daily electricity consumption is
             estimated based on data from the North Carolina Rate
             Experiment. The model is formulated in continuous time and
             thus is capable of evaluating demand responses and welfare
             consequences of quite arbitrary changes in pricing policy. A
             model of long-run electricity costs - viewed as a functional
             of the daily load cycle - is constructed based on
             engineering data. The models of demand and cost are combined
             to compute solutions to several optimal pricing problems and
             to estimate the potential long-run welfare gain from several
             alternative time-of-use pricing policies including policies
             incorporating so-called 'demand charges'. We find that the
             best of the rate treatments used in the North Carolina
             experiment achieves a net welfare gain of 5¢ per day per
             household, or roughly half the cost of current metering
             equipment. Smoothly varying rates are capable of achieving
             nearly 18¢ per day per household, but would require more
             complex metering. Demand charges while they are quite
             successful in smoothing the demand cycle are not as
             successful as conventional pricing policies in achieving our
             welfare objective. © 1984.},
   Key = {fds266649}
}

@article{fds266651,
   Author = {AR Gallant and GH Golub},
   Title = {Imposing curvature restrictions on flexible functional
             forms},
   Journal = {Journal of Econometrics},
   Volume = {26},
   Number = {3},
   Pages = {295-321},
   Year = {1984},
   ISSN = {0304-4076},
   Abstract = {A general computational method for estimating the parameters
             of a flexible functional form subject to convexity,
             quasi-convexity, concavity, or quasi-concavity at a point,
             at several points, or over a region, is set forth and
             illustrated with an example. © 1984.},
   Key = {fds266651}
}

@article{fds266650,
   Author = {JA Chalfant and AR Gallant},
   Title = {Estimating substitution elasticities with the Fourier cost
             function. Some Monte Carlo results},
   Journal = {Journal of Econometrics},
   Volume = {28},
   Number = {2},
   Pages = {205-222},
   Year = {1985},
   ISSN = {0304-4076},
   Abstract = {The Fourier flexible form possesses desirable asymptotic
             properties that are not shared by other flexible forms such
             as the translog, generalized Leontief, and generalized
             Box-Cox. One of them is that an elasticity of substitution
             can be estimated with negligible bias in sufficiently large
             samples regardless of what the true form actually is, save
             that it be smooth enough. This article reports the results
             of an experiment designed to determine whether or not this
             property obtains in samples of the sizes customarily
             encountered in practice. A three-input, homothetic version
             of the generalized Box-Cox cost function was used to
             generate technologies that were oriented in a
             two-dimensional design space according to a central
             composite rotatable design; the two factors of the design
             were the Box-Cox parameter and a measure of the dispersion
             of the substitution matrix. The Fourier cost function was
             used to estimate the substitution elasticities at each
             design point, and the bias at each point was estimated using
             the Monte Carlo method. A response surface over the entire
             design space was fitted to these estimates. An examination
             of the surface reveals that the bias is small over the
             entire design space. Roughly speaking, the estimates of
             elasticities of substitution are unbiased to three
             significant digits using the Fourier flexible form no matter
             what the true technology. Our conclusion is that the small
             bias property of the Fourier form does obtain in samples of
              reasonable size; this claim must be tempered by the usual
             caveats associated with inductive inference. ©
             1985.},
   Key = {fds266650}
}

@article{fds266652,
   Author = {WA Barnett and AR Gallant},
   Title = {Editor's introduction},
   Journal = {Journal of Econometrics},
   Volume = {30},
   Number = {1-2},
   Pages = {1-},
   Year = {1985},
   ISSN = {0304-4076},
   Key = {fds266652}
}

@article{fds266653,
   Author = {AR Gallant and JF Monahan},
   Title = {Explicitly infinite-dimensional Bayesian analysis of
             production technologies},
   Journal = {Journal of Econometrics},
   Volume = {30},
   Number = {1-2},
   Pages = {171-201},
   Year = {1985},
   ISSN = {0304-4076},
   Abstract = {The firm's cost function is viewed as a point in a function
             space and data is viewed as following some probability law
             that has as its parameters various functionals evaluated at
             the firm's cost function. The Fourier flexible form is used
             to represent a cost function as an infinite-dimensional
             vector whose elements are the parameters of the Fourier
             form. This representation is used to assign a prior
             distribution to the function space. A procedure for
             numerical computation of the posterior distribution of an
             elasticity of substitution is set forth. The ideas are
             illustrated with an example. © 1985.},
   Key = {fds266653}
}

@article{fds266654,
   Author = {AR Gallant and H White},
   Title = {There exists a neural network that does not make avoidable
             mistakes},
   Pages = {657-664},
   Year = {1988},
   Abstract = {The authors show that a multiple-input, single-output,
             single-hidden-layer feedforward network with (known)
             hardwired connections from input to hidden layer, monotone
             squashing at the hidden layer and no squashing at the output
              embeds as a special case a so-called Fourier network, which
              yields a Fourier series approximation and thus inherits the
              approximation properties of Fourier series representations.
              In particular, approximation to any
             desired accuracy of any square integrable function can be
             achieved by such a network, using sufficiently many hidden
             units. In this sense, such networks do not make avoidable
             mistakes.},
   Key = {fds266654}
}

@article{fds266655,
   Author = {AR Gallant and LP Hansen and G Tauchen},
   Title = {Using conditional moments of asset payoffs to infer the
             volatility of intertemporal marginal rates of
             substitution},
   Journal = {Journal of Econometrics},
   Volume = {45},
   Number = {1-2},
   Pages = {141-179},
   Year = {1990},
   ISSN = {0304-4076},
   Abstract = {Previously Hansen and Jagannathan (1990a) derived and
             computed mean-standard deviation frontiers for intertemporal
             marginal rates of substitution (IMRS) implied by asset
             market data. These frontiers give the lower bounds on the
             standard deviations as a function of the mean. In this paper
             we develop a strategy for utilizing conditioning information
             efficiently, and hence improve on the standard deviation
             bounds computed by Hansen and Jagannathan. We implement this
             strategy empirically by using the seminonparametric (SNP)
             methodology suggested by Gallant and Tauchen (1989) to
             estimate the conditional distribution of a vector of monthly
             asset payoffs. We use the fitted conditional distributions
             to calculate both conditional and unconditional standard
             deviation bounds for the IMRS. The unconditional bounds are
             as sharp as possible subject to robustness considerations.
             We also use the fitted distributions to compute the moments
             of various candidate marginal rates of substitution
             suggested by economic theory, and in particular the
             time-nonseparable preferences of Dunn and Singleton (1986)
             and Eichenbaum and Hansen (1990). For these preferences, our
             findings suggest that habit persistence will put the moments
             of the IMRS inside the frontier at reasonable values of the
             curvature parameter. At the same time we uncover evidence
             that the implied IMRS fails to satisfy all of the
             restrictions inherent in the Euler equation. The findings
             help explain why Euler equation estimation methods typically
             find evidence in favor of local durability instead of habit
             persistence for monthly data. © 1990.},
   Key = {fds266655}
}

@article{fds266656,
   Author = {AR Gallant and G Souza},
   Title = {On the asymptotic normality of Fourier flexible form
             estimates},
   Journal = {Journal of Econometrics},
   Volume = {50},
   Number = {3},
   Pages = {329-353},
   Year = {1991},
   ISSN = {0304-4076},
   Abstract = {Rates of increase in the number of parameters of a Fourier
             factor demand system that imply asymptotically normal
             elasticity estimates are characterized. This is the
             multivariate analog of work by Andrews (1991). Our proof
             strategy is new and consists of relating the minimum
             eigenvalue of the sample sum of squares and cross-products
             matrix to the minimum eigenvalue of the population matrix
             via a uniform strong law with rate that is established using
             results from the empirical processes literature. In its
             customary form, the minimum eigenvalue of the Fourier sum of
             squares and cross-products matrix, considered as a function
             of the number of parameters, decreases faster than any
             polynomial. The consequence is that the rate at which
             parameters may increase is slower than any fractional power
             of the sample size. In this case, we get the same rate as
             Andrews. When our results are applied to multivariate
             regressions with a minimum eigenvalue that is bounded or
             declines at a polynomial rate, the rate on the parameters is
             a fractional power of the sample size. In this case, our
             method of proof gives faster rates than Andrews. Andrews'
              results cover the heteroskedastic case; ours do not. ©
             1991.},
   Key = {fds266656}
}

@article{fds266658,
   Author = {S Ellner and AR Gallant and D McCaffrey and D Nychka},
   Title = {Convergence rates and data requirements for Jacobian-based
             estimates of Lyapunov exponents from data},
   Journal = {Physics Letters A},
   Volume = {153},
   Number = {6-7},
   Pages = {357-363},
   Year = {1991},
   ISSN = {0375-9601},
   Abstract = {We present a method for estimating the dominant Lyapunov
             exponent from time-series data, based on nonparametric
             regression. For data from a finite-dimensional deterministic
             system with additive stochastic perturbations, we show that
             the estimate converges to the true values as the sample size
             increases, and give the asymptotic rate of convergence. ©
             1991.},
   Key = {fds266658}
}
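
A minimal sketch of the Jacobian-product calculation behind such
estimates: the dominant Lyapunov exponent is the time average of the
log Jacobian magnitude along a trajectory. Here the Jacobian of the
logistic map is known analytically; in the paper the Jacobians come
from a nonparametric regression fit to noisy data, so this is only the
final averaging step under simplified assumptions.

   import numpy as np

   def dominant_lyapunov(x0=0.3, r=4.0, n=20_000, burn=1_000):
       """Average log |f'(x)| along an orbit of f(x) = r x (1 - x)."""
       x, total = x0, 0.0
       for i in range(n + burn):
           if i >= burn:
               deriv = abs(r * (1.0 - 2.0 * x))       # |f'(x)|
               total += np.log(max(deriv, 1e-300))    # guard against log(0)
           x = r * x * (1.0 - x)
       return total / n

   print(dominant_lyapunov())   # close to ln 2 ≈ 0.693 for the r = 4 logistic map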

@article{fds266657,
   Author = {AR Gallant and H White},
   Title = {On learning the derivatives of an unknown mapping with
             multilayer feedforward networks},
   Journal = {Neural Networks},
   Volume = {5},
   Number = {1},
   Pages = {129-138},
   Year = {1992},
   ISSN = {0893-6080},
   Abstract = {Recently, multiple input, single output, single hidden-layer
             feedforward neural networks have been shown to be capable of
             approximating a nonlinear map and its partial derivatives.
             Specifically, neural nets have been shown to be dense in
             various Sobolev spaces. Building upon this result, we show
             that a net can be trained so that the map and its
             derivatives are learned. Specifically, we use a result of
             Gallant's to show that least squares and similar estimates
             are strongly consistent in Sobolev norm provided the number
             of hidden units and the size of the training set increase
             together. We illustrate these results by an application to
             the inverse problem of chaotic dynamics: recovery of a
             nonlinear map from a time series of iterates. These results
              extend automatically to nets that embed the single
              hidden-layer feedforward network as a special case. © 1992
             Pergamon Press plc.},
   Key = {fds266657}
}

@article{fds266660,
   Author = {M Davidian and AR Gallant},
   Title = {Smooth nonparametric maximum likelihood estimation for
             population pharmacokinetics, with application to
             quinidine.},
   Journal = {J Pharmacokinet Biopharm},
   Volume = {20},
   Number = {5},
   Pages = {529-556},
   Year = {1992},
   Month = {October},
   ISSN = {0090-466X},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/1287201},
   Abstract = {The seminonparametric (SNP) method, popular in the
             econometrics literature, is proposed for use in population
             pharmacokinetic analysis. For data that can be described by
             the nonlinear mixed effects model, the method produces
             smooth nonparametric estimates of the entire random effects
             density and simultaneous estimates of fixed effects by
             maximum likelihood. A graphical model-building strategy
             based on the SNP method is described. The methods are
             illustrated by a population analysis of plasma levels in 136
             patients undergoing oral quinidine therapy.},
   Key = {fds266660}
}

@article{fds266659,
   Author = {M Davidian and AR Gallant},
   Title = {The nonlinear mixed effects model with a smooth random
             effects density},
   Journal = {Biometrika},
   Volume = {80},
   Number = {3},
   Pages = {475-488},
   Year = {1993},
   ISSN = {0006-3444},
   url = {http://dx.doi.org/10.1093/biomet/80.3.475},
   Abstract = {SUMMARY: The fixed parameters of the nonlinear mixed effects
             model and the density of the random effects are estimated
             jointly by maximum likelihood. The density of the random
             effects is assumed to be smooth but is otherwise
             unrestricted. The method uses a series expansion that
             follows from the smoothness assumption to represent the
             density and quadrature to compute the likelihood. Standard
             algorithms are used for optimization. Empirical Bayes
             estimates of random coefficients are obtained by computing
             posterior modes. The method is applied to data from
             pharmacokinetics, and properties of the method are
             investigated by application to simulated data. © 1993
             Biometrika Trust.},
   Doi = {10.1093/biomet/80.3.475},
   Key = {fds266659}
}
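
A minimal sketch of the kind of smooth density representation involved:
a squared Hermite polynomial expansion times a Gaussian base density,
with the normalizing constant computed by Gauss-Hermite quadrature. The
coefficient values below are illustrative placeholders; in the paper
the coefficients are estimated jointly with the fixed effects by
maximum likelihood.

   import numpy as np
   from numpy.polynomial.hermite_e import hermeval, hermegauss

   def snp_density(x, theta):
       """Evaluate f(x) proportional to P(x)^2 phi(x), P a Hermite series."""
       phi = np.exp(-0.5 * x ** 2) / np.sqrt(2.0 * np.pi)
       nodes, weights = hermegauss(40)   # integrates g(z) exp(-z^2 / 2) dz
       norm = (weights * hermeval(nodes, theta) ** 2).sum() / np.sqrt(2.0 * np.pi)
       return hermeval(x, theta) ** 2 * phi / norm

   x = np.linspace(-4.0, 4.0, 9)
   print(snp_density(x, theta=[1.0, 0.5, 0.3]))   # a smooth, skewed density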

@article{fds266661,
   Author = {DF McCaffrey and AR Gallant},
   Title = {Convergence rates for single hidden layer feedforward
             networks},
   Journal = {Neural Networks},
   Volume = {7},
   Number = {1},
   Pages = {147-158},
   Year = {1994},
   ISSN = {0893-6080},
   Abstract = {By allowing the training set to become arbitrarily large,
             appropriately trained and configured single hidden layer
             feedforward networks converge in probability to the smooth
             function that they were trained to estimate. A bound on the
             probabilistic rate of convergence of these network estimates
             is given. The convergence rate is calculated as a function
             of the sample size n. If the function being estimated has
              square integrable mth order partial derivatives then the
              L2-norm estimation error approaches Op(n^(-1/2)) for large
              m.
             Two steps are required for determining these bounds. A bound
             on the rate of convergence of approximations to an unknown
             smooth function by members of a special class of single
             hidden layer feedforward networks is determined. The class
             of networks considered can embed Fourier series. Using this
             fact and results on approximation properties of Fourier
             series yields a bound on L2-norm approximation error. This
              bound is less than O(q^(-1/2)) for approximating a smooth
             function by networks with q hidden units. A modification of
             existing results for bounding estimation error provides a
             general theorem for calculating estimation error convergence
             rates. Combining this result with the bound on approximation
             rates yields the final convergence rates. ©
             1994.},
   Key = {fds266661}
}

@article{fds266663,
   Author = {WA Barnett and AR Gallant and MJ Hinich and JA Jungeilges and DT Kaplan and MJ Jensen},
   Title = {Robustness of nonlinearity and chaos tests to measurement
             error, inference method, and sample size},
   Journal = {Journal of Economic Behavior and Organization},
   Volume = {27},
   Number = {2},
   Pages = {301-320},
   Year = {1995},
   ISSN = {0167-2681},
   Abstract = {Interest has been growing in testing for nonlinearity and
             chaos in economic data, but much controversy has arisen
             about the available results. This paper explores the reasons
             for these empirical difficulties. We apply five tests for
             nonlinearity or chaos to various monetary aggregate data
             series. We find that the inferences vary across tests for
             the same data, and within tests for varying sample sizes and
             various methods of aggregation of the data. Robustness of
             inferences in this area of research seems to be low and may
             account for the controversies surrounding empirical claims
             of nonlinearity and chaos in economics. ©
             1995.},
   Key = {fds266663}
}

@article{fds266692,
   Author = {R Bansal and AR Gallant and R Hussey and G Tauchen},
   Title = {Nonparametric estimation of structural models for
             high-frequency currency market data},
   Journal = {Journal of Econometrics},
   Volume = {66},
   Number = {1-2},
   Pages = {251-287},
   Year = {1995},
   ISSN = {0304-4076},
   url = {http://hdl.handle.net/10161/1902 Duke open
             access},
   Abstract = {Empirical modeling of high-frequency currency market data
             reveals substantial evidence for nonnormality, stochastic
             volatility, and other nonlinearities. This paper
             investigates whether an equilibrium monetary model can
             account for nonlinearities in weekly data. The model
             incorporates time-nonseparable preferences and a transaction
             cost technology. Simulated sample paths are generated using
             Marcet's parameterized expectations procedure. The paper
             also develops a new method for estimation of structural
             economic models. The method forces the model to match (under
             a GMM criterion) the score function of a nonparametric
             estimate of the conditional density of observed data. The
             estimation uses weekly U.S.-German currency market data,
             1975-90. © 1995.},
   Doi = {10.1016/0304-4076(94)01618-A},
   Key = {fds266692}
}

@article{fds266662,
   Author = {VM Fenton and AR Gallant},
   Title = {Convergence rates of SNP density estimators},
   Journal = {Econometrica},
   Volume = {64},
   Number = {3},
   Pages = {719-727},
   Year = {1996},
   Key = {fds266662}
}

@article{fds266664,
   Author = {AR Gallant and G Tauchen},
   Title = {Which moments to match?},
   Journal = {Econometric Theory},
   Volume = {12},
   Number = {4},
   Pages = {657-681},
   Year = {1996},
   url = {http://hdl.handle.net/10161/2542 Duke open
             access},
   Abstract = {We describe an intuitive, simple, and systematic approach to
             generating moment conditions for generalized method of
             moments (GMM) estimation of the parameters of a structural
             model. The idea is to use the score of a density that has an
             analytic expression to define the GMM criterion. The
             auxiliary model that generates the score should closely
             approximate the distribution of the observed data, but is
             not required to nest it. If the auxiliary model nests the
             structural model then the estimator is as efficient as
             maximum likelihood. The estimator is advantageous when
             expectations under a structural model can be computed by
             simulation, by quadrature, or by analytic expressions but
             the likelihood cannot be computed easily. © 1996 Cambridge
             University Press.},
   Key = {fds266664}
}
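
A minimal sketch of the score-based moment construction, assuming a
Gaussian auxiliary model and a simple location-scale structural model
simulated with common random numbers (both illustrative stand-ins for
the applications the paper has in mind): the structural parameters are
chosen so that the fitted auxiliary score, averaged over a long
simulation from the structural model, is near zero.

   import numpy as np
   from scipy.optimize import minimize

   rng = np.random.default_rng(2)
   data = rng.standard_t(df=5, size=2_000)        # "observed" data

   # Auxiliary model N(mu, s2); its quasi-ML estimates are sample moments.
   mu_hat, s2_hat = data.mean(), data.var()

   def aux_score(x):
       """Score of the fitted auxiliary density, evaluated at points x."""
       return np.column_stack([(x - mu_hat) / s2_hat,
                               ((x - mu_hat) ** 2 - s2_hat) / (2.0 * s2_hat ** 2)])

   z = rng.standard_t(df=5, size=100_000)         # common random numbers

   def emm_criterion(theta):
       loc, scale = theta
       sims = loc + scale * z                     # simulate the structural model
       m = aux_score(sims).mean(axis=0)           # expected auxiliary score
       return m @ m                               # identity-weighted GMM criterion

   print(minimize(emm_criterion, x0=[0.5, 2.0], method="Nelder-Mead").x)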

@article{fds266666,
   Author = {VM Fenton and AR Gallant},
   Title = {Qualitative and asymptotic performance of SNP density
             estimators},
   Journal = {Journal of Econometrics},
   Volume = {74},
   Number = {1},
   Pages = {77-118},
   Year = {1996},
   url = {http://dx.doi.org/10.1016/0304-4076(95)01752-6},
   Abstract = {The SNP estimator is the most convenient nonparametric
             method for simultaneously estimating the parameters of a
             nonlinear model and the density of a latent process by
             maximum likelihood. To determine if this convenience comes
             at a price, we assess the qualitative behavior of SNP in
             finite samples using the Marron-Wand test suite and verify
             theoretical convergence rates by Monte Carlo simulation. Our
             results suggest that there is no price for convenience
             because the SNP estimator is both qualitatively and
             asymptotically similar to the kernel estimator which is
             optimal.},
   Doi = {10.1016/0304-4076(95)01752-6},
   Key = {fds266666}
}

@article{fds266665,
   Author = {AR Gallant and G Tauchen},
   Title = {Estimation of continuous-time models for stock returns and
             interest rates},
   Journal = {Macroeconomic Dynamics},
   Volume = {1},
   Number = {1},
   Pages = {135-168},
   Year = {1997},
   ISSN = {1365-1005},
   url = {http://hdl.handle.net/10161/2590 Duke open
             access},
   Abstract = {Efficient Method of Moments is used to estimate and test
             continuous-time diffusion models for stock returns and
             interest rates. For stock returns, a four-state, two-factor
             diffusion with one state observed can account for the
             dynamics of the daily return on the S&P Composite Index,
             1927-1987. This contrasts with results indicating that
             discrete-time, stochastic volatility models cannot explain
             these dynamics. For interest rates, a trivariate
             Yield-Factor Model is estimated from weekly, 1962-1995,
             Treasury rates. The Yield-Factor Model is sharply rejected,
             although extensions permitting convexities in the local
             variance come closer to fitting the data.},
   Key = {fds266665}
}

@article{fds266667,
   Author = {AR Gallant and JR Long},
   Title = {Estimating stochastic differential equations efficiently by
             minimum chi-squared},
   Journal = {Biometrika},
   Volume = {84},
   Number = {1},
   Pages = {125-141},
   Year = {1997},
   ISSN = {0006-3444},
   Abstract = {We propose a minimum chi-squared estimator for the
             parameters of an ergodic system of stochastic differential
             equations with partially observed state. We prove that the
             efficiency of the estimator approaches that of maximum
             likelihood as the number of moment functions entering the
             chi-squared criterion increases and as the number of past
             observations entering each moment function increases. The
             minimised criterion is asymptotically chi-squared and can be
             used to test system adequacy. When a fitted system is
             rejected, inspecting studentised moments suggests how the
             fitted system might be modified to improve the fit. The
             method and diagnostic tests are applied to daily
             observations on the U.S. dollar to Deutschmark exchange rate
             from 1977 to 1992.},
   Key = {fds266667}
}

@article{fds266668,
   Author = {AR Gallant and D Hsieh and G Tauchen},
   Title = {Estimation of stochastic volatility models with
             diagnostics},
   Journal = {Journal of Econometrics},
   Volume = {81},
   Number = {1},
   Pages = {159-192},
   Year = {1997},
   url = {http://hdl.handle.net/10161/2057 Duke open
             access},
   Abstract = {Efficient method of moments (EMM) is used to fit the
             standard stochastic volatility model and various extensions
             to several daily financial time series. EMM matches to the
             score of a model determined by data analysis called the
             score generator. Discrepancies reveal characteristics of
             data that stochastic volatility models cannot approximate.
             The two score generators employed here are 'semiparametric
             ARCH' and 'nonlinear nonparametric'. With the first, the
             standard model is rejected, although some extensions are
             accepted. With the second, all versions are rejected. The
             extensions required for an adequate fit are so elaborate
             that nonparametric specifications are probably more
             convenient. © 1997 Elsevier Science S.A.},
   Key = {fds266668}
}

@article{fds266669,
   Author = {AR Gallant and G Tauchen},
   Title = {Reprojecting partially observed systems with application to
             interest rate diffusions},
   Journal = {Journal of the American Statistical Association},
   Volume = {93},
   Number = {441},
   Pages = {10-24},
   Year = {1998},
   Abstract = {We introduce reprojection as a general purpose technique for
             characterizing the dynamic response of a partially observed
             nonlinear system to its observable history. Reprojection is
             the third step of a procedure wherein first data are
             summarized by projection onto a Hermite series
             representation of the unconstrained transition density for
             observables; second, system parameters are estimated by
             minimum chi-squared, where the chi-squared criterion is a
             quadratic form in the expected score of the projection; and
             third, the constraints on dynamics implied by the nonlinear
             system are imposed by projecting a long simulation of the
             estimated system onto a Hermite series representation of the
              constrained transition density for observables. The
             constrained transition density can be used to study the
             response of the system to its observable history. We utilize
             the technique to assess the dynamics of several diffusion
             models for the short-term interest rate that have been
             proposed and to compare them to a new model that has
             feedback from the interest rate into both the drift and
             diffusion coefficients of a volatility equation.},
   Key = {fds266669}
}

@article{fds266670,
   Author = {SP Ellner and BA Bailey and GV Bobashev and AR Gallant and BT Grenfell and DW Nychka},
   Title = {Noise and nonlinearity in measles epidemics: Combining
             mechanistic and statistical approaches to population
             modeling},
   Journal = {American Naturalist},
   Volume = {151},
   Number = {5},
   Pages = {425-440},
   Year = {1998},
   ISSN = {0003-0147},
   url = {http://dx.doi.org/10.1086/286130},
   Abstract = {We present and evaluate an approach to analyzing population
             dynamics data using semimechanistic models. These models
             incorporate reliable information on population structure and
             underlying dynamic mechanisms but use nonparametric
             surface-fitting methods to avoid unsupported assumptions
             about the precise form of rate equations. Using historical
             data on measles epidemics as a case study, we show how this
             approach can lead to better forecasts, better
             characterizations of the dynamics, and a better
             understanding of the factors causing complex population
             dynamics relative to either mechanistic models or purely
             descriptive statistical time-series models. The
             semimechanistic models are found to have better forecasting
             accuracy than either of the model types used in previous
             analyses when tested on data not used to fit the models. The
             dynamics are characterized as being both nonlinear and
             noisy, and the global dynamics are clustered very tightly
             near the border of stability (dominant Lyapunov exponent λ
             ≃ 0). However, locally in state space the dynamics
             oscillate between strong short-term stability and strong
             short-term chaos (i.e., between negative and positive local
             Lyapunov exponents). There is statistically significant
             evidence for short-term chaos in all data sets examined.
             Thus the nonlinearity in these systems is characterized by
             the variance over state space in local measures of chaos
             versus stability rather than a single summary measure of the
             over-all dynamics as either chaotic or nonchaotic.},
   Doi = {10.1086/286130},
   Key = {fds266670}
}

@article{fds266671,
   Author = {WA Barnett and AR Gallant and MJ Hinich and JA Jungeilges and DT Kaplan and MJ Jensen},
   Title = {A single-blind controlled competition among tests for
             nonlinearity and chaos},
   Journal = {Journal of Econometrics},
   Volume = {82},
   Number = {1},
   Pages = {157-192},
   Year = {1998},
   ISSN = {0304-4076},
   Abstract = {Interest has been growing in testing for nonlinearity or
             chaos in economic data, but much controversy has arisen
             about the available results. This paper explores the reasons
             for these empirical difficulties. We designed and ran a
             single-blind controlled competition among five highly
             regarded tests for nonlinearity or chaos with ten simulated
             data series. The data generating mechanisms include linear
             processes, chaotic recursions, and non-chaotic stochastic
             processes; and both large and small samples were included in
              the experiment. The data series were produced in a
              single-blind manner by the competition manager and sent by
              e-mail,
             without identifying information, to the experiment
             participants. Each such participant is an acknowledged
             expert in one of the tests and has a possible vested
             interest in producing the best possible results with that
             one test. The results of this competition provide much
             surprising information about the power functions of some of
             the best regarded tests for nonlinearity or noisy chaos. ©
             1997 Elsevier Science S.A.},
   Key = {fds266671}
}

@article{fds266672,
   Author = {AR Gallant and CT Hsu and G Tauchen},
   Title = {Using daily range data to calibrate volatility diffusions
             and extract the forward integrated variance},
   Journal = {Review of Economics and Statistics},
   Volume = {81},
   Number = {4},
   Pages = {617-631},
   Year = {1999},
   url = {http://hdl.handle.net/10161/1999 Duke open
             access},
   Abstract = {A common model for security price dynamics is the
             continuous-time stochastic volatility model. For this model,
             Hull and White (1987) show that the price of a derivative
             claim is the conditional expectation of the Black-Scholes
             price with the forward integrated variance replacing the
             Black-Scholes variance. Implementing the Hull and White
             characterization requires both estimates of the price
             dynamics and the conditional distribution of the forward
             integrated variance given observed variables. Using daily
             data on close-to-close price movement and the daily range,
             we find that standard models do not fit the data very well
             and that a more general three-factor model does better, as
             it mimics the long-memory feature of financial volatility.
             We develop techniques for estimating the conditional
             distribution of the forward integrated variance given
             observed variables.},
   Key = {fds266672}
}

@article{fds266673,
   Author = {AR Gallant and G Tauchen},
   Title = {The relative efficiency of method of moments
             estimators},
   Journal = {Journal of Econometrics},
   Volume = {92},
   Number = {1},
   Pages = {149-172},
   Year = {1999},
   url = {http://hdl.handle.net/10161/1900 Duke open
             access},
   Abstract = {The asymptotic relative efficiency of efficient method of
             moments when implemented with a seminonparametric auxiliary
             model is compared to that of conventional method of moments
             when implemented with polynomial moment functions. Because
             the expectations required by these estimators can be
             computed by simulation, these two methods are commonly used
             to estimate the parameters of nonlinear latent variables
             models. The comparison is for the models in the Marron-Wand
             test suite, a scale mixture of normals, and the second
             largest order statistic of the lognormal distribution. The
             latter models are representative of financial market data
             and auction data, respectively, which are the two most
             common applications of simulation estimators. Efficient
             method of moments dominates conventional method of moments
             over these models. © 1999 Elsevier Science S.A. All rights
             reserved.},
   Key = {fds266673}
}

@article{fds266674,
   Author = {AR Fleissig and AR Gallant and JJ Seater},
   Title = {Separability, aggregation, and euler equation
             estimation},
   Journal = {Macroeconomic Dynamics},
   Volume = {4},
   Number = {4},
   Pages = {547-572},
   Year = {2000},
   Abstract = {We derive a seminonparametric utility function containing
             the constant relative risk aversion (CRRA) function as a
             special case, and we estimate the associated Euler equations
             with U.S. consumption data. There is strong evidence that
             the CRRA function is misspecified. The correctly specified
             function includes lagged effects of durable goods and
             perhaps nondurable goods, is bounded as required by Arrow's
             Utility Boundedness Theorem, and has a positive rate of time
             preference. Constraining sample periods and separability
             structure to be consistent with the generalized axiom of
             revealed preference affects estimation results
             substantially. Using Divisia aggregates instead of the NIPA
             aggregates also affects results.},
   Key = {fds266674}
}

@article{fds266675,
   Author = {B Eraker and GB Durham and AR Gallant},
   Title = {Comment [4] (multiple letters)},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {327-329+335+337},
   Year = {2002},
   Key = {fds266675}
}

@article{fds266676,
   Author = {Y Aït-Sahalia and GB Durham and AR Gallant},
   Title = {Comment [1] (multiple letters)},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {317-321+335},
   Year = {2002},
   Key = {fds266676}
}

@article{fds266677,
   Author = {H Zhou and GB Durham and AR Gallant},
   Title = {Comment [7] (multiple letters)},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {332-335+338},
   Year = {2002},
   Key = {fds266677}
}

@article{fds266678,
   Author = {G Tauchen and GB Durham and AR Gallant},
   Title = {Comment [6] (multiple letters)},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {331-332+335+337},
   Year = {2002},
   Key = {fds266678}
}

@article{fds266679,
   Author = {GB Durham and AR Gallant},
   Title = {Numerical techniques for maximum likelihood estimation of
             continuous-time diffusion processes},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {297-316},
   Year = {2002},
   url = {http://dx.doi.org/10.1198/073500102288618397},
   Abstract = {Stochastic differential equations often provide a convenient
             way to describe the dynamics of economic and financial data,
             and a great deal of effort has been expended searching for
             efficient ways to estimate models based on them. Maximum
             likelihood is typically the estimator of choice; however,
             since the transition density is generally unknown, one is
             forced to approximate it. The simulation-based approach
             suggested by Pedersen (1995) has great theoretical appeal,
              but previously available implementations have been
             computationally costly. We examine a variety of numerical
             techniques designed to improve the performance of this
             approach. Synthetic data generated by a Cox-Ingersoll-Ross
             model with parameters calibrated to match monthly
             observations of the U.S. short-term interest rate are used
             as a test case. Since the likelihood function of this
             process is known, the quality of the approximations can be
             easily evaluated. On datasets with 1,000 observations, we
             are able to approximate the maximum likelihood estimator
             with negligible error in well under 1 min. This represents
             something on the order of a 10,000-fold reduction in
             computational effort as compared to implementations without
             these enhancements. With other parameter settings designed
             to stress the methodology, performance remains strong. These
             ideas are easily generalized to multivariate settings and
             (with some additional work) to latent variable models. To
             illustrate, we estimate a simple stochastic volatility model
             of the U.S. short-term interest rate.},
   Doi = {10.1198/073500102288618397},
   Key = {fds266679}
}
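
A minimal sketch of the simulation-based transition density the paper
accelerates: subdivide the observation interval, simulate Euler paths
up to the last subinterval, and average the closed-form Gaussian
density of the final step (Pedersen's estimator, without the paper's
variance-reduction and bias-reduction enhancements). The CIR drift and
diffusion match the test case described in the abstract; the parameter
values are illustrative.

   import numpy as np

   def cir_logpdf_sim(x0, x1, dt, kappa=0.5, theta=0.06, sigma=0.15,
                      M=8, S=5_000, rng=None):
       """Log of the simulated transition density p(x1 | x0; dt) for CIR."""
       if rng is None:
           rng = np.random.default_rng(3)
       h = dt / M
       x = np.full(S, x0, dtype=float)
       for _ in range(M - 1):                     # Euler steps to time dt - h
           drift = kappa * (theta - x) * h
           shock = sigma * np.sqrt(np.maximum(x, 0.0) * h) * rng.standard_normal(S)
           x = x + drift + shock
       mean = x + kappa * (theta - x) * h         # final-step Gaussian density
       var = sigma ** 2 * np.maximum(x, 1e-12) * h
       dens = np.exp(-(x1 - mean) ** 2 / (2.0 * var)) / np.sqrt(2.0 * np.pi * var)
       return np.log(dens.mean())

   print(cir_logpdf_sim(x0=0.05, x1=0.055, dt=1.0 / 12.0))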

@article{fds266680,
   Author = {S Chib and N Shephard and GB Durham and AR Gallant},
   Title = {Comment [3] (multiple letters)},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {325-327+335},
   Year = {2002},
   Key = {fds266680}
}

@article{fds266681,
   Author = {P Glynn and GB Durham and AR Gallant},
   Title = {Comment [5] (multiple letters)},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {330-331+335+337},
   Year = {2002},
   Key = {fds266681}
}

@article{fds266682,
   Author = {DH Ahn and RF Dittmar and AR Gallant},
   Title = {Quadratic Term Structure Models: Theory and
             Evidence},
   Journal = {Review of Financial Studies},
   Volume = {15},
   Number = {1},
   Pages = {243-288},
   Year = {2002},
   ISSN = {0893-9454},
   Abstract = {This article theoretically explores the characteristics
             underpinning quadratic term structure models (QTSMs), which
             designate the yield on a bond as a quadratic function of
             underlying state variables. We develop a comprehensive QTSM,
             which is maximally flexible and thus encompasses the
             features of several diverse models including the double
             square-root model of Longstaff (1989), the univariate
             quadratic model of Beaglehole and Tenney (1992), and the
             squared-autoregressive-independent-variable nominal term
             structure (SAINTS) model of Constantinides (1992). We
             document a complete classification of admissibility and
             empirical identification for the QTSM, and demonstrate that
             the QTSM can overcome limitations inherent in affine term
             structure models (ATSMs). Using the efficient method of
             moments of Gallant and Tauchen (1996), we test the empirical
             performance of the model in determining bond prices and
             compare the performance to the ATSMs. The results of the
             goodness-of-fit tests suggest that the QTSMs outperform the
             ATSMs in explaining historical bond price behavior in the
             United States.},
   Key = {fds266682}
}

@article{fds266683,
   Author = {M Coppejans and AR Gallant},
   Title = {Cross-validated SNP density estimates},
   Journal = {Journal of Econometrics},
   Volume = {110},
   Number = {1},
   Pages = {27-65},
   Year = {2002},
   url = {http://dx.doi.org/10.1016/S0304-4076(02)00121-5},
   Abstract = {We consider cross-validation strategies for the
             seminonparametric (SNP) density estimator, which is a
             truncation (or sieve) estimator based upon a Hermite series
             expansion with coefficients determined by quasi-maximum
             likelihood. Our main focus is on the use of SNP density
             estimators as an adjunct to efficient method of moments
             (EMM) structural estimation. It is known that for this
             purpose a desirable truncation point occurs at the last
             point at which the integrated squared error (ISE) curve of
             the SNP density estimate declines abruptly. We study the
             determination of the ISE curve for iid data by means of
             leave-one-out cross-validation and hold-out-sample
             cross-validation through an examination of their performance
             over the Marron-Wand test suite and models related to asset
             pricing and auction applications. We find that both methods
             are informative as to the location of abrupt drops, but that
             neither can reliably determine the minimum of the ISE curve.
             We validate these findings with a Monte Carlo study. The
             hold-out-sample method is cheaper to compute because it
             requires fewer nonlinear optimizations. We consider the
             asymptotic justification of hold-out-sample
             cross-validation. For this purpose, we establish rates of
             convergence of the SNP estimator under the Hellinger norm
             that are of interest in their own right. © 2002 Elsevier
             Science B.V. All rights reserved.},
   Doi = {10.1016/S0304-4076(02)00121-5},
   Key = {fds266683}
}
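
For orientation, the SNP density referenced above has, in the standard
Gallant-Nychka form, the following shape (a minimal sketch; the symbols are
illustrative):

\[
  h_K(z) \;=\; \frac{[P_K(z)]^2\,\phi(z)}{\int [P_K(u)]^2\,\phi(u)\,du},
  \qquad
  P_K(z) \;=\; \sum_{i=0}^{K} a_i z^i,
\]

where \phi is the standard normal density, the coefficients a_i are fitted by
quasi-maximum likelihood, and the truncation point K is the tuning parameter
that the cross-validation strategies studied in the paper are meant to select.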

@article{fds266691,
   Author = {MW Brandt and P Santa-Clara and GB Durham and AR
             Gallant},
   Title = {Comment [2] (multiple letters)},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {20},
   Number = {3},
   Pages = {321-324+335},
   Year = {2002},
   Key = {fds266691}
}

@article{fds266684,
   Author = {DH Ahn and RF Dittmar and AR Gallant and B Gao},
   Title = {Purebred or hybrid?: Reproducing the volatility in term
             structure dynamics},
   Journal = {Journal of Econometrics},
   Volume = {116},
   Number = {1-2},
   Pages = {147-180},
   Year = {2003},
   url = {http://dx.doi.org/10.1016/S0304-4076(03)00106-4},
   Abstract = {This paper investigates the ability of mixtures of affine,
             quadratic, and non-linear models to track the volatility in
             the term structure of interest rates. Term structure
             dynamics appear to exhibit pronounced time varying or
              stochastic volatility. Ahn et al. (Rev. Financial Stud. 15
              (2002) 243) provide evidence suggesting that term structure
             models incorporating a set of quadratic factors are better
             able to reproduce term structure dynamics than affine
             models, although neither class of models is able to fully
             capture term structure volatility. In this study, we combine
             affine, quadratic and non-linear factors in order to
             maximize the ability of a term structure model to generate
             heteroskedastic volatility. We show that this combination
             entails a tradeoff between specification of heteroskedastic
             volatility and correlations among the factors. By combining
             factors, we are able to gauge the cost of this tradeoff.
             Using efficient method of moments (Gallant and Tauchen,
             Econometric Theory 12 (1996) 657), we find that augmenting a
             quadratic model with a non-linear factor results in
             improvement in fit over a model comprised solely of
             quadratic factors when the model only has to confront first
             and second moment dynamics. When the full dynamics are
             confronted, this result reverses. Since the non-linear
             factor is characterized by stronger dependence of volatility
             on the level of the factor, we conclude that flexibility in
             the specification of both level dependence and correlation
             structure of the factors are important for describing term
             structure dynamics. © 2003 Elsevier B.V. All rights
             reserved.},
   Doi = {10.1016/S0304-4076(03)00106-4},
   Key = {fds266684}
}
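
Because the efficient method of moments (Gallant and Tauchen, 1996) recurs
throughout the entries above, a compact statement of its criterion may help;
this is the generic EMM form, not a transcription from any one of these
papers:

\[
  \hat\theta \;=\; \arg\min_{\theta}\;
    m_N(\theta)'\,\tilde{\mathcal I}^{-1}\,m_N(\theta),
  \qquad
  m_N(\theta) \;=\; \frac{1}{N}\sum_{\tau=1}^{N}
    \frac{\partial}{\partial\eta}\,
    \log f\bigl(\hat y_\tau(\theta)\mid \hat x_\tau(\theta),\tilde\eta\bigr),
\]

where f is an auxiliary score-generator model fitted to the observed data by
quasi-maximum likelihood (yielding \tilde\eta and the quasi-information matrix
\tilde{\mathcal I}), and \{\hat y_\tau(\theta), \hat x_\tau(\theta)\} is a long
simulation from the structural model at \theta. The minimized criterion value
supplies the goodness-of-fit tests cited in these abstracts.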

@article{fds266685,
   Author = {M Chernov and AR Gallant and E Ghysels and G Tauchen},
   Title = {Alternative models for stock price dynamics},
   Journal = {Journal of Econometrics},
   Volume = {116},
   Number = {1-2},
   Pages = {225-257},
   Year = {2003},
   url = {http://hdl.handle.net/10161/1892},
   Note = {Duke open access},
   Abstract = {This paper evaluates the role of various volatility
             specifications, such as multiple stochastic volatility (SV)
             factors and jump components, in appropriate modeling of
             equity return distributions. We use estimation technology
             that facilitates nonnested model comparisons and use a long
             data set which provides rich information about the
             conditional and unconditional distribution of returns. We
             consider two broad families of models: (1) the multifactor
             loglinear family, and (2) the affine-jump family. Both
             classes of models have attracted much attention in the
             derivatives and econometrics literatures. There are various
             tradeoffs in considering such diverse specifications. If
             pure diffusion SV models are chosen over jump diffusions, it
             has important implications for hedging strategies. If
             logarithmic models are chosen over affine ones, it may
             seriously complicate option pricing. Comparing many
             different specifications of pure diffusion multifactor
             models and jump diffusion models, we find that (1) log
             linear models have to be extended to two factors with
             feedback in the mean reverting factor, (2) affine models
             have to have a jump in returns, stochastic volatility or
             probably both. Models (1) and (2) are observationally
             equivalent on the data set in hand. In either (1) or (2) the
             key is that the volatility can move violently. As we obtain
             models with comparable empirical fit, one must make a choice
             based on arguments other than statistical goodness-of-fit
             criteria. The considerations include facility to price
              options, to hedge, and parsimony. The affine specification
             with jumps in volatility might therefore be preferred
             because of the closed-form derivatives prices. © 2003
             Elsevier B.V. All rights reserved.},
   Doi = {10.1016/S0304-4076(03)00108-8},
   Key = {fds266685}
}

@article{fds266686,
   Author = {LJ Christiano and AR Gallant and CA Sims and J Faust and L Kilian and M Del
             Negro and F Schorfheide and F Smets and R Wouters},
   Title = {Comment},
   Journal = {Journal of Business and Economic Statistics},
   Volume = {25},
   Number = {2},
   Pages = {143-162},
   Year = {2007},
   ISSN = {0735-0015},
   url = {http://dx.doi.org/10.1198/073500107000000061},
   Doi = {10.1198/073500107000000061},
   Key = {fds266686}
}

@article{fds266687,
   Author = {AR Gallant and H Hong},
   Title = {A statistical inquiry into the plausibility of recursive
             utility},
   Journal = {Journal of Financial Econometrics},
   Volume = {5},
   Number = {4},
   Pages = {523-559},
   Year = {2007},
   ISSN = {1479-8409},
   url = {http://dx.doi.org/10.1093/jjfinec/nbm013},
   Abstract = {We use purely statistical methods to determine if the
             pricing kernel is the intertemporal marginal rate of
             substitution under recursive utility. We introduce a
             nonparametric Bayesian method that treats the pricing kernel
             as a latent variable and extracts it and its transition
             density from payoffs on 24 Fama-French portfolios, on bonds,
             and on payoffs that use conditioning information available
             when portfolios are formed. Our priors are formed from an
             examination of a Bansal-Yaron economy. Using both monthly
             data and annual data, we find that the data support
             recursive utility. © The Author 2007. Published by Oxford
             University Press.},
   Doi = {10.1093/jjfinec/nbm013},
   Key = {fds266687}
}
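
The object extracted in the paper above is the pricing kernel M_{t+1}, pinned
down by the Euler equation; under Epstein-Zin recursive utility the
intertemporal marginal rate of substitution takes the familiar textbook form,
offered here only as orientation and written in standard notation rather than
the paper's:

\[
  E\bigl[\,M_{t+1}\,R_{j,t+1}\mid \mathcal F_t\,\bigr] \;=\; 1,
  \qquad
  M_{t+1} \;=\; \delta^{\theta}
    \left(\frac{C_{t+1}}{C_t}\right)^{-\theta/\psi}
    R_{a,t+1}^{\,\theta-1},
  \qquad
  \theta \;=\; \frac{1-\gamma}{1-1/\psi},
\]

where \gamma is risk aversion, \psi the elasticity of intertemporal
substitution, and R_{a,t+1} the return on aggregate wealth.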

@article{fds266693,
   Author = {R Bansal and AR Gallant and G Tauchen},
   Title = {Rational pessimism, rational exuberance, and asset pricing
             models},
   Journal = {Review of Economic Studies},
   Volume = {74},
   Number = {4},
   Pages = {1005-1033},
   Year = {2007},
   ISSN = {0034-6527},
   url = {http://dx.doi.org/10.1111/j.1467-937X.2007.00454.x},
   Abstract = {The paper estimates and examines the empirical plausibility
             of asset pricing models that attempt to explain features of
             financial markets such as the size of the equity premium and
             the volatility of the stock market. In one model, the
             long-run risks (LRR) model of Bansal and Yaron,
             low-frequency movements, and time-varying uncertainty in
             aggregate consumption growth are the key channels for
             understanding asset prices. In another, as typified by
             Campbell and Cochrane, habit formation, which generates
             time-varying risk aversion and consequently time variation
             in risk premia, is the key channel. These models are fitted
             to data using simulation estimators. Both models are found
             to fit the data equally well at conventional significance
             levels, and they can track quite closely a new measure of
             realized annual volatility. Further, scrutiny using a rich
             array of diagnostics suggests that the LRR model is
             preferred. © 2007 The Review of Economic Studies
             Limited.},
   Doi = {10.1111/j.1467-937X.2007.00454.x},
   Key = {fds266693}
}

@article{fds266689,
   Author = {ARM Cheng and AR Gallant and C Ji and BS Lee},
   Title = {A Gaussian approximation scheme for computation of option
             prices in stochastic volatility models},
   Journal = {Journal of Econometrics},
   Volume = {146},
   Number = {1},
   Pages = {44-58},
   Year = {2008},
   ISSN = {0304-4076},
   url = {http://dx.doi.org/10.1016/j.jeconom.2008.07.002},
   Abstract = {We consider European options on a price process that follows
             the log-linear stochastic volatility model. Two stochastic
             integrals in the option pricing formula are costly to
             compute. We derive a central limit theorem to approximate
             them. At parameter settings appropriate to foreign exchange
             data our formulas improve computation speed by a factor of
              1000 over brute force Monte Carlo, making MCMC statistical
             methods practicable. We provide estimates of model
             parameters from daily data on the Swiss Franc to Euro and
             Japanese Yen to Euro over the period 1999-2002. © 2008
             Elsevier B.V. All rights reserved.},
   Doi = {10.1016/j.jeconom.2008.07.002},
   Key = {fds266689}
}
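
The brute force Monte Carlo benchmark mentioned in the abstract above can be
sketched in a few lines of Python. The sketch below prices a European call
under an Ornstein-Uhlenbeck log-volatility model by simulating the two costly
stochastic integrals, the integrated variance and the integral of sigma
against the volatility Brownian motion, and then applying standard
Romano-Touzi conditioning. The Euler discretization, the conditioning step,
and all parameter names are illustrative assumptions; none of this reproduces
the paper's Gaussian approximation scheme, which is precisely what removes the
need for this path simulation.

import numpy as np
from scipy.stats import norm

def bs_call(S, K, r, sigma, T):
    """Black-Scholes price of a European call (vectorized in S and sigma)."""
    d1 = (np.log(S / K) + (r + 0.5 * sigma**2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    return S * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)

def mc_call_loglinear_sv(S0, K, r, T, kappa, alpha, gamma, rho,
                         n_paths=20000, n_steps=250, seed=0):
    """Brute-force Monte Carlo call price under an OU log-volatility
    ("log-linear") SV model, conditioning on the volatility path so that
    only that path needs to be simulated.  Parameter names are
    illustrative, not the paper's notation."""
    rng = np.random.default_rng(seed)
    dt = T / n_steps
    log_sig = np.full(n_paths, alpha)   # start log-vol at its long-run mean
    int_var = np.zeros(n_paths)         # accumulates integral of sigma^2 dt
    int_sdw = np.zeros(n_paths)         # accumulates integral of sigma dW2
    for _ in range(n_steps):
        sig = np.exp(log_sig)
        dW2 = rng.standard_normal(n_paths) * np.sqrt(dt)
        int_var += sig**2 * dt          # first costly stochastic integral
        int_sdw += sig * dW2            # second costly stochastic integral
        log_sig += kappa * (alpha - log_sig) * dt + gamma * dW2  # Euler step
    # Conditional on the volatility path, the price is Black-Scholes with a
    # shifted spot and the orthogonal part of the integrated variance.
    xi = np.exp(rho * int_sdw - 0.5 * rho**2 * int_var)
    sig_bar = np.sqrt((1.0 - rho**2) * int_var / T)
    return bs_call(S0 * xi, K, r, sig_bar, T).mean()

# Illustrative call; the parameter values are made up for demonstration only.
price = mc_call_loglinear_sv(S0=100.0, K=100.0, r=0.01, T=0.5,
                             kappa=4.0, alpha=np.log(0.10), gamma=0.5,
                             rho=-0.3)
print(round(price, 4))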

@article{fds266688,
   Author = {AR Gallant and RE McCulloch},
   Title = {On the determination of general scientific models with
             application to asset pricing},
   Journal = {Journal of the American Statistical Association},
   Volume = {104},
   Number = {485},
   Pages = {117-131},
   Year = {2009},
   ISSN = {0162-1459},
   url = {http://dx.doi.org/10.1198/jasa.2009.0008},
   Abstract = {We consider a consumption-based asset pricing model that
             uses habit persistence to overcome the known statistical
             inadequacies of the classical consumption-based asset
             pricing model. We find that the habit model fits reasonably
             well and agrees with results reported in the literature if
             conditional heteroskedasticity is suppressed but that it
             does not fit nor do results agree if conditional
             heteroskedasticity, well known to be present in financial
              market data, is allowed to manifest itself. We also find that
             it is the preference parameters of the model that are most
             affected by the presence or absence of conditional
             heteroskedasticity, especially the risk aversion parameter.
             The habit model exhibits four characteristics that are often
             present in models developed from scientific considerations:
             (1) a likelihood is not available; (2) prior information is
             available; (3) a portion of the prior information is
             expressed in terms of functionals of the model that cannot
             be converted into an analytic prior on model parameters; (4)
             the model can be simulated. The underpinning of our approach
             is that, in addition, (5) a parametric statistical model for
             the data, determined without reference to the scientific
             model, is known. In general one can expect to be able to
             determine a model that satisfies (5) because very richly
             parameterized statistical models are easily accommodated. We
             develop a computationally intensive, generally applicable,
             Bayesian strategy for estimation and inference for
             scientific models that meet this description together with
             methods for assessing model adequacy. An important adjunct
             to the method is that a map from the parameters of the
             scientific model to functionals of the scientific and
             statistical models becomes available. This map is a powerful
             tool for understanding the properties of the scientific
             model. © 2009 American Statistical Association.},
   Doi = {10.1198/jasa.2009.0008},
   Key = {fds266688}
}
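
One way to read the strategy sketched in the abstract above: the scientific
model supplies no likelihood of its own, so its parameters \theta enter the
likelihood of the richly parameterized statistical model through a
simulation-defined map. Schematically (our paraphrase, not the paper's
notation):

\[
  g(\theta) \;=\; \arg\max_{\eta}\;
    \frac{1}{N}\sum_{t=1}^{N}
    \log f\bigl(\hat y_t(\theta)\mid \hat x_t(\theta),\eta\bigr),
  \qquad
  p(\theta\mid y) \;\propto\; f\bigl(y\mid g(\theta)\bigr)\,\pi(\theta),
\]

where f is the statistical model of item (5), \{\hat y_t(\theta)\} is a long
simulation from the scientific model at \theta, and g is the map referred to
in the closing sentences of the abstract.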

@article{fds266690,
   Author = {EM Aldrich and AR Gallant},
   Title = {Habit, long-run risks, prospect? A statistical
             inquiry},
   Journal = {Journal of Financial Econometrics},
   Volume = {9},
   Number = {4},
   Pages = {589-618},
   Year = {2011},
   ISSN = {1479-8409},
   url = {http://dx.doi.org/10.1093/jjfinec/nbq034},
   Abstract = {We use recently proposed Bayesian statistical methods to
             compare the habit persistence asset pricing model of
             Campbell and Cochrane, the long-run risks model of Bansal
             and Yaron, and the prospect theory model of Barberis, Huang,
             and Santos. We improve these Bayesian methods so that they
             can accommodate highly nonlinear models such as the three
             aforementioned. Our substantive results can be stated
             succinctly: If one believes that the extreme consumption
             fluctuations of 1930-1949 can recur, although they have not
             in the last sixty years even counting the current recession,
             then the long-run risks model is preferred. Otherwise, the
             habit model is preferred. © The Author 2011. Published by
             Oxford University Press. All rights reserved.},
   Doi = {10.1093/jjfinec/nbq034},
   Key = {fds266690}
}

