Department of Mathematics

Math @ Duke







Publications of Ingrid Daubechies

%% Papers Published   
@article{fds287111,
   Author = {Daubechies, I},
   Title = {An application of hyperdifferential operators to holomorphic
             quantization},
   Journal = {Letters in Mathematical Physics},
   Volume = {2},
   Number = {6},
   Pages = {459-469},
   Publisher = {Springer Nature America, Inc},
   Year = {1978},
   Month = {November},
   ISSN = {0377-9017},
   url = {http://dx.doi.org/10.1007/BF00398498},
   Abstract = {We use a hyperdifferential operator approach to study
             holomorphic quantization. We explicitly construct the
             Hilbert space operator which corresponds to a given
             holomorphic function. We further construct the adjoint and
             products of such operators and we discuss some special cases
of self-adjointness. © 1978 D. Reidel Publishing
             Company.},
   Doi = {10.1007/BF00398498},
   Key = {fds287111}
}

@article{fds287110,
   Author = {Aerts, D and Daubechies, I},
   Title = {A characterization of subsystems in physics},
   Journal = {Letters in Mathematical Physics},
   Volume = {3},
   Number = {1},
   Pages = {11-17},
   Publisher = {Springer Nature},
   Year = {1979},
   Month = {January},
   ISSN = {0377-9017},
   url = {http://dx.doi.org/10.1007/BF00959533},
   Abstract = {Working within the framework of the propositional system
             formalism, we use a previous study [1] of the description of
             two independent physical systems as one big physical system
             to derive a characterization of a (non-interacting) physical
             subsystem. We discuss the classical case and the quantum
             case. © 1979 D. Reidel Publishing Company.},
   Doi = {10.1007/BF00959533},
   Key = {fds287110}
}

@article{fds287112,
   Author = {Aerts, D and Daubechies, I},
   Title = {A mathematical condition for a sublattice of a propositional
             system to represent a physical subsystem, with a physical
             interpretation},
   Journal = {Letters in Mathematical Physics},
   Volume = {3},
   Number = {1},
   Pages = {19-27},
   Publisher = {Springer Nature},
   Year = {1979},
   Month = {January},
   ISSN = {0377-9017},
   url = {http://dx.doi.org/10.1007/BF00959534},
   Abstract = {We display three equivalent conditions for a sublattice,
              isomorphic to a propositional system P(ℋ₁), of the
              propositional system P(ℋ) of a quantum system to be the
              representation of a physical subsystem (see [1]). These
              conditions are valid for dim ℋ₁ ≥ 3. We prove that one
              of them is still necessary and sufficient if dim ℋ₁ < 3.
              A physical interpretation of this condition is given. ©
              1979 D. Reidel Publishing Company.},
   Doi = {10.1007/BF00959534},
   Key = {fds287112}
}

@article{fds287113,
   Author = {Daubechies, I and Grossmann, A},
   Title = {An integral transform related to quantization},
   Journal = {Journal of Mathematical Physics},
   Volume = {21},
   Number = {8},
   Pages = {2080-2090},
   Publisher = {AIP Publishing},
   Year = {1979},
   Month = {January},
   ISSN = {0022-2488},
   url = {http://dx.doi.org/10.1063/1.524702},
   Abstract = {We study in some detail the correspondence between a
              function f on phase space and the matrix elements (Qf)(a,b)
              of its quantized counterpart Qf between the coherent states
              |a⟩ and |b⟩. It is an integral transform: (Qf)(a,b) =
              ∫⟨a,b|v⟩ f(v) dv, which resembles in many ways the
              integral transform of Bargmann. We obtain the matrix
              elements of Qf between harmonic oscillator states as the
              Fourier coefficients of f with respect to an explicit
              orthonormal system. © 1980 American Institute of
              Physics.},
   Doi = {10.1063/1.524702},
   Key = {fds287113}
}

@article{fds287114,
   Author = {Daubechies, I},
   Title = {On the distributions corresponding to bounded operators in
             the Weyl quantization},
   Journal = {Communications in Mathematical Physics},
   Volume = {75},
   Number = {3},
   Pages = {229-238},
   Publisher = {Springer Nature},
   Year = {1980},
   Month = {October},
   ISSN = {0010-3616},
   url = {http://dx.doi.org/10.1007/BF01212710},
   Abstract = {Using properties of an integral transform giving directly
             the matrix elements of a quantum mechanical operator from
             the corresponding classical function, we restrict the class
             of distributions corresponding to bounded operators. As a
             consequence, we can exhibit a class of functions yielding
             trace-class operators, and give a bound on their trace-norm.
             © 1980 Springer-Verlag.},
   Doi = {10.1007/BF01212710},
   Key = {fds287114}
}

@article{fds287115,
   Author = {Daubechies, I and Klauder, JR},
   Title = {Constructing measures for path integrals},
   Journal = {Journal of Mathematical Physics},
   Volume = {23},
   Number = {10},
   Pages = {1806-1822},
   Publisher = {AIP Publishing},
   Year = {1981},
   Month = {January},
   ISSN = {0022-2488},
   url = {http://dx.doi.org/10.1063/1.525234},
   Abstract = {The overcompleteness of the coherent states for the
             Heisenberg-Weyl group implies that many different integral
             kernels can be used to represent the same operator. Within
             such an equivalence class we construct an integral kernel to
             represent the quantum-mechanical evolution operator for
             certain dynamical systems in the form of a path integral
             that involves genuine (Wiener) measures on continuous
             phase-space paths. To achieve this goal it is necessary to
             employ an expression for the classical action different from
             the usual one. © 1982 American Institute of
             Physics.},
   Doi = {10.1063/1.525234},
   Key = {fds287115}
}

@article{fds287116,
   Author = {Daubechies, I},
   Title = {Continuity statements and counterintuitive examples in
             connection with Weyl quantization},
   Journal = {Journal of Mathematical Physics},
   Volume = {24},
   Number = {6},
   Pages = {1453-1461},
   Publisher = {AIP Publishing},
   Year = {1982},
   Month = {January},
   ISSN = {0022-2488},
   url = {http://dx.doi.org/10.1063/1.525882},
   Abstract = {We use the properties of an integral transform relating a
             classical function f with the matrix elements between
             coherent states of its quantal counterpart Q f, to derive
             continuity properties of the Weyl transform from classes of
             distributions to classes of quadratic forms. We also give
             examples of pathological behavior of the Weyl transform with
             respect to other topologies (e.g., bounded functions leading
             to unbounded operators). © 1983 American Institute of
             Physics.},
   Doi = {10.1063/1.525882},
   Key = {fds287116}
}

@article{fds287117,
   Author = {Daubechies, I and Grossmann, A and Reignier, J},
   Title = {An integral transform related to quantization. II. Some
             mathematical properties},
   Journal = {Journal of Mathematical Physics},
   Volume = {24},
   Number = {2},
   Pages = {239-254},
   Publisher = {AIP Publishing},
   Year = {1982},
   Month = {January},
   ISSN = {0022-2488},
   url = {http://dx.doi.org/10.1063/1.525699},
   Abstract = {We study in more detail the mathematical properties of the
             integral transform relating the matrix elements between
             coherent states of a quantum operator to the corresponding
             classical function. Explicit families of Hilbert spaces are
             constructed between which the integral transform is an
             isomorphism. © 1983 American Institute of
             Physics.},
   Doi = {10.1063/1.525699},
   Key = {fds287117}
}

@article{fds287118,
   Author = {Klauder, JR and Daubechies, I},
   Title = {Measures for path integrals},
   Journal = {Physical Review Letters},
   Volume = {48},
   Number = {3},
   Pages = {117-120},
   Year = {1982},
   Month = {January},
   ISSN = {0031-9007},
   url = {http://dx.doi.org/10.1103/PhysRevLett.48.117},
   Abstract = {By exploiting the overcompleteness of coherent states,
              expressions are presented for path integrals in terms of
              genuine (Wiener) path-space measures for driven harmonic
              oscillators which, when projected onto the subspace spanned
              by coherent-state matrix elements, yield the appropriate
              quantum mechanical propagator. © 1982 The American Physical
              Society.},
   Doi = {10.1103/PhysRevLett.48.117},
   Key = {fds287118}
}

@article{fds287121,
   Author = {Daubechies, I and Klauder, JR},
   Title = {Measures for more quadratic path integrals},
   Journal = {Letters in Mathematical Physics},
   Volume = {7},
   Number = {3},
   Pages = {229-234},
   Publisher = {Springer Nature America, Inc},
   Year = {1983},
   Month = {May},
   ISSN = {0377-9017},
   url = {http://dx.doi.org/10.1007/BF00400438},
   Abstract = {We show that the coherent state matrix elements of the
             quantum mechanical propagator for all quadratic Hamiltonians
             may be represented as the limit of path integrals with
             respect to appropriately modified Wiener measures as the
             associated diffusion constant tends to infinity. © 1983 D.
             Reidel Publishing Company.},
   Doi = {10.1007/BF00400438},
   Key = {fds287121}
}

@article{fds287119,
   Author = {Daubechies, I},
   Title = {An uncertainty principle for fermions with generalized
             kinetic energy},
   Journal = {Communications in Mathematical Physics},
   Volume = {90},
   Number = {4},
   Pages = {511-520},
   Publisher = {Springer Nature},
   Year = {1983},
   Month = {December},
   ISSN = {0010-3616},
   url = {http://dx.doi.org/10.1007/BF01216182},
   Abstract = {We derive semiclassical upper bounds for the number of bound
              states and the sum of negative eigenvalues of the
              one-particle Hamiltonians h = f(-i∇) + V(x) acting on
              L²(ℝⁿ). These bounds are then used to derive a lower
              bound on the kinetic energy of an N-fermion wavefunction
              ψ. We discuss two examples in more detail: f(p) = |p| and
              f(p) = (p² + m²)^(1/2) - m, both in three dimensions. ©
              1983 Springer-Verlag.},
   Doi = {10.1007/BF01216182},
   Key = {fds287119}
}

@article{fds287120,
   Author = {Daubechies, I and Lieb, EH},
   Title = {One-electron relativistic molecules with Coulomb
             interaction},
   Journal = {Communications in Mathematical Physics},
   Volume = {90},
   Number = {4},
   Pages = {497-510},
   Publisher = {Springer Nature},
   Year = {1983},
   Month = {December},
   ISSN = {0010-3616},
   url = {http://dx.doi.org/10.1007/BF01216181},
   Abstract = {As an approximation to a relativistic one-electron molecule,
              we study the operator H = (-Δ)^(1/2) - e² Σj Zj|x - Rj|⁻¹,
              with Zj ≥ 0, e⁻² = 137.04. H is bounded below if and
              only if e²Zj ≤ 2/π for all j. Assuming this condition,
              the system is unstable when e² Σj Zj > 2/π in the sense
              that E₀ = inf spec(H) → -∞ as the Rj → 0, all j. We
              prove that the nuclear Coulomb repulsion more than restores
              stability. We also show that E₀ is an increasing function
              of the internuclear distances |Ri - Rj|. © 1983
              Springer-Verlag.},
   Doi = {10.1007/BF01216181},
   Key = {fds287120}
}

@article{fds287122,
   Author = {Klauder, JR and Daubechies, I},
   Title = {Quantum Mechanical Path Integrals with Wiener Measures for
             all Polynomial Hamiltonians},
   Journal = {Physical Review Letters},
   Volume = {52},
   Number = {14},
   Pages = {1161-1164},
   Publisher = {American Physical Society (APS)},
   Year = {1984},
   Month = {January},
   ISSN = {0031-9007},
   url = {http://dx.doi.org/10.1103/PhysRevLett.52.1161},
   Abstract = {We construct arbitrary matrix elements of the quantum
             evolution operator for a wide class of self-adjoint
             canonical Hamiltonians, including those which are polynomial
             in the Heisenberg operators, as the limit of well defined
             path integrals involving Wiener measure on phase space, as
             the diffusion constant diverges. A related construction
             achieves a similar result for an arbitrary spin Hamiltonian.
             © 1984 The American Physical Society.},
   Doi = {10.1103/PhysRevLett.52.1161},
   Key = {fds287122}
}

@article{fds287123,
   Author = {Daubechies, I and Lieb, EH},
   Title = {Relativistic Molecules with Coulomb Interaction},
   Journal = {North-Holland Mathematics Studies},
   Volume = {92},
   Number = {C},
   Pages = {143-148},
   Publisher = {Elsevier},
   Year = {1984},
   Month = {January},
   ISSN = {0304-0208},
   url = {http://dx.doi.org/10.1016/S0304-0208(08)73689-2},
   Abstract = {As an approximation to a relativistic one-electron molecule,
              we study the operator H = (-Δ)^(1/2) - e² Σj Zj|x - Rj|⁻¹,
              with Zj ≥ 0 for all j. H is bounded below iff e²Zj ≤ 2/π
              for all j. Under this condition, we show that 1) the system
              is stable when the nuclear repulsion is taken into account,
              where E₀ = inf spec H; and 2) the ground state energy E₀
              is an increasing function of the internuclear distances
              |Rj - Rk|. © 1984, Elsevier Science & Technology. All
              rights reserved.},
   Doi = {10.1016/S0304-0208(08)73689-2},
   Key = {fds287123}
}

@article{fds287124,
   Author = {Daubechies, I},
   Title = {One electron molecules with relativistic kinetic energy:
             Properties of the discrete spectrum},
   Journal = {Communications in Mathematical Physics},
   Volume = {94},
   Number = {4},
   Pages = {523-535},
   Publisher = {Springer Nature},
   Year = {1984},
   Month = {December},
   ISSN = {0010-3616},
   url = {http://dx.doi.org/10.1007/BF01403885},
   Abstract = {We discuss the discrete spectrum of the operator HK(c).
              More specifically, we study 1) the behaviour of the
              eigenvalues when the internuclear distances contract, 2) the
              existence of a c-independent lower bound for HK(c) - mc²,
              and 3) the nonrelativistic limit of the eigenvalues of
              HK(c) - mc². © 1984 Springer-Verlag.},
   Doi = {10.1007/BF01403885},
   Key = {fds287124}
}

@article{fds287125,
   Author = {Daubechies, I and Klauder, JR},
   Title = {Quantum-mechanical path integrals with Wiener measure for
             all polynomial Hamiltonians. II},
   Journal = {Journal of Mathematical Physics},
   Volume = {26},
   Number = {9},
   Pages = {2239-2256},
   Publisher = {AIP Publishing},
   Year = {1985},
   Month = {January},
   ISSN = {0022-2488},
   url = {http://dx.doi.org/10.1063/1.526803},
   Abstract = {The coherent-state representation of quantum-mechanical
             propagators as well-defined phase-space path integrals
             involving Wiener measure on continuous phase-space paths in
             the limit that the diffusion constant diverges is formulated
             and proved. This construction covers a wide class of
             self-adjoint Hamiltonians, including all those which are
             polynomials in the Heisenberg operators; in fact, this
             method also applies to maximal symmetric Hamiltonians that
             do not possess a self-adjoint extension. This construction
             also leads to a natural covariance of the path integral
             under canonical transformations. An entirely parallel
             discussion for spin variables leads to the representation of
             the propagator for an arbitrary spin-operator Hamiltonian as
             well-defined path integrals involving Wiener measure on the
             unit sphere, again in the limit that the diffusion constant
             diverges. © 1985 American Institute of Physics.},
   Doi = {10.1063/1.526803},
   Key = {fds287125}
}

@article{fds287126,
   Author = {Daubechies, I and Grossmann, A and Meyer, Y},
   Title = {Painless nonorthogonal expansions},
   Journal = {Journal of Mathematical Physics},
   Volume = {27},
   Number = {5},
   Pages = {1271-1283},
   Publisher = {AIP Publishing},
   Year = {1986},
   Month = {January},
   ISSN = {0022-2488},
   url = {http://dx.doi.org/10.1063/1.527388},
   Abstract = {In a Hilbert space ℋ, discrete families of vectors {hj}
              with the property that f = ∑j ⟨hj|f⟩ hj for every f in
              ℋ are considered. This expansion formula is obviously true
              if the family is an orthonormal basis of ℋ, but it can
              also hold in situations where the hj are not mutually
              orthogonal and are "overcomplete." The two classes of
              examples studied here are (i) appropriate sets of
              Weyl-Heisenberg coherent states, based on certain
              (non-Gaussian) fiducial vectors, and (ii) analogous families
              of affine coherent states. It is believed that such
              "quasiorthogonal expansions" will be a useful tool in many
              areas of theoretical physics and applied mathematics. ©
              1986 American Institute of Physics.},
   Doi = {10.1063/1.527388},
   Key = {fds287126}
}
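
A minimal finite-dimensional illustration of such a quasiorthogonal expansion, assuming nothing from the paper beyond the reconstruction formula itself: the three-vector "Mercedes-Benz" tight frame in ℝ², where f = (2/3) ∑j ⟨hj|f⟩ hj holds even though the hj are neither orthogonal nor linearly independent (Python sketch):

import numpy as np

# Three unit vectors at 120-degree angles form a tight frame for R^2:
# sum_j <h_j, f> h_j = (3/2) f, so f = (2/3) sum_j <h_j, f> h_j.
angles = np.pi / 2 + np.array([0.0, 2 * np.pi / 3, 4 * np.pi / 3])
H = np.stack([np.cos(angles), np.sin(angles)], axis=1)  # rows are the h_j

f = np.array([0.7, -1.3])
coeffs = H @ f                       # expansion coefficients <h_j, f>
f_rec = (2.0 / 3.0) * H.T @ coeffs   # overcomplete, non-orthogonal, yet exact
assert np.allclose(f_rec, f)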

@article{fds287127,
   Author = {Daubechies, I and Klauder, JR and Paul, T},
   Title = {Wiener measures for path integrals with affine kinematic
             variables},
   Journal = {Journal of Mathematical Physics},
   Volume = {28},
   Number = {1},
   Pages = {85-102},
   Publisher = {AIP Publishing},
   Year = {1987},
   Month = {January},
   ISSN = {0022-2488},
   url = {http://dx.doi.org/10.1063/1.527812},
   Abstract = {The results obtained earlier have been generalized to show
             that the path integral for the affine coherent state matrix
             element of a unitary evolution operator exp(-iTH) can be
             written as a well-defined Wiener integral, involving Wiener
             measure on the Lobachevsky half-plane, in the limit that the
             diffusion constant diverges. This approach works for a wide
              class of Hamiltonians, including, e.g., -d²/dx² + V(x) on
              L²(ℝ₊), with V sufficiently singular at x = 0. © 1987
             American Institute of Physics.},
   Doi = {10.1063/1.527812},
   Key = {fds287127}
}

@article{fds287130,
   Author = {Daubechies, I},
   Title = {Time-Frequency Localization Operators: A Geometric Phase
             Space Approach},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {34},
   Number = {4},
   Pages = {605-612},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1988},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.9761},
   Abstract = {We define a set of operators which localize in both time and
             frequency. These operators are similar to but different from
             the low-pass time-limiting operators, the singular functions
             of which are the prolate spheroidal wave functions. Our
             construction differs from the usual approach in that we
             treat the time-frequency plane as one geometric whole (phase
             space) rather than as two separate spaces. For disk-shaped
             or ellipse-shaped domains in the time-frequency plane, the
             associated localization operators are remarkably simple.
             Their eigenfunctions are Hermite functions, and the
             corresponding eigenvalues are given by simple explicit
             formulas involving the incomplete gamma functions. © 1988
             IEEE},
   Doi = {10.1109/18.9761},
   Key = {fds287130}
}
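
A sketch of the eigenvalue formula described in this abstract, assuming the normalization in which the eigenvalue attached to the k-th Hermite function for a disk of radius R is the regularized lower incomplete gamma function P(k+1, R²); the exact argument of P depends on the phase-space scaling convention:

import numpy as np
from scipy.special import gammainc  # regularized lower incomplete gamma P(a, x)

R = 3.0                       # disk radius; the normalization is an assumption here
k = np.arange(40)
lam = gammainc(k + 1, R**2)   # eigenvalue for the k-th Hermite eigenfunction
# Eigenvalues sit near 1 for states "inside" the disk and decay sharply outside;
# under this convention their sum equals R**2 exactly.
print(lam[:3], lam[35:])
print(lam.sum())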

@article{fds318289,
   Author = {Daubechies, I},
   Title = {Orthonormal bases of compactly supported
             wavelets},
   Journal = {Communications on Pure and Applied Mathematics},
   Volume = {41},
   Number = {7},
   Pages = {909-996},
   Publisher = {WILEY},
   Year = {1988},
   Month = {January},
   url = {http://dx.doi.org/10.1002/cpa.3160410705},
   Abstract = {We construct orthonormal bases of compactly supported
             wavelets, with arbitrarily high regularity. The order of
             regularity increases linearly with the support width. We
             start by reviewing the concept of multiresolution analysis
             as well as several algorithms in vision decomposition and
             reconstruction. The construction then follows from a
             synthesis of these different approaches. Copyright © 1988
             Wiley Periodicals, Inc., A Wiley Company},
   Doi = {10.1002/cpa.3160410705},
   Key = {fds318289}
}
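
The shortest instance of the construction is the four-tap filter now known as D4, with support width 3 and two vanishing moments. A quick numerical check of its defining conditions (a sketch; the closed-form coefficients are standard):

import numpy as np

s3 = np.sqrt(3.0)
h = np.array([1 + s3, 3 + s3, 3 - s3, 1 - s3]) / (4 * np.sqrt(2.0))  # D4 low-pass
g = h[::-1] * np.array([1.0, -1.0, 1.0, -1.0])  # mirror high-pass filter

assert np.isclose(h.sum(), np.sqrt(2.0))        # normalization
assert np.isclose(h @ h, 1.0)                   # unit norm
assert np.isclose(h[:2] @ h[2:], 0.0)           # orthogonal to its even shifts
assert np.isclose(g.sum(), 0.0)                 # first vanishing moment
assert np.isclose(g @ np.arange(4.0), 0.0)      # second vanishing moment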

@article{fds318290,
   Author = {Daubechies, I and Grossmann, A},
   Title = {Frames in the Bargmann space of entire functions},
   Journal = {Communications on Pure and Applied Mathematics},
   Volume = {41},
   Number = {2},
   Pages = {151-164},
   Publisher = {WILEY},
   Year = {1988},
   Month = {January},
   url = {http://dx.doi.org/10.1002/cpa.3160410203},
   Abstract = {We look at the decomposition of arbitrary f in L²(ℝ) in
              terms of the family of functions φmn(x) = π^(-1/4)
              exp{-(1/2)imnab + imax - (1/2)(x - nb)²}, with a, b > 0.
              We derive bounds and explicit formulas for the minimal
              expansion coefficients in the case where ab = 2π/N, N an
              integer ≥ 2. Transported to the Hilbert space F of entire
              functions introduced by V. Bargmann, these results are
              expressed as certain inequalities. We conjecture that these
              inequalities remain true for all a, b such that ab < 2π.
              Copyright © 1988 Wiley Periodicals, Inc., A Wiley
              Company},
   Doi = {10.1002/cpa.3160410203},
   Key = {fds318290}
}

@article{fds287128,
   Author = {Daubechies, I and Paul, T},
   Title = {Time-frequency localisation operators-a geometric phase
             space approach: II. The use of dilations},
   Journal = {Inverse Problems},
   Volume = {4},
   Number = {3},
   Pages = {661-680},
   Publisher = {IOP Publishing},
   Year = {1988},
   Month = {December},
   ISSN = {0266-5611},
   url = {http://dx.doi.org/10.1088/0266-5611/4/3/009},
   Abstract = {Operators which localise both in time and frequency are
             constructed. They restrict to a finite time interval and cut
             off low as well as high frequencies (band-pass filters).
             Explicit expressions for eigenvalues and eigenfunctions
             (Laguerre functions) are given.},
   Doi = {10.1088/0266-5611/4/3/009},
   Key = {fds287128}
}

@article{fds287129,
   Author = {Daubechies, I},
   Title = {Wavelet transform, time-frequency localization and signal
             analysis},
   Volume = {25},
   Number = {13},
   Pages = {42},
   Year = {1988},
   Month = {December},
   Abstract = {Summary form only given, as follows. Two different
             procedures are studied by which a frequency analysis of a
             time-dependent signal can be effected, locally in time. The
             first procedure is the short-time or windowed Fourier
             transform; the second is the wavelet transform, in which
             high-frequency components are studied with sharper time
             resolution than low-frequency components. The similarities
             and the differences between these two methods are discussed.
             For both schemes a detailed study is made of the
             reconstruction method and its stability, as a function of
             the chosen time-frequency density. Finally the notion of
             time-frequency localization is made precise, within this
             framework, by two localization theorems.},
   Key = {fds287129}
}

@article{fds287131,
   Author = {Daubechies, I},
   Title = {Wavelets: A tool for time-frequency analysis},
   Pages = {98},
   Year = {1989},
   Month = {December},
   Abstract = {Summary form only given. In the simplest case, a family of
              wavelets is generated by dilating and translating a single
              function of one variable: h_{a,b}(x) = |a|^(-1/2)
              h((x - b)/a). The parameters a and b may vary continuously,
              or be restricted to a discrete lattice of values a = a0^m,
              b = n a0^m b0. If the dilation and translation steps a0 and
              b0 are not too large, then any L²-function can be
              completely characterized by its inner products with the
              elements of such a discrete lattice of wavelets. Moreover,
              one can construct numerically stable algorithms for the
              reconstruction of a function from these inner products (the
              wavelet coefficients). For special choices of the wavelet
              h, decomposition and reconstruction can be done very fast,
              via a tree algorithm. The wavelet coefficients of a
              function give a time-frequency decomposition of the
              function, with higher time-resolution for high-frequency
              than for low-frequency components. The analysis can easily
              be extended to higher dimensions. An especially important
              case is orthonormal bases of wavelets. It turns out that
              there exist functions h, with very good regularity and
              decay properties, such that the discrete lattice with
              a0 = 2 and b0 = 1 leads to an orthonormal set of functions
              h_{mn} that spans all of L²(ℝ). Such orthonormal bases
              are always associated with efficient tree
              algorithms.},
   Key = {fds287131}
}
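
A sketch of the discrete wavelet lattice described above, with the Mexican-hat wavelet as an illustrative h (not one of the special choices admitting fast tree algorithms), a0 = 2 and b0 = 1:

import numpy as np

def h_wavelet(x):
    return (1.0 - x**2) * np.exp(-x**2 / 2.0)  # Mexican hat (2nd Gaussian derivative)

x = np.linspace(-40.0, 40.0, 20001)
dx = x[1] - x[0]
f = np.exp(-((x - 3.0) ** 2)) * np.cos(5.0 * x)  # a test signal

coeffs = {}
for m in range(-2, 3):            # dilations a = a0^m with a0 = 2
    a = 2.0 ** m
    for n in range(-8, 9):        # translations b = n * a0^m * b0 with b0 = 1
        h_mn = abs(a) ** -0.5 * h_wavelet((x - n * a) / a)
        coeffs[(m, n)] = float(np.sum(h_mn * f) * dx)  # inner product <h_{a,b}, f>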

@article{fds287134,
   Author = {Daubechies, I},
   Title = {The Wavelet Transform, Time-Frequency Localization and
             Signal Analysis},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {36},
   Number = {5},
   Pages = {961-1005},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1990},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.57199},
   Abstract = {Two different procedures are studied by which a frequency
             analysis of a time-dependent signal can be effected, locally
             in time. The first procedure is the short-time or windowed
             Fourier transform, the second is the “wavelet
             transform,” in which high frequency components are studied
             with sharper time resolution than low frequency components.
             The similarities and the differences between these two
             methods are discussed. For both schemes a detailed study is
             made of the reconstruction method and its stability, as a
             function of the chosen time-frequency density. Finally the
             notion of “time-frequency localization” is made precise,
             within this framework, by two localization theorems. © 1990
             IEEE},
   Doi = {10.1109/18.57199},
   Key = {fds287134}
}

@article{fds287133,
   Author = {Antonini, M and Barlaud, M and Mathieu, P and Daubechies,
             I},
   Title = {Image coding using vector quantization in the wavelet
             transform domain},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {4},
   Pages = {2297-2300},
   Publisher = {IEEE},
   Year = {1990},
   Month = {December},
   url = {http://dx.doi.org/10.1109/icassp.1990.116036},
   Abstract = {A two-step scheme for image compression that takes into
             account psychovisual features in space and frequency domains
             is proposed. A wavelet transform is first used in order to
             obtain a set of orthonormal subclasses of images; the
             original image is decomposed at different scales using a
             pyramidal algorithm architecture. The decomposition is along
             the vertical and horizontal directions and maintains the
             number of pixels required to describe the image at a
             constant. Second, according to Shannon's rate-distortion
             theory, the wavelet coefficients are vector quantized using
             a multiresolution codebook. To encode the wavelet
             coefficients, a noise-shaping bit-allocation procedure which
             assumes that details at high resolution are less visible to
             the human eye is proposed. In order to allow the receiver to
             recognize a picture as quickly as possible at minimum cost,
             a progressive transmission scheme is presented. The wavelet
             transform is particularly well adapted to progressive
             transmission.},
   Doi = {10.1109/icassp.1990.116036},
   Key = {fds287133}
}

@article{fds287135,
   Author = {Antonini, M and Barlaud, M and Mathieu, P and Daubechies,
             I},
   Title = {Image coding using wavelet transform.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {1},
   Number = {2},
   Pages = {205-220},
   Year = {1992},
   Month = {January},
   ISSN = {1057-7149},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/18296155},
   Abstract = {A scheme for image compression that takes into account
             psychovisual features both in the space and frequency
             domains is proposed. This method involves two steps. First,
             a wavelet transform used in order to obtain a set of
             biorthogonal subclasses of images: the original image is
             decomposed at different scales using a pyramidal algorithm
             architecture. The decomposition is along the vertical and
             horizontal directions and maintains constant the number of
             pixels required to describe the image. Second, according to
             Shannon's rate distortion theory, the wavelet coefficients
             are vector quantized using a multiresolution codebook. To
             encode the wavelet coefficients, a noise shaping bit
             allocation procedure which assumes that details at high
             resolution are less visible to the human eye is proposed. In
             order to allow the receiver to recognize a picture as
             quickly as possible at minimum cost, a progressive
             transmission scheme is presented. It is shown that the
             wavelet transform is particularly well adapted to
             progressive transmission.},
   Doi = {10.1109/83.136597},
   Key = {fds287135}
}
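
A sketch of one level of the pyramidal decomposition, with the simple Haar filter pair standing in for the paper's biorthogonal spline filters; note that the pixel count is preserved, as the abstract states:

import numpy as np

def haar2d_level(img):
    """One level of a separable 2-D decomposition into LL, LH, HL, HH subbands."""
    lo = (img[:, 0::2] + img[:, 1::2]) / 2.0   # horizontal low-pass
    hi = (img[:, 0::2] - img[:, 1::2]) / 2.0   # horizontal high-pass
    ll = (lo[0::2, :] + lo[1::2, :]) / 2.0     # then filter vertically
    lh = (lo[0::2, :] - lo[1::2, :]) / 2.0
    hl = (hi[0::2, :] + hi[1::2, :]) / 2.0
    hh = (hi[0::2, :] - hi[1::2, :]) / 2.0
    return ll, lh, hl, hh

img = np.random.rand(512, 512)
ll, lh, hl, hh = haar2d_level(img)   # recurse on ll for a multi-scale pyramid
assert 4 * ll.size == img.size       # pixel count unchanged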

@article{fds318288,
   Author = {Cohen, A and Daubechies, I and Feauveau, J},
   Title = {Biorthogonal bases of compactly supported
             wavelets},
   Journal = {Communications on Pure and Applied Mathematics},
   Volume = {45},
   Number = {5},
   Pages = {485-560},
   Publisher = {WILEY},
   Year = {1992},
   Month = {January},
   url = {http://dx.doi.org/10.1002/cpa.3160450502},
   Abstract = {Orthonormal bases of compactly supported wavelets
              correspond to subband coding schemes with exact
             reconstruction in which the analysis and synthesis filters
             coincide. We show here that under fairly general conditions,
             exact reconstruction schemes with synthesis filters
             different from the analysis filters give rise to two dual
             Riesz bases of compactly supported wavelets. We give
             necessary and sufficient conditions for biorthogonality of
              the corresponding scaling functions, and we present
              sufficient conditions for the decay of their Fourier
             transforms. We study the regularity of these biorthogonal
             bases. We provide several families of examples, all
             symmetric (corresponding to “linear phase” filters). In
             particular we can construct symmetric biorthogonal wavelet
              bases with arbitrarily high preassigned regularity; we also
             show how to construct symmetric biorthogonal wavelet bases
             “close” to a (nonsymmetric) orthonormal basis. Copyright
             © 1992 Wiley Periodicals, Inc., A Wiley
             Company},
   Doi = {10.1002/cpa.3160450502},
   Key = {fds318288}
}

@article{fds330517,
   Author = {Moayeri, N and Daubechies, I and Song, Q and Wang,
             HS},
   Title = {Wavelet transform image coding using trellis coded vector
             quantization},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {4},
   Pages = {405-408},
   Publisher = {IEEE},
   Year = {1992},
   Month = {January},
   ISBN = {0780305329},
   url = {http://dx.doi.org/10.1109/ICASSP.1992.226350},
   Abstract = {A combination of trellis coded quantization (TCQ) and its
             vector alphabet generalization TCVQ is used to code the
             coefficients resulting from a biorthogonal wavelet transform
              of an image. TCVQ is a vector trellis coder with fixed rate,
             very good rate-distortion performance, and yet reasonable
             implementation complexity. The experimental results show
             that the Lena image can be coded by this coding system at
             the rate of 0.265 bpp to yield a peak signal-to-noise ratio
             (PSNR) of about 29 dB. This PSNR is about 3 dB larger than
             that obtained by a coding system of the same rate that uses
              VQ to code the wavelet transform coefficients. Naturally,
             the performance of the TCQ/TCVQ wavelet transform coder can
             be improved if entropy-coded TCQ and TCVQ coders are
             employed.},
   Doi = {10.1109/ICASSP.1992.226350},
   Key = {fds330517}
}

@article{fds317217,
   Author = {Cohen, A and Daubechies, I},
   Title = {A stability criterion for biorthogonal wavelet bases and
             their related subband coding scheme},
   Journal = {Duke Mathematical Journal},
   Volume = {68},
   Number = {2},
   Pages = {313-335},
   Publisher = {Duke University Press},
   Year = {1992},
   Month = {January},
   ISSN = {0012-7094},
   url = {http://dx.doi.org/10.1215/S0012-7094-92-06814-1},
   Doi = {10.1215/S0012-7094-92-06814-1},
   Key = {fds317217}
}

@article{fds287132,
   Author = {Daubechies, I and Lagarias, JC},
   Title = {Sets of matrices all infinite products of which
             converge},
   Journal = {Linear Algebra and Its Applications},
   Volume = {161},
   Number = {C},
   Pages = {227-263},
   Year = {1992},
   Month = {January},
   ISSN = {0024-3795},
   url = {http://dx.doi.org/10.1016/0024-3795(92)90012-Y},
   Abstract = {An infinite product ∏_{i=1}^∞ Mi of matrices converges
              (on the right) if lim_{i→∞} M1 ... Mi exists. A set
              Σ = {Ai : i ≥ 1} of n × n matrices is called an RCP set
              (right-convergent product set) if all infinite products
              with each element drawn from Σ converge. Such sets of
              matrices arise in constructing self-similar objects like
              von Koch's snowflake curve, in various interpolation
              schemes, in constructing wavelets of compact support, and
              in studying nonhomogeneous Markov chains. This paper gives
              necessary conditions and also some sufficient conditions
              for a set Σ to be an RCP set. These are conditions on the
              eigenvalues and left eigenspaces of matrices in Σ and
              finite products of these matrices. Necessary and sufficient
              conditions are given for a finite set Σ to be an RCP set
              having a limit function M_Σ(d) = ∏_{i=1}^∞ A_{d_i},
              where d = (d1, ..., dn, ...), which is a continuous
              function on the space of all sequences d with the sequence
              topology. Finite RCP sets of column-stochastic matrices are
              completely characterized. Some results are given on the
              problem of algorithmically deciding if a given set Σ is an
              RCP set. © 1992.},
   Doi = {10.1016/0024-3795(92)90012-Y},
   Key = {fds287132}
}
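
A numerical probe (not a proof) of the RCP property for a pair of column-stochastic matrices, the class the abstract says is completely characterized. For this particular pair each right factor halves the distance between the two columns of the partial product, so every infinite right product converges to a rank-one limit:

import numpy as np

A = [np.array([[1.0, 0.5],
               [0.0, 0.5]]),
     np.array([[0.5, 0.0],
               [0.5, 1.0]])]           # a column-stochastic pair

rng = np.random.default_rng(0)
P = np.eye(2)
for i, d in enumerate(rng.integers(0, 2, size=60), start=1):
    P_next = P @ A[d]
    if i > 55:
        print(i, np.linalg.norm(P_next - P))  # successive partial products settle
    P = P_next
print(P)  # the two columns (nearly) coincide: the limit is rank one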

@article{fds287136,
   Author = {Daubechies, I},
   Title = {Two Theorems on Lattice Expansions},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {39},
   Number = {1},
   Pages = {3-6},
   Year = {1993},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.179336},
   Abstract = {It is shown that there is a trade-off between the smoothness
              and decay properties of the dual functions occurring in the
              lattice expansion problem. More precisely, it is shown that
              if g and γ are dual, then 1) at least one of H^(1/2)g and
              H^(1/2)γ is not in L²(ℝ); 2) at least one of Hg and Hγ
              is not in L²(ℝ). Here, H is the operator -1/(4π²)
              d²/dt² + t². The first result is a generalization of a
              theorem first stated by Balian and independently by Low,
              which was recently rigorously proved by Coifman and Semmes;
              a new, much shorter proof was very recently given by Battle.
              Battle suggests a theorem of type 1), but our result is
              stronger in the sense that certain implicit assumptions made
              by Battle are removed. Result 2) is new and relies heavily
              on the fact that, when G ∈ W^(2,2)(S) with [formula
              omitted] and G(0) = 0, then [formula omitted]. The latter
              result was not known to us and may be of independent
              interest. © 1993 IEEE},
   Doi = {10.1109/18.179336},
   Key = {fds287136}
}

@article{fds287138,
   Author = {Cohen, A and Daubechies, I and Vial, P},
   Title = {Wavelets on the interval and fast wavelet
             transforms},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {1},
   Number = {1},
   Pages = {54-81},
   Publisher = {Elsevier BV},
   Year = {1993},
   Month = {January},
   url = {http://dx.doi.org/10.1006/acha.1993.1005},
   Abstract = {We discuss several constructions of orthonormal wavelet
             bases on the interval, and we introduce a new construction
             that avoids some of the disadvantages of earlier
             constructions. © 1993 Academic Press Inc.},
   Doi = {10.1006/acha.1993.1005},
   Key = {fds287138}
}

@article{fds287137,
   Author = {Daubechies, I and Huang, Y},
   Title = {A decay theorem for refinable functions},
   Journal = {Applied Mathematics Letters},
   Volume = {7},
   Number = {4},
   Pages = {1-4},
   Publisher = {Elsevier BV},
   Year = {1994},
   Month = {January},
   ISSN = {0893-9659},
   url = {http://dx.doi.org/10.1016/0893-9659(94)90001-9},
   Abstract = {We show that a refinable function with absolutely summable
             mask cannot have exponential decay in both time and
             frequency. © 1994.},
   Doi = {10.1016/0893-9659(94)90001-9},
   Key = {fds287137}
}

@article{fds315773,
   Author = {Daubechies, I and Landau, HJ and Landau, Z},
   Title = {Gabor Time-Frequency Lattices and the Wexler-Raz
             Identity},
   Journal = {Journal of Fourier Analysis and Applications},
   Volume = {1},
   Number = {4},
   Pages = {437-478},
   Publisher = {Springer Nature},
   Year = {1994},
   Month = {January},
   ISSN = {1069-5869},
   url = {http://dx.doi.org/10.1007/s00041-001-4018-3},
   Abstract = {Gabor time-frequency lattices are sets of functions of the
              form g_{m,n}(t) = e^(2πimαt) g(t - nβ) generated from a
              given function g by discrete translations in time and
              frequency. They are potential tools for the decomposition
              and handling of signals that, like speech or music, seem
              over short intervals to have well-defined frequencies that,
              however, change with time. It was recently observed that
              the behavior of a lattice (α, β) can be connected to that
              of a dual lattice (1/β, 1/α). Here we establish this
              interesting relationship and study its properties. We then
              clarify the results by applying the theory of von Neumann
              algebras. One outcome is a simple proof that for the family
              {g_{m,n}} to span L²(ℝ), the lattice (α, β) must have
              at least unit density. Finally, we exploit the connection
              between the two lattices to construct expansions having
              improved convergence and localization properties. © 1994,
              Birkhäuser Boston. All rights reserved.},
   Doi = {10.1007/s00041-001-4018-3},
   Key = {fds315773}
}

@article{fds325015,
   Author = {Daubechies, I},
   Title = {Two Recent Results on Wavelets: Wavelet Bases for the
             Interval, and Biorthogonal Wavelets Diagonalizing the
             Derivative Operator},
   Journal = {Wavelet Analysis and Its Applications},
   Volume = {3},
   Number = {C},
   Pages = {237-257},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1016/B978-0-12-632370-2.50013-1},
   Abstract = {The following two questions are often asked by researchers
             interested in applying wavelet bases to concrete numerical
             problems: 1) how does one adapt a wavelet basis on IR to a
             wavelet basis on an interval without terrible edge effects?
             2) how does the wavelet transform deal with the derivative
             operator? This paper reviews several answers to each of
             these questions, including some recent constructions and
             observations. © 1994, Academic Press, Inc.},
   Doi = {10.1016/B978-0-12-632370-2.50013-1},
   Key = {fds325015}
}

@article{fds316148,
   Author = {Friedlander, S and Birman, JS and Daubechies, I},
   Title = {A celebration of women in mathematics},
   Journal = {Notices of the American Mathematical Society},
   Volume = {42},
   Number = {1},
   Pages = {32-42},
   Year = {1995},
   Month = {January},
   ISSN = {0002-9920},
   Key = {fds316148}
}

@article{fds287139,
   Author = {Daubechies, I and Huang, Y},
   Title = {How does truncation of the mask affect a refinable
             function?},
   Journal = {Constructive Approximation},
   Volume = {11},
   Number = {3},
   Pages = {365-380},
   Publisher = {Springer Nature},
   Year = {1995},
   Month = {September},
   ISSN = {0176-4276},
   url = {http://dx.doi.org/10.1007/BF01208560},
   Abstract = {If the mask of a refinable function has infinitely many
             coefficients, or if the coefficients are irrational, then it
             is often replaced by a finite mask with coefficients with
             terminating decimal expansions when it comes to
             applications. This note studies how such truncation affects
             the refinable function. © 1995 Springer-Verlag New York,
             Inc.},
   Doi = {10.1007/BF01208560},
   Key = {fds287139}
}
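
The effect of truncation can be probed with the cascade-style iteration commonly used to plot refinable functions: starting from a delta and convolving, at stage j, with the mask upsampled by 2^j approximates φ on the grid 2^(-J)ℤ when the scheme converges. A sketch comparing the D4 mask with a two-decimal rounding of it (an illustrative truncation, not the one studied in the paper):

import numpy as np

def cascade(c, levels=8):
    # v_{j+1} = v_j convolved with c upsampled by 2^j; v_0 = delta.
    # After `levels` steps, v approximates phi on the grid 2**-levels * Z.
    v = np.array([1.0])
    for j in range(levels):
        up = np.zeros((len(c) - 1) * 2**j + 1)
        up[:: 2**j] = c
        v = np.convolve(v, up)
    return v

s3 = np.sqrt(3.0)
c = np.array([1 + s3, 3 + s3, 3 - s3, 1 - s3]) / 4.0  # D4 mask, normalized to sum 2
phi = cascade(c)
phi_trunc = cascade(np.round(c, 2))                    # truncated coefficients
print(np.max(np.abs(phi - phi_trunc)))                 # sup-norm perturbation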

@article{fds287140,
   Author = {Cohen, A and Daubechies, I and Ron, A},
   Title = {How smooth is the smoothest function in a given refinable
             space?},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {3},
   Number = {1},
   Pages = {87-89},
   Publisher = {Elsevier BV},
   Year = {1996},
   Month = {January},
   url = {http://dx.doi.org/10.1006/acha.1996.0008},
   Doi = {10.1006/acha.1996.0008},
   Key = {fds287140}
}

@article{fds287141,
   Author = {Cohen, A and Daubechies, I},
   Title = {A new technique to estimate the regularity of refinable
             functions},
   Journal = {Revista Matematica Iberoamericana},
   Volume = {12},
   Number = {2},
   Pages = {527-591},
   Publisher = {European Mathematical Publishing House},
   Year = {1996},
   Month = {January},
   url = {http://dx.doi.org/10.4171/RMI/207},
   Abstract = {We study the regularity of refinable functions by analyzing
             the spectral properties of special operators associated to
             the refinement equation; in particular, we use the Fredholm
             determinant theory to derive numerical estimates for the
             spectral radius of these operators in certain spaces. This
             new technique is particularly useful for estimating the
             regularity in the cases where the refinement equation has an
             infinite number of nonzero coefficients and in the
             multidimensional cases.},
   Doi = {10.4171/RMI/207},
   Key = {fds287141}
}

@article{fds287142,
   Author = {Daubechies, I},
   Title = {Where do wavelets come from? - a personal point of
             view},
   Journal = {Proceedings of the IEEE},
   Volume = {84},
   Number = {4},
   Pages = {510-513},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1996},
   Month = {April},
   url = {http://dx.doi.org/10.1109/5.488696},
   Abstract = {The subject area of wavelets is connected to older ideas in
             many other fields, including pure and applied mathematics,
             physics, computer science and engineering. The rapid
             development of wavelet tools led to the application of
             wavelets as a standard part of mathematical tool kits.
             Developed wavelet tools also complemented more established
              mathematical techniques. Wavelet transforms drew on several
              earlier threads: spline constructions from harmonic
              analysis and quantum field theory, multiresolution
              decompositions built on recursive filtering algorithms, and
              Fourier transforms with analyzing functions generated from
              a Gaussian window by dyadic regroupings.},
   Doi = {10.1109/5.488696},
   Key = {fds287142}
}

@article{fds287146,
   Author = {Cohen, A and Daubechies, I and Plonka, G},
   Title = {Regularity of Refinable Function Vectors},
   Journal = {Journal of Fourier Analysis and Applications},
   Volume = {3},
   Number = {3},
   Pages = {x4-323},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1007/bf02649113},
   Abstract = {We study the existence and regularity of compactly supported
              solutions φ = (φ_ν), ν = 0, ..., r-1, of vector
              refinement equations. The space spanned by the translates
              of φ_ν can only provide approximation order if the
              refinement mask P has certain particular factorization
              properties. We show how the factorization of P can lead to
              decay of |φ̂_ν(u)| as |u| → ∞. The results on decay
              are used to prove uniqueness of solutions and convergence
              of the cascade algorithm.},
   Doi = {10.1007/bf02649113},
   Key = {fds287146}
}

@article{fds287143,
   Author = {Unser, M and Daubechies, I},
   Title = {On the approximation power of convolution-based least
             squares versus interpolation},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {45},
   Number = {7},
   Pages = {1697-1711},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1997},
   Month = {December},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/78.599940},
   Abstract = {There are many signal processing tasks for which
              convolution-based continuous signal representations such as
              splines and wavelets provide an interesting and practical
              alternative to the more traditional sinc-based methods. The
              coefficients of the corresponding signal approximations are
              typically obtained by direct sampling (interpolation or
              quasi-interpolation) or by using least squares techniques
              that apply a prefilter prior to sampling. Here, we compare
              the performance of these approaches and provide quantitative
              error estimates that can be used for the appropriate
              selection of the sampling step h. Specifically, we review
              several results in approximation theory with a special
              emphasis on the Strang-Fix conditions, which relate the
              general O(h^L) behavior of the error to the ability of the
              representation to reproduce polynomials of degree n ≤ L-1.
              We use this theory to derive pointwise error estimates for
              the various algorithms and to obtain the asymptotic limit of
              the L²-error as h tends to zero. We also propose a new
              improved L²-error bound for the least squares case. In the
              process, we provide all the relevant bound constants for
              polynomial splines. Some of our results suggest the
              existence of an intermediate range of sampling steps where
              the least squares method is roughly equivalent to an
              interpolator with twice the order. We present experimental
              examples that illustrate the theory and confirm the adequacy
              of our various bound and limit determinations. © 1997
              IEEE.},
   Doi = {10.1109/78.599940},
   Key = {fds287143}
}
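
The O(h^L) behavior is easy to observe numerically. A sketch for piecewise-linear interpolation, a scheme of Strang-Fix order L = 2 (it reproduces polynomials of degree ≤ 1): halving the sampling step cuts the sup-norm error by roughly a factor of four.

import numpy as np

xs = np.linspace(0.0, 2.0 * np.pi, 200001)
f = np.cos(xs)
for n in (64, 128, 256, 512):
    grid = np.linspace(0.0, 2.0 * np.pi, n + 1)     # sampling step h = 2*pi/n
    err = np.max(np.abs(np.interp(xs, grid, np.cos(grid)) - f))
    print(n, err)                                   # err ~ C * h**2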

@article{fds287144,
   Author = {Chassande-Mottin, E and Daubechies, I and Auger, F and Flandrin,
             P},
   Title = {Differential reassignment},
   Journal = {IEEE Signal Processing Letters},
   Volume = {4},
   Number = {10},
   Pages = {293-294},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1997},
   Month = {December},
   ISSN = {1070-9908},
   url = {http://dx.doi.org/10.1109/97.633772},
   Abstract = {A geometrical description is given for reassignment vector
             fields of spectrograms. These vector fields are shown to be
             connected with both an intrinsic phase characterization and
             a scalar potential. This allows for the generalization of
             the original reassignment process to a differential version
             based on a dynamical evolution of time-frequency
             particles.},
   Doi = {10.1109/97.633772},
   Key = {fds287144}
}

@article{fds287147,
   Author = {Daubechies, I},
   Title = {From the Original Framer to Present-Day Time-Frequency and
             Time-Scale Frames},
   Journal = {Journal of Fourier Analysis and Applications},
   Volume = {3},
   Number = {5},
   Pages = {x1-486},
   Year = {1997},
   Month = {December},
   Key = {fds287147}
}

@article{fds287204,
   Author = {Calderbank, AR and Daubechies, I and Sweldens, W and Yeo,
             BL},
   Title = {Lossless image compression using integer to integer wavelet
             transforms},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {596-599},
   Publisher = {IEEE Comput. Soc},
   Year = {1997},
   Month = {December},
   url = {http://dx.doi.org/10.1109/icip.1997.647983},
   Abstract = {Invertible wavelet transforms that map integers to integers
             are important for lossless representations. In this paper,
             we present an approach to build integer to integer wavelet
             transforms based upon the idea of factoring wavelet
             transforms into lifting steps. This allows the construction
             of an integer version of every wavelet transform. We
             demonstrate the use of these transforms in lossless image
             compression.},
   Doi = {10.1109/icip.1997.647983},
   Key = {fds287204}
}

@article{fds287148,
   Author = {Daubechies, I and Sweldens, W},
   Title = {Factoring Wavelet Transforms into Lifting
             Steps},
   Journal = {Journal of Fourier Analysis and Applications},
   Volume = {4},
   Number = {3},
   Pages = {x1-268},
   Year = {1998},
   Month = {January},
   url = {http://dx.doi.org/10.1007/bf02476026},
   Abstract = {This article is essentially tutorial in nature. We show how
             any discrete wavelet transform or two band subband filtering
             with finite filters can be decomposed into a finite sequence
             of simple filtering steps, which we call lifting steps but
             that are also known as ladder structures. This decomposition
             corresponds to a factorization of the polyphase matrix of
             the wavelet or subband filters into elementary matrices.
             That such a factorization is possible is well-known to
              algebraists (and expressed by the formula SL(n; R[z, z⁻¹])
              = E(n; R[z, z⁻¹])); it is also used in linear systems
              theory in the
             electrical engineering community. We present here a
             self-contained derivation, building the decomposition from
             basic principles such as the Euclidean algorithm, with a
             focus on applying it to wavelet filtering. This
             factorization provides an alternative for the lattice
             factorization, with the advantage that it can also be used
             in the biorthogonal, i.e., non-unitary case. Like the
             lattice factorization, the decomposition presented here
             asymptotically reduces the computational complexity of the
             transform by a factor two. It has other applications, such
             as the possibility of defining a wavelet-like transform that
             maps integers to integers.},
   Doi = {10.1007/bf02476026},
   Key = {fds287148}
}
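
The shortest instance of the factorization is the Haar filter written as one predict and one update step, each undone by running it backwards; a minimal sketch (the paper's Euclidean-algorithm factorization produces such ladders for every finite wavelet filter):

import numpy as np

def haar_lift(x):
    even, odd = x[0::2].astype(float), x[1::2].astype(float)
    d = odd - even          # predict: detail = odd minus its even neighbor
    s = even + d / 2.0      # update: s becomes the pairwise average
    return s, d

def haar_unlift(s, d):
    even = s - d / 2.0      # undo the update step
    odd = d + even          # undo the predict step
    x = np.empty(2 * len(s))
    x[0::2], x[1::2] = even, odd
    return x

x = np.random.rand(16)
assert np.allclose(haar_unlift(*haar_lift(x)), x)   # perfect reconstruction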

@article{fds287150,
   Author = {Daubechies, I},
   Title = {Recent results in wavelet applications},
   Journal = {Journal of Electronic Imaging},
   Volume = {7},
   Number = {4},
   Pages = {719-724},
   Publisher = {SPIE-Intl Soc Optical Eng},
   Year = {1998},
   Month = {January},
   ISSN = {1017-9909},
   url = {http://dx.doi.org/10.1117/1.482659},
   Abstract = {We present three recent developments in wavelets and
             subdivision: wavelet-type transforms that map integers to
             integers, with an application to lossless coding for images;
             rate-distortion bounds that realize the compression given by
             nonlinear approximation theorems for a model where wavelet
             compression outperforms the Karhunen-Loève approach; and
             smoothness results for irregularly spaced subdivision
             schemes, related to wavelet compression for irregularly
             spaced data. © 1998 SPIE and IS&T.},
   Doi = {10.1117/1.482659},
   Key = {fds287150}
}

@article{fds287205,
   Author = {Calderbank, AR and Daubechies, I and Sweldens, W and Yeo,
             BL},
   Title = {Wavelet Transforms That Map Integers to Integers},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {5},
   Number = {3},
   Pages = {332-369},
   Publisher = {Elsevier BV},
   Year = {1998},
   Month = {January},
   url = {http://dx.doi.org/10.1006/acha.1997.0238},
   Abstract = {Invertible wavelet transforms that map integers to integers
             have important applications in lossless coding. In this
             paper we present two approaches to build integer to integer
             wavelet transforms. The first approach is to adapt the
             precoder of Laroia et al., which is used in information
             transmission; we combine it with expansion factors for the
             high and low pass band in subband filtering. The second
             approach builds upon the idea of factoring wavelet
              transforms into so-called lifting steps. This allows the
             construction of an integer version of every wavelet
             transform. Finally, we use these approaches in a lossless
             image coder and compare the results to those given in the
             literature. © 1998 Academic Press.},
   Doi = {10.1006/acha.1997.0238},
   Key = {fds287205}
}
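
Rounding inside each lifting step keeps the ladder exactly invertible while mapping integers to integers. A sketch of the Haar case, often called the S transform (shown here as an illustration of the principle, not as the paper's full construction):

import numpy as np

def s_transform(x):
    even, odd = x[0::2], x[1::2]
    d = odd - even             # integer detail
    s = even + d // 2          # integer average; floor division matches the inverse
    return s, d

def s_inverse(s, d):
    even = s - d // 2          # the same floored value is subtracted back
    odd = d + even
    x = np.empty(2 * len(s), dtype=np.int64)
    x[0::2], x[1::2] = even, odd
    return x

x = np.random.default_rng(1).integers(0, 256, size=16)   # an 8-bit pixel row
s, d = s_transform(x)
assert np.array_equal(s_inverse(s, d), x)                # exactly lossless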

@article{fds348056,
   Author = {Daubechies, I},
   Title = {Recent results in wavelet applications},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {3391},
   Pages = {2-9},
   Year = {1998},
   Month = {March},
   url = {http://dx.doi.org/10.1117/12.304919},
   Abstract = {We present three recent developments in wavelets and
             subdivision: wavelet-type transforms that map integers to
             integers, with an application to lossless coding for images;
             rate-distortion bounds that realize the compression given by
             nonlinear approximation theorems for a model where wavelet
             compression outperforms the Karhunen-Loève approach; and
             smoothness results for irregularly spaced subdivision
             schemes, related to wavelet compression for irregularly
             spaced data.},
   Doi = {10.1117/12.304919},
   Key = {fds348056}
}

@article{fds287145,
   Author = {Donoho, DL and Vetterli, M and Devore, RA and Daubechies,
             I},
   Title = {Data compression and harmonic analysis},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {44},
   Number = {6},
   Pages = {2435-2476},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1998},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.720544},
   Abstract = {In this paper we review some recent interactions between
             harmonic analysis and data compression. The story goes back
             of course to Shannon's R(D) theory in the case of Gaussian
             stationary processes, which says that transforming into a
             Fourier basis followed by block coding gives an optimal
             lossy compression technique; practical developments like
             transform-based image compression have been inspired by this
             result. In this paper we also discuss connections perhaps
             less familiar to the Information Theory community, growing
             out of the field of harmonic analysis. Recent harmonic
             analysis constructions, such as wavelet transforms and Gabor
             transforms, are essentially optimal transforms for transform
             coding in certain settings. Some of these transforms are
             under consideration for future compression standards. We
             discuss some of the lessons of harmonic analysis in this
             century. Typically, the problems and achievements of this
             field have involved goals that were not obviously related to
             practical data compression, and have used a language not
             immediately accessible to outsiders. Nevertheless, through
             an extensive generalization of what Shannon called the
             "sampling theorem," harmonic analysis has succeeded in
             developing new forms of functional representation which turn
             out to have significant data compression interpretations. We
             explain why harmonic analysis has interacted with data
             compression, and we describe some interesting recent ideas
             in the field that may affect data compression in the future.
             © 1998 IEEE.},
   Doi = {10.1109/18.720544},
   Key = {fds287145}
}

@article{fds287149,
   Author = {Daubechies, I},
   Title = {Preface},
   Journal = {Wavelet Analysis and Its Applications},
   Volume = {9},
   Number = {C},
   Pages = {5},
   Year = {1998},
   Month = {December},
   ISSN = {1874-608X},
   url = {http://dx.doi.org/10.1016/S1874-608X(98)80024-1},
   Doi = {10.1016/S1874-608X(98)80024-1},
   Key = {fds287149}
}

@article{fds287151,
   Author = {Daubechies, I and Guskov, I and Schröder, P and Sweldens,
             W},
   Title = {Wavelets on irregular point sets},
   Journal = {Philosophical Transactions of the Royal Society A:
             Mathematical, Physical and Engineering Sciences},
   Volume = {357},
   Number = {1760},
   Pages = {2397-2413},
   Publisher = {The Royal Society},
   Year = {1999},
   Month = {January},
   ISSN = {1364-503X},
   url = {http://dx.doi.org/10.1098/rsta.1999.0439},
   Abstract = {In this article we review techniques for building and
             analysing wavelets on irregular point sets in one and two
             dimensions. We discuss current results on both the practical
             and theoretical sides. In particular, we focus on
             subdivision schemes and commutation rules. Several examples
             are included.},
   Doi = {10.1098/rsta.1999.0439},
   Key = {fds287151}
}

@article{fds287152,
   Author = {Daubechies, I and Guskov, I and Sweldens, W},
   Title = {Regularity of irregular subdivision},
   Journal = {Constructive Approximation},
   Volume = {15},
   Number = {3},
   Pages = {381-426},
   Year = {1999},
   Month = {January},
   url = {http://dx.doi.org/10.1007/s003659900114},
   Abstract = {We study the smoothness of the limit function for
             one-dimensional unequally spaced interpolating subdivision
             schemes. The new grid points introduced at every level can
             lie in irregularly spaced locations between old, adjacent
             grid points and not only midway as is usually the case. For
             the natural generalization of the four-point scheme
             introduced by Dubuc and Dyn, Levin, and Gregory, we show
             that, under some geometric restrictions, the limit function
             is always C1; under slightly stronger restrictions we show
             that the limit function is almost C2, the same regularity as
             in the regularly spaced case.},
   Doi = {10.1007/s003659900114},
   Key = {fds287152}
}
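
For readers who want to experiment, here is a minimal sketch of the regular
(equally spaced) four-point scheme to which the irregular schemes above
reduce on a uniform grid; old points are retained and new midpoints get the
classical weights (-1, 9, 9, -1)/16. Periodic boundary handling is assumed
purely for brevity.

    import numpy as np

    def four_point_refine(y):
        # one subdivision level of the interpolating four-point scheme
        n = len(y)
        out = np.empty(2 * n)
        out[0::2] = y                      # interpolating: keep old points
        for i in range(n):                 # insert new midpoints
            ym, y0 = y[(i - 1) % n], y[i]  # periodic neighbours
            y1, y2 = y[(i + 1) % n], y[(i + 2) % n]
            out[2 * i + 1] = (-ym + 9 * y0 + 9 * y1 - y2) / 16
        return out

Iterating four_point_refine on a coarse sequence converges, in the regular
case, to a limit function that is C1 and, as the abstract recalls, "almost"
C2.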

@article{fds287154,
   Author = {Cvetkovic, Z and Daubechies, I},
   Title = {Single-bit oversampled A/D conversion with exponential
             accuracy in the bit-rate},
   Journal = {Data Compression Conference Proceedings},
   Pages = {343-352},
   Publisher = {IEEE Comput. Soc},
   Year = {2000},
   Month = {January},
   ISSN = {1068-0314},
   url = {http://dx.doi.org/10.1109/dcc.2000.838174},
   Abstract = {We present a scheme for simple oversampled analog-to-digital
             conversion, with single bit quantization and exponential
             error decay in the bit-rate. The scheme is based on
             recording positions of zero-crossings of the input signal
             added to a deterministic dither function. This information
             can be represented in a manner which requires only
             logarithmic increase of the bit rate with the oversampling
             factor, r. The input band-limited signal can be
             reconstructed from this information locally, and with a mean
             squared error which is inversely proportional to the square
             of the oversampling factor: MSE = O(1/r^2). Consequently,
             the mean squared error of this scheme exhibits exponential
             decay in the bit-rate.},
   Doi = {10.1109/dcc.2000.838174},
   Key = {fds287154}
}

@article{fds287153,
   Author = {Balan, R and Daubechies, I and Vaishampayan, V},
   Title = {The analysis and design of windowed fourier frame based
             multiple description source coding schemes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {46},
   Number = {7},
   Pages = {2491-2536},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2000},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.887860},
   Abstract = {In this paper the windowed Fourier encoding-decoding scheme
             applied to the multiple description compression problem is
             analyzed. In the general case, four window functions are
             needed to define the encoder and decoder, although this
             number can be reduced to three or two by using time-shift or
             frequency-shift division schemes. The encoding coefficients
             are next divided into two groups according to the evenness of
             either modulation or translation index. The distortion on
             each channel is analyzed using the Zak transform. For the
             optimal windows, explicit representation formulas are
             obtained and nonlocalization results are proved. Asymptotic
             formulas of the total distortion and transmission rate are
             established and the redundancy is shown to trade off between
             these two. © 2000 IEEE.},
   Doi = {10.1109/18.887860},
   Key = {fds287153}
}

@article{fds287157,
   Author = {Cohen, A and Dahmen, W and Daubechies, I and Devore,
             R},
   Title = {Tree Approximation and Optimal Encoding},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {11},
   Number = {2},
   Pages = {192-226},
   Publisher = {Elsevier BV},
   Year = {2001},
   Month = {September},
   url = {http://dx.doi.org/10.1006/acha.2001.0336},
   Abstract = {Tree approximation is a new form of nonlinear approximation
             which appears naturally in some applications such as image
             processing and adaptive numerical methods. It is somewhat
             more restrictive than the usual n-term approximation. We
             show that the restrictions of tree approximation cost little
             in terms of rates of approximation. We then use that result
             to design encoders for compression. These encoders are
             universal (they apply to general functions) and progressive
             (increasing accuracy is obtained by sending bit stream
             increments). We show optimality of the encoders in the sense
             that they provide upper estimates for the Kolmogorov entropy
             of Besov balls. © 2001 Academic Press.},
   Doi = {10.1006/acha.2001.0336},
   Key = {fds287157}
}
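
A small sketch of the tree restriction discussed above, under an assumed
binary-heap indexing of wavelet coefficients (root at index 0, children of
node i at 2i+1 and 2i+2); this is an illustration, not the paper's encoder.
Keeping every node whose subtree contains a large coefficient automatically
produces a set closed under taking parents, i.e., a tree.

    import numpy as np

    def tree_threshold(c, eta):
        # c: coefficients in binary-heap order; keep a node iff its
        # subtree contains a coefficient larger than eta
        sub = np.abs(c).astype(float)
        for i in range(len(c) - 1, 0, -1):   # push subtree maxima upward
            p = (i - 1) // 2
            sub[p] = max(sub[p], sub[i])
        return np.where(sub > eta, c, 0.0)

If a node survives, its parent's subtree maximum is at least as large, so
the parent survives too; the kept support is therefore a tree.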

@article{fds287155,
   Author = {Daubechies, I and Guskov, I and Sweldens, W},
   Title = {Commutation for irregular subdivision},
   Journal = {Constructive Approximation},
   Volume = {17},
   Number = {4},
   Pages = {479-513},
   Publisher = {Springer Nature},
   Year = {2001},
   Month = {December},
   url = {http://dx.doi.org/10.1007/s00365-001-0001-0},
   Abstract = {We present a generalization of the commutation formula to
             irregular subdivision schemes and wavelets. We show how, in
             the noninterpolating case, the divided differences need to
             be adapted to the subdivision scheme. As an example we
             include the construction of an entire family of biorthogonal
             compactly supported irregular knot B-spline wavelets
             starting from Lagrangian interpolation.},
   Doi = {10.1007/s00365-001-0001-0},
   Key = {fds287155}
}

@article{fds287156,
   Author = {Daubechies, I and DeVore, R and Güntürk, CS and Vaishampayan,
             VA},
   Title = {Beta expansions: A new approach to digitally corrected A/D
             conversion},
   Journal = {Proceedings - IEEE International Symposium on Circuits and
             Systems},
   Volume = {2},
   Pages = {784-787},
   Publisher = {IEEE},
   Year = {2002},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ISCAS.2002.1011470},
   Abstract = {We introduce a new architecture for pipelined (and also
             algorithmic) A/D converters that give exponentially accurate
             conversion using inaccurate comparators. An error analysis
             of a sigma-delta converter with an imperfect comparator and
             a constant input reveals a self-correction property that is
             not inherited by the successive refinement quantization
             algorithm that underlies both pipelined multistage A/D
             converters as well as algorithmic A/D converters. Motivated
             by this example, we introduce a new A/D converter- the Beta
             Converter-which has the same self-correction property as a
             sigma-delta converter but which exhibits higher order
             (exponential) accuracy with respect to the bit rate as
             compared to a sigma-delta converter, which exhibits only
             polynomial accuracy.},
   Doi = {10.1109/ISCAS.2002.1011470},
   Key = {fds287156}
}
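
The self-correction property is easy to demonstrate numerically. The sketch
below (illustrative, not the paper's circuit) computes a greedy
beta-expansion bit by bit; because β < 2 the expansion is redundant, and any
comparator threshold in [1, 1/(β-1)] still yields bits that decode back to x
with error O(β^{-N}), i.e., exponential accuracy despite the biased
comparator.

    def beta_encode(x, beta=1.8, nbits=16, thresh=1.0):
        # greedy beta-expansion of x in [0, 1/(beta-1)]; 'thresh' models
        # an imperfect comparator, tolerated anywhere in [1, 1/(beta-1)]
        bits = []
        u = x
        for _ in range(nbits):
            u *= beta
            b = 1 if u >= thresh else 0
            u -= b
            bits.append(b)
        return bits

    def beta_decode(bits, beta=1.8):
        return sum(b * beta ** -(i + 1) for i, b in enumerate(bits))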

@article{fds330516,
   Author = {Cvetković, Z and Daubechies, I and Logan, BF},
   Title = {Interpolation of bandlimited functions from quantized
             irregular samples},
   Journal = {Data Compression Conference Proceedings},
   Volume = {2002-January},
   Pages = {412-421},
   Publisher = {IEEE Comput. Soc},
   Year = {2002},
   Month = {January},
   ISBN = {0769514774},
   url = {http://dx.doi.org/10.1109/DCC.2002.999981},
   Abstract = {The problem of reconstructing a π-bandlimited signal f from
             its quantized samples taken at an irregular sequence of
             points (t_k)_{k∈Z} arises in oversampled analog-to-digital
             conversion. The input signal can be reconstructed from the
             quantized samples (f(t_k))_{k∈Z} by estimating the samples
             (f(n/λ))_{n∈Z}, where λ is the average uniform density of
             the sequence (t_k)_{k∈Z}, assumed here to be greater than
             one, followed by linear low-pass filtering. We study three
             techniques for estimating the samples (f(n/λ))_{n∈Z} from
             the quantized irregular samples (f(t_k))_{k∈Z}: Lagrangian
             interpolation, and two other techniques which result in a
             better overall accuracy of oversampled A/D
             conversion.},
   Doi = {10.1109/DCC.2002.999981},
   Key = {fds330516}
}

@article{fds287158,
   Author = {Daubechies, I and Han, B},
   Title = {The canonical dual frame of a wavelet frame},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {12},
   Number = {3},
   Pages = {269-285},
   Year = {2002},
   Month = {May},
   ISSN = {1063-5203},
   url = {http://dx.doi.org/10.1006/acha.2002.0381},
   Abstract = {In this paper we show that there exist wavelet frames that
             have nice dual wavelet frames, but for which the canonical
             dual frame does not consist of wavelets, i.e., cannot be
             generated by the translates and dilates of a single
             function. © 2002 Elsevier Science (USA).},
   Doi = {10.1006/acha.2002.0381},
   Key = {fds287158}
}

@article{fds287206,
   Author = {Calderbank, AR and Daubechies, I},
   Title = {The pros and cons of democracy},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {48},
   Number = {6},
   Pages = {1721-1725},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2002},
   Month = {June},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2002.1003852},
   Abstract = {The concept of democracy is introduced, in which the
             individual bits in a coarsely quantized representation of a
             signal are given equal weight in the approximation to the
             original signal. It is proved that such democratic
             representations cannot achieve the same accuracy as optimal
             nondemocratic schemes. Convolutional decoding is found to be
             convenient in digital-to-analog conversion.},
   Doi = {10.1109/TIT.2002.1003852},
   Key = {fds287206}
}

@article{fds287159,
   Author = {Daubechies, I and Planchon, F},
   Title = {Adaptive Gabor transforms},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {13},
   Number = {1},
   Pages = {1-21},
   Publisher = {Elsevier BV},
   Year = {2002},
   Month = {July},
   url = {http://dx.doi.org/10.1016/S1063-5203(02)00003-9},
   Abstract = {We aim to provide time-frequency representations of a
             one-dimensional signal where the window is locally adapted
             to the signal, thus providing a better readability of the
             representation. © 2002 Elsevier Science (USA). All rights
             reserved.},
   Doi = {10.1016/S1063-5203(02)00003-9},
   Key = {fds287159}
}

@article{fds287160,
   Author = {Cohen, A and Daubechies, I and Guleryuz, OG and Orchard,
             MT},
   Title = {On the importance of combining wavelet-based nonlinear
             approximation with coding strategies},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {48},
   Number = {7},
   Pages = {1895-1921},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2002},
   Month = {July},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2002.1013132},
   Abstract = {This paper provides a mathematical analysis of transform
             compression in its relationship to linear and nonlinear
             approximation theory. Contrasting linear and nonlinear
             approximation spaces, we show that there are interesting
             classes of functions/random processes which are much more
             compactly represented by wavelet-based nonlinear
             approximation. These classes include locally smooth signals
             that have singularities, and provide a model for many
             signals encountered in practice, in particular for images.
             However, we also show that nonlinear approximation results
             do not always translate to efficient compression strategies
             in a rate-distortion sense. Based on this observation, we
             construct compression techniques and formulate the family of
             functions/stochastic processes for which they provide
             efficient descriptions in a rate-distortion sense. We show
             that this family invariably leads to Besov spaces, yielding
             a natural relationship among Besov smoothness,
             linear/nonlinear approximation order, and compression
             performance in a rate-distortion sense. The designed
             compression techniques show similarities to modern
             high-performance transform codecs, allowing us to establish
             relevant rate-distortion estimates and identify performance
             limits.},
   Doi = {10.1109/TIT.2002.1013132},
   Key = {fds287160}
}

@article{fds287161,
   Author = {Daubechies, I and Han, B and Ron, A and Shen, Z},
   Title = {Framelets: MRA-based constructions of wavelet
             frames},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {14},
   Number = {1},
   Pages = {1-46},
   Publisher = {Elsevier BV},
   Year = {2003},
   Month = {January},
   url = {http://dx.doi.org/10.1016/S1063-5203(02)00511-0},
   Abstract = {We discuss wavelet frames constructed via multiresolution
             analysis (MRA), with emphasis on tight wavelet frames. In
             particular, we establish general principles and specific
             algorithms for constructing framelets and tight framelets,
             and we show how they can be used for systematic
             constructions of spline, pseudo-spline tight frames, and
             symmetric bi-frames with short supports and high
             approximation orders. Several explicit examples are
             discussed. The connection of these frames with
             multiresolution analysis guarantees the existence of fast
             implementation algorithms, which we discuss briefly as well.
             © 2002 Elsevier Science (USA). All rights
             reserved.},
   Doi = {10.1016/S1063-5203(02)00511-0},
   Key = {fds287161}
}

@article{fds287162,
   Author = {Cohen, A and Dahmen, W and Daubechies, I and DeVore,
             R},
   Title = {Harmonic analysis of the space BV},
   Journal = {Revista Matematica Iberoamericana},
   Volume = {19},
   Number = {1},
   Pages = {235-263},
   Publisher = {European Mathematical Publishing House},
   Year = {2003},
   Month = {January},
   url = {http://dx.doi.org/10.4171/RMI/345},
   Abstract = {We establish new results on the space BV of functions with
             bounded variation. While it is well known that this space
             admits no unconditional basis, we show that it is "almost"
             characterized by wavelet expansions in the following sense:
             if a function f is in BV, its coefficient sequence in a
             BV-normalized wavelet basis satisfies a class of weak-l1 type
             estimates. These weak estimates can be employed to prove
             many interesting results. We use them to identify the
             interpolation spaces between BV and Sobolev or Besov spaces,
             and to derive new Gagliardo-Nirenberg-type
             inequalities.},
   Doi = {10.4171/RMI/345},
   Key = {fds287162}
}

@article{fds287164,
   Author = {Daubechies, I and Devore, R},
   Title = {Approximating a bandlimited function using very coarsely
             quantized data: A family of stable sigma-delta modulators of
             arbitrary order},
   Journal = {Annals of Mathematics},
   Volume = {158},
   Number = {2},
   Pages = {679-710},
   Publisher = {Annals of Mathematics, Princeton U},
   Year = {2003},
   Month = {January},
   ISSN = {0003-486X},
   url = {http://dx.doi.org/10.4007/annals.2003.158.679},
   Doi = {10.4007/annals.2003.158.679},
   Key = {fds287164}
}

@article{fds287108,
   Author = {Rudin, C and Daubechies, I and Schapire, RE},
   Title = {On the dynamics of boosting},
   Journal = {Advances in Neural Information Processing
             Systems},
   Pages = {1101-1108},
   Publisher = {M I T PRESS},
   Editor = {Thrun, S and Saul, LK and Schölkopf, B},
   Year = {2004},
   Month = {January},
   ISBN = {9780262201520},
   url = {http://papers.nips.cc/book/advances-in-neural-information-processing-systems-16-2003},
   Abstract = {In order to understand AdaBoost's dynamics, especially its
             ability to maximize margins, we derive an associated
             simplified nonlinear iterated map and analyze its behavior
             in low-dimensional cases. We find stable cycles for these
             cases, which can explicitly be used to solve for Ada-
             Boost's output. By considering AdaBoost as a dynamical
             system, we are able to prove R̈atsch and Warmuth's
             conjecture that AdaBoost may fail to converge to a
             maximal-margin combined classifier when given a 'nonoptimal'
             weak learning algorithm. AdaBoost is known to be a
             coordinate descent method, but other known algorithms that
             explicitly aim to maximize the margin (such as AdaBoost* and
             arc-gv) are not. We consider a differentiable function for
             which coordinate ascent will yield a maximum margin
             solution. We then make a simple approximation to derive a
             new boosting algorithm whose updates are slightly more
             aggressive than those of arc-gv.},
   Key = {fds287108}
}

@article{fds287165,
   Author = {Daubechies, I and Han, B},
   Title = {Pairs of dual wavelet frames from any two refinable
             functions},
   Journal = {Constructive Approximation},
   Volume = {20},
   Number = {3},
   Pages = {325-352},
   Publisher = {Springer Nature},
   Year = {2004},
   Month = {January},
   url = {http://dx.doi.org/10.1007/s00365-004-0567-4},
   Abstract = {Starting from any two compactly supported refinable
             functions in L2(R) with dilation factor d, we show that it
             is always possible to construct 2d wavelet functions with
             compact support such that they generate a pair of dual
             d-wavelet frames in L2 (R). Moreover, the number of
             vanishing moments of each of these wavelet frames is equal
             to the approximation order of the dual MRA; this is the
             highest possible. In particular, when we consider symmetric
             refinable functions, the constructed dual wavelets are also
             symmetric or antisymmetric. As a consequence, for any
             compactly supported refinable function φ in L2 (R), it is
             possible to construct, explicitly and easily, wavelets that
             are finite linear combinations of translates φ(d·-k), and
             that generate a wavelet frame with an arbitrarily
             preassigned number of vanishing moments. We illustrate the
             general theory by examples of such pairs of dual wavelet
             frames derived from B-spline functions.},
   Doi = {10.1007/s00365-004-0567-4},
   Key = {fds287165}
}

@article{fds287166,
   Author = {Rudin, C and Schapire, RE and Daubechies, I},
   Title = {Boosting based on a smooth margin},
   Journal = {Lecture Notes in Artificial Intelligence (Subseries of
             Lecture Notes in Computer Science)},
   Volume = {3120},
   Pages = {502-517},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2004},
   Month = {January},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-540-27819-1_35},
   Abstract = {We study two boosting algorithms, Coordinate Ascent Boosting
             and Approximate Coordinate Ascent Boosting, which are
             explicitly designed to produce maximum margins. To derive
             these algorithms, we introduce a smooth approximation of the
             margin that one can maximize in order to produce a maximum
             margin classifier. Our first algorithm is simply coordinate
             ascent on this function, involving a line search at each
             step. We then make a simple approximation of this line
             search to reveal our second algorithm. These algorithms are
             proven to asymptotically achieve maximum margins, and we
             provide two convergence rate calculations. The second
             calculation yields a faster rate of convergence than the
             first, although the first gives a more explicit (still fast)
             rate. These algorithms are very similar to AdaBoost in that
             they are based on coordinate ascent, easy to implement, and
             empirically tend to converge faster than other boosting
             algorithms. Finally, we attempt to understand AdaBoost in
             terms of our smooth margin, focusing on cases where AdaBoost
             exhibits cyclic behavior.},
   Doi = {10.1007/978-3-540-27819-1_35},
   Key = {fds287166}
}

@article{fds287167,
   Author = {Daubechies, I and Runborg, O and Sweldens, W},
   Title = {Normal multiresolution approximation of curves},
   Journal = {Constructive Approximation},
   Volume = {20},
   Number = {3},
   Pages = {399-463},
   Publisher = {Springer Nature},
   Year = {2004},
   Month = {January},
   url = {http://dx.doi.org/10.1007/s00365-003-0543-4},
   Abstract = {A multiresolution analysis of a curve is normal if each
             wavelet detail vector with respect to a certain subdivision
             scheme lies in the local normal direction. In this paper we
             study properties such as regularity, convergence, and
             stability of a normal multiresolution analysis. In
             particular, we show that these properties critically depend
             on the underlying subdivision scheme and that, in general,
             the convergence of normal multiresolution approximations
             equals the convergence of the underlying subdivision
             scheme.},
   Doi = {10.1007/s00365-003-0543-4},
   Key = {fds287167}
}

@article{fds287163,
   Author = {Daubechies, I and Teschke, G},
   Title = {Wavelet based image decomposition by variational
             functionals},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {5266},
   Pages = {94-105},
   Publisher = {SPIE},
   Year = {2004},
   Month = {May},
   url = {http://dx.doi.org/10.1117/12.516051},
   Abstract = {We discuss a wavelet based treatment of variational problems
             arising in the context of image processing, inspired by
             papers of Vese-Osher and Osher-Solé-Vese, in particular, we
             introduce a special class of variational functionals, that
             induce a decomposition of images in oscillating and cartoon
             components. Cartoons are often modeled by BV functions. In
             the setting of Vese et al. and Osher et al., the
             incorporation of BV penalty terms leads to PDE schemes that
             are numerically intensive. We propose to embed the problem
             in a wavelet framework. This provides us with elegant and
             numerically efficient schemes even though a basic
             requirement, the involvement of the space BV, has to be
             softened slightly. We show results on test images of our
             wavelet algorithm with a B11(L1) penalty term, and we
             compare them with the BV restorations of
             Osher-Solé-Vese.},
   Doi = {10.1117/12.516051},
   Key = {fds287163}
}

@article{fds287169,
   Author = {Daubechies, I and Defrise, M and De Mol and C},
   Title = {An iterative thresholding algorithm for linear inverse
             problems with a sparsity constraint},
   Journal = {Communications on Pure and Applied Mathematics},
   Volume = {57},
   Number = {11},
   Pages = {1413-1457},
   Publisher = {WILEY},
   Year = {2004},
   Month = {November},
   url = {http://dx.doi.org/10.1002/cpa.20042},
   Abstract = {We consider linear inverse problems where the solution is
             assumed to have a sparse expansion on an arbitrary
             preassigned orthonormal basis. We prove that replacing the
             usual quadratic regularizing penalties by weighted ℓp
             penalties on the coefficients of such expansions, with 1 ≤
             p ≤ 2, still regularizes the problem. Use of such ℓp-
             penalized problems with p < 2 is often advocated when one
             expects the underlying ideal noiseless solution to have a
             sparse expansion with respect to the basis under
             consideration. To compute the corresponding regularized
             solutions, we analyze an iterative algorithm that amounts to
             a Landweber iteration with thresholding (or nonlinear
             shrinkage) applied at each iteration step. We prove that
             this algorithm converges in norm. © 2004 Wiley Periodicals,
             Inc.},
   Doi = {10.1002/cpa.20042},
   Key = {fds287169}
}
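
The iteration analyzed in this paper is compactly stated in code. The sketch
below covers the p = 1 case with a uniform weight w on the coefficients; it
assumes the problem has been rescaled so that ||A|| < 1, as in the paper's
setting, and soft-thresholds at w/2 because the data term is ||Ax - y||^2
without a 1/2 factor.

    import numpy as np

    def iterative_soft_thresholding(A, y, w, n_iter=200):
        # Landweber step followed by soft thresholding at each iteration
        x = np.zeros(A.shape[1])
        for _ in range(n_iter):
            z = x + A.T @ (y - A @ x)     # Landweber (gradient) step
            x = np.sign(z) * np.maximum(np.abs(z) - w / 2, 0.0)  # shrink
        return x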

@article{fds287109,
   Author = {Rudin, C and Daubechies, I and Schapire, RE},
   Title = {The dynamics of AdaBoost: Cyclic behavior and convergence of
             margins},
   Journal = {Journal of Machine Learning Research},
   Volume = {5},
   Pages = {1557-1595},
   Year = {2004},
   Month = {December},
   Abstract = {In order to study the convergence properties of the AdaBoost
             algorithm, we reduce AdaBoost to a nonlinear iterated map
             and study the evolution of its weight vectors. This
             dynamical systems approach allows us to understand
             AdaBoost's convergence properties completely in certain
             cases; for these cases we find stable cycles, allowing us to
             explicitly solve for AdaBoost's output. Using this unusual
             technique, we are able to show that AdaBoost does not always
             converge to a maximum margin combined classifier, answering
             an open question. In addition, we show that "nonoptimal"
             AdaBoost (where the weak learning algorithm does not
             necessarily choose the best weak classifier at each
             iteration) may fail to converge to a maximum margin
             classifier, even if "optimal" AdaBoost produces a maximum
             margin. Also, we show that if AdaBoost cycles, it cycles
             among "support vectors", i.e., examples that achieve the
             same smallest margin.},
   Key = {fds287109}
}
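
The iterated map studied in the paper acts on the vector of example weights.
Below is a toy "optimal" AdaBoost loop (a sketch for experimentation, not
the paper's analysis) in the ±1 correctness convention: M[i, j] = +1 if weak
classifier j is correct on example i. Watching w over many rounds on small
matrices M is how the cyclic behavior described above can be observed.

    import numpy as np

    def adaboost(M, n_rounds=1000):
        m, n = M.shape
        w = np.full(m, 1.0 / m)          # example weights: the dynamical state
        lam = np.zeros(n)                # accumulated classifier coefficients
        for _ in range(n_rounds):
            r = M.T @ w                  # edges of all weak classifiers
            j = int(np.argmax(r))        # "optimal" weak learner
            a = 0.5 * np.log((1 + r[j]) / (1 - r[j]))
            w = w * np.exp(-a * M[:, j]) # reweight the examples
            w = w / w.sum()              # renormalize: the iterated map
            lam[j] += a
        margins = (M @ lam) / lam.sum()  # normalized margins
        return w, lam, margins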

@article{fds287102,
   Author = {Daubechies, I and Lieb, EH},
   Title = {One-electron relativistic molecules with coulomb
             interaction},
   Pages = {471-484},
   Publisher = {Springer Verlag},
   Year = {2005},
   Month = {January},
   url = {http://dx.doi.org/10.1007/3-540-27056-6_33},
   Abstract = {As an approximation to a relativistic one-electron molecule,
             we study the operator H = (-Δ + m^2)^{1/2} - e^2 Σ_j Z_j
             |x - R_j|^{-1} with Z_j ≥ 0, e^{-2} = 137.04. H is bounded
             below if and only if e^2 Z_j ≤ 2/π, all j. Assuming this
             condition, the system is unstable when e^2 Σ_j Z_j > 2/π in
             the sense that E_0 = inf spec(H) → -∞ as the R_j → 0, all j.
             We prove that the nuclear Coulomb repulsion more than
             restores stability; namely E_0 + 0.069 e^2 Σ_{i<j} Z_i Z_j
             |R_i - R_j|^{-1} ≥ 0. We also show that E_0 is an increasing
             function of the internuclear distances |R_i - R_j|. © 2005
             Springer-Verlag Berlin Heidelberg New York.},
   Doi = {10.1007/3-540-27056-6_33},
   Key = {fds287102}
}

@article{fds287107,
   Author = {Daubechies, I and Drakakis, K and Khovanova, T},
   Title = {A detailed study of the attachment strategies of new
             autonomous systems in the AS connectivity
             graph},
   Journal = {Internet Mathematics},
   Volume = {2},
   Number = {2},
   Pages = {185-246},
   Publisher = {Internet Mathematics},
   Year = {2005},
   Month = {January},
   url = {http://dx.doi.org/10.1080/15427951.2005.10129103},
   Abstract = {The connectivity of the autonomous systems (ASs) in the
             Internet can be modeled as a time-evolving random graph,
             whose nodes represent ASs and whose edges represent direct
             connections between them. Even though this graph has some
             random aspects, its properties show it to be fundamentally
             different from “traditional” random graphs. In the first
             part of this paper, we use real BGP data to study some
             properties of the AS connectivity graph and its evolution in
             time. In the second part, we build a simple model that is
             inspired by observations made in the first part, and we
             discuss simulations of this model.},
   Doi = {10.1080/15427951.2005.10129103},
   Key = {fds287107}
}

@article{fds287168,
   Author = {Pierpaoli, E and Anthoine, S and Huffenberger, K and Daubechies,
             I},
   Title = {Reconstructing Sunyaev-Zel'dovich clusters in future cosmic
             microwave background experiments},
   Journal = {Monthly Notices of the Royal Astronomical
             Society},
   Volume = {359},
   Number = {1},
   Pages = {261-271},
   Publisher = {Oxford University Press (OUP)},
   Year = {2005},
   Month = {May},
   url = {http://dx.doi.org/10.1111/j.1365-2966.2005.08896.x},
   Abstract = {We present a new method for component separation aimed at
             extracting Sunyaev-Zel'dovich (SZ) galaxy clusters from
             multifrequency maps of cosmic microwave background (CMB)
             experiments. This method is designed to recover
             non-Gaussian, spatially localized and sparse signals. We
             first characterize the cluster non-Gaussianity by studying
             it on simulated SZ maps. We then apply our estimator on
             simulated observations of the Planck and Atacama Cosmology
             Telescope (ACT) experiments. The method presented here
             outperforms multifrequency Wiener filtering, both in the
             reconstructed average intensity for given input and in the
             associated error. In the absence of point source
             contamination, this technique reconstructs the ACT (Planck)
             bright (big) cluster central y parameter with an intensity
             that is about 84 (43) per cent of the original input value.
             The associated error in the reconstruction is about 12 and
             27 per cent for the 50 (12) ACT (Planck) clusters
             considered. For ACT, the error is dominated by beam
             smearing. In the Planck case, the error in the
             reconstruction is largely determined by the noise level: a
             noise reduction by a factor of 7 would imply almost perfect
             reconstruction and 10 per cent error for a large sample of
             clusters. We conclude that the selection function of Planck
             clusters will strongly depend on the noise properties in
             different sky regions, as well as the specific cluster
             extraction method assumed. © 2005 RAS.},
   Doi = {10.1111/j.1365-2966.2005.08896.x},
   Key = {fds287168}
}

@article{fds287170,
   Author = {Daubechies, I and Teschke, G},
   Title = {Variational image restoration by means of wavelets:
             Simultaneous decomposition, deblurring, and
             denoising},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {19},
   Number = {1},
   Pages = {1-16},
   Publisher = {Elsevier BV},
   Year = {2005},
   Month = {July},
   url = {http://dx.doi.org/10.1016/j.acha.2004.12.004},
   Abstract = {Inspired by papers of Vese-Osher [Modeling textures with
             total variation minimization and oscillating patterns in
             image processing, Technical Report 02-19, 2002] and
             Osher-Solé-Vese [Image decomposition and restoration using
             total variation minimization and the H-1 norm, Technical
             Report 02-57, 2002] we present a wavelet-based treatment of
             variational problems arising in the field of image
             processing. In particular, we follow their approach and
             discuss a special class of variational functionals that
             induce a decomposition of images into oscillating and
             cartoon components and possibly an appropriate 'noise'
             component. In the setting of [Modeling textures with total
             variation minimization and oscillating patterns in image
             processing, Technical Report 02-19, 2002] and [Image
             decomposition and restoration using total variation
             minimization and the H-1 norm, Technical Report 02-57,
             2002], the cartoon component of an image is modeled by a BV
             function; the corresponding incorporation of BV penalty
             terms in the variational functional leads to PDE schemes
             that are numerically intensive. By replacing the BV penalty
             term by a B11(L1) term (which amounts to a slightly stronger
             constraint on the minimizer), and writing the problem in a
             wavelet framework, we obtain elegant and numerically
             efficient schemes with results very similar to those
             obtained in [Modeling textures with total variation
             minimization and oscillating patterns in image processing,
             Technical Report 02-19, 2002] and [Image decomposition and
             restoration using total variation minimization and the H-1
             norm, Technical Report 02-57, 2002]. This approach allows
             us, moreover, to incorporate general bounded linear blur
             operators into the problem so that the minimization leads to
             a simultaneous decomposition, deblurring and denoising. ©
             2004 Elsevier Inc. All rights reserved.},
   Doi = {10.1016/j.acha.2004.12.004},
   Key = {fds287170}
}

@article{fds287172,
   Author = {Roussos, E and Roberts, S and Daubechies, I},
   Title = {Variational Bayesian learning for wavelet independent
             component analysis},
   Journal = {AIP Conference Proceedings},
   Volume = {803},
   Pages = {274-281},
   Publisher = {AIP},
   Year = {2005},
   Month = {November},
   ISSN = {0094-243X},
   url = {http://dx.doi.org/10.1063/1.2149805},
   Abstract = {In an exploratory approach to data analysis, it is often
             useful to consider the observations as generated from a set
             of latent generators or "sources" via a generally unknown
             mapping. For the noisy overcomplete case, where we have more
             sources than observations, the problem becomes extremely
             ill-posed. Solutions to such inverse problems can, in many
             cases, be achieved by incorporating prior knowledge about
             the problem, captured in the form of constraints. This
             setting is a natural candidate for the application of the
             Bayesian methodology, allowing us to incorporate "soft"
             constraints in a natural manner. The work described in this
             paper is mainly driven by problems in functional magnetic
             resonance imaging of the brain, for the neuro-scientific
             goal of extracting relevant "maps" from the data. This can
             be stated as a 'blind' source separation problem. Recent
             experiments in the field of neuroscience show that these
             maps are sparse, in some appropriate sense. The separation
             problem can be solved by independent component analysis
             (ICA), viewed as a technique for seeking sparse components,
             assuming appropriate distributions for the sources. We
             derive a hybrid wavelet-ICA model, transforming the signals
             into a domain where the modeling assumption of sparsity of
             the coefficients with respect to a dictionary is natural. We
             follow a graphical modeling formalism, viewing ICA as a
             probabilistic generative model. We use hierarchical source
             and mixing models and apply Bayesian inference to the
             problem. This allows us to perform model selection in order
             to infer the complexity of the representation, as well as
             automatic denoising. Since exact inference and learning in
             such a model is intractable, we follow a variational
             Bayesian mean-field approach in the conjugate-exponential
             family of distributions, for efficient unsupervised learning
             in multi-dimensional settings. The performance of the
             proposed algorithm is demonstrated on some representative
             experiments. © 2005 American Institute of
             Physics.},
   Doi = {10.1063/1.2149805},
   Key = {fds287172}
}

@article{fds287173,
   Author = {Zou, J and Gilbert, A and Strauss, M and Daubechies,
             I},
   Title = {Theoretical and experimental analysis of a randomized
             algorithm for Sparse Fourier transform analysis},
   Journal = {Journal of Computational Physics},
   Volume = {211},
   Number = {2},
   Pages = {572-595},
   Publisher = {Elsevier BV},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.jcp.2005.06.005},
   Abstract = {We analyze a sublinear RAℓSFA (randomized algorithm for
             Sparse Fourier analysis) that finds a near-optimal B-term
             Sparse representation R for a given discrete signal S of
             length N, in time and space poly (B, log(N)), following the
             approach given in [A.C. Gilbert, S. Guha, P. Indyk, S.
             Muthukrishnan, M. Strauss, Near-Optimal Sparse Fourier
             Representations via Sampling, STOC, 2002]. Its time cost
             poly (log(N)) should be compared with the superlinear Ω(N
             log N) time requirement of the Fast Fourier Transform (FFT).
             A straightforward implementation of the RAℓSFA, as
             presented in the theoretical paper [A.C. Gilbert, S. Guha,
             P. Indyk, S. Muthukrishnan, M. Strauss, Near-Optimal Sparse
             Fourier Representations via Sampling, STOC, 2002], turns out
             to be very slow in practice. Our main result is a greatly
             improved and practical RAℓSFA. We introduce several new
             ideas and techniques that speed up the algorithm. Both
             rigorous and heuristic arguments for parameter choices are
             presented. Our RAℓSFA constructs, with probability at
             least 1 - δ, a near-optimal B-term representation R in time
             poly(B) log(N) log(1/δ)/ε2 log(M) such that ∥S - R∥22
             ≤ (1 + ε) ∥S - Ropt∥22. Furthermore, this RAℓSFA
             implementation already beats the FFTW for not unreasonably
             large N. We extend the algorithm to higher dimensional cases
             both theoretically and numerically. The crossover point lies
             at N ≃ 70, 000 in one dimension, and at N ≃ 900 for data
             on a N × N grid in two dimensions for small B signals where
             there is noise. © 2005 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.jcp.2005.06.005},
   Key = {fds287173}
}

@article{fds287174,
   Author = {Daubechies, I and DeVore, RA and Güntürk, CS and Vaishampayan,
             VA},
   Title = {A/D conversion with imperfect quantizers},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {52},
   Number = {3},
   Pages = {874-885},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {March},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2005.864430},
   Abstract = {This paper analyzes mathematically the effect of quantizer
             threshold imperfection commonly encountered in the circuit
             implementation of analog-to-digital (A/D) converters such as
             pulse code modulation (PCM) and sigma-delta (∑Δ)
             modulation. ∑Δ modulation, which is based on coarse
             quantization of oversampled (redundant) samples of a signal,
             enjoys a type of self-correction property for quantizer
             threshold errors (bias) that is not shared by PCM. Although
             "classical" ∑Δ modulation is inferior to PCM in the
             rate-distortion sense, this robustness feature is believed
             to be one of the reasons why ∑Δ modulation is preferred
             over PCM in A/D converters with imperfect quantizers.
             Motivated by these facts, other encoders are constructed in
             this paper that use redundancy to obtain a similar
             self-correction property, but that achieve higher order
             accuracy relative to bit rate compared to classical ∑Δ.
             More precisely, two different types of encoders are
             introduced that exhibit exponential accuracy in the bit rate
             (in contrast to the polynomial-type accuracy of classical
             ∑Δ) while possessing the self-correction property. ©
             2006 IEEE.},
   Doi = {10.1109/TIT.2005.864430},
   Key = {fds287174}
}
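
A quick numerical illustration of the self-correction property described
above (a sketch, not the paper's encoders): a first-order sigma-delta loop
with a biased one-bit quantizer. The feedback keeps the internal state
bounded, so the running average of the bits still tracks a constant input x
with error O(1/N), independent of the bias; the same bias in a PCM-style
successive-approximation converter would leave a persistent offset.

    import numpy as np

    def sigma_delta_bits(x, n, bias=0.2):
        # first-order sigma-delta modulation of the constant input x,
        # with a comparator threshold offset by 'bias'
        u, bits = 0.0, np.empty(n)
        for k in range(n):
            b = 1.0 if u >= bias else -1.0   # imperfect comparator
            u += x - b                       # integrate the error
            bits[k] = b
        return bits

    # bits = sigma_delta_bits(0.3, 10_000); bits.mean() stays close to
    # 0.3 even though the comparator threshold is badly biased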

@article{fds287176,
   Author = {Daubechies, I and Yilmaz, O},
   Title = {Robust and practical analog-to-digital conversion with
             exponential precision},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {52},
   Number = {8},
   Pages = {3533-3545},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {August},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2006.878220},
   Abstract = {Beta-encoders with error correction were introduced by
             Daubechies, DeVore, Güntürk and Vaishampayan as an
             alternative to pulse-code modulation (PCM) for
             analog-to-digital conversion. An N -bit beta-encoder
             quantizes a real number by computing one of its N-bit
             truncated β-expansions where β ∈ (1, 2) determines the
             base of expansion. These encoders have (almost) optimal
             rate-distortion properties like PCM; furthermore, they
             exploit the redundancy of beta-expansions and thus they are
             robust with respect to quantizer imperfections. However,
             these encoders have the shortcoming that the decoder needs
             to know the value of the base of expansion β, a gain factor
             in the circuit used by the encoder, which is an impractical
             constraint. We present a method to implement beta-encoders
             so that they are also robust with respect to uncertainties
             of the value of β. The method relies upon embedding the
             value of β in the encoded bitstream.We show that this can
             be done without a priori knowledge of β by the transmitting
             party. Moreover the algorithm still works if the value of β
             changes (slowly) during the implementation. © 2006
             IEEE.},
   Doi = {10.1109/TIT.2006.878220},
   Key = {fds287176}
}

@article{fds287171,
   Author = {Hughes, SM and Daubechies, I},
   Title = {Simpler alternatives to information theoretic similarity
             metrics for multimodal image alignment},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {365-368},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2006.313169},
   Abstract = {Mutual information (MI) based methods for image registration
             enjoy great experimental success and are becoming widely
             used. However, they impose a large computational burden that
             limits their use; many applications would benefit from a
             reduction of the computational load. Although the
             theoretical justification for these methods draws upon the
             stochastic concept of mutual information, in practice, such
             methods actually seek the best alignment by maximizing a
             number that is (deterministically) computed from the two
             images. These methods thus optimize a fixed function, the
             "similarity metric," over different candidate alignments of
             the two images. Accordingly, we study the important features
             of the computationally complex MI similarity metric with the
             goal of distilling them into simpler surrogate functions
             that are easier to compute. More precisely, we show that
             maximizing the MI similarity metric is equivalent to
             minimizing a certain distance metric between equivalence
             classes of images, where images f and g are said to be
             equivalent if there exists a bijection φ such that f(x) =
             φ(g(x)) for all x. We then show how to design new
             similarity metrics for image alignment with this property.
             Although we preserve only this aspect of MI, our new metrics
             show equal alignment accuracy and similar robustness to
             noise, while significantly decreasing computation time. We
             conclude that even the few properties of MI preserved by our
             method suffice for accurate registration and may in fact be
             responsible for MI's success. ©2006 IEEE.},
   Doi = {10.1109/ICIP.2006.313169},
   Key = {fds287171}
}
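
For reference, the MI similarity metric itself is a short computation from
the joint histogram of the two images; this sketch (with an assumed bin
count) is the deterministic quantity that registration methods maximize
over candidate alignments. Note that it is unchanged when either image's
intensities are remapped by a bijection that permutes histogram bins, the
property the paper isolates and preserves in its cheaper surrogates.

    import numpy as np

    def mutual_information(f, g, bins=32):
        # joint intensity histogram, normalized to a probability table
        pfg, _, _ = np.histogram2d(f.ravel(), g.ravel(), bins=bins)
        pfg /= pfg.sum()
        pf = pfg.sum(axis=1, keepdims=True)   # marginal of f
        pg = pfg.sum(axis=0, keepdims=True)   # marginal of g
        nz = pfg > 0                          # avoid log(0)
        return float(np.sum(pfg[nz] * np.log(pfg[nz] / (pf @ pg)[nz])))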

@article{fds335534,
   Author = {Daubechies, I and Teschke, G and Vese, L},
   Title = {Iteratively solving linear inverse problems under general
             convex constraints},
   Journal = {Inverse Problems and Imaging},
   Volume = {1},
   Number = {1},
   Pages = {29-46},
   Publisher = {American Institute of Mathematical Sciences
             (AIMS)},
   Year = {2007},
   Month = {January},
   url = {http://dx.doi.org/10.3934/ipi.2007.1.29},
   Abstract = {We consider linear inverse problems where the solution is
             assumed to fulfill some general homogeneous convex
             constraint. We develop an algorithm that amounts to a
             projected Landweber iteration and that provides an
             iterative approach to the solution of this inverse problem.
             Under relatively moderate assumptions on the constraint we
             can always prove weak convergence of the iterative scheme.
             In certain cases, i.e., for special families of convex
             constraints, weak convergence implies norm convergence. The
             presented approach covers a wide range of problems, e.g.,
             Besov- or BV-restoration, for which we also present
             numerical experiments in the context of image
             processing.},
   Doi = {10.3934/ipi.2007.1.29},
   Key = {fds335534}
}
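
The iteration described above is only a few lines in the finite-dimensional
case. A minimal sketch, assuming the operator has been rescaled so that
||A|| < 1 and that a routine computing the metric projection onto the
convex constraint set is supplied:

    import numpy as np

    def projected_landweber(A, y, project, n_iter=500):
        # Landweber step, then projection onto the convex constraint set
        x = np.zeros(A.shape[1])
        for _ in range(n_iter):
            x = project(x + A.T @ (y - A @ x))
        return x

    # example: nonnegativity constraint, whose projection is clipping
    # x_hat = projected_landweber(A, y, lambda z: np.maximum(z, 0.0))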

@article{fds287175,
   Author = {Daubechies, I and Lazarsfeld, R and Morgan, J and Okounkov, A and Tao,
             T},
   Title = {Reply to Davey, Henriksen, Marković and Pratt
             [4]},
   Journal = {Notices of the American Mathematical Society},
   Volume = {54},
   Number = {6},
   Pages = {694},
   Year = {2007},
   Month = {June},
   ISSN = {0002-9920},
   Key = {fds287175}
}

@article{fds287178,
   Author = {Loris, I and Nolet, G and Daubechies, I and Dahlen,
             FA},
   Title = {Tomographic inversion using ℓ1-norm
             regularization of wavelet coefficients},
   Journal = {Geophysical Journal International},
   Volume = {170},
   Number = {1},
   Pages = {359-370},
   Publisher = {Oxford University Press (OUP)},
   Year = {2007},
   Month = {July},
   ISSN = {0956-540X},
   url = {http://dx.doi.org/10.1111/j.1365-246X.2007.03409.x},
   Abstract = {We propose the use of ℓ1 regularization in a wavelet basis
             for the solution of linearized seismic tomography problems
             Am = d, allowing for the possibility of sharp
             discontinuities superimposed on a smoothly varying
             background. An iterative method is used to find a sparse
             solution m that contains no more fine-scale structure than
             is necessary to fit the data d to within its assigned
             errors. © 2007 The Authors Journal compilation © 2007
             RAS.},
   Doi = {10.1111/j.1365-246X.2007.03409.x},
   Key = {fds287178}
}

@article{fds287177,
   Author = {Daubechies, I and Runborg, O and Zou, J},
   Title = {A sparse spectral method for homogenization multiscale
             problems},
   Journal = {Multiscale Modeling and Simulation},
   Volume = {6},
   Number = {3},
   Pages = {711-740},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2007},
   Month = {August},
   ISSN = {1540-3459},
   url = {http://dx.doi.org/10.1137/060676258},
   Abstract = {We develop a new sparse spectral method, in which the fast
             Fourier transform (FFT) is replaced by RAℓSFA (randomized
             algorithm of sparse Fourier analysis); this is a sublinear
             randomized algorithm that takes time O(B log N) to recover a
             B-term Fourier representation for a signal of length N,
             where we assume B ≪ N. To illustrate its potential, we
             consider the parabolic homogenization problem with a
             characteristic fine scale size ε. For fixed tolerance the
             sparse method has a computational cost of O(|log ε|) per
             time step, whereas standard methods cost at least O(ε^{-1}).
             We present a theoretical analysis as well as numerical
             results; they show the advantage of the new method in speed
             over the traditional spectral methods when ε is very small.
             We also show some ways to extend the methods to hyperbolic
             and elliptic problems. © 2007 Society for Industrial and
             Applied Mathematics.},
   Doi = {10.1137/060676258},
   Key = {fds287177}
}

@article{fds287179,
   Author = {Cvetkovic, Z and Daubechies, I and Logan, BF},
   Title = {Single-bit oversampled A/D conversion with exponential
             accuracy in the bit rate},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {53},
   Number = {11},
   Pages = {3979-3989},
   Year = {2007},
   Month = {November},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2007.907508},
   Abstract = {A scheme for simple oversampled analog-to-digital (A/D)
             conversion using single-bit quantization is presented. The
             scheme is based on recording positions of zero-crossings of
             the input signal added to a deterministic dither function.
             This information can be represented in a manner such that
             the bit rate increases only logarithmically with the
             oversampling factor r. The input band-limited signal can be
             reconstructed from this information locally with O(1/r)
             pointwise error, resulting in an exponentially decaying
             distortion-rate characteristic. In the course of studying
             the accuracy of the proposed A/D conversion scheme, some new
             results are established about reconstruction of band-limited
             signals from irregular samples using linear combination of
             functions with fast decay. Schemes for local interpolation
             of band-limited signals from quantized irregular samples are
             also proposed. © 2007 IEEE.},
   Doi = {10.1109/TIT.2007.907508},
   Key = {fds287179}
}

@article{fds287180,
   Author = {Rudin, C and Schapire, RE and Daubechies, I},
   Title = {Analysis of boosting algorithms using the smooth margin
             function},
   Journal = {Annals of Statistics},
   Volume = {35},
   Number = {6},
   Pages = {2723-2768},
   Publisher = {Institute of Mathematical Statistics},
   Year = {2007},
   Month = {December},
   ISSN = {0090-5364},
   url = {http://dx.doi.org/10.1214/009053607000000785},
   Abstract = {We introduce a useful tool for analyzing boosting algorithms
             called the "smooth margin function," a differentiable
             approximation of the usual margin for boosting algorithms.
             We present two boosting algorithms based on this smooth
             margin, "coordinate ascent boosting" and "approximate
             coordinate ascent boosting," which are similar to Freund and
             Schapire's AdaBoost algorithm and Breiman's arc-gv
             algorithm. We give convergence rates to the maximum margin
             solution for both of our algorithms and for arc-gv. We then
             study AdaBoost's convergence properties using the smooth
             margin function. We precisely bound the margin attained by
             AdaBoost when the edges of the weak classifiers fall within
             a specified range. This shows that a previous bound proved
             by Rätsch and Warmuth is exactly tight. Furthermore, we use
             the smooth margin to capture explicit properties of AdaBoost
             in cases where cyclic behavior occurs. © Institute of
             Mathematical Statistics, 2007.},
   Doi = {10.1214/009053607000000785},
   Key = {fds287180}
}

@article{fds287181,
   Author = {Daubechies, I and Teschke, G and Vese, L},
   Title = {On some iterative concepts for image restoration},
   Journal = {Advances in Imaging and Electron Physics},
   Volume = {150},
   Pages = {1-51},
   Publisher = {Elsevier},
   Year = {2008},
   Month = {January},
   ISSN = {1076-5670},
   url = {http://dx.doi.org/10.1016/S1076-5670(07)00001-8},
   Abstract = {Several iterative strategies for solving inverse problems in
             the context of signal and image processing are discussed.
             The focus is on problems for which it is reasonable to
             assume that the solution has a sparse expansion with
             respect to a wavelet basis or frame. A variational
             formulation of the
             problem is considered and an iteration scheme for which the
             iterates approximate the solution is constructed. The
             concrete problem of simultaneously denoising, decomposing,
             and deblurring a given image, is discussed. The associated
             variational formulation of the problem contains terms that
             promote sparsity and smoothness. A natural extension to
             vector-valued inverse problems is also considered. In the
             linear case, and under fairly general assumptions on the
             constraint, it is proved that weak convergence of the
             iterative scheme always holds.},
   Doi = {10.1016/S1076-5670(07)00001-8},
   Key = {fds287181}
}

@article{fds287184,
   Author = {Johnson, CR and Hendriks, E and Berezhnoy, IJ and Brevdo, E and Hughes,
             SM and Daubechies, I and Li, J and Postma, E and Wang,
             JZ},
   Title = {Image processing for artist identification: Computerized
             analysis of Vincent van Gogh's painting brushstrokes},
   Journal = {IEEE Signal Processing Magazine},
   Volume = {25},
   Number = {4},
   Pages = {37-48},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {January},
   ISSN = {1053-5888},
   url = {http://dx.doi.org/10.1109/MSP.2008.923513},
   Abstract = {A description is given of approaches to brushwork
             analysis and artist identification within the framework of
             a shared data set. Image processing is now a reality in
             painting analysis, as high-resolution and richer data have
             become available. A summary is presented of the results
             obtained by several groups that used wavelet decompositions
             of the same data set. Better results can likely be achieved
             with a wider range of signal analysis tools. Furthermore,
             interest in painting analysis keeps growing among
             researchers, with targeted conference sessions and
             specialist workshops.},
   Doi = {10.1109/MSP.2008.923513},
   Key = {fds287184}
}

@article{fds287182,
   Author = {Daubechies, I and DeVore, R and Fornasier, M and Güntürk,
             S},
   Title = {Iteratively Re-weighted Least Squares minimization: Proof of
             faster than linear rate for sparse recovery},
   Journal = {CISS 2008, The 42nd Annual Conference on Information
             Sciences and Systems},
   Pages = {26-29},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CISS.2008.4558489},
   Abstract = {Given an m × N matrix Φ, with m < N, the system of
             equations Φx = y is typically underdetermined and has
             infinitely many solutions. Various forms of optimization can
             extract a "best" solution. One of the oldest is to select
             the one with minimal l2 norm. It has been shown that in many
             applications a better choice is the minimal l1 norm
             solution. This is the case in Compressive Sensing, when
             sparse solutions are sought. The minimal l1 norm solution
             can be found by using linear programming; an alternative
             method is Iterative Re-weighted Least Squares (IRLS), which
             in some cases is numerically faster. The main step of IRLS
             finds, for a given weight w, the solution with smallest
             l2(w) norm; this weight is updated at every iteration step:
             if x^(n) is the solution at step n, then w^(n) is defined
             by w_i^(n) := 1/|x_i^(n)|, i = 1, ..., N. We give a
             specific recipe
             for updating weights that avoids technical shortcomings in
             other approaches, and for which we can prove convergence
             under certain conditions on the matrix Φ known as the
             Restricted Isometry Property. We also show that if there is
             a sparse solution, then the limit of the proposed algorithm
             is that sparse solution. It is also shown that whenever the
             solution at a given iteration is sufficiently close to the
             limit, then the remaining steps of the algorithm converge
             exponentially fast. In the standard version of the
             algorithm, designed to emulate l1-minimization, the
             exponential rate is linear; in adapted versions aimed at
             lτ-minimization with τ < 1, we prove a faster than linear
             rate.
             © 2008 IEEE.},
   Doi = {10.1109/CISS.2008.4558489},
   Key = {fds287182}
}

@article{fds287183,
   Author = {Daubechies, I and Fornasier, M and Loris, I},
   Title = {Accelerated projected gradient method for linear inverse
             problems with sparsity constraints},
   Journal = {Journal of Fourier Analysis and Applications},
   Volume = {14},
   Number = {5-6},
   Pages = {764-792},
   Publisher = {Springer Nature},
   Year = {2008},
   Month = {December},
   ISSN = {1069-5869},
   url = {http://dx.doi.org/10.1007/s00041-008-9039-8},
   Abstract = {Regularization of ill-posed linear inverse problems via ℓ1
             penalization has been proposed for cases where the solution
             is known to be (almost) sparse. One way to obtain the
             minimizer of such an ℓ1 penalized functional is via an
             iterative soft-thresholding algorithm. We propose an
             alternative implementation to ℓ1-constraints, using a
             gradient method, with projection on ℓ1-balls. The
             corresponding algorithm uses again iterative
             soft-thresholding, now with a variable thresholding
             parameter. We also propose accelerated versions of this
             iterative method, using ingredients of the (linear) steepest
             descent method. We prove convergence in norm for one of
             these projected gradient methods, without and with
             acceleration. © 2008 Birkhäuser Boston.},
   Doi = {10.1007/s00041-008-9039-8},
   Key = {fds287183}
}

@article{fds287186,
   Author = {Polatkan, G and Jafarpour, S and Brasoveanu, A and Hughes, S and Daubechies, I},
   Title = {Detection of forgery in paintings using supervised
             learning},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {2921-2924},
   Publisher = {IEEE},
   Year = {2009},
   Month = {January},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2009.5413338},
   Abstract = {This paper examines whether machine learning and image
             analysis tools can be used to assist art experts in the
             authentication of unknown or disputed paintings. Recent work
             on this topic [1] has presented some promising initial
             results. Our reexamination of some of these recently
             successful experiments shows that variations in image
             clarity in the experimental datasets were correlated with
             authenticity, and may have acted as a confounding factor,
             artificially improving the results. To determine the extent
             of this factor's influence on previous results, we provide a
             new "ground truth" data set in which originals and copies
             are known and image acquisition conditions are uniform.
             Multiple previously-successful methods are found ineffective
             on this new confounding-factor-free dataset, but we
             demonstrate that supervised machine learning on features
             derived from Hidden-Markov-Tree-modeling of the paintings'
             wavelet coefficients has the potential to distinguish copies
             from originals in the new dataset. ©2009
             IEEE.},
   Doi = {10.1109/ICIP.2009.5413338},
   Key = {fds287186}
}

@article{fds373498,
   Author = {Dahlke, S and Daubechies, I and Elad, M and Kutyniok, G and Teschke,
             G},
   Title = {Executive Summary of Dagstuhl Seminar on Structured
             Decompositions and Efficient Algorithms (08492)},
   Journal = {Dagstuhl Seminar Proceedings},
   Volume = {8492},
   Year = {2009},
   Month = {January},
   Abstract = {New emerging technologies such as high-precision sensors or
             new MRI machines drive us towards a challenging quest for
             new, more effective, and more daring mathematical models and
             algorithms. Therefore, in the last few years researchers
             have started to investigate different methods to efficiently
             represent or extract relevant information from complex, high
             dimensional and/or multimodal data. Efficiently in this
             context means a representation that is linked to the
             features or characteristics of interest, thereby typically
             providing a sparse expansion of those features. Besides the
             construction of new and advanced ansatz systems the central
             question is how to design algorithms that are able to treat
             complex and high dimensional data and that efficiently
             perform a suitable approximation of the signal. One of the
             main challenges is to design new sparse approximation
             algorithms that would ideally combine, with an adjustable
             tradeoff, two properties: a provably good 'quality' of the
             resulting decomposition under mild assumptions on the
             analyzed sparse signal, and numerically efficient design.
             The topic is driven by applications as well as by
             theoretical questions. Therefore, the aim of this seminar
             was to bring together a good mixture of scientists with
             different backgrounds in order to discuss recent progress as
             well as new challenging perspectives. In particular, it was
             intended to strengthen the interaction of mathematicians and
             computer scientists.},
   Key = {fds373498}
}

@article{fds287090,
   Author = {Daubechies, I},
   Title = {The wavelet transform, time-frequency localization and
             signal analysis},
   Pages = {442-486},
   Year = {2009},
   Month = {January},
   Abstract = {Two different procedures are studied by which a frequency
             analysis of a time-dependent signal can be effected, locally
             in time. The first procedure is the short-time or windowed
             Fourier transform, the second is the "wavelet transform," in
             which high frequency components are studied with sharper
             time resolution than low frequency components. The
             similarities and the differences between these two methods
             are discussed. For both schemes a detailed study is made of
             the reconstruction method and its stability, as a function
             of the chosen time-frequency density. Finally the notion of
             "time-frequency localization" is made precise, within this
             framework, by two localization theorems.},
   Key = {fds287090}
}

@article{fds287091,
   Author = {Daubechies, I and Grossmann, A and Meyer, Y},
   Title = {Painless nonorthogonal expansions},
   Pages = {372-384},
   Year = {2009},
   Month = {January},
   Abstract = {In a Hilbert space H, discrete families of vectors {h_j}
             with the property that f = Σ_j <h_j|f> h_j for every f in
             H are considered. This expansion formula is obviously true
             if the family is an orthonormal basis of H, but also can
             hold in situations where the h_j are not mutually
             orthogonal and are "overcomplete." The two classes of
             examples studied here are (i) appropriate sets of
             Weyl-Heisenberg coherent states, based on certain
             (non-Gaussian) fiducial vectors, and (ii) analogous
             families of affine coherent states. It is believed that
             such "quasiorthogonal expansions" will be a useful tool in
             many areas of theoretical physics and applied
             mathematics.},
   Key = {fds287091}
}

@article{fds287092,
   Author = {Brown, K and Cartmill, M},
   Title = {Foreword},
   Volume = {9781400827268},
   Pages = {xv-xvi},
   Booktitle = {The Scientific Bases of Human Anatomy by C.O.
             Oxnard},
   Publisher = {Wiley-Blackwell},
   Year = {2009},
   Month = {January},
   ISBN = {0691114536},
   url = {http://dx.doi.org/10.1515/9781400827268.xv},
   Doi = {10.1515/9781400827268.xv},
   Key = {fds287092}
}

@article{fds287187,
   Author = {Daubechies, I and Roussos, E and Takerkart, S and Benharrosh, M and Golden, C and D'Ardenne, K and Richter, W and Cohen, JD and Haxby,
             J},
   Title = {Independent component analysis for brain fMRI does not
             select for independence.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {106},
   Number = {26},
   Pages = {10415-10422},
   Year = {2009},
   Month = {June},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19556548},
   Abstract = {InfoMax and FastICA are the independent component analysis
             algorithms most used and apparently most effective for brain
             fMRI. We show that this is linked to their ability to handle
             effectively sparse components rather than independent
             components as such. The mathematical design of better
             analysis tools for brain fMRI should thus emphasize other
             mathematical characteristics than independence.},
   Doi = {10.1073/pnas.0903525106},
   Key = {fds287187}
}

@article{fds287188,
   Author = {Brodie, J and Daubechies, I and De Mol and C and Giannone, D and Loris,
             I},
   Title = {Sparse and stable Markowitz portfolios.},
   Journal = {Proceedings of the National Academy of Sciences of the
             United States of America},
   Volume = {106},
   Number = {30},
   Pages = {12267-12272},
   Year = {2009},
   Month = {July},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/19617537},
   Abstract = {We consider the problem of portfolio selection within the
             classical Markowitz mean-variance framework, reformulated as
             a constrained least-squares regression problem. We propose
             to add to the objective function a penalty proportional to
             the sum of the absolute values of the portfolio weights.
             This penalty regularizes (stabilizes) the optimization
             problem, encourages sparse portfolios (i.e., portfolios with
             only few active positions), and allows accounting for
             transaction costs. Our approach recovers the
             no-short-positions portfolios as a special case, but also
             allows for a limited number of short positions. We
             implement this methodology
             on two benchmark data sets constructed by Fama and French.
             Using only a modest amount of training data, we construct
             portfolios whose out-of-sample performance, as measured by
             Sharpe ratio, is consistently and significantly better than
             that of the naïve evenly weighted portfolio.},
   Doi = {10.1073/pnas.0904287106},
   Key = {fds287188}
}

@article{fds287185,
   Author = {Jafarpour, S and Polatkan, G and Brevdo, E and Hughes, S and Brasoveanu,
             A and Daubechies, I},
   Title = {Stylistic analysis of paintings using wavelets and machine
             learning},
   Journal = {European Signal Processing Conference},
   Pages = {1220-1224},
   Year = {2009},
   Month = {December},
   ISSN = {2219-5491},
   Abstract = {Wavelet transforms and machine learning tools can be used to
             assist art experts in the stylistic analysis of paintings. A
             dual-tree complex wavelet transform, Hidden Markov Tree
             modeling and Random Forest classifiers are used here for a
             stylistic analysis of Vincent van Gogh's paintings with
             results on two stylometry challenges that concern "dating,
             resp. extracting distinguishing features". © EURASIP,
             2009.},
   Key = {fds287185}
}

@article{fds287189,
   Author = {Daubechies, I and Devore, R and Fornasier, M and Güntürk,
             CS},
   Title = {Iteratively reweighted least squares minimization for sparse
             recovery},
   Journal = {Communications on Pure and Applied Mathematics},
   Volume = {63},
   Number = {1},
   Pages = {1-38},
   Publisher = {WILEY},
   Year = {2010},
   Month = {January},
   ISSN = {0010-3640},
   url = {http://dx.doi.org/10.1002/cpa.20303},
   Abstract = {Under certain conditions (known as the restricted isometry
             property, or RIP) on the m × N matrix Φ (where m < N),
             vectors x ∈ R^N that are sparse (i.e., have most of their
             entries equal to 0) can be recovered exactly from y := Φx
             even though Φ^{-1}(y) is typically an (N-m)-dimensional
             hyperplane; in addition, x is then equal to the element in
             Φ^{-1}(y) of minimal l1-norm. This minimal element can be
             identified via linear programming algorithms. We study an
             alternative method of determining x, as the limit of an
             iteratively reweighted least squares (IRLS) algorithm. The
             main step of this IRLS finds, for a given weight vector w,
             the element in Φ^{-1}(y) with smallest l2(w)-norm. If
             x^(n) is the solution at iteration step n, then the new
             weight w^(n) is defined by w_i^(n) := [|x_i^(n)|^2 +
             ε_n^2]^{-1/2}, i = 1, ..., N, for a decreasing sequence of
             adaptively defined ε_n; this updated weight is then used
             to obtain x^(n+1), and the process is repeated. We prove
             that when Φ satisfies the RIP conditions, the sequence
             x^(n) converges for all y, regardless of whether Φ^{-1}(y)
             contains a sparse vector. If there is a sparse vector in
             Φ^{-1}(y), then the limit is this sparse vector, and when
             x^(n) is sufficiently close to the limit, the remaining
             steps of the algorithm converge exponentially fast (linear
             convergence in the terminology of numerical optimization).
             The same algorithm with the "heavier" weight w_i^(n) :=
             [|x_i^(n)|^2 + ε_n^2]^{-1+τ/2}, 0 < τ < 1, can recover
             sparse solutions as well; more importantly, we show that
             its local convergence is superlinear and approaches a
             quadratic rate for τ approaching 0. © 2009 Wiley
             Periodicals, Inc.},
   Doi = {10.1002/cpa.20303},
   Key = {fds287189}
}

@article{fds287191,
   Author = {Kobiler, O and Lipman, Y and Therkelsen, K and Daubechies, I and Enquist, LW},
   Title = {Herpesviruses carrying a Brainbow cassette reveal
             replication and expression of limited numbers of incoming
             genomes.},
   Journal = {Nature communications},
   Volume = {1},
   Pages = {146},
   Year = {2010},
   Month = {January},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21266996},
   Abstract = {Whether all the infectious herpesvirus particles entering a
             cell are able to replicate and/or express their genomes is
             not known. Here, we developed a general method to determine
             the number of viral genomes expressed in an infected cell.
             We constructed and analysed fluorophore expression from a
             recombinant pseudorabies virus (PRV263) carrying a Brainbow
             cassette (Cre-conditional expression of different
             fluorophores). Using three isogenic strains derived from
             PRV263, each expressing a single fluorophore, we analysed
             the colour composition of cells infected with these three
             viruses at different multiplicities. We estimate that fewer
             than seven incoming genomes are expressed per cell. In
             addition, those templates that are expressed are the genomes
             selected for replication and packaging into virions. This
             finite limit on the number of viral genomes that can be
             expressed is an intrinsic property of the infected cell and
             may be influenced by viral and cellular factors.},
   Doi = {10.1038/ncomms1145},
   Key = {fds287191}
}

@article{fds287192,
   Author = {Loris, I and Douma, H and Nolet, G and Daubechies, I and Regone,
             C},
   Title = {Nonlinear regularization techniques for seismic
             tomography},
   Journal = {Journal of Computational Physics},
   Volume = {229},
   Number = {3},
   Pages = {890-905},
   Publisher = {Elsevier BV},
   Year = {2010},
   Month = {February},
   ISSN = {0021-9991},
   url = {http://dx.doi.org/10.1016/j.jcp.2009.10.020},
   Abstract = {The effects of several nonlinear regularization techniques
             are discussed in the framework of 3D seismic tomography.
             Traditional, linear, ℓ2 penalties are compared to
             so-called sparsity promoting ℓ1 and ℓ0 penalties, and a
             total variation penalty. Which of these algorithms is judged
             optimal depends on the specific requirements of the
             scientific experiment. If the correct reproduction of model
             amplitudes is important, classical damping towards a smooth
             model using an ℓ2 norm works almost as well as minimizing
             the total variation but is much more efficient. If gradients
             (edges of anomalies) should be resolved with a minimum of
             distortion, we prefer ℓ1 damping of Daubechies-4 wavelet
             coefficients. It has the additional advantage of yielding a
             noiseless reconstruction, contrary to simple ℓ2
             minimization ('Tikhonov regularization') which should be
             avoided. In some of our examples, the ℓ0 method produced
             notable artifacts. In addition we show how nonlinear ℓ1
             methods for finding sparse models can be competitive in
             speed with the widely used ℓ2 methods, certainly under
             noisy conditions, so that there is no need to shun ℓ1
             penalizations. © 2009 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.jcp.2009.10.020},
   Key = {fds287192}
}

@article{fds287093,
   Author = {Daubechies, I},
   Title = {Wavelets and applications},
   Pages = {848-862},
   Year = {2010},
   Month = {July},
   Key = {fds287093}
}

@article{fds287190,
   Author = {Lipman, Y and Chen, X and Daubechies, I and Funkhouser,
             T},
   Title = {Symmetry factored embedding and distance},
   Journal = {ACM SIGGRAPH 2010 Papers, SIGGRAPH 2010},
   Volume = {29},
   Number = {4},
   Pages = {1-1},
   Publisher = {Association for Computing Machinery (ACM)},
   Year = {2010},
   Month = {July},
   ISSN = {0730-0301},
   url = {http://dx.doi.org/10.1145/1778765.1778840},
   Abstract = {We introduce the Symmetry Factored Embedding (SFE) and the
             Symmetry Factored Distance (SFD) as new tools to analyze and
             represent symmetries in a point set. The SFE provides new
             coordinates in which symmetry is "factored out," and the SFD
             is the Euclidean distance in that space. These constructions
             characterize the space of symmetric correspondences between
             points - i.e., orbits. A key observation is that a set of
             points in the same orbit appears as a clique in a
             correspondence graph induced by pairwise similarities. As a
             result, the problem of finding approximate and partial
             symmetries in a point set reduces to the problem of
             measuring connectedness in the correspondence graph, a
             well-studied problem for which spectral methods provide a
             robust solution. We provide methods for computing the SFE
             and SFD for extrinsic global symmetries and then extend them
             to consider partial extrinsic and intrinsic cases. During
             experiments with difficult examples, we find that the
             proposed methods can characterize symmetries in inputs with
             noise, missing data, non-rigid deformations, and complex
             symmetries, without a priori knowledge of the symmetry
             group. As such, we believe that it provides a useful tool
             for automatic shape analysis in applications such as
             segmentation and stationary point detection.},
   Doi = {10.1145/1778765.1778840},
   Key = {fds287190}
}

@article{fds287193,
   Author = {Daubechies, I and Güntürk, CS and Wang, Y and Yilmaz,
             O},
   Title = {The golden ratio encoder},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {56},
   Number = {10},
   Pages = {5097-5110},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {October},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2010.2059750},
   Abstract = {This paper proposes a novel Nyquist-rate analog-to-digital
             (A/D) conversion algorithm which achieves exponential
             accuracy in the bit-rate despite using imperfect components.
             The proposed algorithm is based on a robust implementation
             of a beta-encoder with β = φ = (1+√5)/2, the golden
             ratio. It was previously shown that beta-encoders can be
             implemented in such a way that their exponential accuracy is
             robust against threshold offsets in the quantizer element.
             This paper extends this result by allowing for imperfect
             analog multipliers with imprecise gain values as well.
             Furthermore, a formal computational model for algorithmic
             encoders and a general test bed for evaluating their
             robustness is proposed. © 2010 IEEE.},
   Doi = {10.1109/TIT.2010.2059750},
   Key = {fds287193}
}

@article{fds287105,
   Author = {Daubechies, I},
   Title = {The work of Yves Meyer},
   Journal = {Proceedings of the International Congress of Mathematicians
             2010, ICM 2010},
   Pages = {114-124},
   Year = {2010},
   Month = {December},
   Abstract = {Yves Meyer has made numerous contributions to mathematics,
             several of which will be reviewed here, in particular in
             number theory, harmonic analysis and partial differential
             equations. His work in harmonic analysis led him naturally
             to take an interest in wavelets, when they emerged in the
             early 1980s; his synthesis of the advanced theoretical
             results in singular integral operator theory, established by
             himself and others, and of the requirements imposed by
             practical applications, led to enormous progress for wavelet
             theory and its applications. Wavelets and wavelet packets
             are now standard, extremely useful tools in many
             disciplines; their success is due in large measure to the
             vision, the insight and the enthusiasm of Yves
             Meyer.},
   Key = {fds287105}
}

@article{fds287209,
   Author = {Lipman, Y and Al-Aifari, R and Daubechies, I},
   Title = {The continuous Procrustes distance between two
             surfaces},
   Journal = {Communications on Pure and Applied Mathematics},
   Volume = {66},
   Number = {6},
   Pages = {934-964},
   Publisher = {WILEY},
   Year = {2011},
   url = {http://dx.doi.org/10.1002/cpa.21444},
   Abstract = {http://arxiv.org/abs/1106.4588},
   Doi = {10.1002/cpa.21444},
   Key = {fds287209}
}

@article{fds201618,
   Author = {Cornelis, B and Dooms, A and Daubechies, I and Schelkens,
             P},
   Title = {Report on Digital Image Processing for Art
             Historians},
   Journal = {Sampling Theory and Applications, SampTA '09, Marseille
             France, May 18-22, 2009},
   Year = {2011},
   Key = {fds201618}
}

@article{fds303540,
   Author = {Daubechies, I and Lu, J and Wu, H-T},
   Title = {Synchrosqueezed wavelet transforms: An empirical mode
             decomposition-like tool},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {30},
   Number = {2},
   Pages = {243-261},
   Publisher = {Elsevier BV},
   Year = {2011},
   Month = {March},
   url = {http://arxiv.org/abs/0912.2437v1},
   Abstract = {The EMD algorithm is a technique that aims to decompose into
             their building blocks functions that are the superposition
             of a (reasonably) small number of components, well separated
             in the time-frequency plane, each of which can be viewed as
             approximately harmonic locally, with slowly varying
             amplitudes and frequencies. The EMD has already shown its
             usefulness in a wide range of applications including
             meteorology, structural stability analysis, and medical studies.
             On the other hand, the EMD algorithm contains heuristic and
             ad hoc elements that make it hard to analyze mathematically.
             In this paper we describe a method that captures the flavor
             and philosophy of the EMD approach, albeit using a different
             approach in constructing the components. The proposed method
             is a combination of wavelet analysis and reallocation
             method. We introduce a precise mathematical definition for a
             class of functions that can be viewed as a superposition of
             a reasonably small number of approximately harmonic
             components, and we prove that our method does indeed succeed
             in decomposing arbitrary functions in this class. We provide
             several examples, for simulated as well as real data. ©
             2010 Elsevier Inc. All rights reserved.},
   Doi = {10.1016/j.acha.2010.08.002},
   Key = {fds303540}
}

@article{fds287199,
   Author = {Wu, HT and Flandrin, P and Daubechies, I},
   Title = {One or two frequencies? the synchrosqueezing
             answers},
   Journal = {Advances in Adaptive Data Analysis},
   Volume = {3},
   Number = {1-2},
   Pages = {29-39},
   Publisher = {World Scientific Pub Co Pte Lt},
   Year = {2011},
   Month = {April},
   ISSN = {1793-5369},
   url = {http://dx.doi.org/10.1142/S179353691100074X},
   Abstract = {The synchrosqueezed transform was proposed recently in
             [Daubechies et al. (2009)] as an alternative to the
             empirical mode decomposition (EMD) [Huang et al. (1998)], to
             decompose composite signals into a sum of "modes" that each
             have well-defined instantaneous frequencies. This paper
             presents, for synchrosqueezing, a study similar to that in
             [Rilling and Flandrin (2008)] for EMD, of how two signals
             with close frequencies are recognized and represented as
             such. © 2011 World Scientific Publishing
             Company.},
   Doi = {10.1142/S179353691100074X},
   Key = {fds287199}
}

@article{fds287103,
   Author = {Dooms, A and Daubechies, I},
   Title = {Wavelets},
   Pages = {135-154},
   Publisher = {Wiley-VCH Verlag GmbH & Co. KGaA},
   Year = {2011},
   Month = {April},
   url = {http://dx.doi.org/10.1002/9783527635245.ch7},
   Doi = {10.1002/9783527635245.ch7},
   Key = {fds287103}
}

@article{fds287198,
   Author = {Bunn, JM and Boyer, DM and Lipman, Y and St Clair and EM and Jernvall, J and Daubechies, I},
   Title = {Comparing Dirichlet normal surface energy of tooth crowns, a
             new technique of molar shape quantification for dietary
             inference, with previous methods in isolation and in
             combination.},
   Journal = {American journal of physical anthropology},
   Volume = {145},
   Number = {2},
   Pages = {247-261},
   Year = {2011},
   Month = {June},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/21469070},
   Abstract = {Inferred dietary preference is a major component of
             paleoecologies of extinct primates. Molar occlusal shape
             correlates with diet in living mammals, so teeth are a
             potentially useful structure from which to reconstruct diet
             in extinct taxa. We assess the efficacy of Dirichlet normal
             energy (DNE) calculated for molar tooth surfaces for
             reflecting diet. We evaluate DNE, which uses changes in
             normal vectors to characterize curvature, by directly
             comparing this metric to metrics previously used in dietary
             inference. We also test whether combining methods improves
             diet reconstructions. The study sample consisted of 146
             lower (mandibular) second molars belonging to 24 euarchontan
             taxa. Five shape quantification metrics were calculated on
             each molar: DNE, shearing quotient, shearing ratio, relief
             index, and orientation patch count rotated (OPCR).
             Statistical analyses were completed for each variable to
             assess effects of taxon and diet. Discriminant function
             analysis was used to assess ability of combinations of
             variables to predict diet. Values differ significantly by
             diet for all variables, although shearing ratios and OPCR
             do not distinguish statistically between insectivores and
             folivores or omnivores and frugivores. Combined analyses
             were much more effective at predicting diet than any metric
             alone. Alone, relief index and DNE were most effective at
             predicting diet. OPCR was the least effective alone but is
             still valuable as the only quantitative measure of surface
             complexity. Of all methods considered, DNE was the least
             methodologically sensitive, and its effectiveness suggests
             it will be a valuable tool for dietary reconstruction.},
   Doi = {10.1002/ajpa.21489},
   Key = {fds287198}
}

@article{fds287208,
   Author = {Lipman, Y and Daubechies, I},
   Title = {Conformal Wasserstein distances: Comparing surfaces in
             polynomial time},
   Journal = {Advances in Mathematics},
   Volume = {227},
   Number = {3},
   Pages = {1047-1077},
   Publisher = {Elsevier BV},
   Year = {2011},
   Month = {June},
   ISSN = {0001-8708},
   url = {http://dx.doi.org/10.1016/j.aim.2011.01.020},
   Abstract = {We present a constructive approach to surface comparison
             realizable by a polynomial-time algorithm. We determine the
             "similarity" of two given surfaces by solving a
             mass-transportation problem between their conformal
             densities. This mass transportation problem differs from the
             standard case in that we require the solution to be
             invariant under global Möbius transformations. We present
             in detail the case where the surfaces to compare are
             disk-like; we also sketch how the approach can be
             generalized to other types of surfaces. © 2011 Elsevier
             Inc.},
   Doi = {10.1016/j.aim.2011.01.020},
   Key = {fds287208}
}

@article{fds287207,
   Author = {Wolff, J and Martens, M and Jafarpour, S and Daubechies, I and Calderbank, R},
   Title = {Uncovering elements of style},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {1017-1020},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5946579},
   Abstract = {This paper relates the style of 16th century Flemish
             paintings by Goossen van der Weyden (GvdW) to the style of
             preliminary sketches or underpaintings made prior to
             executing the painting. Van der Weyden made underpaintings
             in markedly different styles for reasons as yet not
             understood by art historians. The analysis presented here
             starts from a classification of the underpaintings into four
             distinct styles by experts in art history. Analysis of the
             painted surfaces by a combination of wavelet analysis,
             hidden Markov trees and boosting algorithms can distinguish
             the four underpainting styles with greater than 90%
             cross-validation accuracy. On a subsequent blind test this
             classifier provided insight into the hypothesis by art
             historians that different patches of the finished painting
             were executed by different hands. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5946579},
   Key = {fds287207}
}

@article{fds287194,
   Author = {Ružić, T and Cornelis, B and Platiša, L and Pižurica, A and Dooms,
             A and Philips, W and Martens, M and De Mey and M and Daubechies,
             I},
   Title = {Virtual restoration of the Ghent altarpiece using crack
             detection and inpainting},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {6915 LNCS},
   Pages = {417-428},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2011},
   Month = {September},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-23687-7_38},
   Abstract = {In this paper, we present a new method for virtual
             restoration of digitized paintings, with the special focus
             on the Ghent Altarpiece (1432), one of Belgium's greatest
             masterpieces. The goal of the work is to remove cracks from
             the digitized painting, thereby approximating how the
             painting looked before ageing for nearly 600 years and
             aiding art historical and palaeographical analysis. For
             crack detection, we employ a multiscale morphological
             approach, which can cope with greatly varying thickness of
             the cracks as well as with their varying intensities (from
             dark to light). Due to the content of the painting (with
             extremely many fine details) and complex type of cracks
             (including inconsistent whitish clouds around them), the
             available inpainting methods do not provide satisfactory
             results on many parts of the painting. We show that
             patch-based methods outperform pixel-based ones, while
             still leaving much room for improvement in this
             application. We
             propose a new method for candidate patch selection, which
             can be combined with different patch-based inpainting
             methods to improve their performance in crack removal. The
             results demonstrate improved performance, with less
             artefacts and better preserved fine details. © 2011
             Springer-Verlag.},
   Doi = {10.1007/978-3-642-23687-7_38},
   Key = {fds287194}
}

@article{fds287210,
   Author = {Simons, FJ and Loris, I and Nolet, G and Daubechies, IC and Voronin, S and Judd, JS and Vetter, PA and Charléty, J and Vonesch,
             C},
   Title = {Solving or resolving global tomographic models with
             spherical wavelets, and the scale and sparsity of seismic
             heterogeneity},
   Journal = {Geophysical Journal International},
   Volume = {187},
   Number = {2},
   Pages = {969-988},
   Year = {2011},
   Month = {November},
   ISSN = {0956-540X},
   url = {http://dx.doi.org/10.1111/j.1365-246X.2011.05190.x},
   Abstract = {We propose a class of spherical wavelet bases for the
             analysis of geophysical models and for the tomographic
             inversion of global seismic data. Its multiresolution
             character allows for modelling with an effective spatial
             resolution that varies with position within the Earth. Our
             procedure is numerically efficient and can be implemented
             with parallel computing. We discuss two possible types of
             discrete wavelet transforms in the angular dimension of the
             cubed sphere. We describe benefits and drawbacks of these
             constructions and apply them to analyse the information in
             two published seismic wave speed models of the mantle, using
             the statistics of wavelet coefficients across scales. The
             localization and sparsity properties of wavelet bases allow
             finding a sparse solution to inverse problems by iterative
             minimization of a combination of the ℓ2 norm of the data
             residuals and the ℓ1 norm of the model wavelet
             coefficients. By validation with realistic synthetic
             experiments we illustrate the likely gains from our new
             approach in future inversions of finite-frequency seismic
             data. © 2011 The Authors Geophysical Journal International
             © 2011 RAS.},
   Doi = {10.1111/j.1365-246X.2011.05190.x},
   Key = {fds287210}
}

@article{fds287211,
   Author = {Boyer, D and Lipman, Y and Clair, ES and Puente, J and Funkhouser, T and Patel, B and Jernvall, J and Daubechies, I},
   Title = {Algorithms to automatically quantify the geometric
             similarity of anatomical surfaces},
   Journal = {Proceedings of the National Academy of Sciences},
   Volume = {108},
   Number = {45},
   Pages = {18221-18226},
   Year = {2011},
   Month = {November},
   ISSN = {1091-6490},
   url = {http://www.ncbi.nlm.nih.gov/pubmed/22025685},
   Abstract = {http://arxiv.org/abs/1110.3649},
   Doi = {10.1073/pnas.1112822108},
   Key = {fds287211}
}

@article{fds287196,
   Author = {Simons, FJ and Loris, I and Brevdo, E and Daubechies,
             IC},
   Title = {Wavelets and wavelet-like transforms on the sphere and their
             application to geophysical data inversion},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {8138},
   Publisher = {SPIE},
   Year = {2011},
   Month = {November},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.892285},
   Abstract = {Many flexible parameterizations exist to represent data on
             the sphere. In addition to the venerable spherical
             harmonics, we have the Slepian basis, harmonic splines,
             wavelets and wavelet-like Slepian frames. In this paper we
             focus on the latter two: spherical wavelets developed for
             geophysical applications on the cubed sphere, and the
             Slepian "tree", a new construction that combines a quadratic
             concentration measure with wavelet-like multiresolution. We
             discuss the basic features of these mathematical tools, and
             illustrate their applicability in parameterizing large-scale
             global geophysical (inverse) problems. © 2011 Copyright
             Society of Photo-Optical Instrumentation Engineers
             (SPIE).},
   Doi = {10.1117/12.892285},
   Key = {fds287196}
}

@article{fds287195,
   Author = {Platiša, L and Cornelis, B and Ružić, T and Pižurica, A and Dooms,
             A and Martens, M and De Mey and M and Daubechies, I},
   Title = {Spatiogram features to characterize pearls in
             paintings},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {801-804},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2011.6116677},
   Abstract = {Objective characterization of jewels in paintings,
             especially pearls, has been a long-lasting challenge for
             art historians. The way an artist painted pearls reflects
             his ability to observe nature and his knowledge of
             contemporary optical theory. Moreover, the painterly
             execution may also be considered as an individual
             characteristic useful in distinguishing hands. In this work,
             we propose a set of image analysis techniques to analyze and
             measure spatial characteristics of the digital images of
             pearls, all relying on the so called spatiogram image
             representation. Our experimental results demonstrate good
             correlation between the new metrics and the visually
             observed image features, and show that the metrics capture
             the degree of realism of the visual appearance in the
             painting. In that sense, these results lay the basis for
             creating a practical tool for art historical attribution
             and give strong motivation for further investigations in
             this direction. ©
             2011 IEEE.},
   Doi = {10.1109/ICIP.2011.6116677},
   Key = {fds287195}
}

@article{fds287197,
   Author = {Anitha, A and Brasoveanu, A and Duarte, MF and Hughes, SM and Daubechies, I and Dik, J and Janssens, K and Alfeld,
             M},
   Title = {Virtual underpainting reconstruction from X-ray fluorescence
             imaging data},
   Journal = {European Signal Processing Conference},
   Pages = {1239-1243},
   Year = {2011},
   Month = {December},
   ISSN = {2219-5491},
   Abstract = {This paper describes our work on the problem of
             reconstructing the original visual appearance of
             underpaintings (paintings that have been painted over and
             are now covered by a new surface painting) from noninvasive
             X-ray fluorescence imaging data of their canvases. This
             recently-developed imaging technique yields data revealing
             the concentrations of various chemical elements at each
             spatial location across the canvas. These concentrations in
             turn result from pigments present in both the surface
             painting and the underpainting beneath. Reconstructing a
             visual image of the underpainting from this data involves
             repairing acquisition artifacts in the dataset,
             underdetermined source separation into surface and
             underpainting features, identification and inpainting of
             areas of information loss, and finally estimation of the
             original paint colors from the chemical element data. We
             will describe methods we have developed to address each of
             these stages of underpainting recovery and show results on
             lost underpaintings. © EURASIP, 2011.},
   Key = {fds287197}
}

@article{fds215104,
   Author = {Daubechies, I},
   Title = {Developing Mathematical Tools to Investigate
             Art},
   Booktitle = {Bridges 2012 Proceedings},
   Publisher = {Jacobs Publishing},
   Address = {http://www.mathartfun.com},
   Editor = {Robert Bosch and Douglas McKenna and Reza Sarhangi},
   Year = {2012},
   Abstract = {This paper tells the history of a project investigating
             authenticity and forgery in Van Gogh paintings using
             mathematical tools based on wavelet transformations.},
   Key = {fds215104}
}

@article{fds287201,
   Author = {Cohen, A and Daubechies, I and DeVore, R and Kerkyacharian, G and Picard, D},
   Title = {Capturing Ridge Functions in High Dimensions from Point
             Queries},
   Journal = {Constructive Approximation},
   Volume = {35},
   Number = {2},
   Pages = {225-243},
   Publisher = {Springer Nature},
   Year = {2012},
   Month = {April},
   ISSN = {0176-4276},
   url = {http://dx.doi.org/10.1007/s00365-011-9147-6},
   Abstract = {Constructing a good approximation to a function of many
             variables suffers from the "curse of dimensionality".
             Namely, functions on ℝ^N with smoothness of order s can in
             general be captured with accuracy at most O(n^{-s/N}) using
             linear spaces or nonlinear manifolds of dimension n. If N is
             large and s is not, then n has to be chosen inordinately
             large for good accuracy. The large value of N often
             precludes reasonable numerical procedures. On the other
             hand, there is the common belief that real world problems in
             high dimensions have as their solution, functions which are
             more amenable to numerical recovery. This has led to the
             introduction of models for these functions that do not
             depend on smoothness alone but also involve some form of
             variable reduction. In these models it is assumed that,
             although the function depends on N variables, only a small
             number of them are significant. Another variant of this
             principle is that the function lives on a low dimensional
             manifold. Since the dominant variables (respectively the
             manifold) are unknown, this leads to new problems of how to
             organize point queries to capture such functions. The
             present paper studies where to query the values of a ridge
             function f(x) = g(a · x) when both a ∈ ℝ^N and g ∈ C[0,1]
             are unknown. We establish estimates on how well f can be
             approximated using these point queries under the assumptions
             that g ∈ C^s[0,1]. We also study the role of sparsity or
             compressibility of a in such query problems. © 2011
             Springer Science+Business Media, LLC.},
   Doi = {10.1007/s00365-011-9147-6},
   Key = {fds287201}
}

@article{fds287202,
   Author = {Roussos, E and Roberts, S and Daubechies, I},
   Title = {Variational Bayesian learning of sparse representations and
             its application in functional neuroimaging},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {7263 LNAI},
   Pages = {218-225},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2012},
   Month = {November},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-34713-9_28},
   Abstract = {Recent theoretical and experimental work in imaging
             neuroscience reveals that activations inferred from
             functional MRI data have sparse structure. We view sparse
             representation as a problem in Bayesian inference, following
             a machine learning approach, and construct a structured
             generative latent-variable model employing adaptive
             sparsity-inducing priors. The construction allows for
             automatic complexity control and regularization as well as
             denoising. Experimental results with benchmark datasets show
             that the proposed algorithm outperforms standard tools for
             model-free decompositions such as independent component
             analysis. © 2012 Springer-Verlag.},
   Doi = {10.1007/978-3-642-34713-9_28},
   Key = {fds287202}
}

@article{fds287100,
   Author = {Wu, T and Polatkan, G and Steel, D and Brown, W and Daubechies, I and Calderbank, R},
   Title = {Painting analysis using wavelets and probabilistic topic
             models},
   Journal = {2013 IEEE International Conference on Image Processing, ICIP
             2013 - Proceedings},
   Pages = {3264-3268},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ICIP.2013.6738672},
   Abstract = {In this paper, computer-based techniques for stylistic
             analysis of paintings are applied to the five panels of the
             14th century Peruzzi Altarpiece by Giotto di Bondone.
             Features are extracted by combining a dual-tree complex
             wavelet transform with a hidden Markov tree (HMT) model.
             Hierarchical clustering is used to identify stylistic
             keywords in image patches, and keyword frequencies are
             calculated for sub-images that each contains many patches. A
             generative hierarchical Bayesian model learns stylistic
             patterns of keywords; these patterns are then used to
             characterize the styles of the sub-images; this, in turn,
             permits discrimination between paintings. Results suggest
             that such unsupervised probabilistic topic models can be
             useful to distill characteristic elements of style. © 2013
             IEEE.},
   Doi = {10.1109/ICIP.2013.6738672},
   Key = {fds287100}
}

@article{fds320874,
   Author = {Puente, J and Boyer, DM and Gladman, JT and Daubechies,
             IC},
   Title = {Automated approaches to geometric morphometrics.},
   Journal = {American Journal of Physical Anthropology},
   Volume = {150},
   Pages = {226-226},
   Publisher = {WILEY-BLACKWELL},
   Year = {2013},
   Month = {January},
   Key = {fds320874}
}

@article{fds303542,
   Author = {Lipman, Y and Puente, J and Daubechies, I},
   Title = {Conformal Wasserstein distance: II. Computational aspects
             and extensions},
   Journal = {Mathematics of Computation},
   Volume = {82},
   Number = {281},
   Pages = {331-381},
   Publisher = {American Mathematical Society (AMS)},
   Year = {2013},
   Month = {January},
   url = {http://arxiv.org/abs/1103.4681v2},
   Abstract = {This paper is a companion paper to [Yaron Lipman and Ingrid
             Daubechies, Conformal Wasserstein distances: Comparing
             surfaces in polynomial time, Adv. in Math. (ELS), 227
             (2011), no. 3, 1047-1077, (2011)]. We provide numerical
             procedures and algorithms for computing the alignment of and
             distance between two disk-type surfaces. We provide a
             convergence analysis of the discrete approximation to the
             arising mass-transportation problems. We furthermore
             generalize the framework to support sphere-type surfaces,
             and prove a result connecting this distance to local
             geodesic distortion. Finally, we perform numerical
             experiments on several surface datasets and compare them to
             state-of-the-art methods. © 2012 American Mathematical
             Society.},
   Doi = {10.1090/S0025-5718-2012-02569-5},
   Key = {fds303542}
}

@article{fds287200,
   Author = {Cornelis, B and Ružić, T and Gezels, E and Dooms, A and Pižurica, A and Platiša, L and Cornelis, J and Martens, M and De Mey, M and Daubechies,
             I},
   Title = {Crack detection and inpainting for virtual restoration of
             paintings: The case of the Ghent Altarpiece},
   Journal = {Signal Processing},
   Volume = {93},
   Number = {3},
   Pages = {605-619},
   Publisher = {Elsevier BV},
   Year = {2013},
   Month = {March},
   ISSN = {0165-1684},
   url = {http://www.sciencedirect.com/science/article/pii/S0165168412002526},
   Abstract = {Digital image processing is proving to be of great help in
             the analysis and documentation of our vast cultural
             heritage. In this paper, we present a new method for the
             virtual restoration of digitized paintings with special
             attention for the Ghent Altarpiece (1432), a large polyptych
             panel painting of which very few digital reproductions
             exist. We achieve our objective by detecting and digitally
             removing cracks. The detection of cracks is particularly
             difficult because of the varying content features in
             different parts of the polyptych. Three new detection
             methods are proposed and combined in order to detect cracks
             of different sizes as well as varying brightness.
             Semi-supervised clustering based post-processing is used to
             remove objects falsely labelled as cracks. For the
             subsequent inpainting stage, a patch-based technique is
             applied to handle the noisy nature of the images and to
             increase the performance for crack removal. We demonstrate
             the usefulness of our method by means of a case study where
             the goal is to improve readability of the depiction of text
             in a book, present in one of the panels, in order to assist
             paleographers in its deciphering. © 2012 Elsevier
             B.V.},
   Doi = {10.1016/j.sigpro.2012.07.022},
   Key = {fds287200}
}
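
The crack-detection stage described above fuses three detectors tuned to
cracks of different sizes and brightness. As a minimal sketch of the
underlying idea (not the authors' method), a single black top-hat filter
already isolates thin dark cracks; the file name, disk radius, and
threshold below are illustrative assumptions (Python, scikit-image):

from skimage import io, morphology

# Grayscale detail of a panel (hypothetical file).
img = io.imread("panel_detail.png", as_gray=True)

# The black top-hat highlights thin dark structures (cracks) against
# their local background; the disk radius sets the crack-width scale.
tophat = morphology.black_tophat(img, morphology.disk(3))

# A crude global threshold yields a crack mask; the paper instead
# combines three detectors and prunes false detections with
# semi-supervised clustering before patch-based inpainting.
mask = tophat > 0.1 * tophat.max()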

@article{fds287203,
   Author = {Anitha, A and Brasoveanu, A and Duarte, M and Hughes, S and Daubechies,
             I and Dik, J and Janssens, K and Alfeld, M},
   Title = {Restoration of X-ray fluorescence images of hidden
             paintings},
   Journal = {Signal Processing},
   Volume = {93},
   Number = {3},
   Pages = {592-604},
   Publisher = {Elsevier BV},
   Year = {2013},
   Month = {March},
   ISSN = {0165-1684},
   url = {http://dx.doi.org/10.1016/j.sigpro.2012.09.027},
   Abstract = {This paper describes our methods for repairing and restoring
             images of hidden paintings (paintings that have been painted
             over and are now covered by a new surface painting) that
             have been obtained via noninvasive X-ray fluorescence
             imaging of their canvases. This recently developed imaging
             technique measures the concentrations of various chemical
             elements at each two-dimensional spatial location across the
             canvas. These concentrations in turn result from pigments
             present both in the surface painting and in the hidden
             painting beneath. These X-ray fluorescence images provide
             the best available data from which to noninvasively study a
             hidden painting. However, they are typically marred by
             artifacts of the imaging process, features of the surface
             painting, and areas of information loss. Repairing and
             restoring these images thus consists of three stages: (1)
             repairing acquisition artifacts in the dataset, (2) removal
             of features in the images that result from the surface
             painting rather than the hidden painting, and (3)
             identification and repair of areas of information loss. We
             describe methods we have developed to address each of these
             stages: a total-variation minimization approach to artifact
             correction, a novel method for underdetermined blind source
             separation with multimodal side information to address
             surface feature removal, and two application-specific new
             methods for automatically identifying particularly thick or
             X-ray absorbent surface features in the painting. Finally,
             we demonstrate the results of our methods on a hidden
             painting by the artist Vincent van Gogh. © 2012 Elsevier
             B.V.},
   Doi = {10.1016/j.sigpro.2012.09.027},
   Key = {fds287203}
}
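
Stage (1) above relies on total-variation minimization. A minimal sketch
of such a TV step, with scikit-image's Chambolle solver standing in for
the authors' minimization, and with an assumed input file and weight:

import numpy as np
from skimage.restoration import denoise_tv_chambolle

# One elemental-concentration channel of the XRF scan (hypothetical file).
xrf = np.load("xrf_channel.npy")

# TV minimization suppresses acquisition artifacts while preserving
# edges; a larger weight gives stronger smoothing.
repaired = denoise_tv_chambolle(xrf, weight=0.15)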

@article{fds287104,
   Author = {Charléty, J and Voronin, S and Nolet, G and Loris, I and Simons, FJ and Sigloch, K and Daubechies, IC},
   Title = {Global seismic tomography with sparsity constraints:
             Comparison with smoothing and damping regularization},
   Journal = {Journal of Geophysical Research: Solid Earth},
   Volume = {118},
   Number = {9},
   Pages = {4887-4899},
   Publisher = {American Geophysical Union (AGU)},
   Year = {2013},
   Month = {September},
   ISSN = {0148-0227},
   url = {http://dx.doi.org/10.1002/jgrb.50326},
   Abstract = {We present a realistic application of an inversion scheme
             for global seismic tomography that uses as prior information
             the sparsity of a solution, defined as having few nonzero
             coefficients under the action of a linear transformation. In
             this paper, the sparsifying transform is a wavelet
             transform. We use an accelerated iterative soft-thresholding
             algorithm for a regularization strategy, which produces
             sparse models in the wavelet domain. The approach and scheme
             we present may be of use for preserving sharp edges in a
             tomographic reconstruction and minimizing the number of
             features in the solution warranted by the data. The method
             is tested on a data set of time delays for finite-frequency
             tomography using the USArray network, the first such
             application to real data in global seismic tomography. The approach
             presented should also be suitable for other imaging
             problems. From a comparison with a more traditional
             inversion using damping and smoothing constraints, we show
             that (1) we generally retrieve similar features, (2) fewer
             nonzero coefficients under a properly chosen representation
             (such as wavelets) are needed to explain the data at the
             same level of root-mean-square misfit, (3) the model is
             sparse or compressible in the wavelet domain, and (4) we do
             not need to construct a heterogeneous mesh to capture the
             available resolution. Key Points: (1) global tomography with
             solution sparsity in a certain basis as prior information;
             (2) the one-norm of model wavelet coefficients as a constraint
             regularizes the inversion; (3) first realistic application on
             actual data for global seismic tomography. © 2013 American
             Geophysical Union. All Rights Reserved.},
   Doi = {10.1002/jgrb.50326},
   Key = {fds287104}
}
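
The regularization strategy above is an accelerated iterative
soft-thresholding algorithm producing models that are sparse in the
wavelet domain. A plain, unaccelerated sketch, with a generic matrix A
standing in for the tomographic operator and the identity in place of
the wavelet transform (both simplifying assumptions):

import numpy as np

def soft(x, mu):
    # Soft-thresholding: the proximal map of the l1 penalty.
    return np.sign(x) * np.maximum(np.abs(x) - mu, 0.0)

def ista(A, y, lam, n_iter=200):
    L = np.linalg.norm(A, 2) ** 2        # Lipschitz bound for the gradient
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        grad = A.T @ (A @ x - y)         # gradient of 0.5*||Ax - y||^2
        x = soft(x - grad / L, lam / L)  # gradient step, then shrinkage
    return x

# Toy usage: recover a sparse model from noisy linear measurements.
rng = np.random.default_rng(0)
A = rng.standard_normal((60, 200))
x_true = np.zeros(200)
x_true[[5, 50, 120]] = [1.0, -2.0, 1.5]
x_hat = ista(A, A @ x_true + 0.01 * rng.standard_normal(60), lam=0.1)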

@article{fds303539,
   Author = {Cornelis, B and Yang, Y and Vogelstein, JT and Dooms, A and Daubechies,
             I and Dunson, D},
   Title = {Bayesian crack detection in ultra high resolution multimodal
             images of paintings},
   Journal = {2013 18th International Conference on Digital Signal
             Processing, DSP 2013},
   Year = {2013},
   Month = {December},
   url = {http://arxiv.org/abs/1304.5894v2},
   Abstract = {The preservation of our cultural heritage is of paramount
             importance. Thanks to recent developments in digital
             acquisition techniques, powerful image analysis algorithms
             are developed which can be useful non-invasive tools to
             assist in the restoration and preservation of art. In this
             paper we propose a semi-supervised crack detection method
             that can be used for high-dimensional acquisitions of
             paintings coming from different modalities. Our dataset
             consists of a recently acquired collection of images of the
             Ghent Altarpiece (1432), one of Northern Europe's most
             important art masterpieces. Our goal is to build a
             classifier that is able to discern crack pixels from the
             background consisting of non-crack pixels, making optimal
             use of the information that is provided by each modality. To
             accomplish this we employ a recently developed
             non-parametric Bayesian classifier, that uses tensor
             factorizations to characterize any conditional probability.
             A prior is placed on the parameters of the factorization
             such that every possible interaction between predictors is
             allowed while still identifying a sparse subset among these
             predictors. The proposed Bayesian classifier, which we will
             refer to as conditional Bayesian tensor factorization or
             CBTF, is assessed by visually comparing classification
             results with the Random Forest (RF) algorithm. © 2013
             IEEE.},
   Doi = {10.1109/ICDSP.2013.6622710},
   Key = {fds303539}
}

@article{fds350211,
   Author = {Bourguignon, JP and Daubechies, I and Kim, MH and Park,
             Y},
   Title = {Why STEM (science, technology, engineering and
             mathematics)?},
   Journal = {Proceedings of the International Congress of Mathematicians,
             ICM 2014},
   Volume = {1},
   Pages = {787-797},
   Year = {2014},
   Month = {January},
   ISBN = {9788961058049},
   Key = {fds350211}
}

@article{fds287097,
   Author = {Yin, R and Dunson, D and Cornelis, B and Brown, B and Ocon, N and Daubechies, I},
   Title = {Digital cradle removal in X-ray images of art
             paintings},
   Journal = {2014 IEEE International Conference on Image Processing, ICIP
             2014},
   Pages = {4299-4303},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISBN = {9781479957514},
   url = {http://dx.doi.org/10.1109/ICIP.2014.7025873},
   Abstract = {We introduce an algorithm that removes the deleterious
             effect of cradling on X-ray images of paintings on wooden
             panels. The algorithm consists of a three stage procedure.
             Firstly, the cradled regions are located automatically. The
             second step consists of separating the X-ray image into a
             textural and image component. In the last step the algorithm
             learns to distinguish between the texture caused by the
             wooden cradle and the texture belonging to the original
             painted wooden panel. The results obtained with our method
             are compared with those obtained manually by best current
             practice.},
   Doi = {10.1109/ICIP.2014.7025873},
   Key = {fds287097}
}

@article{fds287101,
   Author = {Wu, H-T and Hseu, S-S and Bien, M-Y and Kou, YR and Daubechies,
             I},
   Title = {Evaluating physiological dynamics via synchrosqueezing:
             prediction of ventilator weaning.},
   Journal = {IEEE transactions on bio-medical engineering},
   Volume = {61},
   Number = {3},
   Pages = {736-744},
   Year = {2014},
   Month = {March},
   ISSN = {0018-9294},
   url = {http://dx.doi.org/10.1109/tbme.2013.2288497},
   Abstract = {Oscillatory phenomena abound in many types of signals.
             Identifying the individual oscillatory components that
             constitute an observed biological signal leads to profound
             understanding about the biological system. The instantaneous
             frequency (IF), the amplitude modulation (AM), and their
             temporal variability are widely used to describe these
             oscillatory phenomena. In addition, the shape of the
             oscillatory pattern, repeated in time for an oscillatory
             component, is also an important characteristic that can be
             parametrized appropriately. These parameters can be viewed
             as phenomenological surrogates for the hidden dynamics of
             the biological system. To estimate jointly the IF, AM, and
             shape, this paper applies a novel and robust time-frequency
             analysis tool, referred to as the synchrosqueezing transform
             (SST). The usefulness of the model and SST are shown
             directly in predicting the clinical outcome of ventilator
             weaning. Compared with traditional respiration parameters,
             the breath-to-breath variability has been reported to be a
             better predictor of the outcome of the weaning procedure. So
             far, however, all these indices normally require at least 20
             min of data acquisition to ensure predictive power.
             Moreover, the robustness of these indices to the inevitable
             noise is rarely discussed. We find that based on the
             proposed model, SST, and only 3 min of respiration data, the
             area under the ROC curve for the prediction is 0.76. The
             high predictive power that is achieved in the weaning
             problem, despite a shorter evaluation period, and the
             stability to noise suggest that other similar kinds of
             signal may likewise benefit from the proposed model and
             SST.},
   Doi = {10.1109/tbme.2013.2288497},
   Key = {fds287101}
}
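
The synchrosqueezing transform sharpens a time-frequency representation
by reassigning spectrogram mass to an instantaneous-frequency estimate.
A compact STFT-based variant is sketched below (the paper uses a
wavelet-based SST; the Gaussian window width and the thresholds are
illustrative assumptions). Ridges of the squeezed plane S then give the
IF and AM trajectories from which breath-to-breath variability can be
computed.

import numpy as np
from scipy.signal import stft

def sst(x, fs, n=256, sigma=0.02):
    # Gaussian analysis window g and its time derivative g'.
    u = (np.arange(n) - n / 2) / fs
    g = np.exp(-0.5 * (u / sigma) ** 2)
    dg = -(u / sigma ** 2) * g
    f, t, G = stft(x, fs, window=g, nperseg=n)
    _, _, dG = stft(x, fs, window=dg, nperseg=n)
    # Instantaneous-frequency estimate: omega = f - Im(V_g'/V_g)/(2 pi).
    with np.errstate(divide="ignore", invalid="ignore"):
        omega = f[:, None] - np.imag(dG / G) / (2 * np.pi)
    # Reassign (squeeze) each coefficient to the bin nearest omega.
    S = np.zeros(G.shape)
    good = np.isfinite(omega) & (np.abs(G) > 1e-8)
    rows = np.clip(np.round(omega[good] / (f[1] - f[0])).astype(int),
                   0, len(f) - 1)
    np.add.at(S, (rows, np.nonzero(good)[1]), np.abs(G[good]))
    return f, t, S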

@article{fds287098,
   Author = {Boyer, DM and Puente, J and Gladman, JT and Glynn, C and Mukherjee, S and Yapuncich, GS and Daubechies, I},
   Title = {A new fully automated approach for aligning and comparing
             shapes.},
   Journal = {Anat Rec (Hoboken)},
   Volume = {298},
   Number = {1},
   Pages = {249-276},
   Year = {2015},
   Month = {January},
   ISSN = {1932-8486},
   url = {http://dx.doi.org/10.1002/ar.23084},
   Abstract = {Three-dimensional geometric morphometric (3DGM) methods for
             placing landmarks on digitized bones have become
             increasingly sophisticated in the last 20 years, including
             greater degrees of automation. One aspect shared by all 3DGM
             methods is that the researcher must designate initial
             landmarks. Thus, researcher interpretations of homology and
             correspondence are required for and influence
             representations of shape. We present an algorithm allowing
             fully automatic placement of correspondence points on
             samples of 3D digital models representing bones of different
             individuals/species, which can then be input into standard
             3DGM software and analyzed with dimension reduction
             techniques. We test this algorithm against several samples,
             primarily a dataset of 106 primate calcanei represented by
             1,024 correspondence points per bone. Results of our
             automated analysis of these samples are compared to a
             published study using a traditional 3DGM approach with 27
             landmarks on each bone. Data were analyzed with
             morphologika 2.5 and PAST. Our analyses returned strong
             correlations between principal component scores, similar
             variance partitioning among components, and similarities
             between the shape spaces generated by the automatic and
             traditional methods. While cluster analyses of both
             automatically generated and traditional datasets produced
             broadly similar patterns, there were also differences.
             Overall these results suggest to us that automatic
             quantifications can lead to shape spaces that are as
             meaningful as those based on observer landmarks, thereby
             presenting potential to save time in data collection,
             increase completeness of morphological quantification,
             eliminate observer error, and allow comparisons of shape
             diversity between different types of bones. We provide an R
             package for implementing this analysis.},
   Doi = {10.1002/ar.23084},
   Key = {fds287098}
}

@article{fds287099,
   Author = {Wang, YG and Wu, H-T and Daubechies, I and Li, Y and Estes, EH and Soliman,
             EZ},
   Title = {Automated J wave detection from digital 12-lead
             electrocardiogram.},
   Journal = {Journal of electrocardiology},
   Volume = {48},
   Number = {1},
   Pages = {21-28},
   Year = {2015},
   Month = {January},
   ISSN = {0022-0736},
   url = {http://dx.doi.org/10.1016/j.jelectrocard.2014.10.006},
   Abstract = {In this report we provide a method for automated detection
             of the J wave, defined as a notch or slur in the descending
             slope of the terminal positive wave of the QRS complex,
             using signal processing and functional data analysis
             techniques. Two different sets of ECG tracings were selected
             from the EPICARE ECG core laboratory, Wake Forest School of
             Medicine, Winston Salem, NC. The first set was a training
             set comprised of 100 ECGs, of which 50 had a J wave and
             the other 50 did not. The second set was a test set (n=116
             ECGs) in which the J-wave status (present/absent) was only
             known by the ECG Center staff. All ECGs were recorded using
             GE MAC 1200 (GE Marquette, Milwaukee, Wisconsin) at 10 mm/mV
             calibration, a speed of 25 mm/s, and a 500 Hz sampling rate. All
             ECGs were initially inspected visually for technical errors
             and inadequate quality, and then automatically processed
             with the GE Marquette 12-SL program 2001 version (GE
             Marquette, Milwaukee, WI). We excluded ECG tracings with
             major abnormalities or rhythm disorder. Confirmation of the
             presence or absence of a J wave was done visually by the ECG
             Center staff and verified once again by three of the
             coauthors. There was no disagreement in the identification
             of the J wave state. The signal processing and functional
             data analysis techniques applied to the ECGs were conducted
             at Duke University and the University of Toronto. In the
             training set, the automated detection had sensitivity of
             100% and specificity of 94%. For the test set, sensitivity
             was 89% and specificity was 86%. In conclusion, test results
             of the automated method we developed show a good J wave
             detection accuracy, suggesting possible utility of this
             approach for defining and detecting other complex ECG
             waveforms.},
   Doi = {10.1016/j.jelectrocard.2014.10.006},
   Key = {fds287099}
}

@article{fds303541,
   Author = {Polatkan, G and Zhou, M and Carin, L and Blei, D and Daubechies,
             I},
   Title = {A Bayesian Nonparametric Approach to Image
             Super-Resolution.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {37},
   Number = {2},
   Pages = {346-358},
   Year = {2015},
   Month = {February},
   url = {http://arxiv.org/abs/1209.5019v1},
   Abstract = {Super-resolution methods form high-resolution images from
             low-resolution images. In this paper, we develop a new
             Bayesian nonparametric model for super-resolution. Our
             method uses a beta-Bernoulli process to learn a set of
             recurring visual patterns, called dictionary elements, from
             the data. Because it is nonparametric, the number of
             elements found is also determined from the data. We test the
             results on both benchmark and natural images, comparing with
             several other models from the research literature. We
             perform large-scale human evaluation experiments to assess
             the visual quality of the results. In a first
             implementation, we use Gibbs sampling to approximate the
             posterior. However, this algorithm is not feasible for
             large-scale data. To circumvent this, we then develop an
             online variational Bayes (VB) algorithm. This algorithm
             finds high quality dictionaries in a fraction of the time
             needed by the Gibbs sampler.},
   Doi = {10.1109/tpami.2014.2321404},
   Key = {fds303541}
}

@article{fds287095,
   Author = {Yang, H and Lu, J and Brown, WP and Daubechies, I and Ying,
             L},
   Title = {Quantitative Canvas Weave Analysis Using 2-D Synchrosqueezed
             Transforms: Application of time-frequency analysis to art
             investigation},
   Journal = {Signal Processing Magazine, IEEE},
   Volume = {32},
   Number = {4},
   Pages = {55-63},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {July},
   ISSN = {1053-5888},
   url = {http://hdl.handle.net/10161/12009},
   Abstract = {Quantitative canvas weave analysis has many applications in
             art investigations of paintings, including dating,
             forensics, and canvas rollmate identification.
             Traditionally, canvas analysis is based on X-radiographs.
             Prior to serving as a painting canvas, a piece of fabric is
             coated with a priming agent; smoothing its surface makes
             this layer thicker between and thinner right on top of weave
             threads. These variations affect the X-ray absorption,
             making the weave pattern stand out in X-ray images of the
             finished painting. To characterize this pattern, it is
             customary to visually inspect small areas within the
             X-radiograph and count the number of horizontal and vertical
             weave threads; averages of these then estimate the overall
             canvas weave density. The tedium of this process typically
             limits its practice to just a few sample regions of the
             canvas. In addition, it does not capture more subtle
             information beyond weave density, such as thread angles or
             variations in the weave pattern. Signal processing
             techniques applied to art investigation are now increasingly
             used to develop computer-assisted canvas weave analysis
             tools.},
   Doi = {10.1109/MSP.2015.2406882},
   Key = {fds287095}
}
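
The basic weave statistic, the thread count, can already be estimated
from the dominant spectral peaks of an X-ray patch when the threads are
axis-aligned; the synchrosqueezed transforms in the paper go further
and recover thread angles and local pattern variations. A simplified
thread-count sketch (patch handling and peak search are assumptions):

import numpy as np

def thread_counts(patch, dpi):
    # patch: square grayscale X-ray detail (n x n); returns estimated
    # threads per inch along x and y, assuming an axis-aligned weave.
    p = patch - patch.mean()
    n = p.shape[0]
    fx = np.abs(np.fft.rfft(p.mean(axis=0)))  # horizontal profile spectrum
    fy = np.abs(np.fft.rfft(p.mean(axis=1)))  # vertical profile spectrum
    fx[:3] = 0.0                              # ignore DC and slow drift
    fy[:3] = 0.0
    kx, ky = np.argmax(fx), np.argmax(fy)     # dominant weave frequencies
    return kx * dpi / n, ky * dpi / n         # cycles/inch = threads/inch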

@article{fds287096,
   Author = {Pizurica, A and Platisa, L and Ruzic, T and Cornelis, B and Dooms, A and Martens, M and Dubois, H and Devolder, B and De Mey, M and Daubechies,
             I},
   Title = {Digital image processing of the ghent altarpiece: Supporting
             the painting's study and conservation treatment},
   Journal = {IEEE Signal Processing Magazine},
   Volume = {32},
   Number = {4},
   Pages = {112-122},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {July},
   ISSN = {1053-5888},
   url = {http://dx.doi.org/10.1109/MSP.2015.2411753},
   Abstract = {Hanging in the Saint Bavo Cathedral in Ghent, Belgium, is
             The Ghent Altarpiece, also known as The Adoration of the
             Mystic Lamb (see Figure 1). According to an inscription on
             the outer frames, it was painted by brothers Hubert and Jan
             van Eyck for Joos Vijd and his wife Elisabeth Borluut in
             1432. It is one of the most admired and influential
             paintings in the history of art and has given rise to many
             intriguing questions that have been puzzling art historians
             to date [11]. Moreover, the material history of the panels
             is very complicated. They were hidden, dismantled, moved
             away, stolen, and recovered during riots, fires and wars.
             The recovery of the panels by the U.S. Army from the Nazi
             hoards deep in the Altaussee salt mines has particularly
             marked memories. One panel was stolen in 1934 and never
             recovered. Besides varying conservation conditions, the
             panels underwent numerous restoration treatments and were
             even partially painted over.},
   Doi = {10.1109/MSP.2015.2411753},
   Key = {fds287096}
}

@article{fds287094,
   Author = {Daubechies, I and Saab, R},
   Title = {A Deterministic Analysis of Decimation for Sigma-Delta
             Quantization of Bandlimited Functions},
   Journal = {IEEE Signal Processing Letters},
   Volume = {22},
   Number = {11},
   Pages = {2093-2096},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {November},
   ISSN = {1070-9908},
   url = {http://dx.doi.org/10.1109/LSP.2015.2459758},
   Abstract = {We study Sigma-Delta (Σ Δ) quantization of oversampled
             bandlimited functions. We prove that digitally integrating
             blocks of bits and then down-sampling, a process known as
             decimation, can efficiently encode the associated Σ Δ
             bit-stream. It allows a large reduction in the bit-rate
             while still permitting good approximation of the underlying
             bandlimited function via an appropriate reconstruction
             kernel. Specifically, in the case of stable rth order Σ Δ
             schemes we show that the reconstruction error decays
             exponentially in the bit-rate. For example, this result
             applies to the 1-bit, greedy, first-order Σ Δ
             scheme.},
   Doi = {10.1109/LSP.2015.2459758},
   Key = {fds287094}
}
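
The 1-bit, greedy, first-order Σ Δ scheme named at the end of the
abstract, followed by a block-average decimation (the paper encodes by
digitally integrating blocks of bits; plain averaging is a simplified
stand-in for the reconstruction kernel), can be sketched as:

import numpy as np

def sigma_delta_1bit(y):
    # Greedy first-order scheme: q_n = sign(u_{n-1} + y_n) and
    # u_n = u_{n-1} + y_n - q_n, which keeps the state u bounded
    # whenever |y| <= 1.
    u = 0.0
    q = np.empty_like(y)
    for i, yi in enumerate(y):
        v = u + yi
        q[i] = 1.0 if v >= 0 else -1.0
        u = v - q[i]
    return q

def decimate(q, r):
    # Integrate blocks of r bits, then downsample: one coarse value
    # per block, cutting the bit-rate by roughly a factor r.
    m = len(q) // r
    return q[: m * r].reshape(m, r).mean(axis=1)

# Toy usage: an oversampled low-frequency tone with |y| <= 1.
t = np.arange(4096) / 4096.0
coarse = decimate(sigma_delta_1bit(0.5 * np.sin(2 * np.pi * 3 * t)), r=32)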

@article{fds315774,
   Author = {Daubechies, I and Wang, YG and Wu, H-T},
   Title = {ConceFT: concentration of frequency and time via a
             multitapered synchrosqueezed transform.},
   Journal = {Philosophical transactions. Series A, Mathematical,
             physical, and engineering sciences},
   Volume = {374},
   Number = {2065},
   Pages = {20150193},
   Year = {2016},
   Month = {April},
   ISSN = {1364-503X},
   url = {http://dx.doi.org/10.1098/rsta.2015.0193},
   Abstract = {A new method is proposed to determine the time-frequency
             content of time-dependent signals consisting of multiple
             oscillatory components, with time-varying amplitudes and
             instantaneous frequencies. Numerical experiments as well as
             a theoretical analysis are presented to assess its
             effectiveness.},
   Doi = {10.1098/rsta.2015.0193},
   Key = {fds315774}
}

@article{fds315775,
   Author = {Huang, NE and Daubechies, I and Hou, TY},
   Title = {Adaptive data analysis: theory and applications.},
   Journal = {Philosophical transactions. Series A, Mathematical,
             physical, and engineering sciences},
   Volume = {374},
   Number = {2065},
   Pages = {20150207},
   Year = {2016},
   Month = {April},
   ISSN = {1364-503X},
   url = {http://dx.doi.org/10.1098/rsta.2015.0207},
   Doi = {10.1098/rsta.2015.0207},
   Key = {fds315775}
}

@article{fds317216,
   Author = {Yin, R and Monson, E and Honig, E and Daubechies, I and Maggioni,
             M},
   Title = {Object recognition in art drawings: Transfer of a neural
             network},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2016-May},
   Pages = {2299-2303},
   Publisher = {IEEE},
   Year = {2016},
   Month = {May},
   ISBN = {9781479999880},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2016.7472087},
   Abstract = {We consider the problem of recognizing objects in
             collections of art works, in view of automatically labeling,
             searching and organizing databases of art works. To avoid
             manually labelling objects, we introduce a framework for
             transferring a convolutional neural network (CNN), trained
             on available large collections of labelled natural images,
             to the context of drawings. We retrain both the top and the
             bottom layer of the network, responsible for the high-level
             classification output and the low-level feature detection
             respectively, by transforming natural images into drawings.
             We apply this procedure to the drawings in the Jan Brueghel
             Wiki, and show the transferred CNN learns a discriminative
             metric on drawings and achieves good recognition accuracy.
             We also discuss why standard descriptor-based methods are
             problematic in the context of drawings.},
   Doi = {10.1109/ICASSP.2016.7472087},
   Key = {fds317216}
}
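
The retraining recipe above touches only the two ends of the network:
the low-level feature layer and the classifier head. A PyTorch sketch
under stated assumptions: ResNet-18 stands in for the authors' CNN, and
num_classes is a placeholder for the number of drawing categories:

import torch
import torchvision.models as models

net = models.resnet18(weights="IMAGENET1K_V1")  # pretrained, natural images

for p in net.parameters():                      # freeze the whole network
    p.requires_grad = False

net.conv1.requires_grad_(True)                  # re-open the bottom layer

num_classes = 20                                # assumed placeholder
net.fc = torch.nn.Linear(net.fc.in_features, num_classes)  # new top layer

# Only the unfrozen ends are optimized, on natural images transformed
# to look like drawings (the transformation itself is not shown here).
opt = torch.optim.SGD((p for p in net.parameters() if p.requires_grad),
                      lr=1e-3, momentum=0.9)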

@article{fds321988,
   Author = {Deligiannis, N and Mota, JFC and Cornelis, B and Rodrigues, MRD and Daubechies, I},
   Title = {X-ray image separation via coupled dictionary
             learning},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Volume = {2016-August},
   Pages = {3533-3537},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781467399616},
   url = {http://dx.doi.org/10.1109/ICIP.2016.7533017},
   Abstract = {In support of art investigation, we propose a new source
             separation method that unmixes a single X-ray scan acquired
             from double-sided paintings. Unlike prior source separation
             methods, which are based on statistical or structural
             incoherence of the sources, we use visual images taken from
             the front- and back-side of the panel to drive the
             separation process. The coupling of the two imaging
             modalities is achieved via a new multi-scale dictionary
             learning method. Experimental results demonstrate that our
             method succeeds in the discrimination of the sources, while
             state-of-the-art methods fail to do so.},
   Doi = {10.1109/ICIP.2016.7533017},
   Key = {fds321988}
}

@article{fds323650,
   Author = {Daubechies, I and Defrise, M and De Mol and C},
   Title = {Sparsity-enforcing regularisation and ISTA
             revisited},
   Journal = {Inverse Problems},
   Volume = {32},
   Number = {10},
   Pages = {104001-104001},
   Publisher = {IOP Publishing},
   Year = {2016},
   Month = {August},
   url = {http://dx.doi.org/10.1088/0266-5611/32/10/104001},
   Abstract = {About two decades ago, the concept of sparsity emerged in
             different disciplines such as statistics, imaging, signal
             processing and inverse problems, and proved to be useful for
             several applications. Sparsity-enforcing constraints or
             penalties were then shown to provide a viable alternative to
             the usual quadratic ones for the regularisation of ill-posed
             problems. To compute the corresponding regularised
             solutions, a simple, iterative and provably convergent
             algorithm was proposed and later on referred to as the
             iterative soft-thresholding algorithm. This paper provides a
             brief review of these early results as well as that of the
             subsequent literature, albeit from the authors' limited
             perspective. It also presents the previously unpublished
             proof of an extension of the original framework.},
   Doi = {10.1088/0266-5611/32/10/104001},
   Key = {fds323650}
}
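
With K the forward operator of the ill-posed problem (rescaled so that
‖K‖ ≤ 1) and y the data, the iterative soft-thresholding algorithm
reviewed above minimizes ‖Kx − y‖² + 2λ‖x‖₁ through the iteration
(in LaTeX, with the shrinkage applied componentwise):

x^{(n+1)} = S_{\lambda}\left( x^{(n)} + K^{*}\left( y - K x^{(n)} \right) \right),
\qquad
S_{\mu}(t) = \operatorname{sign}(t)\,\max\{ |t| - \mu,\; 0 \}.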

@article{fds318287,
   Author = {Yin, R and Cornelis, B and Fodor, G and Ocon, N and Dunson, D and Daubechies, I},
   Title = {Removing cradle artifacts in X-ray images of
             paintings},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {9},
   Number = {3},
   Pages = {1247-1272},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2016},
   Month = {August},
   url = {http://dx.doi.org/10.1137/15M1053554},
   Abstract = {We propose an algorithm that removes the visually unpleasant
             effects of cradling in X-ray images of panel paintings, with
             the goal of improving the X-ray image readability by art
             experts. The algorithm consists of three stages. In the
             first stage the location of the cradle is detected
             automatically and the grayscale inconsistency, caused by the
             thickness of the cradle, is corrected. In a second stage we
             use a method called morphological component analysis to
             separate the X-ray image into a so-called cartoon part and a
             texture part, where the latter contains mostly the wood
             grain from both the panel and the cradling. The algorithm
             next learns a Bayesian factor model that distinguishes
             between the texture patterns that originate from the cradle
             and those from other components such as the panel and/or the
             painting on the panel surface, and finally uses this to
             remove the textures associated with the cradle. We apply the
             algorithm to a number of historically important paintings on
             panel. We also show how it can be used to digitally remove
             stretcher artifacts from X-rays of paintings on canvas. We
             compare our results with those obtained manually by best
             current practices in art conservation as well as on a ground
             truth dataset, consisting of X-ray images of a painting
             before and after removal of the physically attached
             cradle.},
   Doi = {10.1137/15M1053554},
   Key = {fds318287}
}

@article{fds320873,
   Author = {O'Neal, WT and Wang, YG and Wu, H-T and Zhang, Z-M and Li, Y and Tereshchenko, LG and Estes, EH and Daubechies, I and Soliman,
             EZ},
   Title = {Electrocardiographic J Wave and Cardiovascular Outcomes in
             the General Population (from the Atherosclerosis Risk In
             Communities Study).},
   Journal = {The American journal of cardiology},
   Volume = {118},
   Number = {6},
   Pages = {811-815},
   Year = {2016},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.amjcard.2016.06.047},
   Abstract = {The association between the J wave, a key component of the
             early repolarization pattern, and adverse cardiovascular
             outcomes remains unclear. Inconsistencies have stemmed from
             the different methods used to measure the J wave. We
             examined the association between the J wave, detected by an
             automated method, and adverse cardiovascular outcomes in
             14,592 (mean age = 54 ± 5.8 years; 56% women; 26% black)
             participants from the Atherosclerosis Risk In Communities
             (ARIC) study. The J wave was detected at baseline (1987 to
             1989) and during follow-up study visits (1990 to 1992, 1993
             to 1995, and 1996 to 1998) using a fully automated method.
             Sudden cardiac death, coronary heart disease death, and
             cardiovascular mortality were ascertained from hospital
             discharge records, death certificates, and autopsy data
             through December 31, 2010. A total of 278 participants
             (1.9%) had evidence of a J wave. Over a median follow-up of
             22 years, 4,376 of the participants (30%) died. In a
             multivariable Cox regression analysis adjusted for
             demographics, cardiovascular risk factors, and potential
             confounders, the J wave was not associated with an increased
             risk of sudden cardiac death (hazard ratio [HR] 0.74, 95% CI
             0.36 to 1.50), coronary heart disease death (HR 0.72, 95% CI
             0.40 to 1.32), or cardiovascular mortality (HR 1.16, 95% CI
             0.87 to 1.56). An interaction was detected for
             cardiovascular mortality by gender with men (HR 1.54, 95% CI
             1.09 to 2.19) having a stronger association than women (HR
             0.74, 95% CI 0.43 to 1.25; P-interaction = 0.030). In
             conclusion, our findings suggest that the J wave is a benign
             entity that is not associated with an increased risk for
             sudden cardiac arrest in middle-aged adults in the United
             States.},
   Doi = {10.1016/j.amjcard.2016.06.047},
   Key = {fds320873}
}

@article{fds318286,
   Author = {Wu, H-T and Lewis, GF and Davila, MI and Daubechies, I and Porges,
             SW},
   Title = {Optimizing Estimates of Instantaneous Heart Rate from Pulse
             Wave Signals with the Synchrosqueezing Transform.},
   Journal = {Methods of information in medicine},
   Volume = {55},
   Number = {5},
   Pages = {463-472},
   Year = {2016},
   Month = {October},
   url = {http://dx.doi.org/10.3414/me16-01-0026},
   Abstract = {<h4>Background</h4>With recent advances in sensor and
             computer technologies, the ability to monitor peripheral
             pulse activity is no longer limited to the laboratory and
             clinic. Now inexpensive sensors, which interface with
             smartphones or other computer-based devices, are expanding
             into the consumer market. When appropriate algorithms are
             applied, these new technologies enable ambulatory monitoring
             of dynamic physiological responses outside the clinic in a
             variety of applications including monitoring fatigue,
             health, workload, fitness, and rehabilitation. Several of
             these applications rely upon measures derived from
             peripheral pulse waves measured via contact or non-contact
             photoplethysmography (PPG). As technologies move from
             contact to non-contact PPG, there are new challenges. The
             technology necessary to estimate average heart rate over a
             few seconds from a noncontact PPG is available. However, a
             technology to precisely measure instantaneous heart rate
             (IHR) from non-contact sensors, on a beat-to-beat basis, is
             more challenging.<h4>Objectives</h4>The objective of this
             paper is to develop an algorithm with the ability to
             accurately monitor IHR from peripheral pulse waves, which
             provides an opportunity to measure the neural regulation of
             the heart from the beat-to-beat heart rate pattern (i.e.,
             heart rate variability).<h4>Methods</h4>The adaptive
             harmonic model is applied to model the contact or
             non-contact PPG signals, and a new methodology, the
             Synchrosqueezing Transform (SST), is applied to extract IHR.
             The body sway rhythm inherited in the non-contact PPG signal
             is modeled and handled by the notion of wave-shape
             function.<h4>Results</h4>The SST optimizes the extraction of
             IHR from the PPG signals and the technique functions well
             even during periods of poor signal to noise. We contrast the
             contact and non-contact indices of PPG derived heart rate
             with a criterion electrocardiogram (ECG). ECG and PPG
             signals were monitored in 21 healthy subjects performing
             tasks with different physical demands. The root mean square
             error of IHR estimated by SST is significantly better than
             commonly applied methods such as the autoregressive (AR) method.
             In the walking situation, where the AR method fails, SST still
             provides a reasonably good result.<h4>Conclusions</h4>The
             SST processed PPG data provided an accurate estimate of the
             ECG derived IHR and consistently performed better than
             commonly applied methods such as the autoregressive
             method.},
   Doi = {10.3414/me16-01-0026},
   Key = {fds318286}
}

@article{fds328056,
   Author = {Fodor, G and Cornelis, B and Yin, R and Dooms, A and Daubechies,
             I},
   Title = {Cradle removal in X-ray images of panel paintings},
   Journal = {Image Processing On Line},
   Volume = {7},
   Pages = {23-42},
   Publisher = {Image Processing On Line},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.5201/ipol.2017.174},
   Abstract = {We address the problem of mitigating the visually
             displeasing effects of cradling in X-ray images of panel
             paintings. The proposed algorithm consists of three stages.
             In the first stage the location of the cradling is detected
             semi-automatically and the grayscale inconsistency, caused
             by the thickness of the cradling, is adjusted. In a second
             stage we use a blind source separation method to decompose
             the X-ray image into a so-called cartoon part and a texture
             part, where the latter contains mostly the wood grain from
             both the panel as well as the cradling. In the third and
             final stage the algorithm tries to learn the distinction
             between the texture patterns that originate from the
             cradling and those from other components such as the panel
             and/or the painting. The goal of the proposed research is to
             improve the readability of X-ray images of paintings for art
             experts.},
   Doi = {10.5201/ipol.2017.174},
   Key = {fds328056}
}

@article{fds324089,
   Author = {Cornelis, B and Yang, H and Goodfriend, A and Ocon, N and Lu, J and Daubechies, I},
   Title = {Removal of Canvas Patterns in Digital Acquisitions of
             Paintings},
   Journal = {IEEE Transactions on Image Processing},
   Volume = {26},
   Number = {1},
   Pages = {160-171},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.1109/tip.2016.2621413},
   Abstract = {We address the removal of canvas artifacts from
             high-resolution digital photographs and X-ray images of
             paintings on canvas. Both imaging modalities are common
             investigative tools in art history and art conservation.
             Canvas artifacts manifest themselves very differently
             according to the acquisition modality; they can hamper the
             visual reading of the painting by art experts, for instance,
             in preparing a restoration campaign. Computer-aided canvas
             removal is desirable for restorers when the painting on
             canvas they are preparing to restore has acquired over the
             years a much more salient texture. We propose a new
             algorithm that combines a cartoon-texture decomposition
             method with adaptive multiscale thresholding in the
             frequency domain to isolate and suppress the canvas
             components. To illustrate the strength of the proposed
             method, we provide various examples, for acquisitions in
             both imaging modalities, for paintings with different types
             of canvas and from different periods. The proposed algorithm
             outperforms previous methods proposed for visual photographs
             such as morphological component analysis and Wiener
             filtering and it also works for the digital removal of
             canvas artifacts in X-ray images.},
   Doi = {10.1109/tip.2016.2621413},
   Key = {fds324089}
}
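
At its core, canvas removal suppresses a quasi-periodic component in
the frequency domain. The paper does this adaptively and at multiple
scales on the texture layer of a cartoon-texture decomposition; the
crude single-scale notch suppression below, with a fixed number of
peaks, only illustrates the principle:

import numpy as np

def suppress_canvas(img, n_peaks=8, hw=2):
    # Zero the strongest off-center Fourier peaks, where a regular
    # canvas weave concentrates its energy.
    F = np.fft.fftshift(np.fft.fft2(img))
    mag = np.abs(F)
    c0, c1 = mag.shape[0] // 2, mag.shape[1] // 2
    mag[c0 - 8:c0 + 9, c1 - 8:c1 + 9] = 0.0   # protect low frequencies
    for _ in range(n_peaks):
        i, j = np.unravel_index(np.argmax(mag), mag.shape)
        i0, j0 = max(i - hw, 0), max(j - hw, 0)
        F[i0:i + hw + 1, j0:j + hw + 1] = 0.0
        mag[i0:i + hw + 1, j0:j + hw + 1] = 0.0
    return np.real(np.fft.ifft2(np.fft.ifftshift(F)))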

@article{fds329099,
   Author = {Voronin, S and Daubechies, I},
   Title = {An iteratively reweighted least squares algorithm for sparse
             regularization},
   Volume = {693},
   Pages = {391-411},
   Booktitle = {Contemporary Mathematics},
   Publisher = {American Mathematical Society},
   Year = {2017},
   Month = {January},
   ISBN = {9781470428365},
   url = {http://dx.doi.org/10.1090/conm/693/13941},
   Abstract = {We present a new algorithm and the corresponding convergence
             analysis for the regularization of linear inverse problems
             with sparsity constraints, applied to a new generalized
             sparsity promoting functional. The algorithm is based on the
             idea of iteratively reweighted least squares, reducing the
             minimization at every iteration step to that of a functional
             including only ℓ2 -norms. This amounts to smoothing of the
             absolute value function that appears in the generalized
             sparsity promoting penalty we consider, with the smoothing
             becoming iteratively less pronounced. We demonstrate that
             the sequence of iterates of our algorithm converges to a
             limit that minimizes the original functional.},
   Doi = {10.1090/conm/693/13941},
   Key = {fds329099}
}
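
The smoothing idea in the abstract can be made concrete for the plain
ℓ1 penalty: replace |x_i| by sqrt(x_i² + ε), solve the resulting
weighted least-squares problem, and shrink ε between iterations. A
sketch (the paper treats a more general sparsity-promoting functional;
λ, the ε schedule, and the iteration count below are illustrative
assumptions):

import numpy as np

def irls(A, y, lam, n_iter=50, eps=1.0):
    # Approximately minimize ||Ax - y||^2 + 2*lam*sum_i sqrt(x_i^2 + eps),
    # which approaches ||Ax - y||^2 + 2*lam*||x||_1 as eps -> 0.
    x = np.zeros(A.shape[1])
    for _ in range(n_iter):
        w = 1.0 / np.sqrt(x ** 2 + eps)    # weights from the current iterate
        # Stationarity with frozen weights is the linear system
        # (A^T A + lam * diag(w)) x = A^T y.
        x = np.linalg.solve(A.T @ A + lam * np.diag(w), A.T @ y)
        eps = max(0.7 * eps, 1e-12)        # iteratively less smoothing
    return x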

@article{fds327595,
   Author = {Yin, R and Gao, T and Lu, YM and Daubechies, I},
   Title = {A tale of two bases: Local-nonlocal regularization on image
             patches with convolution framelets},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {10},
   Number = {2},
   Pages = {711-750},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2017},
   Month = {January},
   url = {http://dx.doi.org/10.1137/16M1091447},
   Abstract = {We propose an image representation scheme combining the
             local and nonlocal characterization of patches in an image.
             Our representation scheme can be shown to be equivalent to a
             tight frame constructed from convolving local bases (e.g.,
             wavelet frames, discrete cosine transforms, etc.) with
             nonlocal bases (e.g., spectral basis induced by nonlinear
             dimension reduction on patches), and we call the resulting
             frame elements convolution framelets. Insight gained from
             analyzing the proposed representation leads to a novel
             interpretation of a recent high-performance patch-based
             image processing algorithm using the point integral method
             (PIM) and the low dimensional manifold model (LDMM) [S.
             Osher, Z. Shi, and W. Zhu, Low Dimensional Manifold Model
             for Image Processing, Tech. Rep., CAM report 16-04, UCLA,
             Los Angeles, CA, 2016]. In particular, we show that LDMM is
             a weighted ℓ2-regularization on the coefficients obtained
             by decomposing images into linear combinations of
             convolution framelets; based on this understanding, we
             extend the original LDMM to a reweighted version that yields
             further improved results. In addition, we establish the
             energy concentration property of convolution framelet
             coefficients for the setting where the local basis is
             constructed from a given nonlocal basis via a linear
             reconstruction framework; a generalization of this framework
             to unions of local embeddings can provide a natural setting
             for interpreting BM3D, one of the state-of-the-art image
             denoising algorithms.},
   Doi = {10.1137/16M1091447},
   Key = {fds327595}
}

@article{fds325388,
   Author = {Deligiannis, N and Mota, JFC and Cornelis, B and Rodrigues, MRD and Daubechies, I},
   Title = {Multi-Modal Dictionary Learning for Image Separation With
             Application in Art Investigation.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {26},
   Number = {2},
   Pages = {751-764},
   Year = {2017},
   Month = {February},
   url = {http://dx.doi.org/10.1109/tip.2016.2623484},
   Abstract = {In support of art investigation, we propose a new source
             separation method that unmixes a single X-ray scan acquired
             from double-sided paintings. In this problem, the X-ray
             signals to be separated have similar morphological
             characteristics, which brings previous source separation
             methods to their limits. Our solution is to use photographs
             taken from the front- and back-side of the panel to drive the
             separation process. The crux of our approach relies on the
             coupling of the two imaging modalities (photographs and
             X-rays) using a novel coupled dictionary learning framework
             able to capture both common and disparate features across
             the modalities using parsimonious representations; the
             common component captures features shared by the multi-modal
             images, whereas the innovation component captures
             modality-specific information. As such, our model enables
             the formulation of appropriately regularized convex
             optimization procedures that lead to the accurate separation
             of the X-rays. Our dictionary learning framework can be
             tailored both to a single- and a multi-scale framework, with
             the latter leading to a significant performance improvement.
             Moreover, to improve further on the visual quality of the
             separated images, we propose to train coupled dictionaries
             that ignore certain parts of the painting corresponding to
             craquelure. Experimentation on synthetic and real data -
             taken from digital acquisition of the Ghent Altarpiece
             (1432) - confirms the superiority of our method against the
             state-of-the-art morphological component analysis technique
             that uses either fixed or trained dictionaries to perform
             image separation.},
   Doi = {10.1109/tip.2016.2623484},
   Key = {fds325388}
}

@article{fds363412,
   Author = {Daubechies, I and Maes, S},
   Title = {A nonlinear squeezing of the continuous wavelet transform
             based on auditory nerve models},
   Pages = {527-546},
   Booktitle = {Wavelets in Medicine and Biology},
   Year = {2017},
   Month = {November},
   ISBN = {9780849394836},
   Key = {fds363412}
}

@article{fds332858,
   Author = {Alaifari, R and Daubechies, I and Grohs, P and Thakur,
             G},
   Title = {Reconstructing Real-Valued Functions from Unsigned
             Coefficients with Respect to Wavelet and Other
             Frames},
   Journal = {Journal of Fourier Analysis and Applications},
   Volume = {23},
   Number = {6},
   Pages = {1480-1494},
   Year = {2017},
   Month = {December},
   url = {http://dx.doi.org/10.1007/s00041-016-9513-7},
   Abstract = {In this paper we consider the following problem of phase
             retrieval: given a collection of real-valued band-limited
             functions {ψ_λ}_{λ∈Λ} ⊂ L^2(R^d) that constitutes a
             semi-discrete frame, we ask whether any real-valued function
             f ∈ L^2(R^d) can be uniquely recovered from its unsigned
             convolutions {|f ∗ ψ_λ|}_{λ∈Λ}. We find that under some
             mild assumptions on the semi-discrete frame and if f has
             exponential decay at ∞, it suffices to know |f ∗ ψ_λ|
             on suitably fine lattices to uniquely determine f (up to a
             global sign factor). We further establish a local stability
             property of our reconstruction problem. Finally, for two
             concrete examples of a (discrete) frame of L^2(R^d), d = 1, 2,
             we show that through sufficient oversampling one obtains a
             frame such that any real-valued function with exponential
             decay can be uniquely recovered from its unsigned frame
             coefficients.},
   Doi = {10.1007/s00041-016-9513-7},
   Key = {fds332858}
}

@article{fds333315,
   Author = {Xu, JX and Yang, HY and Daubechies, ID},
   Title = {Recursive Diffeomorphism-Based Regression for Shape
             Functions},
   Journal = {SIAM Journal on Mathematical Analysis},
   Volume = {50},
   Number = {1},
   Pages = {5-32},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2018},
   Month = {January},
   url = {http://dx.doi.org/10.1137/16M1097535},
   Abstract = {This paper proposes a recursive diffeomorphism-based
             regression method for the one-dimensional generalized mode
             decomposition problem that aims at extracting generalized
             modes α_k(t)s_k(2πN_kφ_k(t)) from their superposition
             ∑_{k=1}^{K} α_k(t)s_k(2πN_kφ_k(t)). We assume that the
             instantaneous information, e.g., α_k(t) and N_kφ_k(t), is
             determined by,
             e.g., a one-dimensional synchrosqueezed transform or some
             other methods. Our main contribution is to propose a novel
             approach based on diffeomorphisms and nonparametric
             regression to estimate wave shape functions s_k(t). This
             leads to a framework for the generalized mode decomposition
             problem under a weak well-separation condition. Numerical
             examples of synthetic and real data are provided to
             demonstrate the successful application of our
             approach.},
   Doi = {10.1137/16M1097535},
   Key = {fds333315}
}

@article{fds329931,
   Author = {Gao, T and Yapuncich, GS and Daubechies, I and Mukherjee, S and Boyer,
             DM},
   Title = {Development and Assessment of Fully Automated and Globally
             Transitive Geometric Morphometric Methods, With Application
             to a Biological Comparative Dataset With High Interspecific
             Variation.},
   Journal = {Anat Rec (Hoboken)},
   Volume = {301},
   Number = {4},
   Pages = {636-658},
   Year = {2018},
   Month = {April},
   url = {http://dx.doi.org/10.1002/ar.23700},
   Abstract = {Automated geometric morphometric methods are promising tools
             for shape analysis in comparative biology, improving
             researchers' abilities to quantify variation extensively (by
             permitting more specimens to be analyzed) and intensively
             (by characterizing shapes with greater fidelity). Although
             use of these methods has increased, published automated
             methods have some notable limitations: pairwise
             correspondences are frequently inaccurate and pairwise
             mappings are not globally consistent (i.e., they lack
             transitivity across the full sample). Here, we reassess the
             accuracy of published automated methods-cPDist (Boyer et al.
             Proc Nat Acad Sci 108 (2011) 18221-18226) and auto3Dgm (Boyer
             et al.: Anat Rec 298 (2015) 249-276)-and evaluate several
             modifications to these methods. We show that a substantial
             percentage of alignments and pairwise maps between specimens
             of dissimilar geometries were inaccurate in the study of
             Boyer et al. (Proc Nat Acad Sci 108 (2011) 18221-18226), despite
             a taxonomically partitioned variance structure of continuous
             Procrustes distances. We show these inaccuracies are
             remedied using a globally informed methodology within a
             collection of shapes, rather than relying on pairwise
             comparisons (c.f. Boyer et al.: Anat Rec 298 (2015) 249-276).
             Unfortunately, while global information generally enhances
             maps between dissimilar objects, it can degrade the quality
             of correspondences between similar objects due to the
             accumulation of numerical error. We explore a number of
             approaches to mitigate this degradation, quantify their
             performance, and compare the generated pairwise maps (and
             the shape space characterized by these maps) to a "ground
             truth" obtained from landmarks manually collected by
             geometric morphometricians. Novel methods both improve the
             quality of the pairwise correspondences relative to cPDist
             and achieve a taxonomic distinctiveness comparable to
             auto3Dgm. Anat Rec, 301:636-658, 2018. © 2017 Wiley
             Periodicals, Inc.},
   Doi = {10.1002/ar.23700},
   Key = {fds329931}
}

@article{fds340382,
   Author = {Yin, R and Daubechies, I},
   Title = {Directional Wavelet Bases Constructions with Dyadic Quincunx
             Subsampling},
   Journal = {Journal of Fourier Analysis and Applications},
   Volume = {24},
   Number = {3},
   Pages = {872-907},
   Publisher = {Springer Nature},
   Year = {2018},
   Month = {June},
   url = {http://dx.doi.org/10.1007/s00041-017-9540-z},
   Abstract = {We construct directional wavelet systems that will enable
             building efficient signal representation schemes with good
             direction selectivity. In particular, we focus on wavelet
             bases with dyadic quincunx subsampling. In our previous work
             (Yin, in: Proceedings of the 2015 international conference
              on sampling theory and applications (SampTA), 2015), we showed
             that the supports of orthonormal wavelets in our framework
             are discontinuous in the frequency domain, yet this
             irregularity constraint can be avoided in frames, even with
             redundancy factor <2. In this paper, we focus on the
             extension of orthonormal wavelets to biorthogonal wavelets
             and show that the same obstruction of regularity as in
             orthonormal schemes exists in biorthogonal schemes. In
             addition, we provide a numerical algorithm for biorthogonal
             wavelets construction where the dual wavelets can be
             optimized, though at the cost of deteriorating the primal
             wavelets due to the intrinsic irregularity of biorthogonal
             schemes.},
   Doi = {10.1007/s00041-017-9540-z},
   Key = {fds340382}
}

@article{fds342140,
   Author = {Zhu, W and Qiu, Q and Huang, J and Calderbank, R and Sapiro, G and Daubechies, I},
   Title = {LDMNet: Low Dimensional Manifold Regularized Neural
             Networks},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {2743-2751},
   Year = {2018},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CVPR.2018.00290},
   Abstract = {Deep neural networks have proved very successful on
             archetypal tasks for which large training sets are
             available, but when the training data are scarce, their
             performance suffers from overfitting. Many existing methods
             of reducing overfitting are data-independent. Data-dependent
             regularizations are mostly motivated by the observation that
             data of interest lie close to a manifold, which is typically
             hard to parametrize explicitly. These methods usually only
             focus on the geometry of the input data, and do not
             necessarily encourage the networks to produce geometrically
             meaningful features. To resolve this, we propose the
             Low-Dimensional-Manifold-regularized neural Network
             (LDMNet), which incorporates a feature regularization method
             that focuses on the geometry of both the input data and the
             output features. In LDMNet, we regularize the network by
             encouraging the combination of the input data and the output
             features to sample a collection of low dimensional
             manifolds, which are searched efficiently without explicit
             parametrization. To achieve this, we directly use the
             manifold dimension as a regularization term in a variational
             functional. The resulting Euler-Lagrange equation is a
             Laplace-Beltrami equation over a point cloud, which is
             solved by the point integral method without increasing the
             computational complexity. In the experiments, we show that
             LDMNet significantly outperforms widely-used regularizers.
             Moreover, LDMNet can extract common features of an object
             imaged via different modalities, which is very useful in
             real-world applications such as cross-spectral face
             recognition.},
   Doi = {10.1109/CVPR.2018.00290},
   Key = {fds342140}
}
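
The regularizer described above admits a compact display. Writing ξ_θ(x) for the network's feature map and M for the manifold sampled by the concatenated points (x_i, ξ_θ(x_i)), a variational formulation in the spirit of the abstract reads as follows; this is a sketch of the idea building on the low-dimensional manifold model, not a verbatim formula from the paper:

    \min_{\theta,\;\mathcal{M}} \;
      \sum_i \ell\bigl(f_\theta(x_i),\, y_i\bigr)
      \;+\; \lambda \int_{\mathcal{M}} \dim \mathcal{M}(p)\,\mathrm{d}p ,
    \qquad
    \dim \mathcal{M}(p) \;=\; \sum_j \bigl\lVert \nabla_{\mathcal{M}}\, \alpha_j(p) \bigr\rVert^2 ,

where the α_j are the ambient coordinate functions restricted to M. This identity turns the dimension penalty into a Dirichlet-type energy, whose Euler-Lagrange equation is the Laplace-Beltrami equation on the point cloud mentioned in the abstract.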

@article{fds341333,
   Author = {Shan, S and Kovalsky, SZ and Winchester, JM and Boyer, DM and Daubechies, I},
   Title = {ariaDNE: A robustly implemented algorithm for Dirichlet
             energy of the normal},
   Journal = {Methods in Ecology and Evolution},
   Volume = {10},
   Number = {4},
   Pages = {541-552},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1111/2041-210X.13148},
   Abstract = {Shape characterizers are metrics that quantify aspects of
             the overall geometry of a three-dimensional (3D) digital
             surface. When computed for biological objects, the values of
             a shape characterizer are largely independent of homology
             interpretations and often contain a strong ecological and
             functional signal. Thus, shape characterizers are useful for
             understanding evolutionary processes. Dirichlet normal
             energy (DNE) is a widely used shape characterizer in
             morphological studies. Recent studies found that DNE is
             sensitive to various procedures for preparing 3D mesh from
             raw scan data, raising concerns regarding comparability and
             objectivity when utilizing DNE in morphological research. We
             provide a robustly implemented algorithm for computing the
             Dirichlet energy of the normal (ariaDNE) on 3D meshes. We
             show through simulation that the effects of
             preparation-related mesh surface attributes, such as
             triangle count, mesh representation, noise, smoothing and
             boundary triangles, are much more limited on ariaDNE than
             DNE. Furthermore, ariaDNE retains the potential of DNE for
             biological studies, illustrated by its effectiveness in
             differentiating species by dietary preferences. Use of
             ariaDNE can dramatically enhance the assessment of the
             ecological aspects of morphological variation by its
             stability under different 3D model acquisition methods and
              preparation procedures. Towards this goal, we provide scripts
             for computing ariaDNE and ariaDNE values for specimens used
             in previously published DNE analyses.},
   Doi = {10.1111/2041-210X.13148},
   Key = {fds341333}
}
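
For orientation, a crude discrete stand-in for the Dirichlet energy of the normal on a triangle mesh: sum, over adjacent face pairs, the squared difference of unit face normals, weighted by area. This is an illustrative proxy only, not the published ariaDNE algorithm (which uses a more robust weighted construction), and the mesh handling below is deliberately minimal.

    import numpy as np

    def face_normals_areas(V, F):
        """Unit normals and areas for a triangle mesh (V: n x 3, F: m x 3)."""
        e1 = V[F[:, 1]] - V[F[:, 0]]
        e2 = V[F[:, 2]] - V[F[:, 0]]
        n = np.cross(e1, e2)
        a = 0.5 * np.linalg.norm(n, axis=1)
        return n / (2 * a[:, None] + 1e-12), a

    def dne_proxy(V, F):
        """Normal variation across shared edges, area-weighted.
        A rough stand-in for DNE; not the ariaDNE algorithm."""
        n, a = face_normals_areas(V, F)
        edge_faces = {}                    # undirected edge -> adjacent faces
        for fi, tri in enumerate(F):
            for k in range(3):
                e = tuple(sorted((tri[k], tri[(k + 1) % 3])))
                edge_faces.setdefault(e, []).append(fi)
        total = 0.0
        for faces in edge_faces.values():
            if len(faces) == 2:
                i, j = faces
                total += np.sum((n[i] - n[j]) ** 2) * (a[i] + a[j]) / 2
        return total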

@article{fds346309,
   Author = {Sabetsarvestani, Z and Sober, B and Higgitt, C and Daubechies, I and Rodrigues, MRD},
   Title = {Artificial intelligence for art investigation: Meeting the
             challenge of separating x-ray images of the Ghent
             Altarpiece.},
   Journal = {Science advances},
   Volume = {5},
   Number = {8},
   Pages = {eaaw7416},
   Year = {2019},
   Month = {August},
   url = {http://dx.doi.org/10.1126/sciadv.aaw7416},
   Abstract = {X-ray images of polyptych wings, or other artworks painted
             on both sides of their support, contain in one image content
             from both paintings, making them difficult for experts to
             "read." To improve the utility of these x-ray images in
             studying these artworks, it is desirable to separate the
             content into two images, each pertaining to only one side.
             This is a difficult task for which previous approaches have
             been only partially successful. Deep neural network
             algorithms have recently achieved remarkable progress in a
             wide range of image analysis and other challenging tasks.
             We, therefore, propose a new self-supervised approach to
             this x-ray separation, leveraging an available convolutional
             neural network architecture; results obtained for details
             from the <i>Adam</i> and <i>Eve</i> panels of the <i>Ghent
             Altarpiece</i> spectacularly improve on previous
             attempts.},
   Doi = {10.1126/sciadv.aaw7416},
   Key = {fds346309}
}

@article{fds339576,
   Author = {Alaifari, R and Daubechies, I and Grohs, P and Yin,
             R},
   Title = {Stable Phase Retrieval in Infinite Dimensions},
   Journal = {Foundations of Computational Mathematics},
   Volume = {19},
   Number = {4},
   Pages = {869-900},
   Publisher = {Springer Nature America, Inc},
   Year = {2019},
   Month = {August},
   url = {http://dx.doi.org/10.1007/s10208-018-9399-7},
   Abstract = {The problem of phase retrieval is to determine a signal
              f ∈ H, with H a Hilbert space, from intensity measurements
              |F(ω)|, where F(ω) := ⟨f, φ_ω⟩ are measurements of f with
              respect to a measurement system (φ_ω)_{ω∈Ω} ⊂ H. Although
              phase retrieval is always stable in the finite-dimensional
              setting whenever it is possible (i.e. injectivity implies
              stability for the inverse problem), the situation is
              drastically different if H is infinite-dimensional: in that
              case phase retrieval is never uniformly stable (Alaifari
              and Grohs in SIAM J Math Anal 49(3):1895–1911, 2017; Cahill
              et al. in Trans Am Math Soc Ser B 3(3):63–76, 2016);
              moreover, the stability deteriorates severely in the
              dimension of the problem (Cahill et al. 2016). On the other
              hand, all empirically observed instabilities are of a
              certain type: they occur whenever the function |F| of
              intensity measurements is concentrated on disjoint sets
              D_j ⊂ Ω, i.e. when F = ∑_{j=1}^k F_j where each F_j is
              concentrated on D_j (and k ≥ 2). Motivated by these
              considerations, we propose a new paradigm for stable phase
              retrieval by considering the problem of reconstructing F up
              to a phase factor that is not global, but that can be
              different for each of the subsets D_j, i.e. recovering F up
              to the equivalence F ∼ ∑_{j=1}^k e^{iα_j} F_j. We present
              concrete applications (for example in audio processing)
              where this new notion of stability is natural and
              meaningful and show that in this setting stable phase
              retrieval can actually be achieved, for instance, if the
              measurement system is a Gabor frame or a frame of Cauchy
              wavelets.},
   Doi = {10.1007/s10208-018-9399-7},
   Key = {fds339576}
}
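
The equivalence above can be motivated in one line. Since each component F_l is concentrated on D_l, for ω ∈ D_j all terms with l ≠ j are negligible, so (a heuristic display, using the notation of the abstract):

    \Bigl|\, \sum\nolimits_l e^{i\alpha_l} F_l(\omega) \Bigr|
    \;\approx\; \bigl| e^{i\alpha_j} F_j(\omega) \bigr|
    \;=\; |F_j(\omega)|
    \;\approx\; |F(\omega)| \qquad (\omega \in D_j).

The intensity data thus barely distinguish F from its piecewise-rephased versions, which is why only recovery modulo this equivalence can be stable.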

@article{fds352387,
   Author = {Pu, W and Sober, B and Daly, N and Higgitt, C and Daubechies, I and Rodrigues, MRD},
   Title = {A connected auto-encoders based approach for image
             separation with side information: With applications to art
             investigation},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2020-May},
   Pages = {2213-2217},
   Year = {2020},
   Month = {May},
   ISBN = {9781509066315},
   url = {http://dx.doi.org/10.1109/ICASSP40776.2020.9054651},
   Abstract = {X-radiography is a widely used imaging technique in art
             investigation, whether to investigate the condition of a
             painting or provide insights into artists' techniques and
             working methods. In this paper, we propose a new
             architecture based on the use of 'connected' auto-encoders
             in order to separate mixed X-ray images acquired from
             double-sided paintings, where in addition to the mixed X-ray
             image one can also exploit the two RGB images associated
             with the front and back of the painting. This proposed
             architecture uses convolutional autoencoders that extract
             features from the RGB images that can be employed to (1)
             reproduce both of the original RGB images, (2) reconstruct
             the associated separated X-ray images, and (3) regenerate
             the mixed X-ray image. It operates in a totally
             self-supervised fashion without the need for examples
             containing both the mixed X-ray images and the separated
             ones. Based on images from the double-sided wing panels from
             the famous Ghent Altarpiece, painted in 1432 by the brothers
             Hubert and Jan Van Eyck, the proposed algorithm has been
              experimentally verified to outperform state-of-the-art X-ray
             separation methods in art investigation applications.},
   Doi = {10.1109/ICASSP40776.2020.9054651},
   Key = {fds352387}
}
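
A schematic of the three coupled objectives in PyTorch; the module names, widths and loss weighting below are illustrative assumptions, not the paper's architecture. Note that objective (2), the separated X-rays, appears only through the mixture constraint: no ground-truth separation is used, mirroring the self-supervised setup.

    import torch
    import torch.nn as nn

    class ConvAE(nn.Module):
        """Toy convolutional auto-encoder (illustrative stand-in)."""
        def __init__(self, c_in, c_out, width=32):
            super().__init__()
            self.enc = nn.Sequential(
                nn.Conv2d(c_in, width, 3, padding=1), nn.ReLU(),
                nn.Conv2d(width, width, 3, padding=1), nn.ReLU())
            self.dec = nn.Conv2d(width, c_out, 3, padding=1)
        def forward(self, x):
            z = self.enc(x)
            return z, self.dec(z)

    front_ae = ConvAE(3, 3)                    # RGB front -> RGB front
    back_ae = ConvAE(3, 3)                     # RGB back  -> RGB back
    xr_front = nn.Conv2d(32, 1, 3, padding=1)  # features -> X-ray, front side
    xr_back = nn.Conv2d(32, 1, 3, padding=1)   # features -> X-ray, back side

    def connected_loss(rgb_f, rgb_b, xray_mixed):
        zf, rf = front_ae(rgb_f)
        zb, rb = back_ae(rgb_b)
        xf, xb = xr_front(zf), xr_back(zb)     # (2) the separated X-rays
        mse = nn.functional.mse_loss
        return (mse(rf, rgb_f) + mse(rb, rgb_b)   # (1) reproduce RGB images
                + mse(xf + xb, xray_mixed))       # (3) regenerate mixed X-ray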

@article{fds361762,
   Author = {Pu, W and Huang, J and Sober, B and Daly, N and Higgitt, C and Dragotti,
             PL and Daubechies, I and Rodrigues, MRD},
   Title = {A Learning Based Approach to Separate Mixed X-Ray Images
             Associated with Artwork with Concealed Designs},
   Journal = {European Signal Processing Conference},
   Volume = {2021-August},
   Pages = {1491-1495},
   Year = {2021},
   Month = {January},
   ISBN = {9789082797060},
   url = {http://dx.doi.org/10.23919/EUSIPCO54536.2021.9616096},
   Abstract = {X-ray images are widely used in the study of paintings. When
             a painting has hidden sub-surface features (e.g., reuse of
             the canvas or revision of a composition by the artist), the
             resulting X-ray images can be hard to interpret as they
             include contributions from both the surface painting and the
             hidden design. In this paper we propose a self-supervised
             deep learning-based image separation approach that can be
             applied to the X-ray images from such paintings ('mixed
             X-ray images') to separate them into two hypothetical X-ray
             images, one containing information related to the visible
             painting only and the other containing the hidden features.
             The proposed approach involves two steps: (1) separation of
             the mixed X-ray image into two images, guided by the
             combined use of a reconstruction and an exclusion loss; (2)
             even allocation of the error map into the two individual,
              separated X-ray images, yielding separation results whose
              appearance is more consistent with that of ordinary X-ray
              images. The proposed method was demonstrated on a real
             painting with hidden content, Doña Isabel de Porcel by
             Francisco de Goya, to show its effectiveness.},
   Doi = {10.23919/EUSIPCO54536.2021.9616096},
   Key = {fds361762}
}

@article{fds356124,
   Author = {Fulwood, EL and Shan, S and Winchester, JM and Kirveslahti, H and Ravier, R and Kovalsky, S and Daubechies, I and Boyer,
             DM},
   Title = {Insights from macroevolutionary modelling and ancestral
             state reconstruction into the radiation and historical
             dietary ecology of Lemuriformes (Primates,
             Mammalia).},
   Journal = {BMC ecology and evolution},
   Volume = {21},
   Number = {1},
   Pages = {60},
   Year = {2021},
   Month = {April},
   url = {http://dx.doi.org/10.1186/s12862-021-01793-x},
   Abstract = {Background: Lemurs once rivalled the diversity of the
              rest of the primate order despite their confinement to the
              island of Madagascar. We test the adaptive radiation model
              of Malagasy lemur diversity using a novel combination of
              phylogenetic comparative methods and geometric methods for
              quantifying tooth shape. Results: We apply
              macroevolutionary model fitting approaches and disparity
              through time analysis to dental topography metrics
              associated with dietary adaptation, an aspect of mammalian
              ecology which appears to be closely related to
              diversification in many clades. Metrics were also
              reconstructed at internal nodes of the lemur tree and these
              reconstructions were combined to generate dietary
              classification probabilities at internal nodes using
              discriminant function analysis. We used these
              reconstructions to calculate rates of transition toward
              folivory per million-year intervals. Finally, lower second
              molar shape was reconstructed at internal nodes by modelling
              the change in shape of 3D meshes using squared change
              parsimony along the branches of the lemur tree. Our analyses
              of dental topography metrics do not recover an early burst
              in rates of change or a pattern of early partitioning of
              subclade disparity. However, rates of change in adaptations
              for folivory were highest during the Oligocene, an interval
              of possible forest expansion on the island. Conclusions:
              There was no clear phylogenetic signal of bursts of
              morphological evolution early in lemur history.
              Reconstruction of the molar morphologies corresponding to
              the ancestral nodes of the lemur tree suggests, however,
              that the increase in folivory may have been driven by a
              shift toward defended plant resources. This suggests a
              response to the ecological opportunity offered by expanding
              forests, but not necessarily a classic adaptive radiation
              initiated by dispersal to Madagascar.},
   Doi = {10.1186/s12862-021-01793-x},
   Key = {fds356124}
}

@article{fds357497,
   Author = {Fornasier, M and Vybíral, J and Daubechies, I},
   Title = {Robust and resource efficient identification of shallow
             neural networks by fewest samples},
   Journal = {Information and Inference},
   Volume = {10},
   Number = {2},
   Pages = {625-695},
   Year = {2021},
   Month = {June},
   url = {http://dx.doi.org/10.1093/imaiai/iaaa036},
   Abstract = {We address the structure identification and the uniform
              approximation of sums of ridge functions f(x) = ∑_{i=1}^m
              g_i(⟨a_i, x⟩) on ℝ^d, representing a general form of a
              shallow feed-forward neural network, from a small number of
              query samples. Higher-order differentiation, as used in our
              constructive approximations, of sums of ridge functions or
              of their compositions, as in deeper neural networks, yields
              a natural connection between neural network weight
              identification and tensor product decomposition
              identification. In the case of the shallowest feed-forward
              neural network, second-order differentiation and tensors of
              order two (i.e., matrices) suffice, as we prove in this
              paper. We use two sampling schemes to perform approximate
              differentiation: active sampling, where the sampling points
              are universal, actively and randomly designed, and passive
              sampling, where the sampling points were preselected at
              random from a distribution with known density. Based on
              multiple gathered approximated first- and second-order
              differentials, our general approximation strategy is
              developed as a sequence of algorithms to perform individual
              sub-tasks. We first perform an active subspace search by
              approximating the span of the weight vectors a_1, ..., a_m.
              Then we use a straightforward substitution, which reduces
              the dimensionality of the problem from d to m. The core of
              the construction is then the stable and efficient
              approximation of the weights, expressed in terms of the
              rank-1 matrices a_i ⊗ a_i, realized by formulating their
              individual identification as a suitable nonlinear program.
              We prove that this program successfully identifies weight
              vectors that are close to orthonormal, and we also show how
              we can constructively reduce to this case by a whitening
              procedure, without loss of generality. We finally discuss
              the implementation and the performance of the proposed
              algorithmic pipeline with extensive numerical experiments,
              which illustrate and confirm the theoretical
              results.},
   Doi = {10.1093/imaiai/iaaa036},
   Key = {fds357497}
}
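
A compact numpy sketch of the "active subspace" step described above: for f(x) = ∑_i g_i(⟨a_i, x⟩), every Hessian ∇²f(x) = ∑_i g_i''(⟨a_i, x⟩) a_i a_iᵀ lies in span{a_i ⊗ a_i}, so finite-difference Hessians collected at a few random points reveal span{a_1, ..., a_m} through an SVD. The sampling scheme and sizes here are illustrative, not the paper's algorithm.

    import numpy as np

    def fd_hessian(f, x, h=1e-4):
        """Central finite-difference Hessian of f at x."""
        d = x.size
        H = np.zeros((d, d))
        for i in range(d):
            for j in range(d):
                e_i, e_j = np.eye(d)[i] * h, np.eye(d)[j] * h
                H[i, j] = (f(x + e_i + e_j) - f(x + e_i - e_j)
                           - f(x - e_i + e_j) + f(x - e_i - e_j)) / (4 * h * h)
        return H

    def weight_span(f, d, m, n_pts=20, seed=0):
        """Estimate span{a_1, ..., a_m} from Hessians at random points."""
        rng = np.random.default_rng(seed)
        hessians = [fd_hessian(f, rng.standard_normal(d)) for _ in range(n_pts)]
        stacked = np.hstack(hessians)      # columns live in span{a_i}
        U, _, _ = np.linalg.svd(stacked, full_matrices=False)
        return U[:, :m]                    # orthonormal basis for the span

    # Toy check: two ridge functions in R^5.
    a1, a2 = np.array([1., 0, 0, 0, 0]), np.array([0, 1., 1, 0, 0]) / np.sqrt(2)
    f = lambda x: np.tanh(a1 @ x) + (a2 @ x) ** 3
    B = weight_span(f, d=5, m=2)   # columns ~ orthonormal basis of span{a1, a2}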

@article{fds361304,
   Author = {Daubechies, I},
   Title = {Wavelets at your service},
   Pages = {48-57},
   Booktitle = {The Art and Practice of Mathematics: Interviews at the
              Institute for Mathematical Sciences, National University of
              Singapore, 2010-2020},
   Year = {2021},
   Month = {June},
   ISBN = {9789811219580},
   Key = {fds361304}
}

@article{fds358750,
   Author = {Cheng, C and Daubechies, I and Dym, N and Lu, J},
   Title = {Stable phase retrieval from locally stable and conditionally
             connected measurements},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {55},
   Pages = {440-465},
   Year = {2021},
   Month = {November},
   url = {http://dx.doi.org/10.1016/j.acha.2021.07.001},
   Abstract = {In this paper, we study the stability of phase retrieval
              problems via a family of locally stable phase retrieval
              frame measurements in Banach spaces, which we call
              “locally stable and conditionally connected” (LSCC)
              measurement schemes. For any signal f in the Banach space,
              we associate it with a weighted graph G_f, defined by the
              LSCC measurement scheme, and show that the phase
              retrievability of the signal f is determined by the
              connectivity of G_f. We quantify the phase retrieval
              stability of the signal by two common measures of graph
              connectivity: the Cheeger constant for real-valued signals,
              and the algebraic connectivity for complex-valued signals.
              We then use our results to study the stability of two phase
              retrieval models. In the first model, we study a
              finite-dimensional phase retrieval problem from locally
              supported measurements such as the windowed Fourier
              transform. We show that signals “without large holes”
              are phase retrievable, and that for such signals in ℝ^d the
              phase retrieval stability constant grows proportionally to
              d^{1/2}, while in ℂ^d it grows proportionally to d. The
              second model we consider is an infinite-dimensional phase
              retrieval problem in a shift-invariant space. In
              infinite-dimensional spaces, even phase retrievable signals
              can have Cheeger constant zero, and hence an infinite
              stability constant. We give an example of a signal with
              monotone polynomial decay whose Cheeger constant is zero,
              and an example with exponential decay whose Cheeger
              constant is strictly positive.},
   Doi = {10.1016/j.acha.2021.07.001},
   Key = {fds358750}
}
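
For reference, the two connectivity measures invoked above are the standard ones for a weighted graph G = (V, E, w):

    h(G) \;=\; \min_{\emptyset \neq S \subsetneq V}
      \frac{w(\partial S)}{\min\{w(S),\, w(V \setminus S)\}} ,
    \qquad
    \lambda_2(G) \;=\; \text{second-smallest eigenvalue of } L = D - W ,

where w(∂S) is the total weight of the edges leaving S, w(S) the total weighted degree inside S, W the weighted adjacency matrix, and D the diagonal degree matrix.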

@article{fds360557,
   Author = {Fulwood, EL and Shan, S and Winchester, JM and Gao, T and Kirveslahti,
             H and Daubechies, I and Boyer, DM},
   Title = {Reconstructing dietary ecology of extinct strepsirrhines
             (Primates, Mammalia) with new approaches for characterizing
             and analyzing tooth shape},
   Journal = {Paleobiology},
   Volume = {47},
   Number = {4},
   Pages = {612-631},
   Year = {2021},
   Month = {November},
   url = {http://dx.doi.org/10.1017/pab.2021.9},
   Abstract = {The morphological and ecological diversity of lemurs and
             lorisiformes once rivaled that of the rest of the primate
             order. Here, we assemble a dataset of 3D models representing
             the second mandibular molars of a wide range of extant and
             fossil strepsirrhines encompassing this diversity. We use
             these models to distill quantitative descriptors of tooth
             form and then analyze these data using new analytical
             methods. We employ a recently developed dental topography
             metric (ariaDNE), which is less sensitive to details of
             random error in 3D model quality than previously used
             metrics (e.g., DNE); Bayesian multinomial modeling with
             metrics designed to measure overfitting risk; and a tooth
             segmentation algorithm that allows the shapes of
             disaggregated tooth surface features to be quantified using
             dental topography metrics. This approach is successful at
             reclassifying extant strepsirrhine primates to known dietary
             ecology and indicates that the averaging of morphological
             information across the tooth surface does not interfere with
             the ability of dental topography metrics to predict dietary
             adaptation. When the most informative combination of dental
             topography metrics is applied to extinct species, many
             subfossil lemurs and the most basal fossil strepsirrhines
             are predicted to have been primarily frugivorous or
             gummivorous. This supports an ecological contraction among
             the extant lemurs and the importance of frugivory in the
             origins of crown Strepsirrhini, potentially to avoid
             competition with more insectivorous and folivorous members
             of Paleogene Afro-Arabian primate faunas.},
   Doi = {10.1017/pab.2021.9},
   Key = {fds360557}
}

@article{fds364205,
   Author = {Pu, W and Huang, J-J and Sober, B and Daly, N and Higgitt, C and Daubechies, I and Dragotti, PL and Rodrigues, MRD},
   Title = {Mixed X-Ray Image Separation for Artworks With Concealed
             Designs.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {31},
   Pages = {4458-4473},
   Year = {2022},
   Month = {January},
   url = {http://dx.doi.org/10.1109/tip.2022.3185488},
   Abstract = {In this paper, we focus on X-ray images (X-radiographs) of
             paintings with concealed sub-surface designs (e.g., deriving
             from reuse of the painting support or revision of a
             composition by the artist), which therefore include
             contributions from both the surface painting and the
             concealed features. In particular, we propose a
             self-supervised deep learning-based image separation
             approach that can be applied to the X-ray images from such
             paintings to separate them into two hypothetical X-ray
             images. One of these reconstructed images is related to the
             X-ray image of the concealed painting, while the second one
             contains only information related to the X-ray image of the
             visible painting. The proposed separation network consists
             of two components: the analysis and the synthesis
             sub-networks. The analysis sub-network is based on learned
             coupled iterative shrinkage thresholding algorithms (LCISTA)
             designed using algorithm unrolling techniques, and the
             synthesis sub-network consists of several linear mappings.
             The learning algorithm operates in a totally self-supervised
             fashion without requiring a sample set that contains both
             the mixed X-ray images and the separated ones. The proposed
             method is demonstrated on a real painting with concealed
              content, Doña Isabel de Porcel by Francisco de Goya, to
             show its effectiveness.},
   Doi = {10.1109/tip.2022.3185488},
   Key = {fds364205}
}

@article{fds356405,
   Author = {Daubechies, I and DeVore, R and Foucart, S and Hanin, B and Petrova,
             G},
   Title = {Nonlinear Approximation and (Deep) ReLU Networks},
   Journal = {Constructive Approximation},
   Volume = {55},
   Number = {1},
   Pages = {127-172},
   Year = {2022},
   Month = {February},
   url = {http://dx.doi.org/10.1007/s00365-021-09548-z},
   Abstract = {This article is concerned with the approximation and
             expressive powers of deep neural networks. This is an active
             research area currently producing many interesting papers.
             The results most commonly found in the literature prove that
             neural networks approximate functions with classical
             smoothness to the same accuracy as classical linear methods
             of approximation, e.g., approximation by polynomials or by
             piecewise polynomials on prescribed partitions. However,
             approximation by neural networks depending on n parameters
             is a form of nonlinear approximation and as such should be
             compared with other nonlinear methods such as variable knot
             splines or n-term approximation from dictionaries. The
             performance of neural networks in targeted applications such
              as machine learning indicates that they actually possess even
             greater approximation power than these traditional methods
             of nonlinear approximation. The main results of this article
             prove that this is indeed the case. This is done by
             exhibiting large classes of functions which can be
             efficiently captured by neural networks where classical
             nonlinear methods fall short of the task. The present
             article purposefully limits itself to studying the
             approximation of univariate functions by ReLU networks. Many
             generalizations to functions of several variables and other
             activation functions can be envisioned. However, even in
              the simplest of the settings considered here, a theory that
             completely quantifies the approximation power of neural
             networks is still lacking.},
   Doi = {10.1007/s00365-021-09548-z},
   Key = {fds356405}
}
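
A standard illustration of the depth phenomenon discussed above is Telgarsky's sawtooth construction (included for orientation; it is not an excerpt from this paper): composing the ReLU "hat" function with itself n times produces 2^{n-1} oscillations from O(n) parameters, whereas a piecewise-linear approximation on a prescribed partition needs a number of pieces proportional to the oscillation count.

    import numpy as np

    def relu(x):
        return np.maximum(x, 0.0)

    def hat(x):
        """Hat function on [0, 1] as a tiny ReLU network: 2*relu(x) - 4*relu(x - 1/2)."""
        return 2 * relu(x) - 4 * relu(x - 0.5)

    def sawtooth(x, depth):
        """Composing the hat with itself: 2**(depth - 1) teeth on [0, 1]."""
        y = x
        for _ in range(depth):
            y = hat(y)
        return y

    x = np.linspace(0, 1, 1001)
    y = sawtooth(x, depth=6)   # 32 oscillations from ~12 ReLU units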

@article{fds365842,
   Author = {Daubechies, I and Devore, R and Dym, N and Faigenbaum-Golovin, S and Kovalsky, SZ and Lin, KC and Park, J and Petrova, G and Sober,
             B},
   Title = {Neural Network Approximation of Refinable
             Functions},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {69},
   Number = {1},
   Pages = {482-495},
   Year = {2023},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TIT.2022.3199601},
   Abstract = {In the desire to quantify the success of neural networks in
             deep learning and other applications, there is a great
             interest in understanding which functions are efficiently
             approximated by the outputs of neural networks. By now,
             there exists a variety of results which show that a wide
             range of functions can be approximated with sometimes
             surprising accuracy by these outputs. For example, it is
             known that the set of functions that can be approximated
             with exponential accuracy (in terms of the number of
             parameters used) includes, on one hand, very smooth
             functions such as polynomials and analytic functions and, on
             the other hand, very rough functions such as the Weierstrass
             function, which is nowhere differentiable. In this paper, we
             add to the latter class of rough functions by showing that
             it also includes refinable functions. Namely, we show that
             refinable functions are approximated by the outputs of deep
             ReLU neural networks with a fixed width and increasing depth
             with accuracy exponential in terms of their number of
             parameters. Our results apply to functions used in the
             standard construction of wavelets as well as to functions
             constructed via subdivision algorithms in Computer Aided
             Geometric Design.},
   Doi = {10.1109/TIT.2022.3199601},
   Key = {fds365842}
}
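
For concreteness, a refinable function is one satisfying a two-scale refinement equation (standard material, not specific to this paper); the piecewise-linear hat function supported on [0, 2] is the simplest example:

    \varphi(x) \;=\; \sum_k h_k\, \varphi(2x - k) ,
    \qquad
    \varphi_{\mathrm{hat}}(x) \;=\; \tfrac{1}{2}\varphi_{\mathrm{hat}}(2x)
      + \varphi_{\mathrm{hat}}(2x - 1) + \tfrac{1}{2}\varphi_{\mathrm{hat}}(2x - 2) .

The Daubechies scaling functions and the limit functions of subdivision schemes, the cases of interest in the paper, satisfy equations of the same type with other coefficient masks h_k.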

@article{fds371115,
   Author = {Pu, W and Sober, B and Daly, N and Zhou, C and Sabetsarvestani, Z and Higgitt, C and Daubechies, I and Rodrigues, MRD},
   Title = {Image Separation With Side Information: A Connected
             Auto-Encoders Based Approach.},
   Journal = {IEEE transactions on image processing : a publication of the
             IEEE Signal Processing Society},
   Volume = {32},
   Pages = {2931-2946},
   Year = {2023},
   Month = {January},
   url = {http://dx.doi.org/10.1109/tip.2023.3275872},
   Abstract = {X-radiography (X-ray imaging) is a widely used imaging
             technique in art investigation. It can provide information
             about the condition of a painting as well as insights into
             an artist's techniques and working methods, often revealing
              hidden information invisible to the naked eye. X-radiography
             of double-sided paintings results in a mixed X-ray image and
             this paper deals with the problem of separating this mixed
             image. Using the visible color images (RGB images) from each
             side of the painting, we propose a new Neural Network
             architecture, based upon 'connected' auto-encoders, designed
             to separate the mixed X-ray image into two simulated X-ray
             images corresponding to each side. This connected
             auto-encoders architecture is such that the encoders are
             based on convolutional learned iterative shrinkage
             thresholding algorithms (CLISTA) designed using algorithm
             unrolling techniques, whereas the decoders consist of simple
             linear convolutional layers; the encoders extract sparse
             codes from the visible image of the front and rear paintings
             and mixed X-ray image, whereas the decoders reproduce both
             the original RGB images and the mixed X-ray image. The
             learning algorithm operates in a totally self-supervised
             fashion without requiring a sample set that contains both
             the mixed X-ray images and the separated ones. The
             methodology was tested on images from the double-sided wing
             panels of the Ghent Altarpiece, painted in 1432 by the
             brothers Hubert and Jan van Eyck. These tests show that the
             proposed approach outperforms other state-of-the-art X-ray
             image separation methods for art investigation
             applications.},
   Doi = {10.1109/tip.2023.3275872},
   Key = {fds371115}
}

@article{fds371294,
   Author = {Shan, S and Daubechies, I},
   Title = {Diffusion Maps: Using the Semigroup Property for Parameter
             Tuning},
   Volume = {Part F6},
   Pages = {409-424},
   Booktitle = {Applied and Numerical Harmonic Analysis},
   Year = {2023},
   Month = {January},
   url = {http://dx.doi.org/10.1007/978-3-030-45847-8_18},
   Abstract = {Diffusion maps (DM) constitute a classic dimension reduction
             technique, for data lying on or close to a (relatively)
              low-dimensional manifold embedded in a much
              higher-dimensional space. It consists of constructing a
              spectral parametrization for the manifold from simulated
              random walks or diffusion paths on the dataset. However, DM
              is hard to tune in practice. In particular, setting the
              diffusion time t when constructing the diffusion kernel
              matrix is a critical step. We address this problem by using
              the semigroup
             property of the diffusion operator. We propose a semigroup
             criterion for picking the “right” value for t.
             Experiments show that this principled approach is effective
             and robust.},
   Doi = {10.1007/978-3-030-45847-8_18},
   Key = {fds371294}
}
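
A minimal numpy sketch of the semigroup test described in the abstract: build the diffusion operator at times t and 2t and measure how far P_t² falls from P_{2t}; scanning t for the smallest discrepancy is an illustrative reading of the criterion, with details (kernel normalization, error norm, grid) chosen here for brevity rather than taken from the paper.

    import numpy as np

    def markov_matrix(X, t):
        """Row-stochastic diffusion operator from a Gaussian kernel at time t."""
        D2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
        K = np.exp(-D2 / (4 * t))
        return K / K.sum(axis=1, keepdims=True)

    def semigroup_defect(X, t):
        """Relative defect || P_t^2 - P_{2t} ||_F / || P_{2t} ||_F."""
        P_t, P_2t = markov_matrix(X, t), markov_matrix(X, 2 * t)
        return np.linalg.norm(P_t @ P_t - P_2t) / np.linalg.norm(P_2t)

    # Scan candidate times and keep the one where the semigroup
    # property holds best (data: points near a noisy circle).
    rng = np.random.default_rng(0)
    theta = rng.uniform(0, 2 * np.pi, 300)
    X = np.c_[np.cos(theta), np.sin(theta)] + 0.01 * rng.standard_normal((300, 2))
    ts = np.logspace(-3, 0, 20)
    t_best = ts[int(np.argmin([semigroup_defect(X, t) for t in ts]))]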

@article{fds370363,
   Author = {Xu, J and Li, Y and Yang, H and Dunson, D and Daubechies,
             I},
   Title = {PiPs: A kernel-based optimization scheme for analyzing
             non-stationary 1D signals},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {66},
   Pages = {1-17},
   Year = {2023},
   Month = {September},
   url = {http://dx.doi.org/10.1016/j.acha.2023.04.002},
   Abstract = {This paper proposes a novel kernel-based optimization scheme
              to handle tasks in the analysis of 1D non-stationary
              oscillatory data, e.g., spectral estimation and
              single-channel source separation. The key insight of our
             optimization scheme for reconstructing the time-frequency
             information is that when a nonparametric regression is
             applied on some input values, the output regressed points
             would lie near the oscillatory pattern of the oscillatory 1D
             signal only if these input values are a good approximation
             of the ground-truth phase function. In this work, Gaussian
             Process (GP) is chosen to conduct this nonparametric
             regression: the oscillatory pattern is encoded as the
             Pattern-inducing Points (PiPs) which act as the training
             data points in the GP regression; while the targeted phase
             function is fed in to compute the correlation kernels,
             acting as the testing input. Better approximated phase
             function generates more precise kernels, thus resulting in
             smaller optimization loss error when comparing the
             kernel-based regression output with the original signals. To
             the best of our knowledge, this is the first algorithm that
             can satisfactorily handle fully non-stationary oscillatory
             data, close and crossover frequencies, and general
             oscillatory patterns. Even in the example of a signal
             produced by slow variation in the parameters of a
             trigonometric expansion, we show that PiPs admits
             competitive or better performance in terms of accuracy and
             robustness than existing state-of-the-art
             algorithms.},
   Doi = {10.1016/j.acha.2023.04.002},
   Key = {fds370363}
}
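
A toy numpy rendering of the mechanism just described: pattern-inducing points encode one period of the oscillatory pattern as GP "training data", and a candidate phase function is scored by how well GP regression through those points reproduces the observed signal. A plain periodic-kernel GP with fixed hyperparameters stands in for the paper's formulation; the names pip_u, pip_s and all parameter values are illustrative.

    import numpy as np

    def periodic_kernel(u, v, length=0.3):
        """Stationary kernel on phases (period 1)."""
        d = np.sin(np.pi * (u[:, None] - v[None, :]))
        return np.exp(-2 * (d / length) ** 2)

    def pips_loss(signal, phase, pip_u, pip_s, noise=1e-4):
        """Misfit between the signal and the GP regression of the
        pattern-inducing points (pip_u, pip_s) evaluated at `phase`."""
        K = periodic_kernel(pip_u, pip_u) + noise * np.eye(pip_u.size)
        K_star = periodic_kernel(phase, pip_u)
        pred = K_star @ np.linalg.solve(K, pip_s)
        return np.sum((signal - pred) ** 2)

    # Toy usage: score two candidate phase functions for a chirp-like signal.
    t = np.linspace(0, 1, 2000)
    true_phase = 5 * t + 2 * t ** 2
    x = np.cos(2 * np.pi * true_phase)
    pip_u = np.linspace(0, 1, 30, endpoint=False)   # one period of the pattern
    pip_s = np.cos(2 * np.pi * pip_u)
    good = pips_loss(x, true_phase, pip_u, pip_s)   # small loss
    bad = pips_loss(x, 5 * t, pip_u, pip_s)         # larger loss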

@article{fds372370,
   Author = {Balmaceda, JM and Clemens, CH and Daubechies, I and Pineda, AR and Rusu,
             G and Waldschmidt, M},
   Title = {Graduate Assistantships in Developing Countries (GRAID):
              Supporting Mathematics Graduate Students in the Countries
              that Need it Most},
   Journal = {Notices of the American Mathematical Society},
   Volume = {70},
   Number = {8},
   Pages = {1281-1284},
   Year = {2023},
   Month = {September},
   url = {http://dx.doi.org/10.1090/noti2749},
   Doi = {10.1090/noti2749},
   Key = {fds372370}
}

@article{fds374555,
   Author = {Duprez, F and Crombin, M and Daubechies, I and Devries, N and Durant, V and El Khalil, M and Audag, N},
   Title = {[Update on manual bronchial clearance techniques (adults and
             adolescents)].},
   Journal = {Revue des maladies respiratoires},
   Volume = {41},
   Number = {1},
   Pages = {43-50},
   Year = {2024},
   Month = {January},
   url = {http://dx.doi.org/10.1016/j.rmr.2023.10.006},
   Abstract = {In adults and teenagers, airway clearance physiotherapy
              techniques (ACPT) are numerous and varied. However, they
              are still awaiting scientific validation. Among ACPTs, Slow
             Expiration with the Glottis Opened in the Lateral Posture
              (ELTGOL), Autogenic Drainage (AD), and the Active Cycle of
              Breathing Technique (ACBT) present a Grade B level of
             evidence with weak recommendations. Even though these
             maneuvers are widely applied, precise description of chest
             physiotherapy (CP) is largely absent from the scientific
             literature; it is difficult to standardize its
             implementation and reproduce the results; scientific
             validation and faithful execution of the techniques are
             consequently problematic. In this paper, the authors aim to
             depict each of the three CP techniques as precisely as
             possible; with this in mind, graphic modeling of the
             different respiratory exercises is presented in such a way
             that they can be easily learned, applied and reproduced by
             physiotherapists.},
   Doi = {10.1016/j.rmr.2023.10.006},
   Key = {fds374555}
}

 
