Publications of Robert Calderbank: combined listing

%% Journal articles or Book chapters PUBLISHED   
@article{fds236010,
   Author = {Calderbank, AR},
   Title = {A uniformly packed [70, 58, 5] code does not exist},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {IT-32},
   Number = {6},
   Pages = {828-833},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1986},
   url = {http://dx.doi.org/10.1109/TIT.1986.1057232},
   Abstract = {Uniformly packed, linear e-error-correcting codes are
              considered. In particular, the nonexistence of a uniformly
              packed [70, 58, 5] code C⊥ is proved by examining
              geometries associated with the 3-weight code C.},
   Doi = {10.1109/TIT.1986.1057232},
   Key = {fds236010}
}

@article{fds236056,
   Author = {Calderbank, AR and Li, WCW and Poonen, B},
   Title = {A 2-adic approach to the analysis of cyclic
             codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {43},
   Number = {3},
   Pages = {977-986},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1997},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.568706},
   Abstract = {This paper describes how 2-adic numbers can be used to
             analyze the structure of binary cyclic codes and of cyclic
              codes defined over ℤ_{2^a}, a ≥ 2, the ring of integers
              modulo 2^a. It provides a 2-adic proof of a theorem of
              McEliece that characterizes the possible Hamming weights
              that can appear in a binary cyclic code. A generalization of
              this theorem is derived that applies to cyclic codes over
              ℤ_{2^a} that are obtained from binary cyclic codes by a
              sequence of Hensel lifts. This generalization characterizes
              the number of times a residue modulo 2^a appears as a
              component of an arbitrary codeword in the cyclic code. The
              limit of the sequence of Hensel lifts is a universal code
              defined over the 2-adic integers. This code was first
              introduced by Calderbank and Sloane (1995), and is the main
              subject of this paper. Binary cyclic codes and cyclic codes
              over ℤ_{2^a} are obtained from these universal codes by
              reduction modulo some power of 2. A special case of
              particular interest is cyclic codes over ℤ_4 that are
              obtained from binary cyclic codes by means of a single
              Hensel lift. The binary images of such codes under the Gray
              isometry include the Kerdock, Preparata, and
              Delsarte-Goethals codes. These are nonlinear binary codes
              that contain more codewords than any linear code presently
              known. Fundamental understanding of the composition of
              codewords in cyclic codes over ℤ_4 is central to the
             search for more families of optimal codes. This paper also
             constructs even unimodular lattices from the Hensel lift of
             extended binary cyclic codes that are self-dual with all
             Hamming weights divisible by 4. The Leech lattice arises in
             this way as do extremal lattices in dimensions 32 through
             48. © 1997 IEEE.},
   Doi = {10.1109/18.568706},
   Key = {fds236056}
}
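
%% Illustrative note (not part of the record above): the abstract refers to Hensel
%% lifting from the binary case to Z_{2^a}. The following is a minimal Python sketch of
%% the underlying Hensel/Newton lifting step, shown for a simple polynomial root rather
%% than for the cyclic-code lifting used in the paper; the polynomial is a made-up example.

# Hensel-lift a simple root r of f (f(r) = 0 mod 2, f'(r) odd) to a root mod 2**a.
def hensel_lift_root(f_coeffs, r, a):
    """f_coeffs: integer coefficients of f, lowest degree first."""
    def f(x, m):
        return sum(c * pow(x, i, m) for i, c in enumerate(f_coeffs)) % m
    def fprime(x, m):
        return sum(i * c * pow(x, i - 1, m) for i, c in enumerate(f_coeffs) if i > 0) % m
    modulus = 2
    for _ in range(a - 1):
        modulus *= 2
        # Newton step r <- r - f(r)/f'(r), valid because f'(r) is odd (a unit mod 2**k).
        inv = pow(fprime(r, modulus), -1, modulus)
        r = (r - f(r, modulus) * inv) % modulus
    return r

# Example (made up): x**2 + x + 2 has the simple root 0 mod 2; lift it to a root mod 2**8.
root = hensel_lift_root([2, 1, 1], 0, 8)
assert (root * root + root + 2) % 2**8 == 0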

@article{fds235760,
   Author = {Wang, L and Carlson, DE and Rodrigues, MRD and Calderbank, R and Carin,
             L},
   Title = {A Bregman matrix and the gradient of mutual information for
             vector Poisson and Gaussian channels},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {60},
   Number = {5},
   Pages = {2611-2629},
   Publisher = {IEEE},
   Year = {2014},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2014.2307068},
   Abstract = {A generalization of Bregman divergence is developed and
             utilized to unify vector Poisson and Gaussian channel
             models, from the perspective of the gradient of mutual
             information. The gradient is with respect to the measurement
             matrix in a compressive-sensing setting, and mutual
             information is considered for signal recovery and
             classification. Existing gradient-of-mutual-information
             results for scalar Poisson models are recovered as special
             cases, as are known results for the vector Gaussian model.
             The Bregman-divergence generalization yields a Bregman
             matrix, and this matrix induces numerous matrix-valued
             metrics. The metrics associated with the Bregman matrix are
             detailed, as are its other properties. The Bregman matrix is
             also utilized to connect the relative entropy and mismatched
             minimum mean squared error. Two applications are considered:
             1) compressive sensing with a Poisson measurement model and
             2) compressive topic modeling for analysis of a document
             corpora (word-count data). In both of these settings, we use
             the developed theory to optimize the compressive measurement
             matrix, for signal recovery and classification. © 1963-2012
             IEEE.},
   Doi = {10.1109/TIT.2014.2307068},
   Key = {fds235760}
}
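
%% Illustrative note: the paper above generalizes the Bregman divergence to a Bregman
%% matrix. Below is a minimal Python sketch of the classical (vector) Bregman divergence
%% that is being generalized; the matrix-valued version from the paper is not reproduced.

import numpy as np

def bregman_divergence(phi, grad_phi, x, y):
    """Classical Bregman divergence D_phi(x, y) = phi(x) - phi(y) - <grad phi(y), x - y>."""
    return phi(x) - phi(y) - np.dot(grad_phi(y), x - y)

# With phi(x) = ||x||^2 the divergence reduces to squared Euclidean distance;
# with the negative Shannon entropy it gives the (generalized) KL divergence.
phi = lambda x: np.sum(x ** 2)
grad_phi = lambda x: 2 * x
x, y = np.array([1.0, 2.0]), np.array([0.5, 1.5])
assert np.isclose(bregman_divergence(phi, grad_phi, x, y), np.sum((x - y) ** 2))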

@article{fds343642,
   Author = {Beirami, A and Calderbank, R and Christiansen, MM and Duffy, KR and Medard, M},
   Title = {A Characterization of Guesswork on Swiftly Tilting
             Curves},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {65},
   Number = {5},
   Pages = {2850-2871},
   Year = {2019},
   Month = {May},
   url = {http://dx.doi.org/10.1109/TIT.2018.2879477},
   Abstract = {Given a collection of strings, each with an associated
              probability of occurrence, the guesswork of each string is
              its position in a list ordered from most likely to least
             likely, breaking ties arbitrarily. The guesswork is central
             to several applications in information theory: average
             guesswork provides a lower bound on the expected
             computational cost of a sequential decoder to decode
             successfully the transmitted message; the complementary
             cumulative distribution function of guesswork gives the
             error probability in list decoding; the logarithm of
             guesswork is the number of bits needed in optimal lossless
             one-to-one source coding; and the guesswork is the number of
             trials required of an adversary to breach a password
             protected system in a brute-force attack. In this paper, we
             consider memoryless string sources that generate strings
             consisting of independent and identically distributed
             characters drawn from a finite alphabet, and characterize
             their corresponding guesswork. Our main tool is the tilt
             operation on a memoryless string source. We show that the
             tilt operation on a memoryless string source parametrizes an
             exponential family of memoryless string sources, which we
             refer to as the tilted family of the string source. We
             provide an operational meaning to the tilted families by
             proving that two memoryless string sources result in the
             same guesswork on all strings of all lengths if and only if
             their respective categorical distributions belong to the
             same tilted family. Establishing some general properties of
             the tilt operation, we generalize the notions of weakly
             typical set and asymptotic equipartition property to tilted
             weakly typical sets of different orders. We use this new
             definition to characterize the large deviations for all
             atypical strings and characterize the volume of tilted
             weakly typical sets of different orders. We subsequently
             build on this characterization to prove large deviation
             bounds on guesswork and provide an accurate approximation of
             its probability mass function.},
   Doi = {10.1109/TIT.2018.2879477},
   Key = {fds343642}
}
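
%% Illustrative note: a brute-force Python sketch of the guesswork definition quoted in
%% the abstract (position of a string in a most-likely-first ordering) for a small i.i.d.
%% source; the alphabet and probabilities below are made-up examples.

from itertools import product

def guesswork(string, char_probs, length):
    """Position (1-indexed) of `string` in a most-likely-first ordering of all
    strings of the given length over an i.i.d. source; ties broken by tuple order."""
    def prob(s):
        p = 1.0
        for c in s:
            p *= char_probs[c]
        return p
    ordering = sorted(product(sorted(char_probs), repeat=length),
                      key=lambda s: -prob(s))
    return ordering.index(tuple(string)) + 1

# Example: a memoryless binary source with P(a) = 0.7, P(b) = 0.3.
print(guesswork("aab", {"a": 0.7, "b": 0.3}, 3))   # 'aab' is among the 2nd-4th most likely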

@article{fds303202,
   Author = {Harms, A and Bajwa, WU and Calderbank, R},
   Title = {A constrained random demodulator for sub-Nyquist
             sampling},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {61},
   Number = {3},
   Pages = {707-723},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2013},
   Month = {February},
   url = {http://arxiv.org/abs/1204.0839v3},
   Abstract = {This paper presents a significant modification to the Random
             Demodulator (RD) of Tropp et al. for sub-Nyquist sampling of
             frequency-sparse signals. The modification, termed
             constrained random demodulator, involves replacing the
             random waveform, essential to the operation of the RD, with
             a constrained random waveform that has limits on its
             switching rate because fast switching waveforms may be hard
             to generate cleanly. The result is a relaxation on the
             hardware requirements with a slight, but manageable,
             decrease in the recovery guarantees. The paper also
             establishes the importance of properly choosing the
             statistics of the constrained random waveform. If the power
             spectrum of the random waveform matches the distribution on
             the tones of the input signal (i.e., the distribution is
             proportional to the power spectrum), then recovery of the
             input signal tones is improved. The theoretical guarantees
             provided in the paper are validated through extensive
              numerical simulations and phase transition plots. © 2012
             IEEE.},
   Doi = {10.1109/TSP.2012.2231077},
   Key = {fds303202}
}

@article{fds236073,
   Author = {Howard, SD and Calderbank, AR and Searle, SJ},
   Title = {A fast reconstruction algorithm for deterministic
              compressive sensing using second order Reed-Muller
             codes},
   Journal = {CISS 2008, The 42nd Annual Conference on Information
             Sciences and Systems},
   Pages = {11-15},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CISS.2008.4558486},
   Abstract = {This paper proposes a deterministic compressed sensing
             matrix that comes by design with a very fast reconstruction
             algorithm, in the sense that its complexity depends only on
             the number of measurements n and not on the signal dimension
             N. The matrix construction is based on the second order
             Reed-Muller codes and associated functions. This matrix does
             not have RIP uniformly with respect to all k-sparse vectors,
             but it acts as a near isometry on k-sparse vectors with very
             high probability. © 2008 IEEE.},
   Doi = {10.1109/CISS.2008.4558486},
   Key = {fds236073}
}

@article{fds236055,
   Author = {Gelblum, EA and Calderbank, AR},
   Title = {A forbidden rate region for generalized cross
             constellations},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {43},
   Number = {1},
   Pages = {335-341},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1997},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.567754},
   Abstract = {An analysis of the Generalized Cross Constellation (GCC) is
             presented and a new perspective on its coding algorithm is
             described. We show how the GCC can be used to address
             generic sets of symbol points in any multidimensional space
             through an example based on the matched spectral null coding
             used in magnetic recording devices. We also prove that there
             is a forbidden rate region of fractional coding rates that
             are practically unrealizable using the GCC construction. We
             introduce the idea of a constellation tree and show how its
             decomposition can be used to design GCC's matching desired
             parameters. Following this analysis, an algorithm to design
             the optimal rate GCC from a restriction on the maximum size
             of its constellation signal set is given, and a formula for
             determining the size of the GCC achieving a desired coding
             rate is derived. We finish with an upper bound on the size
             of the constellation expansion ratio. © 1997
             IEEE.},
   Doi = {10.1109/18.567754},
   Key = {fds236055}
}

@article{fds235787,
   Author = {Calderbank, AR and Wales, DB},
   Title = {A global code invariant under the Higman-Sims
             group},
   Journal = {Journal of Algebra},
   Volume = {75},
   Number = {1},
   Pages = {233-260},
   Year = {1982},
   Month = {January},
   ISSN = {0021-8693},
   url = {http://dx.doi.org/10.1016/0021-8693(82)90073-4},
   Doi = {10.1016/0021-8693(82)90073-4},
   Key = {fds235787}
}

@article{fds343581,
   Author = {Calderbank, R},
   Title = {A Good Method of Combining Codes},
   Journal = {Linear Algebra and Its Applications},
   Volume = {32},
   Pages = {115-124},
   Year = {1980},
   Month = {January},
   url = {http://dx.doi.org/10.1016/0024-3795(80)90011-7},
   Abstract = {Let q be an odd prime power, and suppose q ≡ −1 (mod 8).
             Let C(q) and C(q)∗ be the two extended binary quadratic
             residue codes (QR codes) of length q+1, and let
             T(q)={(a+x;b+x;a+b+x):a,b∈C(q),x∈C(q)∗}. We establish
             a square root bound on the minimum weight in T(q). Since the
             same type of bound applies to C(q) and C(q)∗, this is a
             good method of combining codes. © 1980, All rights
             reserved.},
   Doi = {10.1016/0024-3795(80)90011-7},
   Key = {fds343581}
}
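
%% Illustrative note: a Python sketch of the combining construction T(q) defined in the
%% abstract, applied to two toy binary codes standing in for C(q) and C(q)*; the actual
%% quadratic residue codes and the square root bound are not reproduced here.

from itertools import product

def combine(C, C_star):
    """T = {(a+x, b+x, a+b+x) : a, b in C, x in C_star}, with addition over GF(2)."""
    xor = lambda u, v: tuple((ui + vi) % 2 for ui, vi in zip(u, v))
    return {xor(a, x) + xor(b, x) + xor(xor(a, b), x)
            for a, b, x in product(C, C, C_star)}

# Toy length-4 binary codes standing in for C(q) and C(q)*.
C      = [(0, 0, 0, 0), (1, 1, 1, 1)]
C_star = [(0, 0, 0, 0), (1, 1, 0, 0), (0, 0, 1, 1), (1, 1, 1, 1)]
T = combine(C, C_star)
print(len(T), min(sum(w) for w in T if any(w)))   # number of codewords and minimum weight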

@article{fds236066,
   Author = {Calderbank, AR and Hardin, RH and Rains, EM and Shor, PW and Sloane,
             NJA},
   Title = {A Group-Theoretic Framework for the Construction of Packings
             in Grassmannian Spaces},
   Journal = {Journal of Algebraic Combinatorics},
   Volume = {9},
   Number = {2},
   Pages = {129-140},
   Year = {1999},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1018673825179},
   Abstract = {By using totally isotropic subspaces in an orthogonal space
             Ω+(2i, 2), several infinite families of packings of
              2^k-dimensional subspaces of real 2^i-dimensional space are
             constructed, some of which are shown to be optimal packings.
             A certain Clifford group underlies the construction and
             links this problem with Barnes-Wall lattices, Kerdock sets
             and quantum-error-correcting codes.},
   Doi = {10.1023/A:1018673825179},
   Key = {fds236066}
}
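
%% Illustrative note: packings of subspaces are typically compared via the chordal
%% distance. Below is a minimal Python helper for that metric (given orthonormal bases);
%% it is standard background, not the group-theoretic construction of the paper itself.

import numpy as np

def chordal_distance(A, B):
    """Chordal distance between the column spans of A and B (each n x k with
    orthonormal columns): d_c^2 = k - ||A^T B||_F^2 = sum of sin^2 of principal angles."""
    k = A.shape[1]
    M = np.dot(A.T, B)
    return np.sqrt(max(k - np.linalg.norm(M, 'fro') ** 2, 0.0))

# Two planes (k = 2) in R^4, given by orthonormal bases.
A = np.array([[1, 0], [0, 1], [0, 0], [0, 0]], dtype=float)
B = np.array([[1, 0], [0, 0], [0, 1], [0, 0]], dtype=float)
print(chordal_distance(A, B))   # one shared direction, one orthogonal direction: 1.0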

@article{fds331058,
   Author = {Calderbank, AR and Hammons, AR and Kumar, PV and Sloane, NJA and Solé,
             P},
   Title = {A linear construction for certain kerdock and preparata
             codes},
   Journal = {Bulletin of the American Mathematical Society},
   Volume = {29},
   Number = {2},
   Pages = {218-222},
   Publisher = {American Mathematical Society (AMS)},
   Year = {1993},
   Month = {January},
   url = {http://dx.doi.org/10.1090/S0273-0979-1993-00426-9},
   Abstract = {The Nordstrom-Robinson, Kerdock, and (slightly modified)
              Preparata codes are shown to be linear over ℤ4, the
             integers mod 4. The Kerdock and Preparata codes are duals
             over ℤ4, and the Nordstrom-Robinson code is self-dual. All
             these codes are just extended cyclic codes over ℤ4. This
             provides a simple definition for these codes and explains
             why their Hamming weight distributions are dual to each
             other. First-and second-order Reed-Muller codes are also
             linear codes over ℤ4, but Hamming codes in general are
             not, nor is the Golay code. © 1993 American Mathematical
             Society.},
   Doi = {10.1090/S0273-0979-1993-00426-9},
   Key = {fds331058}
}
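
%% Illustrative note: the ℤ4-linearity above yields nonlinear binary codes through the
%% standard Gray map 0->00, 1->01, 2->11, 3->10, which takes Lee weight to Hamming
%% weight. A minimal Python sketch of that map follows; it is standard background, not
%% code from the paper.

# The Gray map from Z4 to binary pairs: 0 -> 00, 1 -> 01, 2 -> 11, 3 -> 10.
GRAY = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}

def gray_image(codeword):
    """Binary image (length 2n) of a Z4 codeword (length n)."""
    return tuple(bit for symbol in codeword for bit in GRAY[symbol % 4])

def lee_weight(codeword):
    """Lee weight over Z4: wt(0)=0, wt(1)=wt(3)=1, wt(2)=2."""
    return sum(min(symbol % 4, 4 - symbol % 4) for symbol in codeword)

# The Gray map is an isometry: Lee weight over Z4 equals Hamming weight of the image.
word = (1, 2, 0, 3)
assert lee_weight(word) == sum(gray_image(word))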

@article{fds235924,
   Author = {Qureshi, TR and Zoltowski, MD and Calderbank, R},
   Title = {A MIMO-OFDM channel estimation scheme utilizing
             complementary sequences},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2677-2680},
   Publisher = {IEEE},
   Year = {2009},
   Month = {September},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2009.4960174},
   Abstract = {We present a pilot-assisted method for estimating the
             frequency selective channel in a MIMO-OFDM system. The pilot
             sequence is designed using the DFT of the Golay
             complementary sequences. Novel exploitation of the perfect
             autocorrelation property of Golay complementary sequences,
             in conjunction with OSTBC based pilot waveform scheduling
             across multiple OFDM frames, facilitates simple separation
             of the channel mixtures at the receive antennas. The DFT
             length used to transform the complementary sequence into the
              frequency domain is shown to be a critical parameter for
             correctly estimating the channel. This channel estimation
             scheme is then extended to antenna arrays of arbitrary
             sizes. ©2009 IEEE.},
   Doi = {10.1109/ICASSP.2009.4960174},
   Key = {fds235924}
}
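
%% Illustrative note: a Python sketch of the complementary-autocorrelation property of
%% Golay pairs exploited in the abstract; the recursive pair construction below is a
%% standard one and is not necessarily the sequence set used in the paper.

import numpy as np

def golay_pair(m):
    """Standard recursive Golay complementary pair of length 2**m over {+1, -1}."""
    a, b = np.array([1.0]), np.array([1.0])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

def acorr(x):
    """Aperiodic autocorrelation of x at lags 0 .. len(x)-1."""
    n = len(x)
    return np.array([np.dot(x[: n - k], x[k:]) for k in range(n)])

a, b = golay_pair(5)                                   # length-32 pair
s = acorr(a) + acorr(b)
assert s[0] == 2 * len(a) and np.allclose(s[1:], 0)    # range sidelobes cancel exactly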

@article{fds236060,
   Author = {Fan, JL and Calderbank, AR},
   Title = {A modified concatenated coding scheme, with applications to
             magnetic data storage},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {44},
   Number = {4},
   Pages = {1565-1574},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1998},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.681333},
   Abstract = {When a block modulation code is concatenated with an
              error-correction code (ECC) in the standard way, the use of a
             modulation code with long blocklengths results in error
             propagation. This correspondence analyzes the performance of
             modified concatenation, which involves reversing the order
             of modulation and ECC. This modified scheme reduces error
             propagation, provides greater flexibility in the choice of
             parameters, and facilitates soft-decision decoding, with
             little or no loss in transmission rate. In particular,
             examples are presented which show how this technique can
             allow fewer interleaves per sector in hard disk drives, and
             permit the use of more sophisticated block modulation codes
             which are better suited to the channel. © 1998
             IEEE.},
   Doi = {10.1109/18.681333},
   Key = {fds236060}
}

@article{fds236024,
   Author = {Calderbank, AR and Herro, MA and Telang, V},
   Title = {A Multilevel Approach to the Design of DC-Free Line
             Codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {35},
   Number = {3},
   Pages = {579-583},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1989},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.30980},
   Abstract = {A multilevel approach to the design of dc-free line codes is
             presented. The advantages of codes designed by this method
             over similar codes presented by Ferreira and Blaum are the
             improved run length/ accumulated charge parameters, higher
             transmission rate, and the systematic nature of the code
             construction. The multilevel structure also allows the use
             of suboptimum multistage decoding procedures. © 1989
             IEEE},
   Doi = {10.1109/18.30980},
   Key = {fds236024}
}
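
%% Illustrative note: dc-free line codes are judged by their accumulated charge (running
%% digital sum) and run-length parameters, as mentioned in the abstract. Below is a
%% minimal Python helper for screening a candidate +/-1 codeword against those two
%% parameters; the codeword is a made-up example.

from itertools import groupby

def rds_bound(symbols):
    """Maximum absolute running digital sum (accumulated charge) of a +/-1 sequence."""
    total, worst = 0, 0
    for s in symbols:
        total += s
        worst = max(worst, abs(total))
    return worst

def max_run_length(symbols):
    """Longest run of identical symbols."""
    return max(len(list(group)) for _, group in groupby(symbols))

word = [+1, -1, -1, +1, +1, -1, +1, -1]
print(rds_bound(word), max_run_length(word))   # small charge and run length suit dc-free use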

@article{fds235795,
   Author = {Calderbank, R and Mazo, JE},
   Title = {A New Description of Trellis Codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {30},
   Number = {6},
   Pages = {784-791},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1984},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TIT.1984.1056976},
   Abstract = {A trellis code is a “sliding window” method of encoding
             a binary data stream as a sequence of real or complex
             numbers that are input to a noisy transmission channel.
             Ungerboeck has constructed simple trellis codes that provide
             the same noise immunity as is given by increasing the power
             of uncoded transmission by factors ranging from two to four.
             His method is to specify an underlying convolutional code
             and a rule (mapping by set partitioning) that maps the
              output of this code onto a fixed signal constellation. A new
             description of a trellis code is given that combines these
             two steps into one. The new description is analytic rather
             than graphical. Many practical codes can be described very
             simply, and strict bounds on performance can be obtained. A
             method for differential encoding trellis codes is presented
             that was suggested by the authors' representation. © 1984
             IEEE},
   Doi = {10.1109/TIT.1984.1056976},
   Key = {fds235795}
}

@article{fds235776,
   Author = {Goparaju, S and Calderbank, R},
   Title = {A new sub-packetization bound for minimum storage
             regenerating codes},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1616-1620},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.2013.6620500},
   Abstract = {Codes for distributed storage systems are often designed to
             sustain failure of multiple storage disks. Specifically, an
             (n, k) MDS code stores k symbols in n disks such that the
             overall system is tolerant to a failure of up to n - k
             disks. However, access to at least k disks is still required
             to repair a single erasure. To reduce repair bandwidth,
             array codes are used where the stored symbols or packets are
             vectors of length ℓ. MDS array codes can potentially
              repair a single erasure using a fraction 1/(n - k) of data
             stored in the surviving nodes. We ask the following
             question: for a given (n, k), what is the minimum
             vector-length or sub-packetization factor ℓ required to
             achieve this optimal fraction? For exact recovery of
             systematic disks in an MDS code of low redundancy, i.e. k/n
              > 1/2, the best known explicit codes [1] have a
              sub-packetization factor ℓ which is exponential in k. It has
             been conjectured [2] that for a fixed number of parity
             nodes, it is in fact necessary for ℓ to be exponential in
              k. In this paper, we provide new converse bounds on k for a
              given ℓ. We prove that k ≤ ℓ^2 for an arbitrary but
              fixed number of parity nodes r = n − k. For the practical
             case of 2 parity nodes, we prove a stronger result that k
             ≤ 4ℓ. © 2013 IEEE.},
   Doi = {10.1109/ISIT.2013.6620500},
   Key = {fds235776}
}

@article{fds236031,
   Author = {Calderbank, AR and Coffman, EG and Flatto, L},
   Title = {A Note Extending the Analysis of Two-Head Disk Systems to
             More General Seek-Time Characteristics},
   Journal = {IEEE Transactions on Computers},
   Volume = {38},
   Number = {11},
   Pages = {1584-1586},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1989},
   Month = {January},
   url = {http://dx.doi.org/10.1109/12.42130},
   Abstract = {We analyze a model of a movable-head disk system with two
             read/write heads maintained a fixed distance d apart on each
             arm. Successive request-addresses are assumed to be
             independent random variables, uniformly distributed over the
             set of cylinders. The purpose of an earlier analysis was to
             find that value of d which minimizes the expected seek time
             per request, assuming that seek time varies linearly with
             the distance z traveled by the heads. In this note, we
             extend this analysis to more general seek-time
             characteristics which take into account nonlinear
             acceleration effects. Detailed results, combining both
             analysis and simulation experiments, are presented for seek
              times linear in z^α, 0 ≤ α ≤ 1. An unexpected result of
             the study was that the value of d which minimizes expected
             seek time is very nearly independent of α. © 1989
             IEEE},
   Doi = {10.1109/12.42130},
   Key = {fds236031}
}

@article{fds236079,
   Author = {Qureshi, TR and Zoltowski, MD and Calderbank, R},
   Title = {A novel approach to Doppler compensation and estimation for
             multiple targets in MIMO radar with unitary waveform matrix
             scheduling},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2473-2476},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288417},
   Abstract = {In this paper, we present a method of detecting the range
             and Doppler phase of a point target using multiple antennas.
             As a key illustrative example, we consider a 4 x 4 system
             employing a unitary matrix waveform set, e.g., formed from
             Golay complementary sequences. When a non-negligible Doppler
             shift is induced by the target motion, the waveform matrix
             formed from the complementary sequences is no longer
             unitary, resulting in significantly degraded target range
             estimates. To solve this problem, we adopt a subspace based
             approach exploiting the observation that the receive matrix
             formed from matched filtering of the reflected waveforms has
             a (non-trivial) null-space. Through processing of the
             waveforms with the appropriate vector from the null-space,
             we can significantly improve the range detection
              performance. Another important target attribute is the
              velocity with which the target is moving; to determine it,
              the exact Doppler phase shift induced by the target motion
              needs to be estimated with reasonable accuracy. To
              accomplish this task, we develop a strategy
             that uses the MUSIC algorithm to estimate the Doppler phase,
             and we use simulations to show that the phase estimates
             obtained are reasonably accurate even at low SNRs. © 2012
             IEEE.},
   Doi = {10.1109/ICASSP.2012.6288417},
   Key = {fds236079}
}

@article{fds335327,
   Author = {Hashemi, J and Campbell, K and Carpenter, K and Harris, A and Qiu, Q and Tepper, M and Espinosa, S and Schaich Borg, J and Marsan, S and Calderbank, R and Baker, J and Egger, HL and Dawson, G and Sapiro,
             G},
   Title = {A scalable app for measuring autism risk behaviors in young
             children: A technical validity and feasibility
             study},
   Journal = {Proceedings of the 5th EAI International Conference on
             Wireless Mobile Communication and Healthcare},
   Pages = {23-27},
   Publisher = {ICST},
   Year = {2015},
   url = {http://dx.doi.org/10.4108/eai.14-10-2015.2261939},
   Abstract = {In spite of recent advances in the genetics and neuroscience
              of early childhood mental health, behavioral observation is
              still the gold standard in screening, diagnosis, and outcome
              assessment. Unfortunately, clinical observation is often
              subjective, needs significant rater training, does not
              capture data from participants in their natural environment,
              and is not scalable for use in large populations or for
              longitudinal monitoring. To address these challenges, we
              developed and tested a self-contained app designed to
              measure toddlers' social communication behaviors in a
              primary care, school, or home setting. Twenty 16-30 month
              old children with and without autism participated in this
              study. Toddlers watched the developmentally appropriate
              visual stimuli on an iPad in a pediatric clinic and in our
              lab while the iPad camera simultaneously recorded video of
              the child's behaviors. Automated computer vision algorithms
              coded emotions and social referencing to quantify autism
              risk behaviors. We validated our automatic computer coding
              by comparing the computer-generated analysis of facial
              expression and social referencing to human coding of these
              behaviors. We report our method and propose the development
              and testing of measures of young children's behaviors as the
              first step toward development of a novel, fully integrated,
              low-cost, scalable screening tool for autism and other
              neurodevelopmental disorders of early childhood.},
   Doi = {10.4108/eai.14-10-2015.2261939},
   Key = {fds335327}
}

@article{fds235942,
   Author = {Sirianunpiboon, S and Howard, SD and Calderbank,
             AR},
   Title = {A scheme for fully polarimetric MIMO multiuser
             detection},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {1461-1465},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2009.5470081},
   Abstract = {Multiple-Input Multiple-Output (MIMO) functionality has been
             shown to dramatically increase the capacity of wireless
             communication systems when the environment provides rich
             multipath scattering. In a predominantly Line-of-Sight (LOS)
             environment, the loss of diversity reduces the potential
             gain considerably. Recent studies have shown that systems
              which use a fully polarimetric antenna, such as a triad
              antenna, at both ends of the link can provide stability
             in performance across a full range of propagation
             environments from LOS to pure Rayleigh scattering. These
             systems also provide resilience to arbitrary rotations of
             the transmit and receive antennas. This paper considers the
             multiuser downlink where both the base station and each user
             is equipped with a triad antenna. The system uses a CDMA
             transmitting scheme in which each user is assigned a two
             dimensional subspace and the transmitted information symbol
             for each user is coded across this subspace. The received
             signal for each user after despreading is equivalent to the
             decoding problem of a single user with space-time block code
             (STBC) system. We demonstrate that the transmitted
             information symbols can be chosen to have the structure of
             STBC with full rate, full-diversity and low complexity
             decoding. We also show how to generalize our basic MIMO CDMA
             scheme to apply to an inhomogeneous multiuser scenario where
             the base station has two transmit antennas, but each user is
              equipped with either one or two receive antennas and wishes
              to use a different STBC. © 2009 IEEE.},
   Doi = {10.1109/ACSSC.2009.5470081},
   Key = {fds235942}
}

@article{fds235885,
   Author = {Howard, SD and Calderbank, AR and Moran, W},
   Title = {A simple signal processing architecture for instantaneous
             radar polarimetry},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {53},
   Number = {4},
   Pages = {1282-1289},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {April},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2007.892809},
   Abstract = {This paper describes a new radar primitive that enables
             instantaneous radar polarimetry at essentially no increase
             in signal processing complexity. This primitive coordinates
             transmission of distinct waveforms on orthogonal
             polarizations and applies a unitary matched filter bank on
             receive. This avoids the information loss inherent in
             single-channel matched filters. A further advantage of this
             scheme is the elimination of range sidelobes. © 2007
             IEEE.},
   Doi = {10.1109/TIT.2007.892809},
   Key = {fds235885}
}

@article{fds236064,
   Author = {Naguib, AF and Tarokh, V and Seshadri, N and Calderbank,
             AR},
   Title = {A space-time coding modem for high-data-rate wireless
             communications},
   Journal = {IEEE Journal on Selected Areas in Communications},
   Volume = {16},
   Number = {8},
   Pages = {1459-1477},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1998},
   Month = {October},
   ISSN = {0733-8716},
   url = {http://dx.doi.org/10.1109/49.730454},
   Abstract = {This paper presents the theory and practice of a new
             advanced modem technology suitable for high-data-rate
             wireless communications and presents its performance over a
             frequency-flat Rayleigh fading channel. The new technology
             is based on space-time coded modulation (STCM) [1]-[5] with
             multiple transmit and/or multiple receive antennas and
             orthogonal pilot sequence insertion (O-PSI). In this
             approach, data is encoded by a space-time (ST) channel
             encoder and the output of the encoder is split into N
             streams to be simultaneously transmitted using N transmit
             antennas. The transmitter inserts periodic orthogonal pilot
             sequences in each of the simultaneously transmitted bursts.
             The receiver uses those pilot sequences to estimate the
             fading channel. When combined with an appropriately designed
             interpolation filter, accurate channel state information
             (CSI) can be estimated for the decoding process. Simulation
             results of the proposed modem, as applied to the IS-136
             cellular standard, are presented. We present the frame error
             rate (FER) performance results as a function of the
             signal-to-noise ratio (SNR) and the maximum Doppler
             frequency, in the presence of timing and frequency offset
             errors. Simulation results show that for 10% FER, a 32-state
             eight-phase-shift keyed (8-PSK) ST code with two transmit
             and two receive antennas can support data rates up to 55.8
             kb/s on a 30-kHz channel, at an SNR of 11.7 dB and a maximum
             Doppler frequency of 180 Hz. Simulation results for other
             codes and other channel conditions are also provided. We
             also compare the performance of the proposed STCM scheme
             with delay-diversity schemes and conclude that STCM can
             provide significant SNR improvement over simple delay
             diversity.},
   Doi = {10.1109/49.730454},
   Key = {fds236064}
}

@article{fds236030,
   Author = {Calderbank, AR and Delsarte, P and Sloane, NJA},
   Title = {A strengthening of the Assmus-Mattson Theorem},
   Pages = {41},
   Year = {1990},
   Month = {December},
   Abstract = {Summary form only given. Let w1 = d, w2, ..., ws be the
             weights of the nonzero code words in a binary linear [n, k,
             d] code C, and let w1′, w2′, ..., ws′ be the nonzero
             weights in the dual code C⊥. Let t be an integer in the
             range 0 < t < d such that there are at most d - t weights
             wi′ with 0 < wi′ ≤ n - t. Assmus and Mattson proved
             that the words of any weight wi in C form a t-design. Let δ
             = 0 or 1, according to whether C is even or not, and let B
             denote the set of code words of weight d. The present
             authors have proved that if w2 ≥ d + 4, then either (1) t
             = 1, d is odd, and B partitions {1, 2, ..., n}, or (2) B is
             a (t + δ + 1)-design, or (3) B is a {1, ..., t + δ, t + δ
             + 2}-design. If C is a self-orthogonal binary code with all
             weights divisible by 4, then the result extends to code
             words of any given weight. The special case of code words of
             minimal weight in extremal self-dual codes also follows from
             a theorem of Venkov and Koch.},
   Key = {fds236030}
}

@article{fds235802,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {A Strengthening of the Assmus-Mattson Theorem},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {37},
   Number = {5},
   Pages = {1261-1268},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1991},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.133244},
   Abstract = {Let w1 = d, w2, …, ws be the weights of the nonzero
              codewords in a binary linear [n, k, d] code C, and let w'1,
              w'2, …, w's' be the nonzero weights in the dual code C⊥.
              Let t be an integer in the range 0 < t < d such that there
              are at most d − t weights w'i with 0 < w'i ≤ n − t.
              Assmus and Mattson proved that the words of any weight wi in
              C form a t-design. We show that if w2 ≥ d + 4 then either
              the words of any nonzero weight wi form a (t+1)-design or
              else the codewords of minimal weight d form a {1,2,…, t,
              t+2}-design. If in addition C is self-dual with all weights
              divisible by 4 then the codewords of any given weight wi
              form either a (t + 1)-design or a {1,2,…, t, t + 2}-design.
              The special case of this result for codewords of minimal
              weight in an extremal self-dual code with all weights
              divisible by 4 also follows from a theorem of Venkov and
              Koch; however our proof avoids the use of modular forms. ©
              1991 IEEE},
   Doi = {10.1109/18.133244},
   Key = {fds235802}
}

@article{fds235937,
   Author = {Calderbank, R and Howard, S and Jafarpour, S},
   Title = {A sublinear algorithm for sparse reconstruction with
              ℓ2/ℓ2 recovery
             guarantees},
   Journal = {CAMSAP 2009 - 2009 3rd IEEE International Workshop on
             Computational Advances in Multi-Sensor Adaptive
             Processing},
   Pages = {209-212},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CAMSAP.2009.5413298},
   Abstract = {Compressed Sensing aims to capture attributes of a sparse
             signal using very few measurements. Candès and Tao showed
             that sparse reconstruction is possible if the sensing matrix
             acts as a near isometry on all k-sparse signals. This
             property holds with overwhelming probability if the entries
             of the matrix are generated by an iid Gaussian or Bernoulli
             process. There has been significant recent interest in an
             alternative signal processing framework; exploiting
             deterministic sensing matrices that with overwhelming
             probability act as a near isometry on k-sparse vectors with
             uniformly random support, a geometric condition that is
             called the Statistical Restricted Isometry Property or
             StRIP. This paper considers a family of deterministic
             sensing matrices satisfying the StRIP that are based on
              Delsarte-Goethals codes (binary chirps) and a k-sparse
             reconstruction algorithm with sublinear complexity. In the
             presence of stochastic noise in the data domain, this paper
             derives bounds on the ℓ2 accuracy of approximation in
             terms of the ℓ2 norm of the measurement noise and the
             accuracy of the best k-sparse approximation, also measured
             in the ℓ2 norm. This type of ℓ2/ℓ2 bound is tighter
             than the standard ℓ2/ℓ1 or ℓ1/ℓ1 bounds. © 2009
             IEEE.},
   Doi = {10.1109/CAMSAP.2009.5413298},
   Key = {fds235937}
}

@article{fds326906,
   Author = {Calderbank, R},
   Title = {Abuse and Disabled People: Vulnerability or social
             indifference?},
   Journal = {Disability & Society},
   Volume = {15},
   Number = {3},
   Pages = {521-534},
   Publisher = {Informa UK Limited},
   Year = {2000},
   Month = {May},
   url = {http://dx.doi.org/10.1080/713661966},
   Doi = {10.1080/713661966},
   Key = {fds326906}
}

@article{fds235887,
   Author = {Sira, SP and Cochran, D and Papandreou-Suppappola, A and Morrell, D and Moran, W and Howard, SD and Calderbank, R},
   Title = {Adaptive waveform design for improved detection of low-RCS
             targets in heavy sea clutter},
   Journal = {IEEE Journal on Selected Topics in Signal
             Processing},
   Volume = {1},
   Number = {1},
   Pages = {56-66},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {June},
   ISSN = {1932-4553},
   url = {http://dx.doi.org/10.1109/JSTSP.2007.897048},
   Abstract = {The dynamic adaptation of waveforms for transmission by
             active radar has been facilitated by the availability of
             waveform-agile sensors. In this paper, we propose a method
             to employ waveform agility to improve the detection of low
             radar-cross section (RCS) targets on the ocean surface that
             present low signal-to-clutter ratios due to high sea states
             and low grazing angles. Employing the expectation-maximization
             algorithm to estimate the time-varying parameters for
             compound-Gaussian sea clutter, we develop a generalized
             likelihood ratio test (GLRT) detector and identify a range
             bin of interest. The clutter estimates are then used to
             dynamically design a phase-modulated waveform that minimizes
             the out-of-bin clutter contributions to this range bin. A
             simulation based on parameters derived from real sea clutter
             data demonstrates that our approach provides around 10 dB
             improvement in detection performance over a nonadaptive
             system. © 2007 IEEE.},
   Doi = {10.1109/JSTSP.2007.897048},
   Key = {fds235887}
}

@article{fds235842,
   Author = {Diggavi, SN and Al-Dhahir, N and Calderbank, AR},
   Title = {Algebraic properties of space-time block codes in
             intersymbol interference multiple-access
             channels},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {49},
   Number = {10},
   Pages = {2403-2414},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2003},
   Month = {October},
   url = {http://dx.doi.org/10.1109/TIT.2003.817833},
   Abstract = {In this paper, we study the multiple-access channel where
             users employ space-time block codes (STBC). The problem is
             formulated in the context of an intersymbol interference
             (ISI) multiple-access channel which occurs for transmission
             over frequency-selective channels. The algebraic structure
             of the STBC is utilized to design joint interference
             suppression, equalization, and decoding schemes. Each of the
             K users transmits using Mt = 2 transmit antennas and a
             time-reversed STBC suitable for frequency-selective
             channels. We first show that a diversity order of 2Mr (v +
             1) is achievable at full transmission rate for each user,
             when we have Mr receive antennas, channel memory of v, and
             an optimal multiuser maximum-likelihood (ML) decoder is
             used. Due to the decoding complexity of the ML detector we
             study the algebraic structure of linear multiuser detectors
             which utilize the properties of the STBC. We do this both in
             the transform (D-domain) formulation and when we impose
             finite block-length constraints (matrix formulation). The
             receiver is designed to utilize the algebraic structure of
             the codes in order to preserve the block quaternionic
             structure of the equivalent channel for each user. We also
             explore some algebraic properties of D-domain quaternionic
             matrices and of quaternionic circulant block matrices that
             arise in this study.},
   Doi = {10.1109/TIT.2003.817833},
   Key = {fds235842}
}
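
%% Illustrative note: the Mt = 2 space-time block code referred to above is built on
%% Alamouti's 2x2 block, whose orthogonality underlies the quaternionic structure
%% discussed in the abstract. Below is a minimal Python check of that property; the
%% time-reversed ISI variant used in the paper is not reproduced.

import numpy as np

def alamouti(s1, s2):
    """2x2 Alamouti space-time block: rows are time slots, columns are antennas."""
    return np.array([[s1,           s2],
                     [-np.conj(s2), np.conj(s1)]])

# Orthogonality: S^H S = (|s1|^2 + |s2|^2) I, which is what enables simple
# symbol-by-symbol decoding and the block quaternionic structure.
s1, s2 = 1 + 2j, -0.5 + 1j
S = alamouti(s1, s2)
gram = np.dot(S.conj().T, S)
assert np.allclose(gram, (abs(s1) ** 2 + abs(s2) ** 2) * np.eye(2))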

@article{fds235799,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {An Eight-Dimensional Trellis Code},
   Journal = {Proceedings of the IEEE},
   Volume = {74},
   Number = {5},
   Pages = {757-759},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1986},
   Month = {January},
   url = {http://dx.doi.org/10.1109/PROC.1986.13542},
   Abstract = {An 8-state trellis code is described that uses a signal
              constellation from the 8-dimensional Gosset lattice E8. It
              can be used, for example, to transmit data at 9.6, 14.4, and
             19.2 kbits/s with a nominal coding gain of close to 6 dB. ©
             1986 IEEE},
   Doi = {10.1109/PROC.1986.13542},
   Key = {fds235799}
}

@article{fds236012,
   Author = {Brouwer, AE and Calderbank, AR},
   Title = {An Erdös-Ko-Rado theorem for regular intersecting families
             of octads},
   Journal = {Graphs and Combinatorics},
   Volume = {2},
   Number = {1},
   Pages = {309-316},
   Publisher = {Springer Nature},
   Year = {1986},
   Month = {December},
   ISSN = {0911-0119},
   url = {http://dx.doi.org/10.1007/BF01788105},
   Abstract = {Codewords of weight 8 in the [24, 12] binary Golay code are
             called octads. A family ℱ of octads is said to be a
             regular intersecting family if ℱ is a 1-design and |x ∩
             y| ≠ 0 for all x, y ∈ ℱ. We prove that if ℱ is a
             regular intersecting family of octads then |ℱ| ≤ 69.
             Equality holds if and only if ℱ is a quasi-symmetric
             2-(24, 8, 7) design. We then apply techniques from coding
             theory to prove nonexistence of this extremal configuration.
             © 1986 Springer-Verlag.},
   Doi = {10.1007/BF01788105},
   Key = {fds236012}
}

@article{fds303197,
   Author = {Goparaju, S and Tamo, I and Calderbank, R},
   Title = {An improved sub-packetization bound for minimum storage
             regenerating codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {60},
   Number = {5},
   Pages = {2770-2779},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2014},
   Month = {January},
   url = {http://arxiv.org/abs/1305.3498v1},
   Abstract = {Distributed storage systems employ codes to provide
              resilience to failure of multiple storage disks. In
              particular, an (n, k) maximum distance separable (MDS) code
              stores k symbols in n disks such that the overall system is
              tolerant to a failure of up to n − k disks. However, access
              to at least k disks is still required to repair a single
              erasure. To reduce repair bandwidth, array codes are used
              where the stored symbols or packets are vectors of length
              ℓ. MDS array codes have the potential to repair a single
              erasure using a fraction 1/(n − k) of the data stored in the
              remaining disks. We introduce new methods of analysis, which
              capitalize on the translation of the storage system problem
              into a geometric problem on a set of operators and
              subspaces. In particular, we ask the following question: for
              a given (n, k), what is the minimum vector-length or
              sub-packetization factor ℓ required to achieve this optimal
              fraction? For exact recovery of systematic disks in an MDS
              code of low redundancy, i.e., k/n > 1/2, the best known
              explicit codes have a sub-packetization factor that is
              exponential in k. It has been conjectured that for a fixed
              number of parity nodes, it is in fact necessary for ℓ to be
              exponential in k. In this paper, we provide a new
              log-squared converse bound on k for a given ℓ, and prove
              that k ≤ 2 log2 ℓ (logδ ℓ + 1), for an arbitrary number of
              parity nodes r = n − k, where δ = r/(r − 1). © 1963-2012
              IEEE.},
   Doi = {10.1109/TIT.2014.2309000},
   Key = {fds303197}
}

@article{fds235818,
   Author = {Kumar, PV and Helleseth, T and Calderbank,
              AR},
   Title = {An upper bound for some exponential sums over Galois rings
             and applications},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {70},
   Publisher = {IEEE},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ISIT.1994.394900},
   Abstract = {An upper bound for Weil-type exponential sums over Galois
             rings is presented together with some examples where the
             bound is tight. The bound may be regarded as the Galois-ring
             analogue of the well-known Weil-Carlitz-Uchiyama bound for
             exponential sums over finite fields. An application of the
             bound to the design of large families of eight-phase
             sequences having low correlation is also given. © 1994
             IEEE.},
   Doi = {10.1109/ISIT.1994.394900},
   Key = {fds235818}
}

@article{fds236042,
   Author = {Kumar, PV and Helleseth, T and Calderbank, AR},
   Title = {An Upper Bound for Weil Exponential Sums over Galois Rings
             and Applications},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {41},
   Number = {2},
   Pages = {456-468},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.370147},
   Abstract = {We present an analog of the well-known Weil-Carlitz-Uchiyama
             upper bound for exponential sums over finite fields for
             exponential sums over Galois rings. Some examples are given
              where the bound is tight. The bound has immediate
              application to the design of large families of
              phase-shift-keying sequences having low correlation and an
              alphabet of size p^e, p prime, e ≥ 2. Some new
             constructions of eight-phase sequences are provided. © 1995
             IEEE},
   Doi = {10.1109/18.370147},
   Key = {fds236042}
}

@article{fds236072,
   Author = {Suvorova, S and Moran, B and Howard, S and Calderbank,
              R},
   Title = {Application of Doppler resilient complementary waveforms to
             target tracking},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {1497-1500},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2008.4517905},
   Abstract = {The use of complementary codes as a means of reducing radar
             range sidelobes is well-known, but lack of resilience to
             Doppler is often cited as a reason not to deploy them. This
             work describes techniques for providing Doppler resilience
             with an emphasis on tailoring Doppler performance to the
             specific aim of target tracking. The Doppler performance can
             be varied by suitably changing the order of transmission of
             multiple sets of complementary waveforms. We have developed
             a method that improves Doppler performance significantly by
             arranging the transmission of multiple copies of
             complementary waveforms according to the first order
              Reed-Muller codes. Here we demonstrate significant tracking
             gains in the context of accelerating targets by the use of
             adaptively chosen waveform sequences of this kind, compared
             to both a fixed sequence of similar waveforms, and an LFM
             waveform. ©2008 IEEE.},
   Doi = {10.1109/ICASSP.2008.4517905},
   Key = {fds236072}
}

@article{fds235890,
   Author = {Thangaraj, A and Dihidar, S and Calderbank, AR and McLaughlin, SW and Merolla, JM},
   Title = {Applications of LDPC codes to the wiretap
             channel},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {53},
   Number = {8},
   Pages = {2933-2945},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {August},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2007.901143},
   Abstract = {With the advent of quantum key distribution (QKD) systems,
             perfect (i.e., information-theoretic) security can now be
             achieved for distribution of a cryptographic key. QKD
             systems and similar protocols use classical error-correcting
             codes for both error correction (for the honest parties to
             correct errors) and privacy amplification (to make an
             eavesdropper fully ignorant). From a coding perspective, a
             good model that corresponds to such a setting is the wire
             tap channel introduced by Wyner in 1975. In this
             correspondence, we study fundamental limits and coding
             methods for wire tap channels. We provide an alternative
             view of the proof for secrecy capacity of wire tap channels
             and show how capacity achieving codes can be used to achieve
             the secrecy capacity for any wiretap channel. We also
             consider binary erasure channel and binary symmetric channel
             special cases for the wiretap channel and propose specific
             practical codes. In some cases our designs achieve the
             secrecy capacity and in others the codes provide security at
             rates below secrecy capacity. For the special case of a
             noiseless main channel and binary erasure channel, we
             consider encoder and decoder design for codes achieving
             secrecy on the wiretap channel; we show that it is possible
             to construct linear-time decodable secrecy codes based on
             low-density parity-check (LDPC) codes that achieve secrecy.
             © 2007 IEEE.},
   Doi = {10.1109/TIT.2007.901143},
   Key = {fds235890}
}
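
%% Illustrative note: a Python sketch of coset coding for the wiretap channel (the secret
%% message selects a coset of a linear code and a random word of that coset is sent),
%% which is the style of construction studied in this line of work; the parity-check
%% matrix below is a toy example, not one of the LDPC designs from the paper.

import numpy as np

rng = np.random.default_rng(0)

# Toy parity-check matrix H of a [6, 3] binary code; the secret message is the
# syndrome, i.e. it selects a coset of the code (hypothetical small example).
H = np.array([[1, 1, 0, 1, 0, 0],
              [0, 1, 1, 0, 1, 0],
              [1, 0, 1, 0, 0, 1]]) % 2

def coset_encode(message):
    """Return a uniformly random length-6 word whose syndrome H x equals `message`."""
    while True:
        x = rng.integers(0, 2, size=6)
        if np.array_equal(np.dot(H, x) % 2, message):
            return x

def coset_decode(x):
    return np.dot(H, x) % 2          # the legitimate receiver just recomputes the syndrome

msg = np.array([1, 0, 1])
word = coset_encode(msg)
assert np.array_equal(coset_decode(word), msg)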

@article{fds236063,
   Author = {Naguib, AF and Seshadri, N and Calderbank, AR},
   Title = {Applications of space-time block codes and interference
             suppression for high capacity and high data rate wireless
             systems},
   Journal = {Conference Record of the Asilomar Conference on Signals,
             Systems and Computers},
   Volume = {2},
   Pages = {1803-1810},
   Year = {1998},
   Month = {December},
   Abstract = {This paper presents a combined interference suppression and
             ML decoding scheme for space-time block codes that can
             effectively suppress interference from other co-channel
             users while providing each user with a diversity benefit. We
             consider a multiuser environment with K synchronous
             co-channel users, each equipped with N transmit antennas
             and using space-time block coding. By exploiting the
             temporal and spatial structure of these codes, we develop a
             minimum mean-squared error (MMSE) interference suppression
             technique. Assuming that the receiver uses M ≥ K receive
             antennas, this technique will perfectly suppress the
             interference from the K-1 co-channel space-time users and
             provide a diversity order of N×(M-K+1) to each of the K
             users. Moreover, this MMSE solution lends itself to an
             adaptive implementation and does not require any explicit
             knowledge about the interference. In conjunction with this
             interference suppression technique, we show how space-time
             block codes can be used to increase the capacity and/or
             data rate of wireless communication systems.},
   Key = {fds236063}
}

@article{fds235796,
   Author = {Calderbank, AR and Mazo, JE and Wei, VK},
   Title = {ASYMPTOTIC UPPER BOUNDS ON THE MINIMUM DISTANCE OF TRELLIS
             CODES.},
   Journal = {IEEE Transactions on Communications},
   Volume = {COM-33},
   Number = {4},
   Pages = {305-309},
   Year = {1985},
   Month = {January},
   url = {http://dx.doi.org/10.1109/tcom.1985.1096299},
   Abstract = {A trellis code is a 'sliding window' method of encoding a
             binary data stream as a sequence of signal points. When a
             trellis code is used to encode data at the rate of k
             bits/channel symbol, each channel input depends not only on
             the most recent block of k bits to enter the encoder, but
             will also depend on a set of υ bits preceding this block.
             The υ bits determine the state of the encoder, and the most
             recent block of k bits generates the channel symbol
             conditional on the encoder state. The performance of a
             trellis code depends on a suitably defined minimum distance
             property of that code. This paper obtains upper bounds on
             this minimum distance that are better than any previously
             known.},
   Doi = {10.1109/tcom.1985.1096299},
   Key = {fds235796}
}

@article{fds236003,
   Author = {Applebaum, L and Bajwa, WU and Duarte, MF and Calderbank,
             R},
   Title = {Asynchronous code-division random access using convex
             optimization},
   Journal = {Physical Communication},
   Volume = {5},
   Number = {2},
   Pages = {129-147},
   Publisher = {Elsevier BV},
   Year = {2012},
   Month = {June},
   ISSN = {1874-4907},
   url = {http://dx.doi.org/10.1016/j.phycom.2011.09.006},
   Abstract = {Many applications in cellular systems and sensor networks
             involve a random subset of a large number of users
             asynchronously reporting activity to a base station. This
             paper examines the problem of multiuser detection (MUD) in
             random access channels for such applications. Traditional
             orthogonal signaling ignores the random nature of user
             activity in this problem and limits the total number of
             users to be on the order of the number of signal space
             dimensions. Contention-based schemes, on the other hand,
             suffer from delays caused by colliding transmissions and the
             hidden node problem. In contrast, this paper presents a
             novel pairing of an asynchronous non-orthogonal
             code-division random access scheme with a convex
             optimization-based MUD algorithm that overcomes the issues
             associated with orthogonal signaling and contention-based
             methods. Two key distinguishing features of the proposed MUD
             algorithm are that it does not require knowledge of the
             delay or channel state information of every user and it has
             polynomial-time computational complexity. The main
             analytical contribution of this paper is the relationship
             between the performance of the proposed MUD algorithm in the
             presence of arbitrary or random delays and two simple
             metrics of the set of user codewords. The study of these
             metrics is then focused on two specific sets of codewords,
             random binary codewords and specially constructed algebraic
             codewords, for asynchronous random access. The ensuing
             analysis confirms that the proposed scheme together with
             either of these two codeword sets significantly outperforms
             the orthogonal signaling-based random access in terms of the
             total number of users in the system. © 2011 Elsevier
             B.V.},
   Doi = {10.1016/j.phycom.2011.09.006},
   Key = {fds236003}
}

@article{fds235806,
   Author = {Calderbank, AR},
   Title = {Balanced Codes and Nonequiprobable Signaling},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {38},
   Number = {3},
   Pages = {1119-1122},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1992},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.135651},
   Abstract = {The problem of shaping signal constellations that are
             designed for the Gaussian channel is considered. The signal
             constellation consists of all points from some translate of
             a lattice Λ that lie within a region ℛ. The signal
             constellation is partitioned into T annular
             subconstellations Ω_0, ..., Ω_{T-1} by scaling the region
             ℛ. Signal points in the same subconstellation are used
             equiprobably, and a shaping code selects region Ω_i with
             frequency f_i. If the
             signal constellation is partitioned into annular
             subconstellations of unequal size, then absent some
             cleverness, the transmission rate will vary with the choice
             of codeword in the shaping code, and it will be necessary to
             queue the data in buffers. It is described how balanced
             binary codes constructed by Knuth can be used to avoid a
             data rate that is probabilistic. The basic idea is that if
             symbols 0 and 1 represent constellations of unequal size,
             and if all shaping codewords have equally many 0’s and
             1’s, then the data rate will be deterministic. © 1992
             IEEE},
   Doi = {10.1109/18.135651},
   Key = {fds235806}
}
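
%% Editor's sketch (Python, illustrative; not code from the entry above). It
%% demonstrates the balancing idea attributed to Knuth in the abstract: for
%% any binary word of even length, complementing some prefix yields a word
%% with equally many 0's and 1's, so sending the prefix length together with
%% the balanced word keeps the data rate deterministic. Function and variable
%% names are illustrative assumptions.
def knuth_balance(bits):
    """Return (i, word) where complementing the first i bits of `bits`
    (a 0/1 list of even length) yields a balanced word."""
    n = len(bits)
    assert n % 2 == 0
    for i in range(n + 1):
        candidate = [1 - b for b in bits[:i]] + bits[i:]
        if sum(candidate) == n // 2:               # equally many 0's and 1's
            return i, candidate
    raise ValueError("unreachable for even-length binary input")

if __name__ == "__main__":
    word = [1, 1, 1, 0, 1, 1, 0, 1]
    i, balanced = knuth_balance(word)
    print(i, balanced)                             # prints 2 and a word with four 1's
%% --- end of sketch ---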

@article{fds236028,
   Author = {Calderbank, AR and Mazo, JE},
   Title = {Baseband Line Codes Via Spectral Factorization},
   Journal = {IEEE Journal on Selected Areas in Communications},
   Volume = {7},
   Number = {6},
   Pages = {914-928},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1989},
   Month = {January},
   url = {http://dx.doi.org/10.1109/49.29614},
   Abstract = {We describe a method of designing baseband line codes with
             prescribed spectral nulls in the transmitted spectrum. These
             codes have the important property that the transmitted power
             is adjustable (with a concomitant change in spectral shape,
             i.e., null width) and can be made arbitrarily close to the
             innovations power, while keeping the minimum distance
             between signal points (or sequences) constant. The method is
             a generalization of the approach introduced by Forney and
             Calderbank [7] that discusses a spectral null at dc. We
             apply results from linear prediction theory; the essential
             design step requires the spectral factorization of a certain
             trigonometric polynomial. The line code that results can
             easily be used in conjunction with a large class of trellis
             coded modulation schemes (just as in [7]). In the second
             part of the paper, we construct specific baseband codes
             using a representation of the general theory that involves a
             dither variable, which is used to create integer symbols and
             to minimize the size of the symbol alphabet. We emphasize
             the design of line codes with a double null at dc employing
             the symbol alphabet {±1, ±3}. © 1989 IEEE},
   Doi = {10.1109/49.29614},
   Key = {fds236028}
}

@article{fds236023,
   Author = {Calderbank, AR and Mazo, JE and Lee, TA},
   Title = {Baseband Trellis Codes with a Spectral Null at
             Zero},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {34},
   Number = {3},
   Pages = {425-434},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1988},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.6023},
   Abstract = {A method is described for modifying classical N-dimensional
             trellis codes to provide baseband codes that combine a
             spectral null at dc with significant coding gain. The
             information rate of the classical code is decreased by one
             bit, and this extra redundancy is used to keep the running
             digital sum bounded. Equivalently, if the rate is held
             constant, then twice as many signal points are needed,
             causing a power penalty of 6/N dB. Baseband trellis codes
             are presented for several information rates together with
             complete spectral plots and performance comparisons. A
             method of constructing baseband codes with multiple spectral
             nulls is also described. © 1988 IEEE},
   Doi = {10.1109/18.6023},
   Key = {fds236023}
}

@article{fds235894,
   Author = {Sirianunpiboon, S and Calderbank, AR and Howard,
             SD},
   Title = {Bayesian analysis of interference cancellation for Alamouti
             multiplexing},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {54},
   Number = {10},
   Pages = {4755-4761},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {October},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2008.929012},
   Abstract = {Space-time codes built out of Alamouti components have been
             adopted in wireless standards such as UMTS, IEEE 802.11n,
             and IEEE 802.16, where they facilitate higher data rates
             through multiplexing of parallel data streams and the
             addition of two or more antennas at the receiver that
             perform interference cancellation. This correspondence
             provides new theoretical insight into different algorithms
             for interference cancellation through a Bayesian analysis
             that expresses performance as a function of signal-to-noise
             ratio (SNR) in terms of the "angles" between different
             space-time coded data streams. © 2008 IEEE.},
   Doi = {10.1109/TIT.2008.929012},
   Key = {fds235894}
}
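
%% Editor's sketch (Python, illustrative; not code from the correspondence
%% above). For a single receive antenna and no noise it shows why Alamouti
%% components suit the interference-cancellation receivers discussed in the
%% abstract: after conjugating the second received sample, the effective 2x2
%% channel matrix has orthogonal columns, so a matched filter separates the
%% two symbols. The random draws and variable names are illustrative
%% assumptions.
import numpy as np

rng = np.random.default_rng(1)
s = rng.standard_normal(2) + 1j * rng.standard_normal(2)   # two data symbols
h = rng.standard_normal(2) + 1j * rng.standard_normal(2)   # flat-fading gains

# Alamouti block: slot 1 sends (s0, s1), slot 2 sends (-conj(s1), conj(s0))
r1 = h[0] * s[0] + h[1] * s[1]
r2 = -h[0] * np.conj(s[1]) + h[1] * np.conj(s[0])

# stacking [r1, conj(r2)] gives an effective channel with orthogonal columns
H_eff = np.array([[h[0], h[1]],
                  [np.conj(h[1]), -np.conj(h[0])]])
r = np.array([r1, np.conj(r2)])

gain = (np.abs(h) ** 2).sum()
print(np.allclose(H_eff.conj().T @ H_eff, gain * np.eye(2)))   # True
print(np.allclose(H_eff.conj().T @ r / gain, s))               # True: symbols recovered
%% --- end of sketch ---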

@article{fds235982,
   Author = {Harms, A and Bajwa, WU and Calderbank, R},
   Title = {Beating Nyquist through correlations: A constrained random
             demodulator for sampling of sparse bandlimited
             signals},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {5968-5971},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5947721},
   Abstract = {Technological constraints severely limit the rate at which
             analog-to-digital converters can reliably sample signals.
             Recently, Tropp et al. proposed an architecture, termed the
             random demodulator (RD), that attempts to overcome this
             obstacle for sparse bandlimited signals. One integral
             component of the RD architecture is a white noise-like,
             bipolar modulating waveform that changes polarity at a rate
             equal to the signal bandwidth. Since there is a hardware
             limitation to how fast analog waveforms can change polarity
             without undergoing shape distortion, this leads to the RD
             also having a constraint on the maximum allowable bandwidth.
             In this paper, an extension of the RD, termed the
             constrained random demodulator (CRD), is proposed that
             bypasses this bottleneck by replacing the original
             modulating waveform with a run-length limited (RLL)
             modulating waveform that changes polarity at a slower rate
             than the signal bandwidth. One of the main contributions of
             the paper is establishing that the CRD, despite employing a
             modulating waveform with correlations, enjoys some
             theoretical guarantees for certain RLL waveforms. In
             addition, for a given sampling rate and rate of change in
             the modulating waveform polarity, numerical simulations
             confirm that the CRD, using an appropriate RLL waveform, can
             sample a signal with an even wider bandwidth without a
             significant loss in performance. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5947721},
   Key = {fds235982}
}

@article{fds236001,
   Author = {Jafarpour, S and Duarte, MF and Calderbank, R},
   Title = {Beyond worst-case reconstruction in deterministic compressed
             sensing},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1852-1856},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   url = {http://dx.doi.org/10.1109/ISIT.2012.6283601},
   Abstract = {The role of random measurement in compressive sensing is
             analogous to the role of random codes in coding theory. In
             coding theory, decoders that can correct beyond the minimum
             distance of a code allow random codes to achieve the Shannon
             limit. In compressed sensing, the counterpart of minimum
             distance is the spark of the measurement matrix, i.e., the
             size of the smallest set of linearly dependent columns. This
             paper constructs a family of measurement matrices where the
             columns are formed by exponentiating codewords from a
             classical binary error-correcting code of block length M.
             The columns can be partitioned into mutually unbiased bases,
             and the spark of the corresponding measurement matrix is
             shown to be O(√M) by identifying a configuration of
             columns that plays a role similar to that of the Dirac comb
             in classical Fourier analysis. Further, an explicit basis
             for the null space of these measurement matrices is given in
             terms of indicator functions of binary self-dual codes.
             Reliable reconstruction of k-sparse inputs is shown for k of
             order M/log(M) which is best possible and far beyond the
             worst case lower bound provided by the spark. © 2012
             IEEE.},
   Doi = {10.1109/ISIT.2012.6283601},
   Key = {fds236001}
}
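
%% Editor's sketch (Python, illustrative; not code from the paper above). The
%% abstract defines the spark of a measurement matrix as the size of the
%% smallest set of linearly dependent columns; the brute-force routine below
%% computes it for a toy matrix to make the definition concrete. The function
%% name, tolerance, and example matrix are illustrative assumptions, and the
%% exhaustive search is only feasible for tiny matrices.
import numpy as np
from itertools import combinations

def spark(A, tol=1e-10):
    """Smallest number of linearly dependent columns of A (brute force)."""
    n, p = A.shape
    for size in range(1, p + 1):
        for cols in combinations(range(p), size):
            if np.linalg.matrix_rank(A[:, list(cols)], tol=tol) < size:
                return size
    return p + 1          # convention when A has full column rank

if __name__ == "__main__":
    A = np.array([[1.0, 0.0, 1.0, 2.0],
                  [0.0, 1.0, 1.0, 0.0],
                  [0.0, 0.0, 0.0, 0.0]])
    print(spark(A))       # prints 2: columns 0 and 3 are collinear
%% --- end of sketch ---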

@article{fds235804,
   Author = {Calderbank, AR and Frankl, P},
   Title = {Binary codes and quasi-symmetric designs},
   Journal = {Discrete Mathematics},
   Volume = {83},
   Number = {2-3},
   Pages = {201-204},
   Publisher = {Elsevier BV},
   Year = {1990},
   Month = {August},
   ISSN = {0012-365X},
   url = {http://dx.doi.org/10.1016/0012-365X(90)90005-3},
   Abstract = {We obtain a new necessary condition for the existence of a
             2-(v, k, λ) design where the block intersection sizes
             s_1, s_2, ..., s_n satisfy s_1 ≡ s_2 ≡ ... ≡ s_n ≡ s (mod 2).
             This condition eliminates quasi-symmetric 2-(20, 10, 18)
             and 2-(60, 30, 58) designs. Quasi-symmetric 2-(20, 8, 14)
             designs are eliminated by an ad hoc coding theoretic
             argument. © 1990.},
   Doi = {10.1016/0012-365X(90)90005-3},
   Key = {fds235804}
}

@article{fds236013,
   Author = {Calderbank, AR and Heegard, C and Lee, TA},
   Title = {Binary Convolutional Codes with Application to Magnetic
             Recording},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {32},
   Number = {6},
   Pages = {797-815},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1986},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TIT.1986.1057245},
   Abstract = {Calderbank, Heegard, and Ozarow [1] have suggested a method
             of designing codes for channels with intersymbol
             interference, such as the magnetic recording channel. These
             codes are designed to exploit intersymbol interference. The
             standard method is to minimize intersymbol interference by
             constraining the input to the channel using run-length
             limited sequences. Calderbank, Heegard, and Ozarow
             considered an idealized model of an intersymbol interference
             channel that leads to the problem of designing codes for a
             partial response channel with transfer function (1 - D^N)/2,
             where the channel inputs are constrained to be ±1.
             This problem is considered here. Channel inputs are
             generated using a nontrivial coset of a binary convolutional
             code. The coset is chosen to limit the zero-run length of
             the output of the channel and so maintain clock
             synchronization. The minimum squared Euclidean distance
             between outputs corresponding to distinct inputs is bounded
             below by the free distance of a second convolutional code
             which we call the magnitude code. An interesting feature of
             the analysis is that magnitude codes that are catastrophic
             may perform better than those that are noncatastrophic.
             Copyright © 1986 by The Institute of Electrical and
             Electronics Engineers, Inc.},
   Doi = {10.1109/TIT.1986.1057245},
   Key = {fds236013}
}

@article{fds236007,
   Author = {Calderbank, AR and Heegard, C and Lee, TA},
   Title = {BINARY CONVOLUTIONAL CODES WITH APPLICATION TO MAGNETIC
             RECORDING.},
   Pages = {42},
   Year = {1986},
   Month = {December},
   Abstract = {Summary form only given. A. R. Calderbank et al. have
             suggested a method of designing codes for channels with
             intersymbol interference, such as the magnetic recording
             channel. They considered an idealized model of the magnetic
             recording channel that leads to the problem of designing
             codes for a partial response channel with transfer function
             (1 - D^N)/2, where the channel inputs are constrained to be
             plus or minus 1. This problem is considered here. Channel
             inputs are generated using a nontrivial coset of a binary
             convolutional code. The coset is chosen to limit the zero-run
             length of the output of the channel and so maintain clock
             synchronization. The minimum squared Euclidean distance
             between outputs corresponding to distinct inputs is bounded
             below by the free distance of a second convolutional code
             called the magnitude code. An interesting feature of the
             analysis is that magnitude codes that are catastrophic may
             perform better than those that are noncatastrophic.},
   Key = {fds236007}
}

@article{fds235753,
   Author = {Goparaju, S and Calderbank, R},
   Title = {Binary cyclic codes that are locally repairable},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {676-680},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.2014.6874918},
   Abstract = {Codes for storage systems aim to minimize the repair
             locality, which is the number of disks (or nodes) that
             participate in the repair of a single failed disk.
             Simultaneously, the code must sustain a high rate, operate
             on a small finite field to be practically significant and be
             tolerant to a large number of erasures. To this end, we
             construct new families of binary linear codes that have an
             optimal dimension (rate) for a given minimum distance and
             locality. Specifically, we construct cyclic codes that are
             locally repairable for locality 2 and distances 2, 6 and 10.
             In doing so, we discover new upper bounds on the code
             dimension, and prove the optimality of enabling local repair
             by provisioning disjoint groups of disks. Finally, we extend
             our construction to build codes that have multiple repair
             sets for each disk. © 2014 IEEE.},
   Doi = {10.1109/ISIT.2014.6874918},
   Key = {fds235753}
}

@article{fds235884,
   Author = {Aggarwal, V and Calderbank, AR},
   Title = {Boolean functions, projection operators and quantum error
             correcting codes},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {2091-2095},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.2007.4557529},
   Abstract = {This paper describes a common mathematical framework for the
             design of additive and non-additive Quantum Error Correcting
             Codes. It is based on a correspondence between boolean
             functions and projection operators. The new framework
             extends to operator quantum error correcting codes. ©2007
             IEEE.},
   Doi = {10.1109/ISIT.2007.4557529},
   Key = {fds235884}
}

@article{fds235906,
   Author = {Aggarwal, V and Calderbank, AR},
   Title = {Boolean functions, projection operators, and quantum error
             correcting codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {54},
   Number = {4},
   Pages = {1700-1707},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {April},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2008.917720},
   Abstract = {This paper describes a fundamental correspondence between
             Boolean functions and projection operators in Hilbert space.
             The correspondence is widely applicable, and it is used in
             this paper to provide a common mathematical framework for
             the design of both additive and nonadditive quantum error
             correcting codes. The new framework leads to the
             construction of a variety of codes including an infinite
             class of codes that extend the original ((5, 6, 2)) code
             found by Rains et al. It also extends to operator quantum
             error correcting codes. © 2008 IEEE.},
   Doi = {10.1109/TIT.2008.917720},
   Key = {fds235906}
}

@article{fds235946,
   Author = {Bennatan, A and Aggarwal, V and Wu, Y and Calderbank, AR and Hoydis, J and Chindapol, A},
   Title = {Bounds and lattice-based transmission strategies for the
             phase-faded dirty-paper channel},
   Journal = {IEEE Transactions on Wireless Communications},
   Volume = {8},
   Number = {7},
   Pages = {3620-3627},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {July},
   ISSN = {1536-1276},
   url = {http://dx.doi.org/10.1109/TWC.2009.080569},
   Abstract = {We consider a fading version of the dirty-paper problem, as
             proposed by Grover and Sahai. In this formulation, the
             various signals involved are complex-valued, and the
             interference (known only to the transmitter) is multiplied
             by a random complex-valued coefficient, whose phase is known
             only to the receiver. We focus on a compound channel
             formulation, and seek to maximize the worst-case
             performance. We present an achievable strategy modeled on
             the lattice-based approach of Erez, Shamai and Zamir and
             propose heuristic methods to optimize its parameters. We
             also derive an upper bound on the maximum achievable
             transmission rates. Our bounds are shown to be tight in some
             settings, yielding a complete characterization of capacity.
             We also provide simulation results, indicating the practical
             effectiveness of our approaches. © 2009
             IEEE.},
   Doi = {10.1109/TWC.2009.080569},
   Key = {fds235946}
}

@article{fds235896,
   Author = {Bennatan, A and Calderbank, AR and Shamai, S},
   Title = {Bounds on the MMSE of "bad" LDPC codes at rates above
             capacity},
   Journal = {46th Annual Allerton Conference on Communication, Control,
             and Computing},
   Pages = {1065-1072},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ALLERTON.2008.4797677},
   Abstract = {We present bounds on the minimum mean square error (MMSE) of
             LDPC codes at rates above capacity. One potential
             application for MMSE estimation involves cooperative
             communication. A relay following a compress-and-forward (CF)
             strategy could first compute an estimate of the transmitted
             codeword, to reduce the level of noise in the retransmitted
             signal. Our first bound is based on an analysis of the LDPC
             belief-propagation decoder. A second bound relies on the
             relationship between the mutual information and the MMSE,
             which was discovered by Guo et al. We compute our bounds
             for "bad" LDPC codes (requiring SNRs that are far above the
             Shannon limit, for reliable communications to be possible)
             and show that such codes substantially outperform "good"
             codes. This advantage of "bad" codes implies an interesting
             degree of freedom in the design of codes for cooperative
             communications. © 2008 IEEE.},
   Doi = {10.1109/ALLERTON.2008.4797677},
   Key = {fds235896}
}

@article{fds235990,
   Author = {Wu, Y and Viswanathan, H and Klein, T and Haner, M and Calderbank,
             R},
   Title = {Capacity optimization in networks with heterogeneous radio
             access technologies},
   Journal = {GLOBECOM - IEEE Global Telecommunications
             Conference},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   url = {http://dx.doi.org/10.1109/GLOCOM.2011.6134226},
   Abstract = {As it becomes common for wireless service providers (WSP) to
             employ multiple heterogeneous radio access technologies
             (RAT), the management of the combined resources across
             multiple RATs arises as an important issue. The WSP's
             objective is to assign different users to the different RATs
             so as to maximize network capacity (or total utility) while
             ensuring that individual users' quality of service (QoS)
             requirements are met. In this paper, we consider this
             resource allocation problem for two scenarios: voice
             communication and video communication. For voice
             communication, we propose a stable and optimal assignment
             scheme based on the deferred acceptance algorithm for both
             static and online cases. For video communication,
             identifying the NP-hardness of the problem, we propose and
             compare a set of heuristic algorithms including a
             low-complexity, high-performance scheme. © 2011
             IEEE.},
   Doi = {10.1109/GLOCOM.2011.6134226},
   Key = {fds235990}
}

@article{fds236039,
   Author = {Seshadri, N and Calderbank, AR and Pottie, GJ},
   Title = {Channel coding for co-channel interference suppression in
             wireless communications},
   Journal = {IEEE International Conference on Communications},
   Volume = {2},
   Pages = {884-888},
   Year = {1995},
   Month = {January},
   Abstract = {Co-channel interference is a major impairment in wireless
             systems with channel re-use. In practice the performance of
             time division multiple access (TDMA) and frequency division
             multiple access (FDMA) systems is limited by a few dominant
             co-channel interferers. We present channel codes that are
             matched to an adaptive linear receiver, so that the
             combination provides interference suppression. It is shown
             that a simple one symbol parity check code is capable of
             suppressing one interferer, a repetition code of length N is
             capable of suppressing N-1 interferers, and a code of K
             information symbols and N channel symbols is capable of
             suppressing N/K interferers. With simple parity check codes
             and repetition codes, the tap setting of an adaptive
             combiner can be configured using a 20-40 symbol training
             sequence. Simulations for multiple cells reveal significant
             capacity improvement is possible.},
   Key = {fds236039}
}

@article{fds236038,
   Author = {Seshadri, N and Calderbank, AR and Pottie, G},
   Title = {Channel coding for cochannel interference suppression in
             wireless communication systems},
   Journal = {IEEE Vehicular Technology Conference},
   Volume = {2},
   Pages = {629-633},
   Year = {1995},
   Month = {January},
   Abstract = {Cochannel interference is a major impairment in cellular
             systems. In practice the performance of time and frequency
             division multiple access (TDMA and FDMA) systems is limited
             by a few dominant cochannel interferers. Conventional
             channel coding techniques treat this interference as noise
             to mitigate its effect. In this work it is shown that
             cochannel interference can be totally suppressed by using
             an adaptive non-linear decoder. We illustrate our ideas
             using codes over the real (complex) field as well as codes
             over a finite field. The decoding technique can be combined
             with antenna
             diversity to offer increased interference suppression
             capability.},
   Key = {fds236038}
}

@article{fds236034,
   Author = {Pottie, GJ and Calderbank, AR},
   Title = {Channel coding strategies for cellular radio},
   Journal = {Proceedings of the 1993 IEEE International Symposium on
             Information Theory},
   Pages = {251},
   Year = {1993},
   Month = {January},
   Abstract = {To improve re-use of time/frequency slots in a cellular
             radio system, it is desirable for the average interference
             levels seen by all users to be made approximately equal. We
             provide constructions based on orthogonal Latin squares that
             guarantee different sets of users to interfere in successive
             slots. We illustrate how this may be combined with
             convolutional coding to provide large performance
             improvement with low delay in a slow hopped
             system.},
   Key = {fds236034}
}

@article{fds236047,
   Author = {Pottie, GJ and Calderbank, AR},
   Title = {Channel Coding Strategies for Cellular Radio},
   Journal = {IEEE Transactions on Vehicular Technology},
   Volume = {44},
   Number = {4},
   Pages = {763-770},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/25.467960},
   Abstract = {To improve re-use of time/frequency slots in a cellular
             radio system, it is desirable for the average interference
             levels seen by all users to be made approximately equal. We
             provide constructions based on orthogonal Latin squares that
             guarantee different sets of users will interfere in
             successive slots. When signal-to-interference ratios are
             independent across successive symbols, channel codes can
             provide a large diversity gain which is far in excess of the
             gain against additive noise. Consequently, coding with
             interleaving fits together very naturally with this
             construction. We illustrate how to achieve large performance
             improvement using convolutional codes with low decoding
             delay in a slow hopped system. © 1995 IEEE},
   Doi = {10.1109/25.467960},
   Key = {fds236047}
}

@article{fds235917,
   Author = {Zoltowski, MD and Qureshi, TR and Calderbank, R},
   Title = {Channel estimation for MIMO-OFDM using complementary
             codes},
   Journal = {RWS 2009 IEEE Radio and Wireless Symposium,
             Proceedings},
   Pages = {159-162},
   Publisher = {IEEE},
   Year = {2009},
   Month = {July},
   url = {http://dx.doi.org/10.1109/RWS.2009.4957309},
   Abstract = {We present a pilot-assisted method for estimating the
             frequency selective channel in a MIMO-OFDM (Multiple Input
             Multiple Output - Orthogonal Frequency Division
             Multiplexing) system. The pilot sequence is designed using
             the DFT (Discrete Fourier Transform) of the Golay
             complementary sequences. Novel exploitation of the perfect
             autocorrelation property of the Golay codes, in conjunction
             with OSTBC (Orthogonal Space-Time Block Code) based pilot
             waveform scheduling across multiple OFDM frames, facilitates
             simple separation of the channel mixtures at the receive
             antennas. The DFT length used to transform the complementary
             sequence into the frequency domain is shown to be a critical
             parameter for correctly estimating the channel.
             NMSE (Normalized Mean Squared Error) between the actual and
             the estimated channel is used to characterize the estimation
             performance. ©2009 IEEE.},
   Doi = {10.1109/RWS.2009.4957309},
   Key = {fds235917}
}
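
%% Editor's sketch (Python, illustrative; not code from the paper above). It
%% builds a +/-1 Golay complementary pair by the standard concatenation
%% recursion and checks the "perfect autocorrelation" property the abstract
%% exploits: the aperiodic autocorrelations of the pair sum to an impulse.
%% Function names and the length 2^5 = 32 are illustrative assumptions.
import numpy as np

def golay_pair(m):
    """Return a +/-1 Golay complementary pair of length 2^m."""
    a, b = np.array([1.0]), np.array([1.0])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

def aperiodic_autocorr(x):
    n = len(x)
    return np.array([np.dot(x[:n - t], x[t:]) for t in range(n)])

if __name__ == "__main__":
    a, b = golay_pair(5)                           # length-32 pair
    s = aperiodic_autocorr(a) + aperiodic_autocorr(b)
    print(s)                                       # 64 at lag 0, zeros at all other lags
%% --- end of sketch ---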

@article{fds235945,
   Author = {Applebaum, L and Howard, SD and Searle, S and Calderbank,
             R},
   Title = {Chirp sensing codes: Deterministic compressed sensing
             measurements for fast recovery},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {26},
   Number = {2},
   Pages = {283-290},
   Publisher = {Elsevier BV},
   Year = {2009},
   Month = {March},
   ISSN = {1063-5203},
   url = {http://dx.doi.org/10.1016/j.acha.2008.08.002},
   Abstract = {Compressed sensing is a novel technique to acquire sparse
             signals with few measurements. Normally, compressed sensing
             uses random projections as measurements. Here we design
             deterministic measurements and an algorithm to accomplish
             signal recovery with computational efficiency. A measurement
             matrix is designed with chirp sequences forming the columns.
             Chirps are used since an efficient method using FFTs can
             recover the parameters of a small superposition. We show
             that this type of matrix is valid as compressed sensing
             measurements. This is done by bounding the eigenvalues of
             sub-matrices, as well as an empirical comparison with random
             projections. Further, by implementing our algorithm,
             simulations show successful recovery of signals with
             sparsity levels similar to those possible by matching
             pursuit with random measurements. For sufficiently sparse
             signals, our algorithm recovers the signal with
             computational complexity O (K log K) for K measurements.
             This is a significant improvement over existing algorithms.
             Crown Copyright © 2008.},
   Doi = {10.1016/j.acha.2008.08.002},
   Key = {fds235945}
}
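
%% Editor's sketch (Python, illustrative; not the reconstruction algorithm of
%% the paper above). Under simplifying assumptions (one active chirp, no
%% noise, odd K) it shows the FFT idea the abstract alludes to: multiplying
%% y(t+1) by conj(y(t)) turns a discrete chirp exp(2*pi*i*(r*t^2 + m*t)/K)
%% into a pure tone at bin 2r mod K, after which removing the quadratic phase
%% and taking a second FFT recovers m and the coefficient. The parameters K,
%% r_true, m_true and coeff are illustrative assumptions.
import numpy as np

K = 101                                    # number of measurements (odd prime)
t = np.arange(K)

def chirp(r, m):
    return np.exp(2j * np.pi * (r * t**2 + m * t) / K) / np.sqrt(K)

# one active chirp column with an unknown coefficient
r_true, m_true, coeff = 37, 64, 2.5 - 1.0j
y = coeff * chirp(r_true, m_true)

# step 1: difference-and-FFT; the peak sits at bin 2*r mod K
z = np.roll(y, -1) * np.conj(y)
k = int(np.argmax(np.abs(np.fft.fft(z))))
r_hat = (k * pow(2, -1, K)) % K            # invert the factor of 2 mod K

# step 2: strip the quadratic phase; the FFT peak gives m and the coefficient
W = np.fft.fft(y * np.exp(-2j * np.pi * r_hat * t**2 / K))
m_hat = int(np.argmax(np.abs(W)))
coeff_hat = W[m_hat] / np.sqrt(K)

print(r_hat == r_true, m_hat == m_true, np.round(coeff_hat, 6))   # True True (2.5-1j)
%% --- end of sketch ---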

@article{fds235987,
   Author = {Applebaum, L and Bajwa, WU and Calderbank, R and Howard,
             S},
   Title = {Choir codes: Coding for full duplex interference
             management},
   Journal = {2011 49th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2011},
   Pages = {1-8},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   url = {http://dx.doi.org/10.1109/Allerton.2011.6120141},
   Abstract = {Communication networks conventionally operate with
             half-duplex methods and interference avoiding schemes to
             manage multiple transceivers. Here we consider a method in
             which nodes transmit and receive in concert to achieve full
             duplex communication without transmitter coordination. We
             build on a framework for full-duplex communication in ad hoc
             wireless networks recently proposed by Zhang, Luo and Guo.
             An individual node in the wireless network either
             transmits or it listens to transmissions from other nodes
             but it cannot do both at the same time. There might be as
             many nodes as there are MAC addresses but we assume that
             only a small subset of nodes contribute to the superposition
             received at any given node in the network. We develop
             deterministic algebraic coding methods that allow
             simultaneous communication across the entire network. We
             call such codes choir codes. Users are assigned subspaces of
             F_2^m to define their transmit and listen times. Codewords on
             these subspaces are designed and proven to adhere to bounds
             on worst-case coherence and the associated matrix spectral
             norm. This in turn provides guarantees for multi-user
             detection using convex optimization. Further, we show that
             matrices for each receiver's listening times can be related
             by permutations, thus guaranteeing fairness between
             receivers. Compared with earlier work using random codes,
             our methods have significant improvements including reduced
             decoding/detection error and non-asymptotic results.
             Simulation results verify that, as a method to manage
             interference, our scheme has significant advantages over
             seeking to eliminate or align interference through extensive
             exchange of fine-grained channel state information. © 2011
             IEEE.},
   Doi = {10.1109/Allerton.2011.6120141},
   Key = {fds235987}
}

@article{fds235962,
   Author = {Wu, Y and Calderbank, R},
   Title = {Circulant space-time codes for integration with
             beamforming},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2550-2553},
   Publisher = {IEEE},
   Year = {2010},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2010.5496288},
   Abstract = {This paper provides a framework for designing space-time
             codes to take advantage of a small number of feedback bits
             from the receiver. The new codes are based on circulant
             matrices and simple conditions are derived that guarantee
             full rate and full diversity. In the absence of feedback,
             Symbol Error Rate (SER) performance is shown to be similar
             to that of Diagonal Algebraic Space-Time (DAST) codes, both
             for Maximum Likelihood (ML) decoding and for suboptimal
             linear decoding. Decoding complexity of circulant codes is
             similar to the DAST codes and encoding is slightly less
             complex. In the presence of a small number of feedback bits
             from the receiver the circulant construction is shown to
             permit integration of space-time coding with a fixed set of
             beams by simply advancing the phase on one of the antennas.
             This integration is not possible within the DAST framework.
             Integration of space-time codes with beamforming makes it
             possible to achieve ML decoding performance with only linear
             decoding complexity or to improve upon ML performance of the
             original code. ©2010 IEEE.},
   Doi = {10.1109/ICASSP.2010.5496288},
   Key = {fds235962}
}
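
%% Editor's sketch (Python, illustrative; not code from the paper above). It
%% records the linear-algebra fact behind the low-complexity linear decoding
%% of circulant codes mentioned in the abstract: a circulant matrix is
%% diagonalized by the DFT, so applying or inverting it reduces to FFTs and
%% per-tone scalar operations. The 4x4 example and variable names are
%% illustrative assumptions.
import numpy as np

n = 4
c = np.array([1.0, 2.0, 3.0, 4.0])                 # first column of C

# circulant matrix: C[j, k] = c[(j - k) mod n]
C = np.array([[c[(j - k) % n] for k in range(n)] for j in range(n)])

F = np.fft.fft(np.eye(n))                          # unnormalized DFT matrix
U = F / np.sqrt(n)                                 # unitary DFT
Lam = np.diag(np.fft.fft(c))                       # eigenvalues = FFT of c

print(np.allclose(C, U.conj().T @ Lam @ U))        # True: C = U^H diag(fft(c)) U

# so solving C x = y is an FFT, a pointwise divide, and an inverse FFT
y = C @ np.array([1.0, -1.0, 0.5, 2.0])
x = np.fft.ifft(np.fft.fft(y) / np.fft.fft(c))
print(np.round(x.real, 6))                         # recovers [1, -1, 0.5, 2]
%% --- end of sketch ---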

@article{fds236071,
   Author = {Calderbank, R and Dickinson, B},
   Title = {CISS 2008, The 42nd Annual Conference on Information
             Sciences and Systems: Welcome},
   Journal = {CISS 2008, The 42nd Annual Conference on Information
             Sciences and Systems},
   Pages = {i-ii},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CISS.2008.4558477},
   Doi = {10.1109/CISS.2008.4558477},
   Key = {fds236071}
}

@article{fds235833,
   Author = {Calderbank, AR and Pottie, G and Seshadri, N},
   Title = {Cochannel interference suppression through time/space
             diversity},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {46},
   Number = {3},
   Pages = {922-932},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2000},
   Month = {May},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.841171},
   Abstract = {Wireless systems are subject to a time-varying and unknown a
             priori combination of cochannel interference, fading, and
             Gaussian noise. It is well known that multiple antennas can
             provide diversity in space that allows system tradeoffs
             between interference suppression and mitigation of fading.
             This paper describes how to achieve these same tradeoffs
             through diversity in time provided by channel coding. The
             mathematical description of time diversity is identical to
             that of space diversity, and what emerges is a unified
             framework for signal processing. Decoding algorithms are
             provided for repetition codes, rate 1/n convolutional codes,
             first-order Reed-Muller codes, and a new class of linear
             combination codes that provide cochannel interference
             suppression. In all cases it is possible to trade
             performance for complexity by choosing between joint
             estimation and a novel low-complexity linear canceler
             structure that treats interference as noise. This means that
             a single code can be used in a variety of system
             environments just by changing the processing in the
             receiver.},
   Doi = {10.1109/18.841171},
   Key = {fds235833}
}

@article{fds236074,
   Author = {Wu, Y and Calderbank, R},
   Title = {Code diversity in multiple antenna wireless
             communication},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1078-1082},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ISIT.2008.4595153},
   Abstract = {The standard approach to the design of individual space-time
             codes is based on optimizing diversity and coding gain. This
             geometric approach leads to remarkable examples, such as the
             Golden Code, for which the complexity of Maximum Likelihood
             (ML) decoding is considerable. Code diversity is an
             alternative approach where a small number of feedback bits
             are used to select from a family of space-time codes.
             Feedback can be combined with sub-optimal low complexity
             decoding of the component codes to match ML decoding
             performance of any individual code in the family. It can
             also be combined with ML decoding of the component codes to
             improve performance beyond ML decoding performance of any
             individual code. One method of implementing code diversity
             is the use of feedback to adapt the phase of a transmitted
             signal. Phase adaptation with the 4 × 4 Quasi-Orthogonal
             Space-Time Block Code (QOSTBC) is shown to be almost information
             lossless; that is, this form of space-time coding does not
             reduce the capacity of the underlying multiple antenna
             wireless channel. Code diversity can also be used to improve
             performance of multi-user detection by reducing interference
             between users. Phase adaptation with two Alamouti users
             makes it possible for the Zero Forcing (ZF) or decorrelating
             detector to match the performance of ML joint detection. ©
             2008 IEEE.},
   Doi = {10.1109/ISIT.2008.4595153},
   Key = {fds236074}
}

@article{fds235950,
   Author = {Wu, Y and Calderbank, R},
   Title = {Code diversity in multiple antenna wireless
             communication},
   Journal = {IEEE Journal on Selected Topics in Signal
             Processing},
   Volume = {3},
   Number = {6},
   Pages = {928-938},
   Year = {2009},
   ISSN = {1932-4553},
   url = {http://dx.doi.org/10.1109/JSTSP.2009.2035861},
   Abstract = {The standard approach to the design of individual space-time
             codes is based on optimizing diversity and coding gains.
             This geometric approach leads to remarkable examples, such
             as perfect space-time block codes (Perfect space-time block
             codes. F. Oggier , Trans. Inf. Theory, vol. 52, no. 9, pp.
             38853902, Sep. 2006), for which the complexity of
             maximum-likelihood (ML) decoding is considerable. Code
             diversity is an alternative and complementary approach where
             a small number of feedback bits are used to select from a
             family of space-time codes. Different codes lead to
             different induced channels at the receiver, where channel
             state information (CSI) is used to instruct the transmitter
             how to choose the code. This method of feedback provides
             gains associated with beamforming while minimizing the
             number of feedback bits. Thus, code diversity can be viewed
             as the integration of space-time coding with a fixed set of
             beams. It complements the standard approach to code design
             by taking advantage of different (possibly equivalent)
             realizations of a particular code design. Feedback can be
             combined with suboptimal low-complexity decoding of the
             component codes to match ML decoding performance of any
             individual code in the family. It can also be combined with
             ML decoding of the component codes to improve performance
             beyond ML decoding performance of any individual code. One
             method of implementing code diversity is the use of feedback
             to adapt the phase of a transmitted signal. The value of
             code diversity is verified in simulations on the 4 × 4
             Quasi-Orthogonal Space-Time Block Code (QOSTBC), multi-user
             detection of Alamouti signaling, and the Golden code. It
             shows that our code diversity scheme is more robust in the
             case of erroneous feedback compared with other low-rate
             feedback schemes such as transmit antenna selection and its
             variations. This paper introduces a family of full rate
             circulant codes which can be linearly decoded by Fourier
             decomposition of circulant matrices within the code
             diversity framework. © 2009 IEEE.},
   Doi = {10.1109/JSTSP.2009.2035861},
   Key = {fds235950}
}

@article{fds235810,
   Author = {Calderbank, AR and Georghiades, CN},
   Title = {Coding for the unsynchronized optical OPPM
             channel},
   Journal = {Proceedings of the 1993 IEEE International Conference on
             Communications},
   Pages = {557-561},
   Year = {1993},
   Month = {January},
   Abstract = {Random OPPM sequences result in an unrecoverable error floor
             on both the probability of erroneous synchronization and the
             probability of symbol error when only chip synchronization
             is present. It is known, however, that for a given sequence
             length M, a subset of the set of all possible sequences is
             synchronizable in the sense that in the absence of noise,
             the receiver can correctly symbol-synchronize by observing M
             or more symbol intervals. In this paper we design finite
             state machines and codes over a J-ary alphabet which produce
             sequences with the property that every subsequence of length
             L is synchronizable and introduce algorithms that utilize
             the memory in the encoded sequences to produce joint
             estimates of timing and sequences.},
   Key = {fds235810}
}

@article{fds235903,
   Author = {Sirianunpiboon, S and Calderbank, AR and Howard,
             SD},
   Title = {Cognitive decoding and the Golden code},
   Journal = {European Signal Processing Conference},
   Year = {2008},
   Month = {December},
   ISSN = {2219-5491},
   Abstract = {Space time signal processing starts with a system of linear
             equations where signals are multiplied by channel gains, and
             the standard criteria for the design of space time codes
             focus on differences between codewords at the transmitter.
             The value of algebraic constructions is to transfer
             structure (correlation) at the transmitter to structure at
             the receiver, and the focus of this paper is the induced
             channel at the receiver. We use the Golden code to explore
             the idea of introducing structure at the transmitter to
             enable low complexity decoding at the receiver. This is an
             important special case, since the Golden code is
             incorporated in the IEEE 802.16 standard, but the value of
             our approach is not limited to this example. We describe a
             cognitive decoder for the Golden code with complexity O(N²)
             that comes within 3 dB of full MAP/ML decoding. The decoder
             is cognitive in that it uses channel state information to
             choose between two algorithms in a way that is independent
             of the signal-to-noise ratio. The primary algorithm is
             interference cancellation which fails to perform well on a
             proportion of channels. We identify the channel conditions
             for which interference cancellation fails and show that for
             these channels the decoding problem effectively reduces to a
             single receive antenna decoding problem for which we have
             developed an efficient zero forcing algorithm. Previous
             hybrid approaches based on sphere decoding have cubic worst
             case complexity and employ decision rules based on condition
             number of the posterior covariance matrix. Interference
             cancellation is different in that orientation of the
             covariance matters. The cognitive decoder for the Golden
             code provides a uniform solution to different wireless
             environments (Rayleigh/Rician) that combine rich scattering
             and line of sight components. The gap between cognitive and
             full MAP/ML decoding reduces to essentially ML performance
             as the line of sight component becomes more dominant.
             copyright by EURASIP.},
   Key = {fds235903}
}

@article{fds303203,
   Author = {Chi, Y and Calderbank, R},
   Title = {Coherence-based performance guarantees of Orthogonal
             Matching Pursuit},
   Journal = {2012 50th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2012},
   Pages = {2003-2009},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   url = {http://arxiv.org/abs/1209.6267v1},
   Abstract = {In this paper, we present coherence-based performance
             guarantees of Orthogonal Matching Pursuit (OMP) for both
             support recovery and signal reconstruction of sparse signals
             when the measurements are corrupted by noise. In particular,
             two variants of OMP either with known sparsity level or with
             a stopping rule are analyzed. It is shown that if the
             measurement matrix X ∈ ℂ^{n×p} satisfies the strong coherence
             property, then with n = O(k log p), OMP will recover a
             k-sparse signal with high probability. In particular, the
             performance guarantees obtained here separate the properties
             required of the measurement matrix from the properties
             required of the signal, which depend critically on the
             minimum signal-to-noise ratio rather than the power profiles
             of the signal. We also provide performance guarantees for
             partial support recovery. Comparisons are given with other
             performance guarantees for OMP using worst-case analysis and
             the sorted one step thresholding algorithm. © 2012
             IEEE.},
   Doi = {10.1109/Allerton.2012.6483468},
   Key = {fds303203}
}
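
%% Editor's sketch (Python, illustrative; not code from the paper above). It
%% implements the textbook form of Orthogonal Matching Pursuit with a known
%% sparsity level k, the first of the two variants analyzed in the abstract:
%% greedily pick the column most correlated with the residual, then re-fit by
%% least squares on the selected support. The random Gaussian matrix, seed and
%% problem sizes are illustrative assumptions, unrelated to the strong
%% coherence property studied in the paper.
import numpy as np

def omp(X, y, k):
    """Return (support, coefficients) of a k-sparse approximation of y."""
    support, residual = [], y.copy()
    for _ in range(k):
        j = int(np.argmax(np.abs(X.conj().T @ residual)))    # best new column
        support.append(j)
        coef, *_ = np.linalg.lstsq(X[:, support], y, rcond=None)
        residual = y - X[:, support] @ coef                   # orthogonalize
    return support, coef

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n, p, k = 64, 256, 4
    X = rng.standard_normal((n, p)) / np.sqrt(n)
    beta = np.zeros(p)
    beta[rng.choice(p, k, replace=False)] = rng.standard_normal(k) + 2.0
    y = X @ beta + 0.01 * rng.standard_normal(n)
    support, coef = omp(X, y, k)
    print(support, np.round(coef, 3))
%% --- end of sketch ---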

@article{fds236077,
   Author = {Chen, M and Carson, W and Rodrigues, M and Calderbank, R and Carin,
             L},
   Title = {Communications inspired linear discriminant
             analysis},
   Journal = {Proceedings of the 29th International Conference on Machine
             Learning, ICML 2012},
   Volume = {1},
   Pages = {919-926},
   Year = {2012},
   Month = {October},
   url = {http://hdl.handle.net/10161/8956 Duke open
             access},
   Abstract = {We study the problem of supervised linear dimensionality
             reduction, taking an information-theoretic viewpoint. The
             linear projection matrix is designed by maximizing the
             mutual information between the projected signal and the
             class label. By harnessing a recent theoretical result on
             the gradient of mutual information, the above optimization
             problem can be solved directly using gradient descent,
             without requiring simplification of the objective function.
             Theoretical analysis and empirical comparison are made
             between the proposed method and two closely related methods,
             and comparisons are also made with a method in which Rényi
             entropy is used to define the mutual information (in this
             case the gradient may be computed simply, under a special
             parameter setting). Relative to these alternative
             approaches, the proposed method achieves promising results
             on real datasets. Copyright 2012 by the author(s)/owner(s).},
   Key = {fds236077}
}

@article{fds235786,
   Author = {Carson, WR and Chen, M and Rodrigues, MRD and Calderbank, R and Carin,
             L},
   Title = {Communications-inspired projection design with application
             to compressive sensing},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {5},
   Number = {4},
   Pages = {1182-1212},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2012},
   Month = {January},
   ISSN = {1936-4954},
   url = {http://hdl.handle.net/10161/8952 Duke open
             access},
   Abstract = {We consider the recovery of an underlying signal x ∈ ℂ^m
              based on projection measurements of the form y = Mx+w, where
              y ∈ ℂ^ℓ and w is measurement noise; we are interested
              in the case ℓ ≪ m. It is assumed that the signal model
              p(x) is known and that w ~ CN(w; 0, Σ_w) for known Σ_w. The
              objective is to design a projection matrix M ∈ ℂ^{ℓ×m}
              to maximize key information-theoretic quantities with
              operational significance, including the mutual information
              between the signal and the projections I(x; y) or the Rényi
              entropy of the projections h_α(y) (Shannon entropy is a
              special case). By capitalizing on explicit characterizations
              of the gradients of the information measures with respect to
              the projection matrix, where we also partially extend the
              well-known results of Palomar and Verdú from the mutual
              information to the Rényi entropy domain, we reveal the key
             operations carried out by the optimal projection designs:
             mode exposure and mode alignment. Experiments are considered
             for the case of compressive sensing (CS) applied to imagery.
             In this context, we provide a demonstration of the
             performance improvement possible through the application of
             the novel projection designs in relation to conventional
             ones, as well as justification for a fast online projection
             design method with which state-of-the-art adaptive CS signal
             recovery is achieved. © 2012 Society for Industrial and
             Applied Mathematics.},
   Doi = {10.1137/120878380},
   Key = {fds235786}
}
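
As an illustration of the gradient-based projection design described in the two entries above, the sketch below treats the special case of a Gaussian signal x ~ N(0, Σ_x) with Gaussian noise, for which I(x; y) = (1/2) log det(I + Σ_w^{-1} M Σ_x M^T) and the gradient with respect to M is (Σ_w + M Σ_x M^T)^{-1} M Σ_x. It is a toy instance under those assumptions, not the papers' general algorithm (which also covers Rényi entropies and non-Gaussian signal models).

import numpy as np

rng = np.random.default_rng(0)
m, ell = 8, 3                              # signal dimension, number of projections
Sx = np.diag(np.linspace(5.0, 0.1, m))     # signal covariance (assumed known)
Sw = 0.1 * np.eye(ell)                     # noise covariance
M = rng.standard_normal((ell, m))
M /= np.linalg.norm(M)                     # unit Frobenius-norm power budget

def mutual_info(M):
    # I(x; y) = 0.5 * log det(I + Sw^{-1} M Sx M^T) for the Gaussian case (nats)
    return 0.5 * np.linalg.slogdet(np.eye(ell) + np.linalg.solve(Sw, M @ Sx @ M.T))[1]

before = mutual_info(M)
step = 0.05
for _ in range(500):
    grad = np.linalg.solve(Sw + M @ Sx @ M.T, M @ Sx)   # gradient of I(x; y) w.r.t. M
    M = M + step * grad
    M /= np.linalg.norm(M)                 # project back onto the power constraint
after = mutual_info(M)                     # typically much larger than `before`;
                                           # rows of M move toward the leading
                                           # eigenvectors of Sx ("mode alignment")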

@article{fds235898,
   Author = {Zoltowski, MD and Qureshi, TR and Calderbank, R},
   Title = {Complementary codes based channel estimation for MIMO-OFDM
             systems},
   Journal = {46th Annual Allerton Conference on Communication, Control,
             and Computing},
   Pages = {133-138},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ALLERTON.2008.4797546},
   Abstract = {We present a pilot-assisted method for estimating the
             frequency selective channel in a MIMO-OFDM (Multiple Input
             Multiple Output - Orthogonal Frequency Division
             Multiplexing) system. The pilot sequence is designed using
             the DFT (Discrete Fourier Transform) of the Golay
             complementary sequences. Novel exploitation of the perfect
             autocorrelation property of the Golay codes, in conjunction
             with OSTBC (Orthogonal Space-Time Block Code) based pilot
             waveform scheduling across multiple OFDM frames, facilitates
             simple separation of the channel mixtures at the receive
             antennas. The DFT length used to transform the complementary
             sequence into the frequency domain is shown to be a key
             critical parameter for correctly estimating the channel.
             NMSE (Normalized Mean Squared Error) between the actual and
             the estimated channel is used to characterize the estimation
             performance. © 2008 IEEE.},
   Doi = {10.1109/ALLERTON.2008.4797546},
   Key = {fds235898}
}
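
The pilot design above relies on the defining property of Golay complementary pairs: their aperiodic autocorrelations sum to a delta. A minimal sketch using the standard length-doubling construction (the paper's DFT-domain scheduling across OFDM frames is not reproduced here).

import numpy as np

def golay_pair(m):
    """Return a Golay complementary pair of +/-1 sequences of length 2**m,
    built by the standard doubling construction a' = [a b], b' = [a -b]."""
    a, b = np.array([1.0]), np.array([1.0])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

def aperiodic_autocorr(x):
    return np.correlate(x, x, mode="full")    # lags -(n-1) .. n-1

a, b = golay_pair(5)                          # a length-32 pair
r = aperiodic_autocorr(a) + aperiodic_autocorr(b)
# All off-peak lags cancel; the peak equals 2 * len(a).
assert np.allclose(r[len(a) - 1], 2 * len(a))
assert np.allclose(np.delete(r, len(a) - 1), 0)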

@article{fds235774,
   Author = {Wang, M and Xu, W and Calderbank, R},
   Title = {Compressed sensing with corrupted participants},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {4653-4657},
   Publisher = {IEEE},
   Year = {2013},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2013.6638542},
   Abstract = {Compressed sensing (CS) theory promises one can recover
             real-valued sparse signal from a small number of linear
             measurements. Motivated by network monitoring with link
             failures, we for the first time consider the problem of
             recovering signals that contain both real-valued entries and
             corruptions, where the real entries represent transmission
             delays on normal links and the corruptions represent failed
             links. Unlike conventional CS, here a measurement is
             real-valued only if it does not include a failed link, and
             it is corrupted otherwise. We prove that O((d + 1)max(d, k)
             log n) nonadaptive measurements are enough to recover all
             n-dimensional signals that contain k nonzero real entries
             and d corruptions. We provide explicit constructions of
             measurements and recovery algorithms. We also analyze the
             performance of signal recovery when the measurements contain
             errors. © 2013 IEEE.},
   Doi = {10.1109/ICASSP.2013.6638542},
   Key = {fds235774}
}

@article{fds235963,
   Author = {Wu, Y and Chi, Y and Calderbank, R},
   Title = {Compressive blind source separation},
   Journal = {Proceedings - International Conference on Image Processing,
             ICIP},
   Pages = {89-92},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   ISSN = {1522-4880},
   url = {http://dx.doi.org/10.1109/ICIP.2010.5652624},
   Abstract = {The central goal of compressive sensing is to reconstruct a
             signal that is sparse or compressible in some basis using
             very few measurements. However reconstruction is often not
             the ultimate goal and it is of considerable interest to be
             able to deduce attributes of the signal from the
             measurements without explicitly reconstructing the full
             signal. This paper solves the blind source separation
             problem not in the high dimensional data domain, but in the
             low dimensional measurement domain. It develops a Bayesian
             inference framework that integrates hidden Markov models for
             sources with compressive measurement. Posterior
             probabilities are calculated using a Markov Chain Monte
             Carlo (MCMC) algorithm. Simulation results are provided for
             one-dimensional signals and for two-dimensional images,
             where hidden Markov tree models of the wavelet coefficients
             are considered. The integrated Bayesian framework is shown
             to outperform standard approaches where the mixtures are
             separated in the data domain. © 2010 IEEE.},
   Doi = {10.1109/ICIP.2010.5652624},
   Key = {fds235963}
}

@article{fds303199,
   Author = {Reboredo, H and Renna, F and Calderbank, R and Rodrigues,
             MRD},
   Title = {Compressive classification},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {674-678},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   url = {http://arxiv.org/abs/1302.4660v1},
   Abstract = {This paper presents fundamental limits associated with
             compressive classification of Gaussian mixture source
             models. In particular, we offer an asymptotic
             characterization of the behavior of the (upper bound to the)
             misclassification probability associated with the optimal
             Maximum-A-Posteriori (MAP) classifier that depends on
             quantities that are dual to the concepts of diversity gain
             and coding gain in multi-antenna communications. The
             diversity, which is shown to determine the rate at which the
             probability of misclassification decays in the low noise
             regime, is shown to depend on the geometry of the source,
             the geometry of the measurement system and their interplay.
             The measurement gain, which represents the counterpart of
             the coding gain, is also shown to depend on geometrical
             quantities. It is argued that the diversity order and the
             measurement gain also offer an optimization criterion to
             perform dictionary learning for compressive classification
             applications. © 2013 IEEE.},
   Doi = {10.1109/ISIT.2013.6620311},
   Key = {fds303199}
}

@article{fds236082,
   Author = {Xie, Y and Chi, Y and Applebaum, L and Calderbank,
             R},
   Title = {Compressive demodulation of mutually interfering
             signals},
   Journal = {2012 IEEE Statistical Signal Processing Workshop, SSP
             2012},
   Pages = {592-595},
   Publisher = {IEEE},
   Year = {2012},
   Month = {November},
   url = {http://dx.doi.org/10.1109/SSP.2012.6319768},
   Abstract = {The challenge of Multiuser Detection (MUD) is that of
             demodulating mutually interfering signals given that at any
             time instant the number of active users is typically small.
             The promise of compressed sensing is the demodulation of
             sparse superpositions of signature waveforms from very few
              measurements. This paper considers signature waveforms that
              are drawn from a Gabor frame. It describes a MUD
              architecture that uses subsampling to convert analog input
              to a digital signal, and then uses iterative matching
              pursuit to recover the active users. Compressive
              demodulation requires K log N samples to recover K active
             users whereas standard MUD requires N samples. The paper
             provides theoretical performance guarantees and consistent
             numerical simulations. © 2012 IEEE.},
   Doi = {10.1109/SSP.2012.6319768},
   Key = {fds236082}
}

@article{fds235773,
   Author = {Renna, F and Rodrigues, MRD and Chen, M and Calderbank, R and Carin,
             L},
   Title = {Compressive sensing for incoherent imaging systems with
             optical constraints},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {5484-5488},
   Publisher = {IEEE},
   Year = {2013},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2013.6638712},
   Abstract = {We consider the problem of linear projection design for
             incoherent optical imaging systems. We propose a
             computationally efficient method to obtain effective
             measurement kernels that satisfy the physical constraints
             imposed by an optical system, starting first from arbitrary
             kernels, including those that satisfy a less demanding power
             constraint. Performance is measured in terms of mutual
             information between the source input and the projection
             measurement, as well as reconstruction error for real world
             images. A clear improvement in the quality of image
             reconstructions is shown with respect to both random and
             adaptive projection designs in the literature. © 2013
             IEEE.},
   Doi = {10.1109/ICASSP.2013.6638712},
   Key = {fds235773}
}

@article{fds235746,
   Author = {Bajwa, WU and Duarte, MF and Calderbank, R},
   Title = {Conditioning of Random Block Subdictionaries With
             Applications to Block-Sparse Recovery and
             Regression},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {61},
   Number = {7},
   Pages = {4060-4079},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {July},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2015.2429632},
   Abstract = {The linear model, in which a set of observations is assumed
             to be given by a linear combination of columns of a matrix
             (often termed a dictionary), has long been the mainstay of
             the statistics and signal processing literature. One
             particular challenge for inference under linear models is
             understanding the conditions on the dictionary under which
             reliable inference is possible. This challenge has attracted
             renewed attention in recent years, since many modern
             inference problems (e.g., high-dimensional statistics and
             compressed sensing) deal with the underdetermined setting,
             in which the number of observations is much smaller than the
             number of columns in the dictionary. This paper makes
             several contributions for this setting when the set of
             observations is given by a linear combination of a small
             number of groups of columns of the dictionary, termed the
             block-sparse case. First, it specifies conditions on the
             dictionary under which most block submatrices of the
             dictionary (often termed block subdictionaries) are well
             conditioned. This result is fundamentally different from
             prior work on block-sparse inference because: 1) it provides
             conditions that can be explicitly computed in polynomial
             time; 2) the given conditions translate into near-optimal
             scaling of the number of columns of the block
             subdictionaries as a function of the number of observations
             for a large class of dictionaries; and 3) it suggests that
             the spectral norm, rather than the column/block coherences
             of the dictionary, fundamentally limits the scaling of
             dimensions of the well-conditioned block subdictionaries.
             Second, in order to help understand the significance of this
             result in the context of block-sparse inference, this paper
             investigates the problems of block-sparse recovery and
             block-sparse regression in underdetermined settings. In both
             of these problems, this paper utilizes its result concerning
             conditioning of block subdictionaries and establishes that
             near-optimal block-sparse recovery and block-sparse
             regression is possible for a large class of dictionaries as
             long as the dictionary satisfies easily computable
             conditions and the coefficients describing the linear
             combination of groups of columns can be modeled through a
             mild statistical prior. Third, the paper reports extensive
             numerical experiments that highlight the effects of
             different measures of the dictionary in block-sparse
             inference problems.},
   Doi = {10.1109/TIT.2015.2429632},
   Key = {fds235746}
}
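
The conditions in the paper above are stated in terms of polynomial-time computable quantities of the dictionary, such as coherence measures and the spectral norm. The sketch below computes one common pair of block-coherence measures and the spectral norm for a dictionary split into equal-size blocks; the paper's exact conditions and thresholds are not reproduced.

import numpy as np

def block_coherences(D, block_size):
    """Return (intra, inter): the largest deviation of a block's Gram matrix
    from the identity, and the largest spectral norm of a cross-Gram between
    two distinct blocks."""
    n, p = D.shape
    blocks = [D[:, i:i + block_size] for i in range(0, p, block_size)]
    intra = max(np.linalg.norm(B.T @ B - np.eye(block_size), 2) for B in blocks)
    inter = max(np.linalg.norm(blocks[i].T @ blocks[j], 2)
                for i in range(len(blocks))
                for j in range(len(blocks)) if i != j)
    return intra, inter

rng = np.random.default_rng(1)
D = rng.standard_normal((64, 256))
D /= np.linalg.norm(D, axis=0)               # unit-norm columns
intra, inter = block_coherences(D, block_size=4)
spectral_norm = np.linalg.norm(D, 2)         # the quantity the paper highlights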

@article{fds235991,
   Author = {Li, Y and Papachristodoulou, A and Chiang, M and Calderbank,
             AR},
   Title = {Congestion control and its stability in networks with delay
             sensitive traffic},
   Journal = {Computer Networks},
   Volume = {55},
   Number = {1},
   Pages = {20-32},
   Publisher = {Elsevier BV},
   Year = {2011},
   Month = {January},
   ISSN = {1389-1286},
   url = {http://dx.doi.org/10.1016/j.comnet.2010.07.001},
   Abstract = {We consider congestion control in a network with delay
             sensitive/ insensitive traffic, modelled by adding explicit
             delay terms to the utility function measuring user's
             happiness on the Quality of Service (QoS). A new Network
             Utility Maximization (NUM) problem is formulated and solved
             in a decentralized way via appropriate algorithms
             implemented at the users (primal) and/or links (dual). For
             the dual algorithm, delay-independent and delay-dependent
             stability conditions are derived when propagation delays are
             taken into account. A system with voice and data traffic is
             considered as example and the properties of the congestion
             control algorithm are assessed. © 2010 Elsevier B.V. All
             rights reserved.},
   Doi = {10.1016/j.comnet.2010.07.001},
   Key = {fds235991}
}
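
A minimal sketch of the dual (link-price) algorithm underlying the decentralized NUM solution discussed above, using plain log utilities on a two-link, three-source toy network; the delay-dependent utility terms studied in the paper are not included.

import numpy as np

R = np.array([[1.0, 1.0, 0.0],    # routing: link l carries source s if R[l, s] = 1
              [0.0, 1.0, 1.0]])
c = np.array([1.0, 2.0])          # link capacities
prices = np.ones(2)               # dual variables, one per link
step = 0.05

for _ in range(2000):
    q = R.T @ prices                                   # aggregate price on each source's path
    x = np.minimum(1.0 / np.maximum(q, 1e-3), 10.0)    # source update: argmax of log(x_s) - q_s x_s
    prices = np.maximum(prices + step * (R @ x - c), 0.0)   # link update: dual (sub)gradient step

# x approaches the rates maximizing sum_s log(x_s) subject to R @ x <= c.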

@article{fds235874,
   Author = {Li, Y and Chiang, M and Calderbank, AR},
   Title = {Congestion control in networks with delay sensitive
             traffic},
   Journal = {GLOBECOM - IEEE Global Telecommunications
             Conference},
   Pages = {2746-2751},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/GLOCOM.2007.520},
   Abstract = {We study the congestion control in a network where the users
             may have different types of traffic, such as the traffic
             with fixed/variable rate, delay sensitive/insensitive, etc.
             To reflect the different requirements on delay by different
             applications, explicit terms of delay are added to the
             utility function. We analyze the essential dynamics for the
             network utility maximization (NUM) with the new utility
             functions. Compared with the basic NUM where the utility
             function is only a function of rate, the dynamics for link
             price is now related to the delay term added in the utility
             function. The analysis is applied to the system with voice
             and data traffic, and distributed algorithms are proposed to
             allocate the resource such that the utility of voice and
             data is jointly optimized. The numerical results show that
             by the new price dynamics, we can accomplish optimal
             congestion control for users with delay sensitive/insensitive
             traffic in a network. In particular, in a network with data
             and voice traffic with priority queueing, the algorithm can
             lead the network to achieve higher quality of voice traffic
             and higher throughput of data traffic, with the sacrifice of
             the packet delay of data traffic. © 2007
             IEEE.},
   Doi = {10.1109/GLOCOM.2007.520},
   Key = {fds235874}
}

@article{fds235930,
   Author = {Calderbank, R and Casazza, PG and Heinecke, A and Kutyniok, G and Pezeshki, A},
   Title = {Constructing fusion frames with desired parameters},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {7446},
   Publisher = {SPIE},
   Year = {2009},
   Month = {November},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.825782},
   Abstract = {A fusion frame is a frame-like collection of subspaces in a
             Hilbert space. It generalizes the concept of a frame system
             for signal representation. In this paper, we study the
             existence and construction of fusion frames. We first
             introduce two general methods, namely the spatial complement
             and the Naimark complement, for constructing a new fusion
             frame from a given fusion frame. We then establish existence
             conditions for fusion frames with desired properties. In
              particular, we address the following question: Given M, N, m
              ∈ ℕ and {λ_j}_{j=1}^M, does there exist a fusion frame in
              ℝ^M with N subspaces of dimension m for which
              {λ_j}_{j=1}^M are the eigenvalues of the associated fusion
              frame operator? We address this problem by providing an
              algorithm which computes such a fusion frame for almost any
              collection of parameters M, N, m ∈ ℕ and {λ_j}_{j=1}^M.
              Moreover, we show
             how this procedure can be applied, if subspaces are to be
             added to a given fusion frame to force it to become tight.©
             2009 SPIE.},
   Doi = {10.1117/12.825782},
   Key = {fds235930}
}
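
For reference, the fusion frame operator referred to above is S = Σ_i v_i^2 P_i, where P_i is the orthogonal projection onto the i-th subspace, and the fusion frame is tight exactly when S is a multiple of the identity. A minimal numerical check with random subspaces and unit weights (this is not the paper's construction algorithm).

import numpy as np

rng = np.random.default_rng(2)
M, N, m = 6, 8, 2                    # ambient dim, number of subspaces, subspace dim
S = np.zeros((M, M))
for _ in range(N):
    B = np.linalg.qr(rng.standard_normal((M, m)))[0]   # orthonormal basis of a subspace
    S += B @ B.T                                       # projection, weight v_i = 1
eigenvalues = np.linalg.eigvalsh(S)  # for a tight fusion frame these all equal N*m/M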

@article{fds235845,
   Author = {Calderbank, AR and Diggavi, S and Das, S and Al-Dhahir,
             N},
   Title = {Construction and analysis of a new 4 × 4 orthogonal
             space-time block code},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {310},
   Year = {2004},
   Month = {October},
   Abstract = {The construction and analysis of a nonlinear 4×4 full-rate,
              full-diversity orthogonal space-time block code are
              discussed. The main aspect is the design, analysis, and
              low-complexity decoding of a full-rate, full-diversity
              orthogonal STBC for four transmit antennas that is
              constructed by means of 2×2 arrays over the quaternions.
              The structure of the code is a generalization of the 2×2
              Alamouti code, to which it reduces if the quaternions in
              the code are replaced by complex numbers. The results show
              that the PSK signalling code has full diversity, while the
              QPSK signalling code has no constellation
              expansion.},
   Key = {fds235845}
}
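
The 2×2 Alamouti block that the quaternionic 4×4 construction above generalizes satisfies X^H X = (|s1|^2 + |s2|^2) I, which is what yields full diversity and simple symbol-by-symbol decoding. A quick numerical check (the 4×4 quaternionic code itself is not reproduced):

import numpy as np

def alamouti(s1, s2):
    """The 2x2 Alamouti space-time block."""
    return np.array([[s1, s2],
                     [-np.conj(s2), np.conj(s1)]])

s1, s2 = np.exp(1j * 0.3), np.exp(1j * 1.1)      # unit-energy PSK symbols
X = alamouti(s1, s2)
assert np.allclose(X.conj().T @ X, (abs(s1)**2 + abs(s2)**2) * np.eye(2))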

@article{fds235822,
   Author = {Calderbank, AR and Mcguire, G},
   Title = {Construction of a (64, 2^37, 12) Code via Galois
             Rings},
   Journal = {Designs, Codes, and Cryptography},
   Volume = {10},
   Number = {2},
   Pages = {157-165},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1008240319733},
   Abstract = {Certain nonlinear binary codes contain more codewords than
             any comparable linear code presently known. These include
              the Kerdock and Preparata codes, which exist for all lengths
              4^m ≥ 16. At length 16 they coincide to give the
              Nordstrom-Robinson code. This paper constructs a nonlinear
              (64, 2^37, 12) code as the binary image, under the Gray map,
             of an extended cyclic code defined over the integers modulo
             4 using Galois rings. The Nordstrom-Robinson code is defined
             in this same way, and like the Nordstrom-Robinson code, the
             new code is better than any linear code that is presently
             known.},
   Doi = {10.1023/A:1008240319733},
   Key = {fds235822}
}
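
The Gray map used above sends each ℤ4 symbol to a bit pair (0→00, 1→01, 2→11, 3→10) and is an isometry from Lee distance on ℤ4^n to Hamming distance on F_2^{2n}. A minimal sketch:

import numpy as np

GRAY = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}

def gray_image(codeword_z4):
    """Binary image of a Z4 codeword under the Gray map."""
    return np.array([bit for s in codeword_z4 for bit in GRAY[s]])

def lee_weight(codeword_z4):
    return sum(min(s, 4 - s) for s in codeword_z4)

c = [0, 1, 2, 3, 2]
assert gray_image(c).sum() == lee_weight(c)   # the Gray map preserves weight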

@article{fds235972,
   Author = {Calderbank, R and Howard, S and Jafarpour, S},
   Title = {Construction of a large class of deterministic sensing
             matrices that satisfy a statistical isometry
             property},
   Journal = {IEEE Journal on Selected Topics in Signal
             Processing},
   Volume = {4},
   Number = {2},
   Pages = {358-374},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {April},
   ISSN = {1932-4553},
   url = {http://dx.doi.org/10.1109/JSTSP.2010.2043161},
   Abstract = {Compressed Sensing aims to capture attributes of κ-sparse
             signals using very few measurements. In the standard
             compressed sensing paradigm, the N × C measurement matrix
             Φ is required to act as a near isometry on the set of all
             κ-sparse signals (restricted isometry property or RIP).
             Although it is known that certain probabilistic processes
             generate N × C matrices that satisfy RIP with high
             probability, there is no practical algorithm for verifying
             whether a given sensing matrix Φ has this property, crucial
             for the feasibility of the standard recovery algorithms. In
             contrast, this paper provides simple criteria that guarantee
             that a deterministic sensing matrix satisfying these
             criteria acts as a near isometry on an overwhelming majority
             of κ-sparse signals; in particular, most such signals have
             a unique representation in the measurement domain.
             Probability still plays a critical role, but it enters the
             signal model rather than the construction of the sensing
             matrix. An essential element in our construction is that we
             require the columns of the sensing matrix to form a group
             under pointwise multiplication. The construction allows
             recovery methods for which the expected performance is
             sub-linear in C, and only quadratic in N, as compared to the
             super-linear complexity in C of the Basis Pursuit or
             Matching Pursuit algorithms; the focus on expected
             performance is more typical of mainstream signal processing
             than the worst case analysis that prevails in standard
             compressed sensing. Our framework encompasses many families
             of deterministic sensing matrices, including those formed
              from discrete chirps, Delsarte-Goethals codes, and extended
             BCH codes. © IEEE.},
   Doi = {10.1109/JSTSP.2010.2043161},
   Key = {fds235972}
}
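
One family covered by the construction above is the discrete chirp dictionary: for a prime N, the N × N^2 matrix with columns φ_{a,b}[t] = exp(2πi(a t^2 + b t)/N)/√N. Up to normalization the columns are closed under pointwise multiplication, which is the group structure the paper requires, and the worst-case coherence is 1/√N. A small numerical check (illustrative only):

import numpy as np

N = 7                                             # a small prime
t = np.arange(N)

def chirp(a, b):
    return np.exp(2j * np.pi * (a * t**2 + b * t) / N) / np.sqrt(N)

Phi = np.column_stack([chirp(a, b) for a in range(N) for b in range(N)])

# Group property: the pointwise product of two chirps is again a chirp.
prod = chirp(1, 2) * chirp(3, 4) * np.sqrt(N)     # renormalize the product
assert np.allclose(prod, chirp(4, 6))

# Worst-case coherence between distinct columns is 1/sqrt(N).
G = np.abs(Phi.conj().T @ Phi)
np.fill_diagonal(G, 0)
assert np.isclose(G.max(), 1 / np.sqrt(N))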

@article{fds235932,
   Author = {Wu, Y and Calderbank, R},
   Title = {Construction of high rate super-orthogonal space-time block
             codes},
   Journal = {IEEE International Conference on Communications},
   Publisher = {IEEE},
   Year = {2009},
   Month = {November},
   ISSN = {0536-1486},
   url = {http://dx.doi.org/10.1109/ICC.2009.5198848},
   Abstract = {It is standard practice to integrate outer trellis codes
             with inner space-time block codes to increase coding gain,
             but the drawback is a decrease in rate. Jafarkhani and
             Seshadri [1] have introduced an alternative method of
             combining multiple inner orthogonal space-time codes with
             outer trellis codes that both preserves rate and increases
             coding gain. However their work is limited to orthogonal
             codes, for which the achievable rate is typically low. This
             paper presents a method of achieving higher transmission
             rates by integrating higher rate non-orthogonal space with
             outer trellis codes, and new methods are introduced to avoid
             catastrophic codes. The method is presented with reference
             to the particular example of the Silver Code, but it applies
             to all multiplexed orthogonal designs and to more general
             codes. ©2009 IEEE.},
   Doi = {10.1109/ICC.2009.5198848},
   Key = {fds235932}
}

@article{fds236016,
   Author = {Herro, MA and Telang, V and Calderbank, AR},
   Title = {Construction of trellis-decodable error-correcting line
             codes},
   Volume = {25},
   Number = {13},
   Pages = {63-64},
   Year = {1988},
   Month = {December},
   Abstract = {Summary form only given, as follows. The design of balanced
             error-correcting codes has received a lot of attention in
             recent literature. Besides their error-control capability,
             these codes also have power spectral densities that make
             them attractive for use on the fiber optic channel and for
             data storage on magnetic tape. Since these codes are
             balanced, the number of ones in every code word equals the
             number of zeros. This property guarantees a null at DC in
             the power spectral densities of these codes. The authors
             show ways of constructing single error-correcting balanced
              codes with d_min = 4. They construct code words by a
              two-layered method. They first define a set of balanced
              symbols consisting of a sequence of zeros and ones (with the
              number of ones equal to the number of zeros). Thus any
              sequence of these symbols will be balanced. The code words
              are constructed by concatenating these symbols in a way that
              guarantees the minimum distance of the code to be 4, i.e.,
              d_min = 4.},
   Key = {fds236016}
}

@article{fds235900,
   Author = {Li, Y and Li, Z and Chiang, M and Calderbank, AR},
   Title = {Content-aware distortion-fair video streaming in
             networks},
   Journal = {GLOBECOM - IEEE Global Telecommunications
             Conference},
   Pages = {1768-1773},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   url = {http://dx.doi.org/10.1109/GLOCOM.2008.ECP.342},
   Abstract = {Internet is experiencing an explosive growth of video
             traffic. Given the limited network bandwidth resources, how
             to provide Internet users with good video playback quality
             is a key problem. For video clips competing bandwidth, we
             propose an approach of Content-Aware distortion-Fair (CAF)
             video delivery scheme, which is assumed to be aware of the
             characteristics of video frames and ensures max-min
             distortion fair sharing among video flows. Different from
             bandwidth fair sharing, CAF targets video playback quality
             fairness for the reason that users care about video quality
             rather than bandwidth. The proposed CAF approach does not
             need an analytical rate-distortion function which is
             difficult to estimate, but instead, it uses the explicit
             distortion of every frame which is induced by frame drop.
             Our CAF approach is fast and practical with content-aware
             cooperation. Experimental results show that the proposed
             approach yields better quality of service when the network
             is congested compared with the approach not rate-distortion
             optimized, and it makes competing video clips help each
             other to get fair playback quality. © 2008
             IEEE.},
   Doi = {10.1109/GLOCOM.2008.ECP.342},
   Key = {fds235900}
}

@article{fds235989,
   Author = {Dang, W and Pezeshki, A and Howard, S and Moran, W and Calderbank,
             R},
   Title = {Coordinating complementary waveforms for sidelobe
             suppression},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {2096-2100},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2011.6190398},
   Abstract = {We present a general method for constructing radar transmit
             pulse trains and receive filters for which the radar
             point-spread function in delay and Doppler, given by the
             cross-ambiguity function of the transmit pulse train and the
             pulse train used in the receive filter, is essentially free
             of range sidelobes inside a Doppler interval around the
             zero-Doppler axis. The transmit pulse train is constructed
             by coordinating the transmission of a pair of Golay
             complementary waveforms across time according to zeros and
             ones in a binary sequence P. The pulse train used to filter
             the received signal is constructed in a similar way, in
             terms of sequencing the Golay waveforms, but each waveform
             in the pulse train is weighted by an element from another
             sequence Q. We show that a spectrum jointly determined by P
             and Q sequences controls the size of the range sidelobes of
             the cross-ambiguity function and by properly choosing P and
             Q we can clear out the range sidelobes inside a Doppler
             interval around the zero-Doppler axis. The joint design of P
             and Q enables a tradeoff between the order of the spectral
             null for range sidelobe suppression and the signal-to-noise
             ratio at the receiver output. We establish this trade-off
             and derive a necessary and sufficient condition for the
             construction of P and Q sequences that produce a null of a
             desired order. © 2011 IEEE.},
   Doi = {10.1109/ACSSC.2011.6190398},
   Key = {fds235989}
}

@article{fds236027,
   Author = {Forney, GD and Calderbank, AR},
   Title = {Coset Codes for Partial Response Channels; or, Coset Codes
             with Spectral Nulls},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {35},
   Number = {5},
   Pages = {925-943},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1989},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.42211},
   Abstract = {Known coset codes are adapted for use on partial response
             channels or to generate signals with spectral nulls. By the
             use of methods of coset precoding and running digital sum
             feedback, any desired tradeoff can be achieved between the
             power and spectra of the relevant sequences, up to the
             optimum tradeoff possible. A fundamental theorem specifying
             this optimum tradeoff is given. A maximum likelihood
             sequence estimator (MLSE) decoder for the original code may
             be used for the adapted code, and such a decoder then
             attains the minimum squared distance of the original code.
             These methods sometimes generate codes with greater minimum
             squared distance than that of the original code; this
             distance can be attained by augmented decoders, although
             such decoders inherently require long decoding delays and
             may be subjected to quasi-catastrophic error propagation. We
             conclude that, at least for sequences supporting large
             numbers of bits per symbol, coset codes can be adapted to
             achieve effectively the same performance and complexity on
             partial response channels, or for sequences with spectral
             nulls, as they do in the ordinary memoryless case. © 1989
             IEEE},
   Doi = {10.1109/18.42211},
   Key = {fds236027}
}

@article{fds236020,
   Author = {Forney, GD and Calderbank, AR},
   Title = {COSET codes for partial response; or, codes with spectral
             nulls},
   Volume = {25},
   Number = {13},
   Pages = {141},
   Year = {1988},
   Month = {December},
   Abstract = {Summary form only given, as follows. Known coset codes are
             adapted for use on partial response channels or to generate
             signals with spectral nulls. By use of methods of coset
             precoding and running digital sum feedback, any desired
             tradeoff can be achieved between the power and spectra of
             the relevant sequences, up to the optimum tradeoff possible.
             A fundamental theorem specifying this optimum tradeoff is
             given. An MLSE decoder for the original code may be used for
             the adapted code, and such a decoder then attains the
             minimum squared distance of the original code. These methods
             sometimes generate codes with greater minimum squared
             distance than that of the original code, which can be
             attained by augmented decoders, although such decoders
             inherently require long decoding delays and may be subject
             to quasi-catastrophic error propagation. The general
             conclusion is that, at least for sequences that support
              large numbers of bits per symbol, one can obtain the same
             kinds of performance and complexity on partial response
             channels, or for sequences with spectral nulls, as can be
             obtained with the same coset codes in the ordinary
             memoryless case.},
   Key = {fds236020}
}

@article{fds235778,
   Author = {Jacobvitz, AN and Calderbank, R and Sorin, DJ},
   Title = {Coset coding to extend the lifetime of memory},
   Journal = {Proceedings - International Symposium on High-Performance
             Computer Architecture},
   Pages = {222-233},
   Publisher = {IEEE},
   Year = {2013},
   Month = {July},
   ISSN = {1530-0897},
   url = {http://dx.doi.org/10.1109/HPCA.2013.6522321},
   Abstract = {Some recent memory technologies, including phase change
             memory (PCM), have lifetime reliabilities that are affected
             by write operations. We propose the use of coset coding to
             extend the lifetimes of these memories. The key idea of
             coset coding is that it performs a one-to-many mapping from
             each dataword to a coset of vectors, and having multiple
             possible vectors provides the flexibility to choose the
             vector to write that optimizes lifetime. Our technique,
             FlipMin, uses coset coding and, for each write, selects the
             vector that minimizes the number of bits that must flip. We
             also show how FlipMin can be synergistically combined with
             the ability to tolerate bit erasures. Thus, our techniques
             help to prevent bits from wearing out and can then tolerate
             those bits that do wear out. © 2013 IEEE.},
   Doi = {10.1109/HPCA.2013.6522321},
   Key = {fds235778}
}
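
A minimal sketch of the write/read idea described above: each dataword indexes a coset of a small linear code, the write picks the coset member needing the fewest bit flips relative to the currently stored word, and the read identifies the coset. The toy [4, 2] code and the exhaustive coset lookup below are illustrative assumptions, not the paper's FlipMin code.

import numpy as np
from itertools import product

G = np.array([[1, 0, 1, 1],          # generator of a toy [4, 2] binary code C
              [0, 1, 0, 1]])
codewords = np.array([(m @ G) % 2 for m in product([0, 1], repeat=2)])

def flipmin_write(dataword, stored):
    """Pick the member of the coset [d, 0, 0] + C that needs the fewest bit
    flips relative to the word currently stored."""
    coset = (np.concatenate([dataword, [0, 0]]) + codewords) % 2
    flips = np.count_nonzero(coset != stored, axis=1)
    return coset[np.argmin(flips)]

def flipmin_read(stored):
    """Recover the dataword by identifying which coset the stored word is in."""
    for d in product([0, 1], repeat=2):
        shifted = (stored - np.concatenate([d, [0, 0]])) % 2
        if any(np.array_equal(shifted, c) for c in codewords):
            return np.array(d)

stored = np.array([1, 1, 0, 1])
written = flipmin_write(np.array([0, 1]), stored)   # the low-wear coset member
assert np.array_equal(flipmin_read(written), [0, 1])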

@article{fds235807,
   Author = {Calderbank, AR},
   Title = {Covering bounds for codes},
   Journal = {Journal of Combinatorial Theory, Series A},
   Volume = {60},
   Number = {1},
   Pages = {117-122},
   Publisher = {Elsevier BV},
   Year = {1992},
   Month = {January},
   ISSN = {0097-3165},
   url = {http://dx.doi.org/10.1016/0097-3165(92)90041-R},
   Abstract = {Given an [n, k]R code C, and a subcode H of C with
              codimension j, define S_H^j(C) = max_{x ∈ F_2^n} {d(x, H) +
              d(x, C \ H)}, and define the j-norm S^j(C) to be the minimum
              value of S_H^j(C) as H ranges over the subcodes with
              codimension j. We prove that if k(n + 1) > R(R + 1), then
              S^1(C) ≤ 2R + 1. © 1992.},
   Doi = {10.1016/0097-3165(92)90041-R},
   Key = {fds235807}
}

@article{fds236033,
   Author = {Calderbank, AR},
   Title = {Covering machines},
   Journal = {Discrete Mathematics},
   Volume = {106-107},
   Number = {C},
   Pages = {105-110},
   Publisher = {Elsevier BV},
   Year = {1992},
   Month = {September},
   ISSN = {0012-365X},
   url = {http://dx.doi.org/10.1016/0012-365X(92)90536-O},
   Abstract = {We construct 2-state covering machines from binary linear
             codes with a sufficiently rich subcode structure. The goal
             is to trade multiple covering properties for increased
             redundancy. We explain why the expected covering properties
             of covering machines should be superior to those of codes
             obtained by iterating the ADS construction. ©
             1992.},
   Doi = {10.1016/0012-365X(92)90536-O},
   Key = {fds236033}
}

@article{fds235811,
   Author = {Calderbank, AR and Fishburn, PC and Rabinovich,
             A},
   Title = {Covering properties of convolutional codes and associated
             lattices},
   Journal = {Proceedings of the 1993 IEEE International Symposium on
             Information Theory},
   Pages = {141},
   Year = {1993},
   Month = {January},
   Abstract = {This talk describes methods for analyzing the expected and
             worst-case performance of sequence based methods of
             quantization. We suppose that the quantization algorithm is
             dynamic programming, where the current step depends on a
             vector of path metrics, which we call a metric function. Our
             principal objective is a concise representation of these
             metric functions and the possible trajectories of the
             dynamic programming algorithm. We shall consider
             quantization of equiprobable binary data using a
             convolutional code. Here the additive group of the code
             splits the set of metric functions into a finite collection
             of subsets. The subsets form the vertices of a directed
             graph, where edges are labelled by aggregate incremental
              increases in mean squared error (mse). Paths in this graph
              correspond both to trajectories of the Viterbi algorithm,
              and to cosets of the code. For the rate 1/2 convolutional
              code [1 + D^2, 1 + D + D^2], this graph has only 9 vertices.
              In this case it is particularly simple to calculate per
              dimension expected and worst case mse, and performance is
              similar to the binary [24, 12] Golay code. Our methods also
              apply to quantization of arbitrary symmetric probability
             distributions on [0, 1] using convolutional codes. For the
             uniform distribution on [0, 1], the expected mse is the
             second moment of the 'Voronoi region' of an infinite
             dimensional lattice determined by the convolutional code. It
             may also be interpreted as an increase in the reliability of
             a transmission scheme obtained by nonequiprobable
             signalling. For certain convolutional codes we obtain a
             formula for expected mse that depends only on the
             distribution of differences for a single pair of path
             metrics.},
   Key = {fds235811}
}

@article{fds236043,
   Author = {Calderbank, AR and Fishburn, PC},
   Title = {Covering Properties of Convolutional Codes and Associated
             Lattices},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {41},
   Number = {3},
   Pages = {732-746},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.382019},
   Abstract = {The paper describes Markov methods for analyzing the
             expected and worst case performance of sequence-based
             methods of quantization. We suppose that the quantization
             algorithm is dynamic programming, where the current step
             depends on a vector of path metrics, which we call a metric
             function. Our principal objective is a concise
             representation of these metric functions and the possible
             trajectories of the dynamic programming algorithm. We shall
             consider quantization of equiprobable binary data using a
             convolutional code. Here the additive group of the code
             splits the set of metric functions into a finite collection
             of subsets. The subsets form the vertices of a directed
             graph, where edges are labeled by aggregate incremental
             increases in mean squared error (mse). Paths in this graph
             correspond both to trajectories of the Viterbi algorithm,
             and to cosets of the code. For the rate 1/2 convolutional
              code [1 + D^2, 1 + D + D^2], this graph has only nine
             vertices. In this case it is particularly simple to
             calculate per dimension expected and worst case mse, and
             performance is slightly better than the binary [24, 12]
             Golay code. Our methods also apply to quantization of
             arbitrary symmetric probability distributions on [0, 1]
             using convolutional codes. For the uniform distribution on
             [0, 1], the expected mse is the second moment of the
             “Voronoi region” of an infinite-dimensional lattice
             determined by the convolutional code. It may also be
             interpreted as an increase in the reliability of a
             transmission scheme obtained by nonequiprobable signaling.
             For certain convolutional codes we obtain a formula for
             expected mse that depends only on the distribution of
             differences for a single pair of path metrics. © 1995
             IEEE},
   Doi = {10.1109/18.382019},
   Key = {fds236043}
}

@article{fds235800,
   Author = {Calderbank, AR},
   Title = {Covering radius and the chromatic number of Kneser
             graphs},
   Journal = {Journal of Combinatorial Theory, Series A},
   Volume = {54},
   Number = {1},
   Pages = {129-131},
   Publisher = {Elsevier BV},
   Year = {1990},
   Month = {January},
   ISSN = {0097-3165},
   url = {http://dx.doi.org/10.1016/0097-3165(90)90011-K},
   Abstract = {Let C be a binary linear code with covering radius R and let
              C_0 be a subcode of C with codimension i. We prove that the
              covering radius R_0 of C_0 satisfies R_0 ≤ 2R + 2i - 1, by
              setting up a graph coloring problem involving Kneser graphs.
              © 1990.},
   Doi = {10.1016/0097-3165(90)90011-K},
   Key = {fds235800}
}

@article{fds235985,
   Author = {Calderbank, R and Jafarpour, S and Nastasescu,
             M},
   Title = {Covering radius and the Restricted Isometry
             Property},
   Journal = {2011 IEEE Information Theory Workshop, ITW
             2011},
   Pages = {558-562},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ITW.2011.6089564},
   Abstract = {The Restricted Isometry Property or RIP introduced by Candes
             and Tao requires an n × p dictionary to act as a near
             isometry on all k-sparse signals. This paper provides a very
             simple condition under which a dictionary Φ (C) obtained by
             exponentiating codewords from a binary linear code C
             satisfies the RIP with high probability. The method is to
             bound the difference between the dictionary Φ(C) and a
             second dictionary A generated by a random Bernoulli process
             which is known to satisfy the RIP with high probability. The
              difference A − Φ(C) is controlled by the covering radius of
             C, a fundamental parameter that is bounded above by the
             number of weights in the dual code C ⊥ (the external
             distance of C). The main result complements a more
             sophisticated asymptotic analysis by Babadi and Tarokh of
             the distribution of eigenvalues of random submatrices of
             Φ(C). In this analysis, divergence from the distribution
             corresponding to the full Bernoulli matrix depends on a
             different fundamental parameter of C, namely the minimum
             distance of the dual code C ⊥. © 2011
             IEEE.},
   Doi = {10.1109/ITW.2011.6089564},
   Key = {fds235985}
}
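
The dictionary Φ(C) above is obtained by exponentiating codewords: each codeword c of a binary linear code C contributes a column ((-1)^{c_1}, ..., (-1)^{c_n})/√n. A minimal sketch with an arbitrary toy generator matrix:

import numpy as np
from itertools import product

G = np.array([[1, 0, 0, 1, 1, 0, 1],          # a toy [7, 3] binary code
              [0, 1, 0, 1, 0, 1, 1],
              [0, 0, 1, 0, 1, 1, 1]])
n = G.shape[1]
codewords = np.array([(m @ G) % 2 for m in product([0, 1], repeat=G.shape[0])])
Phi = ((-1.0) ** codewords).T / np.sqrt(n)    # n x |C| dictionary
# Column inner products depend only on codeword weights, so the Gram matrix of
# Phi is determined by the weight distribution of C.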

@article{fds235877,
   Author = {Dusad, S and Diggavi, SN and Calderbank, AR},
   Title = {Cross layer utility of diversity embedded
             codes},
   Journal = {2006 IEEE Conference on Information Sciences and Systems,
             CISS 2006 - Proceedings},
   Pages = {795-800},
   Publisher = {IEEE},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1109/CISS.2006.286575},
   Abstract = {Diversity embedded codes are opportunistic codes designed
             for the wireless fading channel. They are high-rate
             space-time codes which have embedded within them a
             high-diversity (low rate) code. In this paper, we focus on
             the application of diversity embedded code to transmission
             of images over wireless channels. We match the diversity
             embedded code with a hierarchical (layered) source coder and
             quantify the image quality as compared to a single-layer
             space-time code. These preliminary results suggest that
             diversity embedded codes might be the right physical layer
             functionality required for wireless multimedia transmission.
             © 2006 IEEE.},
   Doi = {10.1109/CISS.2006.286575},
   Key = {fds235877}
}

@article{fds236049,
   Author = {Calderbank, AR and McGuire, G and Kumar, PV and Helleseth,
             T},
   Title = {Cyclic codes over ℤ4, locator polynomials, and Newton's
             identities},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {42},
   Number = {1},
   Pages = {217-226},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1996},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.481791},
   Abstract = {Certain nonlinear binary codes contain more code-words than
             any comparable linear code presently known. These include
             the Kerdock and Preparata codes that can be very simply
             constructed as binary images, under the Gray map, of linear
             codes over ℤ4 that are defined by means of parity checks
             involving Galois rings. This paper describes how Fourier
             transforms on Galois rings and elementary symmetric
             functions can be used to derive lower bounds on the minimum
             distance of such codes. These methods and techniques from
             algebraic geometry are applied to find the exact minimum
              distance of a family of ℤ4-linear codes with length 2^m (m
              odd) and size 2^(2^(m+1)-5m-2). The Gray image of the code
              of length 32 is the best (64, 2^37) code that is presently
              known. This paper also determines the exact minimum Lee
              distance of the linear codes over ℤ4 that are obtained
              from the extended binary two- and three-error-correcting BCH
              codes by Hensel lifting. The Gray image of the Hensel lift
              of the three-error-correcting BCH code of length 32 is the
              best (64, 2^32) code that is presently known. This code also
             determines an extremal 32-dimensional even unimodular
             lattice. © 1996 IEEE.},
   Doi = {10.1109/18.481791},
   Key = {fds236049}
}

@article{fds326754,
   Author = {Qiu, Q and Thompson, A and Calderbank, R and Sapiro,
             G},
   Title = {Data Representation Using the Weyl Transform},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {7},
   Pages = {1844-1853},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TSP.2015.2505661},
   Abstract = {The Weyl transform is introduced as a rich framework for
             data representation. Transform coefficients are connected to
             the Walsh-Hadamard transform of multiscale autocorrelations,
             and different forms of dyadic periodicity in a signal are
             shown to appear as different features in its Weyl
             coefficients. The Weyl transform has a high degree of
             symmetry with respect to a large group of multiscale
             transformations, which allows compact yet discriminative
             representations to be obtained by pooling coefficients. The
             effectiveness of the Weyl transform is demonstrated through
             the example of textured image classification.},
   Doi = {10.1109/TSP.2015.2505661},
   Key = {fds326754}
}
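
One way to organize the connection stated above, up to sign and normalization conventions that may differ from the paper: for each dyadic shift a, take the Walsh-Hadamard transform of the product sequence x[t]·x[t⊕a]. The sketch below illustrates that pipeline only; the exact coefficient indexing used in the paper is not reproduced.

import numpy as np
from scipy.linalg import hadamard

def weyl_like_coefficients(x):
    """For each dyadic (XOR) shift a, return the Walsh-Hadamard transform of
    the product sequence x[t] * x[t XOR a]; the signal length must be 2**m."""
    n = len(x)
    H = hadamard(n)
    idx = np.arange(n)
    return np.array([H @ (x * x[idx ^ a]) for a in range(n)])   # n x n array

x = np.random.default_rng(3).standard_normal(16)
W = weyl_like_coefficients(x)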

@article{fds303198,
   Author = {Goparaju, S and Rouayheb, SE and Calderbank, R and Poor,
             HV},
   Title = {Data secrecy in distributed storage systems under exact
             repair},
   Journal = {2013 International Symposium on Network Coding, NetCod
             2013},
   Year = {2013},
   Month = {September},
   url = {http://arxiv.org/abs/1304.3156v2},
   Abstract = {The problem of securing data against eavesdropping in
             distributed storage systems is studied. The focus is on
             systems that use linear codes and implement exact repair to
             recover from node failures. The maximum file size that can
             be stored securely is determined for systems in which all
             the available nodes help in repair (i.e., repair degree d =
             n-1, where n is the total number of nodes) and for any
             number of compromised nodes. Similar results in the
             literature are restricted to the case of at most two
             compromised nodes. Moreover, new explicit upper bounds are
             given on the maximum secure file size for systems with d < n
             - 1. The key ingredients for the contribution of this paper
             are new results on subspace intersection for the data
             downloaded during repair. The new bounds imply the
             interesting fact that the maximum amount of data that can be
             stored securely decreases exponentially with the number of
             compromised nodes. Whether this exponential decrease is
             fundamental or is a consequence of the exactness and
             linearity constraints remains an open question. © 2013
             IEEE.},
   Doi = {10.1109/NetCod.2013.6570831},
   Key = {fds303198}
}

@article{fds343654,
   Author = {Qiu, Q and Cheng, X and Calderbank, R and Sapiro,
             G},
   Title = {DCFNet: Deep Neural Network with Decomposed Convolutional
             Filters},
   Journal = {35th International Conference on Machine Learning, ICML
             2018},
   Volume = {9},
   Pages = {6687-6696},
   Year = {2018},
   Month = {January},
   Abstract = {Filters in a Convolutional Neural Network (CNN) contain
             model parameters learned from enormous amounts of data. In
             this paper, we suggest to decompose convolutional filters in
             CNN as a truncated expansion with pre-fixed bases, namely
             the Decomposed Convolutional Filters network (DCFNet), where
             the expansion coefficients remain learned from data. Such a
             structure not only reduces the number of trainable
             parameters and computation, but also imposes filter
             regularity by bases truncation. Through extensive
             experiments, we consistently observe that DCFNet maintains
             accuracy for image classification tasks with a significant
             reduction of model parameters, particularly with
             Fourier-Bessel (FB) bases, and even with random bases.
             Theoretically, we analyze the representation stability of
             DCFNet with respect to input variations, and prove
             representation stability under generic assumptions on the
             expansion coefficients. The analysis is consistent with the
             empirical observations.},
   Key = {fds343654}
}
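
A minimal NumPy sketch of the decomposition described above: each convolutional filter is a truncated expansion over K pre-fixed bases, so only K coefficients per filter are learned. Random bases are used here purely for illustration (the paper favors Fourier-Bessel bases), and no training loop is shown.

import numpy as np
from scipy.signal import convolve2d

rng = np.random.default_rng(4)
K, size = 3, 5                                # number of bases, filter size
bases = rng.standard_normal((K, size, size))  # pre-fixed bases (not trained)
coeffs = rng.standard_normal(K)               # learned parameters: K per filter
filt = np.tensordot(coeffs, bases, axes=1)    # filter = sum_k coeffs[k] * bases[k]

image = rng.standard_normal((32, 32))
feature_map = convolve2d(image, filt, mode="same")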

@article{fds235761,
   Author = {Wang, L and Carlson, DE and Rodrigues, M and Wilcox, D and Calderbank,
             R and Carin, L},
   Title = {Designed measurements for vector count data},
   Journal = {Advances in neural information processing
             systems},
   Pages = {1142-1150},
   Year = {2013},
   ISSN = {1049-5258},
   Abstract = {We consider design of linear projection measurements for a
             vector Poisson signal model. The projections are performed
              on the vector Poisson rate, X ∈ ℝ_+^n, and the observed
              data are a vector of counts, Y ∈ ℤ_+^m. The projection
              matrix is designed by maximizing mutual information between
              Y and X, I(Y; X). When there is a latent class label C ∈
              {1, ..., L} associated with X, we consider the mutual
              information with respect to Y and C, I(Y; C). New analytic
              expressions for the gradient of I(Y; X) and I(Y; C) are
              presented, with the gradient taken with respect to the
              measurement matrix. Connections are made to the more widely
              studied Gaussian measurement model. Example results are
              presented for compressive topic modeling of a document
              corpus (word counting), and hyperspectral compressive
             sensing for chemical classification (photon
             counting).},
   Key = {fds235761}
}

@article{fds235970,
   Author = {Singh, A and Nowak, R and Calderbank, R},
   Title = {Detecting weak but hierarchically-structured patterns in
             networks},
   Journal = {Journal of Machine Learning Research},
   Volume = {9},
   Pages = {749-756},
   Year = {2010},
   Month = {December},
   ISSN = {1532-4435},
   Abstract = {The ability to detect weak distributed activation patterns
             in networks is critical to several applications, such as
             identifying the onset of anomalous activity or incipient
             congestion in the Internet, or faint traces of a biochemical
             spread by a sensor network. This is a challenging problem
             since weak distributed patterns can be invisible in per node
             statistics as well as a global network-wide aggregate. Most
             prior work considers situations in which the
             activation/non-activation of each node is statistically
             independent, but this is unrealistic in many problems. In
             this paper, we consider structured patterns arising from
             statistical dependencies in the activation process. Our
             contributions are three-fold. First, we propose a
             sparsifying transform that succinctly represents structured
             activation patterns that conform to a hierarchical
             dependency graph. Second, we establish that the proposed
             transform facilitates detection of very weak activation
             patterns that cannot be detected with existing methods.
             Third, we show that the structure of the hierarchical
             dependency graph governing the activation process, and hence
             the network transform, can be learnt from very few
             (logarithmic in network size) independent snapshots of
             network activity. Copyright 2010 by the authors.},
   Key = {fds235970}
}
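
%% Note: a toy illustration (assumptions mine) of the sparsifying-transform idea
%% above: an activation that is constant over one branch of a hierarchy is weak in
%% every per-node value, yet has only a couple of nonzero coefficients under a
%% tree-structured (here, standard Haar) transform.

# Toy sketch; the paper learns the dependency tree, here it is fixed in advance.
import numpy as np

def haar_matrix(n):
    """Orthonormal Haar transform matrix for n a power of two."""
    if n == 1:
        return np.array([[1.0]])
    h = haar_matrix(n // 2)
    top = np.kron(h, [1.0, 1.0])                   # averaging rows
    bottom = np.kron(np.eye(n // 2), [1.0, -1.0])  # differencing rows
    m = np.vstack([top, bottom])
    return m / np.sqrt((m ** 2).sum(axis=1, keepdims=True))

n = 16
pattern = np.zeros(n)
pattern[:8] = 0.3                                  # weak activation on one branch
coeffs = haar_matrix(n) @ pattern
print("nonzero per-node entries:", int((pattern != 0).sum()))
print("nonzero Haar coefficients:", int((np.abs(coeffs) > 1e-9).sum()))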

@article{fds235983,
   Author = {Applebaum, L and Bajwa, WU and Calderbank, AR and Haupt, J and Nowak,
             R},
   Title = {Deterministic pilot sequences for sparse channel estimation
             in OFDM systems},
   Journal = {17th DSP 2011 International Conference on Digital Signal
             Processing, Proceedings},
   Publisher = {IEEE},
   Year = {2011},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ICDSP.2011.6005024},
   Abstract = {This paper examines the problem of multipath channel
             estimation in single-antenna orthogonal frequency division
             multiplexing (OFDM) systems. In particular, we study the
             problem of pilot assisted channel estimation in wideband
             OFDM systems, where the time-domain (discrete) channel is
             approximately sparse. Existing works on this topic
             established that techniques from the compressed sensing
             literature can yield accurate channel estimates using a
             relatively small number of pilot tones, provided the pilots
             are selected randomly. Here, we describe a general purpose
             procedure for deterministic selection of pilot tones to be
             used for channel estimation, and establish guarantees for
             channel estimation accuracy using these sequences along with
             recovery techniques from the compressed sensing literature.
             Simulation results are presented to demonstrate the
             effectiveness of the proposed procedure in practice. © 2011
             IEEE.},
   Doi = {10.1109/ICDSP.2011.6005024},
   Key = {fds235983}
}
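
%% Note: a minimal sketch of sparse channel estimation from a few pilot tones.
%% Pilot positions here are drawn at random purely for illustration; the paper's
%% contribution is a deterministic pilot selection with comparable recovery
%% guarantees. Recovery uses orthogonal matching pursuit (OMP).

# Illustrative sketch; dimensions, pilot placement and noise level are assumptions.
import numpy as np

rng = np.random.default_rng(0)
N, L, S, P = 256, 64, 4, 32               # FFT size, delay spread, sparsity, pilots

h = np.zeros(L, dtype=complex)            # sparse time-domain channel
taps = rng.choice(L, S, replace=False)
h[taps] = rng.standard_normal(S) + 1j * rng.standard_normal(S)

pilots = np.sort(rng.choice(N, P, replace=False))
F = np.exp(-2j * np.pi * np.outer(pilots, np.arange(L)) / N)   # partial DFT matrix
y = F @ h + 0.01 * (rng.standard_normal(P) + 1j * rng.standard_normal(P))

def omp(A, y, k):
    """Greedy OMP: pick k columns of A that best explain y."""
    residual, support = y.copy(), []
    for _ in range(k):
        support.append(int(np.argmax(np.abs(A.conj().T @ residual))))
        x_s, *_ = np.linalg.lstsq(A[:, support], y, rcond=None)
        residual = y - A[:, support] @ x_s
    x = np.zeros(A.shape[1], dtype=complex)
    x[support] = x_s
    return x

h_hat = omp(F, y, S)
print("relative error:", np.linalg.norm(h - h_hat) / np.linalg.norm(h))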

@article{fds235836,
   Author = {Diggavi, SN and Al-Dhahir, N and Stamoulis, A and Calderbank,
             AR},
   Title = {Differential space-time coding for frequency-selective
             channels},
   Journal = {IEEE Communications Letters},
   Volume = {6},
   Number = {6},
   Pages = {253-255},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2002},
   Month = {June},
   ISSN = {1089-7798},
   url = {http://dx.doi.org/10.1109/LCOMM.2002.1010872},
   Abstract = {In this letter we introduce two space-time transmission
             schemes which allow full-rate and full-diversity noncoherent
             communications using two transmit antennas over fading
             frequency-selective channels. The first scheme operates in
             the frequency domain where it combines differential Alamouti
             space-time block-coding (STBC) with OFDM. The second scheme
             operates in the time domain and employs differential
             time-reversal STBC to guarantee blind channel
             identifiability without the need for temporal oversampling
             or multiple receive antennas.},
   Doi = {10.1109/LCOMM.2002.1010872},
   Key = {fds235836}
}

@article{fds235752,
   Author = {Nokleby, M and Rodrigues, M and Calderbank, R},
   Title = {Discrimination on the Grassmann manifold: Fundamental limits
             of subspace classifiers},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {3012-3016},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.2014.6875387},
   Abstract = {Repurposing tools and intuitions from Shannon theory, we
             derive fundamental limits on the reliable classification of
             high-dimensional signals from low-dimensional features. We
             focus on the classification of linear and affine subspaces
             and suppose the features to be noisy linear projections.
             Leveraging a syntactic equivalence of discrimination between
             subspaces and communications over vector wireless channels,
             we derive asymptotic bounds on classifier performance.
             First, we define the classification capacity, which
             characterizes necessary and sufficient relationships between
             the signal dimension, the number of features, and the number
             of classes to be discriminated, as all three quantities
             approach infinity. Second, we define the
              diversity-discrimination tradeoff, which characterizes
             relationships between the number of classes and the
             misclassification probability as the signal-to-noise ratio
             approaches infinity. We derive inner and outer bounds on
             these measures, revealing precise relationships between
             signal dimension and classifier performance. © 2014
             IEEE.},
   Doi = {10.1109/ISIT.2014.6875387},
   Key = {fds235752}
}

@article{fds235748,
   Author = {Nokleby, M and Rodrigues, M and Calderbank, R},
   Title = {Discrimination on the Grassmann Manifold: Fundamental Limits
             of Subspace Classifiers},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {61},
   Number = {4},
   Pages = {2133-2147},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {April},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2015.2407368},
   Abstract = {We derive fundamental limits on the reliable classification
             of linear and affine subspaces from noisy, linear features.
             Drawing an analogy between discrimination among subspaces
             and communication over vector wireless channels, we define
             two Shannon-inspired characterizations of asymptotic
             classifier performance. First, we define the classification
             capacity, which characterizes the necessary and sufficient
             conditions for vanishing misclassification probability as
             the signal dimension, the number of features, and the number
             of subspaces to be discriminated all approach infinity.
             Second, we define the diversity-discrimination tradeoff,
             which, by analogy with the diversity-multiplexing tradeoff
             of fading vector channels, characterizes relationships
             between the number of discernible subspaces and the
             misclassification probability as the feature noise power
             approaches zero. We derive upper and lower bounds on these
             quantities which are tight in many regimes. Numerical
             results, including a face recognition application, validate
             the results in practice.},
   Doi = {10.1109/TIT.2015.2407368},
   Key = {fds235748}
}
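
%% Note: a small numerical sketch (setup assumptions mine) of the classification
%% problem studied above: decide which of several low-dimensional subspaces
%% generated a signal, observed only through noisy linear features, by picking
%% the projected subspace with the smallest residual.

# Illustrative sketch; dimensions and noise level are arbitrary.
import numpy as np

rng = np.random.default_rng(1)
n, d, m, L = 100, 3, 12, 8               # ambient dim, subspace dim, features, classes

subspaces = [np.linalg.qr(rng.standard_normal((n, d)))[0] for _ in range(L)]
A = rng.standard_normal((m, n)) / np.sqrt(m)        # feature (projection) matrix

def classify(y, A, subspaces):
    # Nearest projected subspace: minimize the residual of y onto A @ U_i.
    errs = [np.linalg.norm(y - (A @ U) @ np.linalg.lstsq(A @ U, y, rcond=None)[0])
            for U in subspaces]
    return int(np.argmin(errs))

correct = 0
for _ in range(200):
    c = rng.integers(L)
    x = subspaces[c] @ rng.standard_normal(d)       # signal drawn from subspace c
    y = A @ x + 0.05 * rng.standard_normal(m)       # noisy linear features
    correct += classify(y, A, subspaces) == c
print("empirical accuracy:", correct / 200)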

@article{fds235831,
   Author = {Jana, R and Al-Dhahir, N and Calderbank, R},
   Title = {Distance spectrum computation for equalized MIMO multipath
             fading channels},
   Journal = {2000 IEEE Wireless Communications and Networking
             Conference},
   Pages = {293-297},
   Year = {2000},
   Month = {December},
   Abstract = {In this paper we estimate bit error probability bounds for
             finite-length delay-optimised multi-input multi-output
             (MIMO) equalizers. These equalizers shorten the impulse
             response memory of frequency-selective MIMO channels by
             minimizing the average energy of the error sequence between
             the equalized MIMO channel impulse response and the target
             impulse response. We answer an important question in this
             paper namely, how much asymptotic loss in SNR do we expect
             as a result of this shortening? A partial distance spectrum
              for a 2 × 2 MIMO channel is evaluated with and without
             channel shortening equalizers. The union bound is then used
             to upper bound the bit error probability. Similarly, the
             lower bound is computed from the squared minimum Euclidean
             distance. Numerical results show that the expected loss is
              on the order of 2.5 dB for realistic wireless channel
             environments.},
   Key = {fds235831}
}
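
%% Note: a schematic union-bound calculation in the spirit of the abstract above.
%% The distance spectrum below is hypothetical (the paper evaluates it for the
%% equalized 2 x 2 MIMO channel); the point is only how a partial spectrum yields
%% upper and lower bounds on bit error probability via the Gaussian Q-function.

# Illustrative numbers only; not the paper's computed spectrum.
import numpy as np
from math import erfc, sqrt

Q = lambda x: 0.5 * erfc(x / sqrt(2.0))

# Hypothetical partial spectrum: squared Euclidean distance -> multiplicity.
spectrum = {4.0: 2, 8.0: 6, 12.0: 12}
d2min = min(spectrum)

for snr_db in (6, 10, 14):
    snr = 10 ** (snr_db / 10)
    upper = sum(mult * Q(sqrt(d2 * snr / 2)) for d2, mult in spectrum.items())
    lower = Q(sqrt(d2min * snr / 2))      # single minimum-distance term
    print(f"{snr_db} dB: {lower:.2e} <= Pb <= {upper:.2e}")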

@article{fds235850,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {Distributed algorithms for optimal rate-reliability tradeoff
             in networks},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2005},
   Pages = {2246-2250},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.2005.1523747},
   Abstract = {The current framework of network utility maximization for
             distributed rate allocation assumes fixed channel code
             rates. However, by adapting the physical layer channel
             coding, different rate-reliability tradeoffs can be achieved
             on each link and for each end user. Consider a network where
             each user has a utility function that depends on both signal
             quality and data rate, and each link may provide a 'fatter'
             ('thinner') information 'pipe' by allowing a higher (lower)
             decoding error probability. We propose two distributed,
             pricing-based algorithms to attain optimal rate-reliability
             tradeoff, with an interpretation that each user provides its
             willingness to pay for reliability to the network and the
             network feeds back congestion prices to users. The proposed
             algorithms converge to a tradeoff point between rate and
             reliability, which is proved to be globally optimal for
             codes with sufficiently large codeword lengths and user
             utilities with sufficiently negative curvatures.},
   Doi = {10.1109/ISIT.2005.1523747},
   Key = {fds235850}
}
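
%% Note: a toy price-based (dual decomposition) rate-allocation loop in the spirit
%% of the distributed algorithms above. The rate-reliability coupling of the paper
%% is omitted; this only illustrates the mechanism of users responding to link
%% congestion prices fed back by the network.

# Toy sketch with log utilities; topology and step size are assumptions.
import numpy as np

R = np.array([[1.0, 1.0, 0.0],        # routing matrix: R[l, s] = 1 if user s uses link l
              [0.0, 1.0, 1.0]])
capacity = np.array([1.0, 2.0])
prices = np.zeros(2)
step = 0.05

for _ in range(500):
    # Each user s maximizes log(x_s) - x_s * (sum of prices along its path),
    # whose maximizer is x_s = 1 / path_price.
    path_price = R.T @ prices
    x = np.minimum(1.0 / np.maximum(path_price, 1e-6), 10.0)
    # Links raise prices where demand exceeds capacity (projected subgradient step).
    prices = np.maximum(prices + step * (R @ x - capacity), 0.0)

print("rates:", np.round(x, 3), "link loads:", np.round(R @ x, 3))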

@article{fds235899,
   Author = {Dusad, S and Diggavi, SN and Al-Dhahir, N and Calderbank,
             AR},
   Title = {Diversity embedded codes: Theory and practice},
   Journal = {IEEE Journal on Selected Topics in Signal
             Processing},
   Volume = {2},
   Number = {2},
   Pages = {202-219},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {December},
   ISSN = {1932-4553},
   url = {http://dx.doi.org/10.1109/JSTSP.2008.923817},
   Abstract = {Diversity embedded codes are high-rate space-time codes that
             have a high-diversity code embedded within them. They allow
             a form of communication where the high-rate code
             opportunistically takes advantage of good channel
             realizations while the embedded high-diversity code provides
             guarantees that at least part of the information is received
             reliably. Over the past few years, code designs and
             fundamental limits of performance for such codes have been
             developed. In this paper, we review these ideas by giving
             the developments in a unified framework. In particular, we
             present both the coding technique as well as
             information-theoretic bounds in the context of Intersymbol
             Interference (ISI) channels. We investigate the systems
             implications of diversity embedded codes by examining value
             to network utility maximization, unequal error protection
             for wireless transmission, rate opportunism and packet delay
             optimization. © 2008 IEEE.},
   Doi = {10.1109/JSTSP.2008.923817},
   Key = {fds235899}
}

@article{fds235891,
   Author = {Diggavi, SN and Calderbank, AR and Dusad, S and Al-Dhahir,
             N},
   Title = {Diversity embedded space-time codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {54},
   Number = {1},
   Pages = {33-50},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {January},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2007.911280},
   Abstract = {Rate and diversity impose a fundamental tradeoff in wireless
             communication. High-rate space-time codes come at a cost of
             lower reliability (diversity), and high reliability
             (diversity) implies a lower rate. However, wireless networks
             need to support applications with very different
             quality-of-service (QoS) requirements, and it is natural to
             ask what characteristics should be built into the physical
             layer link in order to accommodate them. In this paper, we
             design high-rate space-time codes that have a high-diversity
             code embedded within them. This allows a form of
             communication where the high-rate code opportunistically
             takes advantage of good channel realizations while the
             embedded high-diversity code provides guarantees that at
              least part of the information is received reliably. We
             provide constructions of linear and nonlinear codes for a
             fixed transmit alphabet constraint. The nonlinear
             constructions are a natural generalization to wireless
             channels of multilevel codes developed for the additive
             white Gaussian noise (AWGN) channel that are matched to
             binary partitions of quadrature amplitude modulation (QAM)
             and phase-shift keying (PSK) constellations. The importance
             of set-partitioning to code design for the wireless channel
             is that it provides a mechanism for translating constraints
             in the binary domain into lower bounds on diversity
             protection in the complex domain. We investigate the systems
             implications of embedded diversity codes by examining value
             to unequal error protection, rate opportunism, and packet
             delay optimization. These applications demonstrate that
             diversity-embedded codes have the potential to outperform
             traditional single-layer codes in moderate signal-to-noise
             (SNR) regimes. © 2008 IEEE.},
   Doi = {10.1109/TIT.2007.911280},
   Key = {fds235891}
}

@article{fds235879,
   Author = {Sirianunpiboon, S and Howard, SD and Calderbank,
             AR},
   Title = {Diversity gains across line of sight and rich scattering
             environments from space-polarization-time
             codes},
   Journal = {Proceedings of the 2007 IEEE Information Theory Workshop on
             Information Theory for Wireless Networks,
             ITW},
   Pages = {1-5},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ITWITWN.2007.4318020},
   Abstract = {Space-time codes built out of Alamouti components have been
             adopted in wireless standards such as UMTS, IEEE 802.11n and
             IEEE 802.16 where they facilitate higher data rates through
             multiplexing of parallel data streams and the addition of
             two or more antennas at the receiver that perform
             interference cancellation. This paper provides new
             theoretical insight into an algorithm for interference
             cancellation through a Bayesian analysis that expresses
             performance as a function of SNR in terms of the "angles"
             between different space-time coded data streams. Our
             approach provides insights into the coupling of channel
             coding to spatial and polarization degrees of freedom.
             ©2007 IEEE.},
   Doi = {10.1109/ITWITWN.2007.4318020},
   Key = {fds235879}
}

@article{fds235742,
   Author = {Naguib, AF and Calderbank, AR},
   Title = {Diversity in wireless systems},
   Volume = {9780521851053},
   Pages = {44-65},
   Publisher = {Cambridge University Press},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1017/CBO9780511616815.004},
   Abstract = {Introduction The main impairment in wireless channels is
             fading or random fluctuation of the signal level. This
             signal fluctuation happens across time, frequency, and
             space. Diversity techniques provide the receiver with
             multiple independent looks at the signal to improve
             reception. Each one of those independent looks is considered
             a diversity branch. The probability that all diversity
             branches will fade at the same time goes down as the number
             of branches increases. Hence, with a high probability, there
             will be at least one branch or link with a good signal such
             that the transmitted data can be detected reliably. Wireless
             channels are, in general, characterized by
             frequency-selective multipath propagation, Doppler-induced
              time-selective fading, and space-selective fading. An emitted
             signal propagating through the wireless channel is reflected
             and scattered from a large number of scatterers, thereby
             arriving at the receiver through different paths and hence
             arriving at different times. This results in the time
             dispersion of the transmitted signal. A measure of this
             dispersion is called the channel delay spread Tmax. The
              coherence bandwidth of the channel Bc ≈ 1/Tmax measures
             the frequency bandwidth over which the propagation channel
             remains correlated. Therefore, a propagation channel with a
             small delay spread will have a large coherence bandwidth,
             i.e., the channel frequency response will remain correlated
             over a large bandwidth, and vice versa. In addition,
             transmitter and receiver mobility as well as changes in the
             propagation medium induce time variations in the propagation
             channel.},
   Doi = {10.1017/CBO9780511616815.004},
   Key = {fds235742}
}

@article{fds235841,
   Author = {Diggavi, SN and Al-Dhahir, N and Calderbank, AR},
   Title = {Diversity-Embedded Space-Time Codes},
   Journal = {Conference Record / IEEE Global Telecommunications
             Conference},
   Volume = {4},
   Pages = {1909-1914},
   Year = {2003},
   Month = {December},
   Abstract = {Rate and diversity impose a fundamental trade-off in
             space-time coding. High-rate space-time codes come at a cost
             of lower diversity, and high reliability (diversity) implies
             a lower rate. In this paper we explore a different point of
             view where we design high-rate space-time codes that have a
             high-diversity code embedded within them. This allows a form
             of communication where the high-rate code opportunistically
             takes advantage of good channel realizations whereas the
             embedded high-diversity code ensures that at least part of
             the information is received reliably. We explore this point
             of view with design issues, along with some preliminary
             progress on code constructions and some information-theoretic
             considerations.},
   Key = {fds235841}
}

@article{fds235881,
   Author = {Suvorova, S and Howard, S and Moran, B and Calderbank, R and Pezeshki,
             A},
   Title = {Doppler resilience, Reed-Müller codes and complementary
             waveforms},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {1839-1843},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2007.4487553},
   Abstract = {While the use of complementary waveforms has been considered
             as a technique for providing essentially perfect range
             sidelobe performance in radar systems, its lack of
             resilience to Doppler is often cited as a reason not to
             deploy it. This work describes and examines techniques both
             for providing Doppler resilience as well as tailoring
             Doppler performance to specific aims. The Doppler
             performance can be varied by suitably changing the order of
             transmission of multiple sets of complementary waveforms. We
             propose a method which improves Doppler performance
             significantly in specific Doppler ranges by arranging the
             transmission of multiple copies of complementary waveforms
             according to a suitable choice from the first order
             Reed-Müller codes. We provide both a theoretical analysis
             and computer simulations of the Doppler response of waveform
             sequences constructed in this way. © 2007
             IEEE.},
   Doi = {10.1109/ACSSC.2007.4487553},
   Key = {fds235881}
}

@article{fds235880,
   Author = {Pezeshki, A and Calderbank, R and Howard, SD and Moran,
             W},
   Title = {Doppler resilient Golay complementary pairs for
             radar},
   Journal = {IEEE Workshop on Statistical Signal Processing
             Proceedings},
   Pages = {483-487},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/SSP.2007.4301305},
   Abstract = {We present a systematic way of constructing a Doppler
             resilient sequence of Golay complementary waveforms for
             radar, for which the composite ambiguity function maintains
             ideal shape at small Doppler shifts. The idea is to
             determine a sequence of Golay pairs that annihilates the
             low-order terms of the Taylor expansion of the composite
             ambiguity function. The Prouhet-Thue-Morse sequence plays a
             key role in the construction of Doppler resilient sequences
             of Golay pairs. We extend this construction to multiple
             dimensions. In particular, we consider radar polarimetry,
             where the dimensions are realized by two orthogonal
             polarizations. We determine a sequence of two-by-two
             Alamouti matrices, where the entries involve Golay pairs and
             for which the matrix-valued composite ambiguity function
             vanishes at small Doppler shifts. ©2007
             IEEE.},
   Doi = {10.1109/SSP.2007.4301305},
   Key = {fds235880}
}

@article{fds235892,
   Author = {Pezeshki, A and Calderbank, AR and Moran, W and Howard,
             SD},
   Title = {Doppler resilient Golay complementary waveforms},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {54},
   Number = {9},
   Pages = {4254-4266},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {September},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2008.928292},
   Abstract = {We describe a method of constructing a sequence (pulse
             train) of phase-coded waveforms, for which the ambiguity
             function is free of range sidelobes along modest Doppler
             shifts. The constituent waveforms are Golay complementary
             waveforms which have ideal ambiguity along the zero Doppler
             axis but are sensitive to nonzero Doppler shifts. We extend
             this construction to multiple dimensions, in particular to
             radar polarimetry, where the two dimensions are realized by
             orthogonal polarizations. Here we determine a sequence of
             two-by-two Alamouti matrices where the entries involve Golay
             pairs and for which the range sidelobes associated with a
             matrix-valued ambiguity function vanish at modest Doppler
             shifts. The Prouhet-Thue-Morse sequence plays a key role in
             the construction of Doppler resilient sequences of Golay
             complementary waveforms. © 2008 IEEE.},
   Doi = {10.1109/TIT.2008.928292},
   Key = {fds235892}
}
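
%% Note: a small numerical sketch of the construction described above: transmit a
%% Golay complementary pair across a pulse train ordered by the Prouhet-Thue-Morse
%% (PTM) sequence and compare range sidelobes of the Doppler-weighted composite
%% autocorrelation against naive alternation. Pulse-train length and the Doppler
%% value are arbitrary choices for illustration.

# Illustrative sketch; zero-Doppler complementarity and PTM ordering are the point.
import numpy as np

def golay_pair(m):
    """Standard recursive construction of a length-2^m Golay complementary pair."""
    a, b = np.array([1.0]), np.array([1.0])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

def composite_autocorr(order, theta, a, b):
    """Sum of pulse autocorrelations weighted by the per-pulse Doppler phase e^{j n theta}."""
    acf = lambda s: np.correlate(s, s, mode="full")
    return sum(np.exp(1j * n * theta) * (acf(a) if bit == 0 else acf(b))
               for n, bit in enumerate(order))

a, b = golay_pair(6)                              # length-64 Golay pair
ptm = [bin(n).count("1") % 2 for n in range(8)]   # 0 1 1 0 1 0 0 1
alt = [n % 2 for n in range(8)]                   # plain alternation

theta = 0.05                                      # small Doppler shift (rad/pulse)
for name, order in (("alternating", alt), ("PTM", ptm)):
    r = composite_autocorr(order, theta, a, b)
    sidelobe = np.abs(np.delete(r, len(a) - 1)).max()   # exclude the zero-delay peak
    print(f"{name}: peak range sidelobe = {sidelobe:.4f}")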

@article{fds236059,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {Double Circulant Codes over ℤ4 and even
             Unimodular Lattices},
   Journal = {Journal of Algebraic Combinatorics},
   Volume = {6},
   Number = {2},
   Pages = {119-131},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1008639004036},
   Abstract = {With the help of some new results about weight enumerators
             of self-dual codes over ℤ4 we investigate a class of
             double circulant codes over ℤ4, one of which leads to an
             extremal even unimodular 40-dimensional lattice. It is
             conjectured that there should be "Nine more constructions of
             the Leech lattice".},
   Doi = {10.1023/A:1008639004036},
   Key = {fds236059}
}
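
%% Note: a toy illustration (not one of the paper's specific codes) of the double
%% circulant construction over Z4: a generator matrix [ I | A ] with A circulant,
%% and a brute-force tabulation of Lee weights of the resulting codewords.

# Illustrative sketch; the first row of the circulant block is arbitrary.
import numpy as np
from itertools import product

def circulant(row):
    return np.array([np.roll(row, k) for k in range(len(row))])

n = 4
first_row = [1, 2, 3, 1]                          # arbitrary row over Z4
G = np.hstack([np.eye(n, dtype=int), circulant(first_row)]) % 4

lee = np.array([0, 1, 2, 1])                      # Lee weights of 0, 1, 2, 3 in Z4
weight_counts = {}
for u in product(range(4), repeat=n):             # all 4^n information vectors
    c = (np.array(u) @ G) % 4
    w = int(lee[c].sum())
    weight_counts[w] = weight_counts.get(w, 0) + 1
print(sorted(weight_counts.items()))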

@article{fds326883,
   Author = {Thompson, A and Robles, FE and Wilson, JW and Deb, S and Calderbank, R and Warren, WS},
   Title = {Dual-wavelength pump-probe microscopy analysis of melanin
             composition.},
   Journal = {Scientific reports},
   Volume = {6},
   Pages = {36871},
   Year = {2016},
   Month = {November},
   url = {http://dx.doi.org/10.1038/srep36871},
   Abstract = {Pump-probe microscopy is an emerging technique that provides
             detailed chemical information of absorbers with
             sub-micrometer spatial resolution. Recent work has shown
             that the pump-probe signals from melanin in human skin
             cancers correlate well with clinical concern, but it has
             been difficult to infer the molecular origins of these
             differences. Here we develop a mathematical framework to
             describe the pump-probe dynamics of melanin in human
             pigmented tissue samples, which treats the ensemble of
             individual chromophores that make up melanin as Gaussian
             absorbers with bandwidth related via Frenkel excitons. Thus,
             observed signals result from an interplay between the
             spectral bandwidths of the individual underlying
             chromophores and spectral proximity of the pump and probe
             wavelengths. The model is tested using a dual-wavelength
             pump-probe approach and a novel signal processing method
             based on gnomonic projections. Results show signals can be
             described by a single linear transition path with different
             rates of progress for different individual pump-probe
             wavelength pairs. Moreover, the combined dual-wavelength
             data shows a nonlinear transition that supports our
             mathematical framework and the excitonic model to describe
             the optical properties of melanin. The novel gnomonic
             projection analysis can also be an attractive generic tool
             for analyzing mixing paths in biomolecular and analytical
             chemistry.},
   Doi = {10.1038/srep36871},
   Key = {fds326883}
}

@article{fds235862,
   Author = {Chul, J and Calderbank, AR},
   Title = {Effective coding gain for space-time codes},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {252-256},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.2006.261844},
   Abstract = {The performance of space-time codes is evaluated in terms of
             diversity gain and coding gain, two measures which describe
             the worst-case pairwise error probability between codewords
             at high signal-to-noise ratio (SNR). We introduce the
             concept of effective coding gain to provide an estimate on
             the bit error rate (BER) at low-to-moderate SNR. This
             concept connects the number of nearest neighbours with
             degradation in error performance. We demonstrate the value
             of the new concept through analysis of space-time block
             codes for the quasi-static Rayleigh fading channel. © 2006
             IEEE.},
   Doi = {10.1109/ISIT.2006.261844},
   Key = {fds235862}
}

@article{fds235921,
   Author = {Jafarpour, S and Xu, W and Hassibi, B and Calderbank,
             R},
   Title = {Efficient and robust compressed sensing using optimized
             expander graphs},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {55},
   Number = {9},
   Pages = {4299-4308},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {January},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2009.2025528},
   Abstract = {Expander graphs have been recently proposed to construct
             efficient compressed sensing algorithms. In particular, it
             has been shown that any n-dimensional vector that is
             k-sparse can be fully recovered using O(k log n)
             measurements and only O(k log n) simple recovery iterations.
             In this paper, we improve upon this result by considering
              expander graphs with expansion coefficient beyond 3/4 and
             show that, with the same number of measurements, only O(k)
             recovery iterations are required, which is a significant
             improvement when n is large. In fact, full recovery can be
              accomplished by at most 2k very simple iterations. The
             number of iterations can be reduced arbitrarily close to k,
             and the recovery algorithm can be implemented very
             efficiently using a simple priority queue with total
              recovery time O(n log(n/k)). We also show that by
             tolerating a small penalty on the number of measurements,
             and not on the number of recovery iterations, one can use
             the efficient construction of a family of expander graphs to
             come up with explicit measurement matrices for this method.
             We compare our result with other recently developed
             expander-graph-based methods and argue that it compares
             favorably both in terms of the number of required
             measurements and in terms of the time complexity and the
             simplicity of recovery. Finally, we will show how our
             analysis extends to give a robust algorithm that finds the
             position and sign of the k significant elements of an almost
             k-sparse signal and then, using very simple optimization
             techniques, finds a k-sparse signal which is close to the
             best k-term approximation of the original signal. © 2009
             IEEE.},
   Doi = {10.1109/TIT.2009.2025528},
   Key = {fds235921}
}
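
%% Note: a sketch of compressed sensing with a sparse binary (expander-like)
%% measurement matrix as in the abstract above. The matrix is a random
%% d-left-regular bipartite graph; recovery here uses generic l1 minimization via
%% linear programming rather than the paper's fast O(k)-iteration decoder, purely
%% to keep the example short.

# Illustrative sketch; dimensions and the recovery method are assumptions.
import numpy as np
from scipy.optimize import linprog

rng = np.random.default_rng(5)
n, m, d, k = 60, 24, 4, 3                # signal dim, measurements, left degree, sparsity

A = np.zeros((m, n))                     # d-left-regular 0/1 measurement matrix
for j in range(n):
    A[rng.choice(m, d, replace=False), j] = 1.0

x = np.zeros(n)
x[rng.choice(n, k, replace=False)] = rng.standard_normal(k)
y = A @ x

# l1 minimization: min sum(u + v) subject to A(u - v) = y, u >= 0, v >= 0.
res = linprog(np.ones(2 * n), A_eq=np.hstack([A, -A]), b_eq=y,
              bounds=[(0, None)] * (2 * n), method="highs")
x_hat = res.x[:n] - res.x[n:]
print("recovery error:", np.linalg.norm(x - x_hat))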

@article{fds331053,
   Author = {Xu, D and Li, Y and Chiang, M and Calderbank, AR},
   Title = {Elastic Service Availability: Utility Framework and Optimal
             Provisioning},
   Journal = {IEEE Journal on Selected Areas in Communications},
   Volume = {26},
   Number = {6},
   Pages = {55-65},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {January},
   url = {http://dx.doi.org/10.1109/JSACOCN.2008.030607},
   Abstract = {Service availability is one of the most closely scrutinized
             metrics in offering network services. It is important to
             cost effectively provision a managed and differentiated
             network with various service availability guarantees under a
             unified platform. In particular, demands for availability
             may be elastic and such elasticity can be leveraged to
             improve cost-effectiveness. In this paper, we establish the
             framework of provisioning elastic service availability
             through network utility maximization, and propose an optimal
             and distributed solution using differentiated failure
             recovery schemes. First, we develop a utility function with
             configurable parameters to represent the satisfaction
             perceived by a user upon service availability as well as its
             allowed source rate. Second, adopting Quality of Protection
             [1] and shared path protection, we transform optimal
             provisioning of elastic service availability into a convex
             optimization problem. The desirable service availability and
             source rate for each user can be achieved using a
             price-based distributed algorithm. Finally, we numerically
             show the tradeoff between the throughput and the service
             availability obtained by users in various network
             topologies. This investigation quantifies several
             engineering implications. For example, indiscriminately
             provisioning service availabilities for different kinds of
             users within one network leads to noteworthy sub-optimality
             in total network utility. The profile of bandwidth usage
             also illustrates that provisioning high service availability
             exclusively for critical applications leads to significant
             waste in bandwidth resource. © 2008, IEEE. All rights
             reserved.},
   Doi = {10.1109/JSACOCN.2008.030607},
   Key = {fds331053}
}

@article{fds235895,
   Author = {Dusad, S and Diggavi, SN and Calderbank, AR},
   Title = {Embedded rank distance codes for ISI channels},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {54},
   Number = {11},
   Pages = {4866-4886},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {November},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2008.929960},
   Abstract = {Designs for transmit alphabet constrained space-time codes
             naturally lead to questions about the design of rank
             distance codes. Recently, diversity embedded multilevel
             space-time codes for flat-fading channels have been designed
             from sets of binary matrices with rank distance guarantees
             over the binary field by mapping them onto quadrature
             amplitude modulation (QAM) and phase-shift keying (PSK)
             constellations. In this paper, we demonstrate that diversity
             embedded space-time codes for fading intersymbol
             interference (ISI) channels can be designed with provable
             rank distance guarantees. As a corollary, we obtain an
             asymptotic characterization of the fixed transmit alphabet
             rate-diversity tradeoff for multiple antenna fading ISI
             channels. The key idea is to construct and analyze
             properties of binary matrices with a particular structure
             (Toeplitz structure) induced by ISI channels. © 2008
             IEEE.},
   Doi = {10.1109/TIT.2008.929960},
   Key = {fds235895}
}

@article{fds236005,
   Author = {Wu, Y and Jia, T and Calderbank, R and Duel-Hallen, A and Hallen,
             H},
   Title = {Enabling code diversity for mobile radio channels using
             long-range fading prediction},
   Journal = {IEEE Transactions on Wireless Communications},
   Volume = {11},
   Number = {12},
   Pages = {4362-4371},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2012},
   Month = {November},
   ISSN = {1536-1276},
   url = {http://dx.doi.org/10.1109/TWC.2012.101712.111884},
   Abstract = {Code diversity integrates space-time coding with beamforming
             by using a small number of feedback bits to select from a
             family of space-time codes. Different codes lead to
             different induced channels at the receiver, where Channel
             State Information (CSI) is used to instruct the transmitter
             how to choose the code. Feedback can be combined with
             sub-optimal low complexity decoding of the component codes
             to match Maximum-Likelihood (ML) decoding performance of any
             individual code in the family. It can also be combined with
             ML decoding of the component codes to improve performance
             beyond ML decoding performance of any individual code. Prior
             analysis of code diversity did not take into account the
             effect of the mobile speed and the delay in the feedback
             channel. This paper demonstrates the practicality of code
             diversity in space-time coded systems by showing that
             performance gains based on instantaneous feedback are
             largely preserved when long-range prediction of time-varying
             correlated fading channels is employed to compensate for the
             effect of the feedback delay. To maintain prediction
             accuracy for realistic SNR, noise reduction that employs
             oversampled pilots is used prior to fading prediction. We
             also propose a robust low pilot rate method that utilizes
             interleaving to improve the spectral efficiency. Simulations
             are presented for two channel models: the conventional Jakes
             model and a realistic physical channel model where the
             parameters associated with the reflectors vary in time and
             the arrival rays have different strengths and asymmetric
             arrival angles. © 2002-2012 IEEE.},
   Doi = {10.1109/TWC.2012.101712.111884},
   Key = {fds236005}
}
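
%% Note: a small sketch (setup assumptions mine) of the long-range fading
%% prediction ingredient above: generate a correlated Rayleigh fading process with
%% a sum-of-sinusoids Jakes approximation and predict it several samples ahead
%% with a least-squares linear predictor trained on past samples.

# Illustrative sketch; Doppler rate, predictor order and horizon are arbitrary.
import numpy as np

rng = np.random.default_rng(2)
fd_ts = 0.01                                # normalized Doppler (Doppler freq x symbol period)
N, order, ahead = 4000, 20, 10              # samples, predictor taps, prediction horizon

K = 32                                      # sum-of-sinusoids Jakes approximation
phases = rng.uniform(0, 2 * np.pi, K)
aoa = rng.uniform(0, 2 * np.pi, K)          # angles of arrival
t = np.arange(N)
h = (np.exp(1j * (2 * np.pi * fd_ts * np.cos(aoa)[:, None] * t + phases[:, None]))
       .sum(axis=0) / np.sqrt(K))

# Least-squares linear predictor: h[n + ahead] ~ w . [h[n], ..., h[n - order + 1]].
rows = np.array([h[n - order + 1 : n + 1][::-1] for n in range(order - 1, N - ahead)])
target = h[order - 1 + ahead : N]
w, *_ = np.linalg.lstsq(rows, target, rcond=None)

nmse = np.mean(np.abs(target - rows @ w) ** 2) / np.mean(np.abs(target) ** 2)
print("prediction NMSE:", round(float(nmse), 4))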

@article{fds235911,
   Author = {Li, Y and Li, Z and Chiang, M and Calderbank, AR},
   Title = {Energy-efficient video transmission scheduling for wireless
             peer-to-peer live streaming},
   Journal = {2009 6th IEEE Consumer Communications and Networking
             Conference, CCNC 2009},
   Publisher = {IEEE},
   Year = {2009},
   Month = {April},
   url = {http://dx.doi.org/10.1109/CCNC.2009.4784766},
   Abstract = {Peer-to-Peer (P2P) streaming has proven to be an effective
              solution for wireline video applications, while for
              wireless video streaming the limited radio
              resources and battery energy are the main constraints on
              P2P deployment. An important issue in live video
             streaming quality of service is to avoid playback buffer
             underflow, and a challenge from wireless applications is the
              need for energy efficiency. The problem we try to solve is
             how to utilize P2P schemes in video streaming and schedule
              the video transmission among peers to minimize the
              "freeze-ups" in playback caused by buffer underflow. In this
              work, we propose an energy-efficient algorithm for the video
             transmission scheduling in wireless P2P live streaming
             system, to minimize the playback freeze-ups among peers.
             Further the algorithm is extended to two scenarios: peers'
             reluctance of consuming battery energy and allowing
             overhearing, with alternative energy-efficient algorithms
             proposed for the second scenario. Numerical results show the
             effectiveness of the proposed algorithms. The results also
             demonstrate that peers' selfishness may reduce the energy
             efficiency, but allowing overhearing could increase energy
             efficiency. ©2009 IEEE.},
   Doi = {10.1109/CCNC.2009.4784766},
   Key = {fds235911}
}

@article{fds235933,
   Author = {Aggarwal, V and Calderbank, R and Gilbert, G and Weinstein,
             YS},
   Title = {Engineering fault tolerance for realistic quantum systems
             via the full error dynamics of quantum codes},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {958-962},
   Publisher = {IEEE},
   Year = {2009},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ISIT.2009.5205593},
   Abstract = {The standard approach to quantum fault tolerance is to
             calculate error thresholds on basic gates in the limit of
             arbitrarily many concatenation levels. In contrast this
             paper takes the number of qubits and the target
             implementation accuracy as given, and provides a framework
             for engineering the constrained quantum system to the
             required tolerance. The approach requires solving the full
             dynamics of the quantum system for an arbitrary admixture
             (biased or unbiased) of Pauli errors. The inaccuracy between
             ideal and implemented quantum systems is captured by the
             supremum of the Schatten-k norm of the difference between
             the ideal and implemented density matrices taken over all
             density matrices. This is a more complete analysis than the
             standard approach, where an intricate combination of worst
             case assumptions and combinatorial analysis is used to
             analyze the special case of equiprobable errors. Conditions
             for fault tolerance are now expressed in terms of error
             regions rather than a single number (the standard error
             threshold). In the important special case of a stochastic
             noise model and a single logical qubit, an optimization over
             all 2 × 2 density matrices is required to obtain the full
             dynamics. The complexity of this calculation is greatly
             simplified through reduction to an optimization over only
             three projectors. Error regions are calculated for the
             standard 5- and 7-qubit codes. Knowledge of the full
             dynamics makes it possible to design sophisticated
             concatenation strategies that go beyond repeatedly using the
             same code, and these strategies can achieve target fault
             tolerance thresholds with fewer qubits. © 2009
             IEEE.},
   Doi = {10.1109/ISIT.2009.5205593},
   Key = {fds235933}
}

@article{fds235935,
   Author = {Aggarwal, V and Applebaum, L and Bennatan, A and Calderbank, AR and Howard, SD and Searle, SJ},
   Title = {Enhanced CDMA communications using compressed-sensing
             reconstruction methods},
   Journal = {2009 47th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2009},
   Pages = {1211-1215},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ALLERTON.2009.5394537},
   Abstract = {We propose a simple method for downlink communications based
             on second order Reed-Muller sequences which generalize the
             Walsh sequences that are used in orthogonal CDMA. In our
             approach, coding occurs at the chip level (i.e. we use a
             spreading factor of 1) and different users are not
             orthogonalized. Our decoding algorithm is borrowed from work
             on fast reconstruction of signals for compressed-sensing.
             This algorithm allows for low-complexity multiuser
             detection. ©2009 IEEE.},
   Doi = {10.1109/ALLERTON.2009.5394537},
   Key = {fds235935}
}

@article{fds235951,
   Author = {Aggarwal, V and Sankar, L and Calderbank, AR and Poor,
             HV},
   Title = {Ergodic layered erasure one-sided interference
             channels},
   Journal = {2009 IEEE Information Theory Workshop, ITW
             2009},
   Pages = {574-578},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ITW.2009.5351176},
   Abstract = {The sum capacity of a class of layered erasure one-sided
             interference channels is developed under the assumption of
             no channel state information at the transmitters. Outer
             bounds are presented for this model and are shown to be
             tight for the following sub-classes: i) weak, ii) strong
             (mix of strong but not very strong (SnVS) and very strong
             (VS)), iii) ergodic very strong (mix of strong and weak),
              and iv) a sub-class of mixed interference (mix of SnVS and
             weak). Each sub-class is uniquely defined by the fading
             statistics. © 2009 IEEE.},
   Doi = {10.1109/ITW.2009.5351176},
   Key = {fds235951}
}

@article{fds235843,
   Author = {Tarokh, V and Naguib, A and Seshadri, N and Calderbank,
             AR},
   Title = {Erratum: Space-time codes for high data rate wireless
             communications: Performance criteria in the presence of
             channel estimation errors, mobility, and multiple paths
             (IEEE Trans. Commun. (1999) 47, (199-207))},
   Journal = {IEEE Transactions on Communications},
   Volume = {51},
   Number = {12},
   Pages = {2141},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2003},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TCOMM.2003.822179},
   Doi = {10.1109/TCOMM.2003.822179},
   Key = {fds235843}
}

@article{fds236076,
   Author = {Jafarpour, S and Pezeshki, A and Calderbank, R},
   Title = {Experiments with compressively sampled images and a new
             debluring-denoising algorithm},
   Journal = {Proceedings - 10th IEEE International Symposium on
             Multimedia, ISM 2008},
   Pages = {66-73},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISM.2008.119},
   Abstract = {In this paper we will examine the effect of different
              parameters on the quality of real compressively sampled
             images in the compressed sensing framework. We will select a
             variety of different real images of different types and test
             the quality of the recovered images, the recovery time, and
             required resources when different measurement methods with
             different parameters are used or when different recovering
             methods are applied. Then we will propose an algorithm to
             reduce the noise in the recovered images and sharpen them
              simultaneously. The algorithm exploits well-known
              bilateral filtering to increase the confidence in
             margins and edges, and then uses an adaptive unsharp mask
             method to sharpen the images. The adaptive unsharp mask
             method extends the ordinary unsharp mask method and uses
              square-loss minimization and regression to learn the
              optimal unsharpening parameters. We will
             argue why both bilateral filtering and unsharp mask methods
             should be used in the algorithm simultaneously. Finally, we
             will show the results of applying the algorithm on real
             images that are recovered using the compressed sensing
             method and we will interpret the experimental results. ©
             2008 IEEE.},
   Doi = {10.1109/ISM.2008.119},
   Key = {fds236076}
}
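
%% Note: a rough sketch (not the paper's algorithm or parameters) of the
%% denoise-then-sharpen idea above: a brute-force bilateral filter smooths noise
%% while preserving edges, followed by a fixed-parameter unsharp mask. The paper
%% additionally learns the unsharpening parameters by regression.

# Illustrative sketch on a synthetic blocky image; all parameters are assumptions.
import numpy as np
from scipy.ndimage import gaussian_filter

def bilateral(img, radius=3, sigma_s=2.0, sigma_r=0.1):
    h, w = img.shape
    out = np.zeros_like(img)
    pad = np.pad(img, radius, mode="edge")
    yy, xx = np.mgrid[-radius:radius + 1, -radius:radius + 1]
    spatial = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma_s ** 2))
    for i in range(h):
        for j in range(w):
            patch = pad[i:i + 2 * radius + 1, j:j + 2 * radius + 1]
            weight = spatial * np.exp(-(patch - img[i, j]) ** 2 / (2 * sigma_r ** 2))
            out[i, j] = (weight * patch).sum() / weight.sum()
    return out

rng = np.random.default_rng(3)
clean = np.kron(rng.random((8, 8)), np.ones((8, 8)))      # 64 x 64 blocky test image
noisy = clean + 0.05 * rng.standard_normal(clean.shape)

smoothed = bilateral(noisy)
sharpened = smoothed + 0.8 * (smoothed - gaussian_filter(smoothed, sigma=1.5))
print("RMSE before / after smoothing:",
      round(float(np.std(noisy - clean)), 3), round(float(np.std(smoothed - clean)), 3))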

@article{fds235852,
   Author = {Das, S and Al-Dhahir, N and Calderbank, AR},
   Title = {Exploiting algebraic structure in cross-layer
             design},
   Journal = {2005 International Conference on Wireless Networks,
             Communications and Mobile Computing},
   Volume = {2},
   Pages = {1466-1471},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   url = {http://dx.doi.org/10.1109/WIRLES.2005.1549629},
   Abstract = {We demonstrate the value of algebraic structure to
             cross-layer design of multiple-antenna wireless
             communication systems. At the network layer we will develop
             techniques for multiple access (many to one) and broadcast
             (one to many) communication where algebraic structure
             enables very simple implementation. At the physical layer,
             we emphasize enabling mobility and integrating receive chain
             functionality (for example, channel estimation, joint
             decoding and equalization) at a level of complexity that is
             comparable to single-antenna systems. Algebraic structure
             will make it possible to integrate these different functions
             very efficiently. Another important theme in this work is
             the emphasis on measuring the value of innovation at the
             physical layer in terms of networking throughput or coverage
             area of broadband wireless systems such as WiFi and WiMAX.
             © 2005 IEEE.},
   Doi = {10.1109/WIRLES.2005.1549629},
   Key = {fds235852}
}

@article{fds343648,
   Author = {Mappouras, G and Vahid, A and Calderbank, R and Sorin,
             DJ},
   Title = {Extending flash lifetime in embedded processors by expanding
             analog choice},
   Journal = {IEEE Transactions on Computer-Aided Design of Integrated
             Circuits and Systems},
   Volume = {37},
   Number = {11},
   Pages = {2462-2473},
   Year = {2018},
   Month = {November},
   url = {http://dx.doi.org/10.1109/TCAD.2018.2857059},
   Abstract = {We extend the lifetime of Flash memory in embedded
             processors by exploiting the fact that data from sensors is
             inherently analog. Prior work in the computer architecture
             community has assumed that all data is digital and has
             overlooked the opportunities available when working with
             analog data, such as the data recorded by sensors. In this
             paper, we introduce redundancy into the quantization of
             sensor data in order to provide several alternative
              representations. Notably, we trade off distortion (the
              difference between the sensed analog value and the digital
              quantization of that value) to improve lifetime. Our
             simulations show that when combining rate, distortion, and
             lifetime tradeoffs we can extend Flash lifetime at a far
              smaller capacity cost compared to prior work. More
              specifically, the simulated system achieves up to 2.75×
              less capacity cost than redundant Flash memory and 1.29×
              less capacity cost than state-of-the-art coding
              schemes.},
   Doi = {10.1109/TCAD.2018.2857059},
   Key = {fds343648}
}
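
%% Note: a toy model (assumptions mine) of the rate-distortion-lifetime tradeoff
%% sketched above: each sensed analog value may be represented by any quantizer
%% level whose distortion stays below a tolerance, and the writer picks the
%% representation that programs the fewest bits, reducing Flash wear.

# Illustrative sketch; quantizer, tolerance values and cost model are assumptions.
import numpy as np

rng = np.random.default_rng(4)
levels = np.arange(16)                       # 4-bit quantizer levels
step = 1.0 / 16
centers = (levels + 0.5) * step

def candidates(x, tol):
    """All quantizer levels whose reconstruction error is within tol."""
    return levels[np.abs(centers - x) <= tol]

def program_cost(value, erased=0b1111):
    """Bits that must be programmed (1 -> 0) to write `value` over erased cells."""
    return bin(erased & ~value & 0b1111).count("1")

samples = rng.random(1000)
for tol in (0.5 * step, 1.5 * step):         # tight vs. relaxed distortion tolerance
    cost = sum(min(program_cost(int(v)) for v in candidates(x, tol)) for x in samples)
    print(f"distortion tolerance {tol:.3f}: total programmed bits = {cost}")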

@article{fds331056,
   Author = {Calderbank, AR and Delsarte, P},
   Title = {Extending the t-design concept},
   Journal = {Transactions of the American Mathematical
             Society},
   Volume = {338},
   Number = {2},
   Pages = {941-952},
   Publisher = {American Mathematical Society (AMS)},
   Year = {1993},
   Month = {January},
   url = {http://dx.doi.org/10.1090/S0002-9947-1993-1134756-0},
   Abstract = {Let 픅 be a family of k-subsets of a υ-set V, with 1 ≤ k
             ≤ υ/2. Given only the inner distribution of 픅, i.e.,
             the number of pairs of blocks that meet in j points (with j
             = 0, 1, …, k), we are able to completely describe the
             regularity with which 픅 meets an arbitrary t-subset of V,
             for each order t (with 1 ≤ t ≤ υ/2). This description
             makes use of a linear transform based on a system of dual
             Hahn polynomials with parameters υ, k, t. The main
             regularity parameter is the dimension of a well-defined
              subspace of ℝ^(t+1), called the t-form space of 픅. (This
              subspace coincides with ℝ^(t+1) if and only if 픅 is a
             t-design.) We show that the t-form space has the structure
             of an ideal, and we explain how to compute its canonical
             generator. © 1993 American Mathematical
             Society.},
   Doi = {10.1090/S0002-9947-1993-1134756-0},
   Key = {fds331056}
}

@article{fds235875,
   Author = {Howard, SD and Sirianunpiboon, S and Calderbank,
             AR},
   Title = {Fast decoding of the Golden code by Diophantine
             approximation},
   Journal = {2007 IEEE Information Theory Workshop, ITW 2007,
             Proceedings},
   Pages = {590-594},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ITW.2007.4313140},
   Abstract = {The Golden Code is incorporated in the IEEE 802.16 standard
             and is representative of a large class of space-time codes
             where performance is determined by the difficulty of solving
             an associated Diophantine approximation problem. This paper
             develops a new approach to coherent decoding where channel
             state information is used to select from a plurality of
             zero-forcing decoders. The selection is made to maximize
             effective signal to noise ratio and the underlying
             Diophantine geometry guarantees that at least one of the
             available choices is good. The approach is described in
             detail for the important special case of the Golden code but
             it applies to a larger class of space-time codes where it
             provides a means of avoiding the uncertainties and
             implementation complexity associated with sphere decoding.
             In particular it extends battery life at the mobile terminal
             by eliminating the need for a second receive antenna.
             Simulation results for the Golden Code show performance
             within 2 dB of full maximum-likelihood decoding with worst
             case complexity that is quadratic in the size of the QAM
             signal constellation. © 2007 IEEE.},
   Doi = {10.1109/ITW.2007.4313140},
   Key = {fds235875}
}

@article{fds235993,
   Author = {Sirianunpiboon, S and Calderbank, AR and Howard,
             SD},
   Title = {Fast essentially maximum likelihood decoding of the Golden
             code},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {57},
   Number = {6},
   Pages = {3537-3541},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {June},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2011.2136870},
   Abstract = {The Golden code is a full-rate full-diversity space-time
             code which has been incorporated in the IEEE 802.16 (WiMAX)
             standard. The worst case complexity of a tree-based sphere
              decoder for a square QAM constellation is O(N^3), where N is
             the size of the underlying QAM constellation; the worst case
             will dominate average decoding complexity on any channel
             with a significant line of sight component. In this paper,
             we present a simple algorithm with quadratic complexity for
             decoding the Golden code that can be employed by mobile
             terminals with either one or two receive antennas, that is
             resilient to near singularity of the channel matrix, and
             that gives essentially maximum likelihood (ML) performance.
             Dual use is an advantage, since there will likely be some
             IEEE 802.16 mobile terminals with one receive antenna and
             some with two antennas. The key to the quadratic algorithm
             is a maximization of the likelihood function with respect to
             one of the pair of signal points conditioned on the other.
             This choice is made by comparing the determinants of two
             covariance matrices, and the underlying geometry of the
             Golden code guarantees that one of these choices is good
             with high probability. © 2011 IEEE.},
   Doi = {10.1109/TIT.2011.2136870},
   Key = {fds235993}
}

@article{fds235981,
   Author = {Krishnamurthy, K and Bajwa, WU and Willett, R and Calderbank,
             R},
   Title = {Fast level set estimation from projection
             measurements},
   Journal = {IEEE Workshop on Statistical Signal Processing
             Proceedings},
   Pages = {585-588},
   Publisher = {IEEE},
   Year = {2011},
   Month = {September},
   url = {http://dx.doi.org/10.1109/SSP.2011.5967766},
   Abstract = {Estimation of the level set of a function (i.e., regions
             where the function exceeds some value) is an important
             problem with applications in digital elevation maps, medical
             imaging, and astronomy. In many applications, however, the
             function of interest is acquired through indirect
             measurements, such as tomographic projections,
             coded-aperture measurements, or pseudo-random projections
             associated with compressed sensing. This paper describes a
             new methodology and associated theoretical analysis for
             rapid and accurate estimation of the level set from such
             projection measurements. The proposed method estimates the
             level set from projection measurements without an
             intermediate function reconstruction step, thereby leading
             to significantly faster computation. In addition, the
             coherence of the projection operator and McDiarmid's
             inequality are used to characterize the estimator's
             performance. © 2011 IEEE.},
   Doi = {10.1109/SSP.2011.5967766},
   Key = {fds235981}
}

@article{fds235971,
   Author = {Sirianunpiboon, S and Wu, Y and Calderbank, AR and Howard,
             SD},
   Title = {Fast optimal decoding of multiplexed orthogonal designs by
             conditional optimization},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {56},
   Number = {3},
   Pages = {1106-1113},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {March},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2009.2039069},
   Abstract = {This paper focuses on conditional optimization as a decoding
             primitive for high rate spacetime codes that are obtained by
             multiplexing in the spatial and code domains. The approach
             is a crystallization of the work of Hottinen which applies
             to spacetime codes that are assisted by quasi-orthogonality.
             It is independent of implementation and is more general in
             that it can be applied to spacetime codes such as the Golden
             Code and perfect spacetime block codes, that are not
             assisted by quasi-orthogonality, to derive fast decoders
             with essentially maximum likelihood (ML) performance. The
             conditions under which conditional optimization leads to
             reduced complexity ML decoding are captured in terms of the
             induced channel at the receiver. These conditions are then
             translated back to the transmission domain leading to codes
             that are constructed by multiplexing orthogonal designs. The
             methods are applied to several block spacetime codes
             obtained by multiplexing Alamouti blocks where it leads to
              ML decoding with complexity O(N^2), where N is the size of
             the underlying QAM signal constellation. A new code is
             presented that tests commonly accepted design principles and
             for which decoding by conditional optimization is both fast
             and ML. The two design principles for perfect spacetime
             codes are nonvanishing determinant of pairwise differences
             and cubic shaping, and it is cubic shaping that restricts
             the possible multiplexing structures. The new code shows
             that it is possible to give up on cubic shaping without
             compromising code performance or decoding complexity. ©
             2006 IEEE.},
   Doi = {10.1109/TIT.2009.2039069},
   Key = {fds235971}
}

@article{fds236083,
   Author = {Calderbank, R and Jafarpour, S},
   Title = {Finding needles in compressed haystacks},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3441-3444},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288656},
   Abstract = {In this paper, we investigate the problem of compressed
             learning, i.e. learning directly in the compressed domain.
             In particular, we provide tight bounds demonstrating that
             the linear kernel SVMs classifier in the measurement domain,
             with high probability, has true accuracy close to the
             accuracy of the best linear threshold classifier in the data
             domain. Furthermore, we indicate that for a family of
             well-known deterministic compressed sensing matrices,
             compressed learning is provided on the fly. Finally, we
             support our claims with experimental results in the texture
             analysis application. © 2012 IEEE.},
   Doi = {10.1109/ICASSP.2012.6288656},
   Key = {fds236083}
}
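
%% A minimal numerical illustration of compressed learning: train a linear
%% classifier directly on random projections of sparse data and compare it with
%% the data-domain classifier. The Gaussian sensing matrix and scikit-learn's
%% LinearSVC are stand-in assumptions; the paper's guarantees concern particular
%% deterministic sensing matrices.
```python
import numpy as np
from sklearn.svm import LinearSVC

rng = np.random.default_rng(1)
n, d, m, k = 400, 256, 64, 10                    # samples, ambient dim, measurements, sparsity (assumed)

# k-sparse signals labelled by a linear threshold in the data domain
X = np.zeros((n, d))
for i in range(n):
    X[i, rng.choice(d, k, replace=False)] = rng.standard_normal(k)
labels = (X @ rng.standard_normal(d) > 0).astype(int)

Phi = rng.standard_normal((m, d)) / np.sqrt(m)   # random sensing matrix (stand-in)
Z = X @ Phi.T                                    # measurement-domain data

for name, data in [("data domain       ", X), ("measurement domain", Z)]:
    clf = LinearSVC(max_iter=10000).fit(data[:300], labels[:300])
    print(name, "test accuracy:", clf.score(data[300:], labels[300:]))
```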

@article{fds235948,
   Author = {Wu, Y and Davis, LM and Calderbank, AR},
   Title = {Finite precision analysis for space-time
             decoding},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {57},
   Number = {12},
   Pages = {4861-4870},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {December},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2009.2026068},
   Abstract = {Low complexity optimal (or nearly optimal) decoders for
             space-time codes have recently been under intensive
             investigation. For example, recent works by Sirianunpiboon
             and others show that the Silver code and the Golden code can
             be decoded optimally (or nearly optimally) with quadratic
             decoding complexity. Fast decodability makes them very
             attractive in practice. In implementing these decoders,
             floating-point to fixed-point conversion (FFC) needs to be
             carefully undertaken to minimize hardware cost while
             retaining decoding performance. The process of quantization
              for fixed-point representations is often ignored by the
              research community and lacks investigation, and so FFC is
              often conducted heuristically based on simulations. This paper
              studies the effects of quantization on space-time coded
             systems from an information theoretic perspective. It shows
             the analytical relationship between quantization error and
             decoding performance deterioration. This paper also proposes
             a general finite precision implementation methodology
             including two FFC criteria for space-time coded systems
             within an integer optimization framework. As a particular
             example, this paper examines the finite precision
             implementation of the quadratic optimal decoding algorithm
             of the Silver code. However, our methodology and techniques
             can be applied to general space-time codes. © 2009
             IEEE.},
   Doi = {10.1109/TSP.2009.2026068},
   Key = {fds235948}
}

@article{fds235835,
   Author = {Al-Dhahir, N and Naguib, AF and Calderbank, AR},
   Title = {Finite-length MIMO decision feedback equalization for
             space-time block-coded signals over multipath-fading
             channels},
   Journal = {IEEE Transactions on Vehicular Technology},
   Volume = {50},
   Number = {4},
   Pages = {1176-1182},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2001},
   Month = {July},
   ISSN = {0018-9545},
   url = {http://dx.doi.org/10.1109/25.938592},
   Abstract = {A finite-length optimized-delay multi-input-multi-output
             (MIMO) mean-square-error decision-feedback equalizer for
             space-time block-coded transmissions over
              multipath-fading channels is presented. Alamouti's space-time
             block code with two transmit and two receive antennas on a
             typical urban EDGE channel is taken as a case study. We
             propose a combined equalization and decoding scheme under
             the constraint of linear processing complexity (no trellis
             search) at the receiver. Performance comparisons are made
             with the single-transmit/single-receive antenna case and the
             case of MIMO feedforward linear equalization only with no
             decision feedback.},
   Doi = {10.1109/25.938592},
   Key = {fds235835}
}

@article{fds235954,
   Author = {Raginsky, M and Jafarpour, S and Willett, R and Calderbank,
             R},
   Title = {Fishing in poisson streams: Focusing on the whales, ignoring
             the minnows},
   Journal = {2010 44th Annual Conference on Information Sciences and
             Systems, CISS 2010},
   Publisher = {IEEE},
   Year = {2010},
   Month = {June},
   url = {http://dx.doi.org/10.1109/CISS.2010.5464841},
   Abstract = {This paper describes a low-complexity approach for
             reconstructing average packet arrival rates and
             instantaneous packet counts at a router in a communication
             network, where the arrivals of packets in each flow follow a
             Poisson process. Assuming that the rate vector of this
             Poisson process is sparse or approximately sparse, the goal
             is to maintain a compressed summary of the process sample
             paths using a small number of counters, such that at any
             time it is possible to reconstruct both the total number of
             packets in each flow and the underlying rate vector. We show
             that these tasks can be accomplished efficiently and
             accurately using compressed sensing with expander graphs. In
             particular, the compressive counts are a linear
             transformation of the underlying counting process by the
             adjacency matrix of an unbalanced expander. Such a matrix is
             binary and sparse, which allows for efficient incrementing
             when new packets arrive. We describe, analyze, and compare
             two methods that can be used to estimate both the current
             vector of total packet counts and the underlying vector of
             arrival rates. ©2010 IEEE.},
   Doi = {10.1109/CISS.2010.5464841},
   Key = {fds235954}
}
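
%% A sketch of the compressive counter update described above: each flow is wired
%% to a few counters through a sparse 0/1 matrix, so the counters can be
%% incremented as packets stream by, and the counter vector is exactly a linear
%% sketch y = Ax of the per-flow packet counts. The random left-regular wiring
%% below is an assumption standing in for a true unbalanced expander, and the
%% sparse-recovery decoding analyzed in the paper is not shown.
```python
import numpy as np

rng = np.random.default_rng(2)
F, C, d = 1000, 100, 8                           # flows, counters, counters touched per flow (assumed)

# Each flow increments d of the C counters (random left-regular bipartite wiring).
wiring = [rng.choice(C, d, replace=False) for _ in range(F)]

# Sparse Poisson traffic: a handful of heavy "whale" flows, everything else silent.
rates = np.zeros(F)
rates[rng.choice(F, 10, replace=False)] = rng.uniform(50.0, 100.0, 10)
counts = rng.poisson(rates)                      # packets per flow over the window

counters = np.zeros(C, dtype=int)
for flow, n_pkts in enumerate(counts):
    counters[wiring[flow]] += n_pkts             # streaming update: d integer increments

# The counters equal the linear sketch y = A x with A the 0/1 adjacency matrix.
A = np.zeros((C, F), dtype=int)
for flow in range(F):
    A[wiring[flow], flow] = 1
assert np.array_equal(counters, A @ counts)
print("nonzero flows:", np.count_nonzero(counts), " counters used:", C)
```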

@article{fds236037,
   Author = {Gelblum, EA and Calderbank, AR},
   Title = {Forbidden rate region for generalized cross
             constellations},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {149},
   Year = {1995},
   Month = {January},
   Abstract = {An analysis of the Generalized Cross Constellation (GCC) is
             presented and a new perspective on its coding algorithm is
             described. We show how the GCC can be used to address
             generic sets of symbol points in any multidimensional space
             through an example based on the matched spectral null coding
             used in magnetic recording devices. We also prove that there
             is a forbidden rate region of fractional coding rates that
             are practically unrealizable using the GCC construction. We
             introduce the idea of a constellation tree and show how its
             decomposition can be used to design GCC's matching desired
             parameters. Following this analysis, an algorithm to design
             the optimal rate GCC from a restriction on the maximum size
             of its constellation signal set is given, and a formula for
             determining the size of the GCC achieving a desired coding
             rate is derived. We finish with an upper bound on the size
             of the constellation expansion ratio.},
   Key = {fds236037}
}

@article{fds235798,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {Four‐Dimensional Modulation With an Eight‐State Trellis
             Code},
   Journal = {AT&T Technical Journal},
   Volume = {64},
   Number = {5},
   Pages = {1005-1018},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1985},
   Month = {January},
   url = {http://dx.doi.org/10.1002/j.1538-7305.1985.tb00451.x},
   Abstract = {A trellis code is a “sliding window” method for encoding
             a binary data stream {ai}, ai = 0, 1, as a sequence of
             signal points drawn from Rn. The rule for assigning signal
             points depends on the state of the encoder. In this paper n
             = 4, and the signal points are 4‐tuples of odd integers.
             We describe an infinite family of eight‐state trellis
             codes. For k = 3, 4, 5, … we construct a trellis encoder
             with a rate of k bits/four‐dimensional signal. We propose
             that the codes with rates k = 8 and 12 be considered for use
             in modems designed to achieve data rates of 9.6 kb/s and
             14.4 kb/s, respectively. © 1985 AT&T Technical
             Journal},
   Doi = {10.1002/j.1538-7305.1985.tb00451.x},
   Key = {fds235798}
}

@article{fds235984,
   Author = {Mixon, DG and Bajwa, WU and Calderbank, R},
   Title = {Frame coherence and sparse signal processing},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {663-667},
   Publisher = {IEEE},
   Year = {2011},
   Month = {October},
   url = {http://dx.doi.org/10.1109/ISIT.2011.6034214},
   Abstract = {The sparse signal processing literature often uses random
             sensing matrices to obtain performance guarantees.
             Unfortunately, in the real world, sensing matrices do not
             always come from random processes. It is therefore desirable
             to evaluate whether an arbitrary matrix, or frame, is
             suitable for sensing sparse signals. To this end, the
             present paper investigates two parameters that measure the
             coherence of a frame: worst-case and average coherence. We
             first provide several examples of frames that have small
             spectral norm, worst-case coherence, and average coherence.
             Next, we present a new lower bound on worst-case coherence
             and compare it to the Welch bound. Later, we propose an
             algorithm that decreases the average coherence of a frame
             without changing its spectral norm or worst-case coherence.
             Finally, we use worst-case and average coherence, as opposed
             to the Restricted Isometry Property, to garner near-optimal
             probabilistic guarantees on both sparse signal detection and
             reconstruction in the presence of noise. This contrasts with
             recent results that only guarantee noiseless signal recovery
             from arbitrary frames, and which further assume independence
              across the nonzero entries of the signal; in a sense,
             requiring small average coherence replaces the need for such
             an assumption. © 2011 IEEE.},
   Doi = {10.1109/ISIT.2011.6034214},
   Key = {fds235984}
}
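
%% Worst-case and average coherence of a unit-norm frame are simple functions of
%% its Gram matrix. A minimal sketch for a random frame (the particular definition
%% of average coherence below is one common choice, taken here as an assumption):
```python
import numpy as np

rng = np.random.default_rng(3)
m, n = 16, 64                                    # n unit-norm frame vectors in R^m (assumed sizes)
F = rng.standard_normal((m, n))
F /= np.linalg.norm(F, axis=0)                   # unit-norm columns

G = F.T @ F
off = G - np.eye(n)                              # off-diagonal inner products <f_i, f_j>

worst_case = np.max(np.abs(off))                 # mu = max_{i != j} |<f_i, f_j>|
average = np.max(np.abs(off.sum(axis=1))) / (n - 1)   # nu = max_i |sum_{j != i} <f_i, f_j>| / (n-1)
welch = np.sqrt((n - m) / (m * (n - 1)))         # Welch lower bound on worst-case coherence

print(f"worst-case {worst_case:.3f}  average {average:.3f}  Welch bound {welch:.3f}")
print("spectral norm:", round(np.linalg.norm(F, 2), 3))
```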

@article{fds235940,
   Author = {Sirianunpiboon, S and Howard, SD and Calderbank, AR and Davis,
             LM},
   Title = {Fully-polarimetric MIMO to improve throughput and
             reliability across propagation conditions},
   Journal = {IEEE Vehicular Technology Conference},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   ISSN = {1550-2252},
   url = {http://dx.doi.org/10.1109/VETECF.2009.5379016},
   Abstract = {Multiple-Input Multiple-Output (MIMO) functionality has been
             shown to dramatically increase the capacity of wireless
             communication systems when the environment provides rich
             multipath scattering. In a predominantly Line-of-Sight (LOS)
             environment, the loss of diversity reduces the potential
             gain considerably. This can be remedied in part by the use
             of dual-polarized antennas, which increases the rank of the
             wireless channel and introduces diversity, while minimizing
             the antenna's form factor. However the performance of a
             dual-polarized antenna is still degraded by antenna
             rotations that are typical of mobile terminal operation.
             This paper presents a solution which uses a triad antenna at
              the transmitter and a triad at the receiver, to provide an
             8-10 dB gain over the baseline dual-polarized system. A
             triad is composed of three orthogonal dipoles oriented in
             perpendicular directions. A triad antenna can generate an
             arbitrary oscillating dipole moment at the transmitter and
             consequently an arbitrary polarized electric field at the
             receiver, subject only to the constraints imposed by the
             physics of the Electromagnetic (EM) field. We show that, in
             LOS environments, the capacity of the channel is invariant
             under arbitrary rotations of the transmit and/or receive
             antennas about their centres. Simulation results show that
             the performance is stable as the propagation environment
             varies from rich scattering to pure LOS. A full rate 3×3
             Space-Time Block Code (STBC) is proposed for the triad
             system that is designed for low complexity decoding. © 2009
             Crown.},
   Doi = {10.1109/VETECF.2009.5379016},
   Key = {fds235940}
}

@article{fds331064,
   Author = {Calderbank, AR and Fishburn, PC and Spencer, JH},
   Title = {Functions that Never Agree},
   Journal = {European Journal of Combinatorics},
   Volume = {7},
   Number = {3},
   Pages = {207-210},
   Publisher = {Elsevier BV},
   Year = {1986},
   Month = {January},
   url = {http://dx.doi.org/10.1016/S0195-6698(86)80023-3},
   Abstract = {Consider functions f1, . . . , fk defined on an n-element
             set I with the property that if x ∈ I then f1(x), . . . ,
             fk(x) are all distinct. We shall say that the functions f1,
             . . . , fk never agree. Let ρ(f1, . . . , fk) be the size
              of the largest subset I* of I for which f1(I*), . . . ,
              fk(I*) are all disjoint, and let ρk(n) = min{ρ(f1, . . . ,
              fk)}, where the minimum is taken over all functions f1, . . .
              , fk that never agree. We prove that ρk(n) ⩾ n/k^k, and
              that in the limit as n → ∞, the ratio ρk(n)/n →
              1/k^k. For k = 2 we describe how the function ρ(f1, f2) can
              be interpreted as a measure of the bipartiteness of a graph.
              When n = 2l^2+l we prove that ρ2(n) = (l^2+l)/2. © 1986,
             Academic Press Inc. (London) Limited. All rights
             reserved.},
   Doi = {10.1016/S0195-6698(86)80023-3},
   Key = {fds331064}
}

@article{fds235814,
   Author = {Pottie, GJ and Calderbank, AR},
   Title = {Further asymptotic upper bounds on the minimum distance of
             trellis codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {39},
   Number = {4},
   Pages = {1428-1434},
   Year = {1993},
   url = {http://dx.doi.org/10.1109/18.243464},
   Abstract = {Asymptotic upper bounds on the minimum distance of trellis
             codes are derived. A universal bound and bounds specific to
             PSK and QAM signal sets are obtained.},
   Doi = {10.1109/18.243464},
   Key = {fds235814}
}

@article{fds235834,
   Author = {Stamoulis, A and Al-Dhahir, N and Calderbank, AR},
   Title = {Further results on interference cancellation and space-time
             block codes},
   Journal = {Conference Record of the Asilomar Conference on Signals,
             Systems and Computers},
   Volume = {1},
   Pages = {257-261},
   Year = {2001},
   Month = {January},
   url = {http://dx.doi.org/10.1109/acssc.2001.986916},
   Abstract = {Space-Time Block Codes (STBC) make use of a rich algebraic
             structure to provide diversity gains with small decoding
             complexity. In this work, we show that the rich algebraic
             structure of STBC reduces the hardware and software
             complexity of interference cancellation (IC) techniques.
             Additionally, after the IC stage, transmitted symbols can
             still be recovered with space-time diversity gains. We
             present three illustrative examples of IC in wireless
             networks where co-channel users employ STBC. First, we show
             that any STBC that is based on an orthogonal design allows
             IC of two co-channel users with simple linear processing.
             Second, we show that for the Alamouti STBC, K > 2 users can
             be detected with simple linear processing, while still
             ensuring space-time diversity gains. Third, capitalizing on
             recent work on single-carrier frequency-domain STBC, we
             study how the aforementioned IC schemes can be modified for
             frequency-selective channels.},
   Doi = {10.1109/acssc.2001.986916},
   Key = {fds235834}
}

@article{fds236070,
   Author = {Pezeshki, A and Kutyniok, G and Calderbank, R},
   Title = {Fusion frames and robust dimension reduction},
   Journal = {CISS 2008, The 42nd Annual Conference on Information
             Sciences and Systems},
   Pages = {264-268},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/CISS.2008.4558533},
   Abstract = {We consider the linear minimum mean-squared error (LMMSE)
              estimation of a random vector of interest from its fusion
              frame measurements in the presence of noise and subspace erasures.
             Each fusion frame measurement is a low-dimensional vector
             whose elements are inner products of an orthogonal basis for
             a fusion frame subspace and the random vector of interest.
             We derive bounds on the mean-squared error (MSE) and show
             that the MSE will achieve its lower bound if the fusion
             frame is tight. We prove that tight fusion frames consisting
             of equidimensional subspaces have maximum robustness with
             respect to erasures of one subspace, and that the optimal
             dimension depends on SNR. We also show that tight fusion
             frames consisting of equi-dimensional subspaces with equal
             pairwise chordal distances are most robust with respect to
             two and more subspace erasures, and refer to such fusion
             frames as equi-distance tight fusion frames. Finally, we
             show that the squared chordal distance between the subspaces
             in such fusion frames meets the so-called simplex bound, and
             thereby establish a connection between equidistance tight
             fusion frames and optimal Grassmannian packings. © 2008
             IEEE.},
   Doi = {10.1109/CISS.2008.4558533},
   Key = {fds236070}
}
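
%% Tightness of a fusion frame can be checked by summing the subspace projections
%% and testing whether the result is a multiple of the identity. A toy example
%% with three one-dimensional subspaces of R^2 spanned by the "Mercedes-Benz"
%% directions (an illustrative choice, not a construction from the paper):
```python
import numpy as np

angles = np.deg2rad([0.0, 120.0, 240.0])
S = np.zeros((2, 2))
for theta in angles:
    u = np.array([np.cos(theta), np.sin(theta)])   # unit vector spanning the subspace
    S += np.outer(u, u)                            # add the orthogonal projection onto span{u}

print(S.round(6))                                  # equals (3/2) * I
print("tight fusion frame?", np.allclose(S, 1.5 * np.eye(2)))
```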

@article{fds362596,
   Author = {Calderbank, R and Casazza, PG and Heinecke, A and Kutyniok, G and Pezeshki, A},
   Title = {Fusion Frames: Existence and Construction},
   Year = {2009},
   Month = {June},
   Abstract = {Fusion frame theory is an emerging mathematical theory that
             provides a natural framework for performing hierarchical
             data processing. A fusion frame is a frame-like collection
             of subspaces in a Hilbert space, thereby generalizing the
             concept of a frame for signal representation. In this paper,
             we study the existence and construction of fusion frames. We
             first present a complete characterization of a special class
             of fusion frames, called Parseval fusion frames. The value
             of Parseval fusion frames is that the inverse fusion frame
             operator is equal to the identity and therefore signal
             reconstruction can be performed with minimal complexity. We
             then introduce two general methods -- the spatial complement
             and the Naimark complement -- for constructing a new fusion
             frame from a given fusion frame. We then establish existence
             conditions for fusion frames with desired properties. In
             particular, we address the following question: Given $M, N,
             m \in \NN$ and $\{\lambda_j\}_{j=1}^M$, does there exist a
             fusion frame in $\RR^M$ with $N$ subspaces of dimension $m$
             for which $\{\lambda_j\}_{j=1}^M$ are the eigenvalues of the
             associated fusion frame operator? We address this problem by
             providing an algorithm which computes such a fusion frame
             for almost any collection of parameters $M, N, m \in \NN$
             and $\{\lambda_j\}_{j=1}^M$. Moreover, we show how this
             procedure can be applied, if subspaces are to be added to a
             given fusion frame to force it to become
             Parseval.},
   Key = {fds362596}
}

@article{fds235805,
   Author = {Calderbank, AR and Mazo, JE},
   Title = {Generalizing the simple alternate-mark-inversion line code
             provides enhanced immunity to additive noise, as well as
             spectral shaping},
   Journal = {IEEE Communications Magazine},
   Volume = {29},
   Number = {12},
   Pages = {58-67},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1991},
   Month = {January},
   url = {http://dx.doi.org/10.1109/35.120352},
   Abstract = {Generalizations of the simple alternate-mark-inversion (AMI)
             line code that provide enhanced immunity to additive noise
             as well as spectral shaping are considered. The first
             technique is the extension of balanced disparity methods to
             multilevel signaling alphabets. This is a small step beyond
             simple codes such as AMI that are used to transmit binary
             PCM over twisted-pair cables. An important feature of this
             method (and later methods) is the use of suboptimal
             decoders. The most sophisticated technique used was
             Tomlinson filtering, and here it was possible to require a
             spectral null in the line code spectrum with a certain
             minimum width, and to minimize line code power subject to
             this requirement. This technique is compared to methods
             introduced by B. H. Marcus and P. H. Siegel (1987) in
             magnetic recording that provide spectral nulls at rational
             multiples of the symbol frequency. Theoretical
             possibilities, rather than techniques proven superior for a
             particular application, are addressed.},
   Doi = {10.1109/35.120352},
   Key = {fds235805}
}

@article{fds236019,
   Author = {Calderbank, AR},
   Title = {Geometric invariants for quasi-symmetric
             designs},
   Journal = {Journal of Combinatorial Theory, Series A},
   Volume = {47},
   Number = {1},
   Pages = {101-110},
   Publisher = {Elsevier BV},
   Year = {1988},
   Month = {January},
   ISSN = {0097-3165},
   url = {http://dx.doi.org/10.1016/0097-3165(88)90044-1},
   Abstract = {Let p be an odd prime. We derive new necessary conditions
             for the existence of 2 - (ν, k, λ) designs where the block
             intersection sizes s1, s2, ..., sn satisfy s1 ≡ s2 ≡ ...
             ≡ sn (mod p). The method is to define a nondegenerate
             scalar product on a 2m-dimensional vector space and to
             construct an m-dimensional totally singular subspace. This
             result is a generalization to nonsymmetric designs of the
             Bruck-Ryser-Chowla theorem. © 1988.},
   Doi = {10.1016/0097-3165(88)90044-1},
   Key = {fds236019}
}

@article{fds352482,
   Author = {Nguyen, DM and Calderbank, R and Deligiannis, N},
   Title = {Geometric Matrix Completion With Deep Conditional Random
             Fields.},
   Journal = {IEEE transactions on neural networks and learning
             systems},
   Volume = {31},
   Number = {9},
   Pages = {3579-3593},
   Year = {2020},
   Month = {September},
   url = {http://dx.doi.org/10.1109/tnnls.2019.2945111},
   Abstract = {The problem of completing high-dimensional matrices from a
             limited set of observations arises in many big data
             applications, especially recommender systems. The existing
             matrix completion models generally follow either a memory-
             or a model-based approach, whereas geometric matrix
             completion (GMC) models combine the best from both
             approaches. Existing deep-learning-based geometric models
             yield good performance, but, in order to operate, they
             require a fixed structure graph capturing the relationships
             among the users and items. This graph is typically
             constructed by evaluating a pre-defined similarity metric on
             the available observations or by using side information,
             e.g., user profiles. In contrast, Markov-random-fields-based
             models do not require a fixed structure graph but rely on
             handcrafted features to make predictions. When no side
             information is available and the number of available
             observations becomes very low, existing solutions are pushed
             to their limits. In this article, we propose a GMC approach
             that addresses these challenges. We consider matrix
             completion as a structured prediction problem in a
             conditional random field (CRF), which is characterized by a
             maximum a posteriori (MAP) inference, and we propose a deep
             model that predicts the missing entries by solving the MAP
             inference problem. The proposed model simultaneously learns
             the similarities among matrix entries, computes the CRF
             potentials, and solves the inference problem. Its training
             is performed in an end-to-end manner, with a method to
             supervise the learning of entry similarities. Comprehensive
             experiments demonstrate the superior performance of the
             proposed model compared to various state-of-the-art models
             on popular benchmark data sets and underline its superior
             capacity to deal with highly incomplete matrices.},
   Doi = {10.1109/tnnls.2019.2945111},
   Key = {fds352482}
}

@article{fds235939,
   Author = {Chi, Y and Calderbank, R and Pezeshki, A},
   Title = {Golay complementary waveforms for sparse delay-Doppler radar
             imaging},
   Journal = {CAMSAP 2009 - 2009 3rd IEEE International Workshop on
             Computational Advances in Multi-Sensor Adaptive
             Processing},
   Pages = {177-180},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CAMSAP.2009.5413308},
   Abstract = {We present a new approach to radar imaging that exploits
             sparsity in the matched filter domain to enable high
             resolution imaging of targets in delay and Doppler. We show
             that the vector of radar cross-ambiguity values at any fixed
             test delay cell has a sparse representation in a Vandermonde
             frame that is obtained by discretizing the Doppler axis. The
             expansion coefficients are given by the auto-correlation
             functions of the transmitted waveforms. We show that the
             orthogonal matching pursuit (OMP) algorithm can then be
             easily used to identify the locations of the radar targets
             in delay and Doppler. Unambiguous imaging in delay is
             enabled by alternating between a Golay pair of phase coded
             waveforms at the transmission to eliminate delay sidelobe
             effects. We then extend our work to multi-channel radar, by
             developing a sparse recovery approach for
             dually-polarimetric radar. We exploit sparsity in a bank of
             matched filters, each of which is matched to an entry of an
             Alamouti matrix of Golay waveforms to recover a co-polar or
             cross-polar polarization scattering component. © 2009
             IEEE.},
   Doi = {10.1109/CAMSAP.2009.5413308},
   Key = {fds235939}
}
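
%% The delay-sidelobe cancellation exploited above rests on the defining property
%% of a Golay complementary pair: the two aperiodic autocorrelations sum to a
%% delta. A minimal check using the standard doubling construction (the length-8
%% binary pair below is an illustrative choice):
```python
import numpy as np

def extend(a, b):
    """Standard Golay doubling: (a, b) -> (a|b, a|-b) preserves complementarity."""
    return np.concatenate([a, b]), np.concatenate([a, -b])

a, b = np.array([1.0]), np.array([1.0])            # trivial length-1 complementary pair
for _ in range(3):                                 # grow to a length-8 pair
    a, b = extend(a, b)

Ra = np.correlate(a, a, mode="full")               # aperiodic autocorrelations
Rb = np.correlate(b, b, mode="full")
print(Ra + Rb)                                     # 2N at zero lag, 0 at every other lag
```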

@article{fds236048,
   Author = {Calderbank, AR and Shor, PW},
   Title = {Good quantum error-correcting codes exist},
   Journal = {Physical Review A - Atomic, Molecular, and Optical
             Physics},
   Volume = {54},
   Number = {2},
   Pages = {1098-1105},
   Publisher = {American Physical Society (APS)},
   Year = {1996},
   Month = {January},
   url = {http://dx.doi.org/10.1103/PhysRevA.54.1098},
   Abstract = {A quantum error-correcting code is defined to be a unitary
             mapping (encoding) of k qubits (two-state quantum systems)
             into a subspace of the quantum state space of n qubits such
             that if any t of the qubits undergo arbitrary decoherence,
             not necessarily independently, the resulting n qubits can be
             used to faithfully reconstruct the original quantum state of
             the k encoded qubits. Quantum error-correcting codes are
              shown to exist with asymptotic rate k/n = 1 - 2H(2t/n), where
              H(p) is the binary entropy function -p log_2 p - (1-p) log_2
              (1-p). Upper bounds on this asymptotic rate are
             given. © 1996 The American Physical Society.},
   Doi = {10.1103/PhysRevA.54.1098},
   Key = {fds236048}
}
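
%% A quick numerical reading of the achievable rate quoted in the abstract,
%% k/n = 1 - 2H(2t/n), for a few arbitrary values of t/n:
```python
import numpy as np

def h2(p):
    """Binary entropy H(p) = -p log2 p - (1-p) log2 (1-p)."""
    return -p * np.log2(p) - (1 - p) * np.log2(1 - p)

for t_over_n in (0.005, 0.01, 0.02, 0.05):
    rate = 1 - 2 * h2(2 * t_over_n)                # achievable asymptotic rate k/n
    print(f"t/n = {t_over_n:5.3f}  ->  k/n >= {rate:.3f}")
```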

@article{fds235988,
   Author = {Nokleby, M and Bajwa, WU and Calderbank, R and Aazhang,
             B},
   Title = {Gossiping in groups: Distributed averaging over the wireless
             medium},
   Journal = {2011 49th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2011},
   Pages = {1242-1249},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   url = {http://dx.doi.org/10.1109/Allerton.2011.6120310},
   Abstract = {We present an approach to gossip algorithms tailored to the
             practical considerations of wireless communications.
             Traditional gossip algorithms operate via the pairwise
             exchange of estimates, which fails to capture the broadcast
             and superposition nature of the wireless medium. Adapting
             the virtual full-duplex framework of Guo and Zhang, we
             construct a communications scheme in which each node can
             broadcast its estimate to its neighbors while simultaneously
             receiving its neighbors' estimates. This full-duplex scheme
             gives rise to group gossip, a more flexible family of gossip
             algorithms built on multilateral, rather than pairwise,
             exchanges. Our approach obviates the need for
             orthogonalization or medium access; only local information
             and synchronization are necessary. Additionally, group
             gossip has better convergence properties than does
             randomized gossip. Group gossip permits a tighter bound on
             the convergence speed than randomized gossip, and in general
             the upper bound on the convergence time is at most one-third
             that of randomized gossip. © 2011 IEEE.},
   Doi = {10.1109/Allerton.2011.6120310},
   Key = {fds235988}
}
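
%% For contrast with the group-gossip scheme above, the sketch below runs the
%% classical randomized pairwise gossip baseline on a complete graph (the topology
%% and iteration count are illustrative assumptions); group gossip replaces the
%% pairwise exchange with a multilateral average over a wireless neighborhood.
```python
import numpy as np

rng = np.random.default_rng(5)
n = 50
x = rng.standard_normal(n)                         # initial node measurements
true_mean = x.mean()

# Randomized pairwise gossip: each tick, one random pair averages its two values.
# Every exchange preserves the global sum, so the fixed point is the network mean.
for _ in range(5000):
    i, j = rng.choice(n, 2, replace=False)
    x[i] = x[j] = 0.5 * (x[i] + x[j])

print("max deviation from the network average:", np.max(np.abs(x - true_mean)))
```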

@article{fds235976,
   Author = {Ashikhmin, A and Calderbank, AR},
   Title = {Grassmannian packings from operator Reed-Muller
              codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {56},
   Number = {11},
   Pages = {5689-5714},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {November},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2010.2070192},
   Abstract = {This paper introduces multidimensional generalizations of
             binary Reed-Muller codes where the codewords are projection
             operators, and the corresponding subspaces are widely
             separated with respect to the chordal distance on
             Grassmannian space. Parameters of these Grassmannian
             packings are derived and a low complexity decoding algorithm
             is developed by modifying standard decoding algorithms for
             binary Reed-Muller codes. The subspaces are associated with
             projection operators determined by Pauli matrices appearing
             in the theory of quantum error correction and this
             connection with quantum stabilizer codes may be of
             independent interest. The Grassmannian packings constructed
             here find application in noncoherent wireless communication
             with multiple antennas, where separation with respect to the
             chordal distance on Grassmannian space guarantees closeness
             to the channel capacity. It is shown that the capacity of
              the noncoherent multiple-input multiple-output (MIMO) channel
             at both low and moderate signal-to-noise ratio (SNR) (under
             the constraint that only isotropically distributed unitary
             matrices are used for information transmission) is closely
             approximated by these packings. © 2006 IEEE.},
   Doi = {10.1109/TIT.2010.2070192},
   Key = {fds235976}
}
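
%% The chordal distance used above to measure subspace separation can be computed
%% from principal angles, or equivalently from the Frobenius norm of the product
%% of orthonormal bases. A small sketch with random subspaces (the dimensions are
%% illustrative assumptions):
```python
import numpy as np

rng = np.random.default_rng(7)
n, m = 8, 2                                        # ambient and subspace dimensions (assumed)

def chordal_distance_sq(U, V):
    """Squared chordal distance between the column spans of orthonormal U and V:
    the sum of sin^2 of the principal angles, i.e. m - ||U^T V||_F^2."""
    return U.shape[1] - np.linalg.norm(U.T @ V, "fro") ** 2

U, _ = np.linalg.qr(rng.standard_normal((n, m)))
V, _ = np.linalg.qr(rng.standard_normal((n, m)))
print("squared chordal distance:", round(chordal_distance_sq(U, V), 4))
```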

@article{fds235846,
   Author = {Diggavi, SN and Al-Dhahir, N and Stamoulis, A and Calderbank,
             AR},
   Title = {Great expectations: The value of spatial diversity in
             wireless networks},
   Journal = {Proceedings of the IEEE},
   Volume = {92},
   Number = {2},
   Pages = {219-270},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2004},
   Month = {January},
   url = {http://dx.doi.org/10.1109/JPROC.2003.821914},
   Abstract = {In this paper, the effect of spatial diversity on the
             throughput and reliability of wireless networks is examined.
             Spatial diversity is realized through multiple independently
             fading transmit/receive antenna paths in single-user
             communication and through independently fading links in
             multiuser communication. Adopting spatial diversity as a
             central theme, we start by studying its information-theoretic
             foundations, then we illustrate its benefits across the
             physical (signal transmission/coding and receiver signal
             processing) and networking (resource allocation, routing,
             and applications) layers. Throughout the paper, we discuss
             engineering intuition and tradeoffs, emphasizing the strong
             interactions between the various network functionalities. ©
             2004 IEEE.},
   Doi = {10.1109/JPROC.2003.821914},
   Key = {fds235846}
}

@article{fds235876,
   Author = {Calderbank, R and Kobayashi, H},
   Title = {Greetings},
   Journal = {2006 IEEE Conference on Information Sciences and Systems,
             CISS 2006 - Proceedings},
   Pages = {3},
   Publisher = {IEEE},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1109/CISS.2006.286417},
   Doi = {10.1109/CISS.2006.286417},
   Key = {fds235876}
}

@article{fds236081,
   Author = {Nokleby, M and Bajwa, WU and Calderbank, R and Aazhang,
             B},
   Title = {Hierarchical averaging over wireless sensor
             networks},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3121-3124},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288576},
   Abstract = {We introduce an approach to gossip algorithms that exploits
             three aspects of the wireless medium: superposition,
             broadcast, and power control. Instead of sending pairwise
             messages between neighbors on a fixed network topology, we
             construct gossip algorithms in which nodes can
             simultaneously recover multiple neighbors' messages and in
             which nodes can adjust the set of their neighbors by
             adjusting transmit power. We present two averaging
             algorithms, each based on a hierarchical clustering of the
             network. In the first algorithm, clusters of nodes transmit
             their estimates locally and randomly select a representative
             node for communications at the next level. In the second,
             each cluster mutually averages and then cooperatively
             transmits at the next level. For path-loss environments,
             these schemes achieve order-optimal or near order-optimal
             performance. © 2012 IEEE.},
   Doi = {10.1109/ICASSP.2012.6288576},
   Key = {fds236081}
}

@article{fds235938,
   Author = {Bollapalli, KC and Wu, Y and Gulati, K and Khatri, S and Calderbank,
             AR},
   Title = {Highly parallel decoding of space-time codes on graphics
             processing units},
   Journal = {2009 47th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2009},
   Pages = {1262-1269},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ALLERTON.2009.5394528},
   Abstract = {Graphics Processing Units (GPUs) with a few hundred
             extremely simple processors represent a paradigm shift for
             highly parallel computations. We use this emergent GPU
             architecture to provide a first demonstration of the
             feasibility of real time ML decoding (in software) of a high
             rate space-time block code that is representative of codes
             incorporated in 4th generation wireless standards such as
             WiMAX and LTE. The decoding algorithm is conditional
             optimization which reduces to a parallel calculation that is
             a natural fit to the architecture of low cost
              GPUs. Experimental results demonstrate that asymptotically
             the GPU implementation is more than 700 times faster than a
             standard serial implementation. These results suggest that
             GPU architectures have the potential to improve the cost /
             performance tradeoff of 4th generation wireless base
             stations. Additional benefits might include reducing the
             time required for system development and the time required
             for configuration and testing of wireless base stations.
             ©2009 IEEE.},
   Doi = {10.1109/ALLERTON.2009.5394528},
   Key = {fds235938}
}

@article{fds236080,
   Author = {Carson, WR and Rodrigues, MRD and Chen, M and Carin, L and Calderbank,
             R},
   Title = {How to focus the discriminative power of a
             dictionary},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {1365-1368},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288144},
   Abstract = {This paper is motivated by the challenge of high fidelity
             processing of images using a relatively small set of
             projection measurements. This is a problem of great interest
             in many sensing applications, for example where high
             photodetector counts are precluded by a combination of
             available power, form factor and expense. The emerging
             methods of dictionary learning and compressive sensing offer
             great potential for addressing this challenge. Combining
             these methods requires that the signals of interest be
             representable as a sparse combination of elements of some
             dictionary. This paper develops a method that aligns the
             discriminative power of such a dictionary with the physical
             limitations of the imaging system. Alignment is accomplished
             by designing a projection matrix that exposes and then
             aligns the modes of the noise with those of the dictionary.
             The design algorithm is obtained by modifying an algorithm
             for designing the pre-filter to maximize the rate and
             reliability of a Multiple Input Multiple Output (MIMO)
             communications channel. The difference is that in the
             communications problem a source is being matched to a
             channel, whereas in the imaging problem a channel, or
             equivalently the noise covariance, is being matched to a
              source. Our results show that using the proposed
              communications design framework we can reduce reconstruction
              error by 20% after only 20 projections of a 28 x 28 image,
              and by 10% after 100 projections. Furthermore, the
              reconstructed images are of noticeably superior quality. ©
              2012 IEEE.},
   Doi = {10.1109/ICASSP.2012.6288144},
   Key = {fds236080}
}

@article{fds235747,
   Author = {Harms, A and Bajwa, WU and Calderbank, R},
   Title = {Identification of Linear Time-Varying Systems Through
             Waveform Diversity},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {63},
   Number = {8},
   Pages = {2070-2084},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2015},
   Month = {April},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2015.2407319},
   Abstract = {Linear, time-varying (LTV) systems composed of time shifts,
             frequency shifts, and complex amplitude scalings are
             operators that act on continuous finite-energy waveforms.
             This paper presents a novel, resource-efficient method for
             identifying the parametric description of such systems,
             i.e., the time shifts, frequency shifts, and scalings, from
             the sampled response to linear frequency modulated (LFM)
             waveforms, with emphasis on the application to radar
             processing. If the LTV operator is probed with a
             sufficiently diverse set of LFM waveforms, then the system
             can be identified with high accuracy. In the case of
             noiseless measurements, the identification is perfect, while
             in the case of noisy measurements, the accuracy is inversely
             proportional to the noise level. The use of parametric
             estimation techniques with recently proposed denoising
             algorithms allows the estimation of the parameters with high
             accuracy.},
   Doi = {10.1109/TSP.2015.2407319},
   Key = {fds235747}
}
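
%% The need for waveform diversity can be seen from the classical dechirping
%% identity: a scatterer at delay tau and Doppler nu turns a chirp of rate kappa
%% into a single tone at nu - kappa*tau after multiplication by the conjugate
%% reference, so one chirp only pins down that linear combination and chirps with
%% different rates are needed to separate delay from Doppler. A minimal numerical
%% illustration (all waveform parameters below are arbitrary assumptions, not the
%% paper's setup):
```python
import numpy as np

fs, T = 8192.0, 1.0
t = np.arange(0.0, T, 1.0 / fs)
tau, nu = 0.05, 300.0                              # true delay (s) and Doppler (Hz), assumed

def beat_frequency(kappa):
    """Dechirp the delayed, Doppler-shifted echo of a rate-kappa chirp and locate the tone."""
    ref = np.exp(1j * np.pi * kappa * t ** 2)
    echo = np.exp(2j * np.pi * nu * t) * np.exp(1j * np.pi * kappa * (t - tau) ** 2)
    spectrum = np.abs(np.fft.fft(echo * np.conj(ref)))
    freqs = np.fft.fftfreq(len(t), 1.0 / fs)
    return freqs[np.argmax(spectrum)]              # equals nu - kappa * tau

f1, f2 = beat_frequency(1000.0), beat_frequency(3000.0)   # probe with two chirp rates
tau_hat = (f1 - f2) / (3000.0 - 1000.0)            # solve the two equations in two unknowns
nu_hat = f1 + 1000.0 * tau_hat
print(f"estimated delay {tau_hat:.4f} s, Doppler {nu_hat:.1f} Hz")
```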

@article{fds235768,
   Author = {Simpson, MJ and Wilson, JW and Matthews, TE and Duarte, M and Calderbank, R and Warren, WS},
   Title = {Imaging the distribution of melanin in human skin lesions
             with pump-probe microscopy},
   Journal = {Optics InfoBase Conference Papers},
   Year = {2011},
   Month = {December},
   Key = {fds235768}
}

@article{fds235848,
   Author = {Calderbank, AR and Gilbert, A and Levchenko, K and Muthukrishnan, S and Strauss, M},
   Title = {Improved range-summable random variable construction
             algorithms},
   Journal = {Proceedings of the Annual ACM-SIAM Symposium on Discrete
             Algorithms},
   Pages = {840-849},
   Year = {2005},
   Month = {July},
   Abstract = {Range-summable universal hash functions, also known as
             range-summable random variables, are binary-valued hash
             functions which can efficiently hash single values as well
             as ranges of values from the domain. They have found several
             applications in the area of data stream processing where
             they are used to construct sketches - small-space summaries
             of the input sequence. We present two new constructions of
             range-summable universal hash functions on n-bit strings,
             one based on Reed-Muller codes which gives k-universal
              hashing using O(n log k) space and time for point
              operations and O(n^2 log k) for range operations, and
              another based on a new subcode of the second-order
              Reed-Muller code, which gives 5-universal hashing using O(n)
              space, O(n log^3 n) time for point operations, and O(n^3)
             time for range operations. We also present a new sketch data
             structure using the new hash functions which improves
             several previous results.},
   Key = {fds235848}
}

@article{fds331060,
   Author = {Calderbank, AR and Frankl, P},
   Title = {Improved Upper Bounds Concerning the Erdős-Ko-Rado
             Theorem},
   Journal = {Combinatorics, Probability and Computing},
   Volume = {1},
   Number = {2},
   Pages = {115-122},
   Publisher = {Cambridge University Press (CUP)},
   Year = {1992},
   Month = {January},
   url = {http://dx.doi.org/10.1017/S0963548300000134},
   Abstract = {A family [formula omitted] of k-element sets of an n-set is
             called t-intersecting if any two of its members overlap in
             at least t-elements. The Erdős-Ko-Rado Theorem gives a best
             possible upper bound for such a family if n ≥ n0(k, t).
             One of the most exciting open cases is when t = 2, n = 2k.
             The present paper gives an essential improvement on the
             upper bound for this case. The proofs use linear algebra and
             yield more general results. © 1992, Cambridge University
             Press. All rights reserved.},
   Doi = {10.1017/S0963548300000134},
   Key = {fds331060}
}

@article{fds235868,
   Author = {Sira, SP and Cochran, D and Papandreou-Suppappola, A and Morrell, D and Moran, B and Howards, S and Calderbank, R},
   Title = {Improving detection in sea clutter using waveform
             scheduling},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {3},
   Pages = {III1241-III1244},
   Publisher = {IEEE},
   Year = {2007},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2007.367068},
   Abstract = {In this paper, we propose a method to exploit waveform
             agility in modern radars to improve performance in the
             challenging task of detecting small targets on the ocean
             surface in heavy clutter. The approach exploits the
             compound-Gaussian model for sea clutter returns to achieve
             clutter suppression by forming an orthogonal projection of
             the received signal into the clutter subspace. Waveform
             scheduling is then performed by incorporating the
             information about the clutter into the design of the next
             transmitted waveform. A simulation study demonstrates the
             effectiveness of our approach. © 2007 IEEE.},
   Doi = {10.1109/ICASSP.2007.367068},
   Key = {fds235868}
}
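
%% The clutter-suppression step described above amounts to projecting the received
%% vector onto the orthogonal complement of an estimated clutter subspace. A
%% minimal linear-algebra sketch (the clutter basis and returns below are
%% synthetic assumptions, not the compound-Gaussian clutter model of the paper):
```python
import numpy as np

rng = np.random.default_rng(6)
n, r = 64, 5                                       # snapshot length and clutter rank (assumed)
Cb = rng.standard_normal((n, r)) + 1j * rng.standard_normal((n, r))   # estimated clutter basis
target = rng.standard_normal(n) + 1j * rng.standard_normal(n)

# Received snapshot = target return + strong clutter component + noise
y = target + Cb @ (10.0 * rng.standard_normal(r)) + 0.1 * rng.standard_normal(n)

Q, _ = np.linalg.qr(Cb)                            # orthonormal basis for the clutter subspace
P_perp = np.eye(n) - Q @ Q.conj().T                # projector onto the orthogonal complement
y_clean = P_perp @ y

print("clutter-subspace energy before:", round(np.linalg.norm(Q.conj().T @ y) ** 2, 2))
print("clutter-subspace energy after :", round(np.linalg.norm(Q.conj().T @ y_clean) ** 2, 10))
```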

@article{fds235830,
   Author = {Naguib, AF and Seshádri, N and Calderbank, AR},
   Title = {Increasing data rate over wireless channels: Space time
             coding and signal processing for high data rate wireless
             communications},
   Journal = {IEEE Signal Processing Magazine},
   Volume = {17},
   Number = {3},
   Pages = {76-92},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2000},
   Month = {January},
   ISSN = {1053-5888},
   url = {http://dx.doi.org/10.1109/79.841731},
   Doi = {10.1109/79.841731},
   Key = {fds235830}
}

@article{fds235793,
   Author = {Calderbank, AR and Chung, FRK and Sturtevant, DG},
   Title = {Increasing sequences with nonzero block sums and increasing
             paths in edge-ordered graphs},
   Journal = {Discrete Mathematics},
   Volume = {50},
   Number = {C},
   Pages = {15-28},
   Publisher = {Elsevier BV},
   Year = {1984},
   Month = {January},
   ISSN = {0012-365X},
   url = {http://dx.doi.org/10.1016/0012-365X(84)90031-1},
   Abstract = {Consider the maximum length f(k) of a (lexicographically)
              increasing sequence of vectors in GF(2)^k with the property
              that the sum of the vectors in any consecutive subsequence
              is nonzero modulo 2. We prove that (23/48) · 2^k ≤ f(k) ≤
              (1/2 + o(1)) 2^k. A related problem is the following. Suppose
              the edges of the complete graph K_n are labelled by the
              numbers 1, 2, ..., n(n-1)/2. What is the minimum α(n), over
              all edge labellings, of the maximum length of a simple path
              with increasing edge labels? We prove that α(n) ≤ (1/2 +
              o(1)) n. © 1984.},
   Doi = {10.1016/0012-365X(84)90031-1},
   Key = {fds235793}
}

@article{fds236026,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {Inequalities for Covering Codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {34},
   Number = {5},
   Pages = {1276-1280},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1988},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.21257},
   Abstract = {Any code C with covering radius R must satisfy a set of
             linear inequalities that involve the Lloyd polynomial LR(x);
             these generalize the sphere bound. The “syndrome graphs”
             associated with a linear code C help to keep track of low
             weight vectors in the same coset of C (if there are too many
             such vectors C cannot exist). As illustrations it is shown
             that t[17,10] = 3 and t[23,15] = 3, where t[n, k] is the
             smallest covering radius of any [n, k] code. © 1988
             IEEE.},
   Doi = {10.1109/18.21257},
   Key = {fds236026}
}

@article{fds236022,
   Author = {Calderbank, AR},
   Title = {Inequalities for quasi-symmetric designs},
   Journal = {Journal of Combinatorial Theory, Series A},
   Volume = {48},
   Number = {1},
   Pages = {53-64},
   Publisher = {Elsevier BV},
   Year = {1988},
   Month = {January},
   ISSN = {0097-3165},
   url = {http://dx.doi.org/10.1016/0097-3165(88)90074-X},
   Abstract = {A 2-design is said to be quasi-symmetric if there are two
             block intersection sizes. We obtain inequalities satisfied
             by the parameters of a quasi-symmetric design using linear
             programming techniques. The same methods apply to codes with
             covering radius 2 with the property that the number of
             codewords at distance 2 from a given vector ν depends on
             the distance of ν from the code. © 1988.},
   Doi = {10.1016/0097-3165(88)90074-X},
   Key = {fds236022}
}

@article{fds235931,
   Author = {Aggarwal, V and Sankar, L and Calderbank, AR and Poor,
             HV},
   Title = {Information secrecy from multiple eavesdroppers in
             orthogonal relay channels},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {2607-2611},
   Publisher = {IEEE},
   Year = {2009},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ISIT.2009.5205963},
   Abstract = {The secrecy capacity of relay channels with orthogonal
             components is studied in the presence of additional passive
             eavesdropper nodes. The relay and destination receive
             signals from the source on two orthogonal channels such that
             the destination also receives transmissions from the relay
             on its channel. The eavesdropper(s) can overhear either one
             or both of the orthogonal channels. For a single
             eavesdropper node, the secrecy capacity is shown to be
              achieved by a partial decode-and-forward (PDF) scheme when
             the eavesdropper can overhear only one of the two orthogonal
             channels. For the case of two eavesdropper nodes, secrecy
             capacity is shown to be achieved by PDF for a sub-class of
             channels. © 2009 IEEE.},
   Doi = {10.1109/ISIT.2009.5205963},
   Key = {fds235931}
}

@article{fds326748,
   Author = {Wang, L and Chen, M and Rodrigues, M and Wilcox, D and Calderbank, R and Carin, L},
   Title = {Information-Theoretic Compressive Measurement
             Design.},
   Journal = {IEEE transactions on pattern analysis and machine
             intelligence},
   Volume = {39},
   Number = {6},
   Pages = {1150-1164},
   Year = {2017},
   Month = {June},
   url = {http://dx.doi.org/10.1109/tpami.2016.2568189},
   Abstract = {An information-theoretic projection design framework is
             proposed, of interest for feature design and compressive
             measurements. Both Gaussian and Poisson measurement models
             are considered. The gradient of a proposed
             information-theoretic metric (ITM) is derived, and a
             gradient-descent algorithm is applied in design; connections
             are made to the information bottleneck. The fundamental
             solution structure of such design is revealed in the case of
             a Gaussian measurement model and arbitrary input statistics.
             This new theoretical result reveals how ITM parameter
             settings impact the number of needed projection
             measurements, with this verified experimentally. The ITM
             achieves promising results on real data, for both signal
             recovery and classification.},
   Doi = {10.1109/tpami.2016.2568189},
   Key = {fds326748}
}

@article{fds235754,
   Author = {Nokleby, M and Rodrigues, M and Calderbank, R},
   Title = {Information-theoretic criteria for the design of compressive
             subspace classifiers},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3067-3071},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2014.6854164},
   Abstract = {Using Shannon theory, we derive fundamental, asymptotic
             limits on the classification of low-dimensional subspaces
             from compressive measurements. We identify a syntactic
             equivalence between the classification of subspaces and the
             communication of codewords over non-coherent,
             multiple-antenna channels, from which we derive sharp bounds
             on the number of classes that can be discriminated with low
             misclassification probability as a function of the signal
             dimensionality and the signal-to-noise ratio. While the
             bounds are asymptotic in the limit of high dimension, they
             provide intuition for classifier design at finite dimension.
             We validate this intuition via an application to face
             recognition. © 2014 IEEE.},
   Doi = {10.1109/ICASSP.2014.6854164},
   Key = {fds235754}
}

@article{fds235770,
   Author = {Nokleby, M and Calderbank, R and Rodrigues, MRD},
   Title = {Information-theoretic limits on the classification of
             Gaussian mixtures: Classification on the Grassmann
             manifold},
   Journal = {2013 IEEE Information Theory Workshop, ITW
             2013},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ITW.2013.6691253},
   Abstract = {Motivated by applications in high-dimensional signal
             processing, we derive fundamental limits on the performance
             of compressive linear classifiers. By analogy with Shannon
             theory, we define the classification capacity, which
             quantifies the maximum number of classes that can be
             discriminated with low probability of error, and the
             diversity-discrimination tradeoff, which quantifies the
             tradeoff between the number of classes and the probability
             of classification error. For classification of Gaussian
             mixture models, we identify a duality between classification
             and communications over non-coherent multiple-antenna
             channels. This duality allows us to characterize the
             classification capacity and diversity-discrimination
             tradeoff using existing results from multiple-antenna
             communication. We also identify the easiest possible
             classification problems, which correspond to low-dimensional
             subspaces drawn from an appropriate Grassmann manifold. ©
             2013 IEEE.},
   Doi = {10.1109/ITW.2013.6691253},
   Key = {fds235770}
}

@article{fds235864,
   Author = {Calderbank, AR and Howard, SD and Moran, W and Pezeshki, A and Zoltowski, M},
   Title = {Instantaneous radar polarimetry with multiple
             dually-polarized antennas},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {757-761},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2006.354850},
   Abstract = {Fully polarimetric radar systems are capable of
             simultaneously transmitting and receiving in two orthogonal
             polarizations. Instantaneous radar polarimetry exploits both
             polarization modes of a dually-polarized radar transmitter
             and receiver on a pulse-by-pulse basis, and can improve
             radar detection performance and suppress range sidelobes. In
             this paper, we extend the use of instantaneous radar
             polarimetry to radar systems with multiple dually-polarized
             transmit and receive antennas. Alamouti signal processing is
             used to coordinate transmission of Golay pairs of
             phase-coded waveforms across polarizations and multiple antennas.
             The integration of multi-antenna signal processing with
             instantaneous radar polarimetry can further improve the
             detection performance, at a computational cost comparable to
             single channel matched filtering.},
   Doi = {10.1109/ACSSC.2006.354850},
   Key = {fds235864}
}
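
%% Editor's note: the Alamouti space-time block code referred to in the entry
%% above is the standard 2 x 2 orthogonal design (rows index time slots,
%% columns index the two transmit channels; * denotes complex conjugation):
%%
%%     X = \begin{pmatrix} s_1 & s_2 \\ -s_2^{*} & s_1^{*} \end{pmatrix}
%%
%% Its columns are orthogonal for any symbols s_1, s_2, which is what permits
%% simple linear processing at the receiver; the paper applies this standard
%% structure across polarizations and antennas rather than introducing it.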

@article{fds235934,
   Author = {Wu, Y and Jia, T and Calderbank, R and Duel-Hallen, A and Hallen,
             H},
   Title = {Integration of code diversity and long-range channel
             prediction in wireless communication},
   Journal = {Proceedings of the 2009 International Conference on
             Electromagnetics in Advanced Applications, ICEAA
             '09},
   Pages = {241-244},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ICEAA.2009.5297458},
   Abstract = {Code diversity integrates space-time coding with beamforming
             by using a small number of feedback bits to select from a
             family of space-time codes. Different codes lead to
             different induced channels at the receiver, where Channel
             State information (CSI) is used to instruct the transmitter
             how to choose the code. Feedback can be combined with
             sub-optimal low complexity decoding of the component codes
             to match Maximum-Likelihood (ML) decoding performance of any
             individual code in the family. It can also be combined with
             ML decoding of the component codes to improve performance
             beyond ML decoding performance of any individual code. Prior
             analysis of code diversity did not take into account the
             effect of the mobile speed and the delay in the feedback
             channel. This paper demonstrates the practicality of code
             diversity in space-time coded systems by showing that
             predicted performance gains based on instantaneous feedback
             are largely preserved when the feedback is based on
             long-range prediction of rapidly time-varying correlated
             fading channels. Simulations are presented for two channel
             models; the first is the Jakes model where angles of arrival
             are uniformly distributed and the arrival rays have equal
             strengths, and the second is a model derived from a physical
             scattering environment where the parameters associated with
             the reflectors vary in time and the arrival rays have
             different strengths and non-symmetric arrival angles. ©
             2009 IEEE.},
   Doi = {10.1109/ICEAA.2009.5297458},
   Key = {fds235934}
}

@article{fds235952,
   Author = {Li, Y and Li, Z and Chiang, M and Calderbank, AR},
   Title = {Intelligent video network engineering with distributed
             optimization: Two case studies},
   Journal = {Studies in Computational Intelligence},
   Volume = {280},
   Pages = {253-290},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2010},
   Month = {March},
   ISSN = {1860-949X},
   url = {http://dx.doi.org/10.1007/978-3-642-11686-5_8},
   Abstract = {Video is becoming the dominant traffic over the Internet. To
             provide better Quality of Service (QoS) to end users while
             also achieving network resource efficiency is an important
             problem for network operators, content providers, and
             consumers alike. In this work, we present intelligent video
             networking solutions for IPTV and Peer-to-Peer (P2P) systems
             that optimize users' QoS experience under network resource
             constraints. Given
             the limited network bandwidth resources, how to provide
             Internet users with good video playback Quality of Service
             (QoS) is a key problem. For IPTV systems in which video
             clips compete for bandwidth, we propose a Content-Aware
             distortion-Fair (CAF) video delivery scheme, which is aware
             of the characteristics of video frames and ensures max-min
             distortion fair sharing among video flows. Different from
             bandwidth fair sharing, CAF targets end-to-end video
             playback quality fairness among users when bandwidth is
             insufficient, based on the fact that users directly care
             about video quality rather than bandwidth. The proposed CAF
             approach does not require rate-distortion modeling of the
             source, which is difficult to estimate, but instead, it
             exploits the temporal prediction structure of the video
             sequences along with a frame drop distortion metric to guide
             resource allocations and coordination. Experimental results
             show that the proposed approach operates with limited
             overhead in computation and communication, and yields better
             QoS, especially when the network is congested. For Internet
             based video broadcasting applications such as IPTV, the
             Peer-to-Peer (P2P) streaming scheme has been found to be an
             effective solution. An important issue in live broadcasting
             is to avoid playback buffer underflow. How to utilize the
             playback buffer and upload bandwidth of peers to minimize
             the freeze-ups in playback, is the problem we try to solve.
             We propose a successive water-filling (SWaF) algorithm for
             the video transmission scheduling in P2P live streaming
             system, to minimize the playback freeze-ups among peers.
             The SWaF algorithm only requires each peer to optimally
             transmit (within its upload bandwidth) part of the video
             segments available in its buffer to other peers requiring
             the content, and to pass a small number of messages to some
             other peers. Moreover, SWaF has low complexity and provable
             optimality. Numerical results demonstrate the effectiveness
             of the proposed
             algorithm. © 2010 Springer-Verlag Berlin
             Heidelberg.},
   Doi = {10.1007/978-3-642-11686-5_8},
   Key = {fds235952}
}

@article{fds236068,
   Author = {Calderbank, AR and Duel-Hallen, A and Fishburn, PC and Rabinovich,
             A},
   Title = {Interpolation by convolutional codes, overload distortion,
             and the erasure channel},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {45},
   Number = {1},
   Pages = {94-105},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1999},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.746774},
   Abstract = {This paper investigates how closely randomly generated
             binary source sequences can be matched by convolutional code
             codewords. What distinguishes it from prior work is that a
             randomly chosen subsequence with density A is to be matched
             as closely as possible. The so-called marked bits of the
             subsequence could indicate overload quantization points for
             a source sample generated from the tails of a probability
             distribution. They might also indicate bits where the
             initial estimate is considered reliable, as might happen in
             iterated decoding. The capacity of a convolutional code to
             interpolate the marked subsequence might be viewed as a
             measure of its ability to handle overload distortion. We
             analyze this capacity using a Markov chain whose states are
             sets of subsets of trellis vertices of the convolutional
             code. We investigate the effect of memory on the probability
             of perfect interpolation and calculate the residual rate on
             the unmarked bits of the binary source sequence. We relate
             our interpolation methodology to sequence-based methods of
             quantization and use it to analyze the performance of
             convolutional codes on the pure erasure channel. © 1999
             IEEE.},
   Doi = {10.1109/18.746774},
   Key = {fds236068}
}

@article{fds235741,
   Author = {Calderbank, AR and Naguib, AF},
   Title = {Introduction to space-time codes},
   Volume = {9780521851053},
   Pages = {133-153},
   Publisher = {Cambridge University Press},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1017/CBO9780511616815.008},
   Abstract = {Introduction Information-theoretic analysis by Foschini
             (1996) and by Telatar (1999) shows that multiple antennas at
             the transmitter and receiver enable very high rate wireless
             communication. Space-time codes, introduced by Tarokh et al.
             (1998), improve the reliability of communication over fading
             channels by correlating signals across different transmit
             antennas. Design criteria developed for the high-SNR regime
             in Tarokh et al. (1998) and Guey et al. (1999) are presented in Section
             7.3 from the perspective of typical error events (following
             the exposition by Tse and Viswanath (2005)). Techniques for
             multiple access and broadcast communication are described
             very briefly in Sections 7.9 and 7.10, where algebraic
             structure enables simple implementation. The emphasis
             throughout is on low cost, low complexity mobile receivers.
             Section 7.2 provides a description of set partitioning,
             which was developed by Ungerboeck (1982) as the basis of
             code design for the additive white Gaussian noise (AWGN)
             channel. The importance of set partitioning to code design
             for the AWGN channel is that it provides a lower bound on
             squared Euclidean distance between signals that depends only
             on the binary sum of signal labels. Section 7.9 describes
             the importance of set partitioning to code design for
             wireless channels, where it provides a mechanism for
             translating constraints in the binary domain into lower
             bounds on diversity protection in the complex domain.
             Section 7.4 describes space-time trellis codes, starting
             from simple delay diversity, and then using intuition about
             the product distance to realize additional coding
             gain.},
   Doi = {10.1017/CBO9780511616815.008},
   Key = {fds235741}
}

@article{fds235949,
   Author = {Matz, G and Calderbank, R and Mecklenbrauker, C and Naguib, A and Viterbo, E},
   Title = {Introduction to the issue on managing complexity in
             multiuser MIMO systems},
   Journal = {IEEE Journal on Selected Topics in Signal
             Processing},
   Volume = {3},
   Number = {6},
   Pages = {906-909},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {December},
   ISSN = {1932-4553},
   url = {http://dx.doi.org/10.1109/JSTSP.2009.2036955},
   Doi = {10.1109/JSTSP.2009.2036955},
   Key = {fds235949}
}

@article{fds235866,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {Jointly optimal congestion and contention control based on
             network utility maximization},
   Journal = {IEEE Communications Letters},
   Volume = {10},
   Number = {3},
   Pages = {216-218},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {March},
   ISSN = {1089-7798},
   url = {http://dx.doi.org/10.1109/LCOMM.2006.1603389},
   Abstract = {We study joint end-to-end congestion control and per-link
             medium access control (MAC) in ad-hoc networks. We use a
             network utility maximization formulation, in which by
             adjusting the types of utility functions, we can accommodate
             multi-class services as well as exploit the tradeoff between
             efficiency and fairness of resource allocation. Despite the
             inherent difficulties of non-convexity and non-separability
             of the optimization problem, we show that, with
             readily-verifiable sufficient conditions, we can develop a
             distributed algorithm that converges to the globally and
             jointly optimal rate allocation and persistence
             probabilities. © 2006 IEEE.},
   Doi = {10.1109/LCOMM.2006.1603389},
   Key = {fds235866}
}
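
%% Editor's note: the basic network utility maximization (NUM) problem referred
%% to in this and several of the following entries is, in its standard form
%% (source rates x_s, concave utilities U_s, link capacities c_l, and L(s) the
%% set of links used by source s),
%%
%%     \max_{x \ge 0} \ \sum_{s} U_s(x_s)
%%     \quad \text{subject to} \quad
%%     \sum_{s:\, l \in L(s)} x_s \le c_l \quad \text{for every link } l,
%%
%% with the papers above generalizing the utilities and constraint set to
%% capture per-link persistence probabilities and multi-class services.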

@article{fds235856,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {Jointly optimal congestion and medium access control in ad
             hoc wireless networks},
   Journal = {IEEE Vehicular Technology Conference},
   Volume = {1},
   Pages = {284-288},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISSN = {1550-2252},
   url = {http://dx.doi.org/10.1109/VETECS.2004.1387959},
   Abstract = {We study joint end-to-end congestion control and per-link
             medium access control (MAC) in ad-hoc wireless networks. We
             use a network utility maximization formulation, in which by
             adjusting the types of utility functions, we can accommodate
             multi-class services as well as exploit the tradeoff between
             efficiency and fairness of resource allocation. Despite the
             inherent difficulties of non-convexity and non-separability
             of the optimization problem, we show that, under
             readily-verifiable sufficient conditions, we can develop a
             distributed algorithm that converges to the globally and
             jointly optimal rate allocation and persistence
             probabilities. A key contribution is that our results can
             accommodate general concave utility functions rather than
             just the logarithmic utility function in existing results.
             © 2006 IEEE.},
   Doi = {10.1109/VETECS.2004.1387959},
   Key = {fds235856}
}

@article{fds235775,
   Author = {Chi, Y and Calderbank, R},
   Title = {Knowledge-enhanced matching pursuit},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {6576-6580},
   Publisher = {IEEE},
   Year = {2013},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2013.6638933},
   Abstract = {Compressive Sensing is possible when the sensing matrix acts
             as a near isometry on signals of interest that can be
             sparsely or compressively represented. The attraction of
             greedy algorithms such as Orthogonal Matching Pursuit is
             their simplicity. However they fail to take advantage of
             both the structure of the sensing matrix and any prior
             information about the sparse signal. This paper introduces
             an oblique projector to matching pursuit algorithms to
             enhance detection of a component that is present in the
             signal by reducing interference from other candidate
             components based on prior information about the signal as
             well as the structure of the sensing matrix. Numerical
             examples demonstrate that performance as a function of SNR
             is superior to conventional matching pursuit. © 2013
             IEEE.},
   Doi = {10.1109/ICASSP.2013.6638933},
   Key = {fds235775}
}
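
%% Editor's note: below is a minimal sketch of standard Orthogonal Matching
%% Pursuit, the baseline greedy algorithm that the entry above augments with an
%% oblique projector; it is not the paper's knowledge-enhanced variant, and the
%% matrix and signal names are illustrative assumptions.

import numpy as np

def omp(A, y, sparsity):
    """Standard Orthogonal Matching Pursuit: greedily select columns of A."""
    residual = y.astype(float).copy()
    support = []
    coeffs = np.zeros(0)
    for _ in range(sparsity):
        corr = np.abs(A.T @ residual)
        corr[support] = 0.0                      # do not pick a column twice
        support.append(int(np.argmax(corr)))
        coeffs, *_ = np.linalg.lstsq(A[:, support], y, rcond=None)
        residual = y - A[:, support] @ coeffs    # remove the fitted part of y
    x_hat = np.zeros(A.shape[1])
    x_hat[support] = coeffs
    return x_hat

# Toy usage with a random sensing matrix and a 3-sparse signal.
rng = np.random.default_rng(0)
A = rng.standard_normal((40, 100))
x_true = np.zeros(100)
x_true[[5, 17, 42]] = [1.0, -2.0, 0.5]
x_hat = omp(A, A @ x_true, sparsity=3)
print(np.linalg.norm(x_hat - x_true))            # typically near zero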

@article{fds235817,
   Author = {Vijay Kumar and PV and Helleseth, T and Calderbank, AR and Roger
             Hammons, AR},
   Title = {Large families of quaternary sequences with low
             correlation},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {71},
   Publisher = {IEEE},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ISIT.1994.394899},
   Abstract = {A nested chain of quaternary-sequence families having low
             correlation values is presented. The families are suitable
             for application in CDMA systems employing QPSK modulation.
             © 1994 IEEE.},
   Doi = {10.1109/ISIT.1994.394899},
   Key = {fds235817}
}

@article{fds235762,
   Author = {Vijay Kumar and P and Helleseth, T and Calderbank, AR and Hammons,
             AR},
   Title = {Large families of quaternary sequences with low
             correlation},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {71-},
   Year = {1994},
   Month = {December},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.1994.394899},
   Abstract = {A nested chain of quaternary-sequence families having low
             correlation values is presented. The families are suitable
             for application in CDMA systems employing QPSK
             modulation.},
   Doi = {10.1109/ISIT.1994.394899},
   Key = {fds235762}
}

@article{fds236051,
   Author = {Vijay Kumar and P and Helleseth, T and Calderbank, AR and Roger Hammons,
             A},
   Title = {Large families of quaternary sequences with low
             correlation},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {42},
   Number = {2},
   Pages = {579-592},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1996},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.485726},
   Abstract = {A family of quaternary (Z4-alphabet) sequences of length
             L = 2^r - 1, size M > L^2 + 3L + 2, and maximum nontrivial
             correlation parameter Cmax < 2√(L+1) + 1 is presented. The sequence
             family always contains the four-phase family A. When r is
             odd, it includes the family of binary Gold sequences. The
             sequence family is easily generated using two shift
             registers, one binary, the other quaternary. The
             distribution of correlation values is provided. The
             construction can be extended to produce a chain of sequence
             families, with each family in the chain containing the
             preceding family. This gives the design flexibility with
             respect to the number of intermittent users that can be
             supported, in a code-division multiple-access cellular radio
             system. When r is odd, the sequence families in the chain
             correspond to shortened Z4 -linear versions of the
             Delsarte-Goethals codes. © 1996
             IEEE.},
   Doi = {10.1109/18.485726},
   Key = {fds236051}
}

@article{fds235995,
   Author = {Thejaswi, PSC and Bennatan, A and Zhang, J and Calderbank, AR and Cochran, D},
   Title = {Layered coding for interference channels with partial
             transmitter side information},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {57},
   Number = {5},
   Pages = {2765-2780},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {May},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2011.2119670},
   Abstract = {A two-user interference channel is considered where each
             transmitter has access to a part of the information intended
             to the other destination. A primary objective is to maximize
             the information rates, by exploring the cooperation between
             the transmitters for interference mitigation, based on the
             partial side information. It is clear that full cooperation
             between the transmitters is not possible since each
             transmitter has only a part of the side information. With
             this insight, several "layered coding" schemes, consisting
             of binning and superposition at different stages, are
             developed. These schemes are carefully built on coding
             strategies for the classical interference channel and node
             cooperation mechanisms. In particular, two layered coding
             schemes, which are based on a combination of MIMO broadcast
             coding and Han-Kobayashi (HK) coding, are thoroughly
             studied: the first one, namely layered coding with binning,
             makes heavy use of Gelfand-Pinsker binning and the HK
             coding and the second one, namely layered superposition
             coding, involves superposition coding over different tiers.
             Rate regions corresponding to the proposed schemes are
             derived. The application of these coding schemes is then
             illustrated for the Gaussian case, and numerical results
             corroborate that the proposed layered coding schemes yield
             substantial gains at high SNR. © 2011 IEEE.},
   Doi = {10.1109/TIT.2011.2119670},
   Key = {fds235995}
}

@article{fds235883,
   Author = {Chiang, M and Low, SH and Calderbank, AR and Doyle,
             JC},
   Title = {Layering as optimization decomposition: A mathematical
             theory of network architectures},
   Journal = {Proceedings of the IEEE},
   Volume = {95},
   Number = {1},
   Pages = {255-312},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {January},
   ISSN = {0018-9219},
   url = {http://dx.doi.org/10.1109/JPROC.2006.887322},
   Abstract = {Network protocols in layered architectures have historically
             been obtained on an ad hoc basis, and many of the recent
             cross-layer designs are also conducted through piecemeal
             approaches. Network protocol stacks may instead be
             holistically analyzed and systematically designed as
             distributed solutions to some global optimization problems.
             This paper presents a survey of the recent efforts towards a
             systematic understanding of layering as optimization
             decomposition, where the overall communication network is
             modeled by a generalized network utility maximization
             problem, each layer corresponds to a decomposed subproblem,
             and the interfaces among layers are quantified as functions
             of the optimization variables coordinating the subproblems.
             There can be many alternative decompositions, leading to a
             choice of different layering architectures. This paper
             surveys the current status of horizontal decomposition into
             distributed computation, and vertical decomposition into
             functional modules such as congestion control, routing,
             scheduling, random access, power control, and channel
             coding. Key messages and methods arising from many recent
             works are summarized, and open issues discussed. Through
             case studies, it is illustrated how layering as Optimization
             Decomposition provides a common language to think about
             modularization in the face of complex, networked
             interactions, a unifying, top-down approach to design
             protocol stacks, and a mathematical theory of network
             architectures. © 2006 IEEE.},
   Doi = {10.1109/JPROC.2006.887322},
   Key = {fds235883}
}

@article{fds235873,
   Author = {Chiang, M and Low, SH and Calderbank, AR and Doyle,
             JC},
   Title = {Layering as optimization decomposition: Current status and
             open issues},
   Journal = {2006 IEEE Conference on Information Sciences and Systems,
             CISS 2006 - Proceedings},
   Pages = {355-362},
   Publisher = {IEEE},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1109/CISS.2006.286492},
   Abstract = {Network protocols in layered architectures have historically
             been obtained on an ad-hoc basis, and many of the recent
             cross-layer designs are conducted through piecemeal
             approaches. Network protocols may instead be holistically
             analyzed and systematically designed as distributed
             solutions to some global optimization problems in the form
             of generalized Network Utility Maximization (NUM), providing
             insight on what they optimize and structures of the network
             protocol stack. This paper presents a short survey of the
             recent efforts towards a systematic understanding of
             "layering" as "optimization decomposition", where the
             overall communication network is modeled by a generalized
             NUM problem, each layer corresponds to a decomposed
             subproblem, and the interfaces among layers are quantified
             as functions of the optimization variables coordinating the
             subproblems. Furthermore, there are many alternative
             decompositions, each leading to a different layering
             architecture. Industry adoption of this unifying framework
             has also started. Here we summarize the current status of
             horizontal decomposition into distributed computation and
             vertical decomposition into functional modules such as
             congestion control, routing, scheduling, random access,
             power control, and coding. Key messages and methodologies
             arising out of many recent works are listed. Then we present
             a list of challenging open issues in this area and the
             initial progress made on some of them. © 2006
             IEEE.},
   Doi = {10.1109/CISS.2006.286492},
   Key = {fds235873}
}

@article{fds235858,
   Author = {Chiang, M and Low, SH and Calderbank, AR and Doyle,
             JC},
   Title = {Layering as optimization decomposition: Framework and
             examples},
   Journal = {2006 IEEE Information Theory Workshop, ITW
             2006},
   Pages = {52-56},
   Year = {2006},
   Month = {November},
   Abstract = {Network protocols in layered architectures have historically
             been obtained primarily on an ad-hoc basis. Recent research
             has shown that network protocols may instead be holistically
             analyzed and systematically designed as distributed
             solutions to some global optimization problems in the form
             of Network Utility Maximization (NUM), providing insight
             into what they optimize and structures of the network
             protocol stack. This paper presents a short survey of the
             recent efforts towards a systematic understanding of
             'layering' as 'optimization decomposition', where the
             overall communication network is modeled by a generalized
             NUM problem, each layer corresponds to a decomposed
             subproblem, and the interfaces among layers are quantified
             as functions of the optimization variables coordinating the
             subproblems. Different decompositions lead to alternative
             layering architectures. We summarize several examples of
             horizontal decomposition into distributed computation and
             vertical decomposition into functional modules such as
             congestion control, routing, scheduling, random access,
             power control, and coding. © 2006 IEEE.},
   Key = {fds235858}
}

@article{fds235871,
   Author = {Chiang, M and Low, SH and Calderbank, AR and Doyle,
             JC},
   Title = {Layering as optimization decomposition: Questions and
             answers},
   Journal = {Proceedings - IEEE Military Communications Conference
             MILCOM},
   Publisher = {IEEE},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1109/MILCOM.2006.302293},
   Abstract = {Network protocols in layered architectures have historically
             been obtained on an ad-hoc basis, and many of the recent
             cross-layer designs are conducted through piecemeal
             approaches. Network protocols may instead be holistically
             analyzed and systematically designed as distributed
             solutions to some global optimization problems in the form
             of generalized Network Utility Maximization (NUM), providing
             insight on what they optimize and on the structures of
             network protocol stacks. In the form of 10 Questions and
             Answers, this paper presents a short survey of the recent
             efforts towards a systematic understanding of "layering" as
             "optimization decomposition". The overall communication
             network is modeled by a generalized NUM problem, each layer
             corresponds to a decomposed subproblem, and the interfaces
             among layers are quantified as functions of the optimization
             variables coordinating the subproblems. Furthermore, there
             are many alternative decompositions, each leading to a
             different layering architecture. Industry adoption of this
             unifying framework has also started. Here we summarize the
             current status of horizontal decomposition into distributed
             computation and vertical decomposition into functional
             modules such as congestion control, routing, scheduling,
             random access, power control, and coding. We also discuss
             under-explored future research directions in this area. More
             importantly than proposing any particular crosslayer design,
             this framework is working towards a mathematical foundation
             of network architectures and the design process of
             modularization.},
   Doi = {10.1109/MILCOM.2006.302293},
   Key = {fds235871}
}

@article{fds343643,
   Author = {Zhu, W and Qiu, Q and Huang, J and Calderbank, R and Sapiro, G and Daubechies, I},
   Title = {LDMNet: Low Dimensional Manifold Regularized Neural
             Networks},
   Journal = {Proceedings of the IEEE Computer Society Conference on
             Computer Vision and Pattern Recognition},
   Pages = {2743-2751},
   Year = {2018},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CVPR.2018.00290},
   Abstract = {Deep neural networks have proved very successful on
             archetypal tasks for which large training sets are
             available, but when the training data are scarce, their
             performance suffers from overfitting. Many existing methods
             of reducing overfitting are data-independent. Data-dependent
             regularizations are mostly motivated by the observation that
             data of interest lie close to a manifold, which is typically
             hard to parametrize explicitly. These methods usually only
             focus on the geometry of the input data, and do not
             necessarily encourage the networks to produce geometrically
             meaningful features. To resolve this, we propose the
             Low-Dimensional-Manifold-regularized neural Network
             (LDMNet), which incorporates a feature regularization method
             that focuses on the geometry of both the input data and the
             output features. In LDMNet, we regularize the network by
             encouraging the combination of the input data and the output
             features to sample a collection of low dimensional
             manifolds, which are searched efficiently without explicit
             parametrization. To achieve this, we directly use the
             manifold dimension as a regularization term in a variational
             functional. The resulting Euler-Lagrange equation is a
             Laplace-Beltrami equation over a point cloud, which is
             solved by the point integral method without increasing the
             computational complexity. In the experiments, we show that
             LDMNet significantly outperforms widely-used regularizers.
             Moreover, LDMNet can extract common features of an object
             imaged via different modalities, which is very useful in
             real-world applications such as cross-spectral face
             recognition.},
   Doi = {10.1109/CVPR.2018.00290},
   Key = {fds343643}
}

@article{fds235918,
   Author = {Islam, KMZ and Rabiei, P and Al-Dhahir, N and Diggavi, SN and Calderbank, AR},
   Title = {Linear diversity-embedding STBC: Design issues and
             applications},
   Journal = {IEEE Transactions on Communications},
   Volume = {57},
   Number = {6},
   Pages = {1578-1583},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {July},
   ISSN = {0090-6778},
   url = {http://dx.doi.org/10.1109/TCOMM.2009.06.080008},
   Abstract = {We design a novel class of space-time codes, called linear
             diversity-embedding space-time block codes (LDE-STBC) where
             a high-rate STBC is linearly superimposed on a high-diversity
             STBC without requiring channel knowledge at the transmitter.
             In applying this scheme to multimedia wireless
             communications, each traffic type constitutes a transmission
             layer that operates at a suitable rate-diversity tradeoff
             point according to its quality-of-service requirements.
             This, in turn, provides an unequal-error-protection (UEP)
             capability to the different information traffic types and
             allows a form of wireless communications where the high-rate
             STBC opportunistically takes advantage of good channel
             realizations while the embedded high-diversity STBC ensures
             that at least part of the information is decoded reliably.
             We investigate transceiver design issues specific to
             LDE-STBC including reduced-complexity coherent decoding and
             effective schemes to vary the coding gain to further enhance
             UEP capabilities of the code. Furthermore, we investigate
             the application of LDE-STBC to wireless multicasting and
             demonstrate its performance advantage over conventional
             equal-error-protection STBC. © 2009 IEEE.},
   Doi = {10.1109/TCOMM.2009.06.080008},
   Key = {fds235918}
}

@article{fds236014,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {Linear inequalities for covering codes},
   Volume = {25 n 13},
   Pages = {33},
   Year = {1988},
   Month = {December},
   Abstract = {Summary form only given, as follows. Any code C with
             covering radius R must satisfy a set of linear inequalities
             that involve the Lloyd polynomial LR(x); these generalize
             the sphere bound. The syndrome graphs associated with a
             linear code C help to keep track of low weight vectors in
             the same coset of C (if there are too many such vectors C
             cannot exist). As illustrations it is shown that t[17, 10] =
             3 and t[23, 15] = 3, where t[n, k] is the smallest covering
             radius of any [n, k] code.},
   Key = {fds236014}
}

@article{fds235828,
   Author = {Calderbank, AR and Daubechies, I and Sweldens, W and Yeo,
             BL},
   Title = {Lossless image compression using integer to integer wavelet
             transforms},
   Journal = {IEEE International Conference on Image Processing},
   Volume = {1},
   Pages = {596-599},
   Publisher = {IEEE Comput. Soc},
   Year = {1997},
   Month = {December},
   url = {http://dx.doi.org/10.1109/icip.1997.647983},
   Abstract = {Invertible wavelet transforms that map integers to integers
             are important for lossless representations. In this paper,
             we present an approach to build integer to integer wavelet
             transforms based upon the idea of factoring wavelet
             transforms into lifting steps. This allows the construction
             of an integer version of every wavelet transform. We
             demonstrate the use of these transforms in lossless image
             compression.},
   Doi = {10.1109/icip.1997.647983},
   Key = {fds235828}
}
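
%% Editor's note: a minimal sketch of the simplest integer-to-integer wavelet
%% transform built from lifting steps (the one-level integer Haar / S-transform).
%% It illustrates the idea of the entry above but is not the paper's full set of
%% filter constructions.

def forward(pairs):
    """One level of the integer Haar transform via lifting: (a, b) -> (s, d)."""
    out = []
    for a, b in pairs:
        d = a - b            # predict step: difference
        s = b + (d >> 1)     # update step: s = floor((a + b) / 2)
        out.append((s, d))
    return out

def inverse(pairs):
    """Undo the lifting steps exactly, recovering the original integers."""
    out = []
    for s, d in pairs:
        b = s - (d >> 1)
        a = b + d
        out.append((a, b))
    return out

data = [(7, 3), (-2, 5), (100, 101)]
assert inverse(forward(data)) == data    # lossless: integers map back to integers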

@article{fds235925,
   Author = {Howard, SD and Sirianunpiboon, S and Calderbank,
             AR},
   Title = {Low complexity essentially maximum likelihood decoding of
             perfect space-time block codes},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2725-2728},
   Publisher = {IEEE},
   Year = {2009},
   Month = {September},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2009.4960186},
   Abstract = {Perfect space-time block codes (STBCs) were first introduced
             by Oggier et al. to have full rate, full diversity and
             non-vanishing determinant. A maximum likelihood decoder
             based on the sphere decoder has been used for efficient
             decoding of perfect STBCs. However the worst-case complexity
             for the sphere decoder is an exhaustive search. In this
             paper we present a reduced complexity algorithm for 3 x 3
             perfect STBC which gives essentially maximum likelihood (ML)
             performance and which can be extended to other perfect STBC.
             The algorithm is based on the conditional maximization of
             the likelihood function with respect to one of the set of
             signal points given another. There are a number of choices
             for which signal points to condition on and the underlying
             structure of the code guarantees that one of the choices is
             good with high probability. Furthermore, the approach can be
             integrated with the sphere decoding algorithm with worst
             case complexity corresponding exactly to that of our
             algorithm. ©2009 Australian Crown Copyright.},
   Doi = {10.1109/ICASSP.2009.4960186},
   Key = {fds235925}
}

@article{fds235764,
   Author = {Xie, Y and Chi, Y and Calderbank, R},
   Title = {Low-rank matrix recovery with Poisson noise},
   Journal = {2013 IEEE Global Conference on Signal and Information
             Processing, GlobalSIP 2013 - Proceedings},
   Pages = {622},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   url = {http://dx.doi.org/10.1109/GlobalSIP.2013.6736959},
   Abstract = {Estimating an image M* ∈ ℝ_+^{m1×m2} from its linear measurements
             under Poisson noise is an important problem arising from
             applications such as optical imaging, nuclear medicine and
             x-ray imaging [1]. When the image M* has a low-rank
             structure, we can use a small number of linear measurements
             to recover M*, also known as low-rank matrix recovery. This
             is related to compressed sensing, where the goal is to
             develop efficient data acquisition systems by exploiting
             sparsity of underlying signals. © 2013 IEEE.},
   Doi = {10.1109/GlobalSIP.2013.6736959},
   Key = {fds235764}
}

@article{fds236057,
   Author = {Tarokh, V and Naguib, A and Seshadri, N and Calderbank,
             AR},
   Title = {Low-rate multi-dimensional space-time codes for both slow
             and rapid fading channels},
   Journal = {IEEE International Symposium on Personal, Indoor and Mobile
             Radio Communications, PIMRC},
   Volume = {3},
   Pages = {1206-1210},
   Year = {1997},
   Month = {December},
   Abstract = {We consider the design of channel codes for improving the
             data rate and/or the reliability of communications using
             multiple transmit antennas over a fading channel. It is
             assumed that the transmitter does not know the channel but
             seeks to choose a codebook that guarantees a diversity gain
             of r1 when there is no mobility and a diversity gain of
             r2≥r1 when the channel is fast fading. A solution to this
             problem is unveiled in this paper. Here, the encoded data is
             split into n streams that are simultaneously transmitted
             using n transmit antennas. The signal received at each
             receive antenna is a superposition of the faded versions of
             the n transmitted signals. We derive performance criteria
             for designing codes having the aforementioned properties.
             Performance is shown to be determined by diversity advantage
             quantified by a rank/distance and coding advantage
             quantified by a determinant/product criterion. The criteria
             is used to design codes for both slow and rapid fading
             channels. The constructed codes have remarkable performance
             in low signal to noise ratios and are suitable for improving
             the frequency reuse factor under a variety of mobility
             conditions.},
   Key = {fds236057}
}

@article{fds236035,
   Author = {Calderbank, AR and Fishburn, PC},
   Title = {Maximal three-independent subsets of {0, 1, 2}^n},
   Journal = {Designs, Codes and Cryptography},
   Volume = {4},
   Number = {4},
   Pages = {203-211},
   Publisher = {Springer Nature},
   Year = {1994},
   Month = {October},
   ISSN = {0925-1022},
   url = {http://dx.doi.org/10.1007/BF01388452},
   Abstract = {We consider a variant of the classical problem of finding
             the size of the largest cap in the r-dimensional projective
             geometry PG(r, 3) over the field F_3 with 3 elements. We
             study the maximum size f(n) of a subset S of F_3^n with the
             property that the only solution to the equation x_1 + x_2 +
             x_3 = 0 is x_1 = x_2 = x_3. Let c_n = f(n)^{1/n} and c =
             sup{c_1, c_2, ...}. We prove that c > 2.21, improving the
             previous lower bound of 2.1955... © 1994 Kluwer Academic
             Publishers.},
   Doi = {10.1007/BF01388452},
   Key = {fds236035}
}
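
%% Editor's note: a small brute-force check of the quantity f(n) defined in the
%% abstract above, feasible only for very small n. It is purely illustrative of
%% the definition; the paper's result concerns the exponential growth rate
%% c = sup f(n)^{1/n}.

from itertools import combinations, product

def three_independent(S):
    """True if x1 + x2 + x3 = 0 (componentwise mod 3) forces x1 = x2 = x3 in S."""
    for x1, x2, x3 in product(S, repeat=3):
        if all((a + b + c) % 3 == 0 for a, b, c in zip(x1, x2, x3)):
            if not (x1 == x2 == x3):
                return False
    return True

def f(n):
    """Largest size of a three-independent subset of {0, 1, 2}^n (tiny n only)."""
    points = list(product(range(3), repeat=n))
    for size in range(len(points), 0, -1):
        if any(three_independent(S) for S in combinations(points, size)):
            return size
    return 0

print(f(1), f(2))   # f(1) = 2; brute force becomes infeasible beyond tiny n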

@article{fds236084,
   Author = {Duarte, MF and Matthews, TE and Warren, WS and Calderbank,
             R},
   Title = {Melanoma classification from hidden Markov tree
             features},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {685-688},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6287976},
   Abstract = {Melanoma detection relies on visual inspection of skin
             samples under the microscope via a qualitative set of
             indicators, causing large discordance among pathologists.
             New developments in pump-probe imaging enable the extraction
             of melanin intensity levels from skin samples and provide
             baseline qualitative figures for melanoma detection and
             classification. However, such basic figures do not capture
             the diverse types of cellular structure that distinguish
             different stages of melanoma. In this paper, we propose an
             initial approach for feature extraction for classification
             purposes via Hidden Markov Tree models trained on skin
             sample melanin intensity images. Our experimental results
             show that the proposed features provide a mathematical
             microscope that is able to better discriminate cellular
             structure, enabling successful classification of skin
             samples that are mislabeled when the baseline melanin
             intensity qualitative figures are used. © 2012
             IEEE.},
   Doi = {10.1109/ICASSP.2012.6287976},
   Key = {fds236084}
}

@article{fds235743,
   Author = {Biglieri, E and Calderbank, R and Constantinides, A and Goldsmith, A and Paulraj, A and Poor, HV},
   Title = {MIMO wireless communications},
   Journal = {MIMO Wireless Communications},
   Volume = {9780521873284},
   Pages = {1-323},
   Publisher = {Cambridge University Press},
   Year = {2007},
   Month = {January},
   url = {http://dx.doi.org/10.1017/CBO9780511618420},
   Abstract = {Multiple-input multiple-output (MIMO) technology constitutes
             a breakthrough in the design of wireless communications
             systems, and is already at the core of several wireless
             standards. Exploiting multipath scattering, MIMO techniques
             deliver significant performance enhancements in terms of
             data transmission rate and interference reduction. This book
             is a detailed introduction to the analysis and design of
             MIMO wireless systems. Beginning with an overview of MIMO
             technology, the authors then examine the fundamental
             capacity limits of MIMO systems. Transmitter design,
             including precoding and space-time coding, is then treated
             in depth, and the book closes with two chapters devoted to
             receiver design. Written by a team of leading experts, the
             book blends theoretical analysis with physical insights, and
             highlights a range of key design challenges. It can be used
             as a textbook for advanced courses on wireless
             communications, and will also appeal to researchers and
             practitioners working on MIMO wireless systems.},
   Doi = {10.1017/CBO9780511618420},
   Key = {fds235743}
}

@article{fds235912,
   Author = {Qureshi, T and Zoltowski, M and Calderbank, R},
   Title = {MIMO-OFDM channel estimation using golay complementary
             sequences},
   Journal = {2009 International Waveform Diversity and Design Conference
             Proceedings, WDD 2009},
   Pages = {253-257},
   Publisher = {IEEE},
   Year = {2009},
   Month = {April},
   url = {http://dx.doi.org/10.1109/WDDC.2009.4800355},
   Abstract = {We present a pilot-assisted method for estimating the
             frequency selective channel in a MIMO-OFDM (Multiple Input
             Multiple Output - Orthogonal Frequency Division
             Multiplexing) system. The pilot sequence is designed using
             the DFT (Discrete Fourier Transform) of the Golay
             complementary sequences. Novel exploitation of the perfect
             autocorrelation property of Golay complementary sequences,
             in conjunction with OSTBC (Orthogonal Space Time Block Code)
             based pilot waveform scheduling across multiple OFDM frames,
             facilitates simple separation of the channel mixtures at the
             receive antennas. The DFT length used to transform the
             complementary sequence into the frequency domain is shown to
             be a key critical parameter for correctly estimating the
             channel. This channel estimation scheme is then extended to
             antenna arrays of arbitrary sizes. ©2009
             IEEE.},
   Doi = {10.1109/WDDC.2009.4800355},
   Key = {fds235912}
}
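
%% Editor's note: a small numerical check of the complementary autocorrelation
%% property exploited in the entry above, using the standard length-doubling
%% construction of binary Golay pairs. This is generic background, not the
%% paper's pilot design.

import numpy as np

def golay_pair(m):
    """Binary (+/-1) Golay complementary pair of length 2**m (standard recursion)."""
    a = np.array([1.0])
    b = np.array([1.0])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

a, b = golay_pair(5)                                  # length-32 pair
acf = lambda x: np.correlate(x, x, mode='full')       # aperiodic autocorrelation
total = acf(a) + acf(b)
N = len(a)
target = np.zeros(2 * N - 1)
target[N - 1] = 2 * N                                 # 2N at zero lag, 0 elsewhere
print(np.allclose(total, target))                     # True: sidelobes cancel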

@article{fds235758,
   Author = {Calderbank, AR and Forney, GD and Vardy, A},
   Title = {Minimal tail-biting trellises: The Golay code and
             more},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {255},
   Publisher = {IEEE},
   Year = {1998},
   Month = {December},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.1998.708860},
   Abstract = {A 16-state tail-biting trellis for the binary (24,12,8)
             Golay code C 24 is exhibited. This tail-biting trellis is
             minimal. It has many other nice properties, and may be
             simpler to decode than the minimal conventional trellis for
             C24. Unwrapping this tail-biting trellis produces a
             periodically-time-varying 16-state rate-1/2 convolutional
             code. This «Golay convolutional code» is the first known
             example of a self-dual doubly-even binary linear
             convolutional code. It has greater minimum distance than any
             16-state time-invariant convolutional code of rate 1/2.
             Demonstrably minimal tail-biting trellises are given for
             several other binary and nonbinary self-dual codes. For some
             nonbinary codes, tail-biting trellises based on code
             generators over a group are simpler than any tail-biting
             trellis based on linear generators over a field. © 1998
             IEEE.},
   Doi = {10.1109/ISIT.1998.708860},
   Key = {fds235758}
}

@article{fds236067,
   Author = {Calderbank, AR and Forney, GD and Vardy, A},
   Title = {Minimal tail-biting trellises: the Golay code and
             more},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {45},
   Number = {5},
   Pages = {1435-1455},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1999},
   Month = {January},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.771145},
   Abstract = {Tail-biting trellis representations of block codes are
             investigated. We develop some elementary theory, and present
             several intriguing examples, which we hope will stimulate
             further developments in this field. In particular, we
             construct a 16-state 12-section structurally invariant
             tail-biting trellis for the (24, 12, 8) binary Golay code.
             This tail-biting trellis representation is minimal: it
             simultaneously minimizes all conceivable measures of state
             complexity. Moreover, it compares favorably with the minimal
             conventional 12-section trellis for the Golay code, which
             has 256 states at its midpoint, or with the best
             quasi-cyclic representation of this code, which leads to a
             64-state tail-biting trellis. Unwrapping this tail-biting
             trellis produces a periodically time-varying 16-state rate-
             1/2 'convolutional Golay code' with d = 8, which has
             attractive performance/complexity properties. We furthermore
             show that the (6, 3, 4) quaternary hexacode has a minimal
             8-state group tail-biting trellis, even though it has no
              such linear trellis over F_4. Minimal tail-biting trellises
              are also constructed for the (8, 4, 4) binary Hamming code,
              the (4, 2, 3) ternary tetracode, the (4, 2, 3) code over
              F_4, and the Z_4-linear (8, 4, 4) octacode.},
   Doi = {10.1109/18.771145},
   Key = {fds236067}
}

@article{fds326887,
   Author = {Sokolić, J and Renna, F and Calderbank, R and Rodrigues,
             MRD},
   Title = {Mismatch in the Classification of Linear Subspaces:
             Sufficient Conditions for Reliable Classification},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {12},
   Pages = {3035-3050},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {June},
   url = {http://dx.doi.org/10.1109/TSP.2016.2537272},
   Abstract = {This paper considers the classification of linear subspaces
             with mismatched classifiers. In particular, we assume a
             model where one observes signals in the presence of
             isotropic Gaussian noise and the distribution of the signals
             conditioned on a given class is Gaussian with a zero mean
             and a low-rank covariance matrix. We also assume that the
             classifier knows only a mismatched version of the parameters
              of the input distribution in lieu of the true parameters. By
             constructing an asymptotic low-noise expansion of an upper
             bound to the error probability of such a mismatched
             classifier, we provide sufficient conditions for reliable
             classification in the low-noise regime that are able to
             sharply predict the absence of a classification error floor.
             Such conditions are a function of the geometry of the true
             signal distribution, the geometry of the mismatched signal
             distributions as well as the interplay between such
             geometries, namely, the principal angles and the overlap
             between the true and the mismatched signal subspaces.
             Numerical results demonstrate that our conditions for
             reliable classification can sharply predict the behavior of
              a mismatched classifier both with synthetic data and in
              motion segmentation and hand-written digit classification
              applications.},
   Doi = {10.1109/TSP.2016.2537272},
   Key = {fds326887}
}

@article{fds235992,
   Author = {Chi, Y and Gomaa, A and Al-Dhahir, N and Calderbank,
             R},
   Title = {MMSE-optimal training sequences for spectrally-efficient
             Multi-User MIMO-OFDM systems},
   Journal = {European Signal Processing Conference},
   Pages = {634-638},
   Year = {2011},
   Month = {December},
   ISSN = {2219-5491},
   Abstract = {This paper proposes a new family of optimal training
             sequences in terms of minimizing the mean-square channel
             estimation error for spectrally-efficient Multi-User
             MIMO-OFDM systems with an arbitrary number of transmit
             antennas and an arbitrary number of training symbols. It
             addresses uplink transmission scenarios where the users
             overlap in time and frequency and are separated using
             spatial processing at the base station. In particular,
             optimal training sequences can be obtained easily from
             standard signal constellations such as QPSK with desired low
              PAPR, making them appealing for practical use. © 2011
             EURASIP.},
   Key = {fds235992}
}

@article{fds235956,
   Author = {Bajwa, WU and Calderbank, R and Jafarpour, S},
   Title = {Model selection: Two fundamental measures of coherence and
             their algorithmic significance},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1568-1572},
   Publisher = {IEEE},
   Year = {2010},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513474},
   Abstract = {The problem of model selection arises in a number of
             contexts, such as compressed sensing, subset selection in
             linear regression, estimation of structures in graphical
             models, and signal denoising. This paper generalizes the
             notion of incoherence in the existing literature on model
             selection and introduces two fundamental measures of
              coherence - termed the worst-case coherence and the
             average coherence - among the columns of a design matrix. In
             particular, it utilizes these two measures of coherence to
             provide an in-depth analysis of a simple one-step
             thresholding (OST) algorithm for model selection. One of the
             key insights offered by the ensuing analysis is that OST is
             feasible for model selection as long as the design matrix
             obeys an easily verifiable property. In addition, the paper
             also characterizes the model-selection performance of OST in
             terms of the worst-case coherence, μ, and establishes that
             OST performs near-optimally in the low signal-to-noise ratio
              regime for N × C design matrices with μ ≈ O(N^{-1/2}).
             Finally, in contrast to some of the existing literature on
             model selection, the analysis in the paper is nonasymptotic
             in nature, it does not require knowledge of the true model
             order, it is applicable to generic (random or deterministic)
             design matrices, and it neither requires submatrices of the
             design matrix to have full rank, nor does it assume a
             statistical prior on the values of the nonzero entries of
             the data vector. © 2010 IEEE.},
   Doi = {10.1109/ISIT.2010.5513474},
   Key = {fds235956}
}
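
As a companion to the entry above (fds235956), the following minimal sketch computes the two coherence measures named in the abstract for a matrix with unit-norm columns. It is an illustration only: the Gaussian test matrix and the variable names are my choices, and the average-coherence formula used here is one common convention (the largest column-wise average of off-diagonal Gram entries), not code taken from the paper.

import numpy as np

def coherence_measures(A):
    """Worst-case and average coherence of a matrix with unit-norm columns.

    Worst-case coherence: max over distinct column pairs of |<a_i, a_j>|.
    Average coherence (one common convention): max over i of
    |sum_{j != i} <a_i, a_j>| / (C - 1), where C is the number of columns.
    """
    A = A / np.linalg.norm(A, axis=0, keepdims=True)   # normalize columns
    G = A.T @ A                                        # Gram matrix
    off = G - np.eye(G.shape[0])                       # zero the diagonal
    worst_case = np.max(np.abs(off))
    average = np.max(np.abs(off.sum(axis=1))) / (G.shape[0] - 1)
    return worst_case, average

# Example: an N x C Gaussian design matrix; for such matrices the worst-case
# coherence concentrates around the order of sqrt(log C / N).
rng = np.random.default_rng(0)
N, C = 128, 512
A = rng.standard_normal((N, C))
mu, nu = coherence_measures(A)
print(f"worst-case coherence mu = {mu:.3f}, average coherence nu = {nu:.4f}")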

@article{fds235957,
   Author = {Goel, S and Aggarwal, V and Yener, A and Calderbank,
             AR},
   Title = {Modeling location uncertainty for eavesdroppers: A secrecy
             graph approach},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {2627-2631},
   Publisher = {IEEE},
   Year = {2010},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513702},
   Abstract = {In this paper, we consider end-to-end secure communication
             in a large wireless network, where the locations of
             eavesdroppers are uncertain. Our framework attempts to
             bridge the gap between physical layer security under
             uncertain channel state information of the eavesdropper and
             network level connectivity under security constraints, by
             modeling location uncertainty directly at the network level
             as correlated node and link failures in a secrecy graph.
             Bounds on the percolation threshold are obtained for square
             and triangular lattices, and bounds on mean degree are
             obtained for Poisson secrecy graphs. Both analytic and
             simulation results show the dramatic effect of uncertainty
             in location of eavesdroppers on connectivity in a secrecy
             graph. © 2010 IEEE.},
   Doi = {10.1109/ISIT.2010.5513702},
   Key = {fds235957}
}

@article{fds236045,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {Modular and p-adic cyclic codes},
   Journal = {Designs, Codes and Cryptography},
   Volume = {6},
   Number = {1},
   Pages = {21-35},
   Publisher = {Springer Nature},
   Year = {1995},
   Month = {July},
   ISSN = {0925-1022},
   url = {http://dx.doi.org/10.1007/BF01390768},
   Abstract = {This paper presents some basic theorems giving the structure
             of cyclic codes of length n over the ring of integers modulo
              p^a and over the p-adic numbers, where p is a prime not
             dividing n. An especially interesting example is the 2-adic
             cyclic code of length 7 with generator polynomial
              X^3 + λX^2 + (λ-1)X - 1, where λ satisfies λ^2 - λ + 2 = 0. This
             is the 2-adic generalization of both the binary Hamming code
             and the quaternary octacode (the latter being equivalent to
             the Nordstrom-Robinson code). Other examples include the
             2-adic Golay code of length 24 and the 3-adic Golay code of
             length 12. © 1995 Kluwer Academic Publishers.},
   Doi = {10.1007/BF01390768},
   Key = {fds236045}
}
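
As an illustration of the 2-adic root mentioned in the entry above (fds236045), the toy sketch below uses Hensel (Newton) lifting to compute a λ with λ^2 - λ + 2 = 0 to a chosen 2-adic precision. The function names and the choice of which of the two conjugate roots to lift are mine; this is not code from the paper.

# Hensel-lift a root of f(x) = x^2 - x + 2 from mod 2 up to mod 2^k.
# f(x) = x(x - 1) mod 2, and f'(x) = 2x - 1 is odd (a 2-adic unit),
# so each root mod 2 lifts uniquely; this produces one of the two
# conjugate 2-adic values of lambda in the generator polynomial
# X^3 + lambda*X^2 + (lambda - 1)*X - 1.

def f(x):
    return x * x - x + 2

def hensel_lift(root_mod2, k):
    """Return the unique x (mod 2^k) with f(x) = 0 and x = root_mod2 (mod 2)."""
    x, mod = root_mod2, 2
    for _ in range(k - 1):
        mod *= 2
        deriv_inv = pow(2 * x - 1, -1, mod)       # f'(x) is invertible mod 2^m
        x = (x - f(x) * deriv_inv) % mod          # Newton step
    return x

k = 16
lam = hensel_lift(0, k)        # lift the root congruent to 0 mod 2
assert f(lam) % (1 << k) == 0  # lambda^2 - lambda + 2 = 0 (mod 2^16)
print(f"lambda = {lam} (mod 2^{k}), binary {lam:b}")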

@article{fds235819,
   Author = {McLaughlin, SW and Calderbank, AR and Laroia, R},
   Title = {Modulation codes for multi-amplitude optical recording
             channels},
   Journal = {National Conference Publication - Institution of Engineers,
             Australia},
   Volume = {2},
   Number = {94/9},
   Pages = {941-945},
   Year = {1994},
   Month = {December},
   Abstract = {We consider the problem of coding for a recording channel.
             Traditional magnetic or optical channels employ saturation
             recording, where the input to the channel is a two-level
             waveform. A new optical recording channel has been developed
             that supports unsaturated, M-ary (M≥3) signal levels. In
             this paper we introduce the new channel and discuss
             modulation codes for it. In particular, we consider the
             analysis and design of new partial response codes for these
             channels. Comparisons are made with M-ary runlength limited
             codes. The partial response codes achieve the largest known
             storage density with coding gains of 3 - 5.5 dB above M-ary
             runlength limited codes.},
   Key = {fds235819}
}

@article{fds236021,
   Author = {Pottie, GJ and Taylor, DP and Calderbank, AR},
   Title = {Multi-level channel codes based on partitioning},
   Volume = {25},
   Number = {13},
   Pages = {166},
   Year = {1988},
   Month = {December},
   Abstract = {Summary form only given, as follows. Imai and Hirakawa have
             proposed a multilevel coding method based on binary block
             codes that admits a staged decoding procedure. This method
             has been extended to the design of codes for the Gaussian
             channel by Ginzburg and Tanner. The authors show that coset
             codes (including lattice, Ungerboeck, and binary codes) and
             indeed any codes which rely on a partitioning of the signal
             set may be described by one formalism, and all can be used
             in a multilevel scheme. The combination of such codes in a
             multilevel scheme often leads to reduced decoding complexity
             for the same performance as previously published schemes.
             The authors discuss some alternatives to the staged decoding
             structure, and the tradeoffs involved. They present as
             examples powerful multi-level schemes for the Gaussian
             channel and for channels that are subject to both Gaussian
             and impulsive noise.},
   Key = {fds236021}
}

@article{fds236017,
   Author = {Calderbank, AR},
   Title = {Multi-level trellis codes for the Gaussian channel and for
             channels subject to impulsive noise},
   Journal = {Proceedings - IEEE Military Communications
             Conference},
   Volume = {2},
   Pages = {673-678},
   Year = {1988},
   Month = {December},
   Abstract = {The author designs multilevel trellis codes based on
             lattices and cosets that provide greater immunity to
             Gaussian noise and/or greater resistance to impulse noise
             than previous approaches. He shows how to calculate
             minimum-squared distance and path multiplicity in terms of
             the norms and multiplicities of the different cosets. The
             multilevel structure allows the redundancy in the coset
             selection procedure to be allocated efficiently among the
             different levels. The proposed codes admit a staged decoding
             procedure that requires very few trellis states and has
             performance/complexity advantages over maximum-likelihood
             decoding.},
   Key = {fds236017}
}

@article{fds343646,
   Author = {Michelusi, N and Nokleby, M and Mitra, U and Calderbank,
             R},
   Title = {Multi-Scale Spectrum Sensing in Dense Multi-Cell Cognitive
             Networks},
   Journal = {IEEE Transactions on Communications},
   Volume = {67},
   Number = {4},
   Pages = {2673-2688},
   Year = {2019},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TCOMM.2018.2886020},
   Abstract = {Multi-scale spectrum sensing is proposed to overcome the
             cost of full network state information on the spectrum
             occupancy of primary users (PUs) in dense multi-cell
             cognitive networks. Secondary users (SUs) estimate the local
             spectrum occupancies and aggregate them hierarchically to
             estimate spectrum occupancy at multiple spatial scales.
             Thus, SUs obtain fine-grained estimates of spectrum
             occupancies of nearby cells, more relevant to scheduling
             tasks, and coarse-grained estimates of those of distant
             cells. An agglomerative clustering algorithm is proposed to
             design a cost-effective aggregation tree, matched to the
              structure of interference and robust to local estimation
              errors and delays. Given these multi-scale estimates, the
             SU traffic is adapted in a decentralized fashion in each
             cell, to optimize the trade-off among SU cell throughput,
             interference caused to PUs, and mutual SU interference.
             Numerical evaluations demonstrate a small degradation in SU
             cell throughput (up to 15% for a 0 dB interference-to-noise
             ratio experienced at PUs) compared to a scheme with full
             network state information, using only one-third of the cost
             incurred in the exchange of spectrum estimates. The proposed
             interference-matched design is shown to significantly
             outperform a random tree design, by providing more relevant
             information for network control, and a state-of-the-art
             consensus-based algorithm, which does not leverage the
             spatio-temporal structure of interference across the
             network.},
   Doi = {10.1109/TCOMM.2018.2886020},
   Key = {fds343646}
}

@article{fds236025,
   Author = {Calderbank, AR},
   Title = {Multilevel Codes and Multistage Decoding},
   Journal = {IEEE Transactions on Communications},
   Volume = {37},
   Number = {3},
   Pages = {222-229},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1989},
   Month = {January},
   url = {http://dx.doi.org/10.1109/26.20095},
   Abstract = {Imai and Hirakawa have proposed a multilevel coding method
             based on binary block codes that admits a staged decoding
             procedure. This method has been extended to the design of
             codes for the Gaussian channel that admit multistage
             decoding procedures by Ginzburg, by Sayegh, and by Tanner.
             In this paper, we extend the multilevel coding method to
             coset codes and we show how to calculate minimum squared
             distance and path multiplicity in terms of the norms and
             multiplicities of the different cosets. The multilevel
             structure allows the redundancy in the coset selection
             procedure to be allocated efficiently among the different
             levels. It also allows the use of suboptimal multistage
             decoding procedures that have performance/complexity
             advantages over maximum likelihood decoding. © 1989
             IEEE},
   Doi = {10.1109/26.20095},
   Key = {fds236025}
}

@article{fds235812,
   Author = {Calderbank, AR and Seshadri, N},
   Title = {Multilevel codes for unequal error protection},
   Journal = {Proceedings of the 1993 IEEE International Symposium on
             Information Theory},
   Pages = {183},
   Year = {1993},
   Month = {January},
   Abstract = {In many speech and image coding schemes, some of the coded
             bits are extremely sensitive to channel errors while some
             others exhibit very little sensitivity. In order to make the
             best use of channel redundancy, unequal error protection
             (UEP) codes are needed. In a bandlimited environment, such
             coding and the modulation should be integrated. In this
             paper, the authors propose two combined UEP coding and
             modulation schemes.},
   Key = {fds235812}
}

@article{fds235815,
   Author = {Calderbank, AR and Seshadri, N},
   Title = {Multilevel Codes for Unequal Error Protection},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {39},
   Number = {4},
   Pages = {1234-1248},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1993},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.243441},
   Abstract = {In many speech and image coding schemes, some of the coded
             bits are extremely sensitive to channel errors while some
             others exhibit very little sensitivity. In order to make the
             best use of channel redundancy, unequal error protection
             (UEP) codes are needed. In a bandlimited environment, such
             coding and the modulation should be integrated. Two combined
             UEP coding and modulation schemes are proposed. The first
             method multiplexes different coded signal constellations,
             with each coded constellation providing a different level of
             error protection. The novelty here is that a codeword
             specifies the multiplexing rule and the choice of the
             codeword from a fixed codebook is used to convey additional
             important information. The decoder determines the
             multiplexing rule before decoding the rest of the data. The
             second method is based on partitioning a signal
             constellation into disjoint subsets, where the most
             important data sequence is encoded, using most of the
             available redundancy, to specify a sequence of subsets. The
             partitioning and code construction is done to maximize the
             minimum Euclidean distance between two different valid
             subset sequences. This leads to novel ways of partitioning
             the signal constellations into subsets. Finally, the less
             important data selects a sequence of signal points to be
             transmitted from the subsets. A side benefit of the proposed
             set partitioning procedure is a reduction in the number of
             nearest neighbors, sometimes even over the uncoded signal
             constellation. Many of the codes designed provided virtually
             error free transmission (greater than 6-dB coding gain) for
             some fraction (for example, 25%) of the data while providing
             a coding gain of 1–2 dB for the remaining data with
             respect to uncoded transmission. The two methods can also be
             combined to realize new coded signal constellations for
             unequal error protection. © 1993, IEEE. All rights
             reserved.},
   Doi = {10.1109/18.243441},
   Key = {fds235815}
}

@article{fds236075,
   Author = {Chui, J and Calderbank, AR},
   Title = {Multilevel diversity-embedded space-time codes for video
             broadcasting over WiMAX},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1068-1072},
   Publisher = {IEEE},
   Year = {2008},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ISIT.2008.4595151},
   Abstract = {Advances in wireless technologies, such as WiMAX [1], allow
             high data rates and high reliability through the use of
             MIMO-OFDM. However, they are not optimally designed for
             broadcasting. The nature of the wireless medium may cause an
             entire frame to be in outage with little chance of recovery.
             One strategy to overcome this deficit is to employ diversity
              embedding, which protects different bits with different
             diversity orders. Such codes exhibit the property that even
             if the entire frame is in outage, a subset of the frame may
             still be reliably recovered. In this paper, we present
             space-time codes designed for MIMO-OFDM systems which
             achieve diversity embedding. We demonstrate how these codes
              can increase PSNR for video broadcasting in WiMAX. © 2008
             IEEE.},
   Doi = {10.1109/ISIT.2008.4595151},
   Key = {fds236075}
}

@article{fds235967,
   Author = {Applebaum, L and Bajwa, WU and Duarte, MF and Calderbank,
             R},
   Title = {Multiuser detection in asynchronous on-off random access
             channels using lasso},
   Journal = {2010 48th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2010},
   Pages = {130-137},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ALLERTON.2010.5706898},
   Abstract = {This paper considers on-off random access channels where
             users transmit either a one or a zero to a base station.
             Such channels represent an abstraction of control channels
             used for scheduling requests in third-generation cellular
             systems and uplinks in wireless sensor networks deployed for
             target detection. This paper introduces a novel
             convex-optimization-based scheme for multiuser detection
             (MUD) in asynchronous on-off random access channels that
             does not require knowledge of the delays or the
             instantaneous received signal-to-noise ratios of the
             individual users at the base station. For any fixed number
             of temporal signal space dimensions N and maximum delay τ
             in the system, the proposed scheme can accommodate M ≲
              exp(O(N^{1/3})) total users and k ≲ N/log M active users in
              the system, a significant improvement over the k ≤ M ≲ N
             scaling suggested by the use of classical matched-filtering-
             based approaches to MUD employing orthogonal signaling.
             Furthermore, the computational complexity of the proposed
             scheme differs from that of a similar oracle-based scheme
             with perfect knowledge of the user delays by at most a
              factor of log(N+τ). Finally, the results presented here
             are non-asymptotic, in contrast to related previous work for
             synchronous channels that only guarantees that the
             probability of MUD error at the base station goes to zero
             asymptotically in M. ©2010 IEEE.},
   Doi = {10.1109/ALLERTON.2010.5706898},
   Key = {fds235967}
}

@article{fds235919,
   Author = {Tan, CW and Calderbank, AR},
   Title = {Multiuser detection of alamouti signals},
   Journal = {IEEE Transactions on Communications},
   Volume = {57},
   Number = {7},
   Pages = {2080-2089},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {January},
   ISSN = {0090-6778},
   url = {http://dx.doi.org/10.1109/TCOMM.2009.07.070592},
   Abstract = {In a MIMO multiple-access channel where users employ
             Space-Time Block Codes (STBC), interference cancellation can
             be used to suppress co-channel interference and recover the
             desired signal of each user at the receiver. Leveraging the
             special properties of Alamouti matrices, we first show that
             spatial multiplexing of Alamouti signals retains the
             space-time diversity gain of Alamouti signaling using our
             proposed low-complexity Alamouti BLAST-MMSE (A-BLAST)
             Algorithm. Next, in contrast to traditional transmit
             diversity that focuses on STBC construction at the
             transmitter, this paper looks at transmit diversity from the
             perspective of the receiver. In other words, the receiver
             gets to choose the STBC's, which are favourable to the
             channel assuming a fixed BLAST receive algorithm. In a
              multiuser MAC setting, we first present a systematic
             methodology to exploit different decomposition structure in
             Alamouti matrices, each with different tradeoff between
             performance and decoding complexity using possibly different
             MIMO receive algorithms. We then demonstrate that the notion
             of angles (the inner product of two quaternionic vectors)
             between multiuser channels determines the performance of
             MIMO receive algorithms. As an application of the general
             theory, we transform the decoding problem for several types
             of Quasi-Orthogonal STBC (QOSTBC) into multiuser detection
             of virtual Alamouti users. Building upon our A-BLAST
             Algorithm, we propose new algorithms for decoding
             single-user and multiuser QOSTBC. In particular, we show
             that bit error probability is a function of the quaternionic
             angle between virtual users (for a single user) or multiple
             users. This angle varies with the type of QOSTBC and leads
             to a new form of adaptive modulation called code diversity,
             where feedback instructs the transmitter how to choose from
             a plurality of codes. © 2009 IEEE.},
   Doi = {10.1109/TCOMM.2009.07.070592},
   Key = {fds235919}
}
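
Several of the Alamouti-based entries above (e.g. fds235919) rest on the algebraic fact that a 2 x 2 Alamouti block is a scaled unitary, quaternion-like matrix, which is what makes low-complexity interference cancellation and the "angle between users" viewpoint possible. The short check below is a generic illustration of that property only, under my own naming conventions; it is not the paper's A-BLAST algorithm.

import numpy as np

def alamouti_block(s1, s2):
    """Alamouti space-time block: rows are time slots, columns are antennas."""
    return np.array([[s1, s2],
                     [-np.conj(s2), np.conj(s1)]])

rng = np.random.default_rng(1)
s1, s2 = rng.standard_normal(2) + 1j * rng.standard_normal(2)

C = alamouti_block(s1, s2)
gram = C.conj().T @ C

# C^H C = (|s1|^2 + |s2|^2) I: the block is a scaled unitary matrix, so a
# matched filter decouples the two symbols in the single-user case, and
# products of such blocks stay inside the same quaternionic algebra.
expected = (abs(s1) ** 2 + abs(s2) ** 2) * np.eye(2)
assert np.allclose(gram, expected)
print("Alamouti block is orthogonal: C^H C =", np.round(gram, 3).tolist())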

@article{fds235840,
   Author = {Diggavi, SN and Al-Dhahir, N and Calderbank, AR},
   Title = {Multiuser joint equalization and decoding of space-time
             codes},
   Journal = {IEEE International Conference on Communications},
   Volume = {4},
   Pages = {2643-2647},
   Year = {2003},
   Month = {July},
   Abstract = {In this paper we study the multiple-access channel where
             users employ space-time block codes (STBC). The problem is
             formulated in the context of an inter-symbol interference
             (ISI) multiple-access channel. The algebraic structure of
             the STBC is utilized to design joint interference
             suppression, equalization, and decoding schemes. Each user
             transmits using 2 transmit antennas and a time-reversed
             space-time block code suitable for frequency-selective
              channels. We first show that a diversity order of 2M_r(ν+1)
              is achievable at full transmission rate for each user, when
              we have M_r receive antennas, channel memory of ν, and an
             optimal multiuser maximum-likelihood (ML) decoder is used.
             Due to the decoding complexity of the ML detector we study
             the algebraic structure of linear multiuser detectors which
             utilize the properties of the STBC. We do this both in the
             transform domain (D-domain formulation) and when we impose
             finite block length constraints (matrix formulation). The
             receiver is designed to utilize the algebraic structure of
             the codes in order to preserve the block quaternionic
             structure of the equivalent channel for each
             user.},
   Key = {fds235840}
}

@article{fds235929,
   Author = {Hande, P and Chiang, M and Calderbank, R and Rangan,
             S},
   Title = {Network pricing and rate allocation with content provider
             participation},
   Journal = {Proceedings - IEEE INFOCOM},
   Pages = {990-998},
   Publisher = {IEEE},
   Year = {2009},
   Month = {October},
   ISSN = {0743-166X},
   url = {http://dx.doi.org/10.1109/INFCOM.2009.5062010},
   Abstract = {Pricing content-providers for connectivity to end-users and
             setting connection parameters based on the price is an
             evolving model on the Internet. The implications are heavily
             debated in telecom policy circles, and some advocates of
             "Network Neutrality" have opposed price based
             differentiation in connectivity. However, pricing content
             providers can possibly subsidize the end-user's cost of
             connectivity, and the consequent increase in end-user demand
             can benefit ISPs and content providers. This paper provides
             a framework to quantify the precise trade-off in the
             distribution of benefits among ISPs, content-providers, and
             end-users. The framework generalizes the well-known utility
             maximization based rate allocation model, which has been
             extensively studied as an interplay between the ISP and the
             end-users, to incorporate pricing of content-providers. We
             derive the resulting equilibrium prices and data rates in
             two different ISP market conditions: competition and
             monopoly. Network neutrality based restriction on
             content-provider pricing is then modeled as a constraint on
             the maximum price that can be charged to content-providers.
             We demonstrate that, in addition to gains in total and
              end-user surplus, the content-provider experiences a net surplus
             from participation in rate allocation under low cost of
             connectivity. The surplus gains are, however, limited under
             monopoly conditions in comparison to competition in the ISP
             market. © 2009 IEEE.},
   Doi = {10.1109/INFCOM.2009.5062010},
   Key = {fds235929}
}

@article{fds235902,
   Author = {Li, Y and Tian, C and Diggavi, S and Chiang, M and Calderbank,
             AR},
   Title = {Network resource allocation for competing multiple
             description transmissions},
   Journal = {GLOBECOM - IEEE Global Telecommunications
             Conference},
   Pages = {2366-2371},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   url = {http://dx.doi.org/10.1109/GLOCOM.2008.ECP.455},
   Abstract = {To provide real-time multimedia services over a network is
             challenging due to the stringent delay requirements in the
             presence of complex network dynamics. Yet such services are
             beginning to be deployed over best effort networks. Multiple
             description (MD) coding is one approach to transmit the
             media over diverse (multiple) paths to reduce the
             detrimental effects caused by path failures or delay. The
             novelty of this work is to investigate the resource
             allocation in a network, where there are several competing
             MD coded streams. This is done by considering a framework
             that chooses the operating points for asymmetric MD coding
             to maximize total quality of the users, while these streams
             are sent over multiple routing paths. We study the joint
             optimization of multimedia (source) coding and congestion
             control in wired networks. These ideas are extended to joint
             source coding and channel coding in wireless networks. In
             both situations, we propose distributed algorithms for
             optimal resource allocation. In the presence of path loss
             and competing users, the service quality to any particular
             MD stream could be uncertain. In such circumstances it might
             be tempting to expect that greater redundancy in the MD
             streams is needed to protect against such failures. However,
              one surprising aspect of our study reveals that for a large
              number of users competing for the same resources, the
             overall system could benefit through opportunistic
             (hierarchical) strategies. In general networks, our studies
             indicate that the user composition varies from conservative
             to opportunistic operating points, depending on the number
             of users and their network vantage points. © 2008
             IEEE.},
   Doi = {10.1109/GLOCOM.2008.ECP.455},
   Key = {fds235902}
}

@article{fds235973,
   Author = {Li, Y and Tian, C and Diggavi, S and Chiang, M and Calderbank,
             A},
   Title = {Network resource allocation for competing multiple
             description transmissions},
   Journal = {IEEE Transactions on Communications},
   Volume = {58},
   Number = {5},
   Pages = {1493-1504},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {May},
   ISSN = {0090-6778},
   url = {http://dx.doi.org/10.1109/TCOMM.2010.05.080551},
   Abstract = {Providing real-time multimedia services over a best-effort
             network is challenging due to the stringent delay
             requirements in the presence of complex network dynamics.
             Multiple description (MD) coding is one approach to transmit
             the media over diverse (multiple) paths to reduce the
             detrimental effects caused by path failures or delay. The
             novelty of this work is to investigate the resource
             allocation in a network, where there are several competing
             MD coded streams. This is done by considering a framework
             that chooses the operating points for asymmetric MD coding
             to maximize total quality of the users, while these streams
             are sent over multiple routing paths. The framework is based
             on the theoretical modeling where we consider two
             descriptions and high source coding rate region approximated
             within small constants. We study the joint optimization of
             multimedia (source) coding and congestion control in wired
             networks. These ideas are extended to joint source coding
             and channel coding in wireless networks. In both situations,
             we propose distributed algorithms for optimal resource
             allocation. In the presence of path loss and competing
             users, the service quality to any particular MD stream could
             be uncertain. In such circumstances it might be tempting to
             expect that we need greater redundancy in the MD streams to
             protect against such failures. However, one surprising
              aspect of our study reveals that for a large number of users
             who compete for the same resources, the overall system could
             benefit through opportunistic (hierarchical) strategies. In
             general networks, our studies indicate that the user
             composition varies from conservative to opportunistic
             operating points, depending on the number of users and their
             network vantage points. © 2006 IEEE.},
   Doi = {10.1109/TCOMM.2010.05.080551},
   Key = {fds235973}
}

@article{fds235859,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {Network utility maximization and price-based distributed
             algorithms for rate-reliability tradeoff},
   Journal = {Proceedings - IEEE INFOCOM},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISSN = {0743-166X},
   url = {http://dx.doi.org/10.1109/INFOCOM.2006.110},
   Abstract = {The current framework of network utility maximization for
             rate allocation and its price-based algorithms assumes that
             each link provides a fixed-size transmission 'pipe' and each
             user's utility is a function of transmission rate only.
             These assumptions break down in many practical systems,
             where, by adapting the physical layer channel coding or
             transmission diversity, different tradeoffs between rate and
             reliability can be achieved. In network utility maximization
             problems formulated in this paper, the utility for each user
             depends on both transmission rate and signal quality, with
             an intrinsic tradeoff between the two. Each link may also
             provide a higher (lower) rate on the transmission 'pipes' by
             allowing a higher (lower) decoding error probability.
             Despite non-separability and nonconvexity of these
              optimization problems, we propose new price-based distributed
             algorithms and prove their convergence to the globally
             optimal rate-reliability tradeoff under readily-verifiable
             sufficient conditions. We first consider networks in which
             the rate-reliability tradeoff is controlled by adapting
             channel code rates in each link's physical layer error
             correction codes, and propose two distributed algorithms
             based on pricing, which respectively implement the
             'integrated' and 'differentiated' policies of dynamic
              rate-reliability adjustment. In contrast to the classical
             price-based rate control algorithms, in our algorithms each
             user provides an offered price for its own reliability to
             the network while the network provides congestion prices to
             users. The proposed algorithms converge to a tradeoff point
             between rate and reliability, which we prove to be a
             globally optimal one for channel codes with sufficiently
             large coding length and utilities whose curvatures are
             sufficiently negative. Under these conditions, the proposed
             algorithms can thus generate the Pareto optimal tradeoff
             curves between rate and reliability for all the users. The
             distributed algorithms and convergence proofs are extended
             for wireless MIMO multi-hop networks, in which diversity and
             multiplexing gains of each link are controlled to achieve
              the optimal rate-reliability tradeoff. © 2006
             IEEE.},
   Doi = {10.1109/INFOCOM.2006.110},
   Key = {fds235859}
}

@article{fds235751,
   Author = {Goparaju, S and El Rouayheb and S and Calderbank,
             R},
   Title = {New codes and inner bounds for exact repair in distributed
             storage systems},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1036-1040},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.2014.6874990},
   Abstract = {We study the exact-repair tradeoff between storage and
             repair bandwidth in distributed storage systems. We give new
             inner bounds for the tradeoff region and provide code
             constructions that achieve these bounds. © 2014
             IEEE.},
   Doi = {10.1109/ISIT.2014.6874990},
   Key = {fds235751}
}

@article{fds235756,
   Author = {Goparaju, S and Rouayheb, SE and Calderbank, R},
   Title = {New codes and inner bounds for exact repair in distributed
             storage systems},
   Journal = {2014 48th Annual Conference on Information Sciences and
             Systems, CISS 2014},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   url = {http://dx.doi.org/10.1109/CISS.2014.6814148},
   Abstract = {We study the exact-repair tradeoff between storage and
             repair bandwidth in distributed storage systems (DSS). We
             give new inner bounds for the tradeoff region and provide
             code constructions that achieve these bounds. © 2014
             IEEE.},
   Doi = {10.1109/CISS.2014.6814148},
   Key = {fds235756}
}

@article{fds235791,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {New family of codes for dial-up voice lines},
   Pages = {673-676},
   Year = {1984},
   Month = {December},
   Abstract = {Using a certain 4-dimensional lattice, a new family of codes
             has been constructed that has a very low error rate. Two
             particular codes in this family appear to be excellent
              candidates for use in modems operating at 9.6 and 14.4
             kb/s, respectively, on dial-up voice telephone lines. The
             new codes are trellis codes; the output of the encoder is a
             4-tuple of odd integers that depends on the input data and
             the state of the encoder.},
   Key = {fds235791}
}

@article{fds235863,
   Author = {Das, S and Al-Dhahir, N and Calderbank, R and Chui,
             J},
   Title = {New full-diversity high-rate space-time block codes based on
             selective power scaling},
   Journal = {European Signal Processing Conference},
   Year = {2006},
   Month = {December},
   ISSN = {2219-5491},
   Abstract = {We design a new rate-5/4 full-diversity orthogonal STBC for
             QPSK and 2 transmit antennas by enlarging the signalling set
             from the set of quaternions used in the Alamouti [1] code.
             Selective power scaling of information symbols is used to
             guarantee full-diversity while maximizing the coding gain
             and minimizing the transmitted signal peak-to-minimum power
             ratio. The optimum power scaling factor is derived using two
             equivalent criteria and shown to outperform schemes based on
             only constellation rotation while still enjoying a
              low-complexity ML decoding algorithm. Extensions to the case
             of 4 transmit antennas are reported in [4].},
   Key = {fds235863}
}

@article{fds235947,
   Author = {Rabiei, P and Al-Dhahir, N and Calderbank, R},
   Title = {New rate-2 STBC design for 2 TX with reduced-complexity
             maximum likelihood decoding},
   Journal = {IEEE Transactions on Wireless Communications},
   Volume = {8},
   Number = {4},
   Pages = {1803-1813},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {April},
   ISSN = {1536-1276},
   url = {http://dx.doi.org/10.1109/TWC.2009.071323},
   Abstract = {We propose a new full-rate space-time block code (STBC) for
             two transmit antennas which can be designed to achieve
             maximum diversity or maximum capacity while enjoying
             optimized coding gain and reduced-complexity
             maximum-likelihood (ML) decoding. The maximum transmit
             diversity (MTD) construction provides a diversity order of
              2N_r for any number of receive antennas N_r at the cost of
             channel capacity loss. The maximum channel capacity (MCC)
             construction preserves the mutual information between the
             transmit and the received vectors while sacrificing
             diversity. The system designer can switch between the two
             constructions through a simple parameter change based on the
             operating signal-to-noise ratio (SNR), signal constellation
             size and number of receive antennas. Thanks to their special
             algebraic structure, both constructions enjoy low-complexity
             ML decoding proportional to the square of the signal
             constellation size making them attractive alternatives to
             existing full-diversity full-rate STBCs in [6], [3] which
             have high ML decoding complexity proportional to the fourth
             order of the signal constellation size. Furthermore, we
             design a differential transmission scheme for our proposed
             STBC, derive the exact ML differential decoding rule, and
             compare its performance with competitive schemes. Finally,
             we investigate transceiver design and performance of our
             proposed STBC in spatial multiple-access scenarios and over
             frequency-selective channels. © 2006 IEEE.},
   Doi = {10.1109/TWC.2009.071323},
   Key = {fds235947}
}

@article{fds236015,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {New Trellis Codes Based on Lattices and Cosets},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {33},
   Number = {2},
   Pages = {177-195},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1987},
   Month = {January},
   url = {http://dx.doi.org/10.1109/TIT.1987.1057291},
   Abstract = {A new technique is proposed for constructing trellis codes,
             which provides an alternative to Ungerboeck's method of
             “set partitioning.” The new codes use a signal
             constellation consisting of points from an n-dimensional
              lattice Λ, with an equal number of points from each coset of
              a sublattice Λ'. One part of the input stream drives a
              generalized convolutional code whose outputs are cosets of
              Λ', while the other part selects points from these cosets.
             Several of the new codes are better than those previously
             known. © 1987 IEEE},
   Doi = {10.1109/TIT.1987.1057291},
   Key = {fds236015}
}

@article{fds236009,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {New trellis codes},
   Pages = {59},
   Year = {1986},
   Month = {December},
   Abstract = {Summary form only given. A technique is proposed for
             constructing multidimensional trellis codes that provides an
              alternative to Ungerboeck's method of 'set partitioning.'
              The new codes use a signal constellation consisting of
              points from an n-dimensional lattice Λ, with an equal
              number of points from each coset of a sublattice Λ'. One
              part of the input stream drives a generalized convolutional
              code whose outputs are cosets of Λ', while the other part
              selects points from these cosets.
             This technique allows the path multiplicity to be calculated
             easily. It is also possible to describe methods of
             differential encoding very simply.},
   Key = {fds236009}
}
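
As a generic companion to the lattice-and-coset constructions in the two entries above (fds236015 and fds236009), the toy sketch below shows how partitioning a lattice into cosets of a sublattice increases the minimum squared distance within each coset, which is the quantity that drives the distance calculations for these trellis codes. The choice of Z^2 and the sublattice 2Z^2 is mine, purely for illustration; it is not an example taken from the papers.

import itertools
import numpy as np

# Points of the integer lattice Z^2 in a small window, grouped into the four
# cosets of the sublattice 2Z^2. Within each coset the minimum squared
# distance grows from 1 (for Z^2 itself) to 4.
points = [np.array(p) for p in itertools.product(range(-4, 5), repeat=2)]

def min_sq_dist(pts):
    return min(int(np.sum((a - b) ** 2))
               for a, b in itertools.combinations(pts, 2))

cosets = {}
for p in points:
    cosets.setdefault((p[0] % 2, p[1] % 2), []).append(p)

print("min squared distance in Z^2:", min_sq_dist(points))
for label, pts in sorted(cosets.items()):
    print(f"coset {label} of 2Z^2: min squared distance = {min_sq_dist(pts)}")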

@article{fds236029,
   Author = {Calderbank, AR and Ozarow, LH},
   Title = {Non-equiprobable signaling on the Gaussian
             channel},
   Pages = {145},
   Year = {1990},
   Month = {December},
   Abstract = {Summary form only given, as follows. Many signaling schemes
             for the Gaussian channel are based on finite-dimensional
             lattices. The signal constellation consists of all lattice
             points within a region R, and the shape of this region
             determines the average signal power. In the limit as N →
              ∞, the shape gain of the N-sphere over the N-cube approaches
              πe/6 ≈ 1.53 dB. It is shown that the full asymptotic shape
             gain can be realized in any fixed dimension by
             nonequiprobable signaling. Shaping schemes that achieve a
             significant fraction of the available asymptotic shaping
             gain are described. The peak-to-average-power ratio of these
             schemes is superior to that of equiprobable signaling
             schemes based on Voronoi regions of multidimensional
             lattices. The new shaping schemes admit a simple staged
             demodulation procedure.},
   Key = {fds236029}
}

@article{fds235801,
   Author = {Calderbank, AR and Ozarow, LH},
   Title = {Nonequiprobable Signaling on the Gaussian
             Channel},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {36},
   Number = {4},
   Pages = {726-740},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1990},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.53734},
   Abstract = {Many signaling schemes for the Gaussian channel are based on
             finite-dimensional lattices. The signal constellation
             consists of all lattice points within a region and the shape
             of this region determines the average signal power.
             Spherical signal constellations minimize average signal
             power, and in the limit as N→∞ the shape gain of the
              N-sphere over the N-cube approaches πe/6 ≈ 1.53 dB. A
             nonequiprobable signaling scheme is described that
             approaches this full asymptotic shape gain in any fixed
              dimension. A signal constellation Ω is partitioned into T
              subconstellations Ω_0, …, Ω_{T-1} of equal size by scaling
              a basic region ℛ. Signal points in the same
              subconstellation are used equiprobably, and a shaping code
              selects the subconstellation Ω_i with frequency
              f_i. Shaping codes make it possible to achieve any
             desired fractional bit rate. We compare our schemes with
             equiprobable signaling schemes based on Voronoi regions of
             multidimensional lattices. For comparable shape gain and
             constellation expansion ratio, the peak to average power
             ratio of our schemes is superior. Furthermore a simple table
             look-up is all that is required to address points in our
             constellations. This is not the case for Voronoi
             constellations where the complexity of addressing signal
             points is governed by the complexity of decoding the
             lattice. We also show that it is possible to integrate
             coding and nonequiprobable signaling within a common
             multilevel framework. © 1990 IEEE},
   Doi = {10.1109/18.53734},
   Key = {fds235801}
}
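
A small numeric companion to the two shaping entries above (fds236029 and fds235801): the asymptotic shape gain of the N-sphere over the N-cube is πe/6, quoted in the abstracts as about 1.53 dB. The sketch below evaluates that constant and, as a hedged illustration, the standard finite-dimensional sphere-over-cube shape-gain formula from the lattice-coding literature; neither the formula nor the code is taken from these papers.

import math

# Asymptotic shape gain of the N-sphere over the N-cube.
asymptotic = math.pi * math.e / 6
print(f"pi*e/6 = {asymptotic:.4f} = {10 * math.log10(asymptotic):.2f} dB")

# Standard finite-dimensional sphere-over-cube shape gain (lattice-coding
# literature, assumed here for illustration):
#   gamma_s(N) = ((N + 2) * pi / 12) / Gamma(N/2 + 1)^(2/N)
def sphere_shape_gain_db(N):
    gain = ((N + 2) * math.pi / 12) / math.gamma(N / 2 + 1) ** (2 / N)
    return 10 * math.log10(gain)

for N in (2, 4, 16, 64, 256):
    print(f"N = {N:3d}: shape gain = {sphere_shape_gain_db(N):.2f} dB")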

@article{fds235847,
   Author = {Oggier, FE and Sloane, NJA and Diggavi, SN and Calderbank,
             R},
   Title = {Nonintersecting subspaces based on finite
             alphabets},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {455},
   Year = {2004},
   Month = {October},
   Abstract = {Codewords were constructed subject to the constraint that
             the elements of the codewords use symbols from a fixed,
             small constellation. The subspaces constructed remain
             nonintersecting when lifted to the complex field. The
             construction shows that the codewords are nonintersecting
             over the finite field. The construction gives full diversity
             order when the elements of the codewords are restricted to
             come from a finite field.},
   Key = {fds235847}
}

@article{fds235854,
   Author = {Oggier, FE and Sloane, NJA and Diggavi, SN and Calderbank,
             AR},
   Title = {Nonintersecting subspaces based on finite
             alphabets},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {51},
   Number = {12},
   Pages = {4320-4325},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2005},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2005.858946},
   Abstract = {Two subspaces of a vector space are here called
             "nonintersecting" if they meet only in the zero vector.
             Motivated by the design of noncoherent multiple-antenna
             communications systems, we consider the following question.
              How many pairwise nonintersecting M_t-dimensional subspaces
              of an m-dimensional vector space V over a field F can be
              found, if the generator matrices for the subspaces may
              contain only symbols from a given finite alphabet A ⊆ F?
              The most important case is when F is the field of complex
              numbers C; then M_t is the number of antennas. If A = F =
              GF(q) it is shown that the number of nonintersecting
              subspaces is at most (q^m - 1)/(q^{M_t} - 1), and that this
              bound can be attained if and only if m is divisible by M_t.
              Furthermore, these subspaces remain nonintersecting when
              "lifted" to the complex field. It follows that the finite
              field case is essentially completely solved. In the case
              when F = C only the case M_t = 2 is considered. It is shown
              that if A is a PSK-configuration, consisting of the 2^r
              complex roots of unity, the number of nonintersecting planes
              is at least 2^{r(m-2)} and at most 2^{r(m-1)} - 1 (the lower
              bound may in fact be the best that can be achieved). © 2005
             IEEE.},
   Doi = {10.1109/TIT.2005.858946},
   Key = {fds235854}
}

@article{fds235809,
   Author = {Calderbank, AR and Fishburn, PC},
   Title = {Normalized second moment of the binary lattice determined by
             a convolutional code},
   Journal = {Proceedings of the 1993 IEEE International Symposium on
             Information Theory},
   Pages = {137},
   Year = {1993},
   Month = {January},
   Abstract = {The output of a finite state machine is a collection of
             codewords that can be searched efficiently to find the
             optimum codeword with respect to any nonnegative measure
             that can be calculated on a symbol by symbol basis.
             Applications involving trellis codes are considered.
             Although details are given for the convolutional code, the
             method can be applied to arbitrary codes.},
   Key = {fds235809}
}

@article{fds235865,
   Author = {Das, S and Al-Dhahir, N and Calderbank, R},
   Title = {Novel full-diversity high-rate STBC for 2 and 4 transmit
             antennas},
   Journal = {IEEE Communications Letters},
   Volume = {10},
   Number = {3},
   Pages = {171-173},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {March},
   ISSN = {1089-7798},
   url = {http://dx.doi.org/10.1109/LCOMM.2006.1603374},
   Abstract = {We design a new rate-5/4 full-diversity orthogonal
             space-time block code (STBC) for QPSK and 2 transmit
             antennas (TX) by enlarging the signalling set from the set
             of quaternions used in the Alamouti [1] code. Selective
             power scaling of information symbols is used to guarantee
             full-diversity while maximizing the coding gain (CG) and
             minimizing the transmitted signal peak-to-minimum power
             ratio (PMPR). The optimum power scaling factor is derived
             analytically and shown to outperform schemes based only on
             constellation rotation while still enjoying a low-complexity
             maximum likelihood (ML) decoding algorithm. Finally, we
             extend our designs to the case of 4 TX by enlarging the set
             of Quasi-Orthogonal STBC with power scaling. Extensions to
             general M-PSK constellations are straightforward. © 2006
             IEEE.},
   Doi = {10.1109/LCOMM.2006.1603374},
   Key = {fds235865}
}

@article{fds326905,
   Author = {Calderbank, R and Sloane, NJ},
   Title = {Obituary. Claude Shannon (1916-2001).},
   Journal = {Nature},
   Volume = {410},
   Number = {6830},
   Pages = {768},
   Year = {2001},
   Month = {April},
   url = {http://dx.doi.org/10.1038/35071223},
   Doi = {10.1038/35071223},
   Key = {fds326905}
}

@article{fds236053,
   Author = {Calderbank, AR and McGuire, G and Poonen, B and Rubinstein,
             M},
   Title = {On a conjecture of Helleseth regarding pairs of binary
             m-sequences},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {42},
   Number = {3},
   Pages = {988-990},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1996},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.490561},
   Abstract = {Binary m-sequences are maximal-length sequences generated by
             shift registers of length m, that are employed in
             navigation, radar, and spread-spectrum communication. It is
              well known that given a pair of distinct m-sequences, the
              crosscorrelation function must take on at least three
              values. This correspondence addresses a conjecture made by
              Helleseth in 1976, that if m is a power of 2, then there
             are no pairs of binary m-sequences with a 3-valued
             crosscorrelation function. This conjecture is proved under
             the assumption that the three correlation values are
             symmetric about -1. © 1996 IEEE.},
   Doi = {10.1109/18.490561},
   Key = {fds236053}
}

@article{fds331065,
   Author = {Calderbank, AR and Goethals, JM},
   Title = {On a Pair of Dual Subschemes of the Hamming Scheme
             Hn(q)},
   Journal = {European Journal of Combinatorics},
   Volume = {6},
   Number = {2},
   Pages = {133-147},
   Publisher = {Elsevier BV},
   Year = {1985},
   Month = {January},
   url = {http://dx.doi.org/10.1016/S0195-6698(85)80004-4},
   Abstract = {We consider codes in the Hamming association scheme Hn(q)
             with interesting metric properties. We describe how a
             uniformly packed linear code C determines a pair of dual
             subschemes. The existence of this pair of subschemes is used
             to establish restrictions on the possible distances between
             codewords in the dual code C⊥. These restrictions also
             apply to arbitrary codes with degree e + 1 and strength 2e
             or 2e + 1. An analogous result gives necessary conditions
             for the existence of non-linear uniformly packed codes. When
             q = 2 we determine the possible parameters of uniformly
             packed 2-error-correcting linear codes. © 1985, Academic
             Press Inc. (London) Limited. All rights reserved.},
   Doi = {10.1016/S0195-6698(85)80004-4},
   Key = {fds331065}
}

@article{fds236041,
   Author = {Best, MR and Burnashev, MV and Lévy, Y and Rabinovich, A and Fishburn,
             PC and Calderbank, AR and Costello, DJ},
   Title = {On a Technique to Calculate the Exact Performance of a
             Convolutional Code},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {41},
   Number = {2},
   Pages = {441-447},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.370145},
   Abstract = {A Markovian technique is described to calculate the exact
             performance of the Viterbi algorithm used as either a
             channel decoder or a source encoder for a convolutional
             code. The probability of information bit error and the
             expected Hamming distortion are computed for codes of
             various rates and constraint lengths. The concept of
             tie-breaking rules is introduced and its influence on
             decoder performance is examined. Computer simulation is used
             to verify the accuracy of the results. Finally, we discuss
             the issue of when a coded system outperforms an uncoded
             system in light of the new results. © 1995
             IEEE},
   Doi = {10.1109/18.370145},
   Key = {fds236041}
}

@article{fds235853,
   Author = {Thangaraj, A and Dihidar, S and Calderbank, AR and McLaughlin, SW and Merolla, JM},
   Title = {On achieving capacity on the wire tap channel using LDPC
             codes},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2005},
   Pages = {1498-1502},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.2005.1523593},
   Abstract = {We investigate the use of capacity and near-capacity
              achieving LDPC codes on the wire tap channel, where the dual
             conditions of reliable communications and security are
             required. We show that good codes for conventional channels
             (like BSC and BEC) also have interesting and useful security
             properties. In this paper we show the connection between the
             decoding threshold of the code and its security against
             eavesdropping. We also give practical code constructions for
             some special cases of the wire tap channel and show that
             security (in the Shannon sense) is a function of the
             decoding threshold. Some of these constructions achieve the
              secrecy capacity as defined by Wyner. These codes provide
             secure communications without conventional key distribution
             and provide a physical-layer approach for either secure
             communications or key distribution.},
   Doi = {10.1109/ISIT.2005.1523593},
   Key = {fds235853}
}
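
The constructions referred to above follow the coset-coding idea for the
wiretap channel: the secret bits select a coset of a linear code and a
randomly chosen member of that coset is transmitted, so the legitimate
receiver recovers the secret as a syndrome while the eavesdropper's
uncertainty is tied to the code structure. The toy sketch below uses the
[7,4] Hamming code purely for illustration; the paper's constructions use
LDPC codes and analyze concrete wiretap channel models.

import numpy as np

# Parity-check matrix of the [7,4] Hamming code; its 3-bit syndrome carries
# the secret message, and the transmitted word is a random member of the
# corresponding coset.
H = np.array([[1, 0, 1, 0, 1, 0, 1],
              [0, 1, 1, 0, 0, 1, 1],
              [0, 0, 0, 1, 1, 1, 1]], dtype=np.uint8)
G = np.array([[1, 1, 1, 0, 0, 0, 0],      # a generator matrix of the same code
              [1, 0, 0, 1, 1, 0, 0],
              [0, 1, 0, 1, 0, 1, 0],
              [1, 1, 0, 1, 0, 0, 1]], dtype=np.uint8)

def wiretap_encode(secret_bits, rng):
    # Columns 0, 1, 3 of H are the unit vectors, so setting those positions
    # produces a word whose syndrome equals the secret; adding a random
    # codeword then picks a random member of that coset.
    x = np.zeros(7, dtype=np.uint8)
    for i, b in enumerate(secret_bits):
        x[[0, 1, 3][i]] = b
    return (x + rng.integers(0, 2, size=4, dtype=np.uint8) @ G) % 2

def wiretap_decode(word):
    return tuple(int(v) for v in (H @ word) % 2)   # legitimate receiver: syndrome

rng = np.random.default_rng(0)
print(wiretap_decode(wiretap_encode([1, 0, 1], rng)))   # -> (1, 0, 1)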

@article{fds235749,
   Author = {Calderbank, R and Thompson, A and Xie, Y},
   Title = {On block coherence of frames},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {38},
   Number = {1},
   Pages = {50-71},
   Year = {2013},
   Month = {July},
   ISSN = {1063-5203},
   url = {http://dx.doi.org/10.1016/j.acha.2014.03.003},
   Abstract = {Block coherence of matrices plays an important role in
             analyzing the performance of block compressed sensing
             recovery algorithms (Bajwa and Mixon, 2012). In this paper,
             we characterize two block coherence metrics: worst-case and
             average block coherence. First, we present lower bounds on
             worst-case block coherence, in both the general case and
             also when the matrix is constrained to be a union of
             orthobases. We then present deterministic matrix
             constructions based upon Kronecker products which obtain
             these lower bounds. We also characterize the worst-case
             block coherence of random subspaces. Finally, we present a
             flipping algorithm that can improve the average block
             coherence of a matrix, while maintaining the worst-case
             block coherence of the original matrix. We provide numerical
             examples which demonstrate that our proposed deterministic
             matrix construction performs well in block compressed
             sensing.},
   Doi = {10.1016/j.acha.2014.03.003},
   Key = {fds235749}
}
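
For experimentation, worst-case block coherence can be computed directly
from the off-diagonal blocks of the Gram matrix. The sketch below follows
the definition standard in the block-sparsity literature (largest spectral
norm of a cross-Gram block, scaled by the block size); the random test
frame is an arbitrary illustration, and the exact normalization and the
average-coherence metric used in the paper may differ in detail.

import numpy as np

def worst_case_block_coherence(A, block_size):
    # A: n x N matrix with unit-norm columns, grouped into consecutive blocks
    # of `block_size` columns.  Returns max_{i != j} ||A_i^H A_j||_2 / block_size.
    n, N = A.shape
    assert N % block_size == 0
    blocks = [A[:, k:k + block_size] for k in range(0, N, block_size)]
    mu = 0.0
    for i in range(len(blocks)):
        for j in range(len(blocks)):
            if i != j:
                cross = blocks[i].conj().T @ blocks[j]
                mu = max(mu, np.linalg.norm(cross, 2) / block_size)
    return mu

# Illustrative test: a random 64 x 128 frame with unit-norm columns.
rng = np.random.default_rng(0)
A = rng.standard_normal((64, 128))
A /= np.linalg.norm(A, axis=0)
print(worst_case_block_coherence(A, block_size=4))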

@article{fds236002,
   Author = {Zeng, M and Calderbank, R and Cui, S},
   Title = {On design of rateless codes over dying binary erasure
             channel},
   Journal = {IEEE Transactions on Communications},
   Volume = {60},
   Number = {4},
   Pages = {889-894},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2012},
   Month = {April},
   ISSN = {0090-6778},
   url = {http://dx.doi.org/10.1109/TCOMM.2012.022712.110038},
   Abstract = {In this paper, we study a practical coding scheme for the
             dying binary erasure channel (DBEC), which is a binary
             erasure channel (BEC) subject to a random fatal failure. We
             consider the rateless codes and optimize the degree
             distribution to maximize the average recovery probability.
             In particular, we first study the upper bound of the average
             recovery probability, based on which we define the objective
             function as the gap between the upper bound and the average
             recovery probability achieved by a particular degree
             distribution. We then seek the optimal degree distribution
             by minimizing the objective function. A simple and heuristic
             approach is also proposed to provide a suboptimal but good
             degree distribution. Simulation results are presented to
             show the significant performance gain over the conventional
             LT codes. © 2012 IEEE.},
   Doi = {10.1109/TCOMM.2012.022712.110038},
   Key = {fds236002}
}
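
As background for the degree-distribution optimization above, the encoder
of a rateless (LT-type) code is easy to sketch: each output symbol is the
XOR of a randomly chosen subset of input blocks whose size is drawn from
the degree distribution. The uniform distribution used here is a generic
placeholder, not the optimized distribution derived in the paper.

import numpy as np

def lt_encode(blocks, num_output, degree_pmf, seed=0):
    # blocks: list of equal-length np.uint8 arrays (the k input blocks).
    # degree_pmf[d-1] is the probability of choosing degree d.
    rng = np.random.default_rng(seed)
    k = len(blocks)
    degrees = np.arange(1, len(degree_pmf) + 1)
    encoded = []
    for _ in range(num_output):
        d = int(rng.choice(degrees, p=degree_pmf))
        neighbors = rng.choice(k, size=min(d, k), replace=False)
        symbol = blocks[neighbors[0]].copy()
        for i in neighbors[1:]:
            symbol ^= blocks[i]                  # XOR of the chosen input blocks
        encoded.append((sorted(int(i) for i in neighbors), symbol))
    return encoded   # (neighbor list, XOR value): enough for a peeling decoder

# Toy usage: 8 input blocks of 4 bytes each, uniform degree distribution on {1,2,3}.
data = [np.frombuffer(bytes([i] * 4), dtype=np.uint8).copy() for i in range(8)]
print(lt_encode(data, num_output=3, degree_pmf=[1/3, 1/3, 1/3])[0])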

@article{fds235839,
   Author = {Diggavi, SN and Al-Dhahir, N and Calderbank, AR},
   Title = {On interference cancellation and high-rate space-time
             codes},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {238},
   Year = {2003},
   Month = {January},
   url = {http://dx.doi.org/10.1109/isit.2003.1228252},
   Abstract = {We study the design and decoding of high-rate space-time
             codes in two contexts. The first part of the paper examines
             the multiple-access channel (MAC) where users employ
             space-time block codes (STBC). The problem is formulated in
             the context of an inter-symbol interference (ISI) channel
             which occurs for transmission over frequency-selective
              channels. We show that a diversity order of 2Mr(v + 1) is
              achievable at full transmission rate for each user, when we
              have Mr receive antennas, channel memory of v and an
             optimal multiuser maximum-likelihood (ML) decoder is used.
             In the second part, we examine high-rate space-time codes
             that have a high-diversity code embedded within them. This
             can also be viewed as unequal error protection codes
             designed for unequal diversity order, which is the metric
             suitable for fading channels.},
   Doi = {10.1109/isit.2003.1228252},
   Key = {fds235839}
}

@article{fds235916,
   Author = {Aggarwal, V and Bennatan, A and Calderbank, AR},
   Title = {On maximizing coverage in Gaussian relay
             channels},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {55},
   Number = {6},
   Pages = {2518-2536},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {June},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2009.2018337},
   Abstract = {Results for Gaussian relay channels typically focus on
             maximizing transmission rates for given locations of the
             source, relay, and destination. We introduce an alternative
             perspective, where the objective is maximizing coverage for
             a given rate. The new objective captures the problem of how
             to deploy relays to provide a given level of service to a
             particular geographic area, where the relay locations become
             a design parameter that can be optimized. We evaluate the
             decode-and-forward (DF) and compress-and-forward (CF)
             strategies for the relay channel with respect to the new
             objective of maximizing coverage. When the objective is
             maximizing rate, different locations of the destination
             favor different strategies. When the objective is coverage
             for a given rate, and the relay is able to decode, DF is
             uniformly superior in that it provides coverage at any point
             served by CF. When the channel model is modified to include
             random fading, we show that the monotone ordering of
             coverage regions is not always maintained. While the
             coverage provided by DF is sensitive to changes in the
             location of the relay and the path loss exponent, CF
             exhibits a more graceful degradation with respect to such
             changes. The techniques used to approximate coverage regions
             are new and may be of independent interest. © 2009
             IEEE.},
   Doi = {10.1109/TIT.2009.2018337},
   Key = {fds235916}
}

@article{fds235878,
   Author = {Aggarwal, V and Bennatan, A and Calderbank, AR},
   Title = {On maximizing coverage in Gaussian relay
             networks},
   Journal = {Proceedings of the 2007 IEEE Information Theory Workshop on
             Information Theory for Wireless Networks,
             ITW},
   Pages = {37-41},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ITWITWN.2007.4318027},
   Abstract = {Results for Gaussian relay channels typically focus on
             maximizing transmission rates for given locations of the
             source, relay and destination. We consider an alternative
             approach, focusing on maximizing coverage for a given rate.
             This novel perspective enables treatment of the relay
             location as a design parameter, producing an extra degree of
             freedom that may be optimized. Focusing on coverage, we
             evaluate existing approaches, like decode and forward (DF),
             compress and forward (CF) and compare them with upper
             bounds. In the process, we obtain some surprising insights
             on the performance of these approaches. ©2007
             IEEE.},
   Doi = {10.1109/ITWITWN.2007.4318027},
   Key = {fds235878}
}

@article{fds235979,
   Author = {Wu, Y and Zheng, H and Calderbank, R and Kulkarni, S and Poor,
             HV},
   Title = {On optimal precoding in wireless multicast
             systems},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3068-3071},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5946306},
   Abstract = {Precoding has been extensively studied for point-to-point
             communications, including the problems of constructing the
             precoding codebook and selecting the best precoder. This
             paper investigates precoding for a multicast channel in
             which a base station is sending the same information to all
             users and each user sends back the index of its best
             precoding matrix. It is assumed that users do not
             collaborate and that no channel state information is known
             at the base station. Optimization problems are formulated to
             reduce the packet drop rate. A set of probabilistic
              algorithms that effectively reduce the average packet drop
             rate are presented. It is shown numerically that these new
             schemes lead to significant improvements. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5946306},
   Key = {fds235979}
}

@article{fds235757,
   Author = {Vaishampayan, VA and Calderbank, AR and Batllo,
             JC},
   Title = {On reducing granular distortion in multiple description
             quantization},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {98},
   Publisher = {IEEE},
   Year = {1998},
   Month = {December},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.1998.708685},
   Abstract = {There is a gap of 3.07 dB between the distortion product of
             a multiple description quantizer and the multiple
             description rate distortion bound. In this paper we seek to
             close this gap through the design of a quantizer with
             smaller granular distortion. © 1998 IEEE.},
   Doi = {10.1109/ISIT.1998.708685},
   Key = {fds235757}
}

@article{fds235901,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {On the achievable efficiency-fairness tradeoff in
             utility-optimal MAC protocols},
   Journal = {IEICE Transactions on Communications},
   Volume = {E91-B},
   Number = {4},
   Pages = {1231-1234},
   Publisher = {Institute of Electronics, Information and Communications
             Engineers (IEICE)},
   Year = {2008},
   Month = {January},
   ISSN = {0916-8516},
   url = {http://dx.doi.org/10.1093/ietcom/e91-b.4.1231},
   Abstract = {We use the network utility maximization (NUM) framework to
             create an efficient and fair medium access control (MAC)
             protocol for wireless networks. By adjusting the parameters
             in the utility objective functions of NUM problems, we
             control the tradeoff between efficiency and fairness of
             radio resource allocation through a rigorous and systematic
             design. In this paper, we propose a scheduling-based MAC
             protocol. Since it provides an upper-bound on the achievable
             performance, it establishes the optimality benchmarks for
             comparison with other algorithms in related work. Copyright
             © 2008 The Institute of Electronics, Information and
             Communication Engineers.},
   Doi = {10.1093/ietcom/e91-b.4.1231},
   Key = {fds235901}
}

@article{fds235927,
   Author = {Wu, Y and Davis, LM and Calderbank, R},
   Title = {On the capacity of the discrete-time channel with uniform
             output quantization},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {2194-2198},
   Publisher = {IEEE},
   Year = {2009},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ISIT.2009.5205826},
   Abstract = {This paper provides new insight into the classical problem
             of determining both the capacity of the discrete-time
             channel with uniform output quantization and the capacity
             achieving input distribution. It builds on earlier work by
             Gallager and Witsenhausen to provide a detailed analysis of
             two particular quantization schemes. The first is saturation
             quantization where overflows are mapped to the nearest
             quantization bin, and the second is modulo quantization
             where overflows are mapped to the nearest quantization bin
             after reduction by some modulus. Both the capacity of modulo
             quantization and the capacity achieving input distribution
              are determined. When the additive noise is Gaussian and
             relatively small, the capacity of saturation quantization is
             shown to be bounded below by that of modulo quantization. In
             the limit of arbitrarily many uniform quantization levels,
             it is shown that the difference between the upper and lower
             bounds on capacity given by Ihara is only 0.26 bits. © 2008
             IEEE.},
   Doi = {10.1109/ISIT.2009.5205826},
   Key = {fds235927}
}

@article{fds235965,
   Author = {Wu, Y and Achtzehn, A and Petrova, M and Mähönen, P and Calderbank,
             R},
   Title = {On the effect of feedback delay on limited-rate beamforming
             systems},
   Journal = {GLOBECOM - IEEE Global Telecommunications
             Conference},
   Publisher = {IEEE},
   Year = {2010},
   Month = {January},
   url = {http://dx.doi.org/10.1109/GLOCOM.2010.5684288},
   Abstract = {The use of beamforming to enable higher data rates in
             telecommunications is widely appreciated, but performance
             gains are typically calculated assuming delay-free feedback
             from the receiver and neglecting processing time. This paper
             introduces a mathematical framework based on outage
             probability that measures the extent to which current
             channel state information is accurate. Performance gains
             from beamforming can then be evaluated as a function of the
             currency of system state. Results are provided for Multiple
             Input Single Output (MISO) and for Multiuser Multiple Input
             Multiple Output (MU-MIMO) systems. Outage probabilities and
             effective diversity orders are calculated for widely used
             methods of beamforming such as Transmit Antenna Selection as
             a function of the speed of channel variation. ©2010
             IEEE.},
   Doi = {10.1109/GLOCOM.2010.5684288},
   Key = {fds235965}
}

@article{fds235986,
   Author = {Gomaa, A and Chi, Y and Al-Dhahir, N and Calderbank,
             R},
   Title = {On training signal design for multi-user MIMO-OFDM:
             Performance analysis and tradeoffs},
   Journal = {IEEE Vehicular Technology Conference},
   Publisher = {IEEE},
   Year = {2011},
   Month = {December},
   ISSN = {1550-2252},
   url = {http://dx.doi.org/10.1109/VETECF.2011.6092844},
   Abstract = {This paper addresses spectrally-efficient multiantenna
             multi-carrier uplink transmission scenarios where the users
             overlap in time and frequency and are separated using
             spatial processing at the base station. The robustness of
             the proposed training sequences to residual carrier
             frequency offset and phase noise is evaluated analytically.
             This analysis reveals an interesting design tradeoff between
             the Peak-to-Average Power Ratio of a training sequence and
             the increase in channel estimation mean squared error over
             the ideal case when these two impairments are not present.
             © 2011 IEEE.},
   Doi = {10.1109/VETECF.2011.6092844},
   Key = {fds235986}
}

@article{fds343580,
   Author = {Calderbank, R},
   Title = {On uniformly packed [n, n-k, 4] codes over GF(q) and a class
              of caps in PG(k-1, q)},
   Journal = {Journal of the London Mathematical Society},
   Volume = {s2-26},
   Number = {2},
   Pages = {365-384},
   Year = {1982},
   Month = {January},
   url = {http://dx.doi.org/10.1112/jlms/s2-26.2.365},
   Abstract = {We determine all uniformly packed [n, k, 4] codes over
              GF(2) and we derive a non-trivial necessary condition for
              the existence of uniformly packed [n, k, 4] codes over
              GF(q), where q > 2 is a prime power. This condition allows
              us to classify uniformly packed [n, k, 4] codes over GF(4).
              As a corollary we obtain a necessary condition for the
              existence of a projective (n, k, h1, h2) set S in
              PG(k-1, q) with the property that no three points of S are
              collinear. A further corollary is a necessary condition for
              the linear representation of partial quadrangles. © 1982,
              Oxford University Press. All rights reserved.},
   Doi = {10.1112/jlms/s2-26.2.365},
   Key = {fds343580}
}

@article{fds236018,
   Author = {Calderbank, AR and Coffman, EG and Flatto, L},
   Title = {Optimal directory placement on disk storage
             devices},
   Journal = {Journal of the ACM (JACM)},
   Volume = {35},
   Number = {2},
   Pages = {433-446},
   Publisher = {Association for Computing Machinery (ACM)},
   Year = {1988},
   Month = {April},
   url = {http://dx.doi.org/10.1145/42282.42287},
   Abstract = {Two mathematical models dealing with optimal placement of
             directories on disk devices are analyzed. Storage addresses
             on the disk are approximated by points in the interval [0,
             1]. Requests for information on the disk are represented by
             a sequence of file names. To process a request, a read-write
             head is first moved to a directory kept on the disk that
             specifies the address of the file, and then a head is moved
             to the specified address. The addresses are assumed to be
              independent and uniform on [0, 1]. In the first model we
             consider a system of two heads separated by a fixed distance
             d and a directory situated at 0 ≤ x ≤ 1. In the second
             model we consider a system consisting of one head and n ≥
             2 directories at 0 ≤ x1 < x2 < … < xn ≤ 1. For both
             models we study the problem of finding those values of the
             parameters that minimize the expected head motion to process
             a request in statistical equilibrium. © 1988, ACM. All
             rights reserved.},
   Doi = {10.1145/42282.42287},
   Key = {fds236018}
}

@article{fds235870,
   Author = {Xu, D and Li, Y and Chiang, M and Calderbank, AR},
   Title = {Optimal provisioning of elastic service availability},
   Journal = {Proceedings - IEEE INFOCOM},
   Pages = {1505-1513},
   Publisher = {IEEE},
   Year = {2007},
   Month = {September},
   ISSN = {0743-166X},
   url = {http://dx.doi.org/10.1109/INFCOM.2007.177},
   Abstract = {Service availability is one of the most closely scrutinized
             metrics in offering network services. The network vendor can
             earn more revenue from the customers by guaranteeing higher
             service availability at the cost of higher operational
             expense. It is important to cost-effectively provision a
             managed and differentiated network with various service
             availability guarantees under a unified platform. In this
             paper, we establish the framework of provisioning elastic
             service availability through network utility maximization,
             and propose an optimal and distributed solution using
             differentiated failure recovery schemes. First, we develop a
             utility function with configurable parameters to represent
             the satisfaction perceived by a user upon service
             availability as well as its allowed source rate. Second,
             adopting Quality of Protection [1] and shared path
             protection, we transform optimal provisioning of elastic
             service availability into a convex optimization problem. The
             desirable service availability and source rate for each user
             can be achieved using a price-based distributed algorithm.
             Finally, we numerically show the tradeoff between the
             throughput and the service availability obtained by users in
             various network topologies. Several quantitative
             observations are made from this investigation. For example,
             indiscriminately provisioning service availabilities for
             different kinds of users within one network leads to
             noteworthy sub-optimality in total network utility. The
             profile of bandwidth usage also illustrates that
             provisioning high service availability exclusively for
             critical applications leads to significant waste in
             bandwidth resource. © 2007 IEEE.},
   Doi = {10.1109/INFCOM.2007.177},
   Key = {fds235870}
}

@article{fds235869,
   Author = {Li, Y and Chiang, M and Calderbank, AR and Diggavi,
             SN},
   Title = {Optimal rate-reliability-delay tradeoff in networks with
             composite links},
   Journal = {Proceedings - IEEE INFOCOM},
   Pages = {526-534},
   Publisher = {IEEE},
   Year = {2007},
   Month = {September},
   ISSN = {0743-166X},
   url = {http://dx.doi.org/10.1109/INFCOM.2007.68},
   Abstract = {Networks need to accommodate diverse applications with
             different Quality-of-Service (QoS) requirements. New ideas
             at the physical layer are being developed for this purpose,
             such as diversity embedded coding, which is a technique that
             combines high rates with high reliability. We address the
             problem of how to fully utilize different rate-reliability
             characteristics at the physical layer to support different
             types of traffic over a network and to jointly maximize
             their utilities. We set up a new framework based on utility
             maximization for networks with composite links, meaning that
             each link consists of sub-links that can attain different
             rate-reliability characteristics simultaneously. We
             incorporate delay, in addition to rate and reliability, into
             the utility functions. To accommodate different types of
             traffic, we propose distributed algorithms for the optimal
             rate-reliability-delay tradeoff based on capacity division
             and priority queueing. Numerical results show that compared
             with traditional codes, the new codes can provide higher
             network utilities for all traffic types simultaneously. The
             results also show that priority queueing achieves higher
             network utility than capacity division. © 2007
             IEEE.},
   Doi = {10.1109/INFCOM.2007.68},
   Key = {fds235869}
}

@article{fds235915,
   Author = {Li, Y and Chiang, M and Calderbank, AR and Diggavi,
             SN},
   Title = {Optimal rate-reliability-delay tradeoff in networks with
             composite links},
   Journal = {IEEE Transactions on Communications},
   Volume = {57},
   Number = {5},
   Pages = {1390-1401},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {June},
   ISSN = {0090-6778},
   url = {http://dx.doi.org/10.1109/TCOMM.2009.05.070198},
   Abstract = {Networks need to accommodate diverse applications with
             different Quality-of-Service (QoS) requirements. New ideas
             at the physical layer are being developed for this purpose,
             such as diversity embedded coding, which is a technique that
             combines high rates with high reliability. We address the
             problem of how to fully utilize different rate-reliability
             characteristics at the physical layer to support different
             types of traffic over a network and to jointly maximize
             their utilities. We set up a new framework based on utility
             maximization for networks with composite links, meaning that
             each link consists of sub-links that can attain different
             rate-reliability characteristics simultaneously. We
             incorporate delay, in addition to rate and reliability, into
             the utility functions. To accommodate different types of
             traffic, we propose distributed algorithms converging to the
             optimal rate-reliability-delay tradeoff based on capacity
             division and priority queueing. Numerical results show that
             compared with traditional codes, the new codes can provide
             higher network utilities for all traffic types
             simultaneously. The results also show that priority queueing
             achieves higher network utility than capacity division. ©
             2009 IEEE.},
   Doi = {10.1109/TCOMM.2009.05.070198},
   Key = {fds235915}
}

@article{fds235910,
   Author = {Li, Z and Li, Y and Chiang, M and Calderbank, R and Chen,
             YC},
   Title = {Optimal transmission scheduling for scalable wireless video
             broadcast with rateless erasure correction
             code},
   Journal = {2009 6th IEEE Consumer Communications and Networking
             Conference, CCNC 2009},
   Publisher = {IEEE},
   Year = {2009},
   Month = {April},
   url = {http://dx.doi.org/10.1109/CCNC.2009.4784712},
   Abstract = {With the advances in wireless technology and explosive
             growth of mobile devices and wireless networks, mobile TV is
             becoming a popular application. The main technical challenge
             to wireless video broadcast is to provide the best quality
             of service possible under the radio resource constraints. In
             this paper we propose an application layer middleware
             solution that utilizes the scalability in video coding with
             rateless erasure correction codes to achieve a balance in
             the quality of service (QoS) and radio resource efficiency.
             Simulation results demonstrate the effectiveness of the
             solution. ©2009 IEEE.},
   Doi = {10.1109/CCNC.2009.4784712},
   Key = {fds235910}
}

@article{fds235794,
   Author = {Calderbank, AR and Coffman, EG and Flatto, L},
   Title = {Optimum Head Separation in a Disk System with Two Read/Write
             Heads},
   Journal = {Journal of the ACM (JACM)},
   Volume = {31},
   Number = {4},
   Pages = {826-838},
   Publisher = {Association for Computing Machinery (ACM)},
   Year = {1984},
   Month = {September},
   url = {http://dx.doi.org/10.1145/1634.1638},
   Abstract = {A mathematical model of computer disk storage devices having
             two movable read/write heads is studied. Storage addresses
             are approximated by points in the continuous interval [0,
             1], and requests for information on the disk are processed
             first-come-first-served. We assume that the disk heads are
             maintained a fixed distance d apart; that is, in processing
             a request, both heads are moved the same distance in the
             same direction. Assuming that successive requested locations
             are independently and uniformly distributed over [0, 1], we
             calculate the invariant measure of a Markov chain
             representing successive head positions under the
              nearer-server rule: Requests in [0, d] are processed by
             the left head, those in [1 - d, 1] by the right head, and
             those in [d, 1 - d] by the nearer of the two heads. Our
             major objective is the equilibrium expected distance E(d)
             that the heads are moved in processing a request. For the
             problem of designing the separation distance d, we show that
              E(0.44657) ≈ 0.16059 ≈ min_d E(d). Thus, a basic insight
             of the analysis is that a system with two heads performs
             more than twice as well as a system with a single head. The
             results are compared with those for other two-head disk
             systems. Finally, numerical results are presented that
             demonstrate that the nearer-server rule is very nearly
             optimal under the fixed head-separation constraint. © 1984,
             ACM. All rights reserved.},
   Doi = {10.1145/1634.1638},
   Key = {fds235794}
}
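
Because the abstract reports concrete numbers, the model as described
(uniform requests, fixed head separation d, nearer-server rule) is easy to
check by Monte Carlo simulation. The sketch below is only an illustration
of that model, not the paper's analytical derivation; up to sampling error
its output can be compared with the reported value E(0.44657) ≈ 0.16059.

import random

def expected_head_motion(d, num_requests=200_000, seed=1):
    # Two heads kept exactly d apart; the left head position p lies in [0, 1-d].
    # Nearer-server rule: requests in [0, d) must use the left head, requests in
    # (1-d, 1] the right head, and requests in [d, 1-d] the closer of the two.
    random.seed(seed)
    p = (1.0 - d) / 2.0                  # left head; the right head sits at p + d
    total = 0.0
    for _ in range(num_requests):
        r = random.random()
        if r < d or (r <= 1.0 - d and abs(r - p) <= abs(r - (p + d))):
            total += abs(r - p)          # left head serves; both heads shift together
            p = r
        else:
            total += abs(r - (p + d))    # right head serves
            p = r - d
    return total / num_requests

print(expected_head_motion(0.44657))     # compare with the reported 0.16059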

@article{fds235765,
   Author = {Wu, T and Polatkan, G and Steel, D and Brown, W and Daubechies, I and Calderbank, R},
   Title = {Painting analysis using wavelets and probabilistic topic
             models},
   Journal = {2013 IEEE International Conference on Image Processing, ICIP
             2013 - Proceedings},
   Pages = {3264-3268},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ICIP.2013.6738672},
   Abstract = {In this paper, computer-based techniques for stylistic
             analysis of paintings are applied to the five panels of the
             14th century Peruzzi Altarpiece by Giotto di Bondone.
             Features are extracted by combining a dual-tree complex
             wavelet transform with a hidden Markov tree (HMT) model.
             Hierarchical clustering is used to identify stylistic
             keywords in image patches, and keyword frequencies are
             calculated for sub-images that each contains many patches. A
             generative hierarchical Bayesian model learns stylistic
             patterns of keywords; these patterns are then used to
             characterize the styles of the sub-images; this in turn,
             permits to discriminate between paintings. Results suggest
             that such unsupervised probabilistic topic models can be
             useful to distill characteristic elements of style. © 2013
             IEEE.},
   Doi = {10.1109/ICIP.2013.6738672},
   Key = {fds235765}
}

@article{fds331063,
   Author = {Calderbank, AR and Robinson, RW and Hanlon, P},
   Title = {Partitions into even and odd block size and some unusual
             characters of the symmetric groups},
   Journal = {Proceedings of the London Mathematical Society},
   Volume = {s3-53},
   Number = {2},
   Pages = {288-320},
   Publisher = {Oxford University Press (OUP)},
   Year = {1986},
   Month = {January},
   url = {http://dx.doi.org/10.1112/plms/s3-53.2.288},
   Abstract = {For each n and k, let Πn(i, k) denote the poset of all
             partitions of n having every block size congruent to i mod
             k. Attach to Πn(i, k) a unique maximal or minimal element
             if it does not already have one, and denote the resulting
             poset Πn(i, k). Results of Björner, Sagan, and Wachs show
             that Πn(0, k) and Πn(1, k) are lexicographically
              shellable, and hence Cohen-Macaulay. Let βn(0, k) and
              βn(1, k) denote the characters of Sn acting on the unique
              non-vanishing reduced homology groups of Πn(0, k) and
              Πn(1, k). This paper is divided into three parts. In the
             first part, we use combinatorial methods to derive defining
             equations for the generating functions of the character
             values of the βn(i, k). The most elegant of these states
             that the generating function for the characters βn(1, k) (t
             = 0, 1,…) is the inverse in the composition ring (or
             plethysm ring) to the generating function for the
             corresponding trivial characters εni+l. In the second part,
             we use these cycle index sum equations to examine the values
             of the characters βn(1, 2) and βn(0, 2). We show that the
             values of βn(0, 2) are simple multiples of the tangent
             numbers and that the restrictions of the βn(0, 2) to Sn-1
             are the skew characters examined by Foulkes (whose values
             are always plus or minus a tangent number). In the case
              βn(1, 2) a number of remarkable results arise. First it is
              shown that a series of polynomials (pσ(λ): σ ∈ Sn) which
             are connected with our cycle index sum equations satisfy
             βn(1, 2)(σ) = pσ (0) or pσ (1) depending on whether n is
             odd or even. Next it is shown that the pσ(λ) have integer
             roots which obey a simple recursion. Lastly it is shown that
              the pσ(λ) have a combinatorial interpretation. If the rank
              function of Πn(1, k) is naturally modified to depend on a
              then the polynomials pσ(λ) are the Birkhoff polynomials of
              the fixed point posets Πn(1, k)σ. In the last part we prove
              a conjecture of R. P. Stanley which identifies the
              restriction of βn(0, 2) to Sn-1 as a skew character. A
             consequence of this result is a simple combinatorial method
             for decomposing βn(0, k) into irreducibles. © 1986 Oxford
             University Press.},
   Doi = {10.1112/plms/s3-53.2.288},
   Key = {fds331063}
}

@article{fds236000,
   Author = {Raginsky, M and Jafarpour, S and Harmany, ZT and Marcia, RF and Willett,
             RM and Calderbank, R},
   Title = {Performance bounds for expander-based compressed sensing in
             poisson noise},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {59},
   Number = {9},
   Pages = {4139-4153},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {September},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2011.2157913},
   Abstract = {This paper provides performance bounds for compressed
             sensing in the presence of Poisson noise using expander
             graphs. The Poisson noise model is appropriate for a variety
             of applications, including low-light imaging and digital
             streaming, where the signal-independent and/or bounded noise
             models used in the compressed sensing literature are no
             longer applicable. In this paper, we develop a novel sensing
             paradigm based on expander graphs and propose a maximum a
             posteriori (MAP) algorithm for recovering sparse or
             compressible signals from Poisson observations. The geometry
             of the expander graphs and the positivity of the
             corresponding sensing matrices play a crucial role in
             establishing the bounds on the signal reconstruction error
             of the proposed algorithm. We support our results with
             experimental demonstrations of reconstructing average packet
             arrival rates and instantaneous packet counts at a router in
             a communication network, where the arrivals of packets in
             each flow follow a Poisson process. © 2011
             IEEE.},
   Doi = {10.1109/TSP.2011.2157913},
   Key = {fds236000}
}
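
A simple way to experiment with the sensing architecture discussed here is
to build the adjacency matrix of a random left-d-regular bipartite graph,
which for suitable parameters is an expander with high probability, and to
draw Poisson measurements through it. The sketch below only constructs
such a 0/1 sensing matrix and simulates photon-count measurements; the
parameter values are arbitrary illustrations and the MAP recovery step
from the paper is not included.

import numpy as np

def left_regular_sensing_matrix(m, n, d, seed=0):
    # Adjacency matrix of a random bipartite graph: each of the n signal nodes
    # connects to d of the m measurement nodes.  The result is a 0/1,
    # positivity-preserving sensing operator suited to Poisson data.
    rng = np.random.default_rng(seed)
    A = np.zeros((m, n), dtype=np.uint8)
    for j in range(n):
        A[rng.choice(m, size=d, replace=False), j] = 1
    return A

rng = np.random.default_rng(1)
n, m, k, d = 1000, 200, 10, 8
A = left_regular_sensing_matrix(m, n, d)
x = np.zeros(n)
x[rng.choice(n, size=k, replace=False)] = rng.uniform(50, 100, size=k)  # sparse intensities
y = rng.poisson(A @ x)            # photon-count style measurements
print(A.sum(axis=0)[:5], y[:5])   # every column has d ones; y holds Poisson counts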

@article{fds235941,
   Author = {Jafarpour, S and Willett, R and Raginsky, M and Calderbank,
             R},
   Title = {Performance bounds for expander-based compressed sensing in
             the presence of Poisson noise},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {513-517},
   Year = {2009},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2009.5469879},
   Abstract = {This paper provides performance bounds for compressed
             sensing in the presence of Poisson noise using expander
             graphs. The Poisson noise model is appropriate for a variety
             of applications, including low-light imaging and digital
             streaming, where the signal-independent and/or bounded noise
             models used in the compressed sensing literature are no
             longer applicable. In this paper, we develop a novel sensing
             paradigm based on expander graphs and propose a MAP
             algorithm for recovering sparse or compressible signals from
             Poisson observations. The geometry of the expander graphs
             and the positivity of the corresponding sensing matrices
             play a crucial role in establishing the bounds on the signal
             reconstruction error of the proposed algorithm. The geometry
             of the expander graphs makes them provably superior to
             random dense sensing matrices, such as Gaussian or partial
              Fourier ensembles, for the Poisson noise model. We support
             our results with experimental demonstrations. © 2009
             IEEE.},
   Doi = {10.1109/ACSSC.2009.5469879},
   Key = {fds235941}
}

@article{fds236036,
   Author = {Betts, W and Calderbank, AR and Laroia, R},
   Title = {Performance of Nonuniform Constellations on the Gaussian
             Channel},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {40},
   Number = {5},
   Pages = {1633-1638},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.333880},
   Abstract = {Testing of high-speed voiceband modems has revealed a
             significant increase in distortion for points near the
             perimeter of a QAM signal constellation. This distortion
             increases with distance from the center of the constellation
             and limits performance at data rates above 19.2 kb / s. The
             perimeter distortion can be reduced by transforming the
             signal constellation so that points near the center are
             closer together, and points near the perimeter are further
             apart. When the channel SNR is high, such a transformation
             reduces immunity to Gaussian noise because points near the
             center of the transformed constellation are closer together
             than in a uniformly spaced constellation with the same
             average power. This paper demonstrates theoretically that
             for channel SNR's of practical interest, there is actually a
             small gain in immunity to Gaussian noise. In fact, an
             appropriate coded modulation scheme can produce gains of
             about 0.25 dB. © 1994 IEEE},
   Doi = {10.1109/18.333880},
   Key = {fds236036}
}

@article{fds235782,
   Author = {Duarte, MF and Jafarpour, S and Calderbank, AR},
   Title = {Performance of the Delsarte-Goethals frame on clustered
             sparse vectors},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {61},
   Number = {8},
   Pages = {1998-2008},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2013},
   Month = {April},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2013.2242064},
   Abstract = {The Delsarte-Goethals frame (DGF) has been proposed for
             deterministic compressive sensing of sparse and compressible
             signals. Results in compressive sensing theory show that the
             DGF enables successful recovery of an overwhelming majority
             of sufficiently sparse signals. However, these results do
             not give a characterization of the sparse vectors for which
             the recovery procedure fails. In this paper, we present a
             formal analysis of the DGF that highlights the presence of
             clustered sparse vectors within its null space. This in turn
             implies that sparse recovery performance is diminished for
             sparse vectors that have their nonzero entries clustered
             together. Such clustered structure is present in compressive
             imaging applications, where commonly-used raster scannings
             of 2-D discrete wavelet transform representations yield
             clustered sparse representations for natural images. Prior
             work leverages this structure by proposing specially
             tailored sparse recovery algorithms that partition the
             recovery of the input vector into known clustered and
             unclustered portions. Alternatively, we propose new
             randomized and deterministic raster scannings for clustered
             coefficient vectors that improve recovery performance.
             Experimental results verify the aforementioned analysis and
             confirm the predicted improvements for both noiseless and
             noisy measurement regimes. © 1991-2012 IEEE.},
   Doi = {10.1109/TSP.2013.2242064},
   Key = {fds235782}
}

@article{fds235914,
   Author = {Sirianunpiboon, S and Davis, LM and Calderbank,
             R},
   Title = {Performance of the golden code in the presence of
             polarization diversity},
   Journal = {Proceedings of the 2009 Australian Communications Theory
             Workshop, AusCTW 2009},
   Pages = {23-27},
   Publisher = {IEEE},
   Year = {2009},
   Month = {June},
   url = {http://dx.doi.org/10.1109/AUSCTW.2009.4805594},
   Abstract = {The performance of a multiple-input multiple-output (MIMO)
             system depends strongly on the scattering environment and
             antenna spacing. The use of dual-polarized antennas is a
             promising alternative both in terms of diversity and
             effective use of space. In this paper we investigate the
             performance of the Golden code with polarization diversity;
             where the two spatially separated antennas are replaced by a
             single dual polarised antenna. We analyse the performance of
             the Golden code in terms of an angle between the channels
             corresponding to the two receivers, which allows us to
             predict performance without necessarily needing to resort to
             simulations. Analysis and simulation results show that with
             the introduction of polarization diversity the performance
             of the Golden code can be made consistently good across both
             rich scattering and line of sight (LOS) conditions. © 2009
             IEEE.},
   Doi = {10.1109/AUSCTW.2009.4805594},
   Key = {fds235914}
}

@article{fds303201,
   Author = {Chi, Y and Eldar, YC and Calderbank, R},
   Title = {PETRELS: Parallel subspace estimation and tracking by
             recursive least squares from partial observations},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {61},
   Number = {23},
   Pages = {5947-5959},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2013},
   Month = {November},
   url = {http://arxiv.org/abs/1207.6353v2},
   Abstract = {Many real world datasets exhibit an embedding of
             low-dimensional structure in a high-dimensional manifold.
             Examples include images, videos and internet traffic data.
             It is of great significance to estimate and track the
             low-dimensional structure with small storage requirements
             and computational complexity when the data dimension is
             high. Therefore we consider the problem of reconstructing a
             data stream from a small subset of its entries, where the
             data is assumed to lie in a low-dimensional linear subspace,
             possibly corrupted by noise. We further consider tracking
             the change of the underlying subspace, which can be applied
             to applications such as video denoising, network monitoring
             and anomaly detection. Our setting can be viewed as a
             sequential low-rank matrix completion problem in which the
             subspace is learned in an online fashion. The proposed
             algorithm, dubbed Parallel Estimation and Tracking by
             REcursive Least Squares (PETRELS), first identifies the
             underlying low-dimensional subspace, and then reconstructs
             the missing entries via least-squares estimation if
             required. Subspace identification is performed via a
             recursive procedure for each row of the subspace matrix in
             parallel with discounting for previous observations.
             Numerical examples are provided for direction-of-arrival
             estimation and matrix completion, comparing PETRELS with
             state of the art batch algorithms. © 1991-2012
             IEEE.},
   Doi = {10.1109/TSP.2013.2282910},
   Key = {fds303201}
}
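
The recursive procedure described in this abstract can be summarized
compactly: at each time step, estimate the coefficient vector from the
observed entries, then update every observed row of the subspace estimate
by discounted recursive least squares. The sketch below is a simplified
illustration of that idea; the parameter names, forgetting factor, and toy
experiment are assumptions, not the authors' reference implementation.

import numpy as np

def subspace_track(stream, mask, rank, lam=0.98, delta=100.0, seed=0):
    # stream: (T, n) array of data vectors (rows); mask: (T, n) boolean array of
    # observed entries.  Returns an n x rank estimate of the underlying subspace.
    rng = np.random.default_rng(seed)
    T, n = stream.shape
    D = rng.standard_normal((n, rank))           # current subspace estimate
    R = np.stack([delta * np.eye(rank)] * n)     # per-row inverse correlation matrices
    for t in range(T):
        obs = np.flatnonzero(mask[t])
        if obs.size == 0:
            continue
        a, *_ = np.linalg.lstsq(D[obs], stream[t, obs], rcond=None)  # coefficients
        resid = stream[t, obs] - D[obs] @ a      # a priori fit error on observed rows
        for e, m in zip(resid, obs):
            Rm = R[m] / lam                      # discount past observations
            g = Rm @ a / (1.0 + a @ Rm @ a)      # RLS gain for row m
            R[m] = Rm - np.outer(g, a @ Rm)
            D[m] += e * g                        # update only the observed rows
    return D

# Toy usage: a static 2-D subspace in R^20 observed through a 40% random mask.
rng = np.random.default_rng(3)
U = np.linalg.qr(rng.standard_normal((20, 2)))[0]
X = (U @ rng.standard_normal((2, 500))).T + 0.01 * rng.standard_normal((500, 20))
M = rng.random((500, 20)) < 0.4
Q = np.linalg.qr(subspace_track(X, M, rank=2))[0]
print(np.linalg.norm(U - Q @ (Q.T @ U)))         # small value => subspace recovered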

@article{fds236078,
   Author = {Chi, Y and Eldar, YC and Calderbank, R},
   Title = {PETRELS: Subspace estimation and tracking from partial
             observations},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3301-3304},
   Publisher = {IEEE},
   Year = {2012},
   Month = {October},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2012.6288621},
   Abstract = {We consider the problem of reconstructing a data stream from
             a small subset of its entries, where the data stream is
             assumed to lie in a low-dimensional linear subspace,
             possibly corrupted by noise. It is also important to track
             the change of underlying subspace for many applications.
             This problem can be viewed as a sequential low-rank matrix
             completion problem in which the subspace is learned in an
             online fashion. The proposed algorithm, called Parallel
             Estimation and Tracking by REcursive Least Squares
             (PETRELS), identifies the underlying low-dimensional
             subspace via a recursive procedure for each row of the
             subspace matrix in parallel, and then reconstructs the
             missing entries via least-squares estimation if required.
             PETRELS outperforms previous approaches by discounting
             observations in order to capture long-term behavior of the
             data stream and be able to adapt to it. Numerical examples
             are provided for direction-of-arrival estimation and matrix
             completion, comparing PETRELS with state of the art batch
             algorithms. © 2012 IEEE.},
   Doi = {10.1109/ICASSP.2012.6288621},
   Key = {fds236078}
}

@article{fds235861,
   Author = {Minn, H and Li, Y and Al-Dhahir, N and Calderbank,
             R},
   Title = {Pilot designs for consistent frequency offset estimation in
             OFDM systems},
   Journal = {IEEE International Conference on Communications},
   Volume = {10},
   Pages = {4566-4571},
   Publisher = {IEEE},
   Year = {2006},
   Month = {January},
   ISSN = {0536-1486},
   url = {http://dx.doi.org/10.1109/ICC.2006.255359},
   Abstract = {This paper presents pilot designs for consistent frequency
              offset estimation of OFDM systems in frequency-selective
             fading channels. We describe two design approaches, namely
             consistency in the probabilistic sense and absolute
             consistency. Existing preambles and pilot designs in the
             literature do not guarantee the absolute consistency. We
             derive general criteria for both approaches, present
             sufficient conditions on the pilot structures, and derive
             simple pilot designs satisfying these conditions. Absolute
             consistency should not be compromised in emergency-related
             or other critical communication scenarios and our proposed
             consistent pilot designs address this need. © 2006
             IEEE.},
   Doi = {10.1109/ICC.2006.255359},
   Key = {fds235861}
}

@article{fds235886,
   Author = {Li, Y and Minn, H and Al-Dhahir, N and Calderbank,
             AR},
   Title = {Pilot designs for consistent frequency-offset estimation in
             OFDM systems},
   Journal = {IEEE Transactions on Communications},
   Volume = {55},
   Number = {5},
   Pages = {864-877},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {May},
   ISSN = {0090-6778},
   url = {http://dx.doi.org/10.1109/TCOMM.2007.896105},
   Abstract = {This paper presents pilot designs for consistent
             frequency-offset estimation of orthogonal frequency-division
             multiplexing systems in frequency-selective fading channels.
             We describe two design approaches, namely, consistency in
             the probabilistic sense and absolute consistency. Existing
             preambles and pilot designs in the literature do not
             guarantee the absolute consistency. We derive general
             criteria for both approaches, present sufficient conditions
             on the pilot structures over the maximum carrier frequency
             offset (CFO) estimation range (half of the sampling rate),
             and derive simple pilot designs satisfying these conditions.
             We also extend the sufficient conditions to any arbitrary
             but fixed CFO estimation range, and present some generalized
             design patterns. Furthermore, the CFO estimation
             performances of distinct consistent pilot designs can be
             quite different at moderate or low signal-to-noise ratio
             (SNR) due to different statistics of outliers which also
             yields a link failure. We develop efficient pilot-design
             criteria that provide both consistency and robustness
             against outliers at moderate-to-low SNR. Our consistent
             pilot designs facilitate flexible and economical
             implementation, while our robust pilot designs enable
             wireless links with less outage and better resilience. ©
             2007 IEEE.},
   Doi = {10.1109/TCOMM.2007.896105},
   Key = {fds235886}
}

@article{fds235922,
   Author = {Gilbert, G and Weinstein, YS and Aggarwal, V and Calderbank,
             AR},
   Title = {Practical quantum fault tolerance},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {7342},
   Publisher = {SPIE},
   Year = {2009},
   Month = {September},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.818683},
   Abstract = {The standard approach to quantum fault tolerance is to
             calculate error thresholds on basic gates in the limit of
             arbitrarily many concatenation levels. In contrast this
             paper takes the number of qubits and the target
             implementation accuracy as given, and provides a framework
             for engineering the constrained quantum system to the
             required tolerance. The approach requires solving the full
             dynamics of the quantum system for an arbitrary admixture
             (biased or unbiased) of Pauli errors. The inaccuracy between
             ideal and implemented quantum systems is captured by the
             supremum of the Schatten k-norm of the difference between
             the ideal and implemented density matrices taken over all
             density matrices. This is a more complete analysis than the
             standard approach, where an intricate combination of worst
             case assumptions and combinatorial analysis is used to
             analyze the special case of equiprobable errors. Conditions
             for fault tolerance are now expressed in terms of error
             regions rather than a single number (the standard error
             threshold). In the important special case of a stochastic
             noise model and a single logical qubit, an optimization over
             all 2×2 density matrices is required to obtain the full
             dynamics. The complexity of this calculation is greatly
             simplified through reduction to an optimization over only
             three projectors. Error regions are calculated for the
             standard 5- and 7-qubit codes. Knowledge of the full
             dynamics makes it possible to design sophisticated
             concatenation strategies that go beyond repeatedly using the
             same code, and these strategies can achieve target fault
             tolerance thresholds with fewer qubits. © 2009
             SPIE.},
   Doi = {10.1117/12.818683},
   Key = {fds235922}
}
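
%% Illustration (not from the record above): computing the Schatten k-norm of
%% the difference between an ideal and an implemented single-qubit density
%% matrix, the error measure named in the abstract. The bit-flip channel and
%% its probability are arbitrary; the paper's optimization over all input
%% density matrices is not reproduced.
import numpy as np

def schatten_norm(A, k):
    """Schatten k-norm: the l_k norm of the singular values of A."""
    s = np.linalg.svd(A, compute_uv=False)
    return float(np.sum(s**k) ** (1.0 / k))

rho_ideal = np.array([[1.0, 0.0], [0.0, 0.0]])          # |0><0|
p = 0.05                                                # assumed bit-flip probability
X = np.array([[0.0, 1.0], [1.0, 0.0]])
rho_impl = (1 - p) * rho_ideal + p * X @ rho_ideal @ X  # Pauli (bit-flip) error channel

for k in (1, 2):
    print(k, schatten_norm(rho_impl - rho_ideal, k))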

@article{fds235867,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {Price-based distributed algorithms for rate-reliability
             tradeoff in network utility maximization},
   Journal = {IEEE Journal on Selected Areas in Communications},
   Volume = {24},
   Number = {5},
   Pages = {962-976},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2006},
   Month = {May},
   ISSN = {0733-8716},
   url = {http://dx.doi.org/10.1109/JSAC.2006.872877},
   Abstract = {The current framework of network utility maximization for
             rate allocation and its price-based algorithms assumes that
             each link provides a fixed-size transmission "pipe" and each
             user's utility is a function of transmission rate only.
             These assumptions break down in many practical systems,
             where, by adapting the physical layer channel coding or
             transmission diversity, different tradeoffs between rate and
             reliability can be achieved. In network utility maximization
             problems formulated in this paper, the utility for each user
             depends on both transmission rate and signal quality, with
             an intrinsic tradeoff between the two. Each link may also
             provide a higher (or lower) rate on the transmission "pipes"
             by allowing a higher (or lower) decoding error probability.
             Despite non-separability and nonconvexity of these
             optimization problems, we propose new price-based
             distributed algorithms and prove their convergence to the
             globally optimal rate-reliability tradeoff under
             readily-verifiable sufficient conditions. We first consider
             networks in which the rate-reliability tradeoff is
             controlled by adapting channel code rates in each link's
             physical-layer error correction codes, and propose two
             distributed algorithms based on pricing, which respectively
             implement the "integrated" and "differentiated" policies of
             dynamic rate-reliability adjustment. In contrast to the
             classical price-based rate control algorithms, in our
             algorithms, each user provides an offered price for its own
             reliability to the network, while the network provides
             congestion prices to users. The proposed algorithms converge
             to a tradeoff point between rate and reliability, which we
             prove to be a globally optimal one for channel codes with
             sufficiently large coding length and utilities whose
             curvatures are sufficiently negative. Under these
             conditions, the proposed algorithms can thus generate the
             Pareto optimal tradeoff curves between rate and reliability
             for all the users. In addition, the distributed algorithms
             and convergence proofs are extended for wireless
             multiple-input-multiple-output multihop networks, in which
             diversity and multiplexing gains of each link are controlled
             to achieve the optimal rate-reliability tradeoff. Numerical
             examples confirm that there can be significant enhancement
             of the network utility by distributively trading off rate
             and reliability, even when only some of the links can
             implement dynamic reliability. © 2006 IEEE.},
   Doi = {10.1109/JSAC.2006.872877},
   Key = {fds235867}
}
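
%% Illustration (not from the record above): the classical price-based network
%% utility maximization loop (log utilities, congestion prices updated by a
%% subgradient step) that the paper extends with a rate-reliability tradeoff.
%% The routing matrix, capacities, and step size are made up.
import numpy as np

R = np.array([[1, 1, 0],                  # link-by-user routing matrix
              [0, 1, 1]], dtype=float)
c = np.array([1.0, 2.0])                  # link capacities
lam = np.ones(2)                          # link congestion prices
step = 0.05

for _ in range(2000):
    q = np.maximum(R.T @ lam, 1e-6)       # path price seen by each user
    x = 1.0 / q                           # user rate: argmax of log(x) - q*x
    lam = np.maximum(lam + step * (R @ x - c), 0.0)   # subgradient price update

print("rates:", np.round(x, 3), "prices:", np.round(lam, 3))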

@article{fds235958,
   Author = {Chiang, M and Hande, P and Kim, H and Ha, S and Calderbank,
             R},
   Title = {Pricing broadband: Survey and open problems},
   Journal = {ICUFN 2010 - 2nd International Conference on Ubiquitous and
             Future Networks},
   Pages = {303-308},
   Publisher = {IEEE},
   Year = {2010},
   Month = {October},
   url = {http://dx.doi.org/10.1109/ICUFN.2010.5547185},
   Abstract = {Driven by the emerging directions from the FCC and the
             broadband market, this paper aims at answering the
             fundamental question of how to use pricing as a lever to
             enable universal broadband coverage and effective network
             management in the United States. We address differential
             pricing as a network management tool, i.e., what to charge,
             how to charge, and how much to charge. We also outline
             research towards multi-platform two-sided pricing focusing
             on ISPs that charge both content and application providers.
             Open problems are highlighted. As a next step, through
             collaboration we will combine the access to large-scale
             empirical data with rigorous modeling and analysis; we will
             go all the way from data collection through mathematical
             analysis to practical impact on policy decisions and ISP
             business decisions, thus closing the loop in the study of
             network economics for universal broadband coverage. © 2010
             IEEE.},
   Doi = {10.1109/ICUFN.2010.5547185},
   Key = {fds235958}
}

@article{fds235953,
   Author = {Hande, P and Chiang, M and Calderbank, R and Zhang,
             J},
   Title = {Pricing under constraints in access networks: Revenue
             maximization and congestion management},
   Journal = {Proceedings - IEEE INFOCOM},
   Publisher = {IEEE},
   Year = {2010},
   Month = {June},
   ISSN = {0743-166X},
   url = {http://dx.doi.org/10.1109/INFCOM.2010.5461954},
   Abstract = {This paper investigates pricing of Internet connectivity
             services in the context of a monopoly ISP selling broadband
             access to consumers. We first study the optimal combination
             of flat-rate and usage-based access price components for
             maximization of ISP revenue, subject to a capacity
             constraint on the data-rate demand. Next, we consider
             time-varying consumer utilities for broadband data rates
             that can result in uneven demand for data-rate over time.
             Practical considerations limit the viability of altering
             prices over time to smooth out the demanded data rate.
             Despite such constraints on pricing, our analysis reveals
             that the ISP can retain the revenue by setting a low usage
             fee and dropping packets of consumer demanded data that
             exceed capacity. Regulatory attention on ISP congestion
             management discourages such "technical" practices and
             promotes economics based approaches. We characterize the
             loss in ISP revenue from an economics based approach.
             Regulatory requirements further impose limitations on price
             discrimination across consumers, and we derive the revenue
             loss to the ISP from such restrictions. We then develop
             partial recovery of revenue loss through non-linear pricing
             that does not explicitly discriminate across consumers.
             While determination of the access price is ultimately based
             on additional considerations beyond the scope of this paper,
             the analysis here can serve as a benchmark to structure
             access price in broadband access networks. ©2010
             IEEE.},
   Doi = {10.1109/INFCOM.2010.5461954},
   Key = {fds235953}
}

@article{fds235767,
   Author = {Reboredo, H and Renna, F and Calderbank, R and Rodrigues,
             MRD},
   Title = {Projections designs for compressive classification},
   Journal = {2013 IEEE Global Conference on Signal and Information
             Processing, GlobalSIP 2013 - Proceedings},
   Pages = {1029-1032},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   url = {http://dx.doi.org/10.1109/GlobalSIP.2013.6737069},
   Abstract = {This paper puts forth projection designs for compressive
             classification of Gaussian mixture models. In particular, we
             capitalize on the asymptotic characterization of the
             behavior of an (upper bound to the) misclassification
             probability associated with the optimal Maximum-A-Posteriori
             (MAP) classifier, which depends on quantities that are dual
             to the concepts of the diversity gain and coding gain in
             multi-antenna communications, to construct measurement
             designs that maximize the diversity-order of the measurement
             model. Numerical results demonstrate that the new
             measurement designs substantially outperform random
             measurements. Overall, the analysis and the designs cast
             geometric insight into the mechanics of compressive
             classification problems. © 2013 IEEE.},
   Doi = {10.1109/GlobalSIP.2013.6737069},
   Key = {fds235767}
}

@article{fds236046,
   Author = {McGuire, G and Calderbank, AR},
   Title = {Proof of a Conjecture of Sarwate and Pursley Regarding Pairs
             of Binary m-Sequences},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {41},
   Number = {4},
   Pages = {1153-1155},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.391260},
   Abstract = {Binary m-sequences are maximal length sequences generated by
             shift registers of length m, that are employed in
             navigation, radar, and spread-spectrum communications
             systems, because of their crosscorrelation properties. It is
             well known that given a pair of distinct m-sequences, the
             crosscorrelation function must take on at least three
             values. This correspondence considers crosscorrelation
             functions that take on exactly three values, and where these
             values are preferred in that they are small. The main result
             is a proof of a conjecture made by Sarwate and Pursley in
             1980, that if m ≡ 0 (mod 4) then there are no preferred
             pairs of binary m-sequences. The proof makes essential use
             of a deep theorem of McEliece that restricts the possible
             weights that can occur in a binary cyclic code. © 1995
             IEEE},
   Doi = {10.1109/18.391260},
   Key = {fds236046}
}
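
%% Illustration (not from the record above): generating a binary m-sequence
%% with a length-5 LFSR (primitive polynomial x^5 + x^2 + 1) and checking its
%% two-valued periodic autocorrelation. The three-valued crosscorrelation of a
%% pair of m-sequences, which the paper is about, is not computed here.
import numpy as np

period = 2**5 - 1
state = [1, 0, 0, 0, 0]                   # any nonzero initial fill
bits = []
for _ in range(period):
    bits.append(state[-1])
    fb = state[4] ^ state[2]              # taps from x^5 + x^2 + 1
    state = [fb] + state[:-1]

s = 1 - 2 * np.array(bits)                # map {0,1} -> {+1,-1}
corr = {int(np.sum(s * np.roll(s, k))) for k in range(period)}
print(sorted(corr))                       # expected: [-1, 31]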

@article{fds326756,
   Author = {Carpenter, KLH and Sprechmann, P and Calderbank, R and Sapiro, G and Egger, HL},
   Title = {Quantifying Risk for Anxiety Disorders in Preschool
             Children: A Machine Learning Approach.},
   Journal = {PLoS One},
   Volume = {11},
   Number = {11},
   Pages = {e0165524},
   Year = {2016},
   url = {http://dx.doi.org/10.1371/journal.pone.0165524},
   Abstract = {Early childhood anxiety disorders are common, impairing, and
             predictive of anxiety and mood disorders later in childhood.
             Epidemiological studies over the last decade find that the
             prevalence of impairing anxiety disorders in preschool
             children ranges from 0.3% to 6.5%. Yet, less than 15% of
             young children with an impairing anxiety disorder receive a
             mental health evaluation or treatment. One possible reason
             for the low rate of care for anxious preschoolers is the
             lack of affordable, timely, reliable and valid tools for
             identifying young children with clinically significant
             anxiety. Diagnostic interviews assessing psychopathology in
             young children require intensive training, take hours to
             administer and code, and are not available for use outside
             of research settings. The Preschool Age Psychiatric
             Assessment (PAPA) is a reliable and valid structured
             diagnostic parent-report interview for assessing
             psychopathology, including anxiety disorders, in 2 to 5 year
             old children. In this paper, we apply machine-learning tools
             to already collected PAPA data from two large community
             studies to identify sub-sets of PAPA items that could be
             developed into an efficient, reliable, and valid screening
             tool to assess a young child's risk for an anxiety disorder.
             Using machine learning, we were able to decrease by an order
             of magnitude the number of items needed to identify a child
             who is at risk for an anxiety disorder with an accuracy of
             over 96% for both generalized anxiety disorder (GAD) and
             separation anxiety disorder (SAD). Additionally, rather than
             considering GAD or SAD as discrete/binary entities, we
             present a continuous risk score representing the child's
             risk of meeting criteria for GAD or SAD. Identification of a
             short question-set that assesses risk for an anxiety
             disorder could be a first step toward development and
             validation of a relatively short screening tool feasible for
             use in pediatric clinics and daycare/preschool
             settings.},
   Doi = {10.1371/journal.pone.0165524},
   Key = {fds326756}
}

@article{fds235823,
   Author = {Calderbank, AR and Rains, EM and Shor, PW and Sloane,
             NJA},
   Title = {Quantum error correction and orthogonal geometry},
   Journal = {Physical Review Letters},
   Volume = {78},
   Number = {3},
   Pages = {405-408},
   Publisher = {American Physical Society (APS)},
   Year = {1997},
   Month = {January},
   url = {http://dx.doi.org/10.1103/PhysRevLett.78.405},
   Abstract = {A group theoretic framework is introduced that simplifies
             the description of known quantum error-correcting codes and
             greatly facilitates the construction of new examples. Codes
             are given which map 3 qubits to 8 qubits correcting 1 error,
             4 to 10 qubits correcting 1 error, 1 to 13 qubits correcting
             2 errors, and 1 to 29 qubits correcting 5 errors. © 1997
             The American Physical Society.},
   Doi = {10.1103/PhysRevLett.78.405},
   Key = {fds235823}
}

@article{fds236054,
   Author = {Calderbank, AR and Rains, EM and Shor, PW and Sloane,
             NJA},
   Title = {Quantum error correction via codes over GF(4)},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {292},
   Publisher = {IEEE},
   Year = {1997},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.1997.613213},
   Abstract = {The unreasonable effectiveness of quantum computing is
             founded on coherent quantum superposition or entanglement
             which allows a large number of calculations to be performed
             simultaneously. This coherence is lost as a quantum system
             interacts with its environment. In the present paper the
             problem of finding quantum-error-correcting codes is
             transformed into one of finding additive codes over the
             field GF(4) which are self-orthogonal with respect to a
             certain trace inner product. Many new codes and new bounds
             are presented, as well as a table of upper and lower bounds
             on such codes of length up to 30 qubits. © 1997
             IEEE.},
   Doi = {10.1109/ISIT.1997.613213},
   Key = {fds236054}
}
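
%% Illustration (not from the record above): in the binary (x|z) picture of
%% Pauli strings, the GF(4) trace inner product of the abstract becomes the
%% symplectic inner product, and additive self-orthogonal codes correspond to
%% commuting stabilizer groups. Checked here for the generators of the
%% standard [[5,1,3]] code, which is not a code constructed in the paper.
import numpy as np

paulis = ["XZZXI", "IXZZX", "XIXZZ", "ZXIXZ"]

def to_xz(p):
    x = np.array([c in "XY" for c in p], dtype=int)
    z = np.array([c in "ZY" for c in p], dtype=int)
    return x, z

def symplectic(p, q):
    xp, zp = to_xz(p)
    xq, zq = to_xz(q)
    return int(xp @ zq + zp @ xq) % 2     # 0 exactly when the two Paulis commute

print(all(symplectic(p, q) == 0 for p in paulis for q in paulis))   # True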

@article{fds235808,
   Author = {Blokhuis, A and Calderbank, AR},
   Title = {Quasi-symmetric designs and the Smith Normal
             Form},
   Journal = {Designs, Codes and Cryptography},
   Volume = {2},
   Number = {2},
   Pages = {189-206},
   Publisher = {Springer Nature},
   Year = {1992},
   Month = {June},
   ISSN = {0925-1022},
   url = {http://dx.doi.org/10.1007/BF00124897},
   Abstract = {We obtain necessary conditions for the existence of a
             2-(ν, k, λ) design for which the block intersection sizes
             s_1, s_2, ..., s_n satisfy s_1 ≡ s_2 ≡ ... ≡ s_n ≡ s (mod
             p^e), where p is a prime and the exponent e is odd. These
             conditions are obtained from restrictions on the Smith Normal
             Form of the incidence matrix of the design. We also obtain
             restrictions on the action of the automorphism group of a
             2-(ν, k, λ) design on points and on blocks. © 1992 Kluwer
             Academic Publishers.},
   Doi = {10.1007/BF00124897},
   Key = {fds235808}
}
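
%% Illustration (not from the record above): the Smith Normal Form of a
%% design's incidence matrix, the object the abstract places restrictions on.
%% Minimal sketch assuming SymPy is available; the example design is the Fano
%% plane, a 2-(7,3,1) design, not a design treated in the paper.
from sympy import Matrix, ZZ
from sympy.matrices.normalforms import smith_normal_form

lines = [(0,1,2), (0,3,4), (0,5,6), (1,3,5), (1,4,6), (2,3,6), (2,4,5)]
N = Matrix(7, 7, lambda i, j: 1 if i in lines[j] else 0)   # point-block incidence
print(smith_normal_form(N, domain=ZZ))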

@article{fds236040,
   Author = {Bonnecaze, A and Solé, P and Calderbank, AR},
   Title = {Quaternary Quadratic Residue Codes and Unimodular
             Lattices},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {41},
   Number = {2},
   Pages = {366-377},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1995},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.370138},
   Abstract = {We construct new self-dual and isodual codes over the
             integers modulo 4. The binary images of these codes under
             the Gray map are nonlinear, but formally self-dual. The
             construction involves Hensel lifting of binary cyclic codes.
             Quaternary quadratic residue codes are obtained by Hensel
             lifting of the classical binary quadratic residue codes.
             Repeated Hensel lifting produces a universal code defined
             over the 2-adic integers. We investigate the connections
             between this universal code and the codes defined over Z4,
             the composition of the automorphism group, and the structure
             of idempotents over Z4. We also derive a square root bound
             on the minimum Lee weight, and explore the connections with
             the finite Fourier transform. Certain self-dual codes over
             Z4 are shown to determine even unimodular lattices,
             including the extended quadratic residue code of length q +
             1, where q ≡ −1 (mod 8) is a prime power. When q = 23, the
             quaternary Golay code determines the Leech lattice in this
             way. This is perhaps the simplest construction for this
             remarkable lattice that is known. © 1995
             IEEE},
   Doi = {10.1109/18.370138},
   Key = {fds236040}
}
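
%% Illustration (not from the record above): the Gray map from Z4 to binary
%% pairs that produces the nonlinear binary images mentioned in the abstract.
%% It sends Lee weight over Z4 to Hamming weight over F2, which the assertion
%% below checks on an arbitrary example word.
GRAY = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}
LEE = {0: 0, 1: 1, 2: 2, 3: 1}

def gray_image(word):
    return [bit for sym in word for bit in GRAY[sym]]

word = [3, 1, 2, 0, 1]
image = gray_image(word)
assert sum(LEE[s] for s in word) == sum(image)   # Lee weight equals Hamming weight
print(word, "->", image)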

@article{fds235755,
   Author = {Carpenter, K and Sprechmann, P and Fiori, M and Calderbank, R and Egger,
             H and Sapiro, G},
   Title = {Questionnaire simplification for fast risk analysis of
             children's mental health},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {6009-6013},
   Publisher = {IEEE},
   Year = {2014},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2014.6854757},
   Abstract = {Early detection and treatment of psychiatric disorders in
             children has shown significant impact on their subsequent
             development and quality of life. The assessment of
             psychopathology in childhood is commonly carried out by
             performing long comprehensive interviews such as the widely
             used Preschool Age Psychiatric Assessment (PAPA).
             Unfortunately, the time required to complete a full
             interview is too long to apply it at the scale of the actual
             population at risk, and most of the population goes
             undiagnosed or is diagnosed significantly later than
             desired. In this work, we aim to learn from unique and very
             rich previously collected PAPA examples the
             inter-correlations between different questions in order to
             provide a reliable risk analysis in the form of a much
             shorter interview. This helps to put such important risk
             analysis in the hands of regular practitioners, including
             teachers and family doctors. We use for this purpose the
             alternating decision trees algorithm, which combines
             decision trees with boosting to produce small and
             interpretable decision rules. Rather than a binary
             prediction, the algorithm provides a measure of confidence
             in the classification outcome. This is highly desirable from
             a clinical perspective, where it is preferable to abstain
             from a decision on the low-confidence cases and recommend further
             screening. In order to prevent over-fitting, we propose to
             use network inference analysis to predefine a set of
             candidate questions with consistently high correlation with the
             diagnosis. We report encouraging results with high levels of
             prediction using two independently collected datasets. The
             length and accuracy of the developed method suggest that it
             could be a valuable tool for preliminary evaluation in
             everyday care. © 2014 IEEE.},
   Doi = {10.1109/ICASSP.2014.6854757},
   Key = {fds235755}
}

@article{fds235913,
   Author = {Chi, Y and Pezeshki, A and Calderbank, R and Howard,
             S},
   Title = {Range sidelobe suppression in a desired Doppler
             interval},
   Journal = {2009 International Waveform Diversity and Design Conference
             Proceedings, WDD 2009},
   Pages = {258-262},
   Publisher = {IEEE},
   Year = {2009},
   Month = {April},
   url = {http://dx.doi.org/10.1109/WDDC.2009.4800356},
   Abstract = {We present a novel method of constructing a Doppler
             resilient pulse train of Golay complementary waveforms, for
             which the range sidelobes of the pulse train ambiguity
             function vanish inside a desired Doppler interval. This is
             accomplished by coordinating the transmission of a Golay
             pair of phase coded waveforms in time according to the 1's
             and -1's in a biphase sequence. The magnitude of the range
             sidelobes of the pulse train ambiguity function is shown to
             be proportional to the magnitude spectrum of the biphase
             sequence. Range sidelobes inside a desired Doppler interval
             are suppressed by selecting a sequence whose spectrum has a
             high-order null at a Doppler frequency inside the desired
             interval. We show that the spectrum of the biphase sequence
             obtained by oversampling the length-2^M Prouhet-Thue-Morse
             (PTM) sequence by a factor m has an Mth-order null at all
             rational Doppler shifts θ_0 = 2πl/m, where l ≠ 0 and
             m ≠ 1 are co-prime integers. This spectrum also has an
             (M - 1)th-order null at zero Doppler and (M - h - 1)th-order
             nulls at all Doppler shifts θ_0 = 2πl/(2^h m), where l ≠ 0
             and m ≠ 1 are again co-prime and 1 ≤ h ≤ M - 1. ©2009
             IEEE.},
   Doi = {10.1109/WDDC.2009.4800356},
   Key = {fds235913}
}
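
%% Illustration (not from the record above): the complementary property behind
%% the waveforms in the abstract. For a Golay pair (x, y) the aperiodic
%% autocorrelations sum to an impulse; the Doppler-resilient PTM coordination
%% itself is not reproduced here. The pair length is arbitrary.
import numpy as np

def golay_pair(n_iter):
    x, y = np.array([1.0]), np.array([1.0])
    for _ in range(n_iter):                       # (x, y) -> (x|y, x|-y)
        x, y = np.concatenate([x, y]), np.concatenate([x, -y])
    return x, y

def acorr(s):
    return np.correlate(s, s, mode="full")[len(s) - 1:]   # lags 0, 1, ...

x, y = golay_pair(6)                              # a length-64 Golay pair
total = acorr(x) + acorr(y)
print(total[:4])                                  # [128, 0, 0, 0]: impulse of height 2N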

@article{fds235882,
   Author = {Dusad, S and Diggavi, SN and Calderbank, AR},
   Title = {Rank distance codes for ISI channels},
   Journal = {Proceedings of the 2007 IEEE Information Theory Workshop on
             Information Theory for Wireless Networks,
             ITW},
   Pages = {32-36},
   Publisher = {IEEE},
   Year = {2007},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ITWITWN.2007.4318026},
   Abstract = {Designs for transmit alphabet constrained space-time codes
             naturally lead to questions about the design of rank
             distance codes. Recently, diversity embedded multi-level
             space-time codes for flat fading channels have been designed
             by using sets of binary matrices with rank distance
             guarantees over the binary field and mapping them onto QAM
             and PSK constellations. In this paper we give the design of
             diversity embedded space-time codes for fading Inter-Symbol
             Interference (ISI) channels with provable rank distance
             guarantees. In the process of doing so we also get an
             (asymptotic) characterization of the rate-diversity
             trade-off for multiple antenna fading ISI channels when
             there is a fixed transmit alphabet constraint. The key idea
             is to construct and analyze properties of binary matrices
             with the particular structure induced by ISI channels.
             ©2007 IEEE.},
   Doi = {10.1109/ITWITWN.2007.4318026},
   Key = {fds235882}
}

@article{fds235780,
   Author = {Harms, A and Bajwa, WU and Calderbank, R},
   Title = {Rapid sensing of underutilized, wideband spectrum using the
             Random Demodulator},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {1940-1944},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2012.6489377},
   Abstract = {Efficient spectrum sensing is an important problem given the
             large and increasing demand for wireless spectrum and the
             need to protect incumbent users. We can more efficiently use
             large swaths of underutilized spectrum by designing spectrum
             sensors that can quickly, and power-efficiently, find and
             opportunistically communicate over unused (or underutilized)
             pieces of spectrum, such as television bands. In this paper,
             we concentrate on a particular sensing architecture, the
             Random Demodulator (RD), and look at two aspects of the
             problem. First, we offer fundamental limits on how
             efficiently any algorithm can perform the sensing operation
             with the RD. Second, we analyze a very simple,
             low-complexity algorithm called one-step thresholding that
             has been shown to work near-optimally for certain
             measurement classes in a low SNR setting or when the
             non-zero input coefficients are nearly equal. We rigorously
             establish that the RD architecture is well-suited for
             near-optimal recovery of the locations of the non-zero
             frequency coefficients in similar settings using one-step
             thresholding and perform numerical experiments to offer some
             confirmation of our results. © 2012 IEEE.},
   Doi = {10.1109/ACSSC.2012.6489377},
   Key = {fds235780}
}
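
%% Illustration (not from the record above): the Random Demodulator acquisition
%% model the abstract refers to -- multiply the Nyquist-rate signal by a
%% pseudorandom +/-1 chipping sequence, then integrate-and-dump to obtain
%% low-rate measurements. Sizes and the sparse test signal are made up, and no
%% recovery algorithm is run.
import numpy as np

rng = np.random.default_rng(1)
N, M, k = 512, 64, 3                      # Nyquist samples, measurements, active tones
tones = rng.choice(N, size=k, replace=False)
spectrum = np.zeros(N, dtype=complex)
spectrum[tones] = rng.standard_normal(k) + 1j * rng.standard_normal(k)
signal = np.fft.ifft(spectrum) * N        # sparse-in-frequency time-domain signal

chips = rng.choice([1.0, -1.0], size=N)               # chipping sequence
y = (signal * chips).reshape(M, N // M).sum(axis=1)   # integrate-and-dump
print(y.shape)                                        # (64,) compressive samples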

@article{fds235897,
   Author = {Thejaswi P.S. and C and Bennatan, A and Zhang, J and Calderbank, R and Cochran, D},
   Title = {Rate-achievability strategies for two-hop interference
             flows},
   Journal = {46th Annual Allerton Conference on Communication, Control,
             and Computing},
   Pages = {1432-1439},
   Publisher = {IEEE},
   Year = {2008},
   Month = {January},
   url = {http://dx.doi.org/10.1109/ALLERTON.2008.4797731},
   Abstract = {We consider a basic model for two-hop transmissions of two
             information flows which interfere with each other. In this
             model, two sources simultaneously transmit to two relays (in
             the first hop), which then simultaneously transmit to two
             destinations (in the second hop). While the transmission
             during the first hop is essentially the transmission over a
             classical interference channel, the transmission in the
             second hop enjoys an interesting advantage. Specifically, as
             a by-product of the Han-Kobayashi transmission scheme
             applied to the first hop, each of the relays (in the second
             hop) has access to some of the data that is intended for the
             other destination, in addition to its own data. As recently
             observed by Simeone et al., this opens the door to
             cooperation between the relays. In this paper, we observe
             that the cooperation can take the form of distributed MIMO
             broadcast, thus greatly enhancing its effectiveness at high
             SNR. However, since each relay is only aware of part of the
             data beyond its own, full cooperation is not possible. We
             propose several approaches that combine MIMO broadcast
             strategies (including "dirty paper") with standard
             non-cooperative strategies for the interference channel.
             Numerical results are provided, which indicate that our
             approaches provide substantial benefits at high SNR. © 2008
             IEEE.},
   Doi = {10.1109/ALLERTON.2008.4797731},
   Key = {fds235897}
}

@article{fds235771,
   Author = {Tarokh, V and Naguib, A and Seshadri, N and Calderbank,
             AR},
   Title = {Recent progress in space-time block and trellis
             coding},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {314},
   Publisher = {IEEE},
   Year = {1998},
   Month = {December},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.1998.708919},
   Abstract = {Techniques for transmission and reception over wireless
             channels using multiple transmit antennas are presented. ©
             1998 IEEE.},
   Doi = {10.1109/ISIT.1998.708919},
   Key = {fds235771}
}

@article{fds235766,
   Author = {Renna, F and Calderbank, R and Carin, L and Rodrigues,
             MRD},
   Title = {Reconstruction of Gaussian mixture models from compressive
             measurements: A phase transition view},
   Journal = {2013 IEEE Global Conference on Signal and Information
             Processing, GlobalSIP 2013 - Proceedings},
   Pages = {628},
   Publisher = {IEEE},
   Year = {2013},
   Month = {December},
   url = {http://dx.doi.org/10.1109/GlobalSIP.2013.6736965},
   Abstract = {We characterize the minimum number of measurements needed to
             drive to zero the minimum mean squared error (MMSE) of
             Gaussian mixture model (GMM) input signals in the low-noise
             regime. The result also hints at almost phase-transition
             optimal recovery procedures based on a classification and
             reconstruction approach. © 2013 IEEE.},
   Doi = {10.1109/GlobalSIP.2013.6736965},
   Key = {fds235766}
}

@article{fds303196,
   Author = {Renna, F and Calderbank, R and Carin, L and Rodrigues,
             MRD},
   Title = {Reconstruction of signals drawn from a gaussian mixture via
             noisy compressive measurements},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {62},
   Number = {9},
   Pages = {2265-2277},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2014},
   Month = {May},
   url = {http://arxiv.org/abs/1307.0861v2},
   Abstract = {This paper determines to within a single measurement the
             minimum number of measurements required to successfully
             reconstruct a signal drawn from a Gaussian mixture model in
             the low-noise regime. The method is to develop upper and
             lower bounds that are a function of the maximum dimension of
             the linear subspaces spanned by the Gaussian mixture
             components. The method not only reveals the existence or
             absence of a minimum mean-squared error (MMSE) error floor
             (phase transition) but also provides insight into the MMSE
             decay via multivariate generalizations of the MMSE dimension
             and the MMSE power offset, which are a function of the
             interaction between the geometrical properties of the kernel
             and the Gaussian mixture. These results apply not only to
             standard linear random Gaussian measurements but also to
             linear kernels that minimize the MMSE. It is shown that
             optimal kernels do not change the number of measurements
             associated with the MMSE phase transition, rather they
             affect the sensed power required to achieve a target MMSE in
             the low-noise regime. Overall, our bounds are tighter and
             sharper than standard bounds on the minimum number of
             measurements needed to recover sparse signals associated
             with a union of subspaces model, as they are not asymptotic
             in the signal dimension or signal sparsity. © 2014
             IEEE.},
   Doi = {10.1109/TSP.2014.2309560},
   Key = {fds303196}
}
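
%% Illustration (not from the record above): MMSE reconstruction of a
%% Gaussian-mixture signal from noisy compressive measurements y = A x + w.
%% The posterior is again a Gaussian mixture, so the MMSE estimate is a
%% weighted sum of per-component Wiener estimates. Mixture parameters, sizes,
%% and the noise level are made up; assumes NumPy and SciPy.
import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(2)
n, m, sigma2 = 8, 4, 1e-3
# Two zero-mean components with covariances concentrated on 2-D subspaces
C = [U @ U.T + 1e-6 * np.eye(n)
     for U in (rng.standard_normal((n, 2)), rng.standard_normal((n, 2)))]
w = [0.5, 0.5]

A = rng.standard_normal((m, n)) / np.sqrt(m)
x = np.linalg.cholesky(C[0]) @ rng.standard_normal(n)   # draw from component 0
y = A @ x + np.sqrt(sigma2) * rng.standard_normal(m)

post_w, post_mean = [], []
for wk, Ck in zip(w, C):
    S = A @ Ck @ A.T + sigma2 * np.eye(m)               # covariance of y given the component
    post_w.append(wk * multivariate_normal(np.zeros(m), S).pdf(y))
    post_mean.append(Ck @ A.T @ np.linalg.solve(S, y))  # per-component Wiener estimate
post_w = np.array(post_w) / sum(post_w)
x_mmse = sum(pw * pm for pw, pm in zip(post_w, post_mean))
print("relative error:", np.linalg.norm(x - x_mmse) / np.linalg.norm(x))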

@article{fds343207,
   Author = {Calderbank, R and Jafarpour, S},
   Title = {Reed Muller Sensing Matrices and the LASSO},
   Journal = {SEQUENCES AND THEIR APPLICATIONS-SETA 2010},
   Volume = {6338},
   Pages = {442-463},
   Year = {2010},
   Key = {fds343207}
}

@article{fds235964,
   Author = {Calderbank, R and Jafarpour, S},
   Title = {Reed Muller sensing matrices and the LASSO (Invited
             paper)},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {6338 LNCS},
   Pages = {442-463},
   Publisher = {Springer Berlin Heidelberg},
   Year = {2010},
   Month = {November},
   ISSN = {0302-9743},
   url = {http://dx.doi.org/10.1007/978-3-642-15874-2_37},
   Abstract = {We construct two families of deterministic sensing matrices
             where the columns are obtained by exponentiating codewords
             in the quaternary Delsarte-Goethals code DG(m,r). This
             method of construction results in sensing matrices with low
             coherence and spectral norm. The first family, which we call
             Delsarte-Goethals frames, are 2^m-dimensional tight frames
             with redundancy 2^(rm). The second family, which we call
             Delsarte-Goethals sieves, are obtained by subsampling the
             column vectors in a Delsarte-Goethals frame. Different rows
             of a Delsarte-Goethals sieve may not be orthogonal, and we
             present an effective algorithm for identifying all pairs of
             non-orthogonal rows. The pairs turn out to be duplicate
             measurements and eliminating them leads to a tight frame.
             Experimental results suggest that all DG(m,r) sieves with m
             ≤ 15 and r ≥ 2 are tight-frames; there are no duplicate
             rows. For both families of sensing matrices, we measure
             accuracy of reconstruction (statistical 0-1 loss) and
             complexity (average reconstruction time) as a function of
             the sparsity level k. Our results show that DG frames and
             sieves outperform random Gaussian matrices in terms of
             noiseless and noisy signal recovery using the LASSO. © 2010
             Springer-Verlag.},
   Doi = {10.1007/978-3-642-15874-2_37},
   Key = {fds235964}
}

@article{fds235955,
   Author = {Chi, Y and Wu, Y and Calderbank, R},
   Title = {Regularized blind detection for MIMO communications},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {2108-2112},
   Publisher = {IEEE},
   Year = {2010},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513407},
   Abstract = {Multiple-Input Multiple-Output (MIMO) systems improve the
             throughput and reliability of wireless communications.
             Perfect Channel State Information (CSI) is needed at the
             receiver to perform coherent detection and achieve the
             optimal gain of the system. In fast fading and low SNR
             regimes, it is hard or impossible to obtain perfect CSI,
             which leads the receiver to operate without knowledge of the
             CSI and perform blind detection. In reality CSI may be
             available to the receiver but this CSI may be insufficient
             to support coherent detection. In this paper, we fill the
             gap between coherent and blind detection by considering a
             more realistic model where the receiver knows the statistics
             of the channel, that is Channel Distribution Information
             (CDI). We propose a new detection algorithm, called
             Regularized Blind Detection (RBD), where coherent and blind
             detection can be viewed as special cases in our model. The
             algorithm estimates CDI from any training symbols that are
             available and maximizes performance given the estimated CDI.
             Simulations demonstrate significant improvement in
             performance over blind detection. Our work can be viewed as
             a systematic exploration of space between coherent and blind
             detection with a strong Bayesian statistics flavor. © 2010
             IEEE.},
   Doi = {10.1109/ISIT.2010.5513407},
   Key = {fds235955}
}

@article{fds235849,
   Author = {Howard, SD and Moran, W and Calderbank, AR and Schmitt, HA and Savage,
             CO},
   Title = {Relationships between radar ambiguity and coding
             theory},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {V},
   Pages = {V897-V900},
   Publisher = {IEEE},
   Year = {2005},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2005.1416449},
   Abstract = {We investigate the theory of the finite discrete
             Heisenberg-Weyl group in relation to the development of
             adaptive radar. We contend that this group can form the
             basis for the representation of the radar environment in
             terms of operators on the space of waveforms. We also
             demonstrate, following recent developments in the theory of
             error correcting codes, that the finite discrete
             Heisenberg-Weyl group provides a unified basis for the
             construction of useful waveforms/sequences for radar,
             communications and the theory of error correcting codes. ©
             2005 IEEE.},
   Doi = {10.1109/ICASSP.2005.1416449},
   Key = {fds235849}
}
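
%% Illustration (not from the record above): the finite discrete
%% Heisenberg-Weyl group named in the abstract is generated by the cyclic
%% shift T and the modulation M on C^N; the check below confirms the
%% commutation relation M T = omega T M with omega = exp(2*pi*i/N). The
%% dimension N is arbitrary.
import numpy as np

N = 8
omega = np.exp(2j * np.pi / N)
T = np.roll(np.eye(N), 1, axis=0)             # cyclic time shift
M = np.diag(omega ** np.arange(N))            # modulation (frequency shift)
print(np.allclose(M @ T, omega * (T @ M)))    # True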

@article{fds331055,
   Author = {Calderbank, AR and Hanlon, P and Sundaram, S},
   Title = {Representations of the symmetric group in deformations of
             the free lie algebra},
   Journal = {Transactions of the American Mathematical
             Society},
   Volume = {341},
   Number = {1},
   Pages = {315-333},
   Publisher = {American Mathematical Society (AMS)},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1090/S0002-9947-1994-1153011-7},
   Abstract = {We consider, for a given complex parameter α, a
             nonassociative product defined on the tensor algebra of a
             finite-dimensional complex vector space, with the left-normed
             bracketing of a sequence of basis vectors defined recursively
             from it. The linear subspace spanned by all multilinear
             left-normed bracketings of homogeneous degree n in the
             basis vectors is then an S_n-module V_n(α). Note that V_n(1) is
             the Lie representation Lie_n of S_n afforded by the
             nth-degree multilinear component of the free Lie algebra.
             Also, V_n(-1) is the subspace of simple Jordan products in
             the free associative algebra as studied by Robbins [Ro].
             Among our preliminary results is the observation that when α
             is not a root of unity, the module V_n(α) is simply the
             regular representation. Thrall [T] showed that the regular
             representation of the symmetric group S_n can be written as a
             direct sum of tensor products of symmetrized Lie modules.
             In this paper we determine the structure of the
             representations V_n(α) as a sum of a subset of these modules
             V_λ. The V_λ, indexed by the partitions λ of n, are defined as
             follows: let m_i be the multiplicity of the part i in λ, let
             Lie_i be the Lie representation of S_i, let 1_k denote the
             trivial character of the symmetric group S_k, and consider the
             character of the wreath product S_{m_i}[S_i] of S_{m_i} acting on
             m_i copies of S_i. Then V_λ is isomorphic to the module induced
             from the product of these wreath product characters. Our theorem
             now states that when α is a primitive pth root of unity, the
             module V_n(α) is isomorphic to the direct sum of the V_λ, where
             λ runs over a suitable set of partitions of n.
             © 1994 American Mathematical Society.},
   Doi = {10.1090/S0002-9947-1994-1153011-7},
   Key = {fds331055}
}

@article{fds235772,
   Author = {Tang, A and Lee, JW and Huang, J and Chiang, M and Calderbank,
             AR},
   Title = {Reverse engineering MAC},
   Journal = {2006 4th International Symposium on Modeling and
             Optimization in Mobile, Ad Hoc and Wireless Networks, WiOpt
             2006},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   url = {http://dx.doi.org/10.1109/WIOPT.2006.1666466},
   Abstract = {This paper reverse engineers backoff-based random-access MAC
             protocols in ad-hoc networks. We show that the contention
             resolution algorithm in such protocols is implicitly
             participating in a non-cooperative game. Each link attempts
             to maximize a selfish local utility function, whose exact
             shape is reverse engineered from the protocol description,
             through a stochastic subgradient method in which each link
             updates its persistence probability based on its
             transmission success or failure. We prove that existence of
             a Nash equilibrium is guaranteed in general. The minimum amount
             of backoff aggressiveness needed for uniqueness of the Nash
             equilibrium and convergence of the best response strategy is
             established as a function of user density. Convergence
             properties and the connection with the best response strategy
             are also proved for variants of the stochastic-subgradient-based
             dynamics of the game. Together with known results in reverse
             engineering TCP and BGP, this paper completes recent efforts
             in reverse engineering the main protocols in layers 2-4. © 2006
             IEEE.},
   Doi = {10.1109/WIOPT.2006.1666466},
   Key = {fds235772}
}

@article{fds235889,
   Author = {Lee, JW and Tang, A and Huang, J and Chiang, M and Calderbank,
             AR},
   Title = {Reverse-engineering MAC: A non-cooperative game
             model},
   Journal = {IEEE Journal on Selected Areas in Communications},
   Volume = {25},
   Number = {6},
   Pages = {1135-1147},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {August},
   ISSN = {0733-8716},
   url = {http://dx.doi.org/10.1109/JSAC.2007.070808},
   Abstract = {This paper reverse-engineers backoff-based random-access MAC
             protocols in ad-hoc networks. We show that the contention
             resolution algorithm in such protocols is implicitly
             participating in a non-cooperative game. Each link attempts
             to maximize a selfish local utility function, whose exact
             shape is reverse-engineered from the protocol description,
             through a stochastic subgradient method in which the link
             updates its persistence probability based on its
             transmission success or failure. We prove that existence of
             a Nash equilibrium is guaranteed in general. Then we
             establish the minimum amount of backoff aggressiveness
             needed, as a function of density of active users, for
             uniqueness of Nash equilibrium and convergence of the best
             response strategy. Convergence properties and connection
             with the best response strategy are also proved for variants
             of the stochastic-subgradient-based dynamics of the game.
             Together with known results in reverse-engineering TCP and
             BGP, this paper further advances the recent efforts in
             reverse-engineering layers 2-4 protocols. In contrast to the
             TCP reverse-engineering results in earlier literature, MAC
             reverse-engineering highlights the non-cooperative nature of
             random access. © 2007 IEEE.},
   Doi = {10.1109/JSAC.2007.070808},
   Key = {fds235889}
}

@article{fds235966,
   Author = {Bajwa, WU and Calderbank, R and Jafarpour, S},
   Title = {Revisiting model selection and recovery of sparse signals
             using one-step thresholding},
   Journal = {2010 48th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2010},
   Pages = {977-984},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ALLERTON.2010.5707015},
   Abstract = {This paper studies non-asymptotic model selection and
             recovery of sparse signals in high-dimensional, linear
             inference problems. In contrast to the existing literature,
             the focus here is on the general case of arbitrary design
             matrices and arbitrary nonzero entries of the signal. In
             this regard, it utilizes two easily computable measures of
             coherence - termed the worst-case coherence and the
             average coherence - among the columns of a design matrix to
             analyze a simple, model-order agnostic one-step thresholding
             (OST) algorithm. In particular, the paper establishes that
             if the design matrix has reasonably small worst-case and
             average coherence then OST performs near-optimal model
             selection when either (i) the energy of any nonzero entry of
             the signal is close to the average signal energy per nonzero
             entry or (ii) the signal-to-noise ratio (SNR) in the
             measurement system is not too high. Further, the paper shows
             that if the design matrix in addition has sufficiently small
             spectral norm then OST also exactly recovers most sparse
             signals whose nonzero entries have approximately the same
             magnitude even if the number of nonzero entries scales
             almost linearly with the number of rows of the design
             matrix. Finally, the paper also presents various classes of
             random and deterministic design matrices that can be used
             together with OST to successfully carry out near-optimal
             model selection and recovery of sparse signals under certain
             SNR regimes or for certain classes of signals. ©2010
             IEEE.},
   Doi = {10.1109/ALLERTON.2010.5707015},
   Key = {fds235966}
}
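
%% Illustration (not from the record above): one pass of one-step thresholding
%% (OST) as described in the abstract -- correlate the measurements with the
%% columns of the design matrix once and keep the indices with large
%% correlation. The Gaussian design, sizes, and the ad hoc threshold below are
%% made up; the paper's coherence-based threshold choices are not reproduced.
import numpy as np

rng = np.random.default_rng(3)
n, p, k = 256, 512, 5
A = rng.standard_normal((n, p)) / np.sqrt(n)      # columns have unit norm in expectation
support = np.sort(rng.choice(p, size=k, replace=False))
x = np.zeros(p)
x[support] = 1.0
y = A @ x + 0.01 * rng.standard_normal(n)

stats = np.abs(A.T @ y)                           # a single pass of correlations
estimate = np.flatnonzero(stats > 0.5 * stats.max())
print(list(support), list(estimate))              # typically identical at this SNR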

@article{fds235908,
   Author = {Kutyniok, G and Pezeshki, A and Calderbank, R and Liu,
             T},
   Title = {Robust dimension reduction, fusion frames, and Grassmannian
             packings},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {26},
   Number = {1},
   Pages = {64-76},
   Publisher = {Elsevier BV},
   Year = {2009},
   Month = {January},
   ISSN = {1063-5203},
   url = {http://dx.doi.org/10.1016/j.acha.2008.03.001},
   Abstract = {We consider estimating a random vector from its measurements
             in a fusion frame, in presence of noise and subspace
             erasures. A fusion frame is a collection of subspaces, for
             which the sum of the projection operators onto the subspaces
             is bounded below and above by constant multiples of the
             identity operator. We first consider the linear minimum
             mean-squared error (LMMSE) estimation of the random vector
             of interest from its fusion frame measurements in the
             presence of additive white noise. Each fusion frame
             measurement is a vector whose elements are inner products of
             an orthogonal basis for a fusion frame subspace and the
             random vector of interest. We derive bounds on the
             mean-squared error (MSE) and show that the MSE will achieve
             its lower bound if the fusion frame is tight. We then
             analyze the robustness of the constructed LMMSE estimator to
             erasures of the fusion frame subspaces. We limit our erasure
             analysis to the class of tight fusion frames and assume that
             all erasures are equally important. Under these assumptions,
             we prove that tight fusion frames consisting of
             equi-dimensional subspaces have maximum robustness (in the
             MSE sense) with respect to erasures of one subspace among
             all tight fusion frames, and that the optimal subspace
             dimension depends on signal-to-noise ratio (SNR). We also
             prove that tight fusion frames consisting of
             equi-dimensional subspaces with equal pairwise chordal
             distances are most robust with respect to two and more
             subspace erasures, among the class of equi-dimensional tight
             fusion frames. We call such fusion frames equi-distance
             tight fusion frames. We prove that the squared chordal
             distance between the subspaces in such fusion frames meets
             the so-called simplex bound, and thereby establish
             connections between equi-distance tight fusion frames and
             optimal Grassmannian packings. Finally, we present several
             examples for the construction of equi-distance tight fusion
             frames. © 2008 Elsevier Inc. All rights
             reserved.},
   Doi = {10.1016/j.acha.2008.03.001},
   Key = {fds235908}
}
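
%% Illustration (not from the record above): LMMSE estimation of a random
%% vector from fusion frame measurements, i.e., from its coefficients in
%% orthonormal bases of several subspaces observed in white noise. The
%% subspaces here are random rather than an optimized (equi-distance) packing,
%% and all dimensions and noise levels are made up.
import numpy as np

rng = np.random.default_rng(4)
n, dims, sigma2x, sigma2n = 6, [2, 2, 2], 1.0, 0.05

bases = [np.linalg.qr(rng.standard_normal((n, d)))[0] for d in dims]
A = np.vstack([U.T for U in bases])               # stacked analysis operator

x = np.sqrt(sigma2x) * rng.standard_normal(n)
y = A @ x + np.sqrt(sigma2n) * rng.standard_normal(A.shape[0])

# LMMSE estimator for x ~ N(0, sigma2x I) observed through y = A x + noise
G = sigma2x * A.T @ np.linalg.inv(sigma2x * A @ A.T + sigma2n * np.eye(A.shape[0]))
x_hat = G @ y
print("relative error:", np.linalg.norm(x - x_hat) / np.linalg.norm(x))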

@article{fds235872,
   Author = {Li, Y and Minn, H and Al-Dhahir, N and Calderbank,
             R},
   Title = {Robust pilot design for consistent carrier frequency offset
             estimation},
   Journal = {Proceedings - IEEE Military Communications Conference
             MILCOM},
   Publisher = {IEEE},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1109/MILCOM.2006.302204},
   Abstract = {Consistent pilot designs [6] for carrier frequency offset
             (CFO) estimation eliminate ambiguity in the noise-free
             estimation metric trajectory regardless of the channel
             impulse response. Their importance is more pronounced in
             emergency and disaster situations since an inconsistent CFO
             estimate will result in a link failure. The CFO estimation
             performances of distinct consistent pilot designs can be
             quite different at moderate or low SNR due to different
             outlier statistics, which can also yield a link failure. In
             this paper, we develop novel pilot designs that provide both
             consistency and robustness against outliers. We also propose
             new generalized pilot designs that yield consistency over
             any arbitrary but fixed CFO estimation range and that
             include our previous consistent pilot designs in [6] as a
             special case which provides consistency over the maximum CFO
             estimation range (half of the sampling rate). Our new
             consistent pilot designs facilitate more flexible and
             economical implementation while our robust pilot designs
             enable wireless links with less outage and better
             resilience.},
   Doi = {10.1109/MILCOM.2006.302204},
   Key = {fds235872}
}

@article{fds235920,
   Author = {Aggarwal, V and Sankar, L and Calderbank, AR and Poor,
             HV},
   Title = {Secrecy capacity of a class of orthogonal relay eavesdropper
             channels},
   Journal = {Eurasip Journal on Wireless Communications and
             Networking},
   Volume = {2009},
   Number = {1},
   Pages = {494696-494696},
   Publisher = {Springer Nature},
   Year = {2009},
   Month = {September},
   ISSN = {1687-1472},
   url = {http://dx.doi.org/10.1155/2009/494696},
   Abstract = {The secrecy capacity of relay channels with orthogonal
             components is studied in the presence of an additional
             passive eavesdropper node. The relay and destination receive
             signals from the source on two orthogonal channels such that
             the destination also receives transmissions from the relay
             on its channel. The eavesdropper can overhear either one or
             both of the orthogonal channels. Inner and outer bounds on
             the secrecy capacity are developed for both the discrete
             memoryless and the Gaussian channel models. For the discrete
             memoryless case, the secrecy capacity is shown to be
             achieved by a partial decode-and-forward (PDF) scheme when
             the eavesdropper can overhear only one of the two orthogonal
             channels. Two new outer bounds are presented for the
             Gaussian model using recent capacity results for a Gaussian
             multiantenna point-to-point channel with a multiantenna
             eavesdropper. The outer bounds are shown to be tight for two
             subclasses of channels. The first subclass is one in which
             the source and relay are clustered, and the eavesdropper
             receives signals only on the channel from the source and the
             relay to the destination, for which the PDF strategy is
             optimal. The second is a subclass in which the source does
             not transmit to the relay, for which a noise-forwarding
             strategy is optimal. Copyright © 2009 Vaneet Aggarwal et
             al.},
   Doi = {10.1155/2009/494696},
   Key = {fds235920}
}

@article{fds235926,
   Author = {Aggarwal, V and Sankar, L and Calderbank, AR and Poor,
             HV},
   Title = {Secrecy capacity of a class of orthogonal relay eavesdropper
             channels},
   Journal = {Information Theory and Applications Workshop, ITA
             2009},
   Pages = {295-300},
   Publisher = {IEEE},
   Year = {2009},
   Month = {September},
   url = {http://dx.doi.org/10.1109/ITA.2009.5044960},
   Abstract = {The secrecy capacity is developed for a class of relay
             channels with orthogonal components and a passive
             eavesdropper node. The relay and destination receive signals
             from the source on two orthogonal channels such that the
             destination also receives transmissions from the relay on
             its channel. The eavesdropper can overhear either one or
             both of the orthogonal channels. Inner and outer bounds on
             the secrecy capacity are developed for both the discrete
             memoryless and the Gaussian channel models. For the discrete
             memoryless case, the secrecy capacity is shown to be
             achieved by a partial decode-and-forward (PDF) scheme when
             the eavesdropper can overhear only one of the two orthogonal
             channels. Two new outer bounds are presented for the
             Gaussian model using recent capacity results for a Gaussian
             multi-antenna channel with a multi-antenna eavesdropper. The
             outer bounds are shown to be tight for two sub-classes of
             channels. The first sub-class is one in which the source and
             relay are clustered and the eavesdropper overhears on only
             one of the two channels for which the PDF strategy is
             optimal. The second is a sub-class in which the source does
             not transmit to the relay for which a noise-forwarding
             strategy is optimal. © 2009 IEEE.},
   Doi = {10.1109/ITA.2009.5044960},
   Key = {fds235926}
}

@article{fds235959,
   Author = {Chi, Y and Pezeshki, A and Scharf, L and Calderbank,
             R},
   Title = {Sensitivity to basis mismatch in compressed
             sensing},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3930-3933},
   Publisher = {IEEE},
   Year = {2010},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2010.5495800},
   Abstract = {Compressed sensing theory suggests that successful inversion
             of an image of the physical world from its modal parameters
             can be achieved at measurement dimensions far lower than the
             image dimension, provided that the image is sparse in an a
             priori known basis. The assumed basis for sparsity typically
              corresponds to a gridding of the parameter space, e.g., a
              DFT grid in spectrum analysis. However, in reality no
              physical field is sparse in the DFT basis or in an a priori
              known basis. No matter how finely we grid the parameter
              space, the sources may not lie in the center of the grid
             cells and there is always mismatch between the assumed and
             the actual bases for sparsity. In this paper, we study the
             sensitivity of compressed sensing (basis pursuit to be
             exact) to mismatch between the assumed and the actual
             sparsity bases. Our mathematical analysis and numerical
             examples show that the performance of basis pursuit degrades
             considerably in the presence of basis mismatch. ©2010
             IEEE.},
   Doi = {10.1109/ICASSP.2010.5495800},
   Key = {fds235959}
}

@article{fds235994,
   Author = {Chi, Y and Scharf, LL and Pezeshki, A and Calderbank,
             AR},
   Title = {Sensitivity to basis mismatch in compressed
             sensing},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {59},
   Number = {5},
   Pages = {2182-2195},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {May},
   ISSN = {1053-587X},
   url = {http://dx.doi.org/10.1109/TSP.2011.2112650},
   Abstract = {The theory of compressed sensing suggests that successful
             inversion of an image of the physical world (broadly defined
             to include speech signals, radar/sonar returns, vibration
             records, sensor array snapshot vectors, 2-D images, and so
             on) for its source modes and amplitudes can be achieved at
             measurement dimensions far lower than what might be expected
             from the classical theories of spectrum or modal analysis,
              provided that the image is sparse in an a priori known basis.
             For imaging problems in spectrum analysis, and passive and
             active radar/sonar, this basis is usually taken to be a DFT
             basis. However, in reality no physical field is sparse in
              the DFT basis or in any a priori known basis. No matter how
              finely we grid the parameter space, the sources may not lie
             in the center of the grid cells and consequently there is
             mismatch between the assumed and the actual bases for
             sparsity. In this paper, we study the sensitivity of
             compressed sensing to mismatch between the assumed and the
             actual sparsity bases. We start by analyzing the effect of
             basis mismatch on the best k-term approximation error, which
             is central to providing exact sparse recovery guarantees. We
             establish achievable bounds for the ℓ1 error of the best
             k-term approximation and show that these bounds grow
             linearly with the image (or grid) dimension and the mismatch
             level between the assumed and actual bases for sparsity. We
             then derive bounds, with similar growth behavior, for the
             basis pursuit ℓ1 recovery error, indicating that the
             sparse recovery may suffer large errors in the presence of
              basis mismatch. Although we present our results in the
             context of basis pursuit, our analysis applies to any sparse
             recovery principle that relies on the accuracy of best
             k-term approximations for its performance guarantees. We
             particularly highlight the problematic nature of basis
             mismatch in Fourier imaging, where spillage from off-grid
             DFT components turns a sparse representation into an
             incompressible one. We substantiate our mathematical
             analysis by numerical examples that demonstrate a
             considerable performance degradation for image inversion
             from compressed sensing measurements in the presence of
             basis mismatch, for problem sizes common to radar and sonar.
             © 2011 IEEE.},
   Doi = {10.1109/TSP.2011.2112650},
   Key = {fds235994}
}
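
The entry above argues that off-grid sources make the best k-term approximation degrade. A minimal numpy sketch of that phenomenon, not taken from the paper: a tone sitting exactly on a DFT grid point is 1-sparse in the DFT basis, while the same tone moved half a bin off the grid leaks across many coefficients, so the residual after keeping the k largest coefficients stays large. The grid size, frequencies, and values of k are arbitrary illustrative choices.

import numpy as np

N = 256                      # grid (signal) dimension, arbitrary choice
n = np.arange(N)

def dft_coeffs(freq_bin):
    # Magnitudes of the orthonormal DFT coefficients of a unit-norm tone at 'freq_bin'
    # (the frequency may be fractional, i.e., off the DFT grid).
    x = np.exp(2j * np.pi * freq_bin * n / N) / np.sqrt(N)
    return np.abs(np.fft.fft(x, norm="ortho"))

def residual_after_k_terms(c, k):
    # l2 norm of what remains after keeping the k largest-magnitude coefficients.
    mags = np.sort(c)[::-1]
    return np.sqrt(np.sum(mags[k:] ** 2))

on_grid  = dft_coeffs(40.0)   # tone exactly on a DFT grid point: 1-sparse
off_grid = dft_coeffs(40.5)   # same tone, half a bin off the grid: leaks everywhere

for k in (1, 5, 20):
    print(f"k={k:2d}  on-grid residual={residual_after_k_terms(on_grid, k):.2e}  "
          f"off-grid residual={residual_after_k_terms(off_grid, k):.2e}")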

@article{fds235797,
   Author = {Calderbank, AR and Coffman, EG and Flatto, L},
   Title = {SEQUENCING PROBLEMS IN TWO-SERVER SYSTEMS.},
   Journal = {Mathematics of Operations Research},
   Volume = {10},
   Number = {4},
   Pages = {585-598},
   Publisher = {Institute for Operations Research and the Management
             Sciences (INFORMS)},
   Year = {1985},
   Month = {January},
   url = {http://dx.doi.org/10.1287/moor.10.4.585},
   Abstract = {We analyze a service system in which two identical servers
             move one at a time along a linear array of N positions.
             Requests for service, each designating one of the N
              positions, join a first-in-first-out queue, where processing of
              the nth request does not begin until processing of the
              (n-1)th request is completed. Processing the nth request
             entails determining which server to move, moving this server
             to the requested position, and then performing the service.
             Several potential applications of the model are mentioned,
             the most notable being the design of computer storage
             systems with multiple access devices. Within a simple
             probability model we compare server-selection policies in
             terms of the equilibrium expected distance a server is moved
             in processing a request. Distance is measured under two
             regimes, both assigning a unit distance between adjacent
             positions.},
   Doi = {10.1287/moor.10.4.585},
   Key = {fds235797}
}
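
A small Monte Carlo sketch of the model in the entry above, written for illustration only (it is not the paper's analysis): two servers on a linear array of N positions, requests uniform over positions, and two server-selection policies compared by the average distance a server moves per request. The initial positions, the value of N, the assumption that a server stays where it last served, and the "random" comparison policy are my own choices.

import random

def average_distance(policy, N=100, requests=200_000, seed=1):
    # Average distance a server moves per request on a linear array of N positions.
    rng = random.Random(seed)
    servers = [0, N - 1]              # initial positions, arbitrary
    total = 0
    for _ in range(requests):
        r = rng.randrange(N)          # requested position, uniform over the array
        if policy == "nearer":        # send whichever server is currently closer
            i = min((0, 1), key=lambda j: abs(servers[j] - r))
        else:                         # "random": pick a server uniformly at random
            i = rng.randrange(2)
        total += abs(servers[i] - r)
        servers[i] = r                # the chosen server remains at the request position
    return total / requests

for policy in ("nearer", "random"):
    print(policy, round(average_distance(policy), 2))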

@article{fds331066,
   Author = {Calderbank, AR and Coffman, EG and Flatto, L},
   Title = {Sequencing two Servers On a Sphere},
   Journal = {Communications in Statistics. Stochastic
             Models},
   Volume = {1},
   Number = {1},
   Pages = {17-28},
   Publisher = {Informa UK Limited},
   Year = {1985},
   Month = {January},
   url = {http://dx.doi.org/10.1080/15326348508807002},
   Abstract = {We analyze a service system in which two servers move
             independently on the surface of the n-dimensional sphere.
             Requests for service arrive independently and uniformly over
              the surface. The ith request is to be served completely
              before service of the (i+1)st request begins. In an earlier
              paper the authors showed that the nearer server (NS) policy
              is optimal among all server selection policies in the sense
              that it minimizes the equilibrium expected angular distance
              E(D) which a server moves to process a request. In the
              present paper we obtain for all n the integral equation
              satisfied by the equilibrium measure for the angular
              distance Θ between servers under the NS policy. The equation
              is solved numerically. We also show that E(D) + E(Θ) = π,
              and use this to compute E(D). © 1985, Taylor & Francis
             Group, LLC. All rights reserved.},
   Doi = {10.1080/15326348508807002},
   Key = {fds331066}
}

@article{fds235923,
   Author = {Pezeshki, A and Calderbank, R and Scharf, LL},
   Title = {Sidelobe suppression in a desired range/Doppler
             interval},
   Journal = {IEEE National Radar Conference - Proceedings},
   Publisher = {IEEE},
   Year = {2009},
   Month = {September},
   ISSN = {1097-5659},
   url = {http://dx.doi.org/10.1109/RADAR.2009.4977144},
   Abstract = {We present simple methods for constructing radar waveforms
             whose ambiguity functions are free of sidelobes inside a
             desired range or Doppler interval. We exploit the
             time-frequency duality between pulse amplitude modulation
             (PAM) and orthogonal frequency division multiplexing (OFDM)
             to sequence Golay complementary codes across time or
             frequency and clear out range/Doppler sidelobes. Proper
             sequencing of complementary codes in time (PAM design)
             enables the annihilation of range sidelobes along a desired
             Doppler interval. The dual design, i.e., OFDM signaling of
             complementary codes, enables the annihilation of Doppler
             sidelobes along a desired range interval. The two designs
             can be used sequentially to bring weak targets out of the
             sidelobes of nearby strong reflectors inside a range-Doppler
             interval of interest. ©2009 IEEE.},
   Doi = {10.1109/RADAR.2009.4977144},
   Key = {fds235923}
}
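
A short numpy sketch of the property underlying the designs in the entry above: the aperiodic autocorrelations of a Golay complementary pair sum to an impulse, which is what leaves sidelobes free to be cleared. The pair is built by the standard length-doubling concatenation; the length is an arbitrary choice, and this is an illustration of the complementary property rather than the paper's PAM/OFDM construction.

import numpy as np

def golay_pair(m):
    # Golay complementary pair of length 2**m via the standard concatenation recursion.
    a, b = np.array([1.0]), np.array([1.0])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

def acorr(x):
    # Full aperiodic autocorrelation of a real sequence.
    return np.correlate(x, x, mode="full")

a, b = golay_pair(5)                     # length-32 pair
s = acorr(a) + acorr(b)                  # impulse: 2N at zero lag, zero at all other lags
print("zero-lag peak:", s[len(a) - 1])   # expect 2 * 32 = 64
print("largest off-peak magnitude:", np.max(np.abs(np.delete(s, len(a) - 1))))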

@article{fds236050,
   Author = {Calderbank, AR},
   Title = {Signal design for co-channel interference suppression with
             applications to wireless communications},
   Journal = {Electro International, Conference Proceedings},
   Pages = {47},
   Year = {1996},
   Month = {January},
   Abstract = {Co-channel interference is a major impairment in wireless
             systems with channel (frequency and/or time) re-use. In
             practice the performance of Time Division Multiple Access
             (TDMA) and Frequency Division Multiple Access (FDMA) systems
             is limited by a few dominant co-channel interferers. These
             can be removed by means of multiple antennas but it is
             difficult to demand this at the mobile because of technology
             limitations. The standard solution is to treat co-channel
             interference as Gaussian noise and to employ powerful
             channel codes. However, this solution is far from optimal
             since the decoder is using an inappropriate metric for
             decoding. In this paper it is shown that a more effective
             use of system redundancy is to design channel codes that are
             matched to an adaptive linear receiver, so that the
             combination provides interference suppression. It is shown
             that a simple one symbol parity check code is capable of
             suppressing one interferer, a repetition code of length N is
             capable of suppressing N - 1 interferers, and a code of K
             information symbols and N channel symbols is capable of
             suppressing N/K interferers.},
   Key = {fds236050}
}

@article{fds290773,
   Author = {Wang, L and Huang, J and Yuan, X and Krishnamurthy, K and Greenberg, J and Cevher, V and Rodrigues, MRD and Brady, D and Calderbank, R and Carin,
             L},
   Title = {Signal recovery and system calibration from multiple
              compressive Poisson measurements},
   Journal = {SIAM Journal on Imaging Sciences},
   Volume = {8},
   Number = {3},
   Pages = {1923-1954},
   Publisher = {Society for Industrial & Applied Mathematics
             (SIAM)},
   Year = {2015},
   Month = {September},
   url = {http://dx.doi.org/10.1137/140998779},
   Abstract = {The measurement matrix employed in compressive sensing
             typically cannot be known precisely a priori and must be
             estimated via calibration. One may take multiple compressive
             measurements, from which the measurement matrix and
             underlying signals may be estimated jointly. This is of
             interest as well when the measurement matrix may change as a
             function of the details of what is measured. This problem
             has been considered recently for Gaussian measurement noise,
             and here we develop this idea with application to Poisson
             systems. A collaborative maximum likelihood algorithm and
             alternating proximal gradient algorithm are proposed, and
             associated theoretical performance guarantees are
             established based on newly derived concentration-of-measure
             results. A Bayesian model is then introduced, to improve
             flexibility and generality. Connections between the maximum
             likelihood methods and the Bayesian model are developed, and
             example results are presented for a real compressive X-ray
             imaging system.},
   Doi = {10.1137/140998779},
   Key = {fds290773}
}

@article{fds235759,
   Author = {Bennatan, A and Shamai, S and Calderbank, AR},
   Title = {Soft-decoding-based strategies for relay and interference
             channels: Analysis and achievable rates using LDPC
             codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {60},
   Number = {4},
   Pages = {1977-2009},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2014},
   Month = {January},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2013.2294373},
   Abstract = {We provide a rigorous mathematical analysis of two
             communication strategies: soft decode-and-forward (soft-DF)
             for relay channels and soft partial interference-cancelation
             (soft-IC) for interference channels. Both strategies involve
             soft estimation, which assists the decoding process. We
             consider LDPC codes, not because of their practical
             benefits, but because of their analytic tractability, which
             enables an asymptotic analysis similar to random coding
             methods of information theory. Unlike some works on the
             closely-related demodulate-and-forward, we assume
             non-memoryless, code-structure-aware estimation. With
             soft-DF, we develop simultaneous density evolution to bound
             the decoding error probability at the destination. This
             result applies to erasure relay channels. In one variant of
             soft-DF, the relay applies Wyner-Ziv coding to enhance its
             communication with the destination, borrowing from
             compress-and-forward. To analyze soft-IC, we adapt existing
             techniques for iterative multiuser detection, and focus on
             binary-input additive white Gaussian noise interference
             channels. We prove that optimal point-to-point codes are
             unsuitable for soft-IC, as well as for all strategies that
             apply partial decoding to improve upon single-user detection
             and multiuser detection, including Han-Kobayashi. © 2013
             IEEE.},
   Doi = {10.1109/TIT.2013.2294373},
   Key = {fds235759}
}

@article{fds235832,
   Author = {Naguib, AF and Calderbank, R},
   Title = {Space-time coding and signal processing for high data rate
             wireless communications},
   Journal = {Wireless Communications and Mobile Computing},
   Volume = {1},
   Number = {1},
   Pages = {13-34},
   Publisher = {WILEY},
   Year = {2001},
   ISSN = {1530-8669},
   url = {http://dx.doi.org/10.1002/1530-8677(200101/03)1:1<13::AID-WCM2>3.0.CO;2-J},
   Abstract = {The information capacity of wireless communication systems
             can be increased dramatically by employing multiple transmit
             and receive antennas [Foschini GJ, Gans MJ. On limits of
             wireless communications in a fading environment when using
             multiple antennas. Wireless Communications Magazine 1998; 6
             311-335. Telatar E. Capacity of Multi-Antenna Gaussian
              Channels, Technical Memorandum, AT&T Bell Laboratories,
             1995.] An effective approach to increasing data rate over
             wireless channels is to employ coding techniques appropriate
             to multiple transmit antennas, that is space-time coding.
             Space-time codes introduce temporal and spatial correlation
             into signals transmitted from different antennas, in order
             to provide diversity at the receiver, and coding gain over
             an uncoded system. The spatial-temporal structure of these
             codes can be exploited to further increase the capacity of
             wireless systems with a relatively simple receiver
             structure. This paper provides an overview of space-time
             coding techniques and the associated signal processing
              framework. Copyright © 2001 John Wiley & Sons,
             Ltd.},
   Doi = {10.1002/1530-8677(200101/03)1:1<13::AID-WCM2>3.0.CO;2-J},
   Key = {fds235832}
}

@article{fds236065,
   Author = {Tarokh, V and Jafarkhani, H and Calderbank, AR},
   Title = {Space-time block codes from orthogonal designs},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {45},
   Number = {5},
   Pages = {1456-1467},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1999},
   Month = {January},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.771146},
   Abstract = {The theory of space-time block coding is presented as a
             simple and elegant method for transmission using multiple
             transmit antennas in a wireless Rayleigh/Rician environment.
             These codes have a very simple maximum-likelihood decoding
             algorithm which is only based on linear processing. Designs
             that correspond to combined coding and linear processing at
             the transmitter are also considered.},
   Doi = {10.1109/18.771146},
   Key = {fds236065}
}
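
The simplest orthogonal design is the 2 x 2 Alamouti scheme, and the numpy sketch below (my own illustration, not code from the paper) shows the linear-processing maximum-likelihood decoding the abstract above refers to: combining the two received samples with conjugated channel gains decouples the two transmitted symbols. The channel model, noise level, and QPSK symbols are arbitrary choices.

import numpy as np

rng = np.random.default_rng(0)
h1, h2 = (rng.normal(size=2) + 1j * rng.normal(size=2)) / np.sqrt(2)   # flat Rayleigh gains
s1, s2 = np.exp(1j * np.pi / 4), np.exp(-3j * np.pi / 4)               # two QPSK symbols
n1, n2 = 0.05 * (rng.normal(size=2) + 1j * rng.normal(size=2))         # additive noise

# Alamouti transmission over two time slots from two antennas:
#   slot 1: antenna 1 sends s1,          antenna 2 sends s2
#   slot 2: antenna 1 sends -conj(s2),   antenna 2 sends conj(s1)
r1 = h1 * s1 + h2 * s2 + n1
r2 = -h1 * np.conj(s2) + h2 * np.conj(s1) + n2

# Linear combining: the "very simple maximum-likelihood decoding" based on linear processing.
g = abs(h1) ** 2 + abs(h2) ** 2
s1_hat = (np.conj(h1) * r1 + h2 * np.conj(r2)) / g
s2_hat = (np.conj(h2) * r1 - h1 * np.conj(r2)) / g
print("symbol errors:", abs(s1_hat - s1), abs(s2_hat - s2))   # both small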

@article{fds236069,
   Author = {Tarokh, V and Jafarkhani, H and Calderbank, AR},
   Title = {Space-time block coding for wireless communications:
             Performance results},
   Journal = {IEEE Journal on Selected Areas in Communications},
   Volume = {17},
   Number = {3},
   Pages = {451-460},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1999},
   Month = {March},
   url = {http://dx.doi.org/10.1109/49.753730},
   Abstract = {We document the performance of space-time block codes [13],
             [14], which provide a new paradigm for transmission over
             Rayleigh fading channels using multiple transmit antennas.
             Data is encoded using a space-time block code, and the
             encoded data is split into n streams which are
             simultaneously transmitted using n transmit antennas. The
             received signal at each receive antenna is a linear
             superposition of the n transmitted signals perturbed by
             noise. Maximum likelihood decoding is achieved in a simple
             way through decoupling of the signals transmitted from
             different antennas rather than joint detection. This uses
             the orthogonal structure of the space-time block code and
             gives a maximum likelihood decoding algorithm which is based
             only on linear processing at the receiver. We review the
             encoding and decoding algorithms for various codes and
             provide simulation results demonstrating their performance.
             It is shown that using multiple transmit antennas and
             space-time block coding provides remarkable performance at
             the expense of almost no extra processing.},
   Doi = {10.1109/49.753730},
   Key = {fds236069}
}

@article{fds235825,
   Author = {Tarokh, V and Naguib, AF and Seshadri, N and Calderbank,
             AR},
   Title = {Space-time codes for high data rate wireless communication:
             Mismatch analysis},
   Journal = {IEEE International Conference on Communications},
   Volume = {1},
   Pages = {309-313},
   Year = {1997},
   Month = {January},
   Abstract = {We revisit Space-Time Codes for a mobile communication
             system that employs multiple antennas at the base and
             optional antenna diversity at the mobile station. The
             realistic case when the channel state is not completely
             known is considered. It is assumed that the channel
             estimator extracts the fade coefficients using orthogonal
             pilot tones. Mismatch analysis is then carried out. It is
             proved that in the absence of ideal channel state
             information the design criteria for space-time codes
              developed in [11] are still valid for the equal-energy
              constellation case. Using our derivation, it is observed
             that channel estimation techniques commonly used over
             rapidly fading channels can be used in conjunction with
             space-time codes provided that the number of transmit
             antennas is small.},
   Key = {fds235825}
}

@article{fds235827,
   Author = {Tarokh, V and Seshadri, N and Calderbank, AR},
   Title = {Space-time codes for high data rate wireless communication:
             Performance criteria},
   Journal = {IEEE International Conference on Communications},
   Volume = {1},
   Pages = {299-303},
   Year = {1997},
   Month = {January},
   Abstract = {We consider the design of channel codes for improving the
             data rate and/or the reliability of communications over
             fading channels using multiple transmit antennas. Here, data
             is encoded by a channel code and the encoded data is split
             into n streams that are simultaneously transmitted using n
             transmit antennas. The received signal at each receive
             antenna is a linear superposition of the n transmitted
             signals. We derive performance criteria for designing
             channel codes under the assumption that the fading is slow
             and frequency non-selective. Performance is shown to be
             determined by diversity gain quantified by ranks and coding
             gain quantified by determinants of certain matrices that are
             constructed from the code sequences.},
   Key = {fds235827}
}

@article{fds236061,
   Author = {Tarokh, V and Seshadri, N and Calderbank, AR},
   Title = {Space-time codes for high data rate wireless communication:
             Performance criterion and code construction},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {44},
   Number = {2},
   Pages = {744-765},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1998},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.661517},
   Abstract = {We consider the design of channel codes for improving the
             data rate and/or the reliability of communications over
             fading channels using multiple transmit antennas. Data is
             encoded by a channel code and the encoded data is split into
             n streams that are simultaneously transmitted using n
             transmit antennas. The received signal at each receive
             antenna is a linear superposition of the n transmitted
             signals perturbed by noise. We derive performance criteria
             for designing such codes under the assumption that the
             fading is slow and frequency nonselective. Performance is
             shown to be determined by matrices constructed from pairs of
             distinct code sequences. The minimum rank among these
             matrices quantifies the diversity gain, while the minimum
             determinant of these matrices quantifies the coding gain.
             The results are then extended to fast fading channels. The
             design criteria are used to design trellis codes for high
             data rate wireless communication. The encoding/decoding
             complexity of these codes is comparable to trellis codes
             employed in practice over Gaussian channels. The codes
             constructed here provide the best tradeoff between data
             rate, diversity advantage, and trellis complexity.
             Simulation results are provided for 4 and 8 PSK signal sets
             with data rates of 2 and 3 bits/symbol, demonstrating
             excellent performance that is within 2-3 dB of the outage
             capacity for these channels using only 64 state encoders. ©
             1998 IEEE.},
   Doi = {10.1109/18.661517},
   Key = {fds236061}
}
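
A brief numpy sketch of the rank and determinant criteria stated in the abstract above, applied to a toy pair of codewords of my own choosing (rows are transmit antennas, columns are time slots): the rank of A = (C1 - C2)(C1 - C2)^H gives the transmit diversity order contributed by that pair, and the r-th root of the product of its nonzero eigenvalues gives the corresponding coding-gain figure.

import numpy as np

# Two toy space-time codewords with BPSK entries (not codewords from the paper).
C1 = np.array([[ 1,  1,  1],
               [ 1, -1,  1]], dtype=complex)
C2 = np.array([[ 1, -1,  1],
               [-1, -1,  1]], dtype=complex)

D = C1 - C2                                  # codeword difference matrix
A = D.dot(D.conj().T)                        # n_tx x n_tx matrix governing the pairwise error
eigvals = np.linalg.eigvalsh(A).real
nonzero = eigvals[eigvals > 1e-9]
rank = len(nonzero)                          # diversity order for this codeword pair
coding_gain = np.prod(nonzero) ** (1.0 / rank)

print("diversity order (rank):", rank)
print("coding gain ((product of nonzero eigenvalues)^(1/rank)):", round(coding_gain, 3))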

@article{fds235826,
   Author = {Calderbank, AR and Seshadri, N and Tarokh, V},
   Title = {Space-time codes for wireless communication},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {146},
   Publisher = {IEEE},
   Year = {1997},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.1997.613061},
   Abstract = {The design of channel codes for improving the data rate
             and/or the reliability of communications over fading
             channels using multiple transmit antennas is considered. A
             design criterion is provided and is then used to design
             space-time codes for high data rate wireless communication.
             These codes are trellis codes that are easy to encode and
             decode and have remarkable performance. © 1997
             IEEE.},
   Doi = {10.1109/ISIT.1997.613061},
   Key = {fds235826}
}

@article{fds235824,
   Author = {Seshadri, N and Tarokh, V and Calderbank, AR},
   Title = {Space-time codes for wireless communication: Code
             construction},
   Journal = {IEEE Vehicular Technology Conference},
   Volume = {2},
   Pages = {637-641},
   Year = {1997},
   Month = {January},
   Abstract = {We consider the design of channel codes for improving the
             data rate and/or the reliability of communications over
             fading channels using multiple transmit antennas. Here, data
             is encoded by a channel code and the encoded data is split
             into n streams that are simultaneously transmitted using n
             transmit antennas. The received signal at each receive
             antenna is a linear superposition of the n transmitted
             signals. We review the performance criteria for designing
             such codes under the assumption that the fading is slow and
             frequency non-selective established in [3]. Performance is
             determined by diversity gain quantified by ranks and coding
             gain quantified by determinants of certain matrices that are
             constructed from the code sequences. The performance
             criterion is then used to design trellis codes for high data
             rate wireless communication. These codes are easy to encode
             and decode. They provide the best trade-off between data
             rate, diversity gain, constellation size and trellis
             complexity. Simulation results are provided for 4 and 8 PSK
             signal sets with data rates of 2 and 3 bits/symbol,
             demonstrating excellent performance that is within 2-3 dB of
             the outage capacity for these channels.},
   Key = {fds235824}
}

@article{fds235837,
   Author = {Al-Dhahir, N and Fragouli, C and Stamoulis, A and Younis, W and Calderbank, R},
   Title = {Space-time processing for broadband wireless
             access},
   Journal = {IEEE Communications Magazine},
   Volume = {40},
   Number = {9},
   Pages = {136-142},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2002},
   Month = {September},
   ISSN = {0163-6804},
   url = {http://dx.doi.org/10.1109/MCOM.2002.1031840},
   Abstract = {We present an overview of research activities on space-time
             coding for broadband wireless transmission performed at AT&T
             Shannon Laboratory over the past two years. The emphasis is
             on physical layer modem algorithms such as channel
             estimation, equalization, and interference cancellation.
             However, we also discuss the impact of space-time coding
             gains at the physical layer on throughput at or above the
             networking layer. Furthermore, we describe a flexible
             graphical user interface attached to our physical layer
             simulation engine in order to explore the performance of
             space-time codes under a variety of practical transmission
             scenarios. Simulation results for the EDGE cellular system
             and the 802.11 wireless LAN environment are
             presented.},
   Doi = {10.1109/MCOM.2002.1031840},
   Key = {fds235837}
}

@article{fds235968,
   Author = {Lau, CC and Calderbank, R and Zoltowski, MD},
   Title = {Space-time processing for MIMO-OFDM using DFT-based
             complementary sequences},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {7706},
   Publisher = {SPIE},
   Year = {2010},
   Month = {December},
   ISSN = {0277-786X},
   url = {http://dx.doi.org/10.1117/12.851020},
   Abstract = {In this paper, a new space-time signaling scheme is proposed
             for Orthogonal Frequency Division Multiplexing (OFDM) using
             complementary sequences derived from the rows of the DFT
             matrix. The autocorrelative properties of the complementary
              sequences allow multiple complex data signals at the
              transmitter with an arbitrary number of antennas to be
              perfectly separated and reconstructed at the receiver
              without prior channel knowledge while achieving full rate.
             This new method is proposed and derived for multiple
             MIMO-OFDM systems with multipath fading; at the receiver,
             symbol estimation is effected via maximum likelihood
             estimation (ML). © 2010 SPIE.},
   Doi = {10.1117/12.851020},
   Key = {fds235968}
}

@article{fds235851,
   Author = {Ashikhmin, A and Calderbank, AR},
   Title = {Space-time reed-muller codes for noncoherent MIMO
             transmission},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2005},
   Pages = {1952-1956},
   Publisher = {IEEE},
   Year = {2005},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.2005.1523686},
   Abstract = {We present a family of Space-Time codes for the noncoherent
             MIMO channel. The codes are constructed via functions that
             can be considered as a generalization of boolean functions
             to commuting projection operators which arise in the theory
             of quantum stabilizer codes. These space-time codes are
             strongly related to standard binary Reed-Muller codes. In
             particular, they can be decoded by adapting a decoding
             algorithm for Reed-Muller codes. We show that the first
             subclass of codes from this family, which we view as the
              first order space-time Reed-Muller codes, allows transmission
             with rates close to the MIMO noncoherent channel capacity in
             the low signal to noise ratio (SNR) regime.},
   Doi = {10.1109/ISIT.2005.1523686},
   Key = {fds235851}
}

@article{fds235844,
   Author = {Calderbank, AR and Diggavi, SN and Al-Dhahir, N},
   Title = {Space-time signaling based on Kerdock and Delsarte-Goethals
             codes},
   Journal = {IEEE International Conference on Communications},
   Volume = {1},
   Pages = {483-487},
   Year = {2004},
   Month = {August},
   Abstract = {This paper designs space-time codes for standard PSK and QAM
             signal constellations that have flexible rate, diversity and
             require no constellation expansion. Central to this
             construction are binary partitions of the PSK and QAM
             constellations that appear in codes designed for the
             Gaussian channel. The space-time codes presented here are
             designed by separately specifying the different levels of
             the binary partition in the space-time array. The individual
             levels are addressed by either the binary symmetric matrices
             associated with codewords in a Kerdock code or other
             families of binary matrices. Binary properties of these sets
             are sufficient to verify the diversity property of the
             codewords in the complex domain. Larger sets of binary
             symmetric matrices (such as the set used in
             Delsarte-Goethals codes) are used to trade diversity
             protection for increased rate.},
   Key = {fds235844}
}

@article{fds235996,
   Author = {Calderbank, R and Casazza, PG and Heinecke, A and Kutyniok, G and Pezeshki, A},
   Title = {Sparse fusion frames: Existence and construction},
   Journal = {Advances in Computational Mathematics},
   Volume = {35},
   Number = {1},
   Pages = {1-31},
   Publisher = {Springer Nature},
   Year = {2011},
   Month = {July},
   ISSN = {1019-7168},
   url = {http://dx.doi.org/10.1007/s10444-010-9162-3},
   Abstract = {Fusion frame theory is an emerging mathematical theory that
             provides a natural framework for performing hierarchical
             data processing. A fusion frame can be regarded as a
             frame-like collection of subspaces in a Hilbert space, and
             thereby generalizes the concept of a frame for signal
             representation. However, when the signal and/or subspace
             dimensions are large, the decomposition of the signal into
             its fusion frame measurements through subspace projections
             typically requires a large number of additions and
             multiplications, and this makes the decomposition
             intractable in applications with limited computing budget.
             To address this problem, in this paper, we introduce the
             notion of a sparse fusion frame, that is, a fusion frame
             whose subspaces are generated by orthonormal basis vectors
             that are sparse in a 'uniform basis' over all subspaces,
             thereby enabling low-complexity fusion frame decompositions.
             We study the existence and construction of sparse fusion
             frames, but our focus is on developing simple algorithmic
             constructions that can easily be adopted in practice to
             produce sparse fusion frames with desired (given) operators.
             By a desired (or given) operator we simply mean one that has
             a desired (or given) set of eigenvalues for the fusion frame
             operator. We start by presenting a complete characterization
             of Parseval fusion frames in terms of the existence of
             special isometries defined on an encompassing Hilbert space.
             We then introduce two general methodologies to generate new
             fusion frames from existing ones, namely the Spatial
             Complement Method and the Naimark Complement Method, and
             analyze the relationship between the parameters of the
             original and the new fusion frame. We proceed by
             establishing existence conditions for 2-sparse fusion frames
             for any given fusion frame operator, for which the
             eigenvalues are greater than or equal to two. We then
             provide an easily implementable algorithm for computing such
             2-sparse fusion frames. © 2010 Springer Science+Business
             Media, LLC.},
   Doi = {10.1007/s10444-010-9162-3},
   Key = {fds235996}
}

@article{fds235960,
   Author = {Calderbank, R and Howard, S and Jafarpour, S},
   Title = {Sparse reconstruction via the reed-muller
             sieve},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1973-1977},
   Publisher = {IEEE},
   Year = {2010},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2010.5513361},
   Abstract = {This paper introduces the Reed Muller Sieve, a deterministic
             measurement matrix for compressed sensing. The columns of
             this matrix are obtained by exponentiating codewords in the
             quaternary second order Reed Muller code of length N. For k
             = O(N), the Reed Muller Sieve improves upon prior methods
             for identifying the support of a k-sparse vector by removing
             the requirement that the signal entries be independent. The
             Sieve also enables local detection; an algorithm is
              presented with complexity N² log N that detects the presence
             or absence of a signal at any given position in the data
             domain without explicitly reconstructing the entire signal.
             Reconstruction is shown to be resilient to noise in both the
             measurement and data domains; the ℓ2/ℓ2 error bounds
             derived in this paper are tighter than the ℓ2/ℓ1 bounds
             arising from random ensembles and the ℓ1/ℓ1 bounds
             arising from expander-based ensembles. © 2010
             IEEE.},
   Doi = {10.1109/ISIT.2010.5513361},
   Key = {fds235960}
}

@article{fds235944,
   Author = {Lau, C and Zoltowski, M and Calderbank, R},
   Title = {Spatio-temporal scheduling of complementary sequences with
             application to MIMO-OFDM},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {503-507},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2009.5469876},
   Abstract = {In this paper, a new method of space-time processing is
             proposed for Orthogonal Frequency Division Multiplexing
             (OFDM) using complementary sequences derived from the rows
             of the DFT matrix. The autocorrelative properties of the
             complementary sequences allows multiple complex data signals
             at the transmitter with an arbitrary number of antennas to
             be perfectly separated at the receiver without prior channel
             knowledge while achieving full-rate. This new method is
             proposed and derived for multiple MIMO-OFDM systems with
             multipath fading; at the receiver, symbol estimation is
             effected via maximum likelhihood estimation (ML). © 2009
             IEEE.},
   Doi = {10.1109/ACSSC.2009.5469876},
   Key = {fds235944}
}

@article{fds235813,
   Author = {Calderbank, AR and Graham, RL and Shepp, LA and Frankl, P and Li,
             WCW},
   Title = {Sperner capacity of linear and nonlinear codes for the
             cyclic triangle},
   Journal = {Proceedings of the 1993 IEEE International Symposium on
             Information Theory},
   Pages = {154},
   Year = {1993},
   Month = {January},
   Abstract = {Shannon introduced the concept of zero-error capacity of a
             discrete memoryless channel. The channel determines an
             undirected graph on the symbol alphabet, where adjacency
             means that symbols cannot be confused at the receiver. The
             zero-error or Shannon capacity is an invariant of this
             graph. Gargano, Koerner, and Vaccaro have recently extended
             the concept of Shannon capacity to directed graphs. Their
             generalization of Shannon capacity is called Sperner
             capacity. We resolve a problem posed by these authors by
             giving the first example (the two orientations of the
             triangle) of a graph where the Sperner capacity depends on
             the orientations of the edges. Sperner capacity seems to be
             achieved by nonlinear codes, whereas Shannon capacity seems
             to be attainable by linear codes. In particular, linear
             codes do not achieve Sperner capacity for the cyclic
             triangle. We use Fourier analysis or linear programming to
              obtain the best upper bounds for linear codes. The bounds for
              unrestricted codes are obtained from rank arguments,
              eigenvalue interlacing inequalities and polynomial algebra.
              The statement of the cyclic q-gon problem is very simple:
              what is the maximum size N_q(n) of a subset S_n of {0, 1, ...,
              q - 1}^n with the property that for every pair of distinct
              vectors x = (x_i), y = (y_i) in S_n, we have x_j - y_j ≡
              1 (mod q) for some j? For q = 3 (the cyclic triangle), we
              show N_3(n) ≅ 2^n. If however S_n is a subgroup, then we give
              a simple proof that |S_n| ≤ (√3)^n.},
   Key = {fds235813}
}
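
A brute-force sketch of the quantity N_q(n) defined at the end of the abstract above, feasible only for very small n. It reads "pair" as an ordered pair, so the +1 (mod q) condition must hold in both directions, which matches the directed-graph (Sperner) setting; that interpretation, and the tiny range of n, are my own choices.

from itertools import combinations, product

def compatible(x, y, q=3):
    # Require a coordinate stepping by +1 (mod q) in each direction of the ordered pair.
    fwd = any((xi - yi) % q == 1 for xi, yi in zip(x, y))
    bwd = any((yi - xi) % q == 1 for xi, yi in zip(x, y))
    return fwd and bwd

def N_max(n, q=3):
    # Largest S inside {0, ..., q-1}^n with every pair of distinct vectors compatible.
    vecs = list(product(range(q), repeat=n))
    for size in range(len(vecs), 0, -1):      # brute force, largest size first
        for S in combinations(vecs, size):
            if all(compatible(x, y, q) for x, y in combinations(S, 2)):
                return size
    return 0

for n in (1, 2):
    print(f"N_3({n}) =", N_max(n))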

@article{fds236006,
   Author = {Calderbank, R},
   Title = {SQUARE ROOT BOUND ON THE MINIMUM WEIGHT IN QUASI-CYCLIC
             CODES.},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {IT-29},
   Number = {3},
   Pages = {332-337},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1983},
   url = {http://dx.doi.org/10.1109/TIT.1983.1056673},
   Abstract = {The author establishes a square root bound on the minimum
             weight in the quasi-cyclic binary codes constructed by V. K.
             Bhargava, S. E. Tavares, and S. G. S. Shiva. The proof rests
              on viewing the codes as ideals in a group algebra over
             GF.},
   Doi = {10.1109/TIT.1983.1056673},
   Key = {fds236006}
}

@article{fds331062,
   Author = {Calderbank, AR},
   Title = {Symmetric Designs as the Solution of an Extremal Problem in
             Combinatorial Set Theory},
   Journal = {European Journal of Combinatorics},
   Volume = {9},
   Number = {2},
   Pages = {171-173},
   Publisher = {Elsevier BV},
   Year = {1988},
   Month = {January},
   url = {http://dx.doi.org/10.1016/S0195-6698(88)80043-X},
   Abstract = {We apply duality in the Johnson scheme J(v, k) to give a
             very short proof of a theorem of Frankl and Füredi. We
             consider a family ℱ of k-subsets of a v-set such that ℱ
             is a 1-design and |x ∪ y| ⩾ λ > 0 for all x, y ∈ ℱ.
             We prove v ⩽ (k2 − k + λ)/λ with equality if and only
             if ℱ is a symmetric 2 − (v, k, λ) design. © 1988,
             Academic Press Limited. All rights reserved.},
   Doi = {10.1016/S0195-6698(88)80043-X},
   Key = {fds331062}
}
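
A tiny sketch checking the bound in the abstract above on a familiar example of my own choosing: the Fano plane is a symmetric 2-(7, 3, 1) design, its blocks pairwise meet in at least λ = 1 point, and v = 7 attains (k² - k + λ)/λ with equality.

from itertools import combinations

# Blocks of the Fano plane, the symmetric 2-(7, 3, 1) design on points 1..7.
blocks = [{1, 2, 3}, {1, 4, 5}, {1, 6, 7}, {2, 4, 6},
          {2, 5, 7}, {3, 4, 7}, {3, 5, 6}]

v, k, lam = 7, 3, 1
assert all(len(B) == k for B in blocks)
# Every pair of distinct blocks meets in at least (here exactly) lam points.
assert all(len(B & C) >= lam for B, C in combinations(blocks, 2))
# It is a 1-design: every point lies in the same number of blocks.
assert len({sum(p in B for B in blocks) for p in range(1, 8)}) == 1

bound = (k * k - k + lam) // lam
print("bound:", bound, " v:", v, " equality:", bound == v)   # 7, 7, True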

@article{fds235821,
   Author = {Calderbank, AR and Georghiades, CN},
   Title = {Synchronizable Codes for the Optical OPPM
             Channel},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {40},
   Number = {4},
   Pages = {1097-1107},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.335965},
   Abstract = {Random overlapping pulse-position modulation (OPPM)
             sequences result in an unrecoverable error floor on both the
             probability of erroneous synchronization and the probability
              of symbol error when only chip synchronization is present. It
             is known, however, that for a given sequence length M, a
             subset of the set of all possible sequences is
             synchronizable in the sense that in the absence of noise,
             the receiver can correctly symbol synchronize by observing M
             or more symbol intervals. In this paper we design
             finite-state machines and codes over a J-ary alphabet, which
             produce sequences with the property that every subsequence
             of length L is synchronizable. Some of the codes, in
             addition to being synchronizable, produce a coding gain. For
             an optical Poisson channel we introduce joint
             synchronization and detection algorithms that utilize the
             memory in the encoded sequences to produce joint estimates
             of timing and sequences. Their performance is analyzed
             through simulations and analytical results. © 1994
             IEEE},
   Doi = {10.1109/18.335965},
   Key = {fds235821}
}

@article{fds343650,
   Author = {Rengaswamy, N and Calderbank, R and Pfister, HD and Kadhe,
             S},
   Title = {Synthesis of Logical Clifford Operators via Symplectic
             Geometry},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2018-June},
   Pages = {791-795},
   Year = {2018},
   Month = {August},
   url = {http://dx.doi.org/10.1109/ISIT.2018.8437652},
   Abstract = {Quantum error-correcting codes can be used to protect qubits
             involved in quantum computation. This requires that logical
             operators acting on protected qubits be translated to
             physical operators (circuits) acting on physical quantum
             states. We propose a mathematical framework for synthesizing
             physical circuits that implement logical Clifford operators
             for stabilizer codes. Circuit synthesis is enabled by
             representing the desired physical Clifford operator in
              ℂ^{N×N} as a 2m × 2m binary symplectic
              matrix, where N = 2^m. We show that for an [[m, m-k]]
              stabilizer code every logical Clifford operator has
              2^{k(k+1)/2} symplectic solutions, and
             we enumerate them efficiently using symplectic
             transvections. The desired circuits are then obtained by
             writing each of the solutions as a product of elementary
             symplectic matrices. For a given operator, our assembly of
             all of its physical realizations enables optimization over
             them with respect to a suitable metric. Our method of
             circuit synthesis can be applied to any stabilizer code, and
             this paper provides a proof of concept synthesis of
             universal Clifford gates for the well-known
              [[6,4,2]] code. Programs
             implementing our algorithms can be found at
             https://github.com/nrenga/symplectic-arxiv18a.},
   Doi = {10.1109/ISIT.2018.8437652},
   Key = {fds343650}
}
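
A small numpy sketch of the binary symplectic picture in the abstract above, using the row-vector (x | z) representation of Pauli operators, which is a convention choice on my part: the conjugation action of a CNOT gate on the Pauli generators is a 2m x 2m binary matrix that preserves the symplectic form over F_2. The closing loop simply evaluates the solution count 2^{k(k+1)/2} quoted in the abstract for small k.

import numpy as np

m = 2
zero = np.zeros((m, m), dtype=int)
Omega = np.block([[zero, np.eye(m, dtype=int)],
                  [np.eye(m, dtype=int), zero]])       # symplectic form on F_2^{2m}

# Rows are the images of (X1, X2, Z1, Z2) under conjugation by CNOT (control 1, target 2),
# written as binary (x | z) row vectors: X1 -> X1X2, X2 -> X2, Z1 -> Z1, Z2 -> Z1Z2.
F = np.array([[1, 1, 0, 0],
              [0, 1, 0, 0],
              [0, 0, 1, 0],
              [0, 0, 1, 1]])

print("CNOT matrix is symplectic over F_2:",
      np.array_equal(F.dot(Omega).dot(F.T) % 2, Omega))

for k in range(4):                                     # count quoted in the abstract
    print(f"k={k}: 2^(k(k+1)/2) =", 2 ** (k * (k + 1) // 2), "symplectic solutions")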

@article{fds235961,
   Author = {Qureshi, TR and Zoltowski, MD and Calderbank, R},
   Title = {Target detection in mimo radar in the presence of doppler
             using complementary sequences},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {2766-2769},
   Publisher = {IEEE},
   Year = {2010},
   Month = {January},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2010.5496206},
   Abstract = {In this paper, we present a method for detecting a point
             target using multiple antennas when the relative motion
             between the receivers and the target induces a
             non-negligible Doppler shift. As a key illustrative example,
             we consider a 4x4 system employing a unitary matrix waveform
             set, e.g., formed from Golay complementary sequences. When a
             non-negligible Doppler shift is induced by the target
             motion, the waveform matrix formed from the complementary
             sequences is no longer unitary, resulting in significantly
             degraded target range estimates. To solve this problem, we
             adopt a subspace based approach exploiting the observation
             that the receive matrix formed from matched filtering of the
             reflected waveforms has a (non-trivial) null-space. Through
             processing of the waveforms with the appropriate vector from
             the null-space, we can significantly improve the detection
             performance. We provide simulation results to confirm the
             theoretical analysis. ©2010 IEEE.},
   Doi = {10.1109/ICASSP.2010.5496206},
   Key = {fds235961}
}
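
A short numpy sketch of the degradation that motivates the entry above: at zero Doppler the matched-filter outputs of a Golay pair sum to an impulse in range, but a Doppler phase ramp on the returns breaks the complementary property and raises range sidelobes. The pair length and Doppler value are arbitrary, and this illustrates the effect rather than the paper's 4x4 waveform design or its null-space remedy.

import numpy as np

def golay_pair(m):
    # Golay complementary pair of length 2**m via the concatenation recursion.
    a, b = np.array([1.0 + 0j]), np.array([1.0 + 0j])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

def range_profile(a, b, doppler):
    # Sum of matched-filter outputs when the returns carry a Doppler phase ramp.
    n = np.arange(len(a))
    ramp = np.exp(2j * np.pi * doppler * n)
    return np.correlate(a * ramp, a, mode="full") + np.correlate(b * ramp, b, mode="full")

a, b = golay_pair(6)                       # length-64 pair
for fd in (0.0, 0.02):                     # normalized Doppler per chip, arbitrary values
    p = np.abs(range_profile(a, b, fd))
    peak, sidelobe = p[len(a) - 1], np.max(np.delete(p, len(a) - 1))
    print(f"doppler={fd}: peak={peak:.1f}, largest range sidelobe={sidelobe:.2f}")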

@article{fds235936,
   Author = {Qureshi, TR and Zoltowski, MD and Calderbank, R},
   Title = {Target detection in MIMO radar using Golay complementary
             sequences in the presence of doppler},
   Journal = {2009 47th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2009},
   Pages = {1490-1493},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ALLERTON.2009.5394498},
   Abstract = {In this paper, we present a method for detecting a point
             target using multiple antennas when the relative motion
             between the receivers and the target induces a
             non-negligible Doppler shift. As a key illustrative example,
             we consider a 4 x 4 system employing a unitary matrix
             waveform set, e.g., formed from Golay complementary
             sequences. When a non-negligible Doppler shift is induced by
             the target motion, the waveform matrix formed from the
             complementary sequences is no longer unitary, resulting in
             significantly degraded target range estimates. To solve this
             problem, we adopt a subspace based approach exploiting the
             observation that the receive matrix formed from matched
             filtering of the reflected waveforms has a (non-trivial)
             null-space. Through processing of the waveforms with the
             appropriate vector from the null-space, we can significantly
             improve the detection performance. We provide simulation
             results to confirm the theoretical analysis. ©2009
             IEEE.},
   Doi = {10.1109/ALLERTON.2009.5394498},
   Key = {fds235936}
}

@article{fds235943,
   Author = {Qureshi, T and Zoltowski, M and Calderbank, R},
   Title = {Target detection in MIMO radar using golay complementary
             sequences in the presence of doppler},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {156-159},
   Publisher = {IEEE},
   Year = {2009},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2009.5470143},
   Abstract = {In this paper, we present a method for detecting a point
             target using multiple antennas when the relative motion
             between the receivers and the target induces a non-trivial
             Doppler shift. We consider a 4×4 system employing sets of
             unitary waveforms. In case of a non-trivial Doppler shift
             induced by the target motion, the waveforms are no longer
             unitary, and unambiguous target ranging is not possible. To
             solve this problem, we adopt a subspace based approach where
             we show that the unitary waveforms used have a non-empty
             null-space under certain conditions, and by processing the
             waveforms with vectors from the null-space, we can
             significantly improve the detection performance. © 2009
             IEEE.},
   Doi = {10.1109/ACSSC.2009.5470143},
   Key = {fds235943}
}

@article{fds235769,
   Author = {Calderbank, R},
   Title = {Technology as driver of change in telecommunications},
   Pages = {69-86},
   Publisher = {Springer Verlag},
   Year = {2006},
   Month = {January},
   url = {http://dx.doi.org/10.1007/3-540-32556-5_4},
   Doi = {10.1007/3-540-32556-5_4},
   Key = {fds235769}
}

@article{fds236011,
   Author = {Calderbank, AR},
   Title = {The application of invariant theory to the existence of
             quasi-symmetric designs},
   Journal = {Journal of Combinatorial Theory, Series A},
   Volume = {44},
   Number = {1},
   Pages = {94-109},
   Publisher = {Elsevier BV},
   Year = {1987},
   Month = {January},
   ISSN = {0097-3165},
   url = {http://dx.doi.org/10.1016/0097-3165(87)90062-8},
   Abstract = {Gleason and Mallows and Sloane characterized the weight
             enumerators of maximal self-orthogonal codes with all
             weights divisible by 4. We apply these results to obtain a
              new necessary condition for the existence of 2-(v, k, λ)
              designs where the intersection numbers s_1, ..., s_n satisfy
              s_1 ≡ s_2 ≡ ... ≡ s_n (mod 2). Non-existence of
             quasi-symmetric 2-(21, 18, 14), 2-(21, 9, 12), and 2-(35, 7,
             3) designs follows directly from the theorem. We also
             eliminate quasi-symmetric 2-(33, 9, 6) designs. We prove
             that the blocks of quasi-symmetric 2-(19, 9, 16), 2-(20, 10,
              18), 2-(20, 8, 14), and 2-(22, 8, 12) designs are obtained
              from octads and dodecads in the [24, 12] Golay code. Finally
              we eliminate quasi-symmetric 2-(19, 9, 16) and 2-(22, 8, 12)
             designs. © 1987.},
   Doi = {10.1016/0097-3165(87)90062-8},
   Key = {fds236011}
}

@article{fds236062,
   Author = {Calderbank, AR},
   Title = {The art of signaling: fifty years of coding
             theory},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {44},
   Number = {6},
   Pages = {2561-2595},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1998},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.720549},
   Abstract = {In 1948 Shannon developed fundamental limits on the
             efficiency of communication over noisy channels. The coding
             theorem asserts that there are block codes with code rates
             arbitrarily close to channel capacity and probabilities of
             error arbitrarily close to zero. Fifty years later, codes
             for the Gaussian channel have been discovered that come
             close to these fundamental limits. There is now a
             substantial algebraic theory of error-correcting codes with
             as many connections to mathematics as to engineering
             practice, and the last 20 years have seen the construction
             of algebraic-geometry codes that can be encoded and decoded
             in polynomial time, and that beat the Gilbert-Varshamov
             bound. Given the size of coding theory as a subject, this
             review is of necessity a personal perspective, and the focus
             is reliable communication, and not source coding or
             cryptography. The emphasis is on connecting coding theories
             for Hamming and Euclidean space and on future challenges,
             specifically in data networking, wireless communication, and
             quantum information theory. © 1998 IEEE.},
   Doi = {10.1109/18.720549},
   Key = {fds236062}
}

@article{fds235999,
   Author = {Goel, S and Aggarwal, V and Yener, A and Calderbank,
             AR},
   Title = {The effect of eavesdroppers on network connectivity: A
             secrecy graph approach},
   Journal = {IEEE Transactions on Information Forensics and
             Security},
   Volume = {6},
   Number = {3 PART 1},
   Pages = {712-724},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {September},
   ISSN = {1556-6013},
   url = {http://dx.doi.org/10.1109/TIFS.2011.2148714},
   Abstract = {This paper investigates the effect of eavesdroppers on
             network connectivity, using a wiretap model and percolation
             theory. The wiretap model captures the effect of
             eavesdroppers on link security. A link exists between two
             nodes only if the secrecy capacity of that link is positive.
             Network connectivity is defined in a percolation sense,
             i.e., connectivity exists if an infinite connected component
             exists in the corresponding secrecy graph. We consider
             uncertainty in location of eavesdroppers, which is modeled
             directly at the network level as correlated failures in the
             secrecy graph. Our approach attempts to bridge the gap
             between physical layer security under uncertain channel
             state information and network level connectivity under
             secrecy constraints. For square and triangular lattice
             secrecy graphs, we obtain bounds on the percolation
             threshold, which is the critical value of the probability of
             occurrence of an eavesdropper, above which network
             connectivity does not exist. For Poisson secrecy graphs,
             degree distribution and mean value of upper and lower bounds
             on node degree are obtained. Further, inner and outer bounds
             on the achievable region for network connectivity are
             obtained. Both analytic and simulation results show that
             uncertainty in location of eavesdroppers has a dramatic
             effect on network connectivity in a secrecy graph. © 2011
             IEEE.},
   Doi = {10.1109/TIFS.2011.2148714},
   Key = {fds235999}
}

@article{fds236008,
   Author = {Calderbank, AR and Hanlon, P},
   Title = {The extension to root systems of a theorem on
             tournaments},
   Journal = {Journal of Combinatorial Theory, Series A},
   Volume = {41},
   Number = {2},
   Pages = {228-245},
   Publisher = {Elsevier BV},
   Year = {1986},
   Month = {January},
   ISSN = {0097-3165},
   url = {http://dx.doi.org/10.1016/0097-3165(86)90082-8},
   Abstract = {M. G. Kendall and B. Babington-Smith proved that if a
             tournament p′ is obtained from a tournament p by reversing
             the edges of a 3-cycle then p and p′ contain the same
             number of 3-cycles. This theorem is the basis of a
             cancellation argument used by D. Zeilberger and D. M.
             Bressoud in their recent proof of the q-analog of Dyson's
             conjecture. The theorem may be restated in terms of the root
             system An and the main result of this paper is the extension
             of this theorem to arbitrary root systems. As one
             application we give a combinatorial proof of a special case
             of the Macdonald conjecture for root systems using the
             method of Zeilberger and Bressoud. A second application is a
             combinatorial proof of the Weyl denominator formula. ©
             1986.},
   Doi = {10.1016/0097-3165(86)90082-8},
   Key = {fds236008}
}
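
%% An illustrative sketch (not code from the paper): the Kendall and
%% Babington-Smith invariance cited above is easy to check numerically by
%% reversing a 3-cycle in a random tournament and recounting cyclic triples.
import itertools
import random

def count_3cycles(adj, n):
    """Number of cyclically oriented triples in a tournament."""
    cnt = 0
    for i, j, k in itertools.combinations(range(n), 3):
        if (adj[i][j] and adj[j][k] and adj[k][i]) or \
           (adj[i][k] and adj[k][j] and adj[j][i]):
            cnt += 1
    return cnt

n = 7
adj = [[0] * n for _ in range(n)]
for i in range(n):
    for j in range(i + 1, n):
        if random.random() < 0.5:
            adj[i][j] = 1
        else:
            adj[j][i] = 1

before = count_3cycles(adj, n)

# reverse the edges of one directed 3-cycle, if the tournament has one
for i, j, k in itertools.permutations(range(n), 3):
    if adj[i][j] and adj[j][k] and adj[k][i]:
        adj[i][j], adj[j][i] = 0, 1
        adj[j][k], adj[k][j] = 0, 1
        adj[k][i], adj[i][k] = 0, 1
        break

print(before, count_3cycles(adj, n))  # the two counts agree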

@article{fds235855,
   Author = {Howard, S and Calderbank, A and Moran, W},
   Title = {The finite Heisenberg-Weyl groups in radar and
             communications},
   Journal = {Eurasip Journal on Applied Signal Processing},
   Volume = {2006},
   Number = {1},
   Publisher = {Springer Nature},
   Year = {2006},
   Month = {April},
   ISSN = {1110-8657},
   url = {http://dx.doi.org/10.1155/ASP/2006/85685},
   Abstract = {We investigate the theory of the finite Heisenberg-Weyl group
             in relation to the development of adaptive radar and to the
             construction of spreading sequences and error-correcting
             codes in communications. We contend that this group can form
             the basis for the representation of the radar environment in
             terms of operators on the space of waveforms. We also
             demonstrate, following recent developments in the theory of
             error-correcting codes, that the finite Heisenberg-Weyl
             groups provide a unified basis for the construction of useful
             waveforms/sequences for radar, communications, and the theory
             of error-correcting codes.},
   Doi = {10.1155/ASP/2006/85685},
   Key = {fds235855}
}

@article{fds326909,
   Author = {Calderbank, R and Kantor, WM},
   Title = {The geometry of two-weight codes},
   Journal = {Bulletin of the London Mathematical Society},
   Volume = {18},
   Number = {2},
   Pages = {97-122},
   Publisher = {WILEY},
   Year = {1986},
   Month = {January},
   url = {http://dx.doi.org/10.1112/blms/18.2.97},
   Abstract = {We survey the relationships between two-weight linear [n, k]
             codes over GF(q), projective (n, k, h_1, h_2) sets in
             PG(k − 1, q), and certain strongly regular graphs. We also describe
             and tabulate essentially all the known examples. © 1986,
             Oxford University Press. All rights reserved.},
   Doi = {10.1112/blms/18.2.97},
   Key = {fds326909}
}

@article{fds235792,
   Author = {Calderbank, R and Wales, DB},
   Title = {The Haemers partial geometry and the Steiner system S(5, 8,
             24)},
   Journal = {Discrete Mathematics},
   Volume = {51},
   Number = {2},
   Pages = {125-136},
   Year = {1984},
   Month = {January},
   ISSN = {0012-365X},
   url = {http://dx.doi.org/10.1016/0012-365X(84)90066-9},
   Abstract = {Haemers has constructed a partial geometry with parameters s
             = 4, t = 17, and α = 2, using properties of the
             Hoffman-Singleton graph. We describe this geometry in terms
             of the Steiner system S(5, 8, 24). © 1984.},
   Doi = {10.1016/0012-365X(84)90066-9},
   Key = {fds235792}
}

@article{fds235860,
   Author = {Liu, J and Calderbank, AR},
   Title = {The icosian code and the E8 lattice: A new 4 × 4
             space-time code with non-vanishing determinant},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1006-1010},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   url = {http://dx.doi.org/10.1109/ISIT.2006.261879},
   Abstract = {This paper introduces a new full-rate, full-diversity
             space-time code for 4 transmit antennas. The 4 × 4 codeword
             matrix consists of four 2 × 2 Alamouti blocks with entries
             from Q(i, √5), and these blocks can be viewed as
             quaternions which in turn represent rotations in R^3. The
             Alamouti blocks that appear in a codeword are drawn from the
             icosian ring consisting of all linear combinations of 120
             basic rotations corresponding to symmetries of the
             icosahedron. This algebraic structure is different from the
             Golden code, but the complex entries are taken from a
             similar underlying field. The minimum determinant is bounded
             below by a constant that is independent of the signal
             constellation, and the new code admits a simple decoding
             scheme that makes use of a geometric correspondence between
             the icosian ring and the E8 lattice. © 2006
             IEEE.},
   Doi = {10.1109/ISIT.2006.261879},
   Key = {fds235860}
}

@article{fds235905,
   Author = {Liu, J and Calderbank, AR},
   Title = {The Icosian code and the E8 lattice: A new 4 × 4
             space-time code with nonvanishing determinant},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {54},
   Number = {8},
   Pages = {3782-3789},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2008},
   Month = {August},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2008.926352},
   Abstract = {This paper introduces a new rate-2, full-diversity
             space-time code for four transmit antennas and one receive
             antenna. The 4 × 4 codeword matrix consists of four 2 × 2
             Alamouti blocks with entries from Q(i,√5), and these
             blocks can be viewed as quaternions which in turn represent
             rotations in R^3. The Alamouti blocks that appear in a
             codeword are drawn from the icosian ring consisting of all
             linear combinations of 120 basic rotations corresponding to
             symmetries of the icosahedron. This algebraic structure is
             different from the Golden code, but the complex entries are
             taken from a common underlying field. The minimum
             determinant is bounded below by a constant that is
             independent of the signal constellation, and the new code
             admits a simple decoding scheme that makes use of a
             geometric correspondence between the icosian ring and the E8
             lattice. © 2008 IEEE.},
   Doi = {10.1109/TIT.2008.926352},
   Key = {fds235905}
}
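
%% An illustrative sketch under standard assumptions (not the authors'
%% code): a 2x2 Alamouti block realizes a quaternion, its determinant is
%% the squared quaternion norm, and products of such blocks are again
%% Alamouti blocks; these are the facts behind the non-vanishing
%% determinant of the icosian construction described above.
import numpy as np

def alamouti(a, b):
    """2x2 Alamouti block built from complex entries a and b."""
    return np.array([[a, b], [-np.conj(b), np.conj(a)]])

a, b = 1 + 2j, 0.5 - 1j
A = alamouti(a, b)
print(np.linalg.det(A), abs(a) ** 2 + abs(b) ** 2)  # equal: |a|^2 + |b|^2

c, d = -1 + 1j, 2 + 0.5j
B = alamouti(c, d)
C = A @ B
print(np.allclose(C, alamouti(C[0, 0], C[0, 1])))  # True: closed under products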

@article{fds235803,
   Author = {Calderbank, AR},
   Title = {The mathematics of modems},
   Journal = {The Mathematical Intelligencer},
   Volume = {13},
   Number = {3},
   Pages = {56-65},
   Publisher = {Springer Nature},
   Year = {1991},
   Month = {September},
   ISSN = {0343-6993},
   url = {http://dx.doi.org/10.1007/BF03023836},
   Doi = {10.1007/BF03023836},
   Key = {fds235803}
}

@article{fds235816,
   Author = {Calderbank, AR and Fishburn, PC},
   Title = {The Normalized Second Moment of the Binary Lattice
             Determined by a Convolutional Code},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {40},
   Number = {1},
   Pages = {166-174},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.272475},
   Abstract = {We calculate the per-dimension mean squared error μ(S) of
             the two-state convolutional code C with generator matrix
             [1,1 + D], for the symmetric binary source S = {0,1}, and
             for the uniform source S = [0,1]. When S = {0,1}, the
             quantity μ(S) is the second moment of the coset weight
             distribution, which gives the expected Hamming distance of a
             random binary sequence from the code. When S = [0,1], the
             quantity μ(S) is the second moment of the Voronoi region of
             the modulo 2 binary lattice determined by C. The key
             observation is that a convolutional code with 2^ν states gives
             2^ν approximations to a given source sequence, and these
             approximations do not differ very much. It is possible to
             calculate the steady state distribution for the differences
             in these path metrics, and hence, the second moment. In this
             paper we shall only give details for the convolutional code
             [1,1 + D], but the method applies to arbitrary codes. We
             also define the covering radius of a convolutional code, and
             calculate this quantity for the code [1,1 + D]. © 1994
             IEEE},
   Doi = {10.1109/18.272475},
   Key = {fds235816}
}

@article{fds235969,
   Author = {Nastasescu, MM and Calderbank, AR},
   Title = {The projective Kerdock code},
   Journal = {2010 IEEE Information Theory Workshop, ITW 2010 -
             Proceedings},
   Publisher = {IEEE},
   Year = {2010},
   Month = {December},
   url = {http://dx.doi.org/10.1109/CIG.2010.5592761},
   Abstract = {Certain nonlinear binary codes can be constructed as binary
             images of Z4-linear codes under the Gray map. Examples
             include the second-order Reed-Muller code and the Kerdock
             and Preparata codes. In this paper, we consider a new
             quaternary code which is an additive subcode of the
             Z4-linear Kerdock code. The Kerdock code is the direct sum of
             a one-dimensional quaternary code and the quaternary subcode
             examined in this paper. This paper calculates the weight
             distribution of the projective Kerdock code from which the
             weight distribution of the dual code can be computed. The
             dual code is a supercode of the quaternary Preparata code.
             The projective Kerdock code is used to construct a
             deterministic measurement matrix for compressed sensing.
             Numerical experiments are presented for sparse
             reconstruction using the LASSO that show improvement over
             random Gaussian matrices of the same size. © 2010
             IEEE.},
   Doi = {10.1109/CIG.2010.5592761},
   Key = {fds235969}
}

@article{fds235838,
   Author = {Calderbank, AR and Daubechies, I},
   Title = {The pros and cons of democracy},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {48},
   Number = {6},
   Pages = {1721-1725},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2002},
   Month = {June},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/TIT.2002.1003852},
   Abstract = {The concept of democracy was introduced, in which the
             individual bits in a coarsely quantized representation of a
             signal were given equal weight in the approximation to the
             original signal. It was proved that such democratic
             representations could not achieve the same accuracy as
             optimal nondemocratic schemes. Convolutional decoding was
             found to be convenient in digital-to-analog
             conversion.},
   Doi = {10.1109/TIT.2002.1003852},
   Key = {fds235838}
}

@article{fds326890,
   Author = {Huang, J and Qiu, Q and Calderbank, R},
   Title = {The Role of Principal Angles in Subspace
             Classification},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {8},
   Pages = {1933-1945},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {April},
   url = {http://dx.doi.org/10.1109/TSP.2015.2500889},
   Abstract = {Subspace models play an important role in a wide range of
             signal processing tasks, and this paper explores how the
             pairwise geometry of subspaces influences the probability of
             misclassification. When the mismatch between the signal and
             the model is vanishingly small, the probability of
             misclassification is determined by the product of the sines
             of the principal angles between subspaces. When the mismatch
             is more significant, the probability of misclassification is
             determined by the sum of the squares of the sines of the
             principal angles. Reliability of classification is derived
             in terms of the distribution of signal energy across
             principal vectors. Larger principal angles lead to smaller
             classification error, motivating a linear transform that
             optimizes principal angles. The transform presented here
             (TRAIT) preserves some specific characteristic of each
             individual class, and this approach is shown to be
             complementary to a previously developed transform (LRT) that
             enlarges inter-class distance while suppressing intraclass
             dispersion. Theoretical results are supported by
             demonstration of superior classification accuracy on
             synthetic and measured data even in the presence of
             significant model mismatch.},
   Doi = {10.1109/TSP.2015.2500889},
   Key = {fds326890}
}
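
%% An illustrative sketch (assumed random subspaces, not the paper's data):
%% principal angles between two subspaces from the SVD of Q1^T Q2, together
%% with the product and sum-of-squares of their sines discussed in the
%% abstract above.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((20, 3))  # columns span subspace 1
B = rng.standard_normal((20, 3))  # columns span subspace 2

Q1, _ = np.linalg.qr(A)
Q2, _ = np.linalg.qr(B)

# singular values of Q1^T Q2 are the cosines of the principal angles
cosines = np.clip(np.linalg.svd(Q1.T @ Q2, compute_uv=False), -1.0, 1.0)
sines = np.sin(np.arccos(cosines))

print("product of sines     :", np.prod(sines))
print("sum of squared sines :", np.sum(sines ** 2))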

@article{fds331059,
   Author = {Calderbank, AR and Frankl, P and Graham, RL and Li, WCW and Shepp,
             LA},
   Title = {The Sperner Capacity of Linear and Nonlinear Codes for the
             Cyclic Triangle},
   Journal = {Journal of Algebraic Combinatorics: An International
             Journal},
   Volume = {2},
   Number = {1},
   Pages = {31-48},
   Year = {1993},
   Month = {January},
   url = {http://dx.doi.org/10.1023/A:1022424630332},
   Abstract = {Shannon introduced the concept of zero-error capacity of a
             discrete memoryless channel. The channel determines an
             undirected graph on the symbol alphabet, where adjacency
             means that symbols cannot be confused at the receiver. The
             zero-error or Shannon capacity is an invariant of this
             graph. Gargano, Körner, and Vaccaro have recently extended
             the concept of Shannon capacity to directed graphs. Their
             generalization of Shannon capacity is called Sperner
             capacity. We resolve a problem posed by these authors by
             giving the first example (the two orientations of the
             triangle) of a graph where the Sperner capacity depends on
             the orientations of the edges. Sperner capacity seems to be
             achieved by nonlinear codes, whereas Shannon capacity seems
             to be attainable by linear codes. In particular, linear
             codes do not achieve Sperner capacity for the cyclic
             triangle. We use Fourier analysis or linear programming to
             obtain the best upper bounds for linear codes. The bounds
             for unrestricted codes are obtained from rank arguments,
             eigenvalue interlacing inequalities and polynomial algebra.
             The statement of the cyclic q-gon problem is very simple:
             what is the maximum size N_q(n) of a subset S_n of {0, 1,
             ..., q−1}^n with the property that for every pair of
             distinct vectors x = (x_i), y = (y_i) in S_n, we have
             x_j − y_j ≡ 1 (mod q) for some j? For q = 3 (the cyclic
             triangle), we show N_3(n) ≃ 2^n. If however S_n is a
             subgroup, then we give a simple proof that (Formula
             presented.). © 1993, Kluwer Academic Publishers. All rights
             reserved.},
   Doi = {10.1023/A:1022424630332},
   Key = {fds331059}
}
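
%% An illustrative brute-force sketch of the cyclic q-gon quantity N_q(n)
%% defined in the abstract, for q = 3 and tiny n only (the search is
%% exponential and is meant solely to make the definition concrete).
from itertools import combinations, product

def valid(S, q):
    # for every ordered pair of distinct x, y in S there must be a
    # coordinate j with x_j - y_j = 1 (mod q)
    return all(any((x[j] - y[j]) % q == 1 for j in range(len(x)))
               for x in S for y in S if x != y)

def N(q, n):
    points = list(product(range(q), repeat=n))
    for r in range(len(points), 0, -1):
        if any(valid(S, q) for S in combinations(points, r)):
            return r
    return 0

for n in (1, 2):
    print(n, N(3, n))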

@article{fds236052,
   Author = {Calderbank, AR and Sloane, NJA},
   Title = {The ternary Golay code, the integers mod 9, and the
             Coxeter-Todd lattice},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {42},
   Number = {2},
   Pages = {636-637},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1996},
   Month = {December},
   ISSN = {0018-9448},
   url = {http://dx.doi.org/10.1109/18.485733},
   Abstract = {The 12-dimensional Coxeter-Todd lattice can be obtained by
             lifting the ternary Golay code to a code over the integers
             mod 9 and applying Construction A. © 1996
             IEEE.},
   Doi = {10.1109/18.485733},
   Key = {fds236052}
}

@article{fds235980,
   Author = {Kostina, V and Duarte, MF and Jafarpour, S and Calderbank,
             R},
   Title = {The value of redundant measurement in compressed
             sensing},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3656-3659},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5947143},
   Abstract = {The aim of compressed sensing is to recover attributes of
             sparse signals using very few measurements. Given an overall
             bit budget for quantization, this paper demonstrates that
             there is value to redundant measurement. The measurement
             matrices considered here are required to have the property
             that signal recovery is still possible even after dropping
             certain subsets of D measurements. It introduces the concept
             of a measurement matrix that is weakly democratic in the
             sense that the amount of information about the signal
             carried by each of the designated D-subsets is the same.
             Examples of deterministic measurement matrices that are
             weakly democratic are constructed by exponentiating
             codewords from the binary second order Reed Muller code. The
             value in rejecting D measurements that are on average
             larger is to be able to provide a finer grid for vector
             quantization of the remaining measurements, even after
             discounting the original budget by the bits used to identify
             the reject set. Simulation results demonstrate that
             redundancy improves recovery SNR, sometimes by a wide
             margin. Optimum performance occurs when a significant
             fraction of measurements are rejected. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5947143},
   Key = {fds235980}
}

@article{fds362595,
   Author = {Wu, Y and Achtzehn, A and Petrova, M and Mahonen, P and Calderbank,
             R},
   Title = {The Value of Staying Current when Beamforming},
   Year = {2010},
   Month = {July},
   Abstract = {Beamforming is a widely used method of provisioning high
             quality wireless channels that leads to high data rates and
             simple decoding structures. It requires feedback of Channel
             State Information (CSI) from receiver to transmitter, and
             the accuracy of this information is limited by rate
             constraints on the feedback channel and by delay. It is
             important to understand how the performance gains associated
             with beamforming depend on the accuracy or currency of the
             Channel State Information. This paper quantifies performance
             degradation caused by aging of CSI. It uses outage
             probability to measure the currency of CSI, and to discount
             the performance gains associated with ideal beamforming.
             Outage probability is a function of the beamforming
             algorithm and results are presented for Transmit Antenna
             Selection and other widely used methods. These results are
             translated into effective diversity orders for Multiple
             Input Single Output (MISO) and Multiuser Multiple Input
             Multiple Output (MIMO) systems.},
   Key = {fds362595}
}

@article{fds235820,
   Author = {Hammons, AR and Kumar, PV and Calderbank, AR and Sloane, NJA and Solé,
             P},
   Title = {The Z4-Linearity of Kerdock, Preparata, Goethals, and
             Related Codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {40},
   Number = {2},
   Pages = {301-319},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1994},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.312154},
   Abstract = {Certain notorious nonlinear binary codes contain more
             codewords than any known linear code. These include the
             codes constructed by Nordstrom-Robinson, Kerdock, Preparata,
             Goethals, and Delsarte-Goethals. It is shown here that all
             these codes can be very simply constructed as binary images
             under the Gray map of linear codes over Z4, the integers mod
             4 (although this requires a slight modification of the
             Preparata and Goethals codes). The construction implies that
             all these binary codes are distance invariant. Duality in
             the Z4 domain implies that the binary images have dual
             weight distributions. The Kerdock and “Preparata” codes
             are duals over Z4—and the Nordstrom-Robinson code is
             self-dual—which explains why their weight distributions
             are dual to each other. The Kerdock and “Preparata”
             codes are Z4-analogues of first-order Reed-Muller and
             extended Hamming codes, respectively. All these codes are
             extended cyclic codes over Z4, which greatly simplifies
             encoding and decoding. An algebraic hard-decision decoding
             algorithm is given for the “Preparata” code and a
             Hadamard-transform soft-decision decoding algorithm for the
             Kerdock code. Binary first- and second-order Reed-Muller
             codes are also linear over Z4, but extended Hamming codes of
             length n > 32 and the Golay code are not. Using
             Z4-linearity, a new family of distance regular graphs are
             constructed on the cosets of the “Preparata” code. ©
             1994 IEEE},
   Doi = {10.1109/18.312154},
   Key = {fds235820}
}
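
%% An illustrative sketch of the standard Gray map used in the abstract
%% (textbook definition, not the paper's code): Lee distance over Z4 equals
%% Hamming distance between the binary images, which is the isometry behind
%% the distance invariance noted above.
GRAY = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}

def gray_image(word):
    """Binary image of a Z4 word under the Gray map."""
    return tuple(bit for s in word for bit in GRAY[s])

def lee_distance(u, v):
    return sum(min((a - b) % 4, (b - a) % 4) for a, b in zip(u, v))

def hamming_distance(u, v):
    return sum(a != b for a, b in zip(u, v))

u, v = (0, 1, 2, 3), (2, 1, 3, 0)
print(lee_distance(u, v), hamming_distance(gray_image(u), gray_image(v)))  # 4 4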

@article{fds235790,
   Author = {Calderbank, AR and Goethals, JM},
   Title = {THREE-WEIGHT CODES AND ASSOCIATION SCHEMES.},
   Journal = {Philips Journal of Research},
   Volume = {39},
   Number = {4-5},
   Pages = {143-152},
   Year = {1984},
   Month = {December},
   Abstract = {Three-weight projective codes C are considered for which the
             restriction to C of the Hamming association scheme H//n(q)
             is an association scheme with three classes. Sufficient
             conditions are established and restrictions on the three
             weights of C are obtained. It is shown in the binary case
             that the three-weight subcodes of the shortened second-order
             Reed-Muller codes provide a large class of examples.
             Previously known examples were the duals of perfect
             3-error-correcting or uniformly packed 2-error-correcting
             codes.},
   Key = {fds235790}
}

@article{fds343647,
   Author = {Vahid, A and Calderbank, R},
   Title = {Throughput region of spatially correlated interference
             packet networks},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {65},
   Number = {2},
   Pages = {1220-1235},
   Year = {2019},
   Month = {February},
   url = {http://dx.doi.org/10.1109/TIT.2018.2860041},
   Abstract = {In multi-user wireless packet networks, interference,
             typically modeled as packet collision, is the throughput
             bottleneck. Users become aware of the interference pattern
             via feedback and use this information for contention
             resolution and packet retransmission. Conventional random
             access protocols interrupt communication to resolve
             contention, which reduces network throughput and increases
             latency and power consumption. In this paper, we take a
             different approach, and we develop opportunistic random
             access protocols rather than pursuing conventional methods.
             We allow wireless nodes to communicate without interruption
             and to observe the interference pattern. We then use this
             interference pattern knowledge and channel statistics to
             counter the negative impact of interference. We prove the
             optimality of our protocols using an extremal rank-ratio
             inequality. An important part of our contributions is the
             integration of spatial correlation in our assumptions and
             results. We identify spatial correlation regimes in which
             inherently outdated feedback becomes as good as idealized
             instantaneous feedback and correlation regimes in which
             feedback does not provide any throughput gain. To better
             illustrate the results, and as an intermediate step, we
             characterize the capacity region of finite-field spatially
             correlated interference channels with delayed channel state
             information at the transmitters.},
   Doi = {10.1109/TIT.2018.2860041},
   Key = {fds343647}
}

@article{fds303200,
   Author = {Nokleby, M and Bajwa, WU and Calderbank, R and Aazhang,
             B},
   Title = {Toward resource-optimal averaging consensus over the
             wireless medium},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Volume = {7},
   Number = {2},
   Pages = {1197-1201},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2012},
   Month = {December},
   url = {http://arxiv.org/abs/1208.3251v2},
   Abstract = {We carry out a comprehensive study of the resource costs of
             distributed averaging consensus in wireless sensor networks.
             In particular, we consider two metrics appropriate to the
             wireless medium: total transmit energy and time-bandwidth
             product. Most previous approaches, such as gossip
             algorithms, suppose a graphical network, which abstracts
             away crucial features of the wireless medium, and measure
             resource consumption only in terms of the total number of
             transmissions required to achieve consensus. Under a
             path-loss dominated protocol interference model, we study
             the performance of several popular gossip algorithms,
             showing that they are nearly order-optimal with respect to
             transmit energy but strictly sub-optimal with respect to
             time-bandwidth product. We also propose a new scheme, termed
             hierarchical averaging, which is tailored to the wireless
             medium, and show that in general this approach is nearly
             order-optimal with respect to time-bandwidth product but
             strictly sub-optimal with respect to transmit energy. For
             the special case of free-space propagation, however, the
             proposed hierarchical scheme is approximately order-optimal
             with respect to both metrics. © 2012 IEEE.},
   Doi = {10.1109/ACSSC.2012.6489211},
   Key = {fds303200}
}

@article{fds322367,
   Author = {Nokleby, M and Bajwa, WU and Calderbank, R and Aazhang,
             B},
   Title = {Toward resource-optimal consensus over the wireless
             medium},
   Journal = {IEEE Journal on Selected Topics in Signal
             Processing},
   Volume = {7},
   Number = {2},
   Pages = {284-295},
   Year = {2013},
   Month = {January},
   url = {http://dx.doi.org/10.1109/JSTSP.2013.2246765},
   Abstract = {We carry out a comprehensive study of the resource cost of
             averaging consensus in wireless networks. Most previous
             approaches suppose a graphical network, which abstracts away
             crucial features of the wireless medium, and measure
             resource consumption only in terms of the total number of
             transmissions required to achieve consensus. Under a
             path-loss model, we study the resource requirements of
             consensus with respect to three wireless-appropriate
             metrics: total transmit energy, elapsed time, and
             time-bandwidth product. First, we characterize the
             performance of several popular gossip algorithms, showing
             that they may be order-optimal with respect to transmit
             energy but are strictly suboptimal with respect to elapsed
             time and time-bandwidth product. Further, we propose a new
             consensus scheme, termed hierarchical averaging, and show
             that it is nearly order-optimal with respect to all three
             metrics. Finally, we examine the effects of quantization,
             showing that hierarchical averaging provides a nearly
             order-optimal tradeoff between resource consumption and
             quantization error. © 2013 IEEE.},
   Doi = {10.1109/JSTSP.2013.2246765},
   Key = {fds322367}
}
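
%% A minimal randomized-gossip sketch (assumed complete graph and pairwise
%% averaging; not the hierarchical scheme of the paper), included only to
%% make concrete the averaging-consensus primitive whose wireless resource
%% cost is studied above.
import random

random.seed(1)
x = [random.random() for _ in range(10)]  # initial node measurements
target = sum(x) / len(x)

for _ in range(2000):
    i, j = random.sample(range(len(x)), 2)
    x[i] = x[j] = (x[i] + x[j]) / 2       # one gossip exchange

print(max(abs(v - target) for v in x))    # near zero: consensus on the mean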

@article{fds235997,
   Author = {Chi, Y and Gomaa, A and Al-Dhahir, N and Calderbank,
             AR},
   Title = {Training signal design and tradeoffs for
             spectrally-efficient multi-user MIMO-OFDM
             systems},
   Journal = {IEEE Transactions on Wireless Communications},
   Volume = {10},
   Number = {7},
   Pages = {2234-2245},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2011},
   Month = {July},
   ISSN = {1536-1276},
   url = {http://dx.doi.org/10.1109/TWC.2011.042211.101100},
   Abstract = {In this paper, we design MMSE-optimal training sequences for
             multi-user MIMO-OFDM systems with an arbitrary number of
             transmit antennas and an arbitrary number of training
             symbols. The design addresses spectrally-efficient uplink
             transmission scenarios where the users overlap in time and
             frequency and are separated using spatial processing at the
             base station. The robustness of the proposed training
             sequences to residual carrier frequency offset and phase
             noise is evaluated. This analysis reveals an interesting
             design tradeoff between the peak-to-average power ratio of a
             training sequence and the increase in channel estimation
             mean squared error over the ideal case when these two
             impairments are not present. © 2011 IEEE.},
   Doi = {10.1109/TWC.2011.042211.101100},
   Key = {fds235997}
}

@article{fds236004,
   Author = {Bajwa, WU and Calderbank, R and Mixon, DG},
   Title = {Two are better than one: Fundamental parameters of frame
             coherence},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {33},
   Number = {1},
   Pages = {58-78},
   Year = {2012},
   Month = {July},
   ISSN = {1063-5203},
   url = {http://dx.doi.org/10.1016/j.acha.2011.09.005},
   Abstract = {This paper investigates two parameters that measure the
             coherence of a frame: worst-case and average coherence. We
             first use worst-case and average coherence to derive
             near-optimal probabilistic guarantees on both sparse signal
             detection and reconstruction in the presence of noise. Next,
             we provide a catalog of nearly tight frames with small
             worst-case and average coherence. Later, we find a new lower
             bound on worst-case coherence; we compare it to the Welch
             bound and use it to interpret recently reported signal
             reconstruction results. Finally, we give an algorithm that
             transforms frames in a way that decreases average coherence
             without changing the spectral norm or worst-case coherence.
             © 2011 Elsevier Inc. All rights reserved.},
   Doi = {10.1016/j.acha.2011.09.005},
   Key = {fds236004}
}
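
%% An illustrative sketch (a random Gaussian frame as a stand-in example;
%% the formulas below follow the definitions of worst-case and average
%% coherence as I understand them, so treat them as assumptions rather than
%% the paper's notation).
import numpy as np

rng = np.random.default_rng(0)
n, N = 8, 20
F = rng.standard_normal((n, N))
F /= np.linalg.norm(F, axis=0)           # unit-norm frame elements (columns)

off = F.T @ F - np.eye(N)                # off-diagonal inner products

worst_case = np.max(np.abs(off))                        # mu(F)
average = np.max(np.abs(off.sum(axis=1))) / (N - 1)     # nu(F)

print("worst-case coherence:", worst_case)
print("average coherence   :", average)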

@article{fds326884,
   Author = {Vahid, A and Calderbank, R},
   Title = {Two-user erasure interference channels with local delayed
             CSIT},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {62},
   Number = {9},
   Pages = {4910-4923},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {September},
   url = {http://dx.doi.org/10.1109/TIT.2016.2594224},
   Abstract = {We study the capacity region of two-user erasure
             interference channels with local delayed channel state
             information at the transmitters. In our model, transmitters
             have local mismatched outdated knowledge of the channel
             gains. We propose a transmission strategy that only relies
             on the delayed knowledge of the outgoing links at each
             transmitter and achieves the outer bound for the scenario in
             which transmitters learn the entire channel state with
             delay. Our result reveals the subset of the channel state
             information that affects the capacity region the most. We
             also identify cases in which local delayed knowledge of the
             channel state does not provide any gain over the zero
             knowledge assumption. To do so, we revisit a long-known
             intuition about interference channels that as long as the
             marginal distributions at the receivers are conserved, the
             capacity remains the same. We take this intuition and impose
             a certain spatial correlation among channel gains such that
             the marginal distributions remain unchanged. Then, we
             provide an outer bound on the capacity region of the channel
             with correlation that matches the capacity region when
             transmitters do not have access to channel state
             information.},
   Doi = {10.1109/TIT.2016.2594224},
   Key = {fds326884}
}

@article{fds235789,
   Author = {Bremner, A and Calderbank, R and Hanlon, P and Morton, P and Wolfskill,
             J},
   Title = {Two-weight ternary codes and the equation y^2 = 4
             × 3^a + 13},
   Journal = {Journal of Number Theory},
   Volume = {16},
   Number = {2},
   Pages = {212-234},
   Year = {1983},
   Month = {January},
   ISSN = {0022-314X},
   url = {http://dx.doi.org/10.1016/0022-314X(83)90042-2},
   Abstract = {This paper determines the parameters of all two-weight
             ternary codes C with the property that the minimum weight in
             the dual code C⊥ is at least 4. This yields a
             characterization of uniformly packed ternary [n, k, 4]
             codes. The proof rests on finding all integer solutions of
             the equation y^2 = 4 × 3^a + 13. © 1983.},
   Doi = {10.1016/0022-314X(83)90042-2},
   Key = {fds235789}
}
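
%% A quick enumeration of small solutions of y^2 = 4*3^a + 13, the
%% Diophantine equation on which the classification rests (an illustrative
%% sketch over a finite range, not the paper's complete determination).
from math import isqrt

for a in range(60):
    rhs = 4 * 3 ** a + 13
    y = isqrt(rhs)
    if y * y == rhs:
        print(f"a = {a}, y = {y}")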

@article{fds235977,
   Author = {Wolff, J and Martens, M and Jafarpour, S and Daubechies, I and Calderbank, R},
   Title = {Uncovering elements of style},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {1017-1020},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5946579},
   Abstract = {This paper relates the style of 16th century Flemish
             paintings by Goossen van der Weyden (GvdW) to the style of
             preliminary sketches or underpaintings made prior to
             executing the painting. Van der Weyden made underpaintings
             in markedly different styles for reasons as yet not
             understood by art historians. The analysis presented here
             starts from a classification of the underpaintings into four
             distinct styles by experts in art history. Analysis of the
             painted surfaces by a combination of wavelet analysis,
             hidden Markov trees and boosting algorithms can distinguish
             the four underpainting styles with greater than 90%
             cross-validation accuracy. On a subsequent blind test this
             classifier provided insight into the hypothesis by art
             historians that different patches of the finished painting
             were executed by different hands. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5946579},
   Key = {fds235977}
}

@article{fds235904,
   Author = {Zoltowski, MD and Qureshi, TR and Calderbank, R and Moran,
             W},
   Title = {Unitary design of radar waveform diversity
             sets},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {26-30},
   Publisher = {IEEE},
   Year = {2008},
   Month = {December},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2008.5074353},
   Abstract = {In this work, multiple radar waveforms are simultaneously
             transmitted, emitted from different "virtual" antennas. The
             goal is to process the returns in such a way that the
             overall ambiguity function is a sum of ambiguity functions
             better approximating the desired thumbtack shape. A 4×4
             example involves two spatially separated antennas with each
             able to transmit and receive simultaneously on two different
             polarizations. The 4×4 unitary design dictates the
             scheduling of the waveforms over the four virtual antennas
             over four PRIs (Pulse Repetition Intervals), and how the
             matched filtering of the returns over four PRIs is combined
             to achieve both perfect separation (of the superimposed
             returns) and perfect reconstruction. Perfect reconstruction
             means the sum of the time-autocorrelations associated with
             each of the four waveforms is a delta function. Conditions
             for both perfect separation and perfect reconstruction are
             developed, and a variety of waveform sets satisfying both
             are presented. © 2008 IEEE.},
   Doi = {10.1109/ACSSC.2008.5074353},
   Key = {fds235904}
}

@article{fds235777,
   Author = {Zoltowski, MD and Qureshi, TR and Calderbank, R and Moran,
             B},
   Title = {Unitary Design of Radar Waveform Diversity
             Sets},
   Pages = {211-230},
   Publisher = {JOHN WILEY & SONS INC},
   Year = {2010},
   Month = {April},
   url = {http://dx.doi.org/10.1002/9780470487068.ch7},
   Doi = {10.1002/9780470487068.ch7},
   Key = {fds235777}
}

@article{fds235998,
   Author = {Qureshi, TR and Zoltowski, MD and Calderbank, R and Pezeshki,
             A},
   Title = {Unitary design of radar waveform diversity
             sets},
   Journal = {Digital Signal Processing: A Review Journal},
   Volume = {21},
   Number = {5},
   Pages = {552-567},
   Publisher = {Elsevier BV},
   Year = {2011},
   Month = {January},
   ISSN = {1051-2004},
   url = {http://dx.doi.org/10.1016/j.dsp.2010.09.006},
   Abstract = {In this work, multiple radar waveforms are simultaneously
             transmitted, emitted from different antennas. The goal is to
             process the returns in such a way that the overall ambiguity
             function is a sum of individual ambiguity functions, such
             that the sum better approximates the ideal thumbtack shape.
             A unitary design for the illustrative 4×4 example
             prescribes the scheduling of the waveforms over four
             transmit antennas over four PRIs. Further, it dictates how
             the matched filtering of the returns over four PRIs is
             combined in such a way so as to achieve both perfect
             separation (of the superimposed returns) AND perfect
             reconstruction. Perfect reconstruction implies that the sum
             of the time-autocorrelations associated with each of the
             four waveforms is a delta function. The net result of the
             processing of four PRIs over four virtual antennas yields 16
             cross-correlations all of which ideally exhibit a sharp peak
             at the target delay. Conditions for both perfect separation
             and perfect reconstruction are developed, and a variety of
             waveform sets satisfying both are presented. Doppler
             compensation is achieved by a data-dependent weighting of
             the different PRI matched-filtered outputs prior to summing.
             Simulations are presented verifying the efficacy of the
             proposed unitary waveform matrix designs in conjunction with
             the proposed Doppler compensation technique. © 2010
             Elsevier Inc. All rights reserved.},
   Doi = {10.1016/j.dsp.2010.09.006},
   Key = {fds235998}
}
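
%% An illustrative sketch with a standard length-4 Golay complementary pair
%% (not the paper's waveform set): the autocorrelations sum to a delta
%% function, which is the "perfect reconstruction" property invoked in the
%% abstracts above.
import numpy as np

a = np.array([1, 1, 1, -1], dtype=float)
b = np.array([1, 1, -1, 1], dtype=float)

def autocorr(x):
    return np.correlate(x, x, mode="full")

print(autocorr(a) + autocorr(b))  # [0 0 0 8 0 0 0]: impulse at zero lag only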

@article{fds235763,
   Author = {Kumar, PV and Helleseth, T and Calderbank, AR},
   Title = {Upper bound for some exponential sums over Galois rings and
             applications},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {70-},
   Year = {1994},
   Month = {December},
   ISSN = {2157-8095},
   url = {http://dx.doi.org/10.1109/ISIT.1994.394900},
   Abstract = {An upper bound for Weil-type exponential sums over Galois
             rings is presented together with some examples where the
             bound is tight. The bound may be regarded as the Galois-ring
             analogue of the well-known Weil-Carlitz-Uchiyama bound for
             exponential sums over finite fields. An application of the
             bound to the design of large families of eight-phase
             sequences having low correlation is also
             given.},
   Doi = {10.1109/ISIT.1994.394900},
   Key = {fds235763}
}

@article{fds236032,
   Author = {Calderbank, AR and Pottie, GJ},
   Title = {Upper Bounds for Small Trellis Codes},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {38},
   Number = {6},
   Pages = {1791-1795},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1992},
   Month = {January},
   url = {http://dx.doi.org/10.1109/18.165452},
   Abstract = {An upper bound on the minimum squared distance of trellis
             codes by packing Voronoi cells is derived, and we compare
             this bound with previously known bounds. The bound is tight
             to search results for coset codes with a small number of
             states. © 1992 IEEE},
   Doi = {10.1109/18.165452},
   Key = {fds236032}
}

@article{fds376537,
   Author = {Calderbank, AR and Mazo, JE and Shapiro, HM},
   Title = {Upper Bounds on the Minimum Distance of Trellis
             Codes},
   Journal = {Bell System Technical Journal},
   Volume = {62},
   Number = {8},
   Pages = {2617-2646},
   Year = {1983},
   Month = {January},
   url = {http://dx.doi.org/10.1002/j.1538-7305.1983.tb03197.x},
   Abstract = {A trellis code is a “sliding window” method of encoding
             a binary data stream into a sequence of real numbers that
             are input to a noisy transmission channel. When a trellis
             code is used to encode data at the rate of k bits/channel
             symbol, each channel input will depend not only on the most
             recent block of k data bits to enter the encoder but will
             also depend on, say, the v bits preceding this block. The v
             bits determine the state of the encoder and the most recent
             block of k bits generates the channel symbol conditional on
             the encoder state. The performance of trellis codes, like
             that of block codes, depends on a suitably defined
             minimum‐distance property of the code. In this paper we
             obtain upper bounds on this minimum distance that are simple
             functions of k and v. These results also provide a lower
             bound on the number of states required to achieve a specific
             coding gain. © 1983 The Bell System Technical
             Journal},
   Doi = {10.1002/j.1538-7305.1983.tb03197.x},
   Key = {fds376537}
}

@article{fds235788,
   Author = {Calderbank, AR and Mazo, JE and Shapiro, HM},
   Title = {UPPER BOUNDS ON THE MINIMUM DISTANCE OF TRELLIS
             CODES.},
   Journal = {The Bell System technical journal},
   Volume = {62},
   Number = {8 pt 1},
   Pages = {2617-2646},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {1983},
   url = {http://dx.doi.org/10.1002/j.1538-7305.1983.tb03197.x},
   Abstract = {A trellis code is a 'sliding window' method of encoding a
             binary data stream into a sequence of real numbers that are
             input to a noisy transmission channel. When a trellis code
             is used to encode data at the rate of k bits/channel symbol,
             each channel input will depend not only on the most recent
             block of k data bits to enter the encoder but will also
             depend on, say, the nu bits preceding this block. The
             performance of trellis codes, like that of block codes,
             depends on a suitably defined minimum-distance property of
             the code. This paper obtains upper bounds on this minimum
             distance that are simple functions of k and nu. These
             results also provide a lower bound on the number of states
             required to achieve a specific coding gain.},
   Doi = {10.1002/j.1538-7305.1983.tb03197.x},
   Key = {fds235788}
}

@article{fds326749,
   Author = {Campbell, K and Carpenter, KLH and Espinosa, S and Hashemi, J and Qiu,
             Q and Tepper, M and Calderbank, R and Sapiro, G and Egger, HL and Baker,
             JP and Dawson, G},
   Title = {Use of a Digital Modified Checklist for Autism in Toddlers -
             Revised with Follow-up to Improve Quality of Screening for
             Autism.},
   Journal = {J Pediatr},
   Volume = {183},
   Pages = {133-139.e1},
   Year = {2017},
   Month = {April},
   url = {http://dx.doi.org/10.1016/j.jpeds.2017.01.021},
   Abstract = {OBJECTIVES: To assess changes in quality of care for
             children at risk for autism spectrum disorders (ASD) due to
             process improvement and implementation of a digital
             screening form. STUDY DESIGN: The process of screening for
             ASD was studied in an academic primary care pediatrics
             clinic before and after implementation of a digital version
             of the Modified Checklist for Autism in Toddlers - Revised
             with Follow-up with automated risk assessment. Quality
             metrics included accuracy of documentation of screening
             results and appropriate action for positive screens
             (secondary screening or referral). Participating physicians
             completed pre- and postintervention surveys to measure
             changes in attitudes toward feasibility and value of
             screening for ASD. Evidence of change was evaluated with
             statistical process control charts and χ2 tests. RESULTS:
             Accurate documentation in the electronic health record of
             screening results increased from 54% to 92% (38% increase,
             95% CI 14%-64%) and appropriate action for children
             screening positive increased from 25% to 85% (60% increase,
             95% CI 35%-85%). A total of 90% of participating physicians
             agreed that the transition to a digital screening form
             improved their clinical assessment of autism risk.
             CONCLUSIONS: Implementation of a tablet-based digital
             version of the Modified Checklist for Autism in Toddlers -
             Revised with Follow-up led to improved quality of care for
             children at risk for ASD and increased acceptability of
             screening for ASD. Continued efforts towards improving the
             process of screening for ASD could facilitate rapid, early
             diagnosis of ASD and advance the accuracy of studies of the
             impact of screening.},
   Doi = {10.1016/j.jpeds.2017.01.021},
   Key = {fds326749}
}

@article{fds235857,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {Utility-optimal medium access control: Reverse and forward
             engineering},
   Journal = {Proceedings - IEEE INFOCOM},
   Publisher = {IEEE},
   Year = {2006},
   Month = {December},
   ISSN = {0743-166X},
   url = {http://dx.doi.org/10.1109/INFOCOM.2006.252},
   Abstract = {This paper analyzes and designs medium access control (MAC)
             protocols for wireless ad-hoc networks through the network
             utility maximization (NUM) framework. We first
             reverse-engineer the current exponential backoff (EB) type
             of MAC protocols such as the BEB (binary exponential
             backoff) in the IEEE 802.11 standard through a
             non-cooperative game-theoretic model. This MAC protocol is
             shown to be implicitly maximizing, using a stochastic
             subgradient, a selfish local utility at each link in the
             form of expected net reward for successful transmission.
             While the existence of a Nash equilibrium can be
             established, neither convergence nor social welfare
             optimality is guaranteed due to the inadequate feedback
             mechanism in the EB protocol. This motivates the
             forward-engineering part of the paper, where a network-wide
             utility maximization problem is formulated, using a
             collision and persistence probability model and aligning
             selfish utility with total social welfare. By adjusting the
             parameters in the utility objective functions of the NUM
             problem, we can also control the tradeoff between efficiency
             and fairness of radio resource allocation through a rigorous
             and systematic design. We develop two distributed algorithms
             to solve the MAC design NUM problem, which lead to random
             access protocols that have slightly more message passing
             overhead than the current EB protocol, but significant
             potential for efficiency and fairness improvement. We
             provide readily-verifiable sufficient conditions under which
             convergence of the proposed algorithms to a global
             optimality of network utility can be guaranteed, and through
             numerical examples illustrate the value of the NUM approach
             to the complexity-performance tradeoff in MAC design. ©
             2006 IEEE.},
   Doi = {10.1109/INFOCOM.2006.252},
   Key = {fds235857}
}

@article{fds235888,
   Author = {Lee, JW and Chiang, M and Calderbank, AR},
   Title = {Utility-optimal random-access control},
   Journal = {IEEE Transactions on Wireless Communications},
   Volume = {6},
   Number = {7},
   Pages = {2741-2750},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2007},
   Month = {July},
   ISSN = {1536-1276},
   url = {http://dx.doi.org/10.1109/TWC.2007.05991},
   Abstract = {This paper designs medium access control (MAC) protocols for
             wireless networks through the network utility maximization
             (NUM) framework. A network-wide utility maximization problem
             is formulated, using a collision/persistence probabilistic
             model and aligning selfish utility with total social
             welfare. By adjusting the parameters in the utility
             objective functions of the NUM problem, we can also control
             the tradeoff between efficiency and fairness of radio
             resource allocation. We develop two distributed algorithms
             to solve the utility-optimal random-access control problem,
             which lead to random access protocols that have slightly
             more message passing overhead than the current
             exponential-backoff protocols, but significant potential for
             efficiency and fairness improvement. We provide
             readily-verifiable sufficient conditions under which
             convergence of the proposed algorithms to a global
             optimality of network utility can be guaranteed, and
             numerical experiments that illustrate the value of the NUM
             approach to the complexity-performance tradeoff in MAC
             design. © 2007 IEEE.},
   Doi = {10.1109/TWC.2007.05991},
   Key = {fds235888}
}

@article{fds235893,
   Author = {Li, Y and Li, Z and Chiang, M and Calderbank, AR},
   Title = {Video transmission scheduling for Peer-to-Peer live
             streaming systems},
   Journal = {2008 IEEE International Conference on Multimedia and Expo,
             ICME 2008 - Proceedings},
   Pages = {653-656},
   Publisher = {IEEE},
   Year = {2008},
   Month = {October},
   url = {http://dx.doi.org/10.1109/ICME.2008.4607519},
   Abstract = {For Internet based video broadcasting applications such as
             IPTV, the Peer-to-Peer (P2P) streaming scheme has been found
             to be an effective solution. An important issue in live
             broadcasting is to avoid playback buffer underflow. How to
             utilize the playback buffer and upload bandwidth of peers to
             minimize the freeze-ups in playback, is the problem we try
             to solve. In this work, we propose a successive
             water-filling (SWaF) algorithm for the video transmission
             scheduling in P2P live streaming system, to minimize the
             playback freeze-ups among peers. SWaF algorithm only needs
             each peer to optimally transmit (within its uploading
             bandwidth) part of its available video segments in the
             buffer to other peers requiring the content and pass small
             amount message to some other peers. Moreover, SWaF has low
             complexity and provable optimality. Numerical results
             demonstrated the effectiveness of the proposed algorithm. ©
             2008 IEEE.},
   Doi = {10.1109/ICME.2008.4607519},
   Key = {fds235893}
}

@article{fds235975,
   Author = {Aggarwal, V and Calderbank, AR and Gilbert, G and Weinstein,
             YS},
   Title = {Volume thresholds for quantum fault tolerance},
   Journal = {Quantum Information Processing},
   Volume = {9},
   Number = {5},
   Pages = {541-549},
   Publisher = {Springer Nature},
   Year = {2010},
   Month = {October},
   ISSN = {1570-0755},
   url = {http://dx.doi.org/10.1007/s11128-010-0181-2},
   Abstract = {We introduce finite-level concatenation threshold regions
             for quantum fault tolerance. These volume thresholds are
             regions in an error probability manifold that allow for the
             implemented system dynamics to satisfy a prescribed
             implementation inaccuracy bound at a given level of quantum
             error correction concatenation. Satisfying this condition
             constitutes our fundamental definition of fault tolerance.
             The prescribed bound provides a halting condition
             identifying the attainment of fault tolerance that allows
             for the determination of the optimum choice of quantum error
             correction code(s) and number of concatenation levels. Our
             method is constructed to apply to finite levels of
             concatenation, does not require that error probabilities
             consistently decrease from one concatenation level to the
             next, and allows for analysis, without approximations, of
             physical systems characterized by non-equiprobable
             distributions of qubit error probabilities. We demonstrate
             the utility of this method via a general error model. ©
             2010 Springer Science+Business Media, LLC.},
   Doi = {10.1007/s11128-010-0181-2},
   Key = {fds235975}
}

@article{fds235909,
   Author = {Calderbank, R and Howard, SD and Moran, B},
   Title = {Waveform diversity in radar signal processing: A focus on
             the use and control of degrees of freedom},
   Journal = {IEEE Signal Processing Magazine},
   Volume = {26},
   Number = {1},
   Pages = {32-41},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2009},
   Month = {January},
   ISSN = {1053-5888},
   url = {http://dx.doi.org/10.1109/MSP.2008.930414},
   Abstract = {Complementary waveforms were developed by Golay to improve
             the sensitivity of far infrared spectrometry, and similar
             waveforms were developed by Tseng and Liu to analyze
             acoustic surface wave phenomena. Here, radar illumination
             is based on a matrix of phase-coded waveforms indexed by
             array element and by pulse-repetition interval, and the
             polarization of the constituent waveforms may also vary.
             This makes it possible to segregate and calibrate methods
             of controlling individual degrees of freedom before
             examining them in combination.},
   Doi = {10.1109/MSP.2008.930414},
   Key = {fds235909}
}
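
As background to the complementary waveforms mentioned in the abstract, the sketch below (not taken from the article) builds a binary Golay complementary pair by the standard recursive doubling construction and verifies numerically that the two aperiodic autocorrelations sum to an impulse; the pair length is illustrative.

import numpy as np

# Minimal sketch (not from the article): build a binary Golay complementary
# pair by the standard recursive doubling (a, b) -> (a|b, a|-b) and verify
# that the aperiodic autocorrelations of the pair sum to an impulse, the
# property exploited by complementary radar waveforms.

def golay_pair(m):
    """Return a Golay complementary pair of length 2**m with +/-1 entries."""
    a, b = np.array([1.0]), np.array([1.0])
    for _ in range(m):
        a, b = np.concatenate([a, b]), np.concatenate([a, -b])
    return a, b

def aperiodic_autocorrelation(x):
    return np.correlate(x, x, mode="full")

if __name__ == "__main__":
    a, b = golay_pair(4)                              # length-16 pair
    s = aperiodic_autocorrelation(a) + aperiodic_autocorrelation(b)
    # All sidelobes cancel: the sum is 2N at zero lag and 0 at every other lag.
    print(s.astype(int))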

@article{fds235907,
   Author = {Papandreou-Suppappola, A and Nehorai, A and Calderbank,
             R},
   Title = {Waveform-agile sensing and processing},
   Journal = {IEEE Signal Processing Magazine},
   Volume = {26},
   Number = {1},
   Pages = {10-11},
   Year = {2009},
   Month = {January},
   ISSN = {1053-5888},
   url = {http://dx.doi.org/10.1109/MSP.2008.930413},
   Abstract = {The six articles in this special issue focus on
             waveform-agile sensing and processing. © 2009
             IEEE.},
   Doi = {10.1109/MSP.2008.930413},
   Key = {fds235907}
}

@article{fds235829,
   Author = {Calderbank, AR and Daubechies, I and Sweldens, W and Yeo,
             BL},
   Title = {Wavelet Transforms That Map Integers to Integers},
   Journal = {Applied and Computational Harmonic Analysis},
   Volume = {5},
   Number = {3},
   Pages = {332-369},
   Publisher = {Elsevier BV},
   Year = {1998},
   Month = {January},
   url = {http://dx.doi.org/10.1006/acha.1997.0238},
   Abstract = {Invertible wavelet transforms that map integers to integers
             have important applications in lossless coding. In this
             paper we present two approaches to build integer to integer
             wavelet transforms. The first approach is to adapt the
             precoder of Laroia et al., which is used in information
             transmission; we combine it with expansion factors for the
             high and low pass band in subband filtering. The second
             approach builds upon the idea of factoring wavelet
             transforms into so-called lifting steps. This allows the
             construction of an integer version of every wavelet
             transform. Finally, we use these approaches in a lossless
             image coder and compare the results to those given in the
             literature. © 1998 Academic Press.},
   Doi = {10.1006/acha.1997.0238},
   Key = {fds235829}
}
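
The lifting idea underlying the paper can be illustrated on its simplest instance, the S transform (integer Haar transform). The sketch below covers only that special case, not the paper's general factorization of wavelet filters into lifting steps; the function names and test vector are illustrative.

import numpy as np

# Minimal sketch of the lifting idea behind integer-to-integer wavelet
# transforms, shown for the simplest case (the S transform / integer Haar
# transform). Rounding inside each lifting step keeps the output integer
# while preserving exact invertibility. This is only the simplest instance,
# not the paper's general factorization of wavelet filters into lifting steps.

def forward_s_transform(x):
    """x: 1-D integer array of even length -> (lowpass s, highpass d)."""
    even, odd = x[0::2].astype(int), x[1::2].astype(int)
    d = odd - even            # predict step: pairwise difference
    s = even + d // 2         # update step: s = floor((even + odd) / 2)
    return s, d

def inverse_s_transform(s, d):
    even = s - d // 2
    odd = d + even
    x = np.empty(2 * len(s), dtype=int)
    x[0::2], x[1::2] = even, odd
    return x

if __name__ == "__main__":
    x = np.array([5, 7, 3, 0, 12, 12, 9, 2])
    s, d = forward_s_transform(x)
    assert np.array_equal(inverse_s_transform(s, d), x)   # perfect reconstruction
    print("lowpass:", s, "highpass:", d)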

@article{fds235978,
   Author = {Goparaju, S and Calderbank, AR and Carson, WR and Rodrigues, MRD and Perez-Cruz, F},
   Title = {When to add another dimension when communicating over MIMO
             channels},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {3100-3103},
   Publisher = {IEEE},
   Year = {2011},
   Month = {August},
   ISSN = {1520-6149},
   url = {http://dx.doi.org/10.1109/ICASSP.2011.5946351},
   Abstract = {This paper introduces a divide and conquer approach to the
             design of transmit and receive filters for communication
             over a Multiple Input Multiple Output (MIMO) Gaussian
             channel subject to an average power constraint. It involves
             conversion to a set of parallel scalar channels, possibly
             with very different gains, followed by coding per
             sub-channel (i.e. over time) rather than coding across
             sub-channels (i.e. over time and space). The loss in
             performance is negligible at high signal-to-noise ratio
             (SNR) and not significant at medium SNR. The advantages are
             reduction in signal processing complexity and greater
             insight into the SNR thresholds at which a channel is first
             allocated power. This insight is a consequence of
             formulating the optimal power allocation in terms of an
             upper bound on error rate that is determined by parameters
             of the input lattice such as the minimum distance and
             kissing number. The resulting thresholds are given
             explicitly in terms of these lattice parameters. By
             contrast, when the optimization problem is phrased in terms
             of maximizing mutual information, the solution is mercury
             waterfilling, and the thresholds are implicit. © 2011
             IEEE.},
   Doi = {10.1109/ICASSP.2011.5946351},
   Key = {fds235978}
}
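
For contrast with the lattice-based thresholds described above, the sketch below implements classical water-filling over parallel scalar channels, the mutual-information benchmark mentioned at the end of the abstract (mercury/waterfilling for finite constellations is more involved and is not reproduced here). The channel gains and power budget are illustrative.

import numpy as np

# Minimal sketch of classical water-filling over parallel scalar channels,
# the mutual-information benchmark the abstract contrasts with its
# lattice-based thresholds. Gains and power budget are illustrative.

def waterfill(gains, total_power, tol=1e-9):
    """Allocate p_i = max(0, mu - 1/g_i) with sum(p_i) = total_power."""
    inv = 1.0 / np.asarray(gains, dtype=float)
    lo, hi = 0.0, total_power + inv.max()            # bracket the water level mu
    while hi - lo > tol:
        mu = 0.5 * (lo + hi)
        used = np.maximum(0.0, mu - inv).sum()
        lo, hi = (mu, hi) if used < total_power else (lo, mu)
    return np.maximum(0.0, 0.5 * (lo + hi) - inv)

if __name__ == "__main__":
    gains = np.array([2.0, 1.0, 0.25, 0.05])         # parallel channel gains
    p = waterfill(gains, total_power=2.0)
    print("power allocation:", np.round(p, 3))       # weak channels may receive zero power
    print("achieved rate:", round(float(np.sum(np.log2(1.0 + gains * p))), 3))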

@article{fds235974,
   Author = {Bajwa, WU and Calderbank, R and Jafarpour, S},
   Title = {Why Gabor frames? Two fundamental measures of coherence and
             their role in model selection},
   Journal = {Journal of Communications and Networks},
   Volume = {12},
   Number = {4},
   Pages = {289-307},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2010},
   Month = {January},
   ISSN = {1229-2370},
   url = {http://dx.doi.org/10.1109/JCN.2010.6388466},
   Abstract = {The problem of model selection arises in a number of
             contexts, such as subset selection in linear regression,
             estimation of structures in graphical models, and signal
             denoising. This paper studies non-asymptotic model selection
             for the general case of arbitrary (random or deterministic)
             design matrices and arbitrary nonzero entries of the signal.
             In this regard, it generalizes the notion of incoherence in
             the existing literature on model selection and introduces
             two fundamental measures of coherence, termed the
             worst-case coherence and the average coherence, among the
             columns of a design matrix. It utilizes these two measures
             of coherence to provide an in-depth analysis of a simple,
             model-order agnostic one-step thresholding (OST) algorithm
             for model selection and proves that OST is feasible for
             exact as well as partial model selection as long as the
             design matrix obeys an easily verifiable property, which is
             termed the coherence property. One of the key insights
             offered by the ensuing analysis in this regard is that OST
             can successfully carry out model selection even when methods
             based on convex optimization such as the lasso fail due to
             the rank deficiency of the submatrices of the design matrix.
             In addition, the paper establishes that if the design matrix
             has reasonably small worst-case and average coherence then
             OST performs near-optimally when either (i) the energy of
             any nonzero entry of the signal is close to the average
             signal energy per nonzero entry or (ii) the signal-to-noise
             ratio in the measurement system is not too high. Finally,
             two other key contributions of the paper are that (i) it
             provides bounds on the average coherence of Gaussian
             matrices and Gabor frames, and (ii) it extends the results
             on model selection using OST to low-complexity, model-order
             agnostic recovery of sparse signals with arbitrary nonzero
             entries. In particular, this part of the analysis in the
             paper implies that an Alltop Gabor frame together with OST
             can successfully carry out model selection and recovery of
             sparse signals irrespective of the phases of the nonzero
             entries even if the number of nonzero entries scales almost
             linearly with the number of rows of the Alltop Gabor frame.
             ©2010 KICS.},
   Doi = {10.1109/JCN.2010.6388466},
   Key = {fds235974}
}
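
The one-step thresholding (OST) procedure analyzed in the paper admits a very short sketch: correlate the observations with the columns of the design matrix and keep the columns whose correlation magnitude exceeds a threshold. The fixed threshold and problem sizes below are illustrative, not the constants from the paper.

import numpy as np

# Minimal sketch of one-step thresholding (OST) for model selection:
# correlate the observations with each column of the design matrix and keep
# the columns whose correlation magnitude exceeds a threshold.

def ost_support(A, y, threshold):
    """Indices i with |<a_i, y>| above the threshold."""
    correlations = np.abs(A.conj().T.dot(y))
    return np.flatnonzero(correlations > threshold)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    n, p, k, sigma = 256, 512, 3, 0.05
    A = rng.standard_normal((n, p)) / np.sqrt(n)     # columns have nearly unit norm
    beta = np.zeros(p)
    support = np.sort(rng.choice(p, size=k, replace=False))
    beta[support] = rng.choice([-1.0, 1.0], size=k)
    y = A.dot(beta) + sigma * rng.standard_normal(n)

    estimate = ost_support(A, y, threshold=0.6)      # illustrative threshold
    print("true support:     ", support)
    print("estimated support:", estimate)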

@article{fds235928,
   Author = {Aggarwal, V and Lai, L and Calderbank, AR and Poor,
             HV},
   Title = {Wiretap channel type II with an active eavesdropper},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {1944-1948},
   Publisher = {IEEE},
   Year = {2009},
   Month = {November},
   url = {http://dx.doi.org/10.1109/ISIT.2009.5205631},
   Abstract = {The wiretap channel type II with an active eavesdropper is
             considered in this paper. Compared with the eavesdropper
             model considered in much of the literature, the eavesdropper
             considered here can not only overhear but also modify the
             signal transmitted over the channel. Two modification
             models are considered. In the first model, the eavesdropper
             erases the bits it observes. In the second model, the
             eavesdropper modifies the bits it observes. For this channel
             with memory (introduced by the activity of the eavesdropper),
             one should conduct the worst case scenario analysis. Novel
             concatenated coding schemes that provide perfect security
             for the communications are developed for both models to give
             bounds on the achievable secrecy rate. The technique to
             modify the inner code to maintain the secrecy properties of
             the outer code may be of independent interest. © 2009
             IEEE.},
   Doi = {10.1109/ISIT.2009.5205631},
   Key = {fds235928}
}

@article{fds235781,
   Author = {Jacobvitz, AN and Calderbank, R and Sorin, DJ},
   Title = {Writing cosets of a convolutional code to increase the
             Lifetime of Flash memory},
   Journal = {2012 50th Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2012},
   Pages = {308-318},
   Publisher = {IEEE},
   Year = {2012},
   Month = {December},
   url = {http://dx.doi.org/10.1109/Allerton.2012.6483234},
   Abstract = {The goal of this paper is to extend the lifetime of Flash
             memory by reducing the frequency with which a given page of
             memory is erased. This is accomplished by increasing the
             number of writes that are possible before erasure is
             necessary. Redundancy is introduced into the write process
             to decrease the number of memory cells that are impacted by
             a given write, and to even out the impact of writing across
             an entire page of memory. Improvements are expressed in
             terms of write efficiency and lifetime gain. Write
             efficiency is the ratio of cells written to cells available,
             and lifetime gain is the ratio of coded writes to the
             baseline of uncoded writing. We use a physical model that
             allows multiple writes to a given region of memory. This can
             be realized with single level cells or with multi-level
             cells. Data is written to memory in the form of a coset of a
             convolutional code. The coset is represented by a binary
             vector that is selected by the Viterbi algorithm to minimize
             the number of cells impacted by the write (Hamming weight)
             and to even out the number of writes to each cell within a
             given page. Several different Viterbi metrics are evaluated.
             It is shown that page write efficiencies of over 85% and
             lifetime gains of over 500% are possible with only modest
             encoding and decoding complexity. It is also straightforward
             to integrate lifetime extension with standard methods of
             error correction by requiring that the coset representative
             be drawn from an error correcting code. An example is
             provided where single error correction is provided using a
             Hamming code. © 2012 IEEE.},
   Doi = {10.1109/Allerton.2012.6483234},
   Key = {fds235781}
}
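
The coset-writing idea can be illustrated in miniature. The paper selects coset representatives of a convolutional code with the Viterbi algorithm; the sketch below substitutes a tiny [7,4] Hamming block code and a brute-force search, so it only conveys the principle for a single write: the data fixes a syndrome (coset), the minimum-weight vector in that coset is written, and the reader recovers the data by recomputing the syndrome.

import itertools
import numpy as np

# Brute-force illustration of the coset-writing idea (the paper uses a
# convolutional code and the Viterbi algorithm; a [7,4] Hamming block code
# and exhaustive search are substituted here for brevity).

H = np.array([[1, 0, 1, 0, 1, 0, 1],
              [0, 1, 1, 0, 0, 1, 1],
              [0, 0, 0, 1, 1, 1, 1]])      # parity-check matrix of the [7,4] Hamming code

def syndrome(v):
    return tuple(int(b) for b in H.dot(v) % 2)

def encode_write(data_bits):
    """Minimum-weight length-7 vector whose syndrome equals the data."""
    best = None
    for candidate in itertools.product([0, 1], repeat=7):
        v = np.array(candidate)
        if syndrome(v) == tuple(data_bits) and (best is None or v.sum() < best.sum()):
            best = v
    return best

def decode_read(cells):
    return syndrome(cells)

if __name__ == "__main__":
    data = (1, 0, 1)                       # three data bits per write
    written = encode_write(data)
    print("written pattern:", written, "cells impacted:", int(written.sum()))
    print("recovered data: ", decode_read(written))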

@article{fds236044,
   Author = {Calderbank, AR and McGuire, G},
   Title = {Z4-linear codes obtained as projections of
             Kerdock and Delsarte-Goethals codes},
   Journal = {Linear Algebra and Its Applications},
   Volume = {226-228},
   Number = {C},
   Pages = {647-665},
   Publisher = {Elsevier BV},
   Year = {1995},
   Month = {January},
   ISSN = {0024-3795},
   url = {http://dx.doi.org/10.1016/0024-3795(95)00239-N},
   Abstract = {The Kerdock and Delsarte-Goethals codes can be very simply
             constructed as binary images under a certain natural map,
             called the Gray map, of linear codes over Z4, the integers
             modulo 4. We consider the Gray images of linear codes over
             Z4 obtained from the Kerdock and Delsarte-Goethals codes by
             projection on a hyperplane. For m odd, certain Gray images
             have the same weight distribution as duals of extended
             binary BCH codes of length 2^m, but are not equivalent to
             these codes. Inequivalence follows from a general theorem
             identifying binary linear codes that are not Gray images of
             linear codes over Z4. © 1995.},
   Doi = {10.1016/0024-3795(95)00239-N},
   Key = {fds236044}
}
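
The Gray map referred to in the abstract sends each Z4 symbol to a binary pair, 0 -> 00, 1 -> 01, 2 -> 11, 3 -> 10. The short check below (illustrative code, small block length) verifies that the map is an isometry from Lee distance on Z4^n to Hamming distance on F2^{2n}, the property that turns good Z4-linear codes into good, generally nonlinear, binary codes.

import itertools

# Verify that the Gray map 0 -> 00, 1 -> 01, 2 -> 11, 3 -> 10 carries
# Lee distance on Z4^n to Hamming distance on F2^{2n}.

GRAY = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}

def gray_image(word):
    return tuple(bit for symbol in word for bit in GRAY[symbol])

def lee_distance(u, v):
    return sum(min((a - b) % 4, (b - a) % 4) for a, b in zip(u, v))

def hamming_distance(u, v):
    return sum(a != b for a, b in zip(u, v))

if __name__ == "__main__":
    n = 3
    for u in itertools.product(range(4), repeat=n):
        for v in itertools.product(range(4), repeat=n):
            assert lee_distance(u, v) == hamming_distance(gray_image(u), gray_image(v))
    print("Gray map is a Lee-to-Hamming isometry on Z4^%d" % n)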

@article{fds236058,
   Author = {Calderbank, AR and Cameron, PJ and Kantor, WM and Seidel,
             JJ},
   Title = {ℤ4-Kerdock codes, orthogonal spreads, and
             extremal Euclidean line-sets},
   Journal = {Proceedings of the London Mathematical Society},
   Volume = {75},
   Number = {2},
   Pages = {436-480},
   Publisher = {Oxford University Press (OUP)},
   Year = {1997},
   ISSN = {0024-6115},
   url = {http://dx.doi.org/10.1112/S0024611597000403},
   Doi = {10.1112/S0024611597000403},
   Key = {fds236058}
}


%% Books   
@book{fds335331,
   Author = {Boche, H and Calderbank, R and Kutyniok, G and Vybíral,
             J},
   Title = {Preface},
   Pages = {ix-x},
   Year = {2015},
   Month = {January},
   Key = {fds335331}
}

@book{fds335325,
   Author = {Boche, H and Caire, G and Calderbank, R and Kutyniok, G and März, M and Mathar, R},
   Title = {Preface},
   Pages = {ix-xi},
   Year = {2017},
   Month = {January},
   Key = {fds335325}
}


%% Conference articles PUBLISHED   
@article{fds326753,
   Author = {Wang, L and Renna, F and Yuan, X and Rodrigues, M and Calderbank, R and Carin, L},
   Title = {A general framework for reconstruction and classification
             from compressive measurements with side information},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2016-May},
   Pages = {4239-4243},
   Publisher = {IEEE},
   Year = {2016},
   Month = {May},
   ISBN = {9781479999880},
   url = {http://dx.doi.org/10.1109/ICASSP.2016.7472476},
   Abstract = {We develop a general framework for compressive
             linear-projection measurements with side information. Side
             information is an additional signal correlated with the
             signal of interest. We investigate the impact of side
             information on classification and signal recovery from
             low-dimensional measurements. Motivated by real
             applications, two special cases of the general model are
             studied. In the first, a joint Gaussian mixture model is
             manifested on the signal and side information. The second
             example again employs a Gaussian mixture model for the
             signal, with side information drawn from a mixture in the
             exponential family. Theoretical results on recovery and
             classification accuracy are derived. The presence of side
             information is shown to yield improved performance, both
             theoretically and experimentally.},
   Doi = {10.1109/ICASSP.2016.7472476},
   Key = {fds326753}
}

@article{fds326888,
   Author = {Beirami, A and Calderbank, R and Christiansen, M and Duffy, K and Makhdoumi, A and Medard, M},
   Title = {A geometric perspective on guesswork},
   Journal = {2015 53rd Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2015},
   Pages = {941-948},
   Publisher = {IEEE},
   Year = {2016},
   Month = {April},
   ISBN = {9781509018239},
   url = {http://dx.doi.org/10.1109/ALLERTON.2015.7447109},
   Abstract = {Guesswork is the position at which a random string drawn
             from a given probability distribution appears in the list of
             strings ordered from the most likely to the least likely. We
             define the tilt operation on probability distributions and
             show that it parametrizes an exponential family of
             distributions, which we refer to as the tilted family of the
             source. We prove that two sources result in the same
             guesswork, i.e., the same ordering from most likely to least
             likely on all strings, if and only if they belong to the
             same tilted family. We also prove that the strings whose
             guesswork is smaller than a given string are concentrated on
             the tilted family. Applying Laplace's method, we derive
             precise approximations on the distribution of guesswork on
             i.i.d. sources. The simulations show a good match between
             the approximations and the actual guesswork for i.i.d.
             sources.},
   Doi = {10.1109/ALLERTON.2015.7447109},
   Key = {fds326888}
}
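
The tilt operation described in the abstract can be checked numerically for an i.i.d. binary source: tilting a Bernoulli(p) source by t yields another i.i.d. source with parameter p^t / (p^t + (1-p)^t), and both induce the same ordering of strings from most to least likely, hence the same guesswork. The string length and parameters below are illustrative.

import itertools
import numpy as np

# Numerical check of the tilted-family statement for an i.i.d. binary source:
# the source and its tilt order all strings identically, so they share the
# same guesswork.

def string_log_prob(s, p1):
    ones = sum(s)
    return ones * np.log(p1) + (len(s) - ones) * np.log(1.0 - p1)

def guesswork_order(p1, n):
    """Strings of length n ordered from most likely to least likely."""
    strings = list(itertools.product([0, 1], repeat=n))
    return sorted(strings, key=lambda s: -string_log_prob(s, p1))

if __name__ == "__main__":
    n, p1, t = 6, 0.3, 2.5
    p1_tilted = p1**t / (p1**t + (1.0 - p1)**t)   # tilt of a Bernoulli(p1) source
    assert guesswork_order(p1, n) == guesswork_order(p1_tilted, n)
    print("same guesswork ordering for the source and its tilt (t = %.1f)" % t)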

@article{fds235785,
   Author = {Calderbank, R and Das, S and Al-Dhahir, N and Diggavi,
             S},
   Title = {A novel full-rate full-diversity STBC with application to
             WiMAX},
   Journal = {IEEE Vehicular Technology Conference},
   Volume = {3},
   Pages = {1791-1795},
   Publisher = {IEEE},
   Year = {2005},
   Month = {January},
   ISBN = {0780391527},
   ISSN = {1090-3038},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235046902082&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {We present a novel full-rate full-diversity orthogonal
             space-time block code for QPSK modulation and 4 transmit
             antennas based on quaternionic algebra. The code does not
             result in constellation expansion unlike other fullrate
             full-diversity codes in the literature. The quaternionic
             structure of the code is exploited to reduce the complexity
             of maximum likelihood (ML) coherent decoding from a size-
             256 search to a size-16 search. Furthermore, we show how to
             modify this low-complexity coherent ML decoding rule to
             derive a non-coherent differential ML decoding rule.
             Finally, extensive simulation results in a WiMAX 802.16
             broadband wireless access environment demonstrate that the
             proposed code increases the cell coverage area by factors
             of 1.5 and 2.6 compared to single-antenna transmission at
             a 10^-3 bit error rate when combined with 1 and 2 receive
             antenna(s), respectively.},
   Doi = {10.1109/VETECF.2005.1558413},
   Key = {fds235785}
}

@article{fds326893,
   Author = {Nokleby, M and Beirami, A and Calderbank, R},
   Title = {A rate-distortion framework for supervised
             learning},
   Journal = {IEEE International Workshop on Machine Learning for Signal
             Processing, MLSP},
   Volume = {2015-November},
   Publisher = {IEEE},
   Year = {2015},
   Month = {November},
   ISBN = {9781467374545},
   url = {http://dx.doi.org/10.1109/MLSP.2015.7324319},
   Abstract = {An information-theoretic framework is presented for bounding
             the number of samples needed for supervised learning in a
             parametric Bayesian setting. This framework is inspired by
             an analogy with rate-distortion theory, which characterizes
             tradeoffs in the lossy compression of random sources. In a
             parametric Bayesian environment, the maximum a posteriori
             classifier can be viewed as a random function of the model
             parameters. Labeled training data can be viewed as a
             finite-rate encoding of that source, and the excess loss due
             to using the learned classifier instead of the MAP
             classifier can be viewed as distortion. A strict bound on
             the loss-measured in terms of the expected total
             variation-is derived, providing a minimum number of training
             samples needed to drive the expected total variation to
             within a specified tolerance. The tightness of this bound is
             demonstrated on the classification of Gaus-sians, for which
             one can derive closed-form expressions for the
             bound.},
   Doi = {10.1109/MLSP.2015.7324319},
   Key = {fds326893}
}

@article{fds326902,
   Author = {Calderbank, R and Howard, S and Jafarpour, S},
   Title = {A Sublinear Algorithm for Sparse Reconstruction with
             ℓ2/ℓ2 Recovery Guarantees},
   Journal = {2009 3rd IEEE International Workshop on Computational
             Advances in Multi-Sensor Adaptive Processing
             (CAMSAP)},
   Pages = {209-212},
   Year = {2009},
   Key = {fds326902}
}

@article{fds335329,
   Author = {Boche, H and Calderbank, R and Kutyniok, G and Vybíral,
             J},
   Title = {A survey of compressed sensing},
   Number = {9783319160412},
   Pages = {1-39},
   Publisher = {Springer International Publishing},
   Year = {2015},
   Month = {January},
   ISBN = {9783319160412},
   url = {http://dx.doi.org/10.1007/978-3-319-16042-9_1},
   Abstract = {Compressed sensing was introduced some ten years ago as an
             effective way of acquiring signals, which possess a sparse
             or nearly sparse representation in a suitable basis or
             dictionary. Due to its solid mathematical background, it
             quickly attracted the attention of mathematicians from
             several different areas, so that the most important aspects
             of the theory are nowadays very well understood. In recent
             years, its applications started to spread out through
             applied mathematics, signal processing, and electrical
             engineering. The aim of this chapter is to provide an
             introduction into the basic concepts of compressed sensing.
             In the first part of this chapter, we present the basic
             mathematical concepts of compressed sensing, including the
             Null Space Property, Restricted Isometry Property, their
             connection to basis pursuit and sparse recovery, and
             construction of matrices with small restricted isometry
             constants. This presentation is easily accessible, largely
             self-contained, and includes proofs of the most important
             theorems. The second part gives an overview of the most
             important extensions of these ideas, including recovery of
             vectors with sparse representation in frames and
             dictionaries, discussion of (in)coherence and its
             implications for compressed sensing, and presentation of
             other algorithms of sparse recovery.},
   Doi = {10.1007/978-3-319-16042-9_1},
   Key = {fds335329}
}
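
As a concrete companion to the sparse-recovery algorithms surveyed in the chapter, the sketch below implements iterative soft thresholding (ISTA) for the l1-regularized least-squares problem min_x 0.5*||Ax - y||^2 + lam*||x||_1. The Gaussian sensing matrix, regularization weight, and iteration count are illustrative, and the code is not taken from the chapter.

import numpy as np

# Minimal sketch of one standard sparse-recovery algorithm: iterative soft
# thresholding (ISTA) for the l1-regularized least-squares problem.

def soft_threshold(z, tau):
    return np.sign(z) * np.maximum(np.abs(z) - tau, 0.0)

def ista(A, y, lam=0.05, iters=500):
    step = 1.0 / np.linalg.norm(A, 2) ** 2        # 1 / Lipschitz constant of the gradient
    x = np.zeros(A.shape[1])
    for _ in range(iters):
        x = soft_threshold(x + step * A.T.dot(y - A.dot(x)), step * lam)
    return x

if __name__ == "__main__":
    rng = np.random.default_rng(1)
    n, p, k = 100, 256, 8
    A = rng.standard_normal((n, p)) / np.sqrt(n)
    x_true = np.zeros(p)
    x_true[rng.choice(p, size=k, replace=False)] = rng.standard_normal(k)
    y = A.dot(x_true)
    x_hat = ista(A, y)
    print("relative error:", np.linalg.norm(x_hat - x_true) / np.linalg.norm(x_true))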

@article{fds322363,
   Author = {Huang, J and Qiu, Q and Calderbank, R and Rodrigues, M and Sapiro,
             G},
   Title = {Alignment with intra-class structure can improve
             classification},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2015-August},
   Pages = {1921-1925},
   Publisher = {IEEE},
   Year = {2015},
   Month = {August},
   ISBN = {9781467369978},
   url = {http://dx.doi.org/10.1109/ICASSP.2015.7178305},
   Abstract = {High dimensional data is modeled using low-rank subspaces,
             and the probability of misclassification is expressed in
             terms of the principal angles between subspaces. The form
             taken by this expression motivates the design of a new
             feature extraction method that enlarges inter-class
             separation, while preserving intra-class structure. The
             method can be tuned to emphasize different features shared
             by members within the same class. Classification performance
             is compared to that of state-of-the-art methods on synthetic
             data and on the real face database. The probability of
             misclassification is decreased when intra-class structure is
             taken into account.},
   Doi = {10.1109/ICASSP.2015.7178305},
   Key = {fds322363}
}
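
The analysis above is phrased in terms of principal angles between low-rank subspaces. The sketch below shows the standard way to compute them, via the singular values of Q1^T Q2 for orthonormal bases Q1 and Q2 of the two subspaces; the random test subspaces are illustrative.

import numpy as np

# Standard computation of principal angles between two subspaces: orthonormal
# bases via QR, then angles from the singular values of Q1^T Q2.

def principal_angles(X1, X2):
    """Principal angles (radians) between the column spans of X1 and X2."""
    Q1, _ = np.linalg.qr(X1)
    Q2, _ = np.linalg.qr(X2)
    sigma = np.linalg.svd(Q1.T.dot(Q2), compute_uv=False)
    return np.arccos(np.clip(sigma, -1.0, 1.0))

if __name__ == "__main__":
    rng = np.random.default_rng(3)
    ambient, dim = 20, 3
    X1 = rng.standard_normal((ambient, dim))
    # X2 spans nearly the same subspace as X1, perturbed by a small amount of noise.
    X2 = X1.dot(rng.standard_normal((dim, dim))) + 0.1 * rng.standard_normal((ambient, dim))
    print("principal angles (radians):", np.round(principal_angles(X1, X2), 3))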

@article{fds343652,
   Author = {Vahid, A and Calderbank, R},
   Title = {ARQ for Interference Packet Networks},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2018-June},
   Pages = {781-785},
   Year = {2018},
   Month = {August},
   ISBN = {9781538647806},
   url = {http://dx.doi.org/10.1109/ISIT.2018.8437334},
   Abstract = {In multi-user wireless packet networks interference is the
             throughput bottleneck. Users become aware of the
             interference pattern via feedback and use this information
             for contention resolution and for packet retransmission. We
             consider networks with spatially correlated wireless links,
             and we develop an opportunistic automatic repeat request
             function for these networks. We prove the optimality of our
             protocol using an extremal rank-ratio inequality for
             spatially correlated channels.},
   Doi = {10.1109/ISIT.2018.8437334},
   Key = {fds343652}
}

@article{fds335332,
   Author = {Bajwa, WU and Duarte, MF and Calderbank, R},
   Title = {Average case analysis of high-dimensional block-sparse
             recovery and regression for arbitrary designs},
   Journal = {Journal of Machine Learning Research},
   Volume = {33},
   Pages = {57-67},
   Year = {2014},
   Month = {January},
   Abstract = {This paper studies conditions for high-dimensional inference
             when the set of observations is given by a linear
             combination of a small number of groups of columns of a
             design matrix, termed the "block-sparse" case. In this
             regard, it first specifies conditions on the design matrix
             under which most of its block submatrices are well
             conditioned. It then leverages this result for average-case
             analysis of high-dimensional block-sparse recovery and
             regression. In contrast to earlier works: (i) this paper
             provides conditions on arbitrary designs that can be
             explicitly computed in polynomial time, (ii) the provided
             conditions translate into near-optimal scaling of the number
             of observations with the number of active blocks of the
             design matrix, and (iii) the conditions suggest that the
             spectral norm, rather than the column/block coherences, of
             the design matrix fundamentally limits the performance of
             computational methods in high-dimensional
             settings.},
   Key = {fds335332}
}

@article{fds326751,
   Author = {Kumar, S and Calderbank, R and Pfister, HD},
   Title = {Beyond double transitivity: Capacity-achieving cyclic codes
             on erasure channels},
   Journal = {2016 IEEE Information Theory Workshop, ITW
             2016},
   Pages = {241-245},
   Publisher = {IEEE},
   Year = {2016},
   Month = {October},
   ISBN = {9781509010905},
   url = {http://dx.doi.org/10.1109/ITW.2016.7606832},
   Abstract = {Recently, sequences of error-correcting codes with
             doubly-transitive permutation groups were shown to achieve
             capacity on erasure channels under symbol-wise maximum a
             posteriori (MAP) decoding. From this, it follows that
             Reed-Muller and primitive narrow-sense BCH codes achieve
             capacity in the same setting. In this article, we extend
             this result to a large family of cyclic codes by considering
             codes whose permutation groups satisfy a condition weaker
             than double transitivity. The article combines two simple
             technical contributions. First, we show that the transition
             width of a monotone boolean function is O(1/log k), where k
             is the size of the smallest orbit induced by its symmetry
             group. The proof is based on Talagrand's lower bound on
             influences for monotone boolean functions. Second, we
             consider the extrinsic information transfer (EXIT) function
             of an Fq-linear cyclic code whose blocklength N divides
             q^t - 1 and is coprime with q - 1. We show that this EXIT function is
             a monotone boolean function whose symmetry group contains no
             orbits of size smaller than the smallest prime divisor of t.
             Combining these, we show that sequences of cyclic codes,
             whose blocklengths satisfy the above conditions, achieve
             capacity on the q-ary erasure channel if all prime divisors
             of t tend to infinity.},
   Doi = {10.1109/ITW.2016.7606832},
   Key = {fds326751}
}

@article{fds331061,
   Author = {Calderbank, AR},
   Title = {Binary covering codes and high speed data
             transmission},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {514 LNCS},
   Pages = {319-336},
   Publisher = {Springer Berlin Heidelberg},
   Year = {1991},
   Month = {January},
   ISBN = {9783540543039},
   url = {http://dx.doi.org/10.1007/3-540-54303-1_143},
   Abstract = {There has been a great deal of recent interest in the
             covering radius of binary codes. We shall describe how good
             covering codes can be used to make high speed data
             transmission more reliable.},
   Doi = {10.1007/3-540-54303-1_143},
   Key = {fds331061}
}
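
The covering radius discussed in the abstract is the largest Hamming distance from any word of F2^n to its nearest codeword. The brute-force sketch below computes it for a small stand-in code (the [7,4] Hamming code, whose covering radius is 1); the covering codes discussed in the article are of course much longer.

import itertools
import numpy as np

# Brute-force computation of the covering radius of a small binary code.

G = np.array([[1, 0, 0, 0, 0, 1, 1],
              [0, 1, 0, 0, 1, 0, 1],
              [0, 0, 1, 0, 1, 1, 0],
              [0, 0, 0, 1, 1, 1, 1]])     # generator matrix of the [7,4] Hamming code

def covering_radius(G):
    k, n = G.shape
    codewords = [tuple(np.array(m).dot(G) % 2) for m in itertools.product([0, 1], repeat=k)]
    radius = 0
    for word in itertools.product([0, 1], repeat=n):
        nearest = min(sum(a != b for a, b in zip(word, c)) for c in codewords)
        radius = max(radius, nearest)
    return radius

if __name__ == "__main__":
    print("covering radius:", covering_radius(G))   # prints 1 for the Hamming code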

@article{fds326882,
   Author = {Reboredo, H and Renna, F and Calderbank, R and Rodrigues,
             MRD},
   Title = {Bounds on the Number of Measurements for Reliable
             Compressive Classification},
   Journal = {IEEE Transactions on Signal Processing},
   Volume = {64},
   Number = {22},
   Pages = {5778-5793},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {November},
   url = {http://dx.doi.org/10.1109/TSP.2016.2599496},
   Abstract = {This paper studies the classification of high-dimensional
             Gaussian signals from low-dimensional noisy, linear
             measurements. In particular, it provides upper bounds
             (sufficient conditions) on the number of measurements
             required to drive the probability of misclassification to
             zero in the low-noise regime, both for random measurements
             and designed ones. Such bounds reveal two important
             operational regimes that are a function of the
             characteristics of the source: 1) when the number of classes
             is less than or equal to the dimension of the space spanned
             by signals in each class, reliable classification is
             possible in the low-noise regime by using a one-vs-all
             measurement design; 2) when the dimension of the spaces
             spanned by signals in each class is lower than the number of
             classes, reliable classification is guaranteed in the
             low-noise regime by using a simple random measurement
             design. Simulation results both with synthetic and real data
             show that our analysis is sharp, in the sense that it is
             able to gauge the number of measurements required to drive
             the misclassification probability to zero in the low-noise
             regime.},
   Doi = {10.1109/TSP.2016.2599496},
   Key = {fds326882}
}

@article{fds326891,
   Author = {Goparaju, S and Rouayheb, SE and Calderbank, R},
   Title = {Can linear minimum storage regenerating codes be universally
             secure?},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Volume = {2016-February},
   Pages = {549-553},
   Publisher = {IEEE},
   Year = {2016},
   Month = {February},
   ISBN = {9781467385763},
   url = {http://dx.doi.org/10.1109/ACSSC.2015.7421189},
   Abstract = {We study the problem of making a distributed storage system
             information-theoretically secure against a passive
             eavesdropper, and aim to characterize coding schemes that
             are universally secure for up to a given number of
             eavesdropped nodes. Specifically, we consider minimum
             storage regenerating (MSR) codes and ask the following
             question: For an MSR code where a failed node is repaired
             using all the remaining nodes, is it possible to
             simultaneously be optimally secure using a single linear
             coding scheme? We define a pareto-optimality associated with
             this simultaneity and show that there exists at least one
             linear coding scheme that is pareto-optimal.},
   Doi = {10.1109/ACSSC.2015.7421189},
   Key = {fds326891}
}

@article{fds326901,
   Author = {Zoltowski, MD and Qureshi, TR and Calderbank, R},
   Title = {Channel Estimation for MIMO-OFDM using Complementary
             Codes},
   Journal = {RWS: 2009 IEEE Radio and Wireless Symposium},
   Pages = {151+},
   Year = {2009},
   ISBN = {978-1-4244-2698-0},
   Key = {fds326901}
}

@article{fds326758,
   Author = {Renna, F and Wang, L and Yuan, X and Yang, J and Reeves, G and Calderbank,
             R and Carin, L and Rodrigues, MRD},
   Title = {Classification and reconstruction of compressed GMM signals
             with side information},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2015-June},
   Pages = {994-998},
   Year = {2015},
   Month = {September},
   ISBN = {9781467377041},
   url = {http://dx.doi.org/10.1109/ISIT.2015.7282604},
   Abstract = {This paper offers a characterization of performance limits
             for classification and reconstruction of high-dimensional
             signals from noisy compressive measurements, in the presence
             of side information. We assume the signal of interest and
             the side information signal are drawn from a correlated
             mixture of distributions/components, where each component
             associated with a specific class label follows a Gaussian
             mixture model (GMM). We provide sharp sufficient and/or
             necessary conditions for the phase transition of the
             misclassification probability and the reconstruction error
             in the low-noise regime. These conditions, which are
             reminiscent of the well-known Slepian-Wolf and Wyner-Ziv
             conditions, are a function of the number of measurements
             taken from the signal of interest, the number of
             measurements taken from the side information signal, and the
             geometry of these signals and their interplay.},
   Doi = {10.1109/ISIT.2015.7282604},
   Key = {fds326758}
}

@article{fds326750,
   Author = {Renna, F and Wang, L and Yuan, X and Yang, J and Reeves, G and Calderbank,
             R and Carin, L and Rodrigues, MRD},
   Title = {Classification and Reconstruction of High-Dimensional
             Signals from Low-Dimensional Features in the Presence of
             Side Information},
   Journal = {IEEE Transactions on Information Theory},
   Volume = {62},
   Number = {11},
   Pages = {6459-6492},
   Publisher = {Institute of Electrical and Electronics Engineers
             (IEEE)},
   Year = {2016},
   Month = {November},
   url = {http://dx.doi.org/10.1109/TIT.2016.2606646},
   Abstract = {This paper offers a characterization of fundamental limits
             on the classification and reconstruction of high-dimensional
             signals from low-dimensional features, in the presence of
             side information. We consider a scenario where a decoder has
             access both to linear features of the signal of interest and
             to linear features of the side information signal; while the
             side information may be in a compressed form, the objective
             is recovery or classification of the primary signal, not the
             side information. The signal of interest and the side
             information are each assumed to have (distinct) latent
             discrete labels; conditioned on these two labels, the signal
             of interest and side information are drawn from a
             multivariate Gaussian distribution that correlates the two.
             With joint probabilities on the latent labels, the overall
             signal-(side information) representation is defined by a
             Gaussian mixture model. By considering bounds to the
             misclassification probability associated with the recovery
             of the underlying signal label, and bounds to the
             reconstruction error associated with the recovery of the
             signal of interest itself, we then provide sharp sufficient
             and/or necessary conditions for these quantities to approach
             zero when the covariance matrices of the Gaussians are
             nearly low rank. These conditions, which are reminiscent of
             the well-known Slepian-Wolf and Wyner-Ziv conditions, are
             the function of the number of linear features extracted from
             signal of interest, the number of linear features extracted
             from the side information signal, and the geometry of these
             signals and their interplay. Moreover, on assuming that the
             signal of interest and the side information obey such an
             approximately low-rank model, we derive the expansions of
             the reconstruction error as a function of the deviation from
             an exactly low-rank model; such expansions also allow the
             identification of operational regimes, where the impact of
             side information on signal reconstruction is most relevant.
             Our framework, which offers a principled mechanism to
             integrate side information in high-dimensional data
             problems, is also tested in the context of imaging
             applications. In particular, we report state-of-the-art
             results in compressive hyperspectral imaging applications,
             where the accompanying side information is a conventional
             digital photograph.},
   Doi = {10.1109/TIT.2016.2606646},
   Key = {fds326750}
}

@article{fds322362,
   Author = {Xian, Y and Thompson, A and Qiu, Q and Nolte, L and Nowacek, D and Lu, J and Calderbank, R},
   Title = {Classification of whale vocalizations using the Weyl
             transform},
   Volume = {2015-August},
   Pages = {773-777},
   Publisher = {IEEE},
   Year = {2015},
   Month = {January},
   ISBN = {9781467369978},
   url = {http://dx.doi.org/10.1109/ICASSP.2015.7178074},
   Abstract = {In this paper, we apply the Weyl transform to represent the
             vocalization of marine mammals. In contrast to other popular
             representation methods, such as the MFCC and the Chirplet
             transform, the Weyl transform captures the global
             information of signals. This is especially useful when the
             signal has low order polynomial phase. We can reconstruct
             the signal from the coefficients obtained from the Weyl
             transform, and perform classification based on these
             coefficients. Experimental results show that classification
             using features extracted from the Weyl transform outperforms
             the MFCC and the Chirplet transform on our collected whales
             data.},
   Doi = {10.1109/ICASSP.2015.7178074},
   Key = {fds322362}
}

@article{fds343649,
   Author = {Ahn, HK and Qiu, Q and Bosch, E and Thompson, A and Robles, FE and Sapiro,
             G and Warren, WS and Calderbank, R},
   Title = {Classifying Pump-Probe Images of Melanocytic Lesions Using
             the WEYL Transform},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2018-April},
   Pages = {4209-4213},
   Year = {2018},
   Month = {September},
   ISBN = {9781538646588},
   url = {http://dx.doi.org/10.1109/ICASSP.2018.8461298},
   Abstract = {Diagnosis of melanoma is fraught with uncertainty, and
             discordance rates among physicians remain high because of
             the lack of a definitive criterion. Motivated by this
             challenge, this paper first introduces the Patch Weyl
             transform (PWT), a 2-dimensional variant of the Weyl
             transform. It then presents a method for classifying
             pump-probe images of melanocytic lesions based on the PWT
             coefficients. Performance of the PWT coefficients is shown
             to be superior to classification based on baseline
             intensity, on standard descriptors such as the Histogram of
             Oriented Gradients (HOG) and Local Binary Patterns (LBP),
             and on coefficients derived from PCA and Fourier
             representations of the data.},
   Doi = {10.1109/ICASSP.2018.8461298},
   Key = {fds343649}
}

@article{fds322366,
   Author = {Huang, J and Yuan, X and Calderbank, R},
   Title = {Collaborative compressive X-ray image reconstruction},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2015-August},
   Pages = {3282-3286},
   Publisher = {IEEE},
   Year = {2015},
   Month = {August},
   ISBN = {9781467369978},
   url = {http://dx.doi.org/10.1109/ICASSP.2015.7178578},
   Abstract = {The Poisson Factor Analysis (PFA) is applied to recover
             signals from a Poisson compressive sensing system. Motivated
             by the recently developed compressive X-ray imaging system,
             Coded Aperture Coherent Scatter Spectral Imaging (CACSSI)
             [1], we propose a new Bayesian reconstruction algorithm. The
             proposed Poisson-Gamma (PG) approach uses multiple
             measurements to refine our knowledge on both sensing matrix
             and background noise to overcome the uncertainties and
             inaccuracy of the hardware system. Therefore, a
             collaborative compressive X-ray image reconstruction
             algorithm is proposed under a Bayesian framework.
             Experimental results on real data show competitive
             performance in comparison with point estimation based
             methods.},
   Doi = {10.1109/ICASSP.2015.7178578},
   Key = {fds322366}
}

@article{fds343651,
   Author = {Thompson, A and Calderbank, R},
   Title = {Compressed Neighbour Discovery using Sparse Kerdock
             Matrices},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2018-June},
   Pages = {2286-2290},
   Year = {2018},
   Month = {August},
   ISBN = {9781538647806},
   url = {http://dx.doi.org/10.1109/ISIT.2018.8437324},
   Abstract = {We study the network-wide neighbour discovery problem in
             wireless networks in which each node in a network must
             discovery the network interface addresses (NIAs) of its
             neighbour. We work within the rapid on-off division duplex
             framework proposed by Guo and Zhang in [5] in which all
             nodes are assigned different on-off signatures which allow
             them listen to the transmissions of neighbouring nodes
             during their off slots; this leads to a compressed sensing
             problem at each node with a collapsed codebook determined by
             a given node's transmission signature. We propose sparse
             Kerdock matrices as codebooks for the neighbour discovery
             problem. These matrices share the same row space as certain
             Delsarte-Goethals frames based upon Reed Muller codes,
             whilst at the same time being extremely sparse. We present
             numerical experiments using two different compressed sensing
             recovery algorithms, One Step Thresholding (OST) and
             Normalised Iterative Hard Thresholding (NIHT). For both
             algorithms, a higher proportion of neighbours are
             successfully identified using sparse Kerdock matrices
             compared to codebooks based on Reed Muller codes with random
             erasures as proposed in [13]. We argue that the improvement
             is due to the better interference cancellation properties of
             sparse Kerdock matrices when collapsed according to a given
             node's transmission signature. We show by explicit
             calculation that the coherence of the collapsed codebooks
             resulting from sparse Kerdock matrices remains
             near-optimal.},
   Doi = {10.1109/ISIT.2018.8437324},
   Key = {fds343651}
}
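
Normalized iterative hard thresholding (NIHT), one of the two recovery algorithms used in the experiments above, admits a compact sketch. The version below follows the standard Blumensath-Davies iteration with an adaptive step size but omits the backtracking safeguard on support changes, and uses an illustrative Gaussian codebook rather than a collapsed Kerdock codebook, so it is an illustration rather than the paper's setup.

import numpy as np

# Minimal sketch of normalized iterative hard thresholding (NIHT) with the
# standard adaptive step size; the backtracking safeguard is omitted.

def hard_threshold(x, s):
    out = np.zeros_like(x)
    keep = np.argsort(np.abs(x))[-s:]
    out[keep] = x[keep]
    return out

def niht(A, y, s, iters=200):
    x = np.zeros(A.shape[1])
    support = np.argsort(np.abs(A.T.dot(y)))[-s:]        # initial support guess
    for _ in range(iters):
        g = A.T.dot(y - A.dot(x))
        gs = g[support]
        As_gs = A[:, support].dot(gs)
        mu = gs.dot(gs) / max(As_gs.dot(As_gs), 1e-12)   # normalized step size
        x = hard_threshold(x + mu * g, s)
        support = np.flatnonzero(x)
    return x

if __name__ == "__main__":
    rng = np.random.default_rng(2)
    n, p, s = 120, 400, 10
    A = rng.standard_normal((n, p)) / np.sqrt(n)
    x_true = np.zeros(p)
    x_true[rng.choice(p, size=s, replace=False)] = rng.standard_normal(s)
    y = A.dot(x_true)
    x_hat = niht(A, y, s)
    print("relative error:", np.linalg.norm(x_hat - x_true) / np.linalg.norm(x_true))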

@article{fds335330,
   Author = {Rodrigues, M and Nokleby, M and Renna, F and Calderbank,
             R},
   Title = {Compressive classification: Where wireless communications
             meets machine learning},
   Number = {9783319160412},
   Pages = {451-468},
   Publisher = {Springer International Publishing},
   Year = {2015},
   Month = {January},
   ISBN = {9783319160412},
   url = {http://dx.doi.org/10.1007/978-3-319-16042-9_15},
   Abstract = {This chapter introduces Shannon-inspired performance limits
             associated with the classification of low-dimensional
             subspaces embedded in a high-dimensional ambient space from
             compressive and noisy measurements. In particular, it
             introduces the diversity-discrimination tradeoff that
             describes the interplay between the number of classes that
             can be separated by a compressive classifier—measured via
             the discrimination gain—and the performance of such a
             classifier—measured via the diversity gain—and the
             relation of such an interplay to the underlying problem
             geometry, including the ambient space dimension, the
             subspaces dimension, and the number of compressive
             measurements. Such a fundamental limit on performance is
             derived from a syntactic equivalence between the compressive
             classification problem and certain wireless communications
             problems. This equivalence provides an opportunity to
             cross-pollinate ideas between the wireless information
             theory domain and the compressive classification domain.
             This chapter also demonstrates how theory aligns with
             practice in a concrete application: face recognition from a
             set of noisy compressive measurements.},
   Doi = {10.1007/978-3-319-16042-9_15},
   Key = {fds335330}
}

@article{fds326892,
   Author = {Thompson, A and Calderbank, R},
   Title = {Compressive imaging using fast transform
             coding},
   Journal = {Proceedings of SPIE - The International Society for Optical
             Engineering},
   Volume = {9992},
   Publisher = {SPIE},
   Year = {2016},
   Month = {January},
   ISBN = {9781510603882},
   url = {http://dx.doi.org/10.1117/12.2239999},
   Abstract = {We propose deterministic sampling strategies for compressive
             imaging based on Delsarte-Goethals frames. We show that
             these sampling strategies result in multi-scale measurements
             which can be related to the 2D Haar wavelet transform. We
             demonstrate the effectiveness of our proposed strategies
             through numerical experiments.},
   Doi = {10.1117/12.2239999},
   Key = {fds326892}
}

@article{fds326897,
   Author = {Tamo, I and Barg, A and Goparaju, S and Calderbank,
             R},
   Title = {Cyclic LRC codes and their subfield subcodes},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2015-June},
   Pages = {1262-1266},
   Publisher = {IEEE},
   Year = {2015},
   Month = {September},
   ISBN = {9781467377041},
   url = {http://dx.doi.org/10.1109/ISIT.2015.7282658},
   Abstract = {We consider linear cyclic codes with the locality property,
             or locally recoverable codes (LRC codes). A family of LRC
             codes that generalizes the classical construction of
             Reed-Solomon codes was constructed in a recent paper by I.
             Tamo and A. Barg (IEEE Trans. IT, no. 8, 2014). In this
             paper we focus on the optimal cyclic codes that arise from
             the general construction. We give a characterization of
             these codes in terms of their zeros, and observe that there
             are many equivalent ways of constructing optimal cyclic LRC
             codes over a given field. We also study subfield subcodes of
             cyclic LRC codes (BCH-like LRC codes) and establish several
             results about their locality and minimum
             distance.},
   Doi = {10.1109/ISIT.2015.7282658},
   Key = {fds326897}
}

@article{fds335328,
   Author = {Huang, J and Qiu, Q and Sapiro, G and Calderbank,
             R},
   Title = {Discriminative robust transformation learning},
   Journal = {Advances in Neural Information Processing
             Systems},
   Volume = {2015-January},
   Pages = {1333-1341},
   Year = {2015},
   Month = {January},
   Abstract = {This paper proposes a framework for learning features that
             are robust to data variation, which is particularly
             important when only a limited number of training samples are
             available. The framework makes it possible to tradeoff the
             discriminative value of learned features against the
             generalization error of the learning algorithm. Robustness
             is achieved by encouraging the transform that maps data to
             features to be a local isometry. This geometric property is
             shown to improve (K, ε)-robustness, thereby providing
             theoretical justification for reductions in generalization
             error observed in experiments. The proposed optimization
             framework is used to train standard learning algorithms such
             as deep neural networks. Experimental results obtained on
             benchmark datasets, such as labeled faces in the wild,
             demonstrate the value of being able to balance
             discrimination and robustness.},
   Key = {fds335328}
}

@article{fds326898,
   Author = {Michelusi, N and Nokleby, M and Mitra, U and Calderbank,
             R},
   Title = {Dynamic spectrum estimation with minimal overhead via
             multiscale information exchange},
   Journal = {Proceedings - IEEE Global Communications Conference,
             GLOBECOM},
   Year = {2015},
   Month = {January},
   ISBN = {9781479959525},
   url = {http://dx.doi.org/10.1109/GLOCOM.2014.7417532},
   Abstract = {In this paper, a multiscale approach to spectrum sensing in
             cognitive cellular networks is analyzed. Observing that
             wireless interference decays with distance, and that
             estimating the entire spectrum occupancy across the network
             entails substantial energy cost and communication overhead,
             a protocol for distributed spectrum estimation is defined by
             which secondary users maintain fine-grained estimates of the
             spectrum occupancy of nearby cells, but coarse-grained
             estimates of that of distant cells. This is accomplished by
             arranging the cellular network into a hierarchy of
             increasingly coarser macro-cells and having secondary users
             fuse local spectrum estimates up the hierarchy. The spectrum
             occupancy is modeled as a Markov process, and the system is
             optimized by defining a probabilistic framework for spectrum
             sensing and information exchange that balances improvements
             in spectrum estimation against energy costs. The performance
             of the multiscale scheme is evaluated numerically, showing
             that it offers substantial improvements in energy efficiency
             over local estimation. On the other hand, it is shown that
             schemes that attempt to estimate the state of the whole
             network perform poorly, due to the excessive cost of
             performing information exchange with far away cells, and to
             the fact that knowing the spectrum occupancy of distant
             cells, which experience low interference levels, yields
             only a small increase in reward.},
   Doi = {10.1109/GLOCOM.2014.7417532},
   Key = {fds326898}
}

@article{fds235744,
   Author = {Harms, A and Bajwa, W and Calderbank, R},
   Title = {Efficient linear time-varying system identification using
             chirp waveforms},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Volume = {2015-April},
   Pages = {854-858},
   Publisher = {IEEE},
   Year = {2015},
   Month = {April},
   ISBN = {9781479982974},
   ISSN = {1058-6393},
   url = {http://dx.doi.org/10.1109/ACSSC.2014.7094572},
   Abstract = {Linear, time-varying (LTV) systems are operators composed of
             time shifts, frequency shifts, and complex amplitude
             scalings that act on continuous finite-energy waveforms.
             This paper builds upon a novel, resource-efficient method
             previously proposed by the authors for identifying the
             parametric description of such systems from the sampled
             response to linear frequency modulated (LFM) waveforms. If
             the LTV operator is probed with a sufficiently diverse set
             of LFM pulses (more LFM pulses than reflectors), then the
             system can be identified with high accuracy. The accuracy is
             shown to be proportional to the uncertainty in the estimated
             frequencies and confirmed with numerical
             experiments.},
   Doi = {10.1109/ACSSC.2014.7094572},
   Key = {fds235744}
}
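
%% As a companion to the abstract above, the probing waveforms it refers to are
%% ordinary linear frequency modulated (LFM) chirps. The sketch below only shows
%% how such a small, diverse probe set might be generated (all parameter values
%% are illustrative assumptions, not the paper's); the identification algorithm
%% itself is not reproduced here.

   import numpy as np

   def lfm_pulse(duration, bandwidth, fs, f0=0.0):
       """Complex baseband LFM (chirp) pulse: frequency sweeps f0 -> f0 + bandwidth."""
       n = int(round(duration * fs))
       t = np.arange(n) / fs
       rate = bandwidth / duration                 # chirp rate in Hz per second
       phase = 2 * np.pi * (f0 * t + 0.5 * rate * t ** 2)
       return np.exp(1j * phase)

   fs = 1e6                                        # sample rate (illustrative)
   probes = [lfm_pulse(1e-3, bw, fs) for bw in (50e3, 100e3, 200e3)]
   print([len(p) for p in probes])                 # three 1 ms probes, 1000 samples each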

@article{fds326755,
   Author = {Huang, J and Qiu, Q and Calderbank, R and Sapiro,
             G},
   Title = {Geometry-aware deep transform},
   Journal = {Proceedings of the IEEE International Conference on Computer
             Vision},
   Volume = {2015 International Conference on Compute},
   Pages = {4139-4147},
   Publisher = {IEEE},
   Year = {2015},
   Month = {February},
   ISBN = {9781467383912},
   url = {http://dx.doi.org/10.1109/ICCV.2015.471},
   Abstract = {Many recent efforts have been devoted to designing
             sophisticated deep learning structures, obtaining
             revolutionary results on benchmark datasets. The success of
             these deep learning methods mostly relies on an enormous
             volume of labeled training samples to learn a huge number of
             parameters in a network; therefore, understanding the
             generalization ability of a learned deep network cannot be
             overlooked, especially when restricted to a small training
             set, which is the case for many applications. In this paper,
             we propose a novel deep learning objective formulation that
             unifies both the classification and metric learning
             criteria. We then introduce a geometry-aware deep transform
             to enable a non-linear discriminative and robust feature
             transform, which shows competitive performance on small
             training sets for both synthetic and real-world data. We
             further support the proposed framework with a formal
             (K)-robustness analysis.},
   Doi = {10.1109/ICCV.2015.471},
   Key = {fds326755}
}
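
%% The "unified classification plus metric learning" objective mentioned above can
%% be pictured with a toy loss of the same flavour. This is a schematic assumption
%% of what such an objective generally looks like, not the paper's formulation or
%% its (K)-robustness analysis: a softmax classification term on transformed
%% features plus a pairwise term that pulls same-class features together and
%% pushes different-class features apart.

   import numpy as np

   def softmax_cross_entropy(logits, labels):
       z = logits - logits.max(axis=1, keepdims=True)
       log_probs = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
       return -log_probs[np.arange(len(labels)), labels].mean()

   def pairwise_metric_loss(features, labels, margin=1.0):
       d = np.linalg.norm(features[:, None, :] - features[None, :, :], axis=-1)
       same = labels[:, None] == labels[None, :]
       pull = (d[same] ** 2).mean()                              # same class: small distance
       push = (np.maximum(0.0, margin - d[~same]) ** 2).mean()   # different class: at least `margin` apart
       return pull + push

   rng = np.random.default_rng(0)
   features = rng.normal(size=(8, 4))               # stand-in for transformed features
   labels = np.array([0, 0, 1, 1, 0, 1, 0, 1])
   logits = features @ rng.normal(size=(4, 2))      # linear classifier on top
   print(softmax_cross_entropy(logits, labels) + 0.5 * pairwise_metric_loss(features, labels))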

@article{fds326903,
   Author = {Chi, Y and Calderbank, R and Pezeshki, A},
   Title = {Golay Complementary Waveforms for Sparse Delay-Doppler Radar
             Imaging},
   Journal = {2009 3RD IEEE INTERNATIONAL WORKSHOP ON COMPUTATIONAL
             ADVANCES IN MULTI-SENSOR ADAPTIVE PROCESSING
             (CAMSAP)},
   Pages = {177-180},
   Year = {2009},
   Key = {fds326903}
}
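
%% The entry above has no abstract in this listing; the key property behind Golay
%% complementary waveforms is easy to state and check in code. The sketch below is
%% generic background rather than the paper's delay-Doppler imaging scheme: the
%% aperiodic autocorrelations of a complementary pair sum to an impulse, which is
%% what suppresses range sidelobes.

   import numpy as np

   def acorr(x):
       """Aperiodic autocorrelation at non-negative lags."""
       return np.correlate(x, x, mode="full")[len(x) - 1:]

   a = np.array([1, 1, 1, -1])      # a standard length-4 Golay complementary pair
   b = np.array([1, 1, -1, 1])
   print(acorr(a) + acorr(b))       # [8 0 0 0]: impulse-like, zero sidelobes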

@article{fds350794,
   Author = {Simpson, MJ and Wilson, JW and Matthews, TE and Duarte, M and Calderbank, R and Warren, WS},
   Title = {Imaging the distribution of melanin in human skin lesions
             with pump-probe microscopy},
   Journal = {Optics InfoBase Conference Papers},
   Year = {2011},
   Month = {January},
   ISBN = {9781557529176},
   url = {http://dx.doi.org/10.1364/ls.2011.lmc2},
   Doi = {10.1364/ls.2011.lmc2},
   Key = {fds350794}
}

@article{fds326894,
   Author = {Vahid, A and Calderbank, R},
   Title = {Impact of local delayed CSIT on the capacity region of the
             two-user interference channel},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2015-June},
   Pages = {2421-2425},
   Publisher = {IEEE},
   Year = {2015},
   Month = {September},
   ISBN = {9781467377041},
   url = {http://dx.doi.org/10.1109/ISIT.2015.7282890},
   Abstract = {The coherence time of a wireless channel is often smaller
             than the delay with which channel state information is
             available at transmitters. In this paper, we aim to find the
             most important subset of the channel state information that
             transmitters need to learn with delay. We characterize the
             capacity region of the two-user interference channel with
             local delayed channel state information at transmitters. We
             propose a transmission strategy that only relies on the
             delayed knowledge of the outgoing links at each transmitter
             and achieves the outer-bound for the scenario in which
             transmitters learn the entire channel state with delay. We
             also show that the delayed knowledge of the outgoing links
             is the minimum delayed knowledge that is required to
             outperform the no knowledge assumption.},
   Doi = {10.1109/ISIT.2015.7282890},
   Key = {fds326894}
}

@article{fds326889,
   Author = {Vahid, A and Shomorony, I and Calderbank, R},
   Title = {Informational bottlenecks in two-unicast wireless networks
             with delayed CSIT},
   Journal = {2015 53rd Annual Allerton Conference on Communication,
             Control, and Computing, Allerton 2015},
   Pages = {1256-1263},
   Publisher = {IEEE},
   Year = {2016},
   Month = {April},
   ISBN = {9781509018239},
   url = {http://dx.doi.org/10.1109/ALLERTON.2015.7447152},
   Abstract = {We study the impact of delayed channel state information at
             the transmitters (CSIT) in two-unicast wireless networks
             with a layered topology and arbitrary connectivity. We
             introduce a technique to obtain outer bounds to the
             degrees-of-freedom (DoF) region through the new
             graph-theoretic notion of bottleneck nodes. Such nodes act
             as informational bottlenecks only under the assumption of
             delayed CSIT, and imply asymmetric DoF bounds of the form
             mD1 + D2 ≤ m. Combining this outer-bound technique with
             new achievability schemes, we characterize the sum DoF of a
             class of two-unicast wireless networks, which shows that,
             unlike in the case of instantaneous CSIT, the DoF of
             two-unicast networks with delayed CSIT can take an infinite
             set of values.},
   Doi = {10.1109/ALLERTON.2015.7447152},
   Key = {fds326889}
}

@article{fds335322,
   Author = {Mappouras, G and Vahid, A and Calderbank, R and Hower, DR and Sorin,
             DJ},
   Title = {Jenga: Efficient fault tolerance for stacked
             DRAM},
   Journal = {Proceedings - 35th IEEE International Conference on Computer
             Design, ICCD 2017},
   Pages = {361-368},
   Publisher = {IEEE},
   Year = {2017},
   Month = {November},
   ISBN = {9781538622544},
   url = {http://dx.doi.org/10.1109/ICCD.2017.62},
   Abstract = {In this paper, we introduce Jenga, a new scheme for
             protecting 3D DRAM, specifically high bandwidth memory
             (HBM), from failures in bits, rows, banks, channels, dies,
             and TSVs. By providing redundancy at the granularity of a
             cache block, rather than across blocks as in the current
             state of the art, Jenga achieves greater error-free
             performance and lower error-recovery latency. We show that
             Jenga's runtime is on average only 1.03× the runtime of our
             Baseline across a range of benchmarks. Additionally, for
             memory-intensive benchmarks, Jenga is on average 1.11×
             faster than prior work.},
   Doi = {10.1109/ICCD.2017.62},
   Key = {fds335322}
}

@article{fds322368,
   Author = {Souvik, D and Thangaraj, A and McLaughlin, SW and Calderbank,
             AR},
   Title = {Linear-time decodable secrecy codes for binary erasure
             wiretap channels},
   Journal = {43rd Annual Allerton Conference on Communication, Control
             and Computing 2005},
   Volume = {3},
   Pages = {1548-1556},
   Year = {2005},
   Month = {January},
   ISBN = {9781604234916},
   Abstract = {In this paper we consider encoder and decoder design for
             codes achieving perfect secrecy on the wiretap channel. We
             consider the special case of a noiseless main channel and
             binary erasure channel (BEC) as the wiretapper's channel and
             show that it is possible to construct linear-time decodable
             secrecy codes based on LDPC codes that achieve perfect
             secrecy.},
   Key = {fds322368}
}

@article{fds326752,
   Author = {Mappouras, G and Vahid, A and Calderbank, R and Sorin,
             DJ},
   Title = {Methuselah flash: Rewriting codes for extra long storage
             lifetime},
   Journal = {Proceedings - 46th Annual IEEE/IFIP International Conference
             on Dependable Systems and Networks, DSN 2016},
   Pages = {180-191},
   Publisher = {IEEE},
   Year = {2016},
   Month = {September},
   ISBN = {9781467388917},
   url = {http://dx.doi.org/10.1109/DSN.2016.25},
   Abstract = {Motivated by embedded systems and datacenters that require
             long-life components, we extend the lifetime of Flash memory
             using rewriting codes that allow for multiple writes to a
             page before it needs to be erased. Although researchers have
             previously explored rewriting codes for this purpose, we
             make two significant contributions beyond prior work. First,
             we remove the assumption of idealized - and unrealistically
             optimistic - Flash cells used in prior work on endurance
             codes. Unfortunately, current Flash technology has a
             non-ideal interface, due to its underlying physical design,
             and does not, for example, allow all seemingly possible
             increases in a cell's level. We show how to provide the
             ideal multi-level cell interface, by developing a virtual
             Flash cell, and we evaluate its impact on existing endurance
             codes. Our second contribution is our development of novel
             endurance codes, called Methuselah Flash Codes (MFC), that
             provide better cost/lifetime trade-offs than previously
             studied codes.},
   Doi = {10.1109/DSN.2016.25},
   Key = {fds326752}
}
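
%% The "rewriting codes that allow for multiple writes to a page before it needs
%% to be erased" mentioned above are write-once-memory (WOM) codes. The toy
%% example below is the classic Rivest-Shamir WOM code, shown only to make the
%% idea concrete; it is not the paper's MFC construction or its virtual Flash
%% cell. Two 2-bit messages are written into 3 one-way cells (bits may only go
%% from 0 to 1) before an erase is needed.

   FIRST = {(0, 0): (0, 0, 0), (0, 1): (1, 0, 0), (1, 0): (0, 1, 0), (1, 1): (0, 0, 1)}
   SECOND = {m: tuple(1 - b for b in c) for m, c in FIRST.items()}   # complements

   def decode(cells):
       table = FIRST if sum(cells) <= 1 else SECOND
       return next(m for m, c in table.items() if c == cells)

   def rewrite(cells, msg):
       """Second write: only 0 -> 1 transitions are allowed, no erase."""
       if decode(cells) == msg:
           return cells
       target = SECOND[msg]
       assert all(t >= c for c, t in zip(cells, target)), "would require an erase"
       return target

   state = FIRST[(0, 1)]            # first write stores message 01
   state = rewrite(state, (1, 0))   # second write stores message 10 in the same cells
   print(state, decode(state))      # (1, 0, 1) (1, 0)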

@article{fds326895,
   Author = {Sokolić, J and Renna, F and Calderbank, R and Rodrigues,
             MRD},
   Title = {Mismatch in the classification of linear subspaces: Upper
             bound to the probability of error},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2015-June},
   Pages = {2201-2205},
   Publisher = {IEEE},
   Year = {2015},
   Month = {September},
   ISBN = {9781467377041},
   url = {http://dx.doi.org/10.1109/ISIT.2015.7282846},
   Abstract = {This paper studies the performance associated with the
             classification of linear subspaces corrupted by noise with a
             mismatched classifier. In particular, we consider a problem
             where the classifier observes a noisy signal, the signal
             distribution conditioned on the signal class is zero-mean
             Gaussian with low-rank covariance matrix, and the classifier
             knows only the mismatched parameters in lieu of the true
             parameters. We derive an upper bound to the
             misclassification probability of the mismatched classifier
             and characterize its behaviour. Specifically, our
             characterization leads to sharp sufficient conditions that
             describe the absence of an error floor in the low-noise
             regime, and that can be expressed in terms of the principal
             angles and the overlap between the true and the mismatched
             signal subspaces.},
   Doi = {10.1109/ISIT.2015.7282846},
   Key = {fds326895}
}

@article{fds322364,
   Author = {Huang, J and Yuan, X and Calderbank, R},
   Title = {Multi-scale Bayesian reconstruction of compressive X-ray
             image},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2015-August},
   Pages = {1618-1622},
   Publisher = {IEEE},
   Year = {2015},
   Month = {August},
   ISBN = {9781467369978},
   url = {http://dx.doi.org/10.1109/ICASSP.2015.7178244},
   Abstract = {A novel multi-scale dictionary based Bayesian reconstruction
             algorithm is proposed for compressive X-ray imaging, which
             encodes the material's spectrum by Poisson measurements.
             Inspired by recently developed compressive X-ray imaging
             systems [1], this work aims to recover the material's
             spectrum from the compressive coded image by leveraging a
             reference spectrum library. Instead of directly using the
             huge and redundant library as a dictionary, which is
             computationally cumbersome and makes it difficult to select
             the active dictionary atoms, a multi-scale tree-structured
             dictionary is refined from the spectrum library, and a
             Bayesian reconstruction algorithm is then
             developed. Experimental results on real data demonstrate
             superior performance in comparison with traditional
             methods.},
   Doi = {10.1109/ICASSP.2015.7178244},
   Key = {fds322364}
}

@article{fds343653,
   Author = {Michelusi, N and Nokleby, M and Mitra, U and Calderbank,
             R},
   Title = {Multi-scale spectrum sensing in millimeter wave cognitive
             networks},
   Journal = {Conference Record of 51st Asilomar Conference on Signals,
             Systems and Computers, ACSSC 2017},
   Volume = {2017-October},
   Pages = {1640-1644},
   Year = {2017},
   Month = {July},
   ISBN = {9781538618233},
   url = {http://dx.doi.org/10.1109/ACSSC.2017.8335637},
   Abstract = {In this paper, a multi-scale approach to spectrum sensing
             and information exchange in millimeter wave cognitive
             cellular networks is proposed. In order to overcome the huge
             energy cost of acquiring full network state information on
             the occupancy of each cell over the network, secondary users
             acquire local state estimates, which are aggregated up the
             hierarchy to produce multi-scale estimates of spectrum
             occupancy. The proposed design accounts for local estimation
             errors and the irregular interference patterns arising due
             to sensitivity to blockages, high attenuation, and high
             directionality at millimeter wave. A greedy algorithm based
             on agglomerative clustering is proposed to design an
             interference-based tree (IBT), matched to the interference
             pattern of the network. The proposed aggregation algorithm
             over IBT is shown to be much more cost efficient than
             acquiring full network state information from the
             neighboring cells, requiring as little as 1/5th of the
             energy cost.},
   Doi = {10.1109/ACSSC.2017.8335637},
   Key = {fds343653}
}

@article{fds335324,
   Author = {Michelusi, N and Nokleby, M and Mitra, U and Calderbank,
             R},
   Title = {Multi-scale spectrum sensing in small-cell mm-wave cognitive
             wireless networks},
   Journal = {IEEE International Conference on Communications},
   Publisher = {IEEE},
   Year = {2017},
   Month = {July},
   ISBN = {9781467389990},
   url = {http://dx.doi.org/10.1109/ICC.2017.7996657},
   Abstract = {In this paper, a multi-scale approach to spectrum sensing in
             cognitive cellular networks is proposed. In order to
             overcome the huge cost incurred in the acquisition of full
             network state information, a hierarchical scheme is
             proposed, based on which local state estimates are
             aggregated up the hierarchy to obtain aggregate state
             information at multiple scales, which are then sent back to
             each cell for local decision making. Thus, each cell obtains
             fine-grained estimates of the channel occupancies of nearby
             cells, but coarse-grained estimates of those of distant
             cells. The performance of the aggregation scheme is studied
             in terms of the trade-off between the throughput achievable
             by secondary users and the interference generated by the
             activity of these secondary users to primary users. In order
             to account for the irregular structure of interference
             patterns arising from path loss, shadowing, and blockages,
             which are especially relevant in millimeter wave networks, a
             greedy algorithm is proposed to find a multi-scale
             aggregation tree to optimize the performance. It is shown
             numerically that this tailored hierarchy outperforms a
             regular tree construction by 60%.},
   Doi = {10.1109/ICC.2017.7996657},
   Key = {fds335324}
}

@article{fds235750,
   Author = {Wang, L and Razi, A and Dias Rodrigues, M and Calderbank, R and Carin,
             L},
   Title = {Nonlinear information-theoretic compressive measurement
             design},
   Journal = {31st International Conference on Machine Learning, ICML
             2014},
   Volume = {4},
   Pages = {2896-2907},
   Year = {2014},
   Month = {January},
   ISBN = {9781634393973},
   Abstract = {We investigate design of general nonlinear functions for
             mapping high-dimensional data into a lower-dimensional
             (compressive) space. The nonlinear measurements are assumed
             contaminated by additive Gaussian noise. Depending on the
             application, we are either interested in recovering the
             high-dimensional data from the nonlinear compressive
             measurements, or performing classification directly based on
             these measurements. The latter case corresponds to
             classification based on nonlinearly constituted and noisy
             features. The nonlinear measurement functions are designed
             based on constrained mutual-information optimization. New
             analytic results are developed for the gradient of mutual
             information in this setting, for arbitrary input-signal
             statistics. We make connections to kernel-based methods,
             such as the support vector machine. Encouraging results are
             presented on multiple datasets, for both signal recovery and
             classification. The nonlinear approach is shown to be
             particularly valuable in high-noise scenarios.},
   Key = {fds235750}
}

@article{fds322370,
   Author = {Diggavi, S and Dusad, S and Calderbank, AR and Al-Dhahir,
             N},
   Title = {On embedded diversity codes},
   Journal = {43rd Annual Allerton Conference on Communication, Control
             and Computing 2005},
   Volume = {2},
   Pages = {1046-1055},
   Year = {2005},
   Month = {January},
   ISBN = {9781604234916},
   Abstract = {Diversity-embedded codes for fading channels are high-rate
             codes that are designed so that they have a high-diversity
             code embedded within them [3, 4]. This is equivalent to
             coding the data into two streams such that the high-priority
             stream has higher reliability (designed in terms of
             diversity order) than the low-priority stream. These codes
             also allow a form of opportunistic communication where the
             high-rate code opportunistically takes advantage of good
             channel realizations whereas the embedded high-diversity
             code ensures that at least part of the information is
             received reliably. In this paper we give the design for a
             class of such codes for a fixed transmit alphabet
             constraint. These constructions are a natural generalization
             of multilevel codes to diversity embedding. These codes give
             provable performance guarantees by using binary matrices
             with rank guarantees as the building blocks to generate the
             desired diversity embedding in the complex domain. We also
             investigate the systems implications of these codes by
             examining its applications to unequal error protection, rate
             opportunism and packet delay optimization. Preliminary
             results in these applications demonstrate that
             diversity-embedded codes can outperform traditional
             single-layer codes in moderate SNR regimes.},
   Key = {fds322370}
}

@article{fds333193,
   Author = {Hammons, AR and Kumar, PV and Calderbank, AR and Sloane, NJA and Solé,
             P},
   Title = {On the apparent duality of the kerdock and preparata
             codes},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {673 LNCS},
   Pages = {13-24},
   Year = {1993},
   Month = {January},
   ISBN = {9783540566861},
   url = {http://dx.doi.org/10.1007/3-540-56686-4_30},
   Abstract = {The Kerdock and extended Preparata codes are something of an
             enigma in coding theory since they are both Hamming-distance
             invariant and have weight enumerators that are MacWilliams
             duals just as if they were dual linear codes. In this paper,
             we explain, by constructing in a natural way a
             Preparata-like code PL from the Kerdock code K, why the
             existence of a distance-invariant code with weight
             distribution that is the MacWilliams transform of that of the
             Kerdock code is only to be expected. The construction
             involves quaternary codes over the ring ℤ4 of integers
             modulo 4. We exhibit a quaternary code Q and its quaternary
             dual P⊥ which, under the Gray mapping, give rise to the
             Kerdock code K, and Preparata-like code PL, respectively.
             The code PL is identical in weight and distance distribution
             to the extended Preparata code. The linearity of Q and P⊥
             ensures that the binary codes K and PL are distance
             invariant, while their duality as quaternary codes
             guarantees that K and PL have dual weight distributions. The
             quaternary code Q is the ℤ4-analog of the first-order
             Reed-Muller code. As a result, PL has a simple description
             in the ℤ4-domain that admits a simple syndrome decoder. At
             length 16, the code PL coincides with the Preparata
             code.},
   Doi = {10.1007/3-540-56686-4_30},
   Key = {fds333193}
}
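
%% The Gray map referred to in this abstract is simple enough to state inline.
%% The sketch below is standard background (not a reconstruction of the Kerdock
%% or Preparata-like codes themselves): each Z4 symbol maps to a bit pair, and
%% Lee weight over Z4 becomes Hamming weight of the binary image, which is why
%% the map is an isometry.

   GRAY = {0: (0, 0), 1: (0, 1), 2: (1, 1), 3: (1, 0)}

   def gray_image(word):
       """Binary image of a quaternary word under the Gray map."""
       return tuple(bit for q in word for bit in GRAY[q % 4])

   def lee_weight(word):
       return sum(min(q % 4, 4 - q % 4) for q in word)

   w = (1, 2, 3, 0)
   print(gray_image(w))                       # (0, 1, 1, 1, 1, 0, 0, 0)
   print(lee_weight(w), sum(gray_image(w)))   # 4 4: Lee weight equals Hamming weight of the image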

@article{fds235784,
   Author = {Das, S and Al-Dhahir, N and Diggavi, S and Calderbank,
             R},
   Title = {Opportunistic space-time block codes},
   Journal = {IEEE Vehicular Technology Conference},
   Volume = {3},
   Pages = {2025-2029},
   Publisher = {IEEE},
   Year = {2005},
   Month = {January},
   ISBN = {0780391527},
   ISSN = {1090-3038},
   url = {http://gateway.webofknowledge.com/gateway/Gateway.cgi?GWVersion=2&SrcApp=PARTNER_APP&SrcAuth=LinksAMR&KeyUT=WOS:000235046902131&DestLinkType=FullRecord&DestApp=ALL_WOS&UsrCustomerID=47d3190e77e5a3a53558812f597b0b92},
   Abstract = {Rate and diversity impose a fundamental tradeoff in
             space-time coding. High-rate space-time codes come at a cost
             of lower diversity, and high reliability (diversity) implies
             a lower rate. In [2], [3], we proposed a different point
             of view where we designed high-rate space-time codes that
             have a high-diversity code embedded within them. This allows
             a form of wireless communications where the high-rate code
             opportunistically takes advantage of good channel
             realizations while the embedded high-diversity code ensures
             that at least part of the information is decoded reliably.
             In [2], [3], we presented the code design strategy,
             criteria, and several constructions. In this paper, our
             focus is on decoding algorithms, quantifying the performance
             of the code examples in [2], [3] and presenting new
             examples.},
   Doi = {10.1109/VETECF.2005.1558464},
   Key = {fds235784}
}

@article{fds341949,
   Author = {Hadani, R and Rakib, S and Molisch, AF and Ibars, C and Monk, A and Tsatsanis, M and Delfeld, J and Goldsmith, A and Calderbank,
             R},
   Title = {Orthogonal Time Frequency Space (OTFS) modulation for
             millimeter-wave communications systems},
   Journal = {IEEE MTT-S International Microwave Symposium
             Digest},
   Pages = {681-683},
   Year = {2017},
   Month = {October},
   ISBN = {9781509063604},
   url = {http://dx.doi.org/10.1109/MWSYM.2017.8058662},
   Abstract = {Due to the increased demand for data rate, flexibility, and
             reliability of 5G cellular systems, new modulation formats
             need to be considered. A recently proposed scheme,
             Orthogonal Time Frequency Space (OTFS), offers various
             advantages in particular in environments with high frequency
             dispersion. Such environments are encountered, e.g., in
             mm-wave systems, both due to the higher phase noise, and the
             larger Doppler spreads encountered there. The current paper
             provides a performance evaluation of OTFS at 5G mm-wave
             frequencies. Comparisons with OFDM modulation show that OTFS
             has lower BER than OFDM in a number of situations.},
   Doi = {10.1109/MWSYM.2017.8058662},
   Key = {fds341949}
}

@article{fds326881,
   Author = {Hadani, R and Rakib, S and Tsatsanis, M and Monk, A and Goldsmith, AJ and Molisch, AF and Calderbank, R},
   Title = {Orthogonal time frequency space modulation},
   Journal = {IEEE Wireless Communications and Networking Conference,
             WCNC},
   Publisher = {IEEE},
   Year = {2017},
   Month = {May},
   ISBN = {9781509041831},
   url = {http://dx.doi.org/10.1109/WCNC.2017.7925924},
   Abstract = {A new two-dimensional modulation technique called Orthogonal
             Time Frequency Space (OTFS) modulation designed in the
             delay-Doppler domain is introduced. Through this design,
             which exploits full diversity over time and frequency, OTFS
             coupled with equalization converts the fading, time-varying
             wireless channel experienced by modulated signals such as
             OFDM into a time-independent channel with a complex channel
             gain that is roughly constant for all symbols. Thus,
             transmitter adaptation is not needed. This extraction of the
             full channel diversity allows OTFS to greatly simplify
             system operation and to significantly improve performance,
             particularly in systems with high Doppler, short packets,
             and large antenna arrays. Simulation results indicate at
             least several dB of block error rate performance improvement
             for OTFS over OFDM in all of these settings. In addition, these
             results show that even at very high Dopplers (500 km/h),
             OTFS approaches channel capacity through linear scaling of
             throughput with the MIMO order, whereas the performance of
             OFDM under typical design parameters breaks down
             completely.},
   Doi = {10.1109/WCNC.2017.7925924},
   Key = {fds326881}
}
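
%% The delay-Doppler to time-frequency step at the heart of OTFS is an inverse
%% symplectic finite Fourier transform (ISFFT), which is just a pair of FFTs
%% along the two axes of the symbol grid. The sketch below is a minimal
%% assumption-level illustration of that single step; the full modulator in the
%% papers above also involves a Heisenberg transform / pulse shaping, channel
%% equalization and so on, none of which is reproduced here.

   import numpy as np

   def isfft(x_dd):
       """Delay-Doppler grid (Doppler axis 0, delay axis 1) -> time-frequency grid."""
       return np.fft.fft(np.fft.ifft(x_dd, axis=0, norm="ortho"), axis=1, norm="ortho")

   def sfft(x_tf):
       """Inverse of isfft: time-frequency grid -> delay-Doppler grid."""
       return np.fft.fft(np.fft.ifft(x_tf, axis=1, norm="ortho"), axis=0, norm="ortho")

   N, M = 8, 16                              # Doppler and delay bins (illustrative sizes)
   x = np.zeros((N, M), dtype=complex)
   x[2, 5] = 1.0                             # one data symbol placed in the delay-Doppler domain

   X = isfft(x)
   print(np.allclose(np.abs(X), 1 / np.sqrt(N * M)))   # True: symbol spread evenly over time-frequency
   print(np.allclose(sfft(X), x))                      # True: the transform round-trips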

@article{fds322365,
   Author = {Yuan, X and Huang, J and Calderbank, R},
   Title = {Polynomial-phase signal direction-finding and
             source-tracking with a single acoustic vector
             sensor},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Volume = {2015-August},
   Pages = {2559-2563},
   Publisher = {IEEE},
   Year = {2015},
   Month = {August},
   ISBN = {9781467369978},
   url = {http://dx.doi.org/10.1109/ICASSP.2015.7178433},
   Abstract = {This paper introduces a new ESPRIT-based algorithm to
             estimate the direction-of-arrival of an arbitrary degree
             polynomial-phase signal with a single acoustic
             vector-sensor. The proposed time-invariant ESPRIT algorithm
             is based on a matrix-pencil pair derived from the
             time-delayed data-sets collected by a single acoustic
             vector-sensor. This approach requires neither a prior
             knowledge of the polynomial-phase signal's coefficients nor
             a prior knowledge of the polynomial-phase signal's
             frequency-spectrum. Furthermore, a preprocessing technique
             is proposed to incorporate the single-forgetting-factor
             algorithm and multiple-forgetting-factor adaptive tracking
             algorithm to track a polynomial-phase signal using one
             acoustic vector sensor. Simulation results verify the
             efficacy of the proposed direction finding and source
             tracking algorithms.},
   Doi = {10.1109/ICASSP.2015.7178433},
   Key = {fds322365}
}

@article{fds326896,
   Author = {Beirami, A and Calderbank, R and Duffy, K and Medard,
             M},
   Title = {Quantifying computational security subject to source
             constraints, guesswork and inscrutability},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2015-June},
   Pages = {2757-2761},
   Publisher = {IEEE},
   Year = {2015},
   Month = {September},
   ISBN = {9781467377041},
   url = {http://dx.doi.org/10.1109/ISIT.2015.7282958},
   Abstract = {Guesswork forms the mathematical framework for quantifying
             computational security subject to brute-force determination
             by query. In this paper, we consider guesswork subject to a
             per-symbol Shannon entropy budget. We introduce
             inscrutability rate as the asymptotic rate of increase in
             the exponential number of guesses required of an adversary
             to determine one or more secret strings. We prove that the
             inscrutability rate of any string-source supported on a
             finite alphabet χ, if it exists, lies between the
             per-symbol Shannon entropy constraint and log |χ|. We further
             prove that the inscrutability rate of any finite-order
             Markov string-source with hidden statistics remains the same
             as the unhidden case, i.e., the asymptotic value of hiding
             the statistics per symbol is vanishing. On the other
             hand, we show that there exists a string-source that
             achieves the upper limit on the inscrutability rate, i.e.,
             log |χ|, under the same Shannon entropy budget.},
   Doi = {10.1109/ISIT.2015.7282958},
   Key = {fds326896}
}
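
%% Guesswork itself, the quantity this abstract builds on, is easy to compute for
%% small examples: query candidate strings in decreasing order of probability and
%% count the expected number of queries. The sketch below is generic background
%% under an invented three-letter source, not the paper's inscrutability analysis;
%% the printed growth rate is the guesswork exponent, which for an i.i.d. source
%% is governed by a Renyi entropy (Arikan, 1996).

   import itertools
   import numpy as np

   p = {"a": 0.7, "b": 0.2, "c": 0.1}        # per-symbol distribution (illustrative)

   def expected_guesswork(n):
       probs = sorted(
           (np.prod([p[s] for s in word]) for word in itertools.product(p, repeat=n)),
           reverse=True,
       )
       return sum(i * q for i, q in enumerate(probs, start=1))

   for n in (2, 4, 6, 8):
       print(n, np.log2(expected_guesswork(n)) / n)   # bits per symbol, growing toward the guesswork exponent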

@article{fds335323,
   Author = {Kadhe, S and Calderbank, R},
   Title = {Rate optimal binary linear locally repairable codes with
             small availability},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Pages = {166-170},
   Publisher = {IEEE},
   Year = {2017},
   Month = {August},
   ISBN = {9781509040964},
   url = {http://dx.doi.org/10.1109/ISIT.2017.8006511},
   Abstract = {A locally repairable code with availability has the property
             that every code symbol can be recovered from multiple,
             disjoint subsets of other symbols of small size. In
             particular, a code symbol is said to have (r,
             t)-availability if it can be recovered from t disjoint
             subsets, each of size at most r. A code with availability is
             said to be rate optimal, if its rate is maximum among the
             class of codes with given locality, availability, and
             alphabet size. This paper focuses on rate-optimal binary,
             linear codes with small availability, and makes three
             contributions. First, it establishes tight upper bounds on
             the rate of binary linear codes with (r, 2) and (2, 3)
             availability. Second, it establishes a uniqueness result for
             binary rate-optimal codes, showing that for certain classes
             of binary linear codes with (r, 2) and (2, 3)-availability,
             any rate-optimal code must be a direct sum of shorter
             rate-optimal codes. Finally, it presents a class of locally
             repairable codes associated with convex polyhedra,
             especially, focusing on the codes associated with the
             Platonic solids. It demonstrates that these codes are
             locally repairable with t = 2, and that the codes associated
             with (geometric) dual polyhedra are (coding theoretic) duals
             of each other.},
   Doi = {10.1109/ISIT.2017.8006511},
   Key = {fds335323}
}
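
%% The (r, t)-availability definition in this abstract can be checked directly on
%% a tiny example. The code below is only an illustration of the definition (it
%% is not one of the paper's rate-optimal constructions): in the [5, 3] binary
%% code with parities c3 = c0 + c1 and c4 = c0 + c2, symbol c0 has two disjoint
%% repair groups {c1, c3} and {c2, c4} of size r = 2, i.e. (2, 2)-availability.

   import itertools

   codewords = [
       (a, b, c, a ^ b, a ^ c)
       for a, b, c in itertools.product((0, 1), repeat=3)
   ]

   def repairs(symbol, group, code):
       """True if `symbol` is determined by the positions in `group` for every codeword."""
       seen = {}
       for cw in code:
           key = tuple(cw[i] for i in group)
           if seen.setdefault(key, cw[symbol]) != cw[symbol]:
               return False
       return True

   print(repairs(0, (1, 3), codewords), repairs(0, (2, 4), codewords))   # True True: two disjoint groups
   print(repairs(0, (1, 2), codewords))                                  # False: not every pair works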

@article{fds326885,
   Author = {Nokleby, M and Beirami, A and Calderbank, R},
   Title = {Rate-distortion bounds on Bayes risk in supervised
             learning},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2016-August},
   Pages = {2099-2103},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541669},
   Abstract = {An information-theoretic framework is presented for
             estimating the number of labeled samples needed to train a
             classifier in a parametric Bayesian setting. Ideas from
             rate-distortion theory are used to derive bounds for the
             average L1 or L∞ distance between the learned classifier
             and the true maximum a posteriori classifier in terms of
             familiar information-theoretic quantities and the number of
             training samples available. The maximum a posteriori
             classifier is viewed as a random source, labeled training
             data are viewed as a finite-rate encoding of the source, and
             the L1 or L∞ Bayes risk is viewed as the average
             distortion. The result is a framework dual to the well-known
             probably approximately correct (PAC) framework. PAC bounds
             characterize worst-case learning performance of a family of
             classifiers whose complexity is captured by the
             Vapnik-Chervonenkis (VC) dimension. The rate-distortion
             framework, on the other hand, characterizes the average-case
             performance of a family of data distributions in terms of a
             quantity called the interpolation dimension, which
             represents the complexity of the family of data
             distributions. The resulting bounds do not suffer from the
             pessimism typical of the PAC framework, particularly when
             the training set is small.},
   Doi = {10.1109/ISIT.2016.7541669},
   Key = {fds326885}
}

@article{fds335326,
   Author = {Kumar, S and Calderbank, R and Pfister, HD},
   Title = {Reed-muller codes achieve capacity on the quantum erasure
             channel},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2016-August},
   Pages = {1750-1754},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541599},
   Abstract = {The quantum erasure channel is the simplest example of a
             quantum communication channel and its information capacity
             is known precisely. The subclass of quantum error-correcting
             codes called stabilizer codes is known to contain
             capacity-achieving sequences for the quantum erasure
             channel, but no efficient method is known to construct these
             sequences. In this article, we explicitly describe a
             capacity-achieving code sequence for the quantum erasure
             channel. In particular, we show that Calderbank-Shor-Steane
             (CSS) stabilizer codes constructed from self-orthogonal
             binary linear codes are capacity-achieving on the quantum
             erasure channel if the binary linear codes are
             capacity-achieving on the binary erasure channel. Recently,
             Reed-Muller codes were shown to achieve capacity on
             classical erasure channels. Using this, we show that CSS
             codes constructed from binary Reed-Muller codes achieve the
             capacity of the quantum erasure channel. The
             capacity-achieving nature of these CSS codes is also
             explained from a GF(4) perspective.},
   Doi = {10.1109/ISIT.2016.7541599},
   Key = {fds335326}
}
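
%% The binary ingredient of the CSS construction described above is concrete: the
%% first-order Reed-Muller code RM(1, m) is generated by the all-ones row together
%% with the m coordinate functions, and RM(1, 3) is self-orthogonal (it is the
%% [8, 4, 4] extended Hamming code). The sketch below only checks that generator
%% property; it is not the capacity proof or the quantum code itself.

   import itertools
   import numpy as np

   def rm1_generator(m):
       """Generator matrix of RM(1, m): all-ones row plus the m coordinate functions."""
       points = list(itertools.product((0, 1), repeat=m))
       rows = [[1] * (2 ** m)] + [[pt[i] for pt in points] for i in range(m)]
       return np.array(rows, dtype=int)

   G = rm1_generator(3)
   print(G.shape)                     # (4, 8): the [8, 4, 4] code
   print(np.all((G @ G.T) % 2 == 0))  # True: rows pairwise orthogonal over F_2, so the code is self-orthogonal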

@article{fds343525,
   Author = {Nguyen, DM and Tsiligianni, E and Calderbank, R and Deligiannis,
             N},
   Title = {Regularizing autoencoder-based matrix completion models via
             manifold learning},
   Journal = {European Signal Processing Conference},
   Volume = {2018-September},
   Pages = {1880-1884},
   Year = {2018},
   Month = {November},
   ISBN = {9789082797015},
   url = {http://dx.doi.org/10.23919/EUSIPCO.2018.8553528},
   Abstract = {Autoencoders are popular among neural-network-based matrix
             completion models due to their ability to retrieve potential
             latent factors from the partially observed matrices.
             Nevertheless, when training data is scarce, their performance
             is significantly degraded due to overfitting. In this paper,
             we mitigate overfitting with a data-dependent regularization
             technique that relies on the principles of multi-task
             learning. Specifically, we propose an autoencoder-based
             matrix completion model that performs prediction of the
             unknown matrix values as a main task, and manifold learning
             as an auxiliary task. The latter acts as an inductive bias,
             leading to solutions that generalize better. The proposed
             model outperforms the existing autoencoder-based models
             designed for matrix completion, achieving high
             reconstruction accuracy in well-known datasets.},
   Doi = {10.23919/EUSIPCO.2018.8553528},
   Key = {fds343525}
}

@article{fds326900,
   Author = {Harms, A and Bajwa, WU and Calderbank, R},
   Title = {Resource-Efficient Parametric Recovery of Linear
             Time-Varying Systems},
   Journal = {2013 IEEE 5TH INTERNATIONAL WORKSHOP ON COMPUTATIONAL
             ADVANCES IN MULTI-SENSOR ADAPTIVE PROCESSING (CAMSAP
             2013)},
   Pages = {200-+},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   ISBN = {978-1-4673-3144-9},
   Key = {fds326900}
}

@article{fds332945,
   Author = {Cnaan-On, I and Harms, A and Krolik, JL and Calderbank,
             AR},
   Title = {Run-length limited codes for backscatter
             communication},
   Journal = {ICASSP, IEEE International Conference on Acoustics, Speech
             and Signal Processing - Proceedings},
   Pages = {6110-6114},
   Publisher = {IEEE},
   Year = {2017},
   Month = {June},
   ISBN = {9781509041176},
   url = {http://dx.doi.org/10.1109/ICASSP.2017.7953330},
   Abstract = {In backscatter communications, ultra-low power devices
             signal by modulating the reflection of radio frequency
             signals emitted from an external source. Unlike conventional
             one-way communication, the backscatter channel experiences
             unique self-interference and spread Doppler clutter.
             Run-length limited (RLL) codes provide a method for spectrum
             shaping that requires no hardware changes to the
             communicating devices. The proposed coding framework is
             suitable for any arbitrarily-shaped pulse train or
             continuous wave reader waveform. It exploits the unique
             channel Doppler spread statistics to offer a trade-off
             between interference rejection and data rate. Analysis shows
             that code rates of 1 and 4/5 are achievable when dealing
             with low spread Doppler channels, an improvement over the
             rate 1/2 used by current mainstream backscatter
             communication techniques. Simulation results
             with realistic channel assumptions are analyzed and
             discussed to confirm the theoretical analysis.},
   Doi = {10.1109/ICASSP.2017.7953330},
   Key = {fds332945}
}
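
%% The run-length-limited idea in this abstract is easy to illustrate: bounding
%% how long the transmitted level can stay constant pushes signal energy away
%% from DC and slow clutter. The checker and the classic rate-1/2 Manchester line
%% code below are generic background for that idea, not the paper's rate-1 or
%% rate-4/5 constructions.

   def max_run(bits):
       longest = run = 1
       for prev, cur in zip(bits, bits[1:]):
           run = run + 1 if cur == prev else 1
           longest = max(longest, run)
       return longest

   def manchester(data_bits):
       """Rate-1/2 line code: 0 -> 01, 1 -> 10, so runs never exceed 2."""
       return [b for bit in data_bits for b in ((0, 1) if bit == 0 else (1, 0))]

   data = [0, 0, 0, 1, 1, 0, 1, 1, 1, 1]
   print(max_run(data))                # 4: long runs in the raw data
   print(max_run(manchester(data)))    # 2: bounded after rate-1/2 RLL encoding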

@article{fds331057,
   Author = {Calderbank, AR and Fishburn, PC and Rabinovich,
             A},
   Title = {Sequence based methods for data transmission and source
             compression},
   Journal = {Lecture Notes in Computer Science (including subseries
             Lecture Notes in Artificial Intelligence and Lecture Notes
             in Bioinformatics)},
   Volume = {673 LNCS},
   Pages = {1-12},
   Year = {1993},
   Month = {January},
   ISBN = {9783540566861},
   url = {http://dx.doi.org/10.1007/3-540-56686-4_29},
   Abstract = {In the last 10 years the invention of trellis coded
             modulation has revolutionized communication over bandlimited
             channels and is starting to be used in magnetic storage.
             Part of the reason is that sophisticated signal processing
             systems involving finite state machines can now be
             fabricated inexpensively. This paper discusses new
             developments in the performance analysis of finite state
             machines. This is the extended abstract of an invited
             lecture to be given at the 10th International Symposium on
             Applied Algebra, Algebraic Algorithms, and Error-Correcting
             Codes, Puerto Rico, May 10-14, 1993.},
   Doi = {10.1007/3-540-56686-4_29},
   Key = {fds331057}
}

@article{fds326899,
   Author = {Harms, A and Bajwa, WU and Calderbank, R},
   Title = {Shaping the Power Spectra of Bipolar Sequences with
             Application to Sub-Nyquist Sampling},
   Journal = {2013 IEEE 5TH INTERNATIONAL WORKSHOP ON COMPUTATIONAL
             ADVANCES IN MULTI-SENSOR ADAPTIVE PROCESSING (CAMSAP
             2013)},
   Pages = {236-+},
   Publisher = {IEEE},
   Year = {2013},
   Month = {January},
   ISBN = {978-1-4673-3144-9},
   Key = {fds326899}
}

@article{fds326907,
   Author = {Naguib, AF and Calderbank, R},
   Title = {Space-time coding and signal processing for high data rate
             wireless communications},
   Journal = {WIRELESS COMMUNICATION TECHNOLOGIES: NEW MULTIMEDIA
             SYSTEMS},
   Volume = {564},
   Pages = {23-59},
   Year = {2000},
   ISBN = {0-7923-7900-4},
   Key = {fds326907}
}

@article{fds335321,
   Author = {Thompson, A and Calderbank, R},
   Title = {Sparse near-equiangular tight frames with applications in
             full duplex wireless communication},
   Journal = {2017 IEEE Global Conference on Signal and Information
             Processing, GlobalSIP 2017 - Proceedings},
   Volume = {2018-January},
   Pages = {868-872},
   Publisher = {IEEE},
   Year = {2018},
   Month = {March},
   ISBN = {9781509059904},
   url = {http://dx.doi.org/10.1109/GlobalSIP.2017.8309084},
   Abstract = {We construct extremely sparse, near-equiangular tight frames
             which share the same row space as certain incomplete
             Delsarte-Goethals frames. Frames combining these properties
             have application in full duplex communication in ad-hoc
             wireless networks. We highlight their computational
             advantage over similar constructions of sparse equiangular
             tight frames: namely that their associated matrix-vector
             products can be implemented as a fast transform.},
   Doi = {10.1109/GlobalSIP.2017.8309084},
   Key = {fds335321}
}

@article{fds326908,
   Author = {Calderbank, R and Fishburn, P and Siegel, P},
   Title = {State-space characterization of viterbi detector path metric
             differences},
   Journal = {Conference Record - Asilomar Conference on Signals, Systems
             and Computers},
   Pages = {940-944},
   Year = {1992},
   Month = {January},
   ISBN = {0818631600},
   url = {http://dx.doi.org/10.1109/ACSSC.1992.269082},
   Abstract = {In the digital implementations of maximum-likelihood
             detectors based upon the Viterbi algorithm, bounds on the
             values of path metric differences are important parameters,
             and various techniques have been proposed for computing such
             bounds. This paper addresses the more general problem of
             characterizing the entire space of path metric differences,
             achievable from a given initial state, and calculating the
             probability density for the differences as a function of the
             distribution of the (noisy) channel output samples. Explicit
             results are given for two examples of interest in digital
             recording.},
   Doi = {10.1109/ACSSC.1992.269082},
   Key = {fds326908}
}

@article{fds331054,
   Author = {Tarokh, V and Jafarkhani, H and Calderbank, AR},
   Title = {The application of orthogonal designs to wireless
             communication},
   Journal = {1998 Information Theory Workshop, ITW 1998},
   Pages = {46-47},
   Publisher = {IEEE},
   Year = {1998},
   Month = {January},
   ISBN = {9780780344082},
   url = {http://dx.doi.org/10.1109/ITW.1998.706408},
   Abstract = {We introduce space-block coding, a new paradigm for
             transmission over Rayleigh fading channels using multiple
             transmit antennas. Data is encoded using a space-block code
             and the encoded data is split into n streams which are
             simultaneously transmitted using n transmit antennas. The
             received signal at each receive antenna is a linear
             superposition of the n transmitted signals perturbed by
             noise. Decoding is achieved in a simple way using the
             orthogonal structure of the space-block code, and the
             maximum likelihood decoding algorithm is based entirely on
             linear processing at the receiver. Space-block codes are
             designed to achieve the maximum diversity gain of transmit
             and receive antennas with the constraint of having a simple
             decoding algorithm. It is shown that the classical
             mathematical framework of orthogonal designs can be applied
             to construct channel codes which have a simple decoding
             algorithm, while providing the full spatial diversity
             order. Space-block codes constructed in this way only exist
             for a few sporadic values of n, and therefore there is a
             need for a new mathematical theory. In this light, we
             introduce the theory of Generalized Designs, which provides
             codes for both real and complex constellations for any
             number of transmit antennas. Using this theory, we construct
             space-block codes that achieve the maximum possible
             transmission rate for any number of transmit antennas using
             any arbitrary real constellation such as PAM. For any
             arbitrary complex constellation such as PSK and QAM, we
             construct space-block codes that achieve half of the maximum
             possible transmission rate for any number of transmit
             antennas. For the specific cases of two, three and four
             transmit antennas, we provide space-block codes that achieve
             respectively the whole, 3/4 and 3/4 of the maximum possible
             transmission rate using arbitrary complex constellations.
             The best trade-off between the decoding delay and the number
             of transmit antennas is also computed, and it is shown that
             the designed codes are optimal in this sense as well.
             Various fundamental problems are posed which are both
             mathematically appealing and have immediate application to
             the design of a physical layer for wireless communication
             systems.},
   Doi = {10.1109/ITW.1998.706408},
   Key = {fds331054}
}
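
%% The simplest instance of the orthogonal-design construction described above is
%% the two-antenna code (the Alamouti scheme): for any complex symbols the code
%% matrix has orthogonal columns, which is what lets maximum likelihood decoding
%% reduce to linear processing. The check below is standard background, stated
%% with invented symbol values.

   import numpy as np

   def two_antenna_block(s1, s2):
       """Rows are time slots, columns are transmit antennas."""
       return np.array([[s1, s2],
                        [-np.conj(s2), np.conj(s1)]])

   s1, s2 = 1 + 1j, -1 + 3j                        # arbitrary complex (e.g. QAM) symbols
   C = two_antenna_block(s1, s2)
   gram = C.conj().T @ C
   print(np.allclose(gram, (abs(s1) ** 2 + abs(s2) ** 2) * np.eye(2)))   # True: columns orthogonal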

@article{fds322369,
   Author = {Ashikhmin, A and Calderbank, AR},
   Title = {The first order space-time reed-muller codes},
   Journal = {43rd Annual Allerton Conference on Communication, Control
             and Computing 2005},
   Volume = {2},
   Pages = {1086-1095},
   Year = {2005},
   Month = {January},
   ISBN = {9781604234916},
   Abstract = {We present a family of Space-Time codes for the noncoherent
             MIMO channel. These space-time codes are strongly related to
             standard binary first order Reed-Muller codes. In particular,
             their decoder can be built from several parallel decoders of
             first order Reed-Muller codes. We show that these codes
             allow transmission with rates close to the MIMO noncoherent
             unitary space-time codes capacity in the low signal to noise
             ratio (SNR) regime.},
   Key = {fds322369}
}

@article{fds326886,
   Author = {Vahid, A and Calderbank, R},
   Title = {When does spatial correlation add value to delayed channel
             state information?},
   Journal = {IEEE International Symposium on Information Theory -
             Proceedings},
   Volume = {2016-August},
   Pages = {2624-2628},
   Publisher = {IEEE},
   Year = {2016},
   Month = {August},
   ISBN = {9781509018062},
   url = {http://dx.doi.org/10.1109/ISIT.2016.7541774},
   Abstract = {Fast fading wireless networks with delayed knowledge of the
             channel state information have received significant
             attention in recent years. An exception is networks where
             channels are spatially correlated. This paper characterizes
             the capacity region of two-user erasure interference
             channels with delayed knowledge of the channel state
             information and spatially correlated channels. There are
             instances where spatial correlation eliminates any potential
             gain from delayed channel state information and instances
             where it enables the same performance that is possible with
             instantaneous knowledge of channel state. The key is an
             extremal entropy inequality for spatially correlated
             channels that separates the two types of instances. It is
             also shown that to achieve the capacity region, each
             transmitter only needs to rely on the delayed knowledge of
             the channels to which it is connected.},
   Doi = {10.1109/ISIT.2016.7541774},
   Key = {fds326886}
}

@article{fds326757,
   Author = {Eslami, A and Velasco, A and Vahid, A and Mappouras, G and Calderbank,
             R and Sorin, DJ},
   Title = {Writing without disturb on phase change memories by
             integrating coding and layout design},
   Journal = {ACM International Conference Proceeding Series},
   Volume = {05-08-October-2015},
   Pages = {71-77},
   Publisher = {ACM Press},
   Year = {2015},
   Month = {October},
   ISBN = {9781450336048},
   url = {http://dx.doi.org/10.1145/2818950.2818962},
   Abstract = {We integrate coding techniques and layout design to
             eliminate write-disturb in phase change memories (PCMs),
             while enhancing lifetime and host-visible capacity. We first
             propose a checkerboard configuration for cell layout to
             eliminate write-disturb while doubling the memory lifetime.
             We then introduce two methods to jointly design
             Write-Once-Memory (WOM) codes and layout. The first
             WOM-layout design improves the lifetime by more than double
             without compromising the host-visible capacity. The second
             design applies WOM codes to even more dense layouts to
             achieve both lifetime and capacity gains. The constructions
             demonstrate that substantial improvements to lifetime and
             host-visible capacity are possible by co-designing coding
             and cell layout in PCM.},
   Doi = {10.1145/2818950.2818962},
   Key = {fds326757}
}